diff --git a/.gitattributes b/.gitattributes index ec95814a5795910a2057d535a5cb8f3d90af66f6..042bfe2a6d2998931fc023236a1521c324f17427 100644 --- a/.gitattributes +++ b/.gitattributes @@ -5640,3 +5640,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2025/SimMotionEdit_[[:space:]]Text-Based[[:space:]]Human[[:space:]]Motion[[:space:]]Editing[[:space:]]with[[:space:]]Motion[[:space:]]Similarity[[:space:]]Prediction/35d364b8-3b24-4d25-b0d5-f4f2de5e4e95_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/SimVS_[[:space:]]Simulating[[:space:]]World[[:space:]]Inconsistencies[[:space:]]for[[:space:]]Robust[[:space:]]View[[:space:]]Synthesis/dfa0e139-5a4d-4319-9718-259db56b3f39_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/Similarity-Guided[[:space:]]Layer-Adaptive[[:space:]]Vision[[:space:]]Transformer[[:space:]]for[[:space:]]UAV[[:space:]]Tracking/c334a2a6-9747-41af-b8b6-5b13b27ea619_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/(ML)$^2$P-Encoder_[[:space:]]On[[:space:]]Exploration[[:space:]]of[[:space:]]Channel-Class[[:space:]]Correlation[[:space:]]for[[:space:]]Multi-Label[[:space:]]Zero-Shot[[:space:]]Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/1%[[:space:]]VS[[:space:]]100%_[[:space:]]Parameter-Efficient[[:space:]]Low[[:space:]]Rank[[:space:]]Adapter[[:space:]]for[[:space:]]Dense[[:space:]]Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/1000[[:space:]]FPS[[:space:]]HDR[[:space:]]Video[[:space:]]With[[:space:]]a[[:space:]]Spike-RGB[[:space:]]Hybrid[[:space:]]Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/2PCNet_[[:space:]]Two-Phase[[:space:]]Consistency[[:space:]]Training[[:space:]]for[[:space:]]Day-to-Night[[:space:]]Unsupervised[[:space:]]Domain[[:space:]]Adaptive[[:space:]]Object[[:space:]]Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Cinemagraphy[[:space:]]From[[:space:]]a[[:space:]]Single[[:space:]]Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Concept[[:space:]]Learning[[:space:]]and[[:space:]]Reasoning[[:space:]]From[[:space:]]Multi-View[[:space:]]Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]GAN[[:space:]]Inversion[[:space:]]With[[:space:]]Facial[[:space:]]Symmetry[[:space:]]Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Highlighter_[[:space:]]Localizing[[:space:]]Regions[[:space:]]on[[:space:]]3D[[:space:]]Shapes[[:space:]]via[[:space:]]Text[[:space:]]Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Human[[:space:]]Keypoints[[:space:]]Estimation[[:space:]]From[[:space:]]Point[[:space:]]Clouds[[:space:]]in[[:space:]]the[[:space:]]Wild[[:space:]]Without[[:space:]]Human[[:space:]]Labels/833a9b3e-a176-4092-b5fd-3122723612f3_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Human[[:space:]]Mesh[[:space:]]Estimation[[:space:]]From[[:space:]]Virtual[[:space:]]Markers/067f420e-7fdc-4668-8983-b6715ae47be7_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation[[:space:]]With[[:space:]]Spatio-Temporal[[:space:]]Criss-Cross[[:space:]]Attention/54678f96-220e-4220-837c-0b75958caa1b_origin.pdf filter=lfs diff=lfs merge=lfs -text 
+2023/3D[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation[[:space:]]via[[:space:]]Intuitive[[:space:]]Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Line[[:space:]]Mapping[[:space:]]Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Neural[[:space:]]Field[[:space:]]Generation[[:space:]]Using[[:space:]]Triplane[[:space:]]Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Registration[[:space:]]With[[:space:]]Maximal[[:space:]]Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Semantic[[:space:]]Segmentation[[:space:]]in[[:space:]]the[[:space:]]Wild_[[:space:]]Learning[[:space:]]Generalized[[:space:]]Models[[:space:]]for[[:space:]]Adverse-Condition[[:space:]]Point[[:space:]]Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Shape[[:space:]]Reconstruction[[:space:]]of[[:space:]]Semi-Transparent[[:space:]]Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Spatial[[:space:]]Multimodal[[:space:]]Knowledge[[:space:]]Accumulation[[:space:]]for[[:space:]]Scene[[:space:]]Graph[[:space:]]Prediction[[:space:]]in[[:space:]]Point[[:space:]]Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Video[[:space:]]Loops[[:space:]]From[[:space:]]Asynchronous[[:space:]]Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D[[:space:]]Video[[:space:]]Object[[:space:]]Detection[[:space:]]With[[:space:]]Learnable[[:space:]]Object-Centric[[:space:]]Global[[:space:]]Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-Aware[[:space:]]Conditional[[:space:]]Image[[:space:]]Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-Aware[[:space:]]Face[[:space:]]Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-Aware[[:space:]]Facial[[:space:]]Landmark[[:space:]]Detection[[:space:]]via[[:space:]]Multi-View[[:space:]]Consistent[[:space:]]Training[[:space:]]on[[:space:]]Synthetic[[:space:]]Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-Aware[[:space:]]Multi-Class[[:space:]]Image-to-Image[[:space:]]Translation[[:space:]]With[[:space:]]NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-Aware[[:space:]]Object[[:space:]]Goal[[:space:]]Navigation[[:space:]]via[[:space:]]Simultaneous[[:space:]]Exploration[[:space:]]and[[:space:]]Identification/e3176243-c1cd-415f-8bca-116983524509_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3D-POP[[:space:]]-[[:space:]]An[[:space:]]Automated[[:space:]]Annotation[[:space:]]Approach[[:space:]]to[[:space:]]Facilitate[[:space:]]Markerless[[:space:]]2D-3D[[:space:]]Tracking[[:space:]]of[[:space:]]Freely[[:space:]]Moving[[:space:]]Birds[[:space:]]With[[:space:]]Marker-Based[[:space:]]Motion[[:space:]]Capture/5371de19-661e-4e67-a303-36ffc7847ea6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/3DAvatarGAN_[[:space:]]Bridging[[:space:]]Domains[[:space:]]for[[:space:]]Personalized[[:space:]]Editable[[:space:]]Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_origin.pdf filter=lfs diff=lfs 
merge=lfs -text +2023/3Mformer_[[:space:]]Multi-Order[[:space:]]Multi-Mode[[:space:]]Transformer[[:space:]]for[[:space:]]Skeletal[[:space:]]Action[[:space:]]Recognition/59904744-5656-40cd-af70-98473e4f87a7_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/A[[:space:]]Bag-of-Prototypes[[:space:]]Representation[[:space:]]for[[:space:]]Dataset-Level[[:space:]]Applications/f45f628e-fe49-4cb9-b5bd-808953724624_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/A[[:space:]]Characteristic[[:space:]]Function-Based[[:space:]]Method[[:space:]]for[[:space:]]Bottom-Up[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/A[[:space:]]Data-Based[[:space:]]Perspective[[:space:]]on[[:space:]]Transfer[[:space:]]Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/A[[:space:]]Dynamic[[:space:]]Multi-Scale[[:space:]]Voxel[[:space:]]Flow[[:space:]]Network[[:space:]]for[[:space:]]Video[[:space:]]Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_origin.pdf filter=lfs diff=lfs merge=lfs -text +2023/A[[:space:]]General[[:space:]]Regret[[:space:]]Bound[[:space:]]of[[:space:]]Preconditioned[[:space:]]Gradient[[:space:]]Method[[:space:]]for[[:space:]]DNN[[:space:]]Training/a806573e-912a-4e15-8891-1f914fce477d_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_content_list.json b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5260d8dbcb01ebc1d451edc0f9ec549f3434cc41 --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_content_list.json @@ -0,0 +1,1555 @@ +[ + { + "type": "text", + "text": "$(\\mathbf{ML})^{2}\\mathbf{P}$ -Encoder: On Exploration of Channel-class Correlation for Multi-label Zero-shot Learning", + "text_level": 1, + "bbox": [ + 88, + 128, + 880, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ziming Liu1, Song Guo1,2, Xiaocheng Lu1, Jingcai Guo1,2*, Jiewei Zhang1, Yue Zeng1, Fushuo Huo1 \n1Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China \n2The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China", + "bbox": [ + 91, + 202, + 875, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{ziming.liu, jiewei.zhang, fushuo.huo}@connect.polyu.hk {song.quo, xiaoclu, jc-jingcai.quo, zengyue.zeng}@polyu.edu.hk", + "bbox": [ + 210, + 258, + 759, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 325, + 313, + 343 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent studies usually approach multi-label zero-shot learning (MLZSL) with visual-semantic mapping on spatial-class correlation, which can be computationally costly, and worse still, fails to capture fine-grained class-specific semantics. We observe that different channels may usually have different sensitivities on classes, which can correspond to specific semantics. Such an intrinsic channel-class correlation suggests a potential alternative for the more accurate and class-harmonious feature representations. 
In this paper, our interest is to fully explore the power of channel-class correlation as the unique base for MLZSL. Specifically, we propose a light yet efficient Multi-Label MultiLayer Perceptron-based Encoder, dubbed $(ML)^{2}P$ -Encoder, to extract and preserve channel-wise semantics. We reorganize the generated feature maps into several groups, of which each of them can be trained independently with $(ML)^{2}P$ -Encoder. On top of that, a global group-wise attention module is further designed to build the multilabel specific class relationships among different classes, which eventually fulfills a novel Channel-Class Correlation MLZSL framework $(C^{3}$ -MLZSL). Extensive experiments on large-scale MLZSL benchmarks including NUS-WIDE and Open-Images-V4 demonstrate the superiority of our model against other representative state-of-the-art models.", + "bbox": [ + 76, + 358, + 473, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 750, + 209, + 766 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The proliferation of smart devices has greatly enriched human life when it comes to the era of big data. These smart devices are usually equipped with cameras such that users can easily produce and share their images. With the increasing abundance of public images, how to analyze them accurately has become a challenging problem. Recent years", + "bbox": [ + 75, + 773, + 468, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/012ebce2bca2908a1ed88b5b724c2627dd75ef1002f2138229d4a579b04f8c43.jpg", + "image_caption": [ + "Figure 1. Example of Channel-Class Correlation. Our method achieves the prediction of unseen classes by exploiting the unique distribution of channel responses as semantic information for the class and building correlations with responses from the same channel (zoom in for a better view)." + ], + "image_footnote": [], + "bbox": [ + 532, + 329, + 852, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "have witnessed great success in classifying an image into a specific class [20, 37, 39], namely, single-label classification. However, in reality, the images [17,46] usually contain abundant information and thereby consist of multiple labels.", + "bbox": [ + 496, + 645, + 893, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, the multi-label classification has been widely investigated by exploring the relationship among different labels from multiple aspects [9, 13, 14, 16, 42]. However, in some scenarios where extensive collections of images exist, e.g., Flickr $^2$ , users can freely set one or more individual tags/labels for each image, while the presented objects and labels in these images may not be fully shown in any previous collection, and thus result in a domain gap for the recognition. Therefore, in real-world applications, the model is required to gain the ability to predict unseen classes as well. As one of the thriving research topics, zero-", + "bbox": [ + 496, + 707, + 893, + 875 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Jingcai Guo is the corresponding author. \n1Released code: github.com/simonzmliu/cvpr23_mlzsl", + "bbox": [ + 93, + 875, + 452, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://www.flickr.com", + "bbox": [ + 517, + 886, + 650, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "23859", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "shot learning (ZSL) [1, 12, 15, 34] is designed to transfer tasks from seen classes to unseen classes, and naturally recognizes novel objects of unseen classes. Specifically, ZSL has made continuous success in single-label classification [19, 26, 31, 45, 48]. However, these methods can hardly be extended to the multi-label scenario since exploring the cross-class relationships in an image is non-trivial.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, some works have focused on multi-label zero-shot learning (MLZSL) tasks and obtained some promising results [33, 36, 49]. Other works considered incorporating attention mechanisms into their models, such as $LESA$ [22] and $BiAM$ [35]. $LESA$ [22] designed an attention-sharing mechanism for different patches in the image so that each patch can output the corresponding class. In another way, $BiAM$ [35] designed a bi-level attention to extract relations from regional context and scene context, which can enrich the regional features of the model and separate the features of different classes.", + "bbox": [ + 75, + 199, + 468, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although previous works have made considerable progress, their designed methods have been limited to the processing of spatial-domain information. First of all, the over-reliance on spatial-class correlation fails to capture fine-grained class-specific semantics. In addition, the additional processing of spatial information greatly increases the computational cost of the model and limits the inference speed. Given the shortcomings of the above methods, we found through analysis that the channel response can be used as the semantic information of the class. Firstly, the response of each class in the channel is unique, which creates conditions for obtaining the unique semantics. Secondly, for classes with certain semantic associations, there must be some channels that capture their common information. Therefore, channel information, as an easily overlooked part after feature extraction, can complete the task of capturing multi-label information. In MLZSL, we can complete the prediction of unseen classes by obtaining the responses of seen classes in the channel domain, and the relationship between seen and unseen classes. Finally, the subsequent analysis of the channel response greatly saves computational costs.", + "bbox": [ + 75, + 369, + 470, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, as shown in Figure 1, as seen classes, \"water\" and \"tree\" have unique response distributions on feature channels, and these responses can be used as semantic information for classification tasks. 
Besides, in order to explore the correlation of classes, we found that although the semantic information of \"water\" and \"tree\" is different, there are still some channels that respond simultaneously (i.e. the blue channel). We need to build this correlation during the training process through modeling so that the model can learn multi-label correlations. In the ZSL process, for the unseen class \"garden\", we know that it is related to \"water\" (i.e. purple layer) and \"tree\" (i.e. green, orange, and gray layer) by obtaining its semantic information and matching", + "bbox": [ + 75, + 704, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with seen classes. This observation suggests that channels can help not only to classify objects but also to establish associations between classes. Previous methods which only consider spatial information are unable to obtain this intrinsic channel-class correlation and dissimilarity, thus achieving sub-optimal performance on the MLZSL task.", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above challenges and construct a more accurate and robust MLZSL system, we propose to group the generated feature maps and process them in a group-wise manner, thus enhancing the model by fully exploring the channel-class correlations. Besides, by properly designing a light yet efficient Multi-Label Multi-Layer Perceptron-based Encoder, i.e., $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder, we can easily analyze the local relationship between channels while significantly reducing the computation overhead. Finally, these groups are recombined and then perform the calculation of group attention, indicating that the model is analyzed locally and globally from the perspective of the channels, which can ensure the integrity of the representation.", + "bbox": [ + 496, + 181, + 892, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are four-fold:", + "bbox": [ + 519, + 378, + 812, + 393 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. To the best of our knowledge, our method first suggests the concept of channel-class correlation in MLZSL, and proposes a channel-sensitive attention module $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder to extract and preserve channel-wise semantics for channel groups.", + "2. Different from previous works that use spatial-class correlation to extract global and local features, we alternatively explore the channel-class correlation as the unique base for MLZSL.", + "3. In conjunction with $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder, a global group-wise attention is also designed to establish the multi-label specific class relationships among classes.", + "4. Extensive experiments on large-scale datasets NUS-WIDE and Open-Images-V4 demonstrate the effectiveness of our method against other state-of-the-art models." + ], + "bbox": [ + 511, + 402, + 890, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 686, + 640, + 702 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Multi-Label Classification", + "text_level": 1, + "bbox": [ + 500, + 710, + 735, + 726 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The establishment of graph neural networks (GNNs) brings remarkable success to multi-label classification tasks [8, 25]. Among them, Chen et al. 
[8] constructs directed graphs for object labels and uses graph convolutional networks (GCN) to map label nodes, which contain word embeddings, into classifiers. In addition, the CNN-based multi-label classification models enable the learning of the characteristics of each label from the spatial information of the image and design a new multi-label classifier [13, 14, 16, 17, 42, 43, 46]. Gao et al. [16] suggests a two-stream framework to identify global and local information", + "bbox": [ + 496, + 734, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "23860", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5c95f6e5e4e83008579a6320d2bbd777ec3c50f5b321ab9aef414704b93cb307.jpg", + "image_caption": [ + "Figure 2. Pipeline for $\\mathbf{C}^3$ -MLZSL. The input image is first passed through the feature extraction network (eg. VGG19), and then multi-layer feature maps are extracted through the Forward Pyramid module. After the feature maps are shuffled and grouped, each group uses $(\\mathbf{ML})^{2}\\mathbf{P}$ -Encoder to extract semantic information. Then, the semantic information generated by all groups is associated through Group Attention to generate the final semantic matrix $\\mathcal{S}$ (zoom in for a better view)." + ], + "image_footnote": [], + "bbox": [ + 93, + 90, + 877, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "separately and a multi-class regional attention module to align them. However, the above methods cannot generalize to unseen classes.", + "bbox": [ + 75, + 377, + 470, + 421 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Zero-Shot Learning", + "text_level": 1, + "bbox": [ + 76, + 433, + 267, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Zero-shot learning provides a solution to recognize unseen classes. Current studies mostly consider a relatively simple single-label scenario [4, 6, 26, 30, 32, 47, 50, 51]. In practice, existing methods usually focus on finding the main semantic information of training images, and then exploit the semantic relationship, i.e., word vectors [15, 38, 44, 45] or attribute vectors [3, 27, 28], between seen and unseen classes for prediction. The generated semantic information can be inferred from seen to unseen labels by comparing the similarity of the relation vectors between them. For example, Chen et al. [7] proposes a generative flow framework and uses a combinatorial strategy to solve the problems of semantic inconsistency, variance collapse, and structural disorder in zero-shot learning. Gune et al. [18] generates visual proxy samples to simulate the average entropy of the label distribution of the unseen class. However, the above methods only predict single labels with a single representation of images, which can hardly generalize to a more realistic multi-label scenario.", + "bbox": [ + 75, + 457, + 473, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Multi-Label Zero-Shot Learning", + "text_level": 1, + "bbox": [ + 76, + 755, + 364, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Multi-label zero-shot learning has received increasing attention recently. For example, Norouzi et al. [36] designs two separate spaces, i.e., the image and semantic embedding spaces, jointly with the convex combination of the label embedding vectors to achieve multi-label recognition in the zero-shot learning framework. Zhang et al. 
[49] proposes a fast and general model based on the fact that the word vectors of the relevant labels are ranked before", + "bbox": [ + 75, + 780, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the irrelevant word vectors in the main vector of the image. Different from the above methods, Lee et al. [29] uses the knowledge graph to connect different labels. In recent years, attention-based methods become the mainstream. For example, LESA [22] applies an attention-sharing mechanism to the multi-label environment, allowing the model to focus on the key areas of each label. Narayan et al. [35] uses a bi-layer attention module to combine global context information and local features and map the generated information to the semantic space. However, the above methods only stay at the two-dimensional space level $(H\\times W)$ , and do not consider the response between different feature channels with respect to classes.", + "bbox": [ + 496, + 377, + 893, + 574 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methods", + "text_level": 1, + "bbox": [ + 500, + 590, + 599, + 607 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Problem Setting", + "text_level": 1, + "bbox": [ + 500, + 618, + 661, + 635 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Before proposing our method, we first explain the definition of the MLZSL problem. Given $n$ input samples $\\{(I_1,Y_1),\\ldots ,(I_i,Y_i),\\ldots ,(I_n,Y_n)\\}$ , where $I_{i}$ represents the input image of the $i$ -th train-set, and $Y_{i}$ represents the training labels corresponding to the input images, which are also called 'seen labels'. On the label distribution, let us set the seen label in the dataset as $C_s$ , where the seen label refers to the label known by the model. $C_s$ is mainly used for the train-set of the model in zero-shot learning. We set the unseen label to $C_u$ , and the unseen label is generally used in the test-set. The label relationship in the dataset is defined as $\\mathcal{C} = \\mathcal{C}_s\\cup \\mathcal{C}_u$ , where $\\mathcal{C}$ represents the set of all labels in the dataset. Based on the above definition, after the model is trained on the train-set, in the testing part of MLZSL, given the image $I_{u}$ , the model can output the prediction result $y_{u}\\subset C_{u}$ . While in the generalized zero-shot learning task, given an image $I_{u}$ , the output of the model is", + "bbox": [ + 496, + 643, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "23861", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$y_{u} \\subset \\mathcal{C}$ , which means the model needs to output both the seen label and the unseen label that exist in the image.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. $(\\mathbf{ML})^{2}\\mathbf{P}$ -Encoder", + "text_level": 1, + "bbox": [ + 76, + 128, + 243, + 145 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed network structure is shown in Figure 2. For input images $I$ , we first use a pre-trained feature extraction network to obtain the corresponding image features $\\mathcal{F}$ . We extract the features from the last three layers of the feature extraction network, and keep the two layers with the larger size consistent with the smallest size layer by downsampling. 
For example, assuming that the used and training network is VGG19 [37], the size of the last three layers of feature maps is $\\{28 \\times 28, 14 \\times 14, 7 \\times 7\\}$ . We use max-pooling to down-sample the large-scale feature maps to obtain equivalent $7 \\times 7$ feature maps. This step is called the \"Forward Pyramid\". After that, we obtain feature maps at different levels with the same scale. Then we randomly shuffle them to get the feature map $\\mathcal{F}_a$ and re-group them into $g$ different groups, each group has $d_w$ channels, which is the same length as the word vectors in the ground-truth semantic space. The purpose of this operation is to generate specific semantic vectors to express the semantic information contained in each group.", + "bbox": [ + 75, + 152, + 468, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, the features of each group are fed into $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder. First, we need to calculate the correlation between channels within each group. In traditional self-attention, the cost of computation greatly consumes the inference speed of the model, and the traditional self-attention module cannot accurately reflect the relationship between each channel. To solve the loss caused by the amount of calculation and accurately reflect the channel correlation, we designed a new self-attention structure to achieve this.", + "bbox": [ + 75, + 439, + 468, + 574 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For features $\\mathcal{F}_a$ in group $i$ , which is $\\mathcal{F}_a^i \\in \\mathbb{R}^{H \\times W \\times d_w}$ . We first generate Query (Q), Value (V) and Key (K) through three convolution operations:", + "bbox": [ + 76, + 574, + 468, + 621 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} = W _ {p} ^ {Q} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {K} = W _ {p} ^ {K} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {V} = W _ {p} ^ {V} \\mathcal {F} _ {a} ^ {i} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 125, + 631, + 468, + 651 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $W_{p}^{(\\cdot)}$ means the convolution operation. Next, to obtain the channel correlation matrix $\\mathcal{R}$ , we reshape $\\mathbf{Q},\\mathbf{K}$ and $\\mathbf{V}$ in the spatial domain $(H\\times W)$ to get $\\widehat{\\mathbf{Q}}\\in \\mathbb{R}^{HW\\times d_w}$ , $\\widehat{\\mathbf{K}}\\in \\mathbb{R}^{d_w\\times HW}$ and $\\widehat{\\mathbf{V}}\\in \\mathbb{R}^{HW\\times d_w}$ . Then perform a dot product operation on $\\mathbf{Q}$ and $\\mathbf{K}$ to obtain the channel correlation matrix $\\mathcal{R}\\in \\mathbb{R}^{d_w\\times d_w}$ . 
After that, we do the dot product between $\\mathcal{R}$ and $\\mathbf{V}$ , finally, add with the input $\\mathcal{F}_a^i$ to get the output $\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}$ :", + "bbox": [ + 76, + 662, + 468, + 786 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) = \\widehat {\\mathbf {V}} \\cdot \\underset {\\mathcal {R}} {\\operatorname {s o f t m a x}} (\\underbrace {\\widehat {\\mathbf {K}} \\cdot \\widehat {\\mathbf {Q}}} _ {\\mathcal {R}}) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 147, + 795, + 468, + 832 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\widehat {\\mathcal {F}} _ {a} ^ {i} = \\mathcal {F} _ {a} ^ {i} + \\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 844, + 468, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After enhancing the correlation between channels, we need to extract and analyze the feature information contained in", + "bbox": [ + 76, + 869, + 468, + 898 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "each channel. We reshape the information in the spatial domain into a one-dimensional vector, then we decide to use the Multi-Layer Perceptron (MLP) to encode the features. Compared with the traditional convolution structure, the MLP structure is convenient to perform information fusion between local regions. Specifically, for the input feature $\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}$ , we first change the dimension from $H\\times W\\times d_w$ to $\\mathcal{F}_{mlp}^i\\in \\mathbb{R}^{d_w\\times HW}$ , then we use LayerNorm to normalize the input. Our MLP structure includes two different MLPs: MLP1 is used to extract the spatial information contained in each channel, and MLP2 is proposed to obtain local information of different channels in the spatial domain:", + "bbox": [ + 496, + 90, + 890, + 286 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} _ {m l p 1} ^ {i} = \\mathcal {F} _ {m l p} ^ {i} + \\mathbf {W} _ {2} \\sigma \\left(\\mathbf {W} _ {1} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p} ^ {i}\\right)\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 295, + 890, + 314 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {M} = \\mathcal {F} _ {m l p 1} ^ {i} + \\mathbf {W} _ {4} \\sigma \\left(\\mathbf {W} _ {3} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p 1} ^ {i}\\right)\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 537, + 321, + 890, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{F}_{mlp1}^i$ is the output after MLP1. $\\mathbf{W}_1$ , $\\mathbf{W}_2$ is the parameter of MLP1, and $\\mathbf{W}_3$ , $\\mathbf{W}_4$ is the parameter of MLP2. $\\sigma$ is an element-wise non-linearity GELU [21]. Then we use max-pooling to filter out the best semantic vector in the spatial domain, which can more accurately represent the semantic information of this group. This max-pooling operation is also to be able to directly extract the channel response. 
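To make Eqs. (1)-(5) concrete, a minimal PyTorch-style sketch of the per-group channel attention followed by the two residual MLPs and spatial max-pooling is given below. This is an illustrative reading of the equations, not the authors' released implementation: the 1x1-convolution projections, the MLP hidden width, and d_w = 300 (a typical word-vector dimension) are assumptions.

import torch
import torch.nn as nn


class ChannelSelfAttention(nn.Module):
    """Channel-wise self-attention of Eqs. (1)-(3): R = softmax(K_hat @ Q_hat), shape (d_w, d_w)."""

    def __init__(self, d_w: int):
        super().__init__()
        # W_p^Q, W_p^K, W_p^V; 1x1 convolutions are assumed here
        self.to_q = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.to_k = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.to_v = nn.Conv2d(d_w, d_w, kernel_size=1)

    def forward(self, x):
        # x: one channel group F_a^i, shape (B, d_w, H, W)
        b, c, h, w = x.shape
        q = self.to_q(x).flatten(2).transpose(1, 2)   # Q_hat: (B, HW, d_w)
        k = self.to_k(x).flatten(2)                   # K_hat: (B, d_w, HW)
        v = self.to_v(x).flatten(2).transpose(1, 2)   # V_hat: (B, HW, d_w)
        r = torch.softmax(torch.bmm(k, q), dim=-1)    # channel correlation matrix R: (B, d_w, d_w)
        att = torch.bmm(v, r).transpose(1, 2).reshape(b, c, h, w)
        return x + att                                # residual connection of Eq. (3)


class ML2PEncoder(nn.Module):
    """Residual MLPs of Eqs. (4)-(5) on the (d_w, HW) view, then max-pooling over spatial positions."""

    def __init__(self, d_w: int, hw: int, hidden: int = 256):
        super().__init__()
        self.attn = ChannelSelfAttention(d_w)
        self.norm1 = nn.LayerNorm(hw)
        self.mlp1 = nn.Sequential(nn.Linear(hw, hidden), nn.GELU(), nn.Linear(hidden, hw))
        self.norm2 = nn.LayerNorm(hw)
        self.mlp2 = nn.Sequential(nn.Linear(hw, hidden), nn.GELU(), nn.Linear(hidden, hw))

    def forward(self, x):
        # x: (B, d_w, H, W) -> group semantic vector of shape (B, d_w)
        f = self.attn(x).flatten(2)          # (B, d_w, HW)
        f = f + self.mlp1(self.norm1(f))     # Eq. (4)
        f = f + self.mlp2(self.norm2(f))     # Eq. (5)
        return f.max(dim=-1).values          # spatial max-pooling


if __name__ == "__main__":
    group = torch.randn(4, 300, 7, 7)                  # one group: d_w = 300, 7x7 spatial grid
    print(ML2PEncoder(d_w=300, hw=49)(group).shape)    # torch.Size([4, 300])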
So we obtain group semantic vectors $\\mathcal{X} \\in \\mathbb{R}^{g \\times d_w}$ and send them into Group Attention.", + "bbox": [ + 496, + 345, + 890, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Group Attention", + "text_level": 1, + "bbox": [ + 500, + 489, + 663, + 505 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Although we obtained group semantic vectors $\\mathcal{X}$ through $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder, the semantic vectors generated by each group did not establish a relationship with each other at this time. As we already know, the key to improving the accuracy of multi-label image classification is to construct the correlation of labels within the image. So we use Group Attention to build the mutual information and also to find similar responses between different labels. We pass a series of linear layers to $\\mathcal{X}$ :", + "bbox": [ + 496, + 512, + 890, + 648 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {\\mathbf {x}} = W _ {x} ^ {Q} \\mathcal {X} \\quad \\mathbf {K} _ {\\mathbf {x}} = W _ {x} ^ {K} \\mathcal {X} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 655, + 890, + 674 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} = \\left(\\mathbf {Q} _ {\\mathbf {x}} \\cdot \\mathbf {K} _ {\\mathbf {x}}\\right) \\cdot \\mathcal {X} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 681, + 890, + 698 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{Q}_{\\mathbf{x}} \\in \\mathbb{R}^{g \\times d_w}$ , and we transpose $\\mathbf{K}_{\\mathbf{x}}$ into $\\mathbf{K}_{\\mathbf{x}} \\in \\mathbb{R}^{d_w \\times g}$ . $W_x^Q$ and $W_x^K$ are different linear weights. $S \\in \\mathbb{R}^{g \\times d_w}$ is the semantic matrix, which contains all the semantic information of the input image. In the loss function, we will make each semantic vector in $S$ approximate the semantic information of seen classes appearing in the image. Therefore, from another perspective, the semantic vectors in $S$ are related to seen classes.", + "bbox": [ + 496, + 702, + 890, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Loss Function", + "text_level": 1, + "bbox": [ + 500, + 832, + 643, + 845 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During training, some semantic vectors are generated for each input image. The semantic matrix $S$ includes the semantic information in the image and is sent to the prediction", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "23862", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "module. The loss function consists of two parts. First of all, to make the positive class (seen class appear in each training image) get a higher ranking than the negative class (seen class which does not appear in the training image). Inspired by [49], we choose to adopt ranknet loss [5] as the main component of the loss function. 
We use", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {i j} = \\max \\left(\\mathcal {S} \\cdot n _ {i}\\right) - \\max \\left(\\mathcal {S} \\cdot p _ {j}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 150, + 195, + 468, + 212 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "to indicate the number of violations of any of these ranking constraints, where $n_i$ represents the semantic vector of the negative class, and $p_j$ denotes the semantic vector of the positive class. max is used to maximize this gap between negative and positive, and constrain it in subsequent steps.", + "bbox": [ + 75, + 223, + 468, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, to minimize the gap, we design the loss function as the following:", + "bbox": [ + 76, + 300, + 468, + 330 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {r a n k}} = \\frac {1}{(| P | | \\bar {P} |)} \\sum_ {i} \\sum_ {j} \\log \\left(1 + e ^ {\\mu_ {i j}}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 340, + 468, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\frac{1}{(|P| |\\bar{P}|)}$ is used to normalize the ranknet loss, and $|P|$ denotes the number of positive class, $|\\bar{P}|$ represents the number of negative class. When an image contains a large number of positive labels, the image becomes difficult to classify. So we need the model to value these hard samples during training. Therefore, we add the class weight $\\omega$ to improve the performance of the model in the face of hard samples. $\\omega$ is represented as:", + "bbox": [ + 75, + 391, + 468, + 518 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\omega = 1 + \\sum_ {i} v a r (P ^ {i}), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 532, + 468, + 561 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $P^i$ represents the vector of the $i$ -th positive class, $var$ means the variance. The higher $\\omega$ means the image contains more complex labels. To prevent the direction of the semantic vectors generated by the model from being too divergent, it needs to be controlled by the loss function. Therefore, we believe that the addition of regularization terms can reduce the difference between the generated semantic vectors when the model faces complex input images. This reduction in variance helps the model learn relevant information between different classes.", + "bbox": [ + 75, + 575, + 468, + 726 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e g} = \\left\\| \\sum_ {n} v a r \\left(\\mathcal {S} _ {n}\\right) \\right\\| _ {1}. 
\\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 736, + 468, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, the loss function of the model is defined as:", + "bbox": [ + 76, + 789, + 418, + 804 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} ((1 - \\lambda) \\cdot \\omega \\mathcal {L} _ {\\text {r a n k}} (\\mathcal {S} _ {i}, Y _ {i}) + \\lambda \\mathcal {L} _ {\\text {r e g}} (\\mathcal {S} _ {i})) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 816, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $N$ means the number of batch size, and $\\lambda$ is a hyperparameter that denotes the regularization term's weight.", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 90, + 633, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 500, + 114, + 689, + 132 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets: First, we use the NUS-WIDE dataset [10] to conduct MLZSL experiments. The NUS-WIDE dataset contains about 270,000 images, and each image contains 925 labels, which are automatically extracted from Flickr user tags. In addition, it also contains 81 labels that are manually annotated by humans, and these labels are called 'GroundTruth'. During the experiment, 925 labels were used as 'seen labels', and 81 labels were used as 'unseen labels'. This setting is similar with [22]. Another dataset is called the Open-Images-V4 dataset. This dataset contains nearly 9 million training images, 125,456 images as test images, and 41,620 images in the validation set. The train-set contains 7,186 labels, which are 'seen labels' that appear at least 100 times in the train-set. While the remaining 400 most frequent labels that do not appear in the train-set are used as test-set labels, they are also used as 'unseen labels'. Each unseen label has at least appeared 75 times.", + "bbox": [ + 496, + 138, + 890, + 396 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Metrics: To better allow our proposed new model and other comparative models to perform an unbiased comparison on the task of MLZSL, we use the two most common evaluation metrics, the mean Average Precision (mAP) [22, 41] and F1-Score. Among them, top-K F1-Score is used to measure the accuracy of the model for label prediction, and mAP is used to reflect the accuracy for unseen label retrieval of the image.", + "bbox": [ + 496, + 398, + 890, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details: Our model can support end-to-end training. We choose VGG19 [37], pre-trained on ImageNet dataset [11], as the backbone network. Unlike other methods, our model uses multi-scale feature maps and aggregates them. The sizes of the feature maps are $28 \\times 28$ , $14 \\times 14$ , and $7 \\times 7$ , respectively.", + "bbox": [ + 496, + 520, + 890, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In terms of the optimizer, we choose to use the Adam optimizer [24], which requires less memory and is suitable for large datasets. The weight decay of the Adam optimizer is set to $4e^{-3}$ . 
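As a concrete reference for the training objective of Eqs. (8)-(12) optimized with this setup, below is a minimal per-image sketch in PyTorch style. The variable names and shapes are hypothetical, and reading var(P^i) in Eq. (10) as the variance of each positive class's word vector is our assumption rather than something the paper states explicitly.

import torch


def mlzsl_loss(S, labels, class_emb, lam=0.4):
    # S: (g, d_w) semantic matrix of one image; labels: (C_s,) binary vector of seen classes
    # present in the image; class_emb: (C_s, d_w) word vectors of the seen classes; lam: lambda.
    scores = S @ class_emb.t()                      # (g, C_s): every semantic vector vs. every class
    pos, neg = labels.bool(), ~labels.bool()
    s_pos = scores[:, pos].max(dim=0).values        # max(S . p_j) for each positive class j
    s_neg = scores[:, neg].max(dim=0).values        # max(S . n_i) for each negative class i
    mu = s_neg.unsqueeze(1) - s_pos.unsqueeze(0)    # Eq. (8): ranking violations, shape (|neg|, |pos|)
    l_rank = torch.log1p(mu.exp()).mean()           # Eq. (9): mean == sum / (|P| |P_bar|)
    omega = 1.0 + class_emb[pos].var(dim=1).sum()   # Eq. (10): class weight for hard samples
    l_reg = S.var(dim=1).sum().abs()                # Eq. (11): ||sum_n var(S_n)||_1
    return (1.0 - lam) * omega * l_rank + lam * l_reg   # one summand of Eq. (12)


if __name__ == "__main__":
    S = torch.randn(7, 300)                                  # g = 7 group semantic vectors
    labels = torch.zeros(925)
    labels[:3] = 1                                           # 3 positive seen classes (NUS-WIDE has 925)
    class_emb = torch.nn.functional.normalize(torch.randn(925, 300), dim=1)
    print(mlzsl_loss(S, labels, class_emb))

Averaging this per-image term over a batch of N images gives Eq. (12).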
In the NUS-WIDE dataset experiments, the initial learning rate of the model is $5e^{-5}$ , and then the learning rate decreases by $\\frac{1}{10}$ at the 7th epoch. The entire experimental process of the NUS-WIDE dataset requires a total of 20 epochs with a batch size of 48. In the experiments using the Open-Images-V4 dataset, our learning rate, batch size, and decay rate remain the same as the NUS-WIDE dataset, but the number of epochs is 7.", + "bbox": [ + 496, + 612, + 890, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines: We will compare the proposed method with several state-of-the-art deep learning-based MLZSL models. These comparative methods have been published in recent years and cover a fairly rich variety of techniques, such as the attention mechanism with the most common CNNs. These comparison methods include: CONSE [36], LabelEM [2], Fast0Tag [49], Kim et al. [23], LESA Attention per Cluster (ApC) [22], LESA [22], and BiAM [35]. All", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "23863", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "comparison methods using VGG19 [37] are not fine-tuned. In addition to comparing with comparison models, we will also test the model's performance under different settings of hyper-parameters $g$ and $\\lambda$ . At the same time, we will conduct ablation experiments to verify the integrity of the model's architecture.", + "bbox": [ + 75, + 90, + 470, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. State-of-the-art Comparison", + "text_level": 1, + "bbox": [ + 76, + 194, + 331, + 210 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "NUS-WIDE: Table 1 shows the performance of ours and competitive methods on the NUS-WIDE test-set. The table contains the results of both ZSL and GZSL. CONSE [36] and LabelEM [2], as the methods proposed earlier, do not perform well on large-scale datasets. Fast0Tag [49] achieves more competitive results by sorting the positive labels to find the principal directions of the image. LESA [22] and BiAM [35] are currently the most advanced models that rely on spatial attention mechanism to generate semantic information. Compared to BiAM, our method achieves a $3.6\\%$ improvement on mAP in the ZSL task. Besides, we lead BiAM by $0.8\\%$ and $2.9\\%$ in F1-Score of $K = 3$ and $K = 5$ , respectively. On the GZSL task, we also surpass BiAM. BiAM deals with higher-dimensional and richer spatial information, while our method is more inclined to single-dimensional channel responses. Therefore, it is not easy to achieve such results with $1.3\\%$ improvement in mAP and $0.3\\%$ and $0.7\\%$ in F1-Score of $K = 3$ and $K = 5$ , respectively. Good results on NUS-WIDE dataset imply the effectiveness of our method.", + "bbox": [ + 75, + 218, + 470, + 518 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Attention Visualization on NUS-WIDE: Figure 6 illustrate the attention regions of the model when our method predicts unseen labels. Figure 6(a) shows that our model can clearly distinguish scene information from all unseen classes. The attention areas of \"Rocks\" and \"Mountain\" in the figure are roughly the same, which indicates that the two classes have similar semantics and dependencies, and the existence of Group Attention enables the model to learn this mutual information well. 
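For reference, the Group Attention referred to here (Sec. 3.3, Eqs. (6)-(7)) admits a very small sketch. The equations as written apply a plain bilinear weighting without a softmax, and the sketch follows them literally; the layer names and the batch dimension are assumptions.

import torch
import torch.nn as nn


class GroupAttention(nn.Module):
    """Bilinear group attention of Eqs. (6)-(7): S = (Q_x . K_x) . X."""

    def __init__(self, d_w: int):
        super().__init__()
        self.w_q = nn.Linear(d_w, d_w, bias=False)   # W_x^Q
        self.w_k = nn.Linear(d_w, d_w, bias=False)   # W_x^K

    def forward(self, x):
        # x: stacked group semantic vectors X, shape (B, g, d_w)
        q = self.w_q(x)                   # Q_x: (B, g, d_w)
        k = self.w_k(x).transpose(1, 2)   # K_x transposed: (B, d_w, g)
        return (q @ k) @ x                # semantic matrix S: (B, g, d_w)


if __name__ == "__main__":
    x = torch.randn(4, 7, 300)             # g = 7 groups, d_w = 300
    print(GroupAttention(300)(x).shape)    # torch.Size([4, 7, 300])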
Figure 6(b) is a comparison with BiAM [35], the best existing model for mining spatial domain information. This result fully shows the effective use of channel information can more accurately grasp the response between classes. While BiAM's over-exploration of spatial information improves the acquisition of regional information, it loses the scene-level response at the same time. For more comparison results, please refer to appendix.", + "bbox": [ + 75, + 521, + 468, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Open-Images-V4: From Table 2, we show the results of ours and the baseline models on Open-Images-V4. We follow the evaluation setting of [22, 35]. This dataset contains more seen and unseen labels than NUS-WIDE. With a large increase in the number of classes, all methods get poor F1-Score on the ZSL task. Among them, Fast0Tag has made great progress compared with past methods, especially in the GZSL task. LESA [22] and BiAM [35], as the two best methods, represent the highest level of extracting spatial re", + "bbox": [ + 75, + 763, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/53129e7f868080edbe375db7824014f7964f483c936020db97aa7120d8b23462.jpg", + "table_caption": [ + "Table 1. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of $K \\in 3,5$ and mAP. The best results are shown in bold." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Task</td><td>mAP</td><td>F1 (K = 3)</td><td>F1 (K = 5)</td></tr>
<tr><td rowspan="2">CONSE [36]</td><td>ZSL</td><td>9.4</td><td>21.6</td><td>20.2</td></tr>
<tr><td>GZSL</td><td>2.1</td><td>7.0</td><td>8.1</td></tr>
<tr><td rowspan="2">LabelEM [2]</td><td>ZSL</td><td>7.1</td><td>19.2</td><td>19.5</td></tr>
<tr><td>GZSL</td><td>2.2</td><td>9.5</td><td>11.3</td></tr>
<tr><td rowspan="2">Fast0Tag [49]</td><td>ZSL</td><td>15.1</td><td>27.8</td><td>26.4</td></tr>
<tr><td>GZSL</td><td>3.7</td><td>11.5</td><td>13.5</td></tr>
<tr><td rowspan="2">Kim et al. [23]</td><td>ZSL</td><td>10.4</td><td>25.8</td><td>23.6</td></tr>
<tr><td>GZSL</td><td>3.7</td><td>10.9</td><td>13.2</td></tr>
<tr><td rowspan="2">Attention per Cluster [22]</td><td>ZSL</td><td>12.9</td><td>24.6</td><td>22.9</td></tr>
<tr><td>GZSL</td><td>2.6</td><td>6.4</td><td>7.7</td></tr>
<tr><td rowspan="2">LESA [22]</td><td>ZSL</td><td>19.4</td><td>31.6</td><td>28.7</td></tr>
<tr><td>GZSL</td><td>5.6</td><td>14.4</td><td>16.8</td></tr>
<tr><td rowspan="2">BiAM [35]</td><td>ZSL</td><td>25.8</td><td>32.0</td><td>29.4</td></tr>
<tr><td>GZSL</td><td>8.9</td><td>15.5</td><td>18.5</td></tr>
<tr><td rowspan="2">Our Approach</td><td>ZSL</td><td>29.4</td><td>32.8</td><td>32.3</td></tr>
<tr><td>GZSL</td><td>10.2</td><td>15.8</td><td>19.2</td></tr>
", + "bbox": [ + 514, + 155, + 883, + 393 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ab89dc01ca74cb24bd667d6cc1ec20d8ebfab87b3653f7442de766cd4e134fd5.jpg", + "table_caption": [ + "Table 2. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the Open-Images-V4 dataset. We show the indicators of F1-Score in the case of $K \\in {10},{20}$ and mAP. Best results are shown in bold." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td>Task</td><td>mAP</td><td>F1 (K = 10)</td><td>F1 (K = 20)</td></tr>
<tr><td rowspan="2">CONSE [36]</td><td>ZSL</td><td>40.4</td><td>0.4</td><td>0.3</td></tr>
<tr><td>GZSL</td><td>43.5</td><td>2.6</td><td>2.4</td></tr>
<tr><td rowspan="2">LabelEM [2]</td><td>ZSL</td><td>40.5</td><td>0.5</td><td>0.4</td></tr>
<tr><td>GZSL</td><td>45.2</td><td>5.2</td><td>5.1</td></tr>
<tr><td rowspan="2">Fast0Tag [49]</td><td>ZSL</td><td>41.2</td><td>0.7</td><td>0.6</td></tr>
<tr><td>GZSL</td><td>45.2</td><td>16.0</td><td>13.0</td></tr>
<tr><td rowspan="2">Attention per Cluster [22]</td><td>ZSL</td><td>40.7</td><td>1.2</td><td>0.9</td></tr>
<tr><td>GZSL</td><td>44.9</td><td>16.9</td><td>13.5</td></tr>
<tr><td rowspan="2">LESA [22]</td><td>ZSL</td><td>41.7</td><td>1.4</td><td>1.0</td></tr>
<tr><td>GZSL</td><td>45.4</td><td>17.4</td><td>14.3</td></tr>
<tr><td rowspan="2">BiAM [35]</td><td>ZSL</td><td>62.8</td><td>4.1</td><td>3.7</td></tr>
<tr><td>GZSL</td><td>79.6</td><td>17.6</td><td>15.1</td></tr>
<tr><td rowspan="2">Our Approach</td><td>ZSL</td><td>65.7</td><td>7.5</td><td>6.5</td></tr>
<tr><td>GZSL</td><td>79.9</td><td>27.6</td><td>24.1</td></tr>
", + "bbox": [ + 506, + 472, + 888, + 683 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "sponses. BiAM achieves very large progress in mAP metrics on both ZSL and GZSL tasks. But our method achieves the best results in the mAP of ZSL, while leading by $3.4\\%$ and $2.8\\%$ in F1-Score with $K = 3$ and $K = 5$ , respectively. Most importantly, for the GZSL task, our F1-Score results also achieve huge advantages by $10.0\\%$ and $9.0\\%$ . This shows that the channel-class correlation as semantic information can fully cope with the complex situation of a large number of labels.", + "bbox": [ + 496, + 703, + 890, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 5 shows the mAP, inference time, and GFLOPs comparisons between our model for obtaining semantic information based on channel responses and the two methods (LESA [22] and BiAM [35]) for acquiring semantic informa", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "23864", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tion based on spatial features and achieving optimal results. In the mAP comparison, it can be seen that we have the highest accuracy for prediction in the ZSL task. At the same time, due to the small amount of data to be processed, the inference speed is the fastest of all comparison methods when we use the same GPU of NVIDIA RTX 3090. Finally, precisely because the model only needs to deal with a single-dimensional channel response, our $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder module requires much less computation than $LESA$ and $BiAM$ that deal with spatial attention. At the same time, the feature map is grouped to avoid the geometric increase of the computational complexity caused by the feature pyramid. This shows that our $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder can be more efficient.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b9cc6f11cdb2e6723a9f25ccd2ae12b178a3ecf8a6c6305042d2560d22679364.jpg", + "table_caption": [ + "Table 3. Ablation study shows the contribution of the different components in our proposed approach. The baseline methods are performed on the NUS-WIDE test-set." + ], + "table_footnote": [], + "table_body": "
<tr><td colspan="2"></td><td>a</td><td>b</td><td>c</td><td>d</td><td>ours</td></tr>
<tr><td colspan="7">Forward Pyramid / (ML)2P-Encoder / Group Attention</td></tr>
<tr><td rowspan="2">mAP</td><td>ZSL</td><td>25.3</td><td>27.3</td><td>28.4</td><td>27.9</td><td>29.4</td></tr>
<tr><td>GZSL</td><td>8.1</td><td>8.5</td><td>9.2</td><td>8.8</td><td>10.2</td></tr>
", + "bbox": [ + 84, + 349, + 464, + 448 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8458772c1181ee73b3a85ca3b2e670d4a44bffd3c2387a13e1980532fbf7d75e.jpg", + "image_caption": [ + "(a) W/O (ML) $^2$ P-Encoder" + ], + "image_footnote": [], + "bbox": [ + 124, + 484, + 254, + 561 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7fcdb68df0c0c5df9c57d97fe0df8368ca7255d03e4d7cbd853a81342d753669.jpg", + "image_caption": [ + "(b) With $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder", + "Figure 3. Evaluation of t-SNE (zoom in for a better view)." + ], + "image_footnote": [], + "bbox": [ + 277, + 465, + 455, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Hyper-parameter Selection", + "text_level": 1, + "bbox": [ + 76, + 635, + 321, + 651 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our method includes two hyper-parameters, the number of groups $g$ and the weight of the regularization term $\\lambda$ . We use the control variable method. In terms of initializing hyper-parameters, the number of output semantic vectors $g$ is set to 7, and the value of $\\lambda$ is set to 0.4. The line graph in Figure 4 shows the mAP results achieved on the ZSL and GZSL tasks with different hyper-parameters, respectively. In addition, we can also see the impact of changes in hyperparameters on the prediction accuracy of the model. It can be seen that the number of $g$ does not have a very significant effect on the mAP of the ZSL task. But the impact on GZSL is more obvious. After comparison, we believe that when $g = 7$ , two different tasks can be well balanced. For the choice of the value of $\\lambda$ , we found that its change will have a greater impact on mAP. But only when $\\lambda = 0.4$ , the performance of GZSL is far better than other results, and", + "bbox": [ + 75, + 659, + 472, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ZSL also achieves the optimal result. So the optimal hyperparameters we choose $g = 7$ and $\\lambda = 0.4$ .", + "bbox": [ + 498, + 90, + 890, + 121 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a1657c3c9aeb342e41cd7a891fefffd25bd38870895024e77f74a63090faedec.jpg", + "image_caption": [ + "(a) $g$" + ], + "image_footnote": [], + "bbox": [ + 509, + 136, + 694, + 229 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bea5c061b33965db1460c351ea8d6dc9e5548fde810a154ead8e86167d85a405.jpg", + "image_caption": [ + "(b) Weights" + ], + "image_footnote": [], + "bbox": [ + 699, + 137, + 883, + 229 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2e12f6e418094ab26027de6309daa2eb11ad2e6a6744aca0b614f345deb31613.jpg", + "image_caption": [ + "Figure 4. Hyper-Parameter selection. The higher the mAP the better. All the experiments are performed on the NUS-WIDE test-set.", + "(a) mAP" + ], + "image_footnote": [], + "bbox": [ + 504, + 329, + 620, + 398 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/be864e674cccd18cea12bbdcc3f4e639ca3700f6bc6523390a63dd95126fd3e0.jpg", + "image_caption": [ + "(b) Inference time (ms)", + "Figure 5. Comparison of our $(\\mathbf{ML})^2\\mathbf{P}$ -Encoder with BiAM and LESA in mAP, inference time, and FLOPs. The higher the mAP the better, the lower the Inference time and GFLOPs the better. All methods are performed on the NUS-WIDE test-set." 
+ ], + "image_footnote": [], + "bbox": [ + 635, + 329, + 746, + 398 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2455e527a5bdeb3bb22c7b815463f0c08e291f6c7add9f25ddff4192a6829139.jpg", + "image_caption": [ + "(c) GFLOPs" + ], + "image_footnote": [], + "bbox": [ + 759, + 329, + 872, + 398 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 498, + 653, + 513 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation Study: To illustrate the effectiveness of each module designed in our method, we arrange three comparative experiments. The specific results are shown in Table 3. As the most primitive structure, model 'a' only contains shuffle and grouping operations. But after adding the 'Forward Pyramid', the model expands the number of features. As the number of optional feature channels increases, the amount of information brought by the channel also increases, thus achieving more competitive results. The addition of $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder enables the model to process the channel response of specific classes. The supplement of Group Attention is to give the model-specific information for solving multi-label tasks, that is, inter-class correlation. The combination of $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder and Group Attention greatly improves the prediction ability of the model in ZSL and GZSL tasks, indicating that our model construction has achieved great success.", + "bbox": [ + 496, + 521, + 890, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "t-SNE: Figure 3 shows the performance of $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder in t-SNE visualization. It can be seen that after using $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder, the boundaries of inter-class become much clearer, proving the correctness of our exploration for class-specific channel responses.", + "bbox": [ + 496, + 777, + 890, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Different Backbones: Table 4 shows the results produced by our method using different backbones. It can be seen from the results that ResNet [20] has obvious advantages", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "23865", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "over VGG [37]. As the ResNet network deepens and the number of parameters increases, the results obtained by our model become better. This is exactly in line with the result variation of an end-to-end model.", + "bbox": [ + 75, + 90, + 470, + 151 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/0ededf22f384f4ae5ec4f8b10f45a702bb8521ddc0a28618eb5eecf6a9dfdb1f.jpg", + "table_caption": [ + "Table 4. Our $\\mathbf{C}^3$ -MLZSL approach with different backbones for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of $K \\in 3, 5$ and mAP. The best results are shown in bold." + ], + "table_footnote": [], + "table_body": "
Backbones | Task | mAP | F1 (K = 3) | F1 (K = 5)
VGG19 [37] | ZSL | 29.4 | 32.8 | 32.3
| GZSL | 10.2 | 15.8 | 19.2
ResNet50 [20] | ZSL | 30.9 | 33.6 | 33.2
| GZSL | 10.7 | 15.9 | 19.4
ResNet101 [20] | ZSL | 31.2 | 33.9 | 33.9
| GZSL | 10.9 | 16.1 | 19.5
", + "bbox": [ + 81, + 229, + 460, + 351 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/35da844c0da731cac057f029763ba3bb2787c08a55a9372c0d286eebe3ef9a00.jpg", + "image_caption": [ + "Figure 6. Attention visualization. where (a) is the attention response of our $\\mathbf{C}^3$ -MLZSL when faced with unseen labels. (b) is the comparison of attention visualization results of our $\\mathbf{C}^3$ -MLZSL and BiAM [35] models. See appendix for more results." + ], + "image_footnote": [], + "bbox": [ + 81, + 369, + 465, + 571 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Multi-Label Learning", + "text_level": 1, + "bbox": [ + 76, + 665, + 282, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5 shows the results of the model for multi-label image classification. The baselines we compare include not only state-of-the-art MLZSL models, but also multi-label image classification models including Logistic Regression [40], WSABIE [43], WARP [17] and CNN-RNN [42]. As can be seen from the results, our model far surpasses many multi-label image classification models and the classic Fast0Tag [49] algorithm in mAP performance. This is because the above models only process the input image into a single semantic vector, and limited image embedding cannot build the semantic diversity for multi-label classification. For other methods such as LESA [22] and BiAM [35], they noticed that the attention regions of different objects in multi-label images are different, and thus define the label", + "bbox": [ + 75, + 688, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2d41ff5bccca30a3291db40de3b266d2f897d4e468be17fac5193a0af7e92cc9.jpg", + "table_caption": [ + "Table 5. Performance of Multi-label image classification task on NUS-WIDE datasets. The best results are in bold." + ], + "table_footnote": [], + "table_body": "
Method | F1(K=3)(↑) | F1(K=5)(↑) | mAP(↑)
Logistic [40] | 51.1 | 46.1 | 21.6
WARP [17] | 54.4 | 49.4 | 3.1
WSABIE [43] | 53.8 | 49.2 | 3.1
Fast0Tag [49] | 53.8 | 48.6 | 22.4
CNN-RNN [42] | 55.2 | 50.8 | 28.3
Kim et al. [23] | 56.8 | 51.3 | 32.6
LESA ApC [22] | 56.6 | 50.7 | 31.7
LESA [22] | 58.0 | 52.0 | 31.5
BiAM [35] | 59.6 | 53.4 | 47.8
Ours | 59.8 | 53.8 | 48.0
", + "bbox": [ + 506, + 128, + 885, + 301 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "related embeddings from the perspective of the spatial domain. However, after feature extraction, our model takes into account that the channel response can be important information representing the class semantics, and this superior performance just verifies the rationality of the exploration.", + "bbox": [ + 496, + 325, + 890, + 415 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 445, + 617, + 460 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we focus on the neglect of channel-wise class information and over-reliance on spatial-wise class information in previous MLZSL models, then propose C3-MLZSL structure and the $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder component. The C3-MLZSL structure first group multi-scale features, then use the $(\\mathrm{ML})^{2}\\mathrm{P}$ -Encoder to calculate the correlation of channels within each group and perform information fusion to get the semantic vectors. These semantic vectors are then aggregated through group attention to learn mutual information between groups. Finally, the model successfully learns channel-class correlation. Extensive experiments on the large-scale NUS-WIDE and Open-Images-V4 datasets show that our model has achieved very competitive results on MLZSL compared with other state-of-the-art models.", + "bbox": [ + 496, + 476, + 890, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgment", + "text_level": 1, + "bbox": [ + 500, + 717, + 669, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This research was supported by fundings from the Key-Area Research and Development Program of Guangdong Province (No. 2021B0101400003), Hong Kong RGC Research Impact Fund (No. R5060-19), Areas of Excellence Scheme (AoE/E-601/22-R), General Research Fund (No. 152203/20E, 152244/21E, 152169/22E, 152211/23E), Shenzhen Science and Technology Innovation Commission (JCYJ20200109142008673), the National Natural Science Foundation of China (No. 62102327), and PolyU Internal Fund (No. P0043932).", + "bbox": [ + 496, + 747, + 890, + 887 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "23866", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Zeynep Akata, Mateusz Malinowski, Mario Fritz, and Bernt Schiele. Multi-cue zero-shot learning with strong supervision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 59-68, 2016. 2", + "[2] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2015. 5, 6", + "[3] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2016. 3", + "[4] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Improving semantic embedding consistency by metric learning for zero-shot classification. In European Conference on Computer Vision, pages 730-746. Springer, 2016. 
3", + "[5] Chris Burges, Tal Shaked, Erin Renshaw, Ari Lazier, Matt Deeds, Nicole Hamilton, and Greg Hullender. Learning to rank using gradient descent. In Proceedings of the 22nd international conference on Machine learning, pages 89-96, 2005. 5", + "[6] Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. Synthesized classifiers for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5327-5336, 2016. 3", + "[7] Zhi Chen, Yadan Luo, Sen Wang, Ruihong Qiu, Jingjing Li, and Zi Huang. Mitigating generation shifts for generalized zero-shot learning. In Proceedings of the 29th ACM International Conference on Multimedia, pages 844-852, 2021. 3", + "[8] Zhao-Min Chen, Xiu-Shen Wei, Peng Wang, and Yanwen Guo. Multi-label image recognition with graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5177-5186, 2019. 2", + "[9] Xing Cheng, Hezheng Lin, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Nian Shi, and Honglin Liu. Mltr: Multi-label classification with transformer. arXiv preprint arXiv:2106.06195, 2021. 1", + "[10] Tat-Seng Chua, Jinhui Tang, Richang Hong, Haojie Li, Zhiping Luo, and Yantao Zheng. Nus-wide: a real-world web image database from national university ofSingapore. In Proceedings of the ACM international conference on image and video retrieval, pages 1-9, 2009. 5", + "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5", + "[12] Shay Deutsch, Soheil Kolouri, Kyungnam Kim, Yuri Owechko, and Stefano Soatto. Zero shot learning via multi-scale manifold regularization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7112-7119, 2017. 2", + "[13] Thibaut Durand, Nazanin Mehrasa, and Greg Mori. Learning a deep convnet for multi-label classification with partial" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "labels. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 647-657, 2019. 1, 2", + "[14] Lei Feng, Bo An, and Shuo He. Collaboration based multi-label learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3550-3557, 2019. 1, 2", + "[15] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. Advances in neural information processing systems, 26, 2013. 2, 3", + "[16] Bin-Bin Gao and Hong-Yu Zhou. Learning to discover multi-class attentional regions for multi-label image recognition. IEEE Transactions on Image Processing, 30:5920-5932, 2021. 1, 2", + "[17] Yunchao Gong, Yangqing Jia, Thomas Leung, Alexander Toshev, and Sergey Ioffe. Deep convolutional ranking for multilabel image annotation. arXiv preprint arXiv:1312.4894, 2013. 1, 2, 8", + "[18] Omkar Gune, Biplab Banerjee, Subhasis Chaudhuri, and Fabio Cuzzolin. Generalized zero-shot learning using generated proxy unseen samples and entropy separation. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4262-4270, 2020. 3", + "[19] Jingcai Guo and Song Guo. A novel perspective to zero-shot learning: Towards an alignment of manifold structures via semantic feature expansion. 
IEEE Transactions on Multimedia, 23:524-537, 2020. 2", + "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 1, 7, 8", + "[21] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 4", + "[22] Dat Huynh and Ehsan Elhamifar. A shared multi-attention framework for multi-label zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8776–8786, 2020. 2, 3, 5, 6, 8", + "[23] Jin-Hwa Kim, Jaehyun Jun, and Byoung-Tak Zhang. Bilinear attention networks. arXiv preprint arXiv:1805.07932, 2018. 5, 6, 8", + "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5", + "[25] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 2", + "[26] Elyor Kodirov, Tao Xiang, and Shaogang Gong. Semantic autoencoder for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3174-3183, 2017. 2, 3", + "[27] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 951-958. IEEE, 2009. 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "23867", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Attribute-based classification for zero-shot visual object categorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(3):453-465, 2014. 3", + "[29] Chung-Wei Lee, Wei Fang, Chih-Kuan Yeh, and Yu-Chiang Frank Wang. Multi-label zero-shot learning with structured knowledge graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1576–1585, 2018. 3", + "[30] Jingjing Li, Mengmeng Jing, Lei Zhu, Zhengming Ding, Ke Lu, and Yang Yang. Learning modality-invariant latent representations for generalized zero-shot learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 1348-1356, 2020. 3", + "[31] Yanan Li, Donghui Wang, Huanhang Hu, Yuetan Lin, and Yueting Zhuang. Zero-shot recognition using dual visual-semantic mapping paths. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3279–3287, 2017. 2", + "[32] Teng Long, Xing Xu, Youyou Li, Fumin Shen, Jingkuan Song, and Heng Tao Shen. Pseudo transfer with marginalized corrupted attribute for zero-shot learning. In Proceedings of the 26th ACM international conference on Multimedia, pages 1802-1810, 2018. 3", + "[33] Thomas Mensink, Efstratios Gavves, and Cees GM Snoek. Costa: Co-occurrence statistics for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2441-2448, 2014. 2", + "[34] Pedro Morgado and Nuno Vasconcelos. Semantically consistent regularization for zero-shot recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6060-6069, 2017. 2", + "[35] Sanath Narayan, Akshita Gupta, Salman Khan, Fahad Shahbaz Khan, Ling Shao, and Mubarak Shah. 
Discriminative region-based multi-label zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8731-8740, 2021. 2, 3, 5, 6, 8", + "[36] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. In 2nd International Conference on Learning Representations, ICLR 2014, 2014. 2, 3, 5, 6", + "[37] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 1, 4, 5, 6, 8", + "[38] Richard Socher, Milind Ganjoo, Christopher D Manning, and Andrew Ng. Zero-shot learning through cross-modal transfer. In Advances in neural information processing systems, pages 935-943, 2013. 3", + "[39] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich, et al. Going deeper with convolutions. Cvpr, 2015. 1", + "[40] Grigorios Tsoumakas and Ioannis Katakis. Multi-label classification: An overview. International Journal of Data Warehousing and Mining (IJDWM), 3(3):1-13, 2007. 8" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Andreas Veit, Neil Alldrin, Gal Chechik, Ivan Krasin, Abhinav Gupta, and Serge Belongie. Learning from noisy large-scale datasets with minimal supervision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 839-847, 2017. 5", + "[42] Jiang Wang, Yi Yang, Junhua Mao, Zhiheng Huang, Chang Huang, and Wei Xu. Cnn-rnn: A unified framework for multi-label image classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2285-2294, 2016. 1, 2, 8", + "[43] Jason Weston, Samy Bengio, and Nicolas Usunier. Wsabie: Scaling up to large vocabulary image annotation. In Twenty-Second International Joint Conference on Artificial Intelligence, 2011. 2, 8", + "[44] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 69-77, 2016. 3", + "[45] Yongqin Xian, Bernt Schiele, and Zeynep Akata. Zero-shot learning-the good, the bad and the ugly. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4582-4591, 2017. 2, 3", + "[46] Hsiang-Fu Yu, Prateek Jain, Purushottam Kar, and Inderjit Dhillon. Large-scale multi-label learning with missing labels. In International conference on machine learning, pages 593-601. PMLR, 2014. 1, 2", + "[47] Chenrui Zhang, Xiaoqing Lyu, and Zhi Tang. Tgg: Transferable graph generation for zero-shot and few-shot learning. In Proceedings of the 27th ACM International Conference on Multimedia, pages 1641-1649, 2019. 3", + "[48] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2021-2030, 2017. 2", + "[49] Yang Zhang, Boqing Gong, and Mubarak Shah. Fast zero-shot image tagging. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5985-5994. IEEE, 2016. 2, 3, 5, 6, 8", + "[50] Ziming Zhang and Venkatesh Saligrama. Zero-shot learning via joint latent similarity embedding. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6034-6042, 2016. 3", + "[51] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 3" + ], + "bbox": [ + 501, + 92, + 892, + 768 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "23868", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_model.json b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..245d5e61a230c59d15e90a5ed3c0c79e6684d6be --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_model.json @@ -0,0 +1,2222 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.13, + 0.882, + 0.177 + ], + "angle": 0, + "content": "\\((\\mathbf{ML})^{2}\\mathbf{P}\\)-Encoder: On Exploration of Channel-class Correlation for Multi-label Zero-shot Learning" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.203, + 0.877, + 0.258 + ], + "angle": 0, + "content": "Ziming Liu1, Song Guo1,2, Xiaocheng Lu1, Jingcai Guo1,2*, Jiewei Zhang1, Yue Zeng1, Fushuo Huo1 \n1Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China \n2The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.26, + 0.761, + 0.294 + ], + "angle": 0, + "content": "{ziming.liu, jiewei.zhang, fushuo.huo}@connect.polyu.hk {song.quo, xiaoclu, jc-jingcai.quo, zengyue.zeng}@polyu.edu.hk" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.327, + 0.314, + 0.344 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.359, + 0.474, + 0.722 + ], + "angle": 0, + "content": "Recent studies usually approach multi-label zero-shot learning (MLZSL) with visual-semantic mapping on spatial-class correlation, which can be computationally costly, and worse still, fails to capture fine-grained class-specific semantics. We observe that different channels may usually have different sensitivities on classes, which can correspond to specific semantics. Such an intrinsic channel-class correlation suggests a potential alternative for the more accurate and class-harmonious feature representations. In this paper, our interest is to fully explore the power of channel-class correlation as the unique base for MLZSL. Specifically, we propose a light yet efficient Multi-Label MultiLayer Perceptron-based Encoder, dubbed \\((ML)^{2}P\\)-Encoder, to extract and preserve channel-wise semantics. 
We reorganize the generated feature maps into several groups, of which each of them can be trained independently with \\((ML)^{2}P\\)-Encoder. On top of that, a global group-wise attention module is further designed to build the multilabel specific class relationships among different classes, which eventually fulfills a novel Channel-Class Correlation MLZSL framework \\((C^{3}\\)-MLZSL). Extensive experiments on large-scale MLZSL benchmarks including NUS-WIDE and Open-Images-V4 demonstrate the superiority of our model against other representative state-of-the-art models." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.75, + 0.21, + 0.767 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.775, + 0.47, + 0.868 + ], + "angle": 0, + "content": "The proliferation of smart devices has greatly enriched human life when it comes to the era of big data. These smart devices are usually equipped with cameras such that users can easily produce and share their images. With the increasing abundance of public images, how to analyze them accurately has become a challenging problem. Recent years" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.33, + 0.853, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.559, + 0.895, + 0.63 + ], + "angle": 0, + "content": "Figure 1. Example of Channel-Class Correlation. Our method achieves the prediction of unseen classes by exploiting the unique distribution of channel responses as semantic information for the class and building correlations with responses from the same channel (zoom in for a better view)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.894, + 0.707 + ], + "angle": 0, + "content": "have witnessed great success in classifying an image into a specific class [20, 37, 39], namely, single-label classification. However, in reality, the images [17,46] usually contain abundant information and thereby consist of multiple labels." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.708, + 0.895, + 0.875 + ], + "angle": 0, + "content": "In recent years, the multi-label classification has been widely investigated by exploring the relationship among different labels from multiple aspects [9, 13, 14, 16, 42]. However, in some scenarios where extensive collections of images exist, e.g., Flickr\\(^2\\), users can freely set one or more individual tags/labels for each image, while the presented objects and labels in these images may not be fully shown in any previous collection, and thus result in a domain gap for the recognition. Therefore, in real-world applications, the model is required to gain the ability to predict unseen classes as well. As one of the thriving research topics, zero-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.875, + 0.454, + 0.901 + ], + "angle": 0, + "content": "*Jingcai Guo is the corresponding author. \n1Released code: github.com/simonzmliu/cvpr23_mlzsl" + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.887, + 0.651, + 0.901 + ], + "angle": 0, + "content": "\\(^{2}\\)https://www.flickr.com" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23859" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "shot learning (ZSL) [1, 12, 15, 34] is designed to transfer tasks from seen classes to unseen classes, and naturally recognizes novel objects of unseen classes. 
Specifically, ZSL has made continuous success in single-label classification [19, 26, 31, 45, 48]. However, these methods can hardly be extended to the multi-label scenario since exploring the cross-class relationships in an image is non-trivial." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.2, + 0.47, + 0.365 + ], + "angle": 0, + "content": "Recently, some works have focused on multi-label zero-shot learning (MLZSL) tasks and obtained some promising results [33, 36, 49]. Other works considered incorporating attention mechanisms into their models, such as \\(LESA\\) [22] and \\(BiAM\\) [35]. \\(LESA\\) [22] designed an attention-sharing mechanism for different patches in the image so that each patch can output the corresponding class. In another way, \\(BiAM\\) [35] designed a bi-level attention to extract relations from regional context and scene context, which can enrich the regional features of the model and separate the features of different classes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.37, + 0.471, + 0.701 + ], + "angle": 0, + "content": "Although previous works have made considerable progress, their designed methods have been limited to the processing of spatial-domain information. First of all, the over-reliance on spatial-class correlation fails to capture fine-grained class-specific semantics. In addition, the additional processing of spatial information greatly increases the computational cost of the model and limits the inference speed. Given the shortcomings of the above methods, we found through analysis that the channel response can be used as the semantic information of the class. Firstly, the response of each class in the channel is unique, which creates conditions for obtaining the unique semantics. Secondly, for classes with certain semantic associations, there must be some channels that capture their common information. Therefore, channel information, as an easily overlooked part after feature extraction, can complete the task of capturing multi-label information. In MLZSL, we can complete the prediction of unseen classes by obtaining the responses of seen classes in the channel domain, and the relationship between seen and unseen classes. Finally, the subsequent analysis of the channel response greatly saves computational costs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Specifically, as shown in Figure 1, as seen classes, \"water\" and \"tree\" have unique response distributions on feature channels, and these responses can be used as semantic information for classification tasks. Besides, in order to explore the correlation of classes, we found that although the semantic information of \"water\" and \"tree\" is different, there are still some channels that respond simultaneously (i.e. the blue channel). We need to build this correlation during the training process through modeling so that the model can learn multi-label correlations. In the ZSL process, for the unseen class \"garden\", we know that it is related to \"water\" (i.e. purple layer) and \"tree\" (i.e. green, orange, and gray layer) by obtaining its semantic information and matching" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "with seen classes. This observation suggests that channels can help not only to classify objects but also to establish associations between classes. 
Previous methods which only consider spatial information are unable to obtain this intrinsic channel-class correlation and dissimilarity, thus achieving sub-optimal performance on the MLZSL task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.378 + ], + "angle": 0, + "content": "To address the above challenges and construct a more accurate and robust MLZSL system, we propose to group the generated feature maps and process them in a group-wise manner, thus enhancing the model by fully exploring the channel-class correlations. Besides, by properly designing a light yet efficient Multi-Label Multi-Layer Perceptron-based Encoder, i.e., \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder, we can easily analyze the local relationship between channels while significantly reducing the computation overhead. Finally, these groups are recombined and then perform the calculation of group attention, indicating that the model is analyzed locally and globally from the perspective of the channels, which can ensure the integrity of the representation." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.379, + 0.813, + 0.394 + ], + "angle": 0, + "content": "In summary, our contributions are four-fold:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.403, + 0.892, + 0.479 + ], + "angle": 0, + "content": "1. To the best of our knowledge, our method first suggests the concept of channel-class correlation in MLZSL, and proposes a channel-sensitive attention module \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder to extract and preserve channel-wise semantics for channel groups." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.488, + 0.892, + 0.548 + ], + "angle": 0, + "content": "2. Different from previous works that use spatial-class correlation to extract global and local features, we alternatively explore the channel-class correlation as the unique base for MLZSL." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.558, + 0.892, + 0.604 + ], + "angle": 0, + "content": "3. In conjunction with \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder, a global group-wise attention is also designed to establish the multi-label specific class relationships among classes." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.613, + 0.892, + 0.673 + ], + "angle": 0, + "content": "4. Extensive experiments on large-scale datasets NUS-WIDE and Open-Images-V4 demonstrate the effectiveness of our method against other state-of-the-art models." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.403, + 0.892, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.687, + 0.642, + 0.703 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.712, + 0.736, + 0.727 + ], + "angle": 0, + "content": "2.1. Multi-Label Classification" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The establishment of graph neural networks (GNNs) brings remarkable success to multi-label classification tasks [8, 25]. Among them, Chen et al. [8] constructs directed graphs for object labels and uses graph convolutional networks (GCN) to map label nodes, which contain word embeddings, into classifiers. In addition, the CNN-based multi-label classification models enable the learning of the characteristics of each label from the spatial information of the image and design a new multi-label classifier [13, 14, 16, 17, 42, 43, 46]. Gao et al. 
[16] suggests a two-stream framework to identify global and local information" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23860" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.092, + 0.878, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.301, + 0.896, + 0.359 + ], + "angle": 0, + "content": "Figure 2. Pipeline for \\(\\mathbf{C}^3\\)-MLZSL. The input image is first passed through the feature extraction network (eg. VGG19), and then multi-layer feature maps are extracted through the Forward Pyramid module. After the feature maps are shuffled and grouped, each group uses \\((\\mathbf{ML})^{2}\\mathbf{P}\\)-Encoder to extract semantic information. Then, the semantic information generated by all groups is associated through Group Attention to generate the final semantic matrix \\(\\mathcal{S}\\) (zoom in for a better view)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.378, + 0.471, + 0.422 + ], + "angle": 0, + "content": "separately and a multi-class regional attention module to align them. However, the above methods cannot generalize to unseen classes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.434, + 0.269, + 0.451 + ], + "angle": 0, + "content": "2.2. Zero-Shot Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.458, + 0.474, + 0.745 + ], + "angle": 0, + "content": "Zero-shot learning provides a solution to recognize unseen classes. Current studies mostly consider a relatively simple single-label scenario [4, 6, 26, 30, 32, 47, 50, 51]. In practice, existing methods usually focus on finding the main semantic information of training images, and then exploit the semantic relationship, i.e., word vectors [15, 38, 44, 45] or attribute vectors [3, 27, 28], between seen and unseen classes for prediction. The generated semantic information can be inferred from seen to unseen labels by comparing the similarity of the relation vectors between them. For example, Chen et al. [7] proposes a generative flow framework and uses a combinatorial strategy to solve the problems of semantic inconsistency, variance collapse, and structural disorder in zero-shot learning. Gune et al. [18] generates visual proxy samples to simulate the average entropy of the label distribution of the unseen class. However, the above methods only predict single labels with a single representation of images, which can hardly generalize to a more realistic multi-label scenario." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.756, + 0.365, + 0.773 + ], + "angle": 0, + "content": "2.3. Multi-Label Zero-Shot Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Multi-label zero-shot learning has received increasing attention recently. For example, Norouzi et al. [36] designs two separate spaces, i.e., the image and semantic embedding spaces, jointly with the convex combination of the label embedding vectors to achieve multi-label recognition in the zero-shot learning framework. Zhang et al. [49] proposes a fast and general model based on the fact that the word vectors of the relevant labels are ranked before" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.378, + 0.895, + 0.575 + ], + "angle": 0, + "content": "the irrelevant word vectors in the main vector of the image. Different from the above methods, Lee et al. [29] uses the knowledge graph to connect different labels. 
In recent years, attention-based methods become the mainstream. For example, LESA [22] applies an attention-sharing mechanism to the multi-label environment, allowing the model to focus on the key areas of each label. Narayan et al. [35] uses a bi-layer attention module to combine global context information and local features and map the generated information to the semantic space. However, the above methods only stay at the two-dimensional space level \\((H\\times W)\\), and do not consider the response between different feature channels with respect to classes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.592, + 0.6, + 0.608 + ], + "angle": 0, + "content": "3. Methods" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.619, + 0.662, + 0.636 + ], + "angle": 0, + "content": "3.1. Problem Setting" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Before proposing our method, we first explain the definition of the MLZSL problem. Given \\( n \\) input samples \\( \\{(I_1,Y_1),\\ldots ,(I_i,Y_i),\\ldots ,(I_n,Y_n)\\} \\), where \\( I_{i} \\) represents the input image of the \\( i \\)-th train-set, and \\( Y_{i} \\) represents the training labels corresponding to the input images, which are also called 'seen labels'. On the label distribution, let us set the seen label in the dataset as \\( C_s \\), where the seen label refers to the label known by the model. \\( C_s \\) is mainly used for the train-set of the model in zero-shot learning. We set the unseen label to \\( C_u \\), and the unseen label is generally used in the test-set. The label relationship in the dataset is defined as \\( \\mathcal{C} = \\mathcal{C}_s\\cup \\mathcal{C}_u \\), where \\( \\mathcal{C} \\) represents the set of all labels in the dataset. Based on the above definition, after the model is trained on the train-set, in the testing part of MLZSL, given the image \\( I_{u} \\), the model can output the prediction result \\( y_{u}\\subset C_{u} \\). While in the generalized zero-shot learning task, given an image \\( I_{u} \\), the output of the model is" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "23861" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "\\(y_{u} \\subset \\mathcal{C}\\), which means the model needs to output both the seen label and the unseen label that exist in the image." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.13, + 0.245, + 0.146 + ], + "angle": 0, + "content": "3.2. \\((\\mathbf{ML})^{2}\\mathbf{P}\\) -Encoder" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.154, + 0.47, + 0.44 + ], + "angle": 0, + "content": "The proposed network structure is shown in Figure 2. For input images \\(I\\), we first use a pre-trained feature extraction network to obtain the corresponding image features \\(\\mathcal{F}\\). We extract the features from the last three layers of the feature extraction network, and keep the two layers with the larger size consistent with the smallest size layer by downsampling. For example, assuming that the used and training network is VGG19 [37], the size of the last three layers of feature maps is \\(\\{28 \\times 28, 14 \\times 14, 7 \\times 7\\}\\). We use max-pooling to down-sample the large-scale feature maps to obtain equivalent \\(7 \\times 7\\) feature maps. This step is called the \"Forward Pyramid\". 
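As a reading aid for the "Forward Pyramid" step just described, the following is a minimal PyTorch-style sketch of how the last three VGG19 feature maps (28×28, 14×14, 7×7) could be max-pooled to a common 7×7 resolution and stacked along the channel axis. The function name forward_pyramid, the 512-channel sizes and the pooling strides are our own illustrative assumptions, not the authors' released code.

```python
import torch
import torch.nn.functional as F


def forward_pyramid(f28: torch.Tensor, f14: torch.Tensor, f7: torch.Tensor) -> torch.Tensor:
    """Hypothetical 'Forward Pyramid': bring multi-scale feature maps to the
    smallest (7x7) scale with max-pooling and stack them channel-wise."""
    # f28: [B, C1, 28, 28], f14: [B, C2, 14, 14], f7: [B, C3, 7, 7]
    f28 = F.max_pool2d(f28, kernel_size=4, stride=4)   # -> [B, C1, 7, 7]
    f14 = F.max_pool2d(f14, kernel_size=2, stride=2)   # -> [B, C2, 7, 7]
    return torch.cat([f28, f14, f7], dim=1)            # -> [B, C1 + C2 + C3, 7, 7]


if __name__ == "__main__":
    maps = forward_pyramid(torch.randn(1, 512, 28, 28),
                           torch.randn(1, 512, 14, 14),
                           torch.randn(1, 512, 7, 7))
    print(maps.shape)  # torch.Size([1, 1536, 7, 7])
```

The channel shuffle and regrouping described in the next sentences would then amount to permuting and chunking the channel dimension of this stacked tensor (plus whatever 1×1 projection is needed to reach g·d_w channels, which the text does not spell out, so it is left out of this sketch).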
After that, we obtain feature maps at different levels with the same scale. Then we randomly shuffle them to get the feature map \\(\\mathcal{F}_a\\) and re-group them into \\(g\\) different groups, each group has \\(d_w\\) channels, which is the same length as the word vectors in the ground-truth semantic space. The purpose of this operation is to generate specific semantic vectors to express the semantic information contained in each group." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.469, + 0.575 + ], + "angle": 0, + "content": "Next, the features of each group are fed into \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder. First, we need to calculate the correlation between channels within each group. In traditional self-attention, the cost of computation greatly consumes the inference speed of the model, and the traditional self-attention module cannot accurately reflect the relationship between each channel. To solve the loss caused by the amount of calculation and accurately reflect the channel correlation, we designed a new self-attention structure to achieve this." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.575, + 0.469, + 0.622 + ], + "angle": 0, + "content": "For features \\(\\mathcal{F}_a\\) in group \\(i\\), which is \\(\\mathcal{F}_a^i \\in \\mathbb{R}^{H \\times W \\times d_w}\\). We first generate Query (Q), Value (V) and Key (K) through three convolution operations:" + }, + { + "type": "equation", + "bbox": [ + 0.127, + 0.632, + 0.469, + 0.652 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} = W _ {p} ^ {Q} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {K} = W _ {p} ^ {K} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {V} = W _ {p} ^ {V} \\mathcal {F} _ {a} ^ {i} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.663, + 0.47, + 0.787 + ], + "angle": 0, + "content": "where \\(W_{p}^{(\\cdot)}\\) means the convolution operation. Next, to obtain the channel correlation matrix \\(\\mathcal{R}\\), we reshape \\(\\mathbf{Q},\\mathbf{K}\\) and \\(\\mathbf{V}\\) in the spatial domain \\((H\\times W)\\) to get \\(\\widehat{\\mathbf{Q}}\\in \\mathbb{R}^{HW\\times d_w}\\), \\(\\widehat{\\mathbf{K}}\\in \\mathbb{R}^{d_w\\times HW}\\) and \\(\\widehat{\\mathbf{V}}\\in \\mathbb{R}^{HW\\times d_w}\\). Then perform a dot product operation on \\(\\mathbf{Q}\\) and \\(\\mathbf{K}\\) to obtain the channel correlation matrix \\(\\mathcal{R}\\in \\mathbb{R}^{d_w\\times d_w}\\). 
After that, we do the dot product between \\(\\mathcal{R}\\) and \\(\\mathbf{V}\\), finally, add with the input \\(\\mathcal{F}_a^i\\) to get the output \\(\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.148, + 0.796, + 0.469, + 0.833 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) = \\widehat {\\mathbf {V}} \\cdot \\underset {\\mathcal {R}} {\\operatorname {s o f t m a x}} (\\underbrace {\\widehat {\\mathbf {K}} \\cdot \\widehat {\\mathbf {Q}}} _ {\\mathcal {R}}) \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.845, + 0.469, + 0.866 + ], + "angle": 0, + "content": "\\[\n\\widehat {\\mathcal {F}} _ {a} ^ {i} = \\mathcal {F} _ {a} ^ {i} + \\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.9 + ], + "angle": 0, + "content": "After enhancing the correlation between channels, we need to extract and analyze the feature information contained in" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.287 + ], + "angle": 0, + "content": "each channel. We reshape the information in the spatial domain into a one-dimensional vector, then we decide to use the Multi-Layer Perceptron (MLP) to encode the features. Compared with the traditional convolution structure, the MLP structure is convenient to perform information fusion between local regions. Specifically, for the input feature \\(\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}\\), we first change the dimension from \\(H\\times W\\times d_w\\) to \\(\\mathcal{F}_{mlp}^i\\in \\mathbb{R}^{d_w\\times HW}\\), then we use LayerNorm to normalize the input. Our MLP structure includes two different MLPs: MLP1 is used to extract the spatial information contained in each channel, and MLP2 is proposed to obtain local information of different channels in the spatial domain:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.296, + 0.892, + 0.315 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} _ {m l p 1} ^ {i} = \\mathcal {F} _ {m l p} ^ {i} + \\mathbf {W} _ {2} \\sigma \\left(\\mathbf {W} _ {1} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p} ^ {i}\\right)\\right) \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.539, + 0.323, + 0.892, + 0.341 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} = \\mathcal {F} _ {m l p 1} ^ {i} + \\mathbf {W} _ {4} \\sigma \\left(\\mathbf {W} _ {3} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p 1} ^ {i}\\right)\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.346, + 0.892, + 0.482 + ], + "angle": 0, + "content": "where \\(\\mathcal{F}_{mlp1}^i\\) is the output after MLP1. \\(\\mathbf{W}_1\\), \\(\\mathbf{W}_2\\) is the parameter of MLP1, and \\(\\mathbf{W}_3\\), \\(\\mathbf{W}_4\\) is the parameter of MLP2. \\(\\sigma\\) is an element-wise non-linearity GELU [21]. Then we use max-pooling to filter out the best semantic vector in the spatial domain, which can more accurately represent the semantic information of this group. This max-pooling operation is also to be able to directly extract the channel response. So we obtain group semantic vectors \\(\\mathcal{X} \\in \\mathbb{R}^{g \\times d_w}\\) and send them into Group Attention." 
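To make Eqs. (1)-(5) concrete, here is a minimal PyTorch-style sketch of one group of the (ML)²P-Encoder as we read it: 1×1 convolutions produce Q, K and V, a d_w×d_w channel-correlation matrix re-weights V with a residual connection back onto the input, two residual MLPs mix the flattened spatial axis, and a spatial max-pool leaves one d_w-dimensional semantic vector for the group. The class name, hidden width and softmax axis are our assumptions, not the released implementation.

```python
import torch
import torch.nn as nn


class ML2PEncoderGroup(nn.Module):
    """One channel group of the (ML)^2P-Encoder, following Eqs. (1)-(5)."""

    def __init__(self, d_w: int, spatial: int = 7, hidden: int = 128):
        super().__init__()
        # Eq. (1): 1x1 convolutions that produce Q, K, V from the group features.
        self.to_q = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.to_k = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.to_v = nn.Conv2d(d_w, d_w, kernel_size=1)
        hw = spatial * spatial
        # Eqs. (4)-(5): two residual MLPs over the flattened spatial axis, GELU as sigma.
        self.norm1 = nn.LayerNorm(hw)
        self.mlp1 = nn.Sequential(nn.Linear(hw, hidden), nn.GELU(), nn.Linear(hidden, hw))
        self.norm2 = nn.LayerNorm(hw)
        self.mlp2 = nn.Sequential(nn.Linear(hw, hidden), nn.GELU(), nn.Linear(hidden, hw))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [B, d_w, H, W] features of one channel group.
        b, c, h, w = x.shape
        q = self.to_q(x).flatten(2).transpose(1, 2)    # Q_hat: [B, HW, d_w]
        k = self.to_k(x).flatten(2)                    # K_hat: [B, d_w, HW]
        v = self.to_v(x).flatten(2).transpose(1, 2)    # V_hat: [B, HW, d_w]
        # Eq. (2): channel-channel correlation R = softmax(K_hat . Q_hat), size d_w x d_w.
        r = torch.softmax(torch.bmm(k, q), dim=-1)     # [B, d_w, d_w]
        att = torch.bmm(v, r)                          # [B, HW, d_w]
        # Eq. (3): residual connection back onto the input features.
        x = x + att.transpose(1, 2).reshape(b, c, h, w)
        # Eqs. (4)-(5): per-channel spatial mixing with residual MLPs.
        t = x.flatten(2)                               # [B, d_w, HW]
        t = t + self.mlp1(self.norm1(t))
        t = t + self.mlp2(self.norm2(t))
        # Spatial max-pool keeps the strongest response per channel:
        # one d_w-dimensional semantic vector for this group.
        return t.max(dim=-1).values                    # [B, d_w]


if __name__ == "__main__":
    enc = ML2PEncoderGroup(d_w=300, spatial=7)
    print(enc(torch.randn(2, 300, 7, 7)).shape)        # torch.Size([2, 300])
```

Running this for each of the g groups and stacking the outputs gives the g×d_w matrix X that the next subsection feeds into Group Attention.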
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.49, + 0.665, + 0.506 + ], + "angle": 0, + "content": "3.3. Group Attention" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.513, + 0.892, + 0.649 + ], + "angle": 0, + "content": "Although we obtained group semantic vectors \\(\\mathcal{X}\\) through \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder, the semantic vectors generated by each group did not establish a relationship with each other at this time. As we already know, the key to improving the accuracy of multi-label image classification is to construct the correlation of labels within the image. So we use Group Attention to build the mutual information and also to find similar responses between different labels. We pass a series of linear layers to \\(\\mathcal{X}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.656, + 0.891, + 0.675 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {\\mathbf {x}} = W _ {x} ^ {Q} \\mathcal {X} \\quad \\mathbf {K} _ {\\mathbf {x}} = W _ {x} ^ {K} \\mathcal {X} \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.682, + 0.891, + 0.699 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} = \\left(\\mathbf {Q} _ {\\mathbf {x}} \\cdot \\mathbf {K} _ {\\mathbf {x}}\\right) \\cdot \\mathcal {X} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.892, + 0.823 + ], + "angle": 0, + "content": "where \\(\\mathbf{Q}_{\\mathbf{x}} \\in \\mathbb{R}^{g \\times d_w}\\), and we transpose \\(\\mathbf{K}_{\\mathbf{x}}\\) into \\(\\mathbf{K}_{\\mathbf{x}} \\in \\mathbb{R}^{d_w \\times g}\\). \\(W_x^Q\\) and \\(W_x^K\\) are different linear weights. \\(S \\in \\mathbb{R}^{g \\times d_w}\\) is the semantic matrix, which contains all the semantic information of the input image. In the loss function, we will make each semantic vector in \\(S\\) approximate the semantic information of seen classes appearing in the image. Therefore, from another perspective, the semantic vectors in \\(S\\) are related to seen classes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.833, + 0.645, + 0.847 + ], + "angle": 0, + "content": "3.4. Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "During training, some semantic vectors are generated for each input image. The semantic matrix \\( S \\) includes the semantic information in the image and is sent to the prediction" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23862" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "module. The loss function consists of two parts. First of all, to make the positive class (seen class appear in each training image) get a higher ranking than the negative class (seen class which does not appear in the training image). Inspired by [49], we choose to adopt ranknet loss [5] as the main component of the loss function. 
We use" + }, + { + "type": "equation", + "bbox": [ + 0.151, + 0.196, + 0.47, + 0.213 + ], + "angle": 0, + "content": "\\[\n\\mu_ {i j} = \\max \\left(\\mathcal {S} \\cdot n _ {i}\\right) - \\max \\left(\\mathcal {S} \\cdot p _ {j}\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.224, + 0.469, + 0.3 + ], + "angle": 0, + "content": "to indicate the number of violations of any of these ranking constraints, where \\( n_i \\) represents the semantic vector of the negative class, and \\( p_j \\) denotes the semantic vector of the positive class. max is used to maximize this gap between negative and positive, and constrain it in subsequent steps." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.301, + 0.469, + 0.331 + ], + "angle": 0, + "content": "Next, to minimize the gap, we design the loss function as the following:" + }, + { + "type": "equation", + "bbox": [ + 0.129, + 0.342, + 0.469, + 0.379 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {r a n k}} = \\frac {1}{(| P | | \\bar {P} |)} \\sum_ {i} \\sum_ {j} \\log \\left(1 + e ^ {\\mu_ {i j}}\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.392, + 0.47, + 0.52 + ], + "angle": 0, + "content": "where \\(\\frac{1}{(|P| |\\bar{P}|)}\\) is used to normalize the ranknet loss, and \\(|P|\\) denotes the number of positive class, \\(|\\bar{P}|\\) represents the number of negative class. When an image contains a large number of positive labels, the image becomes difficult to classify. So we need the model to value these hard samples during training. Therefore, we add the class weight \\(\\omega\\) to improve the performance of the model in the face of hard samples. \\(\\omega\\) is represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.533, + 0.469, + 0.563 + ], + "angle": 0, + "content": "\\[\n\\omega = 1 + \\sum_ {i} v a r (P ^ {i}), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.577, + 0.469, + 0.727 + ], + "angle": 0, + "content": "where \\( P^i \\) represents the vector of the \\( i \\)-th positive class, \\( var \\) means the variance. The higher \\( \\omega \\) means the image contains more complex labels. To prevent the direction of the semantic vectors generated by the model from being too divergent, it needs to be controlled by the loss function. Therefore, we believe that the addition of regularization terms can reduce the difference between the generated semantic vectors when the model faces complex input images. This reduction in variance helps the model learn relevant information between different classes." + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.737, + 0.469, + 0.779 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} = \\left\\| \\sum_ {n} v a r \\left(\\mathcal {S} _ {n}\\right) \\right\\| _ {1}. 
\\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.79, + 0.419, + 0.805 + ], + "angle": 0, + "content": "Finally, the loss function of the model is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.817, + 0.469, + 0.87 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} ((1 - \\lambda) \\cdot \\omega \\mathcal {L} _ {\\text {r a n k}} (\\mathcal {S} _ {i}, Y _ {i}) + \\lambda \\mathcal {L} _ {\\text {r e g}} (\\mathcal {S} _ {i})) \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "where \\(N\\) means the number of batch size, and \\(\\lambda\\) is a hyperparameter that denotes the regularization term's weight." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.634, + 0.108 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.116, + 0.691, + 0.133 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.14, + 0.892, + 0.397 + ], + "angle": 0, + "content": "Datasets: First, we use the NUS-WIDE dataset [10] to conduct MLZSL experiments. The NUS-WIDE dataset contains about 270,000 images, and each image contains 925 labels, which are automatically extracted from Flickr user tags. In addition, it also contains 81 labels that are manually annotated by humans, and these labels are called 'GroundTruth'. During the experiment, 925 labels were used as 'seen labels', and 81 labels were used as 'unseen labels'. This setting is similar with [22]. Another dataset is called the Open-Images-V4 dataset. This dataset contains nearly 9 million training images, 125,456 images as test images, and 41,620 images in the validation set. The train-set contains 7,186 labels, which are 'seen labels' that appear at least 100 times in the train-set. While the remaining 400 most frequent labels that do not appear in the train-set are used as test-set labels, they are also used as 'unseen labels'. Each unseen label has at least appeared 75 times." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.892, + 0.519 + ], + "angle": 0, + "content": "Evaluation Metrics: To better allow our proposed new model and other comparative models to perform an unbiased comparison on the task of MLZSL, we use the two most common evaluation metrics, the mean Average Precision (mAP) [22, 41] and F1-Score. Among them, top-K F1-Score is used to measure the accuracy of the model for label prediction, and mAP is used to reflect the accuracy for unseen label retrieval of the image." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.892, + 0.612 + ], + "angle": 0, + "content": "Implementation Details: Our model can support end-to-end training. We choose VGG19 [37], pre-trained on ImageNet dataset [11], as the backbone network. Unlike other methods, our model uses multi-scale feature maps and aggregates them. The sizes of the feature maps are \\(28 \\times 28\\), \\(14 \\times 14\\), and \\(7 \\times 7\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.892, + 0.779 + ], + "angle": 0, + "content": "In terms of the optimizer, we choose to use the Adam optimizer [24], which requires less memory and is suitable for large datasets. The weight decay of the Adam optimizer is set to \\( 4e^{-3} \\). 
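Before the remaining training-schedule details, the sketch below pulls together our reading of the group attention in Eqs. (6)-(7) and the objective in Eqs. (8)-(12): class scores are the maximum similarity between each seen-class word vector and the rows of S, the RankNet-style term of Eq. (9) is weighted by ω from Eq. (10), and the variance regularizer of Eq. (11) is mixed in with weight λ as in Eq. (12). Function and module names, and the exact variance conventions, are our assumptions rather than the official code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class GroupAttention(nn.Module):
    """Eqs. (6)-(7) as we read them: two linear maps over the g group vectors,
    a g x g interaction matrix, and the re-mixed semantic matrix S."""

    def __init__(self, d_w: int):
        super().__init__()
        self.wq = nn.Linear(d_w, d_w, bias=False)
        self.wk = nn.Linear(d_w, d_w, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [B, g, d_w] group semantic vectors from the (ML)^2P-Encoder.
        q = self.wq(x)                           # Q_x: [B, g, d_w]
        k = self.wk(x).transpose(1, 2)           # K_x (transposed): [B, d_w, g]
        return torch.bmm(torch.bmm(q, k), x)     # S = (Q_x . K_x) . X: [B, g, d_w]


def mlzsl_loss(s: torch.Tensor, pos_vecs: torch.Tensor, neg_vecs: torch.Tensor,
               lam: float = 0.4) -> torch.Tensor:
    """Eqs. (8)-(12) for a single image, under our assumptions.
    s: [g, d_w] semantic matrix; pos_vecs: [P, d_w] word vectors of seen classes
    present in the image; neg_vecs: [N, d_w] word vectors of absent seen classes."""
    pos = (s @ pos_vecs.t()).max(dim=0).values          # best score per positive class
    neg = (s @ neg_vecs.t()).max(dim=0).values          # best score per negative class
    mu = neg.unsqueeze(1) - pos.unsqueeze(0)            # Eq. (8): all (negative, positive) gaps
    l_rank = F.softplus(mu).mean()                      # Eq. (9): log(1 + e^mu), averaged over |P||N| pairs
    omega = 1.0 + pos_vecs.var(dim=1, unbiased=False).sum()  # Eq. (10): hard-sample weight (our reading)
    l_reg = s.var(dim=0, unbiased=False).sum()          # Eq. (11): keeps the g semantic vectors from diverging
    return (1.0 - lam) * omega * l_rank + lam * l_reg   # Eq. (12) with the paper's lambda = 0.4
```

In a full training step this per-image loss would be averaged over the batch and minimized with the Adam settings described in this section.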
In the NUS-WIDE dataset experiments, the initial learning rate of the model is \\( 5e^{-5} \\), and then the learning rate decreases by \\( \\frac{1}{10} \\) at the 7th epoch. The entire experimental process of the NUS-WIDE dataset requires a total of 20 epochs with a batch size of 48. In the experiments using the Open-Images-V4 dataset, our learning rate, batch size, and decay rate remain the same as the NUS-WIDE dataset, but the number of epochs is 7." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Baselines: We will compare the proposed method with several state-of-the-art deep learning-based MLZSL models. These comparative methods have been published in recent years and cover a fairly rich variety of techniques, such as the attention mechanism with the most common CNNs. These comparison methods include: CONSE [36], LabelEM [2], Fast0Tag [49], Kim et al. [23], LESA Attention per Cluster (ApC) [22], LESA [22], and BiAM [35]. All" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23863" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.471, + 0.182 + ], + "angle": 0, + "content": "comparison methods using VGG19 [37] are not fine-tuned. In addition to comparing with comparison models, we will also test the model's performance under different settings of hyper-parameters \\( g \\) and \\( \\lambda \\). At the same time, we will conduct ablation experiments to verify the integrity of the model's architecture." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.195, + 0.333, + 0.212 + ], + "angle": 0, + "content": "4.2. State-of-the-art Comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.219, + 0.471, + 0.52 + ], + "angle": 0, + "content": "NUS-WIDE: Table 1 shows the performance of ours and competitive methods on the NUS-WIDE test-set. The table contains the results of both ZSL and GZSL. CONSE [36] and LabelEM [2], as the methods proposed earlier, do not perform well on large-scale datasets. Fast0Tag [49] achieves more competitive results by sorting the positive labels to find the principal directions of the image. LESA [22] and BiAM [35] are currently the most advanced models that rely on spatial attention mechanism to generate semantic information. Compared to BiAM, our method achieves a \\(3.6\\%\\) improvement on mAP in the ZSL task. Besides, we lead BiAM by \\(0.8\\%\\) and \\(2.9\\%\\) in F1-Score of \\(K = 3\\) and \\(K = 5\\), respectively. On the GZSL task, we also surpass BiAM. BiAM deals with higher-dimensional and richer spatial information, while our method is more inclined to single-dimensional channel responses. Therefore, it is not easy to achieve such results with \\(1.3\\%\\) improvement in mAP and \\(0.3\\%\\) and \\(0.7\\%\\) in F1-Score of \\(K = 3\\) and \\(K = 5\\), respectively. Good results on NUS-WIDE dataset imply the effectiveness of our method." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.522, + 0.47, + 0.763 + ], + "angle": 0, + "content": "Attention Visualization on NUS-WIDE: Figure 6 illustrate the attention regions of the model when our method predicts unseen labels. Figure 6(a) shows that our model can clearly distinguish scene information from all unseen classes. 
The attention areas of \"Rocks\" and \"Mountain\" in the figure are roughly the same, which indicates that the two classes have similar semantics and dependencies, and the existence of Group Attention enables the model to learn this mutual information well. Figure 6(b) is a comparison with BiAM [35], the best existing model for mining spatial domain information. This result fully shows the effective use of channel information can more accurately grasp the response between classes. While BiAM's over-exploration of spatial information improves the acquisition of regional information, it loses the scene-level response at the same time. For more comparison results, please refer to appendix." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Open-Images-V4: From Table 2, we show the results of ours and the baseline models on Open-Images-V4. We follow the evaluation setting of [22, 35]. This dataset contains more seen and unseen labels than NUS-WIDE. With a large increase in the number of classes, all methods get poor F1-Score on the ZSL task. Among them, Fast0Tag has made great progress compared with past methods, especially in the GZSL task. LESA [22] and BiAM [35], as the two best methods, represent the highest level of extracting spatial re" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.089, + 0.892, + 0.145 + ], + "angle": 0, + "content": "Table 1. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of \\( K \\in 3,5 \\) and mAP. The best results are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.156, + 0.885, + 0.395 + ], + "angle": 0, + "content": "
Method | Task | mAP | F1 (K = 3) | F1 (K = 5)
CONSE [36] | ZSL | 9.4 | 21.6 | 20.2
 | GZSL | 2.1 | 7.0 | 8.1
LabelEM [2] | ZSL | 7.1 | 19.2 | 19.5
 | GZSL | 2.2 | 9.5 | 11.3
Fast0Tag [49] | ZSL | 15.1 | 27.8 | 26.4
 | GZSL | 3.7 | 11.5 | 13.5
Kim et al. [23] | ZSL | 10.4 | 25.8 | 23.6
 | GZSL | 3.7 | 10.9 | 13.2
Attention per Cluster [22] | ZSL | 12.9 | 24.6 | 22.9
 | GZSL | 2.6 | 6.4 | 7.7
LESA [22] | ZSL | 19.4 | 31.6 | 28.7
 | GZSL | 5.6 | 14.4 | 16.8
BiAM [35] | ZSL | 25.8 | 32.0 | 29.4
 | GZSL | 8.9 | 15.5 | 18.5
Our Approach | ZSL | 29.4 | 32.8 | 32.3
 | GZSL | 10.2 | 15.8 | 19.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.406, + 0.892, + 0.462 + ], + "angle": 0, + "content": "Table 2. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the Open-Images-V4 dataset. We show the indicators of F1-Score in the case of \\( K \\in {10},{20} \\) and mAP. Best results are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.473, + 0.89, + 0.684 + ], + "angle": 0, + "content": "
Method | Task | mAP | F1 (K = 10) | F1 (K = 20)
CONSE [36] | ZSL | 40.4 | 0.4 | 0.3
 | GZSL | 43.5 | 2.6 | 2.4
LabelEM [2] | ZSL | 40.5 | 0.5 | 0.4
 | GZSL | 45.2 | 5.2 | 5.1
Fast0Tag [49] | ZSL | 41.2 | 0.7 | 0.6
 | GZSL | 45.2 | 16.0 | 13.0
Attention per Cluster [22] | ZSL | 40.7 | 1.2 | 0.9
 | GZSL | 44.9 | 16.9 | 13.5
LESA [22] | ZSL | 41.7 | 1.4 | 1.0
 | GZSL | 45.4 | 17.4 | 14.3
BiAM [35] | ZSL | 62.8 | 4.1 | 3.7
 | GZSL | 79.6 | 17.6 | 15.1
Our Approach | ZSL | 65.7 | 7.5 | 6.5
 | GZSL | 79.9 | 27.6 | 24.1
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.892, + 0.839 + ], + "angle": 0, + "content": "sponses. BiAM achieves very large progress in mAP metrics on both ZSL and GZSL tasks. But our method achieves the best results in the mAP of ZSL, while leading by \\(3.4\\%\\) and \\(2.8\\%\\) in F1-Score with \\(K = 3\\) and \\(K = 5\\), respectively. Most importantly, for the GZSL task, our F1-Score results also achieve huge advantages by \\(10.0\\%\\) and \\(9.0\\%\\). This shows that the channel-class correlation as semantic information can fully cope with the complex situation of a large number of labels." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Figure 5 shows the mAP, inference time, and GFLOPs comparisons between our model for obtaining semantic information based on channel responses and the two methods (LESA [22] and BiAM [35]) for acquiring semantic informa" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "23864" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "tion based on spatial features and achieving optimal results. In the mAP comparison, it can be seen that we have the highest accuracy for prediction in the ZSL task. At the same time, due to the small amount of data to be processed, the inference speed is the fastest of all comparison methods when we use the same GPU of NVIDIA RTX 3090. Finally, precisely because the model only needs to deal with a single-dimensional channel response, our \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder module requires much less computation than \\(LESA\\) and \\(BiAM\\) that deal with spatial attention. At the same time, the feature map is grouped to avoid the geometric increase of the computational complexity caused by the feature pyramid. This shows that our \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder can be more efficient." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.299, + 0.472, + 0.342 + ], + "angle": 0, + "content": "Table 3. Ablation study shows the contribution of the different components in our proposed approach. The baseline methods are performed on the NUS-WIDE test-set." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.351, + 0.465, + 0.449 + ], + "angle": 0, + "content": "
 | a | b | c | d | ours
Forward Pyramid | (ML)2P-Encoder | Group Attention
mAP | ZSL | 25.3 | 27.3 | 28.4 | 27.9 | 29.4
 | GZSL | 8.1 | 8.5 | 9.2 | 8.8 | 10.2
" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.485, + 0.255, + 0.562 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.585, + 0.253, + 0.599 + ], + "angle": 0, + "content": "(a) W/O (ML)\\(^2\\)P-Encoder" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.466, + 0.457, + 0.581 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.295, + 0.585, + 0.436, + 0.599 + ], + "angle": 0, + "content": "(b) With \\((\\mathrm{ML})^{2}\\mathrm{P}\\) -Encoder" + }, + { + "type": "image_caption", + "bbox": [ + 0.096, + 0.611, + 0.45, + 0.625 + ], + "angle": 0, + "content": "Figure 3. Evaluation of t-SNE (zoom in for a better view)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.636, + 0.323, + 0.652 + ], + "angle": 0, + "content": "4.3. Hyper-parameter Selection" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Our method includes two hyper-parameters, the number of groups \\( g \\) and the weight of the regularization term \\( \\lambda \\). We use the control variable method. In terms of initializing hyper-parameters, the number of output semantic vectors \\( g \\) is set to 7, and the value of \\( \\lambda \\) is set to 0.4. The line graph in Figure 4 shows the mAP results achieved on the ZSL and GZSL tasks with different hyper-parameters, respectively. In addition, we can also see the impact of changes in hyperparameters on the prediction accuracy of the model. It can be seen that the number of \\( g \\) does not have a very significant effect on the mAP of the ZSL task. But the impact on GZSL is more obvious. After comparison, we believe that when \\( g = 7 \\), two different tasks can be well balanced. For the choice of the value of \\( \\lambda \\), we found that its change will have a greater impact on mAP. But only when \\( \\lambda = 0.4 \\), the performance of GZSL is far better than other results, and" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "ZSL also achieves the optimal result. So the optimal hyperparameters we choose \\( g = 7 \\) and \\( \\lambda = 0.4 \\)." + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.137, + 0.695, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.589, + 0.237, + 0.615, + 0.248 + ], + "angle": 0, + "content": "(a) \\(g\\)" + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.138, + 0.885, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.76, + 0.236, + 0.824, + 0.248 + ], + "angle": 0, + "content": "(b) Weights" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.26, + 0.894, + 0.302 + ], + "angle": 0, + "content": "Figure 4. Hyper-Parameter selection. The higher the mAP the better. All the experiments are performed on the NUS-WIDE test-set." 
+ }, + { + "type": "image", + "bbox": [ + 0.506, + 0.33, + 0.621, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.403, + 0.593, + 0.415 + ], + "angle": 0, + "content": "(a) mAP" + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.33, + 0.748, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.635, + 0.403, + 0.758, + 0.415 + ], + "angle": 0, + "content": "(b) Inference time (ms)" + }, + { + "type": "image", + "bbox": [ + 0.76, + 0.33, + 0.873, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.792, + 0.403, + 0.858, + 0.415 + ], + "angle": 0, + "content": "(c) GFLOPs" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.427, + 0.894, + 0.483 + ], + "angle": 0, + "content": "Figure 5. Comparison of our \\((\\mathbf{ML})^2\\mathbf{P}\\)-Encoder with BiAM and LESA in mAP, inference time, and FLOPs. The higher the mAP the better, the lower the Inference time and GFLOPs the better. All methods are performed on the NUS-WIDE test-set." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.499, + 0.654, + 0.515 + ], + "angle": 0, + "content": "4.4. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.522, + 0.892, + 0.779 + ], + "angle": 0, + "content": "Ablation Study: To illustrate the effectiveness of each module designed in our method, we arrange three comparative experiments. The specific results are shown in Table 3. As the most primitive structure, model 'a' only contains shuffle and grouping operations. But after adding the 'Forward Pyramid', the model expands the number of features. As the number of optional feature channels increases, the amount of information brought by the channel also increases, thus achieving more competitive results. The addition of \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder enables the model to process the channel response of specific classes. The supplement of Group Attention is to give the model-specific information for solving multi-label tasks, that is, inter-class correlation. The combination of \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder and Group Attention greatly improves the prediction ability of the model in ZSL and GZSL tasks, indicating that our model construction has achieved great success." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.779, + 0.892, + 0.856 + ], + "angle": 0, + "content": "t-SNE: Figure 3 shows the performance of \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder in t-SNE visualization. It can be seen that after using \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder, the boundaries of inter-class become much clearer, proving the correctness of our exploration for class-specific channel responses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Different Backbones: Table 4 shows the results produced by our method using different backbones. It can be seen from the results that ResNet [20] has obvious advantages" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23865" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.471, + 0.152 + ], + "angle": 0, + "content": "over VGG [37]. As the ResNet network deepens and the number of parameters increases, the results obtained by our model become better. This is exactly in line with the result variation of an end-to-end model." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.163, + 0.47, + 0.219 + ], + "angle": 0, + "content": "Table 4. Our \\(\\mathbf{C}^3\\)-MLZSL approach with different backbones for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of \\(K \\in 3, 5\\) and mAP. The best results are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.231, + 0.462, + 0.352 + ], + "angle": 0, + "content": "
Backbones | Task | mAP | F1 (K = 3) | F1 (K = 5)
VGG19 [37] | ZSL | 29.4 | 32.8 | 32.3
 | GZSL | 10.2 | 15.8 | 19.2
ResNet50 [20] | ZSL | 30.9 | 33.6 | 33.2
 | GZSL | 10.7 | 15.9 | 19.4
ResNet101 [20] | ZSL | 31.2 | 33.9 | 33.9
 | GZSL | 10.9 | 16.1 | 19.5
" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.371, + 0.466, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.587, + 0.47, + 0.643 + ], + "angle": 0, + "content": "Figure 6. Attention visualization. where (a) is the attention response of our \\(\\mathbf{C}^3\\)-MLZSL when faced with unseen labels. (b) is the comparison of attention visualization results of our \\(\\mathbf{C}^3\\)-MLZSL and BiAM [35] models. See appendix for more results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.283, + 0.683 + ], + "angle": 0, + "content": "4.5. Multi-Label Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Table 5 shows the results of the model for multi-label image classification. The baselines we compare include not only state-of-the-art MLZSL models, but also multi-label image classification models including Logistic Regression [40], WSABIE [43], WARP [17] and CNN-RNN [42]. As can be seen from the results, our model far surpasses many multi-label image classification models and the classic Fast0Tag [49] algorithm in mAP performance. This is because the above models only process the input image into a single semantic vector, and limited image embedding cannot build the semantic diversity for multi-label classification. For other methods such as LESA [22] and BiAM [35], they noticed that the attention regions of different objects in multi-label images are different, and thus define the label" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 5. Performance of Multi-label image classification task on NUS-WIDE datasets. The best results are in bold." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.129, + 0.887, + 0.303 + ], + "angle": 0, + "content": "
Method | F1 (K=3) (↑) | F1 (K=5) (↑) | mAP (↑)
Logistic [40] | 51.1 | 46.1 | 21.6
WARP [17] | 54.4 | 49.4 | 3.1
WSABIE [43] | 53.8 | 49.2 | 3.1
Fast0Tag [49] | 53.8 | 48.6 | 22.4
CNN-RNN [42] | 55.2 | 50.8 | 28.3
Kim et al. [23] | 56.8 | 51.3 | 32.6
LESA ApC [22] | 56.6 | 50.7 | 31.7
LESA [22] | 58.0 | 52.0 | 31.5
BiAM [35] | 59.6 | 53.4 | 47.8
Ours | 59.8 | 53.8 | 48.0
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.326, + 0.892, + 0.416 + ], + "angle": 0, + "content": "related embeddings from the perspective of the spatial domain. However, after feature extraction, our model takes into account that the channel response can be important information representing the class semantics, and this superior performance just verifies the rationality of the exploration." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.446, + 0.619, + 0.461 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.477, + 0.892, + 0.689 + ], + "angle": 0, + "content": "In this paper, we focus on the neglect of channel-wise class information and over-reliance on spatial-wise class information in previous MLZSL models, then propose C3-MLZSL structure and the \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder component. The C3-MLZSL structure first group multi-scale features, then use the \\((\\mathrm{ML})^{2}\\mathrm{P}\\)-Encoder to calculate the correlation of channels within each group and perform information fusion to get the semantic vectors. These semantic vectors are then aggregated through group attention to learn mutual information between groups. Finally, the model successfully learns channel-class correlation. Extensive experiments on the large-scale NUS-WIDE and Open-Images-V4 datasets show that our model has achieved very competitive results on MLZSL compared with other state-of-the-art models." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.718, + 0.671, + 0.736 + ], + "angle": 0, + "content": "6. Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.748, + 0.892, + 0.888 + ], + "angle": 0, + "content": "This research was supported by fundings from the Key-Area Research and Development Program of Guangdong Province (No. 2021B0101400003), Hong Kong RGC Research Impact Fund (No. R5060-19), Areas of Excellence Scheme (AoE/E-601/22-R), General Research Fund (No. 152203/20E, 152244/21E, 152169/22E, 152211/23E), Shenzhen Science and Technology Innovation Commission (JCYJ20200109142008673), the National Natural Science Foundation of China (No. 62102327), and PolyU Internal Fund (No. P0043932)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.521, + 0.957 + ], + "angle": 0, + "content": "23866" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Zeynep Akata, Mateusz Malinowski, Mario Fritz, and Bernt Schiele. Multi-cue zero-shot learning with strong supervision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 59-68, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.228 + ], + "angle": 0, + "content": "[2] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2015. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.47, + 0.286 + ], + "angle": 0, + "content": "[3] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2016. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.288, + 0.47, + 0.343 + ], + "angle": 0, + "content": "[4] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Improving semantic embedding consistency by metric learning for zero-shot classification. In European Conference on Computer Vision, pages 730-746. Springer, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.47, + 0.413 + ], + "angle": 0, + "content": "[5] Chris Burges, Tal Shaked, Erin Renshaw, Ari Lazier, Matt Deeds, Nicole Hamilton, and Greg Hullender. Learning to rank using gradient descent. In Proceedings of the 22nd international conference on Machine learning, pages 89-96, 2005. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.416, + 0.47, + 0.471 + ], + "angle": 0, + "content": "[6] Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. Synthesized classifiers for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5327-5336, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.474, + 0.469, + 0.543 + ], + "angle": 0, + "content": "[7] Zhi Chen, Yadan Luo, Sen Wang, Ruihong Qiu, Jingjing Li, and Zi Huang. Mitigating generation shifts for generalized zero-shot learning. In Proceedings of the 29th ACM International Conference on Multimedia, pages 844-852, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.545, + 0.469, + 0.613 + ], + "angle": 0, + "content": "[8] Zhao-Min Chen, Xiu-Shen Wei, Peng Wang, and Yanwen Guo. Multi-label image recognition with graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5177-5186, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.616, + 0.469, + 0.671 + ], + "angle": 0, + "content": "[9] Xing Cheng, Hezheng Lin, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Nian Shi, and Honglin Liu. Mltr: Multi-label classification with transformer. arXiv preprint arXiv:2106.06195, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.673, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[10] Tat-Seng Chua, Jinhui Tang, Richang Hong, Haojie Li, Zhiping Luo, and Yantao Zheng. Nus-wide: a real-world web image database from national university ofSingapore. In Proceedings of the ACM international conference on image and video retrieval, pages 1-9, 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.469, + 0.8 + ], + "angle": 0, + "content": "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[12] Shay Deutsch, Soheil Kolouri, Kyungnam Kim, Yuri Owechko, and Stefano Soatto. Zero shot learning via multi-scale manifold regularization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7112-7119, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.902 + ], + "angle": 0, + "content": "[13] Thibaut Durand, Nazanin Mehrasa, and Greg Mori. 
Learning a deep convnet for multi-label classification with partial" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "labels. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 647-657, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.137, + 0.892, + 0.191 + ], + "angle": 0, + "content": "[14] Lei Feng, Bo An, and Shuo He. Collaboration based multi-label learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3550-3557, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.194, + 0.892, + 0.25 + ], + "angle": 0, + "content": "[15] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. Advances in neural information processing systems, 26, 2013. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.252, + 0.892, + 0.307 + ], + "angle": 0, + "content": "[16] Bin-Bin Gao and Hong-Yu Zhou. Learning to discover multi-class attentional regions for multi-label image recognition. IEEE Transactions on Image Processing, 30:5920-5932, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.31, + 0.892, + 0.364 + ], + "angle": 0, + "content": "[17] Yunchao Gong, Yangqing Jia, Thomas Leung, Alexander Toshev, and Sergey Ioffe. Deep convolutional ranking for multilabel image annotation. arXiv preprint arXiv:1312.4894, 2013. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.367, + 0.892, + 0.436 + ], + "angle": 0, + "content": "[18] Omkar Gune, Biplab Banerjee, Subhasis Chaudhuri, and Fabio Cuzzolin. Generalized zero-shot learning using generated proxy unseen samples and entropy separation. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4262-4270, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.439, + 0.892, + 0.493 + ], + "angle": 0, + "content": "[19] Jingcai Guo and Song Guo. A novel perspective to zero-shot learning: Towards an alignment of manifold structures via semantic feature expansion. IEEE Transactions on Multimedia, 23:524-537, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 1, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.555, + 0.892, + 0.582 + ], + "angle": 0, + "content": "[21] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.584, + 0.892, + 0.64 + ], + "angle": 0, + "content": "[22] Dat Huynh and Ehsan Elhamifar. A shared multi-attention framework for multi-label zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8776–8786, 2020. 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.642, + 0.892, + 0.683 + ], + "angle": 0, + "content": "[23] Jin-Hwa Kim, Jaehyun Jun, and Byoung-Tak Zhang. Bilinear attention networks. arXiv preprint arXiv:1805.07932, 2018. 
5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.686, + 0.892, + 0.727 + ], + "angle": 0, + "content": "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.73, + 0.892, + 0.771 + ], + "angle": 0, + "content": "[25] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.774, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[26] Elyor Kodirov, Tao Xiang, and Shaogang Gong. Semantic autoencoder for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3174-3183, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[27] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 951-958. IEEE, 2009. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "23867" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[28] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Attribute-based classification for zero-shot visual object categorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(3):453-465, 2014. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.151, + 0.472, + 0.22 + ], + "angle": 0, + "content": "[29] Chung-Wei Lee, Wei Fang, Chih-Kuan Yeh, and Yu-Chiang Frank Wang. Multi-label zero-shot learning with structured knowledge graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1576–1585, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.223, + 0.47, + 0.292 + ], + "angle": 0, + "content": "[30] Jingjing Li, Mengmeng Jing, Lei Zhu, Zhengming Ding, Ke Lu, and Yang Yang. Learning modality-invariant latent representations for generalized zero-shot learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 1348-1356, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.295, + 0.47, + 0.363 + ], + "angle": 0, + "content": "[31] Yanan Li, Donghui Wang, Huanhang Hu, Yuetan Lin, and Yueting Zhuang. Zero-shot recognition using dual visual-semantic mapping paths. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3279–3287, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.367, + 0.469, + 0.435 + ], + "angle": 0, + "content": "[32] Teng Long, Xing Xu, Youyou Li, Fumin Shen, Jingkuan Song, and Heng Tao Shen. Pseudo transfer with marginalized corrupted attribute for zero-shot learning. In Proceedings of the 26th ACM international conference on Multimedia, pages 1802-1810, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.439, + 0.469, + 0.493 + ], + "angle": 0, + "content": "[33] Thomas Mensink, Efstratios Gavves, and Cees GM Snoek. Costa: Co-occurrence statistics for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2441-2448, 2014. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.469, + 0.552 + ], + "angle": 0, + "content": "[34] Pedro Morgado and Nuno Vasconcelos. Semantically consistent regularization for zero-shot recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6060-6069, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.555, + 0.469, + 0.623 + ], + "angle": 0, + "content": "[35] Sanath Narayan, Akshita Gupta, Salman Khan, Fahad Shahbaz Khan, Ling Shao, and Mubarak Shah. Discriminative region-based multi-label zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8731-8740, 2021. 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.627, + 0.469, + 0.696 + ], + "angle": 0, + "content": "[36] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. In 2nd International Conference on Learning Representations, ICLR 2014, 2014. 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.699, + 0.469, + 0.74 + ], + "angle": 0, + "content": "[37] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 1, 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.743, + 0.469, + 0.798 + ], + "angle": 0, + "content": "[38] Richard Socher, Milind Ganjoo, Christopher D Manning, and Andrew Ng. Zero-shot learning through cross-modal transfer. In Advances in neural information processing systems, pages 935-943, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[39] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich, et al. Going deeper with convolutions. Cvpr, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[40] Grigorios Tsoumakas and Ioannis Katakis. Multi-label classification: An overview. International Journal of Data Warehousing and Mining (IJDWM), 3(3):1-13, 2007. 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.161 + ], + "angle": 0, + "content": "[41] Andreas Veit, Neil Alldrin, Gal Chechik, Ivan Krasin, Abhinav Gupta, and Serge Belongie. Learning from noisy large-scale datasets with minimal supervision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 839-847, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.893, + 0.232 + ], + "angle": 0, + "content": "[42] Jiang Wang, Yi Yang, Junhua Mao, Zhiheng Huang, Chang Huang, and Wei Xu. Cnn-rnn: A unified framework for multi-label image classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2285-2294, 2016. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.234, + 0.893, + 0.289 + ], + "angle": 0, + "content": "[43] Jason Weston, Samy Bengio, and Nicolas Usunier. Wsabie: Scaling up to large vocabulary image annotation. In Twenty-Second International Joint Conference on Artificial Intelligence, 2011. 
2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.291, + 0.893, + 0.359 + ], + "angle": 0, + "content": "[44] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 69-77, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.361, + 0.893, + 0.415 + ], + "angle": 0, + "content": "[45] Yongqin Xian, Bernt Schiele, and Zeynep Akata. Zero-shot learning-the good, the bad and the ugly. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4582-4591, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.418, + 0.893, + 0.471 + ], + "angle": 0, + "content": "[46] Hsiang-Fu Yu, Prateek Jain, Purushottam Kar, and Inderjit Dhillon. Large-scale multi-label learning with missing labels. In International conference on machine learning, pages 593-601. PMLR, 2014. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.474, + 0.893, + 0.529 + ], + "angle": 0, + "content": "[47] Chenrui Zhang, Xiaoqing Lyu, and Zhi Tang. Tgg: Transferable graph generation for zero-shot and few-shot learning. In Proceedings of the 27th ACM International Conference on Multimedia, pages 1641-1649, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.531, + 0.893, + 0.585 + ], + "angle": 0, + "content": "[48] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2021-2030, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.587, + 0.893, + 0.642 + ], + "angle": 0, + "content": "[49] Yang Zhang, Boqing Gong, and Mubarak Shah. Fast zero-shot image tagging. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5985-5994. IEEE, 2016. 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.644, + 0.893, + 0.699 + ], + "angle": 0, + "content": "[50] Ziming Zhang and Venkatesh Saligrama. Zero-shot learning via joint latent similarity embedding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6034-6042, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.7, + 0.893, + 0.769 + ], + "angle": 0, + "content": "[51] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 
3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "23868" + } + ] +] \ No newline at end of file diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_origin.pdf b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e246a6fbf23213564e33824cbc005c6c4fbb918d --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/2f5d72e4-31bc-4c21-9948-28d1063a50fb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eff44f1bd552a7f9ef9429d6a172b9137d667898c54240af0448d441cc59d8cb +size 3354382 diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/full.md b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b9119a6e5cd4e30f7eeb77c079bdc2544bf7190d --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/full.md @@ -0,0 +1,323 @@ +# $(\mathbf{ML})^{2}\mathbf{P}$ -Encoder: On Exploration of Channel-class Correlation for Multi-label Zero-shot Learning + +Ziming Liu1, Song Guo1,2, Xiaocheng Lu1, Jingcai Guo1,2*, Jiewei Zhang1, Yue Zeng1, Fushuo Huo1 +1Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China +2The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China + +{ziming.liu, jiewei.zhang, fushuo.huo}@connect.polyu.hk {song.quo, xiaoclu, jc-jingcai.quo, zengyue.zeng}@polyu.edu.hk + +# Abstract + +Recent studies usually approach multi-label zero-shot learning (MLZSL) with visual-semantic mapping on spatial-class correlation, which can be computationally costly, and worse still, fails to capture fine-grained class-specific semantics. We observe that different channels may usually have different sensitivities on classes, which can correspond to specific semantics. Such an intrinsic channel-class correlation suggests a potential alternative for the more accurate and class-harmonious feature representations. In this paper, our interest is to fully explore the power of channel-class correlation as the unique base for MLZSL. Specifically, we propose a light yet efficient Multi-Label MultiLayer Perceptron-based Encoder, dubbed $(ML)^{2}P$ -Encoder, to extract and preserve channel-wise semantics. We reorganize the generated feature maps into several groups, of which each of them can be trained independently with $(ML)^{2}P$ -Encoder. On top of that, a global group-wise attention module is further designed to build the multilabel specific class relationships among different classes, which eventually fulfills a novel Channel-Class Correlation MLZSL framework $(C^{3}$ -MLZSL). Extensive experiments on large-scale MLZSL benchmarks including NUS-WIDE and Open-Images-V4 demonstrate the superiority of our model against other representative state-of-the-art models. + +# 1. Introduction + +The proliferation of smart devices has greatly enriched human life when it comes to the era of big data. 
These smart devices are usually equipped with cameras such that users can easily produce and share their images. With the increasing abundance of public images, how to analyze them accurately has become a challenging problem. Recent years + +![](images/012ebce2bca2908a1ed88b5b724c2627dd75ef1002f2138229d4a579b04f8c43.jpg) +Figure 1. Example of Channel-Class Correlation. Our method achieves the prediction of unseen classes by exploiting the unique distribution of channel responses as semantic information for the class and building correlations with responses from the same channel (zoom in for a better view). + +have witnessed great success in classifying an image into a specific class [20, 37, 39], namely, single-label classification. However, in reality, the images [17,46] usually contain abundant information and thereby consist of multiple labels. + +In recent years, the multi-label classification has been widely investigated by exploring the relationship among different labels from multiple aspects [9, 13, 14, 16, 42]. However, in some scenarios where extensive collections of images exist, e.g., Flickr $^2$ , users can freely set one or more individual tags/labels for each image, while the presented objects and labels in these images may not be fully shown in any previous collection, and thus result in a domain gap for the recognition. Therefore, in real-world applications, the model is required to gain the ability to predict unseen classes as well. As one of the thriving research topics, zero- + +shot learning (ZSL) [1, 12, 15, 34] is designed to transfer tasks from seen classes to unseen classes, and naturally recognizes novel objects of unseen classes. Specifically, ZSL has made continuous success in single-label classification [19, 26, 31, 45, 48]. However, these methods can hardly be extended to the multi-label scenario since exploring the cross-class relationships in an image is non-trivial. + +Recently, some works have focused on multi-label zero-shot learning (MLZSL) tasks and obtained some promising results [33, 36, 49]. Other works considered incorporating attention mechanisms into their models, such as $LESA$ [22] and $BiAM$ [35]. $LESA$ [22] designed an attention-sharing mechanism for different patches in the image so that each patch can output the corresponding class. In another way, $BiAM$ [35] designed a bi-level attention to extract relations from regional context and scene context, which can enrich the regional features of the model and separate the features of different classes. + +Although previous works have made considerable progress, their designed methods have been limited to the processing of spatial-domain information. First of all, the over-reliance on spatial-class correlation fails to capture fine-grained class-specific semantics. In addition, the additional processing of spatial information greatly increases the computational cost of the model and limits the inference speed. Given the shortcomings of the above methods, we found through analysis that the channel response can be used as the semantic information of the class. Firstly, the response of each class in the channel is unique, which creates conditions for obtaining the unique semantics. Secondly, for classes with certain semantic associations, there must be some channels that capture their common information. Therefore, channel information, as an easily overlooked part after feature extraction, can complete the task of capturing multi-label information. 
In MLZSL, we can complete the prediction of unseen classes by obtaining the responses of seen classes in the channel domain, and the relationship between seen and unseen classes. Finally, the subsequent analysis of the channel response greatly saves computational costs. + +Specifically, as shown in Figure 1, as seen classes, "water" and "tree" have unique response distributions on feature channels, and these responses can be used as semantic information for classification tasks. Besides, in order to explore the correlation of classes, we found that although the semantic information of "water" and "tree" is different, there are still some channels that respond simultaneously (i.e. the blue channel). We need to build this correlation during the training process through modeling so that the model can learn multi-label correlations. In the ZSL process, for the unseen class "garden", we know that it is related to "water" (i.e. purple layer) and "tree" (i.e. green, orange, and gray layer) by obtaining its semantic information and matching + +with seen classes. This observation suggests that channels can help not only to classify objects but also to establish associations between classes. Previous methods which only consider spatial information are unable to obtain this intrinsic channel-class correlation and dissimilarity, thus achieving sub-optimal performance on the MLZSL task. + +To address the above challenges and construct a more accurate and robust MLZSL system, we propose to group the generated feature maps and process them in a group-wise manner, thus enhancing the model by fully exploring the channel-class correlations. Besides, by properly designing a light yet efficient Multi-Label Multi-Layer Perceptron-based Encoder, i.e., $(\mathrm{ML})^{2}\mathrm{P}$ -Encoder, we can easily analyze the local relationship between channels while significantly reducing the computation overhead. Finally, these groups are recombined and then perform the calculation of group attention, indicating that the model is analyzed locally and globally from the perspective of the channels, which can ensure the integrity of the representation. + +In summary, our contributions are four-fold: + +1. To the best of our knowledge, our method first suggests the concept of channel-class correlation in MLZSL, and proposes a channel-sensitive attention module $(\mathrm{ML})^{2}\mathrm{P}$ -Encoder to extract and preserve channel-wise semantics for channel groups. +2. Different from previous works that use spatial-class correlation to extract global and local features, we alternatively explore the channel-class correlation as the unique base for MLZSL. +3. In conjunction with $(\mathrm{ML})^{2}\mathrm{P}$ -Encoder, a global group-wise attention is also designed to establish the multi-label specific class relationships among classes. +4. Extensive experiments on large-scale datasets NUS-WIDE and Open-Images-V4 demonstrate the effectiveness of our method against other state-of-the-art models. + +# 2. Related Work + +# 2.1. Multi-Label Classification + +The establishment of graph neural networks (GNNs) brings remarkable success to multi-label classification tasks [8, 25]. Among them, Chen et al. [8] constructs directed graphs for object labels and uses graph convolutional networks (GCN) to map label nodes, which contain word embeddings, into classifiers. 
In addition, the CNN-based multi-label classification models enable the learning of the characteristics of each label from the spatial information of the image and design a new multi-label classifier [13, 14, 16, 17, 42, 43, 46]. Gao et al. [16] suggests a two-stream framework to identify global and local information + +![](images/5c95f6e5e4e83008579a6320d2bbd777ec3c50f5b321ab9aef414704b93cb307.jpg) +Figure 2. Pipeline for $\mathbf{C}^3$ -MLZSL. The input image is first passed through the feature extraction network (eg. VGG19), and then multi-layer feature maps are extracted through the Forward Pyramid module. After the feature maps are shuffled and grouped, each group uses $(\mathbf{ML})^{2}\mathbf{P}$ -Encoder to extract semantic information. Then, the semantic information generated by all groups is associated through Group Attention to generate the final semantic matrix $\mathcal{S}$ (zoom in for a better view). + +separately and a multi-class regional attention module to align them. However, the above methods cannot generalize to unseen classes. + +# 2.2. Zero-Shot Learning + +Zero-shot learning provides a solution to recognize unseen classes. Current studies mostly consider a relatively simple single-label scenario [4, 6, 26, 30, 32, 47, 50, 51]. In practice, existing methods usually focus on finding the main semantic information of training images, and then exploit the semantic relationship, i.e., word vectors [15, 38, 44, 45] or attribute vectors [3, 27, 28], between seen and unseen classes for prediction. The generated semantic information can be inferred from seen to unseen labels by comparing the similarity of the relation vectors between them. For example, Chen et al. [7] proposes a generative flow framework and uses a combinatorial strategy to solve the problems of semantic inconsistency, variance collapse, and structural disorder in zero-shot learning. Gune et al. [18] generates visual proxy samples to simulate the average entropy of the label distribution of the unseen class. However, the above methods only predict single labels with a single representation of images, which can hardly generalize to a more realistic multi-label scenario. + +# 2.3. Multi-Label Zero-Shot Learning + +Multi-label zero-shot learning has received increasing attention recently. For example, Norouzi et al. [36] designs two separate spaces, i.e., the image and semantic embedding spaces, jointly with the convex combination of the label embedding vectors to achieve multi-label recognition in the zero-shot learning framework. Zhang et al. [49] proposes a fast and general model based on the fact that the word vectors of the relevant labels are ranked before + +the irrelevant word vectors in the main vector of the image. Different from the above methods, Lee et al. [29] uses the knowledge graph to connect different labels. In recent years, attention-based methods become the mainstream. For example, LESA [22] applies an attention-sharing mechanism to the multi-label environment, allowing the model to focus on the key areas of each label. Narayan et al. [35] uses a bi-layer attention module to combine global context information and local features and map the generated information to the semantic space. However, the above methods only stay at the two-dimensional space level $(H\times W)$ , and do not consider the response between different feature channels with respect to classes. + +# 3. Methods + +# 3.1. Problem Setting + +Before proposing our method, we first explain the definition of the MLZSL problem. 
Given $n$ input samples $\{(I_1,Y_1),\ldots ,(I_i,Y_i),\ldots ,(I_n,Y_n)\}$ , where $I_{i}$ represents the input image of the $i$ -th train-set, and $Y_{i}$ represents the training labels corresponding to the input images, which are also called 'seen labels'. On the label distribution, let us set the seen label in the dataset as $C_s$ , where the seen label refers to the label known by the model. $C_s$ is mainly used for the train-set of the model in zero-shot learning. We set the unseen label to $C_u$ , and the unseen label is generally used in the test-set. The label relationship in the dataset is defined as $\mathcal{C} = \mathcal{C}_s\cup \mathcal{C}_u$ , where $\mathcal{C}$ represents the set of all labels in the dataset. Based on the above definition, after the model is trained on the train-set, in the testing part of MLZSL, given the image $I_{u}$ , the model can output the prediction result $y_{u}\subset C_{u}$ . While in the generalized zero-shot learning task, given an image $I_{u}$ , the output of the model is + +$y_{u} \subset \mathcal{C}$ , which means the model needs to output both the seen label and the unseen label that exist in the image. + +# 3.2. $(\mathbf{ML})^{2}\mathbf{P}$ -Encoder + +The proposed network structure is shown in Figure 2. For input images $I$ , we first use a pre-trained feature extraction network to obtain the corresponding image features $\mathcal{F}$ . We extract the features from the last three layers of the feature extraction network, and keep the two layers with the larger size consistent with the smallest size layer by downsampling. For example, assuming that the used and training network is VGG19 [37], the size of the last three layers of feature maps is $\{28 \times 28, 14 \times 14, 7 \times 7\}$ . We use max-pooling to down-sample the large-scale feature maps to obtain equivalent $7 \times 7$ feature maps. This step is called the "Forward Pyramid". After that, we obtain feature maps at different levels with the same scale. Then we randomly shuffle them to get the feature map $\mathcal{F}_a$ and re-group them into $g$ different groups, each group has $d_w$ channels, which is the same length as the word vectors in the ground-truth semantic space. The purpose of this operation is to generate specific semantic vectors to express the semantic information contained in each group. + +Next, the features of each group are fed into $(\mathrm{ML})^{2}\mathrm{P}$ -Encoder. First, we need to calculate the correlation between channels within each group. In traditional self-attention, the cost of computation greatly consumes the inference speed of the model, and the traditional self-attention module cannot accurately reflect the relationship between each channel. To solve the loss caused by the amount of calculation and accurately reflect the channel correlation, we designed a new self-attention structure to achieve this. + +For features $\mathcal{F}_a$ in group $i$ , which is $\mathcal{F}_a^i \in \mathbb{R}^{H \times W \times d_w}$ . We first generate Query (Q), Value (V) and Key (K) through three convolution operations: + +$$ +\mathbf {Q} = W _ {p} ^ {Q} \mathcal {F} _ {a} ^ {i} \quad \mathbf {K} = W _ {p} ^ {K} \mathcal {F} _ {a} ^ {i} \quad \mathbf {V} = W _ {p} ^ {V} \mathcal {F} _ {a} ^ {i} \tag {1} +$$ + +where $W_{p}^{(\cdot)}$ means the convolution operation. 
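For illustration, the sketch below shows one way the steps described so far could be implemented in PyTorch: the Forward Pyramid down-sampling, the channel shuffle and grouping, and the per-group convolutions of Eq. (1). This is not the authors' released code; the module name `GroupedChannelProjector`, the $1\times 1$ convolution that maps the concatenated pyramid channels to $g \cdot d_w$, and the fixed channel permutation are illustrative assumptions.

```python
# Hypothetical sketch (not the official implementation) of the Forward Pyramid,
# channel shuffle/grouping, and the Q/K/V projections of Eq. (1).
import torch
import torch.nn as nn
import torch.nn.functional as F


class GroupedChannelProjector(nn.Module):
    def __init__(self, in_channels=3 * 512, g=7, d_w=300):
        super().__init__()
        self.g, self.d_w = g, d_w
        # Assumption: a 1x1 conv maps the concatenated pyramid channels to g * d_w,
        # so that each group ends up with exactly d_w channels.
        self.to_groups = nn.Conv2d(in_channels, g * d_w, kernel_size=1)
        # Fixed random permutation standing in for the "shuffle" step.
        self.register_buffer("perm", torch.randperm(g * d_w))
        # Eq. (1): Q, K, V come from convolution operations (shared across groups here).
        self.W_q = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.W_k = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.W_v = nn.Conv2d(d_w, d_w, kernel_size=1)

    def forward(self, feats):
        # feats: last three VGG19 maps, e.g. [B,512,28,28], [B,512,14,14], [B,512,7,7].
        h, w = feats[-1].shape[-2:]
        # Forward Pyramid: max-pool the larger maps down to the smallest resolution.
        pooled = [F.adaptive_max_pool2d(f, (h, w)) for f in feats]
        x = self.to_groups(torch.cat(pooled, dim=1))   # [B, g*d_w, 7, 7]
        x = x[:, self.perm]                            # shuffle channels before grouping
        groups = x.chunk(self.g, dim=1)                # g tensors of shape [B, d_w, 7, 7]
        # Per-group Q, K, V of Eq. (1); the channel attention of Eqs. (2)-(3) and
        # the MLP encoder of Eqs. (4)-(5) would operate on these tensors.
        return [(self.W_q(f), self.W_k(f), self.W_v(f)) for f in groups]
```

Each returned tuple corresponds to one group $\mathcal{F}_a^i$; the channel-correlation attention and the two MLPs described next consume these tensors group by group.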
Next, to obtain the channel correlation matrix $\mathcal{R}$ , we reshape $\mathbf{Q},\mathbf{K}$ and $\mathbf{V}$ in the spatial domain $(H\times W)$ to get $\widehat{\mathbf{Q}}\in \mathbb{R}^{HW\times d_w}$ , $\widehat{\mathbf{K}}\in \mathbb{R}^{d_w\times HW}$ and $\widehat{\mathbf{V}}\in \mathbb{R}^{HW\times d_w}$ . Then perform a dot product operation on $\mathbf{Q}$ and $\mathbf{K}$ to obtain the channel correlation matrix $\mathcal{R}\in \mathbb{R}^{d_w\times d_w}$ . After that, we do the dot product between $\mathcal{R}$ and $\mathbf{V}$ , finally, add with the input $\mathcal{F}_a^i$ to get the output $\widehat{\mathcal{F}}_a^i\in \mathbb{R}^{H\times W\times d_w}$ : + +$$ +\operatorname {A t t} (\widehat {\mathbf {Q}}, \widehat {\mathbf {K}}, \widehat {\mathbf {V}}) = \widehat {\mathbf {V}} \cdot \underset {\mathcal {R}} {\operatorname {s o f t m a x}} (\underbrace {\widehat {\mathbf {K}} \cdot \widehat {\mathbf {Q}}} _ {\mathcal {R}}) \tag {2} +$$ + +$$ +\widehat {\mathcal {F}} _ {a} ^ {i} = \mathcal {F} _ {a} ^ {i} + \operatorname {A t t} (\widehat {\mathbf {Q}}, \widehat {\mathbf {K}}, \widehat {\mathbf {V}}) \tag {3} +$$ + +After enhancing the correlation between channels, we need to extract and analyze the feature information contained in + +each channel. We reshape the information in the spatial domain into a one-dimensional vector, then we decide to use the Multi-Layer Perceptron (MLP) to encode the features. Compared with the traditional convolution structure, the MLP structure is convenient to perform information fusion between local regions. Specifically, for the input feature $\widehat{\mathcal{F}}_a^i\in \mathbb{R}^{H\times W\times d_w}$ , we first change the dimension from $H\times W\times d_w$ to $\mathcal{F}_{mlp}^i\in \mathbb{R}^{d_w\times HW}$ , then we use LayerNorm to normalize the input. Our MLP structure includes two different MLPs: MLP1 is used to extract the spatial information contained in each channel, and MLP2 is proposed to obtain local information of different channels in the spatial domain: + +$$ +\mathcal {F} _ {m l p 1} ^ {i} = \mathcal {F} _ {m l p} ^ {i} + \mathbf {W} _ {2} \sigma \left(\mathbf {W} _ {1} \text {L a y e r N o r m} \left(\mathcal {F} _ {m l p} ^ {i}\right)\right) \tag {4} +$$ + +$$ +\mathcal {M} = \mathcal {F} _ {m l p 1} ^ {i} + \mathbf {W} _ {4} \sigma \left(\mathbf {W} _ {3} \text {L a y e r N o r m} \left(\mathcal {F} _ {m l p 1} ^ {i}\right)\right) \tag {5} +$$ + +where $\mathcal{F}_{mlp1}^i$ is the output after MLP1. $\mathbf{W}_1$ , $\mathbf{W}_2$ is the parameter of MLP1, and $\mathbf{W}_3$ , $\mathbf{W}_4$ is the parameter of MLP2. $\sigma$ is an element-wise non-linearity GELU [21]. Then we use max-pooling to filter out the best semantic vector in the spatial domain, which can more accurately represent the semantic information of this group. This max-pooling operation is also to be able to directly extract the channel response. So we obtain group semantic vectors $\mathcal{X} \in \mathbb{R}^{g \times d_w}$ and send them into Group Attention. + +# 3.3. Group Attention + +Although we obtained group semantic vectors $\mathcal{X}$ through $(\mathrm{ML})^{2}\mathrm{P}$ -Encoder, the semantic vectors generated by each group did not establish a relationship with each other at this time. As we already know, the key to improving the accuracy of multi-label image classification is to construct the correlation of labels within the image. 
So we use Group Attention to build the mutual information and also to find similar responses between different labels. We pass a series of linear layers to $\mathcal{X}$ : + +$$ +\mathbf {Q} _ {\mathbf {x}} = W _ {x} ^ {Q} \mathcal {X} \quad \mathbf {K} _ {\mathbf {x}} = W _ {x} ^ {K} \mathcal {X} \tag {6} +$$ + +$$ +\mathcal {S} = \left(\mathbf {Q} _ {\mathbf {x}} \cdot \mathbf {K} _ {\mathbf {x}}\right) \cdot \mathcal {X} \tag {7} +$$ + +where $\mathbf{Q}_{\mathbf{x}} \in \mathbb{R}^{g \times d_w}$ , and we transpose $\mathbf{K}_{\mathbf{x}}$ into $\mathbf{K}_{\mathbf{x}} \in \mathbb{R}^{d_w \times g}$ . $W_x^Q$ and $W_x^K$ are different linear weights. $S \in \mathbb{R}^{g \times d_w}$ is the semantic matrix, which contains all the semantic information of the input image. In the loss function, we will make each semantic vector in $S$ approximate the semantic information of seen classes appearing in the image. Therefore, from another perspective, the semantic vectors in $S$ are related to seen classes. + +# 3.4. Loss Function + +During training, some semantic vectors are generated for each input image. The semantic matrix $S$ includes the semantic information in the image and is sent to the prediction + +module. The loss function consists of two parts. First of all, to make the positive class (seen class appear in each training image) get a higher ranking than the negative class (seen class which does not appear in the training image). Inspired by [49], we choose to adopt ranknet loss [5] as the main component of the loss function. We use + +$$ +\mu_ {i j} = \max \left(\mathcal {S} \cdot n _ {i}\right) - \max \left(\mathcal {S} \cdot p _ {j}\right), \tag {8} +$$ + +to indicate the number of violations of any of these ranking constraints, where $n_i$ represents the semantic vector of the negative class, and $p_j$ denotes the semantic vector of the positive class. max is used to maximize this gap between negative and positive, and constrain it in subsequent steps. + +Next, to minimize the gap, we design the loss function as the following: + +$$ +\mathcal {L} _ {\text {r a n k}} = \frac {1}{(| P | | \bar {P} |)} \sum_ {i} \sum_ {j} \log \left(1 + e ^ {\mu_ {i j}}\right), \tag {9} +$$ + +where $\frac{1}{(|P| |\bar{P}|)}$ is used to normalize the ranknet loss, and $|P|$ denotes the number of positive class, $|\bar{P}|$ represents the number of negative class. When an image contains a large number of positive labels, the image becomes difficult to classify. So we need the model to value these hard samples during training. Therefore, we add the class weight $\omega$ to improve the performance of the model in the face of hard samples. $\omega$ is represented as: + +$$ +\omega = 1 + \sum_ {i} v a r (P ^ {i}), \tag {10} +$$ + +where $P^i$ represents the vector of the $i$ -th positive class, $var$ means the variance. The higher $\omega$ means the image contains more complex labels. To prevent the direction of the semantic vectors generated by the model from being too divergent, it needs to be controlled by the loss function. Therefore, we believe that the addition of regularization terms can reduce the difference between the generated semantic vectors when the model faces complex input images. This reduction in variance helps the model learn relevant information between different classes. + +$$ +\mathcal {L} _ {r e g} = \left\| \sum_ {n} v a r \left(\mathcal {S} _ {n}\right) \right\| _ {1}. 
# 4. Experiments

# 4.1. Experimental Setup

Datasets: We first use the NUS-WIDE dataset [10] for the MLZSL experiments. NUS-WIDE contains about 270,000 images and provides 925 labels that are automatically extracted from Flickr user tags, together with 81 labels that are manually annotated by humans and serve as the ground truth. In our experiments, the 925 labels are used as 'seen labels' and the 81 labels as 'unseen labels'; this setting is similar to [22]. The second dataset is Open-Images-V4, which contains nearly 9 million training images, 125,456 test images, and 41,620 validation images. The train-set provides 7,186 'seen labels', each appearing at least 100 times in the train-set. The 400 most frequent test-set labels that do not appear in the train-set are used as 'unseen labels'; each unseen label appears at least 75 times.

Evaluation Metrics: To allow an unbiased comparison between our model and the competing models on MLZSL, we use the two most common evaluation metrics, mean Average Precision (mAP) [22, 41] and F1-Score. The top-K F1-Score measures the accuracy of label prediction, while mAP reflects the accuracy of unseen-label retrieval for an image.

Implementation Details: Our model supports end-to-end training. We choose VGG19 [37], pre-trained on the ImageNet dataset [11], as the backbone network. Unlike other methods, our model uses multi-scale feature maps and aggregates them; the sizes of the feature maps are $28 \times 28$, $14 \times 14$, and $7 \times 7$, respectively.

For optimization we use the Adam optimizer [24], which requires less memory and is suitable for large datasets. The weight decay is set to $4e^{-3}$. In the NUS-WIDE experiments, the initial learning rate is $5e^{-5}$ and is decayed by a factor of $\frac{1}{10}$ at the 7th epoch. Training on NUS-WIDE runs for 20 epochs with a batch size of 48. For Open-Images-V4, the learning rate, batch size, and decay rate remain the same as for NUS-WIDE, but the number of epochs is 7.
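The training schedule above can be summarized in a few lines. The following is an illustrative PyTorch-style sketch built only from the stated hyper-parameters (Adam, weight decay $4e^{-3}$, initial learning rate $5e^{-5}$, decay by a factor of 10 at the 7th epoch, batch size 48); the `model` and `train_set` objects are placeholders, not the authors' code.

```python
import torch
from torch.utils.data import DataLoader

def build_training(model, train_set, epochs=20):
    """Optimizer and schedule as reported for NUS-WIDE; Open-Images-V4 uses epochs=7."""
    loader = DataLoader(train_set, batch_size=48, shuffle=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5, weight_decay=4e-3)
    # The learning rate is divided by 10 at the 7th epoch.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[7], gamma=0.1)
    return loader, optimizer, scheduler
```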
Baselines: We compare the proposed method with several state-of-the-art deep-learning-based MLZSL models. These methods have been published in recent years and cover a rich variety of techniques, from common CNNs to attention mechanisms. They include CONSE [36], LabelEM [2], Fast0Tag [49], Kim et al. [23], LESA Attention per Cluster (ApC) [22], LESA [22], and BiAM [35]. All comparison methods using VGG19 [37] are not fine-tuned. In addition to comparing with these models, we also test the model's performance under different settings of the hyper-parameters $g$ and $\lambda$, and we conduct ablation experiments to verify the integrity of the model's architecture.

# 4.2. State-of-the-art Comparison

NUS-WIDE: Table 1 shows the performance of our method and the competing methods on the NUS-WIDE test-set for both the ZSL and GZSL tasks. CONSE [36] and LabelEM [2], as earlier methods, do not perform well on large-scale datasets. Fast0Tag [49] achieves more competitive results by ranking the positive labels to find the principal directions of the image. LESA [22] and BiAM [35] are currently the most advanced models and rely on spatial attention mechanisms to generate semantic information. Compared to BiAM, our method achieves a $3.6\%$ improvement in mAP on the ZSL task, and we lead BiAM by $0.8\%$ and $2.9\%$ in F1-Score at $K = 3$ and $K = 5$, respectively. On the GZSL task we also surpass BiAM. BiAM processes higher-dimensional and richer spatial information, while our method relies on single-dimensional channel responses, so the improvements of $1.3\%$ in mAP and of $0.3\%$ and $0.7\%$ in F1-Score at $K = 3$ and $K = 5$ are not easy to achieve. The strong results on NUS-WIDE demonstrate the effectiveness of our method.

Attention Visualization on NUS-WIDE: Figure 6 illustrates the attention regions of the model when our method predicts unseen labels. Figure 6(a) shows that our model can clearly distinguish the scene information of all unseen classes. The attention areas of "Rocks" and "Mountain" are roughly the same, which indicates that the two classes have similar semantics and dependencies, and Group Attention enables the model to learn this mutual information well. Figure 6(b) is a comparison with BiAM [35], the best existing model for mining spatial-domain information. This result shows that the effective use of channel information can more accurately capture the responses between classes. While BiAM's over-exploration of spatial information improves the acquisition of regional information, it loses the scene-level response at the same time. For more comparison results, please refer to the appendix.

Open-Images-V4: Table 2 shows the results of our method and the baseline models on Open-Images-V4. We follow the evaluation setting of [22, 35]. This dataset contains many more seen and unseen labels than NUS-WIDE. With the large increase in the number of classes, all methods obtain low F1-Scores on the ZSL task. Among them, Fast0Tag makes great progress compared with earlier methods, especially on the GZSL task. LESA [22] and BiAM [35], as the two best previous methods, represent the highest level of extracting spatial re

Table 1. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We report F1-Score at $K \in \{3, 5\}$ and mAP. The best results are shown in bold.
| Method | Task | mAP | F1 (K = 3) | F1 (K = 5) |
| --- | --- | --- | --- | --- |
| CONSE [36] | ZSL | 9.4 | 21.6 | 20.2 |
|  | GZSL | 2.1 | 7.0 | 8.1 |
| LabelEM [2] | ZSL | 7.1 | 19.2 | 19.5 |
|  | GZSL | 2.2 | 9.5 | 11.3 |
| Fast0Tag [49] | ZSL | 15.1 | 27.8 | 26.4 |
|  | GZSL | 3.7 | 11.5 | 13.5 |
| Kim et al. [23] | ZSL | 10.4 | 25.8 | 23.6 |
|  | GZSL | 3.7 | 10.9 | 13.2 |
| Attention per Cluster [22] | ZSL | 12.9 | 24.6 | 22.9 |
|  | GZSL | 2.6 | 6.4 | 7.7 |
| LESA [22] | ZSL | 19.4 | 31.6 | 28.7 |
|  | GZSL | 5.6 | 14.4 | 16.8 |
| BiAM [35] | ZSL | 25.8 | 32.0 | 29.4 |
|  | GZSL | 8.9 | 15.5 | 18.5 |
| Our Approach | ZSL | **29.4** | **32.8** | **32.3** |
|  | GZSL | **10.2** | **15.8** | **19.2** |
Table 2. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the Open-Images-V4 dataset. We report F1-Score at $K \in \{10, 20\}$ and mAP. The best results are shown in bold.
| Method | Task | mAP | F1 (K = 10) | F1 (K = 20) |
| --- | --- | --- | --- | --- |
| CONSE [36] | ZSL | 40.4 | 0.4 | 0.3 |
|  | GZSL | 43.5 | 2.6 | 2.4 |
| LabelEM [2] | ZSL | 40.5 | 0.5 | 0.4 |
|  | GZSL | 45.2 | 5.2 | 5.1 |
| Fast0Tag [49] | ZSL | 41.2 | 0.7 | 0.6 |
|  | GZSL | 45.2 | 16.0 | 13.0 |
| Attention per Cluster [22] | ZSL | 40.7 | 1.2 | 0.9 |
|  | GZSL | 44.9 | 16.9 | 13.5 |
| LESA [22] | ZSL | 41.7 | 1.4 | 1.0 |
|  | GZSL | 45.4 | 17.4 | 14.3 |
| BiAM [35] | ZSL | 62.8 | 4.1 | 3.7 |
|  | GZSL | 79.6 | 17.6 | 15.1 |
| Our Approach | ZSL | **65.7** | **7.5** | **6.5** |
|  | GZSL | **79.9** | **27.6** | **24.1** |
sponses. BiAM achieves very large progress in mAP on both the ZSL and GZSL tasks. However, our method achieves the best ZSL mAP while leading in F1-Score by $3.4\%$ and $2.8\%$ at $K = 10$ and $K = 20$, respectively. Most importantly, on the GZSL task our F1-Score results also show large advantages of $10.0\%$ and $9.0\%$. This shows that channel-class correlation, used as semantic information, can fully cope with the complexity of a large number of labels.

Figure 5 compares the mAP, inference time, and GFLOPs of our model, which obtains semantic information from channel responses, with the two best-performing methods that acquire semantic information from spatial features (LESA [22] and BiAM [35]). In the mAP comparison, our model has the highest prediction accuracy on the ZSL task. At the same time, because only a small amount of data has to be processed, our inference speed is the fastest among all compared methods on the same NVIDIA RTX 3090 GPU. Finally, precisely because the model only needs to deal with single-dimensional channel responses, our $(\mathrm{ML})^{2}\mathrm{P}$-Encoder module requires much less computation than LESA and BiAM, which process spatial attention; grouping the feature maps additionally avoids the geometric increase in computational complexity caused by the feature pyramid. This shows that our $(\mathrm{ML})^{2}\mathrm{P}$-Encoder is more efficient.

Table 3. Ablation study showing the contribution of the different components in our proposed approach. All configurations are evaluated on the NUS-WIDE test-set.
Ablated components: Forward Pyramid, $(\mathrm{ML})^{2}\mathrm{P}$-Encoder, Group Attention.

|  | a | b | c | d | ours |
| --- | --- | --- | --- | --- | --- |
| mAP (ZSL) | 25.3 | 27.3 | 28.4 | 27.9 | **29.4** |
| mAP (GZSL) | 8.1 | 8.5 | 9.2 | 8.8 | **10.2** |
![](images/8458772c1181ee73b3a85ca3b2e670d4a44bffd3c2387a13e1980532fbf7d75e.jpg)
(a) W/O $(\mathrm{ML})^{2}\mathrm{P}$-Encoder

![](images/7fcdb68df0c0c5df9c57d97fe0df8368ca7255d03e4d7cbd853a81342d753669.jpg)
(b) With $(\mathrm{ML})^{2}\mathrm{P}$-Encoder

Figure 3. Evaluation with t-SNE (zoom in for a better view).

# 4.3. Hyper-parameter Selection

Our method includes two hyper-parameters, the number of groups $g$ and the weight of the regularization term $\lambda$, and we vary one while keeping the other fixed. For initialization, the number of output semantic vectors $g$ is set to 7 and $\lambda$ is set to 0.4. The line graphs in Figure 4 show the mAP achieved on the ZSL and GZSL tasks under different hyper-parameter settings, and they also reveal how sensitive the prediction accuracy is to each hyper-parameter. The value of $g$ does not have a very significant effect on the mAP of the ZSL task, but its impact on GZSL is more obvious; after comparison, we find that $g = 7$ balances the two tasks well. For $\lambda$, the choice has a larger impact on mAP, and only at $\lambda = 0.4$ is the GZSL performance far better than under the other settings while ZSL also achieves its optimal result. We therefore choose $g = 7$ and $\lambda = 0.4$ as the optimal hyper-parameters.

![](images/a1657c3c9aeb342e41cd7a891fefffd25bd38870895024e77f74a63090faedec.jpg)
(a) $g$

![](images/bea5c061b33965db1460c351ea8d6dc9e5548fde810a154ead8e86167d85a405.jpg)
(b) Weights

Figure 4. Hyper-parameter selection. The higher the mAP, the better. All experiments are performed on the NUS-WIDE test-set.

![](images/2e12f6e418094ab26027de6309daa2eb11ad2e6a6744aca0b614f345deb31613.jpg)
(a) mAP

![](images/be864e674cccd18cea12bbdcc3f4e639ca3700f6bc6523390a63dd95126fd3e0.jpg)
(b) Inference time (ms)

![](images/2455e527a5bdeb3bb22c7b815463f0c08e291f6c7add9f25ddff4192a6829139.jpg)
(c) GFLOPs

Figure 5. Comparison of our $(\mathbf{ML})^2\mathbf{P}$-Encoder with BiAM and LESA in mAP, inference time, and GFLOPs. The higher the mAP, the better; the lower the inference time and GFLOPs, the better. All methods are evaluated on the NUS-WIDE test-set.

# 4.4. Ablation Study

Ablation Study: To illustrate the effectiveness of each module in our method, we arrange three comparative experiments; the results are shown in Table 3. As the most primitive structure, model 'a' only contains the shuffle and grouping operations. After adding the 'Forward Pyramid', the model expands the number of features: as the number of available feature channels increases, the amount of information carried by the channels also increases, which leads to more competitive results. The addition of the $(\mathrm{ML})^{2}\mathrm{P}$-Encoder enables the model to process the channel responses of specific classes, while Group Attention supplies the information that is specific to multi-label tasks, namely the inter-class correlation. The combination of the $(\mathrm{ML})^{2}\mathrm{P}$-Encoder and Group Attention greatly improves the prediction ability of the model on the ZSL and GZSL tasks, indicating that our model design is effective.

t-SNE: Figure 3 shows the effect of the $(\mathrm{ML})^{2}\mathrm{P}$-Encoder in the t-SNE visualization.
It can be seen that with the $(\mathrm{ML})^{2}\mathrm{P}$-Encoder the boundaries between classes become much clearer, which confirms the value of exploring class-specific channel responses.

Different Backbones: Table 4 shows the results of our method with different backbones. The results show that ResNet [20] has a clear advantage over VGG [37], and as the ResNet backbone deepens and the number of parameters grows, the results of our model improve further. This is the behavior one expects from an end-to-end model.

Table 4. Our $\mathbf{C}^3$-MLZSL approach with different backbones for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We report F1-Score at $K \in \{3, 5\}$ and mAP. The best results are shown in bold.
| Backbones | Task | mAP | F1 (K = 3) | F1 (K = 5) |
| --- | --- | --- | --- | --- |
| VGG19 [37] | ZSL | 29.4 | 32.8 | 32.3 |
|  | GZSL | 10.2 | 15.8 | 19.2 |
| ResNet50 [20] | ZSL | 30.9 | 33.6 | 33.2 |
|  | GZSL | 10.7 | 15.9 | 19.4 |
| ResNet101 [20] | ZSL | **31.2** | **33.9** | **33.9** |
|  | GZSL | **10.9** | **16.1** | **19.5** |
![](images/35da844c0da731cac057f029763ba3bb2787c08a55a9372c0d286eebe3ef9a00.jpg)
Figure 6. Attention visualization. (a) The attention response of our $\mathbf{C}^3$-MLZSL when faced with unseen labels. (b) Comparison of the attention visualization results of our $\mathbf{C}^3$-MLZSL and BiAM [35]. See the appendix for more results.

# 4.5. Multi-Label Learning

Table 5 shows the results of the model on the multi-label image classification task. The baselines we compare against include not only state-of-the-art MLZSL models but also multi-label image classification models, namely Logistic Regression [40], WSABIE [43], WARP [17], and CNN-RNN [42]. As the results show, our model far surpasses many multi-label image classification models and the classic Fast0Tag [49] algorithm in mAP. This is because those models compress the input image into a single semantic vector, and such a limited image embedding cannot capture the semantic diversity needed for multi-label classification. Other methods such as LESA [22] and BiAM [35] noticed that the attention regions of different objects in multi-label images are different, and thus define the label

Table 5. Performance on the multi-label image classification task on the NUS-WIDE dataset. The best results are shown in bold.
| Method | F1 (K = 3) (↑) | F1 (K = 5) (↑) | mAP (↑) |
| --- | --- | --- | --- |
| Logistic [40] | 51.1 | 46.1 | 21.6 |
| WARP [17] | 54.4 | 49.4 | 3.1 |
| WSABIE [43] | 53.8 | 49.2 | 3.1 |
| Fast0Tag [49] | 53.8 | 48.6 | 22.4 |
| CNN-RNN [42] | 55.2 | 50.8 | 28.3 |
| Kim et al. [23] | 56.8 | 51.3 | 32.6 |
| LESA ApC [22] | 56.6 | 50.7 | 31.7 |
| LESA [22] | 58.0 | 52.0 | 31.5 |
| BiAM [35] | 59.6 | 53.4 | 47.8 |
| Ours | **59.8** | **53.8** | **48.0** |
related embeddings from the perspective of the spatial domain. In contrast, our model recognizes that, after feature extraction, the channel response can serve as important information representing class semantics, and its superior performance verifies the rationality of that exploration.

# 5. Conclusion

In this paper, we address the neglect of channel-wise class information, and the over-reliance on spatial-wise class information, in previous MLZSL models by proposing the C$^3$-MLZSL framework with its $(\mathrm{ML})^{2}\mathrm{P}$-Encoder component. C$^3$-MLZSL first groups multi-scale features, then uses the $(\mathrm{ML})^{2}\mathrm{P}$-Encoder to calculate the correlation of the channels within each group and to fuse their information into semantic vectors. These semantic vectors are then aggregated through Group Attention to learn the mutual information between groups, so that the model successfully learns the channel-class correlation. Extensive experiments on the large-scale NUS-WIDE and Open-Images-V4 datasets show that our model achieves very competitive MLZSL results compared with other state-of-the-art models.

# 6. Acknowledgment

This research was supported by funding from the Key-Area Research and Development Program of Guangdong Province (No. 2021B0101400003), the Hong Kong RGC Research Impact Fund (No. R5060-19), the Areas of Excellence Scheme (AoE/E-601/22-R), the General Research Fund (No. 152203/20E, 152244/21E, 152169/22E, 152211/23E), the Shenzhen Science and Technology Innovation Commission (JCYJ20200109142008673), the National Natural Science Foundation of China (No. 62102327), and the PolyU Internal Fund (No. P0043932).

# References

[1] Zeynep Akata, Mateusz Malinowski, Mario Fritz, and Bernt Schiele. Multi-cue zero-shot learning with strong supervision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 59-68, 2016. 2
[2] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(7):1425–1438, 2015. 5, 6
[3] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(7):1425–1438, 2016. 3
[4] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Improving semantic embedding consistency by metric learning for zero-shot classification. In European Conference on Computer Vision, pages 730-746. Springer, 2016. 3
[5] Chris Burges, Tal Shaked, Erin Renshaw, Ari Lazier, Matt Deeds, Nicole Hamilton, and Greg Hullender. Learning to rank using gradient descent. In Proceedings of the 22nd International Conference on Machine Learning, pages 89-96, 2005. 5
[6] Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. Synthesized classifiers for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5327-5336, 2016. 3
[7] Zhi Chen, Yadan Luo, Sen Wang, Ruihong Qiu, Jingjing Li, and Zi Huang. Mitigating generation shifts for generalized zero-shot learning. In Proceedings of the 29th ACM International Conference on Multimedia, pages 844-852, 2021. 3
[8] Zhao-Min Chen, Xiu-Shen Wei, Peng Wang, and Yanwen Guo. Multi-label image recognition with graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5177-5186, 2019.
2
[9] Xing Cheng, Hezheng Lin, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Nian Shi, and Honglin Liu. Mltr: Multi-label classification with transformer. arXiv preprint arXiv:2106.06195, 2021. 1
[10] Tat-Seng Chua, Jinhui Tang, Richang Hong, Haojie Li, Zhiping Luo, and Yantao Zheng. Nus-wide: a real-world web image database from National University of Singapore. In Proceedings of the ACM international conference on image and video retrieval, pages 1-9, 2009. 5
[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5
[12] Shay Deutsch, Soheil Kolouri, Kyungnam Kim, Yuri Owechko, and Stefano Soatto. Zero shot learning via multi-scale manifold regularization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7112-7119, 2017. 2
[13] Thibaut Durand, Nazanin Mehrasa, and Greg Mori. Learning a deep convnet for multi-label classification with partial labels. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 647-657, 2019. 1, 2
[14] Lei Feng, Bo An, and Shuo He. Collaboration based multi-label learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3550-3557, 2019. 1, 2
[15] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. Advances in neural information processing systems, 26, 2013. 2, 3
[16] Bin-Bin Gao and Hong-Yu Zhou. Learning to discover multi-class attentional regions for multi-label image recognition. IEEE Transactions on Image Processing, 30:5920-5932, 2021. 1, 2
[17] Yunchao Gong, Yangqing Jia, Thomas Leung, Alexander Toshev, and Sergey Ioffe. Deep convolutional ranking for multilabel image annotation. arXiv preprint arXiv:1312.4894, 2013. 1, 2, 8
[18] Omkar Gune, Biplab Banerjee, Subhasis Chaudhuri, and Fabio Cuzzolin. Generalized zero-shot learning using generated proxy unseen samples and entropy separation. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4262-4270, 2020. 3
[19] Jingcai Guo and Song Guo. A novel perspective to zero-shot learning: Towards an alignment of manifold structures via semantic feature expansion. IEEE Transactions on Multimedia, 23:524-537, 2020. 2
[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 1, 7, 8
[21] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 4
[22] Dat Huynh and Ehsan Elhamifar. A shared multi-attention framework for multi-label zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8776–8786, 2020. 2, 3, 5, 6, 8
[23] Jin-Hwa Kim, Jaehyun Jun, and Byoung-Tak Zhang. Bilinear attention networks. arXiv preprint arXiv:1805.07932, 2018. 5, 6, 8
[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 5
[25] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 2
[26] Elyor Kodirov, Tao Xiang, and Shaogang Gong. Semantic autoencoder for zero-shot learning.
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3174-3183, 2017. 2, 3 +[27] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 951-958. IEEE, 2009. 3 + +[28] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Attribute-based classification for zero-shot visual object categorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(3):453-465, 2014. 3 +[29] Chung-Wei Lee, Wei Fang, Chih-Kuan Yeh, and Yu-Chiang Frank Wang. Multi-label zero-shot learning with structured knowledge graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1576–1585, 2018. 3 +[30] Jingjing Li, Mengmeng Jing, Lei Zhu, Zhengming Ding, Ke Lu, and Yang Yang. Learning modality-invariant latent representations for generalized zero-shot learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 1348-1356, 2020. 3 +[31] Yanan Li, Donghui Wang, Huanhang Hu, Yuetan Lin, and Yueting Zhuang. Zero-shot recognition using dual visual-semantic mapping paths. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3279–3287, 2017. 2 +[32] Teng Long, Xing Xu, Youyou Li, Fumin Shen, Jingkuan Song, and Heng Tao Shen. Pseudo transfer with marginalized corrupted attribute for zero-shot learning. In Proceedings of the 26th ACM international conference on Multimedia, pages 1802-1810, 2018. 3 +[33] Thomas Mensink, Efstratios Gavves, and Cees GM Snoek. Costa: Co-occurrence statistics for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2441-2448, 2014. 2 +[34] Pedro Morgado and Nuno Vasconcelos. Semantically consistent regularization for zero-shot recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6060-6069, 2017. 2 +[35] Sanath Narayan, Akshita Gupta, Salman Khan, Fahad Shahbaz Khan, Ling Shao, and Mubarak Shah. Discriminative region-based multi-label zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8731-8740, 2021. 2, 3, 5, 6, 8 +[36] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. In 2nd International Conference on Learning Representations, ICLR 2014, 2014. 2, 3, 5, 6 +[37] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 1, 4, 5, 6, 8 +[38] Richard Socher, Milind Ganjoo, Christopher D Manning, and Andrew Ng. Zero-shot learning through cross-modal transfer. In Advances in neural information processing systems, pages 935-943, 2013. 3 +[39] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich, et al. Going deeper with convolutions. Cvpr, 2015. 1 +[40] Grigorios Tsoumakas and Ioannis Katakis. Multi-label classification: An overview. International Journal of Data Warehousing and Mining (IJDWM), 3(3):1-13, 2007. 8 + +[41] Andreas Veit, Neil Alldrin, Gal Chechik, Ivan Krasin, Abhinav Gupta, and Serge Belongie. Learning from noisy large-scale datasets with minimal supervision. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 839-847, 2017. 5 +[42] Jiang Wang, Yi Yang, Junhua Mao, Zhiheng Huang, Chang Huang, and Wei Xu. Cnn-rnn: A unified framework for multi-label image classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2285-2294, 2016. 1, 2, 8 +[43] Jason Weston, Samy Bengio, and Nicolas Usunier. Wsabie: Scaling up to large vocabulary image annotation. In Twenty-Second International Joint Conference on Artificial Intelligence, 2011. 2, 8 +[44] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 69-77, 2016. 3 +[45] Yongqin Xian, Bernt Schiele, and Zeynep Akata. Zero-shot learning-the good, the bad and the ugly. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4582-4591, 2017. 2, 3 +[46] Hsiang-Fu Yu, Prateek Jain, Purushottam Kar, and Inderjit Dhillon. Large-scale multi-label learning with missing labels. In International conference on machine learning, pages 593-601. PMLR, 2014. 1, 2 +[47] Chenrui Zhang, Xiaoqing Lyu, and Zhi Tang. Tgg: Transferable graph generation for zero-shot and few-shot learning. In Proceedings of the 27th ACM International Conference on Multimedia, pages 1641-1649, 2019. 3 +[48] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2021-2030, 2017. 2 +[49] Yang Zhang, Boqing Gong, and Mubarak Shah. Fast zero-shot image tagging. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5985-5994. IEEE, 2016. 2, 3, 5, 6, 8 +[50] Ziming Zhang and Venkatesh Saligrama. Zero-shot learning via joint latent similarity embedding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6034-6042, 2016. 3 +[51] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 
3 \ No newline at end of file diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/images.zip b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d8224878089008c30f03969001231062a8fd223e --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d974b5e1f9203817534a70933e996fd4116431aefa4823c0205bb99b6c788cf7 +size 461493 diff --git a/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/layout.json b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b1ef8c9fc32e685640db0a3bdaa9ae101aeebf11 --- /dev/null +++ b/2023/(ML)$^2$P-Encoder_ On Exploration of Channel-Class Correlation for Multi-Label Zero-Shot Learning/layout.json @@ -0,0 +1,9718 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 54, + 102, + 539, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 102, + 539, + 140 + ], + "spans": [ + { + "bbox": [ + 54, + 102, + 539, + 140 + ], + "type": "inline_equation", + "content": "(\\mathbf{ML})^{2}\\mathbf{P}" + }, + { + "bbox": [ + 54, + 102, + 539, + 140 + ], + "type": "text", + "content": "-Encoder: On Exploration of Channel-class Correlation for Multi-label Zero-shot Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 160, + 536, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 160, + 536, + 204 + ], + "spans": [ + { + "bbox": [ + 56, + 160, + 536, + 204 + ], + "type": "text", + "content": "Ziming Liu1, Song Guo1,2, Xiaocheng Lu1, Jingcai Guo1,2*, Jiewei Zhang1, Yue Zeng1, Fushuo Huo1 \n1Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China \n2The Hong Kong Polytechnic University Shenzhen Research Institute, Shenzhen, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 205, + 465, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 205, + 465, + 232 + ], + "spans": [ + { + "bbox": [ + 129, + 205, + 465, + 232 + ], + "type": "text", + "content": "{ziming.liu, jiewei.zhang, fushuo.huo}@connect.polyu.hk {song.quo, xiaoclu, jc-jingcai.quo, zengyue.zeng}@polyu.edu.hk" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "spans": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": "Recent studies usually approach multi-label zero-shot learning (MLZSL) with visual-semantic mapping on spatial-class correlation, which can be computationally costly, and worse still, fails to capture fine-grained class-specific semantics. We observe that different channels may usually have different sensitivities on classes, which can correspond to specific semantics. 
Such an intrinsic channel-class correlation suggests a potential alternative for the more accurate and class-harmonious feature representations. In this paper, our interest is to fully explore the power of channel-class correlation as the unique base for MLZSL. Specifically, we propose a light yet efficient Multi-Label MultiLayer Perceptron-based Encoder, dubbed " + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "inline_equation", + "content": "(ML)^{2}P" + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": "-Encoder, to extract and preserve channel-wise semantics. We reorganize the generated feature maps into several groups, of which each of them can be trained independently with " + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "inline_equation", + "content": "(ML)^{2}P" + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": "-Encoder. On top of that, a global group-wise attention module is further designed to build the multilabel specific class relationships among different classes, which eventually fulfills a novel Channel-Class Correlation MLZSL framework " + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "inline_equation", + "content": "(C^{3}" + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": "-MLZSL). Extensive experiments on large-scale MLZSL benchmarks including NUS-WIDE and Open-Images-V4 demonstrate the superiority of our model against other representative state-of-the-art models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 594, + 128, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 128, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 128, + 607 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 613, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 687 + ], + "type": "text", + "content": "The proliferation of smart devices has greatly enriched human life when it comes to the era of big data. These smart devices are usually equipped with cameras such that users can easily produce and share their images. With the increasing abundance of public images, how to analyze them accurately has become a challenging problem. Recent years" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 326, + 261, + 522, + 430 + ], + "blocks": [ + { + "bbox": [ + 326, + 261, + 522, + 430 + ], + "lines": [ + { + "bbox": [ + 326, + 261, + 522, + 430 + ], + "spans": [ + { + "bbox": [ + 326, + 261, + 522, + 430 + ], + "type": "image", + "image_path": "012ebce2bca2908a1ed88b5b724c2627dd75ef1002f2138229d4a579b04f8c43.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 442, + 547, + 498 + ], + "lines": [ + { + "bbox": [ + 305, + 442, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 305, + 442, + 547, + 498 + ], + "type": "text", + "content": "Figure 1. Example of Channel-Class Correlation. Our method achieves the prediction of unseen classes by exploiting the unique distribution of channel responses as semantic information for the class and building correlations with responses from the same channel (zoom in for a better view)." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 511, + 547, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 547, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 547, + 559 + ], + "type": "text", + "content": "have witnessed great success in classifying an image into a specific class [20, 37, 39], namely, single-label classification. However, in reality, the images [17,46] usually contain abundant information and thereby consist of multiple labels." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 560, + 547, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 560, + 547, + 693 + ], + "spans": [ + { + "bbox": [ + 304, + 560, + 547, + 693 + ], + "type": "text", + "content": "In recent years, the multi-label classification has been widely investigated by exploring the relationship among different labels from multiple aspects [9, 13, 14, 16, 42]. However, in some scenarios where extensive collections of images exist, e.g., Flickr" + }, + { + "bbox": [ + 304, + 560, + 547, + 693 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 304, + 560, + 547, + 693 + ], + "type": "text", + "content": ", users can freely set one or more individual tags/labels for each image, while the presented objects and labels in these images may not be fully shown in any previous collection, and thus result in a domain gap for the recognition. Therefore, in real-world applications, the model is required to gain the ability to predict unseen classes as well. As one of the thriving research topics, zero-" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 693, + 277, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 693, + 277, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 693, + 277, + 713 + ], + "type": "text", + "content": "*Jingcai Guo is the corresponding author. 
\n1Released code: github.com/simonzmliu/cvpr23_mlzsl" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 702, + 398, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 702, + 398, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 702, + 398, + 713 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 317, + 702, + 398, + 713 + ], + "type": "text", + "content": "https://www.flickr.com" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23859" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "shot learning (ZSL) [1, 12, 15, 34] is designed to transfer tasks from seen classes to unseen classes, and naturally recognizes novel objects of unseen classes. Specifically, ZSL has made continuous success in single-label classification [19, 26, 31, 45, 48]. However, these methods can hardly be extended to the multi-label scenario since exploring the cross-class relationships in an image is non-trivial." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "content": "Recently, some works have focused on multi-label zero-shot learning (MLZSL) tasks and obtained some promising results [33, 36, 49]. Other works considered incorporating attention mechanisms into their models, such as " + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "inline_equation", + "content": "LESA" + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "content": " [22] and " + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "inline_equation", + "content": "BiAM" + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "content": " [35]. " + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "inline_equation", + "content": "LESA" + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "content": " [22] designed an attention-sharing mechanism for different patches in the image so that each patch can output the corresponding class. In another way, " + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "inline_equation", + "content": "BiAM" + }, + { + "bbox": [ + 46, + 158, + 287, + 289 + ], + "type": "text", + "content": " [35] designed a bi-level attention to extract relations from regional context and scene context, which can enrich the regional features of the model and separate the features of different classes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 293, + 288, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 293, + 288, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 293, + 288, + 555 + ], + "type": "text", + "content": "Although previous works have made considerable progress, their designed methods have been limited to the processing of spatial-domain information. 
First of all, the over-reliance on spatial-class correlation fails to capture fine-grained class-specific semantics. In addition, the additional processing of spatial information greatly increases the computational cost of the model and limits the inference speed. Given the shortcomings of the above methods, we found through analysis that the channel response can be used as the semantic information of the class. Firstly, the response of each class in the channel is unique, which creates conditions for obtaining the unique semantics. Secondly, for classes with certain semantic associations, there must be some channels that capture their common information. Therefore, channel information, as an easily overlooked part after feature extraction, can complete the task of capturing multi-label information. In MLZSL, we can complete the prediction of unseen classes by obtaining the responses of seen classes in the channel domain, and the relationship between seen and unseen classes. Finally, the subsequent analysis of the channel response greatly saves computational costs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 558, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 287, + 714 + ], + "type": "text", + "content": "Specifically, as shown in Figure 1, as seen classes, \"water\" and \"tree\" have unique response distributions on feature channels, and these responses can be used as semantic information for classification tasks. Besides, in order to explore the correlation of classes, we found that although the semantic information of \"water\" and \"tree\" is different, there are still some channels that respond simultaneously (i.e. the blue channel). We need to build this correlation during the training process through modeling so that the model can learn multi-label correlations. In the ZSL process, for the unseen class \"garden\", we know that it is related to \"water\" (i.e. purple layer) and \"tree\" (i.e. green, orange, and gray layer) by obtaining its semantic information and matching" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "with seen classes. This observation suggests that channels can help not only to classify objects but also to establish associations between classes. Previous methods which only consider spatial information are unable to obtain this intrinsic channel-class correlation and dissimilarity, thus achieving sub-optimal performance on the MLZSL task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 144, + 546, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 299 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 299 + ], + "type": "text", + "content": "To address the above challenges and construct a more accurate and robust MLZSL system, we propose to group the generated feature maps and process them in a group-wise manner, thus enhancing the model by fully exploring the channel-class correlations. 
Besides, by properly designing a light yet efficient Multi-Label Multi-Layer Perceptron-based Encoder, i.e., " + }, + { + "bbox": [ + 304, + 144, + 546, + 299 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 144, + 546, + 299 + ], + "type": "text", + "content": "-Encoder, we can easily analyze the local relationship between channels while significantly reducing the computation overhead. Finally, these groups are recombined and then perform the calculation of group attention, indicating that the model is analyzed locally and globally from the perspective of the channels, which can ensure the integrity of the representation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 318, + 300, + 497, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 300, + 497, + 312 + ], + "spans": [ + { + "bbox": [ + 318, + 300, + 497, + 312 + ], + "type": "text", + "content": "In summary, our contributions are four-fold:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 319, + 545, + 533 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 313, + 319, + 545, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 545, + 379 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 545, + 379 + ], + "type": "text", + "content": "1. To the best of our knowledge, our method first suggests the concept of channel-class correlation in MLZSL, and proposes a channel-sensitive attention module " + }, + { + "bbox": [ + 313, + 319, + 545, + 379 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 313, + 319, + 545, + 379 + ], + "type": "text", + "content": "-Encoder to extract and preserve channel-wise semantics for channel groups." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 386, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 545, + 434 + ], + "type": "text", + "content": "2. Different from previous works that use spatial-class correlation to extract global and local features, we alternatively explore the channel-class correlation as the unique base for MLZSL." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 441, + 545, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 441, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 313, + 441, + 545, + 478 + ], + "type": "text", + "content": "3. In conjunction with " + }, + { + "bbox": [ + 313, + 441, + 545, + 478 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 313, + 441, + 545, + 478 + ], + "type": "text", + "content": "-Encoder, a global group-wise attention is also designed to establish the multi-label specific class relationships among classes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 485, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 485, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 485, + 545, + 533 + ], + "type": "text", + "content": "4. Extensive experiments on large-scale datasets NUS-WIDE and Open-Images-V4 demonstrate the effectiveness of our method against other state-of-the-art models." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 544, + 392, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 392, + 556 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 392, + 556 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 563, + 450, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 450, + 575 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 450, + 575 + ], + "type": "text", + "content": "2.1. Multi-Label Classification" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 713 + ], + "type": "text", + "content": "The establishment of graph neural networks (GNNs) brings remarkable success to multi-label classification tasks [8, 25]. Among them, Chen et al. [8] constructs directed graphs for object labels and uses graph convolutional networks (GCN) to map label nodes, which contain word embeddings, into classifiers. In addition, the CNN-based multi-label classification models enable the learning of the characteristics of each label from the spatial information of the image and design a new multi-label classifier [13, 14, 16, 17, 42, 43, 46]. Gao et al. [16] suggests a two-stream framework to identify global and local information" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "23860" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 72, + 537, + 228 + ], + "blocks": [ + { + "bbox": [ + 57, + 72, + 537, + 228 + ], + "lines": [ + { + "bbox": [ + 57, + 72, + 537, + 228 + ], + "spans": [ + { + "bbox": [ + 57, + 72, + 537, + 228 + ], + "type": "image", + "image_path": "5c95f6e5e4e83008579a6320d2bbd777ec3c50f5b321ab9aef414704b93cb307.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "lines": [ + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "text", + "content": "Figure 2. Pipeline for " + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "inline_equation", + "content": "\\mathbf{C}^3" + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "text", + "content": "-MLZSL. The input image is first passed through the feature extraction network (eg. VGG19), and then multi-layer feature maps are extracted through the Forward Pyramid module. After the feature maps are shuffled and grouped, each group uses " + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "inline_equation", + "content": "(\\mathbf{ML})^{2}\\mathbf{P}" + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "text", + "content": "-Encoder to extract semantic information. 
Then, the semantic information generated by all groups is associated through Group Attention to generate the final semantic matrix " + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 46, + 238, + 548, + 284 + ], + "type": "text", + "content": " (zoom in for a better view)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 299, + 288, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 299, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 46, + 299, + 288, + 334 + ], + "type": "text", + "content": "separately and a multi-class regional attention module to align them. However, the above methods cannot generalize to unseen classes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 343, + 164, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 343, + 164, + 357 + ], + "spans": [ + { + "bbox": [ + 47, + 343, + 164, + 357 + ], + "type": "text", + "content": "2.2. Zero-Shot Learning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 362, + 290, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 362, + 290, + 590 + ], + "spans": [ + { + "bbox": [ + 46, + 362, + 290, + 590 + ], + "type": "text", + "content": "Zero-shot learning provides a solution to recognize unseen classes. Current studies mostly consider a relatively simple single-label scenario [4, 6, 26, 30, 32, 47, 50, 51]. In practice, existing methods usually focus on finding the main semantic information of training images, and then exploit the semantic relationship, i.e., word vectors [15, 38, 44, 45] or attribute vectors [3, 27, 28], between seen and unseen classes for prediction. The generated semantic information can be inferred from seen to unseen labels by comparing the similarity of the relation vectors between them. For example, Chen et al. [7] proposes a generative flow framework and uses a combinatorial strategy to solve the problems of semantic inconsistency, variance collapse, and structural disorder in zero-shot learning. Gune et al. [18] generates visual proxy samples to simulate the average entropy of the label distribution of the unseen class. However, the above methods only predict single labels with a single representation of images, which can hardly generalize to a more realistic multi-label scenario." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 598, + 223, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 223, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 223, + 612 + ], + "type": "text", + "content": "2.3. Multi-Label Zero-Shot Learning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 288, + 713 + ], + "type": "text", + "content": "Multi-label zero-shot learning has received increasing attention recently. For example, Norouzi et al. [36] designs two separate spaces, i.e., the image and semantic embedding spaces, jointly with the convex combination of the label embedding vectors to achieve multi-label recognition in the zero-shot learning framework. Zhang et al. 
[49] proposes a fast and general model based on the fact that the word vectors of the relevant labels are ranked before" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 299, + 547, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 299, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 304, + 299, + 547, + 455 + ], + "type": "text", + "content": "the irrelevant word vectors in the main vector of the image. Different from the above methods, Lee et al. [29] uses the knowledge graph to connect different labels. In recent years, attention-based methods become the mainstream. For example, LESA [22] applies an attention-sharing mechanism to the multi-label environment, allowing the model to focus on the key areas of each label. Narayan et al. [35] uses a bi-layer attention module to combine global context information and local features and map the generated information to the semantic space. However, the above methods only stay at the two-dimensional space level " + }, + { + "bbox": [ + 304, + 299, + 547, + 455 + ], + "type": "inline_equation", + "content": "(H\\times W)" + }, + { + "bbox": [ + 304, + 299, + 547, + 455 + ], + "type": "text", + "content": ", and do not consider the response between different feature channels with respect to classes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 468, + 367, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 468, + 367, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 468, + 367, + 481 + ], + "type": "text", + "content": "3. Methods" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 490, + 405, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 490, + 405, + 503 + ], + "spans": [ + { + "bbox": [ + 306, + 490, + 405, + 503 + ], + "type": "text", + "content": "3.1. Problem Setting" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": "Before proposing our method, we first explain the definition of the MLZSL problem. Given " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " input samples " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\{(I_1,Y_1),\\ldots ,(I_i,Y_i),\\ldots ,(I_n,Y_n)\\}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "I_{i}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " represents the input image of the " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": "-th train-set, and " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "Y_{i}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " represents the training labels corresponding to the input images, which are also called 'seen labels'. 
On the label distribution, let us set the seen label in the dataset as " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", where the seen label refers to the label known by the model. " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "C_s" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " is mainly used for the train-set of the model in zero-shot learning. We set the unseen label to " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "C_u" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", and the unseen label is generally used in the test-set. The label relationship in the dataset is defined as " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{C} = \\mathcal{C}_s\\cup \\mathcal{C}_u" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": " represents the set of all labels in the dataset. Based on the above definition, after the model is trained on the train-set, in the testing part of MLZSL, given the image " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "I_{u}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", the model can output the prediction result " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "y_{u}\\subset C_{u}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ". While in the generalized zero-shot learning task, given an image " + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "inline_equation", + "content": "I_{u}" + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": ", the output of the model is" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "23861" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "inline_equation", + "content": "y_{u} \\subset \\mathcal{C}" + }, + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": ", which means the model needs to output both the seen label and the unseen label that exist in the image." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 102, + 149, + 115 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 102, + 149, + 115 + ], + "spans": [ + { + "bbox": [ + 47, + 102, + 149, + 115 + ], + "type": "text", + "content": "3.2. 
" + }, + { + "bbox": [ + 47, + 102, + 149, + 115 + ], + "type": "inline_equation", + "content": "(\\mathbf{ML})^{2}\\mathbf{P}" + }, + { + "bbox": [ + 47, + 102, + 149, + 115 + ], + "type": "text", + "content": " -Encoder" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": "The proposed network structure is shown in Figure 2. For input images " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": ", we first use a pre-trained feature extraction network to obtain the corresponding image features " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": ". We extract the features from the last three layers of the feature extraction network, and keep the two layers with the larger size consistent with the smallest size layer by downsampling. For example, assuming that the used and training network is VGG19 [37], the size of the last three layers of feature maps is " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\{28 \\times 28, 14 \\times 14, 7 \\times 7\\}" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": ". We use max-pooling to down-sample the large-scale feature maps to obtain equivalent " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": " feature maps. This step is called the \"Forward Pyramid\". After that, we obtain feature maps at different levels with the same scale. Then we randomly shuffle them to get the feature map " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_a" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": " and re-group them into " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": " different groups, each group has " + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "inline_equation", + "content": "d_w" + }, + { + "bbox": [ + 46, + 121, + 287, + 348 + ], + "type": "text", + "content": " channels, which is the same length as the word vectors in the ground-truth semantic space. The purpose of this operation is to generate specific semantic vectors to express the semantic information contained in each group." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 348, + 287, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 287, + 455 + ], + "type": "text", + "content": "Next, the features of each group are fed into " + }, + { + "bbox": [ + 46, + 348, + 287, + 455 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 46, + 348, + 287, + 455 + ], + "type": "text", + "content": "-Encoder. First, we need to calculate the correlation between channels within each group. 
In traditional self-attention, the cost of computation greatly consumes the inference speed of the model, and the traditional self-attention module cannot accurately reflect the relationship between each channel. To solve the loss caused by the amount of calculation and accurately reflect the channel correlation, we designed a new self-attention structure to achieve this." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "text", + "content": "For features " + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_a" + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "text", + "content": " in group " + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_a^i \\in \\mathbb{R}^{H \\times W \\times d_w}" + }, + { + "bbox": [ + 47, + 455, + 287, + 492 + ], + "type": "text", + "content": ". We first generate Query (Q), Value (V) and Key (K) through three convolution operations:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 500, + 287, + 516 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 500, + 287, + 516 + ], + "spans": [ + { + "bbox": [ + 77, + 500, + 287, + 516 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} = W _ {p} ^ {Q} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {K} = W _ {p} ^ {K} \\mathcal {F} _ {a} ^ {i} \\quad \\mathbf {V} = W _ {p} ^ {V} \\mathcal {F} _ {a} ^ {i} \\tag {1}", + "image_path": "b52628feec22a4f5a8837a9b3dc1a5b1ad7b9912de9df259f1c0e897488e43f6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "W_{p}^{(\\cdot)}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " means the convolution operation. 
Next, to obtain the channel correlation matrix " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ", we reshape " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{K}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " in the spatial domain " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "(H\\times W)" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " to get " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{Q}}\\in \\mathbb{R}^{HW\\times d_w}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{K}}\\in \\mathbb{R}^{d_w\\times HW}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{V}}\\in \\mathbb{R}^{HW\\times d_w}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ". Then perform a dot product operation on " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " to obtain the channel correlation matrix " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{R}\\in \\mathbb{R}^{d_w\\times d_w}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ". 
After that, we do the dot product between " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ", finally, add with the input " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_a^i" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": " to get the output " + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}" + }, + { + "bbox": [ + 47, + 525, + 287, + 623 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 90, + 630, + 287, + 659 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 630, + 287, + 659 + ], + "spans": [ + { + "bbox": [ + 90, + 630, + 287, + 659 + ], + "type": "interline_equation", + "content": "\\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) = \\widehat {\\mathbf {V}} \\cdot \\underset {\\mathcal {R}} {\\operatorname {s o f t m a x}} (\\underbrace {\\widehat {\\mathbf {K}} \\cdot \\widehat {\\mathbf {Q}}} _ {\\mathcal {R}}) \\tag {2}", + "image_path": "0afc59fe346b0e9e95acde906e2e942becb39b36a7eea85131d24fea0093fd41.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 669, + 287, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 669, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 113, + 669, + 287, + 685 + ], + "type": "interline_equation", + "content": "\\widehat {\\mathcal {F}} _ {a} ^ {i} = \\mathcal {F} _ {a} ^ {i} + \\operatorname {A t t} (\\widehat {\\mathbf {Q}}, \\widehat {\\mathbf {K}}, \\widehat {\\mathbf {V}}) \\tag {3}", + "image_path": "9fe7c0d939e37f8ef54ad047a9d170fb2a46ccb02b070366a929c0417cfeb120.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 712 + ], + "type": "text", + "content": "After enhancing the correlation between channels, we need to extract and analyze the feature information contained in" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "text", + "content": "each channel. We reshape the information in the spatial domain into a one-dimensional vector, then we decide to use the Multi-Layer Perceptron (MLP) to encode the features. Compared with the traditional convolution structure, the MLP structure is convenient to perform information fusion between local regions. 
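Before the MLP stage is detailed, the channel self-attention just described (Eqs. 1-3) can be sketched in a few lines of PyTorch. This is a minimal illustration under assumed shapes, not the authors' released code: the module name `ChannelSelfAttention`, the use of 1x1 convolutions for the projections W_p^{Q,K,V}, and the softmax axis are our own choices where the text leaves them open.

```python
import torch
import torch.nn as nn

class ChannelSelfAttention(nn.Module):
    """Sketch of Eqs. (1)-(3): channel-wise self-attention within one group.

    Input:  F_a^i of shape (B, d_w, H, W); output has the same shape,
    with a residual connection as in Eq. (3).
    """
    def __init__(self, d_w: int):
        super().__init__()
        # Eq. (1): W_p^Q, W_p^K, W_p^V realized here as 1x1 convolutions (an assumption).
        self.q = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.k = nn.Conv2d(d_w, d_w, kernel_size=1)
        self.v = nn.Conv2d(d_w, d_w, kernel_size=1)

    def forward(self, f: torch.Tensor) -> torch.Tensor:
        b, d_w, h, w = f.shape
        # Flatten the spatial domain (H*W) as in the reshaping step.
        q = self.q(f).flatten(2).transpose(1, 2)   # (B, HW, d_w)  ~ Q_hat
        k = self.k(f).flatten(2)                   # (B, d_w, HW)  ~ K_hat
        v = self.v(f).flatten(2).transpose(1, 2)   # (B, HW, d_w)  ~ V_hat
        # Eq. (2): channel correlation matrix R = softmax(K_hat @ Q_hat), size d_w x d_w.
        r = torch.softmax(k @ q, dim=-1)           # (B, d_w, d_w)
        att = v @ r                                # (B, HW, d_w)
        # Eq. (3): reshape back to the feature map and add the residual input.
        att = att.transpose(1, 2).reshape(b, d_w, h, w)
        return f + att

# e.g. ChannelSelfAttention(d_w=300)(torch.randn(2, 300, 7, 7))
```

Because the correlation matrix is only d_w x d_w rather than HW x HW, this step is considerably cheaper than spatial self-attention, which is the point made later in the efficiency comparison.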
Specifically, for the input feature " + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{F}}_a^i\\in \\mathbb{R}^{H\\times W\\times d_w}" + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "text", + "content": ", we first change the dimension from " + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "inline_equation", + "content": "H\\times W\\times d_w" + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{mlp}^i\\in \\mathbb{R}^{d_w\\times HW}" + }, + { + "bbox": [ + 304, + 72, + 545, + 227 + ], + "type": "text", + "content": ", then we use LayerNorm to normalize the input. Our MLP structure includes two different MLPs: MLP1 is used to extract the spatial information contained in each channel, and MLP2 is proposed to obtain local information of different channels in the spatial domain:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 321, + 234, + 545, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 234, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 321, + 234, + 545, + 249 + ], + "type": "interline_equation", + "content": "\\mathcal {F} _ {m l p 1} ^ {i} = \\mathcal {F} _ {m l p} ^ {i} + \\mathbf {W} _ {2} \\sigma \\left(\\mathbf {W} _ {1} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p} ^ {i}\\right)\\right) \\tag {4}", + "image_path": "03090ef5e92e2f52a2ef424517543b4bd30e62109d8f0be2a2561881d414fab8.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 329, + 255, + 545, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 255, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 329, + 255, + 545, + 270 + ], + "type": "interline_equation", + "content": "\\mathcal {M} = \\mathcal {F} _ {m l p 1} ^ {i} + \\mathbf {W} _ {4} \\sigma \\left(\\mathbf {W} _ {3} \\text {L a y e r N o r m} \\left(\\mathcal {F} _ {m l p 1} ^ {i}\\right)\\right) \\tag {5}", + "image_path": "807ed0612853383d4b4d1317ed01406303874a82726aa727ebc72e32f343cdac.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{mlp1}^i" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": " is the output after MLP1. 
" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_1" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_2" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": " is the parameter of MLP1, and " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_3" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_4" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": " is the parameter of MLP2. " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": " is an element-wise non-linearity GELU [21]. Then we use max-pooling to filter out the best semantic vector in the spatial domain, which can more accurately represent the semantic information of this group. This max-pooling operation is also to be able to directly extract the channel response. So we obtain group semantic vectors " + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{X} \\in \\mathbb{R}^{g \\times d_w}" + }, + { + "bbox": [ + 304, + 274, + 545, + 381 + ], + "type": "text", + "content": " and send them into Group Attention." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 388, + 406, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 388, + 406, + 400 + ], + "spans": [ + { + "bbox": [ + 306, + 388, + 406, + 400 + ], + "type": "text", + "content": "3.3. Group Attention" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "text", + "content": "Although we obtained group semantic vectors " + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "text", + "content": " through " + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "text", + "content": "-Encoder, the semantic vectors generated by each group did not establish a relationship with each other at this time. As we already know, the key to improving the accuracy of multi-label image classification is to construct the correlation of labels within the image. So we use Group Attention to build the mutual information and also to find similar responses between different labels. 
We pass a series of linear layers to " + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{X}" + }, + { + "bbox": [ + 304, + 406, + 545, + 514 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 365, + 519, + 545, + 534 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 519, + 545, + 534 + ], + "spans": [ + { + "bbox": [ + 365, + 519, + 545, + 534 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {\\mathbf {x}} = W _ {x} ^ {Q} \\mathcal {X} \\quad \\mathbf {K} _ {\\mathbf {x}} = W _ {x} ^ {K} \\mathcal {X} \\tag {6}", + "image_path": "efbe5257ebc582fa241ad5617c922035835ee8bd54604700bbc72bedacf0b5b9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 384, + 540, + 545, + 553 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 540, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 384, + 540, + 545, + 553 + ], + "type": "interline_equation", + "content": "\\mathcal {S} = \\left(\\mathbf {Q} _ {\\mathbf {x}} \\cdot \\mathbf {K} _ {\\mathbf {x}}\\right) \\cdot \\mathcal {X} \\tag {7}", + "image_path": "94a2e86a7844c64e61dd4423f1d5bfef9901dc70aa3d626cb6ff221e526ec9d1.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathbf{x}} \\in \\mathbb{R}^{g \\times d_w}" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": ", and we transpose " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathbf{x}}" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathbf{x}} \\in \\mathbb{R}^{d_w \\times g}" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "W_x^Q" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "W_x^K" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " are different linear weights. " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "S \\in \\mathbb{R}^{g \\times d_w}" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " is the semantic matrix, which contains all the semantic information of the input image. In the loss function, we will make each semantic vector in " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " approximate the semantic information of seen classes appearing in the image. 
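Putting the remaining pieces together, a minimal sketch of the encoder's MLP stage (Eqs. 4-5), the spatial max-pooling that yields the group semantic vectors, and Group Attention (Eqs. 6-7) is given below. It is not the authors' released code: the module names, the hidden width, the axis each MLP mixes over, and the bias-free linear layers standing in for W_x^Q and W_x^K are assumptions where the text is not explicit.

```python
import torch
import torch.nn as nn

class GroupMLP(nn.Module):
    """Sketch of Eqs. (4)-(5) plus the spatial max-pooling for one group.

    Input:  attention output F_hat_a^i of shape (B, d_w, H, W).
    Output: one d_w-dimensional semantic vector per image, shape (B, d_w).
    Here MLP1 mixes the H*W positions of each channel and MLP2 mixes the
    d_w channels at each position (our reading of the text).
    """
    def __init__(self, d_w: int, hw: int, hidden: int = 256):
        super().__init__()
        self.norm1 = nn.LayerNorm(hw)
        self.mlp1 = nn.Sequential(nn.Linear(hw, hidden), nn.GELU(), nn.Linear(hidden, hw))
        self.norm2 = nn.LayerNorm(d_w)
        self.mlp2 = nn.Sequential(nn.Linear(d_w, hidden), nn.GELU(), nn.Linear(hidden, d_w))

    def forward(self, f_hat: torch.Tensor) -> torch.Tensor:
        x = f_hat.flatten(2)                 # (B, d_w, HW)  ~ F_mlp^i
        x = x + self.mlp1(self.norm1(x))     # Eq. (4): residual MLP over spatial positions
        x = x.transpose(1, 2)                # (B, HW, d_w)
        x = x + self.mlp2(self.norm2(x))     # Eq. (5): residual MLP over channels
        return x.max(dim=1).values           # max-pool over H*W -> (B, d_w)

class GroupAttention(nn.Module):
    """Sketch of Eqs. (6)-(7): relate the g group semantic vectors to each other."""
    def __init__(self, d_w: int):
        super().__init__()
        self.w_q = nn.Linear(d_w, d_w, bias=False)   # stands in for W_x^Q
        self.w_k = nn.Linear(d_w, d_w, bias=False)   # stands in for W_x^K

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: group semantic vectors X of shape (B, g, d_w).
        q = self.w_q(x)                      # (B, g, d_w)
        k = self.w_k(x).transpose(1, 2)      # (B, d_w, g)
        # Eq. (7) as written applies no softmax: S = (Q_x . K_x) . X.
        return (q @ k) @ x                   # (B, g, d_w)

# Illustrative wiring for g groups of 7x7 maps with d_w = 300 (sizes are examples only):
# X = torch.stack([group_mlp(f_hat_g) for f_hat_g in groups], dim=1)   # (B, g, d_w)
# S = GroupAttention(300)(X)
```

The resulting semantic matrix S is what the loss below pushes toward the word vectors of the seen classes present in each image.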
Therefore, from another perspective, the semantic vectors in " + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 556, + 545, + 651 + ], + "type": "text", + "content": " are related to seen classes." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 659, + 394, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 659, + 394, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 659, + 394, + 670 + ], + "type": "text", + "content": "3.4. Loss Function" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "During training, some semantic vectors are generated for each input image. The semantic matrix " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " includes the semantic information in the image and is sent to the prediction" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23862" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "module. The loss function consists of two parts. First of all, to make the positive class (seen class appear in each training image) get a higher ranking than the negative class (seen class which does not appear in the training image). Inspired by [49], we choose to adopt ranknet loss [5] as the main component of the loss function. We use" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 92, + 155, + 287, + 168 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 155, + 287, + 168 + ], + "spans": [ + { + "bbox": [ + 92, + 155, + 287, + 168 + ], + "type": "interline_equation", + "content": "\\mu_ {i j} = \\max \\left(\\mathcal {S} \\cdot n _ {i}\\right) - \\max \\left(\\mathcal {S} \\cdot p _ {j}\\right), \\tag {8}", + "image_path": "66cbb052130c27ebcc632327f87a654aaa4d575926c8fa5d9e62370167b587a8.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "text", + "content": "to indicate the number of violations of any of these ranking constraints, where " + }, + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "text", + "content": " represents the semantic vector of the negative class, and " + }, + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "inline_equation", + "content": "p_j" + }, + { + "bbox": [ + 46, + 177, + 287, + 237 + ], + "type": "text", + "content": " denotes the semantic vector of the positive class. 
max is used to maximize this gap between negative and positive, and constrain it in subsequent steps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 238, + 287, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 287, + 262 + ], + "type": "text", + "content": "Next, to minimize the gap, we design the loss function as the following:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 270, + 287, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 270, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 78, + 270, + 287, + 300 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r a n k}} = \\frac {1}{(| P | | \\bar {P} |)} \\sum_ {i} \\sum_ {j} \\log \\left(1 + e ^ {\\mu_ {i j}}\\right), \\tag {9}", + "image_path": "9f6d69065c22eff623eb2cfa4387ff023979b3f38514be04500684881e748cfc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\frac{1}{(|P| |\\bar{P}|)}" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": " is used to normalize the ranknet loss, and " + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "inline_equation", + "content": "|P|" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": " denotes the number of positive class, " + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "inline_equation", + "content": "|\\bar{P}|" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": " represents the number of negative class. When an image contains a large number of positive labels, the image becomes difficult to classify. So we need the model to value these hard samples during training. Therefore, we add the class weight " + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": " to improve the performance of the model in the face of hard samples. 
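A minimal sketch of the ranking term (Eqs. 8-9) for a single image is shown below, assuming the seen-class word vectors are available as a matrix. Function and argument names are our own; the class weight omega (Eq. 10) and the variance regularizer (Eq. 11), which are defined next and combined with this term in Eq. (12), are deliberately omitted here.

```python
import torch
import torch.nn.functional as F

def rank_loss(S: torch.Tensor, word_vecs: torch.Tensor, pos_mask: torch.Tensor) -> torch.Tensor:
    """Sketch of Eqs. (8)-(9) for one image.

    S:         (g, d_w) semantic matrix from Group Attention.
    word_vecs: (C_s, d_w) word vectors of the seen classes.
    pos_mask:  (C_s,) bool, True for seen classes present in the image.
    """
    # Score each seen class by its best-matching semantic vector: max_g <S_g, word_vec_c>.
    scores = (S @ word_vecs.t()).max(dim=0).values   # (C_s,)
    pos = scores[pos_mask]                           # positive (present) classes
    neg = scores[~pos_mask]                          # negative (absent) classes
    # Eq. (8): mu_ij = neg_i - pos_j for every (negative, positive) pair.
    mu = neg.unsqueeze(1) - pos.unsqueeze(0)         # (|P_bar|, |P|)
    # Eq. (9): average log(1 + e^mu) over all pairs, i.e. normalized by |P||P_bar|.
    return F.softplus(mu).mean()
```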
" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 310, + 287, + 411 + ], + "type": "text", + "content": " is represented as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 119, + 422, + 287, + 445 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 422, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 119, + 422, + 287, + 445 + ], + "type": "interline_equation", + "content": "\\omega = 1 + \\sum_ {i} v a r (P ^ {i}), \\tag {10}", + "image_path": "3e6ca9de9638ff975f2a0d88ea471e8c8c4f2cf8636c43c1433d24bfe953366f.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "inline_equation", + "content": "P^i" + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "content": " represents the vector of the " + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "content": "-th positive class, " + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "inline_equation", + "content": "var" + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "content": " means the variance. The higher " + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 456, + 287, + 575 + ], + "type": "text", + "content": " means the image contains more complex labels. To prevent the direction of the semantic vectors generated by the model from being too divergent, it needs to be controlled by the loss function. Therefore, we believe that the addition of regularization terms can reduce the difference between the generated semantic vectors when the model faces complex input images. This reduction in variance helps the model learn relevant information between different classes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 583, + 287, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 583, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 113, + 583, + 287, + 616 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e g} = \\left\\| \\sum_ {n} v a r \\left(\\mathcal {S} _ {n}\\right) \\right\\| _ {1}. 
\\tag {11}", + "image_path": "22150173c887e86c0f5f8db372ea82b8383f2e3044141927c5beaa045d12b7fd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 625, + 256, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 625, + 256, + 637 + ], + "spans": [ + { + "bbox": [ + 47, + 625, + 256, + 637 + ], + "type": "text", + "content": "Finally, the loss function of the model is defined as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 647, + 287, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 287, + 689 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} ((1 - \\lambda) \\cdot \\omega \\mathcal {L} _ {\\text {r a n k}} (\\mathcal {S} _ {i}, Y _ {i}) + \\lambda \\mathcal {L} _ {\\text {r e g}} (\\mathcal {S} _ {i})) \\tag {12}", + "image_path": "4f08309490c99ab6006efcaac2be14356232a6d41eb46d32d6974675ca3e0165.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " means the number of batch size, and " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " is a hyperparameter that denotes the regularization term's weight." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 72, + 388, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 388, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 388, + 85 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 91, + 422, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 91, + 422, + 105 + ], + "spans": [ + { + "bbox": [ + 306, + 91, + 422, + 105 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 110, + 545, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 110, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 304, + 110, + 545, + 314 + ], + "type": "text", + "content": "Datasets: First, we use the NUS-WIDE dataset [10] to conduct MLZSL experiments. The NUS-WIDE dataset contains about 270,000 images, and each image contains 925 labels, which are automatically extracted from Flickr user tags. In addition, it also contains 81 labels that are manually annotated by humans, and these labels are called 'GroundTruth'. During the experiment, 925 labels were used as 'seen labels', and 81 labels were used as 'unseen labels'. This setting is similar with [22]. Another dataset is called the Open-Images-V4 dataset. This dataset contains nearly 9 million training images, 125,456 images as test images, and 41,620 images in the validation set. The train-set contains 7,186 labels, which are 'seen labels' that appear at least 100 times in the train-set. 
While the remaining 400 most frequent labels that do not appear in the train-set are used as test-set labels, they are also used as 'unseen labels'. Each unseen label has at least appeared 75 times." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "type": "text", + "content": "Evaluation Metrics: To better allow our proposed new model and other comparative models to perform an unbiased comparison on the task of MLZSL, we use the two most common evaluation metrics, the mean Average Precision (mAP) [22, 41] and F1-Score. Among them, top-K F1-Score is used to measure the accuracy of the model for label prediction, and mAP is used to reflect the accuracy for unseen label retrieval of the image." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "text", + "content": "Implementation Details: Our model can support end-to-end training. We choose VGG19 [37], pre-trained on ImageNet dataset [11], as the backbone network. Unlike other methods, our model uses multi-scale feature maps and aggregates them. The sizes of the feature maps are " + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "inline_equation", + "content": "28 \\times 28" + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "inline_equation", + "content": "14 \\times 14" + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 304, + 412, + 545, + 484 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "text", + "content": "In terms of the optimizer, we choose to use the Adam optimizer [24], which requires less memory and is suitable for large datasets. The weight decay of the Adam optimizer is set to " + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "inline_equation", + "content": "4e^{-3}" + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "text", + "content": ". In the NUS-WIDE dataset experiments, the initial learning rate of the model is " + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "inline_equation", + "content": "5e^{-5}" + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "text", + "content": ", and then the learning rate decreases by " + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\frac{1}{10}" + }, + { + "bbox": [ + 304, + 485, + 545, + 616 + ], + "type": "text", + "content": " at the 7th epoch. The entire experimental process of the NUS-WIDE dataset requires a total of 20 epochs with a batch size of 48. In the experiments using the Open-Images-V4 dataset, our learning rate, batch size, and decay rate remain the same as the NUS-WIDE dataset, but the number of epochs is 7." 
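As a rough illustration of the optimization settings above (not the authors' training script, and the network below is only a placeholder), the NUS-WIDE schedule could be configured as follows; for Open-Images-V4 the same settings are used with 7 epochs instead of 20.

```python
import torch
import torch.nn as nn

# Placeholder module; in practice this would be the full (ML)^2P-Encoder network.
model = nn.Linear(10, 10)

# Adam with the stated weight decay and initial learning rate,
# dropping the learning rate by 10x at the 7th epoch.
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5, weight_decay=4e-3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[7], gamma=0.1)

for epoch in range(20):
    # ... one pass over the NUS-WIDE training set with batch size 48 ...
    scheduler.step()
```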
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "Baselines: We will compare the proposed method with several state-of-the-art deep learning-based MLZSL models. These comparative methods have been published in recent years and cover a fairly rich variety of techniques, such as the attention mechanism with the most common CNNs. These comparison methods include: CONSE [36], LabelEM [2], Fast0Tag [49], Kim et al. [23], LESA Attention per Cluster (ApC) [22], LESA [22], and BiAM [35]. All" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23863" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "text", + "content": "comparison methods using VGG19 [37] are not fine-tuned. In addition to comparing with comparison models, we will also test the model's performance under different settings of hyper-parameters " + }, + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 72, + 288, + 144 + ], + "type": "text", + "content": ". At the same time, we will conduct ablation experiments to verify the integrity of the model's architecture." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 154, + 203, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 154, + 203, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 154, + 203, + 167 + ], + "type": "text", + "content": "4.2. State-of-the-art Comparison" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": "NUS-WIDE: Table 1 shows the performance of ours and competitive methods on the NUS-WIDE test-set. The table contains the results of both ZSL and GZSL. CONSE [36] and LabelEM [2], as the methods proposed earlier, do not perform well on large-scale datasets. Fast0Tag [49] achieves more competitive results by sorting the positive labels to find the principal directions of the image. LESA [22] and BiAM [35] are currently the most advanced models that rely on spatial attention mechanism to generate semantic information. Compared to BiAM, our method achieves a " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " improvement on mAP in the ZSL task. 
Besides, we lead BiAM by " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "0.8\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "2.9\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " in F1-Score of " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "K = 3" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "K = 5" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": ", respectively. On the GZSL task, we also surpass BiAM. BiAM deals with higher-dimensional and richer spatial information, while our method is more inclined to single-dimensional channel responses. Therefore, it is not easy to achieve such results with " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " improvement in mAP and " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "0.3\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "0.7\\%" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " in F1-Score of " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "K = 3" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "inline_equation", + "content": "K = 5" + }, + { + "bbox": [ + 46, + 173, + 288, + 411 + ], + "type": "text", + "content": ", respectively. Good results on NUS-WIDE dataset imply the effectiveness of our method." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 413, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 413, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 287, + 604 + ], + "type": "text", + "content": "Attention Visualization on NUS-WIDE: Figure 6 illustrate the attention regions of the model when our method predicts unseen labels. Figure 6(a) shows that our model can clearly distinguish scene information from all unseen classes. The attention areas of \"Rocks\" and \"Mountain\" in the figure are roughly the same, which indicates that the two classes have similar semantics and dependencies, and the existence of Group Attention enables the model to learn this mutual information well. Figure 6(b) is a comparison with BiAM [35], the best existing model for mining spatial domain information. This result fully shows the effective use of channel information can more accurately grasp the response between classes. While BiAM's over-exploration of spatial information improves the acquisition of regional information, it loses the scene-level response at the same time. For more comparison results, please refer to appendix." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": "Open-Images-V4: From Table 2, we show the results of ours and the baseline models on Open-Images-V4. We follow the evaluation setting of [22, 35]. This dataset contains more seen and unseen labels than NUS-WIDE. With a large increase in the number of classes, all methods get poor F1-Score on the ZSL task. Among them, Fast0Tag has made great progress compared with past methods, especially in the GZSL task. LESA [22] and BiAM [35], as the two best methods, represent the highest level of extracting spatial re" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 315, + 123, + 541, + 312 + ], + "blocks": [ + { + "bbox": [ + 304, + 70, + 545, + 114 + ], + "lines": [ + { + "bbox": [ + 304, + 70, + 545, + 114 + ], + "spans": [ + { + "bbox": [ + 304, + 70, + 545, + 114 + ], + "type": "text", + "content": "Table 1. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of " + }, + { + "bbox": [ + 304, + 70, + 545, + 114 + ], + "type": "inline_equation", + "content": "K \\in 3,5" + }, + { + "bbox": [ + 304, + 70, + 545, + 114 + ], + "type": "text", + "content": " and mAP. The best results are shown in bold." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 123, + 541, + 312 + ], + "lines": [ + { + "bbox": [ + 315, + 123, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 315, + 123, + 541, + 312 + ], + "type": "table", + "html": "
MethodTaskmAPF1 (K = 3)F1 (K = 5)
CONSE [36]ZSL9.421.620.2
GZSL2.17.08.1
LabelEM [2]ZSL7.119.219.5
GZSL2.29.511.3
Fast0Tag [49]ZSL15.127.826.4
GZSL3.711.513.5
Kim et al. [23]ZSL10.425.823.6
GZSL3.710.913.2
Attention per Cluster [22]ZSL12.924.622.9
GZSL2.66.47.7
LESA [22]ZSL19.431.628.7
GZSL5.614.416.8
BiAM [35]ZSL25.832.029.4
GZSL8.915.518.5
Our ApproachZSL29.432.832.3
GZSL10.215.819.2
", + "image_path": "53129e7f868080edbe375db7824014f7964f483c936020db97aa7120d8b23462.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 310, + 374, + 544, + 541 + ], + "blocks": [ + { + "bbox": [ + 304, + 321, + 545, + 365 + ], + "lines": [ + { + "bbox": [ + 304, + 321, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 304, + 321, + 545, + 365 + ], + "type": "text", + "content": "Table 2. State-of-the-art comparison for multi-label ZSL and GZSL tasks on the Open-Images-V4 dataset. We show the indicators of F1-Score in the case of " + }, + { + "bbox": [ + 304, + 321, + 545, + 365 + ], + "type": "inline_equation", + "content": "K \\in {10},{20}" + }, + { + "bbox": [ + 304, + 321, + 545, + 365 + ], + "type": "text", + "content": " and mAP. Best results are shown in bold." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 374, + 544, + 541 + ], + "lines": [ + { + "bbox": [ + 310, + 374, + 544, + 541 + ], + "spans": [ + { + "bbox": [ + 310, + 374, + 544, + 541 + ], + "type": "table", + "html": "
MethodTaskmAPF1 (K = 10)F1 (K = 20)
CONSE [36]ZSL40.40.40.3
GZSL43.52.62.4
LabelEM [2]ZSL40.50.50.4
GZSL45.25.25.1
Fast0Tag [49]ZSL41.20.70.6
GZSL45.216.013.0
Attention per Cluster [22]ZSL40.71.20.9
GZSL44.916.913.5
LESA [22]ZSL41.71.41.0
GZSL45.417.414.3
BiAM [35]ZSL62.84.13.7
GZSL79.617.615.1
Our ApproachZSL65.77.56.5
GZSL79.927.624.1
", + "image_path": "ab89dc01ca74cb24bd667d6cc1ec20d8ebfab87b3653f7442de766cd4e134fd5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": "sponses. BiAM achieves very large progress in mAP metrics on both ZSL and GZSL tasks. But our method achieves the best results in the mAP of ZSL, while leading by " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "3.4\\%" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "2.8\\%" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": " in F1-Score with " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "K = 3" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "K = 5" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": ", respectively. Most importantly, for the GZSL task, our F1-Score results also achieve huge advantages by " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "10.0\\%" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "inline_equation", + "content": "9.0\\%" + }, + { + "bbox": [ + 304, + 557, + 545, + 664 + ], + "type": "text", + "content": ". This shows that the channel-class correlation as semantic information can fully cope with the complex situation of a large number of labels." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Figure 5 shows the mAP, inference time, and GFLOPs comparisons between our model for obtaining semantic information based on channel responses and the two methods (LESA [22] and BiAM [35]) for acquiring semantic informa" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "23864" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "tion based on spatial features and achieving optimal results. In the mAP comparison, it can be seen that we have the highest accuracy for prediction in the ZSL task. At the same time, due to the small amount of data to be processed, the inference speed is the fastest of all comparison methods when we use the same GPU of NVIDIA RTX 3090. 
Finally, precisely because the model only needs to deal with a single-dimensional channel response, our " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "-Encoder module requires much less computation than " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "LESA" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "BiAM" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": " that deal with spatial attention. At the same time, the feature map is grouped to avoid the geometric increase of the computational complexity caused by the feature pyramid. This shows that our " + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "-Encoder can be more efficient." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 277, + 284, + 355 + ], + "blocks": [ + { + "bbox": [ + 46, + 236, + 288, + 270 + ], + "lines": [ + { + "bbox": [ + 46, + 236, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 288, + 270 + ], + "type": "text", + "content": "Table 3. Ablation study shows the contribution of the different components in our proposed approach. The baseline methods are performed on the NUS-WIDE test-set." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 277, + 284, + 355 + ], + "lines": [ + { + "bbox": [ + 52, + 277, + 284, + 355 + ], + "spans": [ + { + "bbox": [ + 52, + 277, + 284, + 355 + ], + "type": "table", + "html": "
abcdours
Forward Pyramid (ML)2P-Encoder Group Attention
mAPZSL25.327.328.427.929.4
GZSL8.18.59.28.810.2
", + "image_path": "b9cc6f11cdb2e6723a9f25ccd2ae12b178a3ecf8a6c6305042d2560d22679364.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 76, + 384, + 156, + 445 + ], + "blocks": [ + { + "bbox": [ + 76, + 384, + 156, + 445 + ], + "lines": [ + { + "bbox": [ + 76, + 384, + 156, + 445 + ], + "spans": [ + { + "bbox": [ + 76, + 384, + 156, + 445 + ], + "type": "image", + "image_path": "8458772c1181ee73b3a85ca3b2e670d4a44bffd3c2387a13e1980532fbf7d75e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 463, + 154, + 474 + ], + "lines": [ + { + "bbox": [ + 69, + 463, + 154, + 474 + ], + "spans": [ + { + "bbox": [ + 69, + 463, + 154, + 474 + ], + "type": "text", + "content": "(a) W/O (ML)" + }, + { + "bbox": [ + 69, + 463, + 154, + 474 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 69, + 463, + 154, + 474 + ], + "type": "text", + "content": "P-Encoder" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 170, + 369, + 279, + 460 + ], + "blocks": [ + { + "bbox": [ + 170, + 369, + 279, + 460 + ], + "lines": [ + { + "bbox": [ + 170, + 369, + 279, + 460 + ], + "spans": [ + { + "bbox": [ + 170, + 369, + 279, + 460 + ], + "type": "image", + "image_path": "7fcdb68df0c0c5df9c57d97fe0df8368ca7255d03e4d7cbd853a81342d753669.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 180, + 463, + 266, + 474 + ], + "lines": [ + { + "bbox": [ + 180, + 463, + 266, + 474 + ], + "spans": [ + { + "bbox": [ + 180, + 463, + 266, + 474 + ], + "type": "text", + "content": "(b) With " + }, + { + "bbox": [ + 180, + 463, + 266, + 474 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 180, + 463, + 266, + 474 + ], + "type": "text", + "content": " -Encoder" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 58, + 483, + 275, + 495 + ], + "lines": [ + { + "bbox": [ + 58, + 483, + 275, + 495 + ], + "spans": [ + { + "bbox": [ + 58, + 483, + 275, + 495 + ], + "type": "text", + "content": "Figure 3. Evaluation of t-SNE (zoom in for a better view)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 503, + 197, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 503, + 197, + 516 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 197, + 516 + ], + "type": "text", + "content": "4.3. Hyper-parameter Selection" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": "Our method includes two hyper-parameters, the number of groups " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": " and the weight of the regularization term " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": ". We use the control variable method. 
In terms of initializing hyper-parameters, the number of output semantic vectors " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": " is set to 7, and the value of " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": " is set to 0.4. The line graph in Figure 4 shows the mAP results achieved on the ZSL and GZSL tasks with different hyper-parameters, respectively. In addition, we can also see the impact of changes in hyperparameters on the prediction accuracy of the model. It can be seen that the number of " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": " does not have a very significant effect on the mAP of the ZSL task. But the impact on GZSL is more obvious. After comparison, we believe that when " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "g = 7" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": ", two different tasks can be well balanced. For the choice of the value of " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": ", we found that its change will have a greater impact on mAP. But only when " + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\lambda = 0.4" + }, + { + "bbox": [ + 46, + 522, + 289, + 713 + ], + "type": "text", + "content": ", the performance of GZSL is far better than other results, and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": "ZSL also achieves the optimal result. So the optimal hyperparameters we choose " + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "inline_equation", + "content": "g = 7" + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "inline_equation", + "content": "\\lambda = 0.4" + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 312, + 108, + 425, + 182 + ], + "blocks": [ + { + "bbox": [ + 312, + 108, + 425, + 182 + ], + "lines": [ + { + "bbox": [ + 312, + 108, + 425, + 182 + ], + "spans": [ + { + "bbox": [ + 312, + 108, + 425, + 182 + ], + "type": "image", + "image_path": "a1657c3c9aeb342e41cd7a891fefffd25bd38870895024e77f74a63090faedec.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 360, + 187, + 376, + 196 + ], + "lines": [ + { + "bbox": [ + 360, + 187, + 376, + 196 + ], + "spans": [ + { + "bbox": [ + 360, + 187, + 376, + 196 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 360, + 187, + 376, + 196 + ], + "type": "inline_equation", + "content": "g" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 428, + 109, + 541, + 182 + ], + "blocks": [ + { + "bbox": [ + 428, + 109, + 541, + 182 + ], + "lines": [ + { + "bbox": [ + 428, + 109, + 541, + 182 + ], + "spans": [ + { + "bbox": [ + 428, + 109, + 541, + 182 + ], + "type": "image", + "image_path": "bea5c061b33965db1460c351ea8d6dc9e5548fde810a154ead8e86167d85a405.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 465, + 186, + 504, + 196 + ], + "lines": [ + { + "bbox": [ + 465, + 186, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 465, + 186, + 504, + 196 + ], + "type": "text", + "content": "(b) Weights" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 309, + 261, + 380, + 316 + ], + "blocks": [ + { + "bbox": [ + 304, + 205, + 547, + 239 + ], + "lines": [ + { + "bbox": [ + 304, + 205, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 547, + 239 + ], + "type": "text", + "content": "Figure 4. Hyper-Parameter selection. The higher the mAP the better. All the experiments are performed on the NUS-WIDE test-set." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 261, + 380, + 316 + ], + "lines": [ + { + "bbox": [ + 309, + 261, + 380, + 316 + ], + "spans": [ + { + "bbox": [ + 309, + 261, + 380, + 316 + ], + "type": "image", + "image_path": "2e12f6e418094ab26027de6309daa2eb11ad2e6a6744aca0b614f345deb31613.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 319, + 362, + 328 + ], + "lines": [ + { + "bbox": [ + 333, + 319, + 362, + 328 + ], + "spans": [ + { + "bbox": [ + 333, + 319, + 362, + 328 + ], + "type": "text", + "content": "(a) mAP" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 389, + 261, + 457, + 316 + ], + "blocks": [ + { + "bbox": [ + 389, + 261, + 457, + 316 + ], + "lines": [ + { + "bbox": [ + 389, + 261, + 457, + 316 + ], + "spans": [ + { + "bbox": [ + 389, + 261, + 457, + 316 + ], + "type": "image", + "image_path": "be864e674cccd18cea12bbdcc3f4e639ca3700f6bc6523390a63dd95126fd3e0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 388, + 319, + 463, + 328 + ], + "lines": [ + { + "bbox": [ + 388, + 319, + 463, + 328 + ], + "spans": [ + { + "bbox": [ + 388, + 319, + 463, + 328 + ], + "type": "text", + "content": "(b) Inference time (ms)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 338, + 547, + 382 + ], + "lines": [ + { + "bbox": [ + 304, + 338, + 547, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 338, + 547, + 382 + ], + "type": "text", + "content": "Figure 5. Comparison of our " + }, + { + "bbox": [ + 304, + 338, + 547, + 382 + ], + "type": "inline_equation", + "content": "(\\mathbf{ML})^2\\mathbf{P}" + }, + { + "bbox": [ + 304, + 338, + 547, + 382 + ], + "type": "text", + "content": "-Encoder with BiAM and LESA in mAP, inference time, and FLOPs. The higher the mAP the better, the lower the Inference time and GFLOPs the better. All methods are performed on the NUS-WIDE test-set." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 465, + 261, + 534, + 316 + ], + "blocks": [ + { + "bbox": [ + 465, + 261, + 534, + 316 + ], + "lines": [ + { + "bbox": [ + 465, + 261, + 534, + 316 + ], + "spans": [ + { + "bbox": [ + 465, + 261, + 534, + 316 + ], + "type": "image", + "image_path": "2455e527a5bdeb3bb22c7b815463f0c08e291f6c7add9f25ddff4192a6829139.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 484, + 319, + 525, + 328 + ], + "lines": [ + { + "bbox": [ + 484, + 319, + 525, + 328 + ], + "spans": [ + { + "bbox": [ + 484, + 319, + 525, + 328 + ], + "type": "text", + "content": "(c) GFLOPs" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 395, + 400, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 395, + 400, + 407 + ], + "spans": [ + { + "bbox": [ + 306, + 395, + 400, + 407 + ], + "type": "text", + "content": "4.4. 
Ablation Study" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "text", + "content": "Ablation Study: To illustrate the effectiveness of each module designed in our method, we arrange three comparative experiments. The specific results are shown in Table 3. As the most primitive structure, model 'a' only contains shuffle and grouping operations. But after adding the 'Forward Pyramid', the model expands the number of features. As the number of optional feature channels increases, the amount of information brought by the channel also increases, thus achieving more competitive results. The addition of " + }, + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "text", + "content": "-Encoder enables the model to process the channel response of specific classes. The supplement of Group Attention is to give the model-specific information for solving multi-label tasks, that is, inter-class correlation. The combination of " + }, + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 413, + 545, + 616 + ], + "type": "text", + "content": "-Encoder and Group Attention greatly improves the prediction ability of the model in ZSL and GZSL tasks, indicating that our model construction has achieved great success." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "text", + "content": "t-SNE: Figure 3 shows the performance of " + }, + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "text", + "content": "-Encoder in t-SNE visualization. It can be seen that after using " + }, + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 616, + 545, + 677 + ], + "type": "text", + "content": "-Encoder, the boundaries of inter-class become much clearer, proving the correctness of our exploration for class-specific channel responses." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Different Backbones: Table 4 shows the results produced by our method using different backbones. 
It can be seen from the results that ResNet [20] has obvious advantages" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "23865" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 120 + ], + "type": "text", + "content": "over VGG [37]. As the ResNet network deepens and the number of parameters increases, the results obtained by our model become better. This is exactly in line with the result variation of an end-to-end model." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 182, + 282, + 278 + ], + "blocks": [ + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "lines": [ + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "type": "text", + "content": "Table 4. Our " + }, + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "type": "inline_equation", + "content": "\\mathbf{C}^3" + }, + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "type": "text", + "content": "-MLZSL approach with different backbones for multi-label ZSL and GZSL tasks on the NUS-WIDE dataset. We show the indicators of F1-Score in the case of " + }, + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "type": "inline_equation", + "content": "K \\in 3, 5" + }, + { + "bbox": [ + 46, + 129, + 287, + 173 + ], + "type": "text", + "content": " and mAP. The best results are shown in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 182, + 282, + 278 + ], + "lines": [ + { + "bbox": [ + 50, + 182, + 282, + 278 + ], + "spans": [ + { + "bbox": [ + 50, + 182, + 282, + 278 + ], + "type": "table", + "html": "
Backbones | Task | mAP | F1 (K = 3) | F1 (K = 5)
VGG19 [37] | ZSL | 29.4 | 32.8 | 32.3
GZSL | 10.2 | 15.8 | 19.2
ResNet50 [20] | ZSL | 30.9 | 33.6 | 33.2
GZSL | 10.7 | 15.9 | 19.4
ResNet101 [20] | ZSL | 31.2 | 33.9 | 33.9
GZSL | 10.9 | 16.1 | 19.5
", + "image_path": "0ededf22f384f4ae5ec4f8b10f45a702bb8521ddc0a28618eb5eecf6a9dfdb1f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 50, + 293, + 285, + 453 + ], + "blocks": [ + { + "bbox": [ + 50, + 293, + 285, + 453 + ], + "lines": [ + { + "bbox": [ + 50, + 293, + 285, + 453 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 285, + 453 + ], + "type": "image", + "image_path": "35da844c0da731cac057f029763ba3bb2787c08a55a9372c0d286eebe3ef9a00.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "lines": [ + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "type": "text", + "content": "Figure 6. Attention visualization. where (a) is the attention response of our " + }, + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "type": "inline_equation", + "content": "\\mathbf{C}^3" + }, + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "type": "text", + "content": "-MLZSL when faced with unseen labels. (b) is the comparison of attention visualization results of our " + }, + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "type": "inline_equation", + "content": "\\mathbf{C}^3" + }, + { + "bbox": [ + 46, + 464, + 287, + 509 + ], + "type": "text", + "content": "-MLZSL and BiAM [35] models. See appendix for more results." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 527, + 173, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 173, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 173, + 540 + ], + "type": "text", + "content": "4.5. Multi-Label Learning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 713 + ], + "type": "text", + "content": "Table 5 shows the results of the model for multi-label image classification. The baselines we compare include not only state-of-the-art MLZSL models, but also multi-label image classification models including Logistic Regression [40], WSABIE [43], WARP [17] and CNN-RNN [42]. As can be seen from the results, our model far surpasses many multi-label image classification models and the classic Fast0Tag [49] algorithm in mAP performance. This is because the above models only process the input image into a single semantic vector, and limited image embedding cannot build the semantic diversity for multi-label classification. For other methods such as LESA [22] and BiAM [35], they noticed that the attention regions of different objects in multi-label images are different, and thus define the label" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 310, + 102, + 542, + 239 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "type": "text", + "content": "Table 5. Performance of Multi-label image classification task on NUS-WIDE datasets. The best results are in bold." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 102, + 542, + 239 + ], + "lines": [ + { + "bbox": [ + 310, + 102, + 542, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 102, + 542, + 239 + ], + "type": "table", + "html": "
Method | F1(K=3)(↑) | F1(K=5)(↑) | mAP(↑)
Logistic [40] | 51.1 | 46.1 | 21.6
WARP [17] | 54.4 | 49.4 | 3.1
WSABIE [43] | 53.8 | 49.2 | 3.1
Fast0Tag [49] | 53.8 | 48.6 | 22.4
CNN-RNN [42] | 55.2 | 50.8 | 28.3
Kim et al. [23] | 56.8 | 51.3 | 32.6
LESA ApC [22] | 56.6 | 50.7 | 31.7
LESA [22] | 58.0 | 52.0 | 31.5
BiAM [35] | 59.6 | 53.4 | 47.8
Ours | 59.8 | 53.8 | 48.0
", + "image_path": "2d41ff5bccca30a3291db40de3b266d2f897d4e468be17fac5193a0af7e92cc9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 258, + 545, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 329 + ], + "type": "text", + "content": "related embeddings from the perspective of the spatial domain. However, after feature extraction, our model takes into account that the channel response can be important information representing the class semantics, and this superior performance just verifies the rationality of the exploration." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 353, + 378, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 353, + 378, + 365 + ], + "spans": [ + { + "bbox": [ + 306, + 353, + 378, + 365 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "text", + "content": "In this paper, we focus on the neglect of channel-wise class information and over-reliance on spatial-wise class information in previous MLZSL models, then propose C3-MLZSL structure and the " + }, + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "text", + "content": "-Encoder component. The C3-MLZSL structure first group multi-scale features, then use the " + }, + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "inline_equation", + "content": "(\\mathrm{ML})^{2}\\mathrm{P}" + }, + { + "bbox": [ + 304, + 377, + 545, + 545 + ], + "type": "text", + "content": "-Encoder to calculate the correlation of channels within each group and perform information fusion to get the semantic vectors. These semantic vectors are then aggregated through group attention to learn mutual information between groups. Finally, the model successfully learns channel-class correlation. Extensive experiments on the large-scale NUS-WIDE and Open-Images-V4 datasets show that our model has achieved very competitive results on MLZSL compared with other state-of-the-art models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 568, + 410, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 568, + 410, + 582 + ], + "spans": [ + { + "bbox": [ + 306, + 568, + 410, + 582 + ], + "type": "text", + "content": "6. Acknowledgment" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 592, + 545, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 592, + 545, + 703 + ], + "spans": [ + { + "bbox": [ + 304, + 592, + 545, + 703 + ], + "type": "text", + "content": "This research was supported by fundings from the Key-Area Research and Development Program of Guangdong Province (No. 2021B0101400003), Hong Kong RGC Research Impact Fund (No. R5060-19), Areas of Excellence Scheme (AoE/E-601/22-R), General Research Fund (No. 152203/20E, 152244/21E, 152169/22E, 152211/23E), Shenzhen Science and Technology Innovation Commission (JCYJ20200109142008673), the National Natural Science Foundation of China (No. 62102327), and PolyU Internal Fund (No. P0043932)." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "23866" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Zeynep Akata, Mateusz Malinowski, Mario Fritz, and Bernt Schiele. Multi-cue zero-shot learning with strong supervision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 59-68, 2016. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 180 + ], + "type": "text", + "content": "[2] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2015. 5, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 182, + 287, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 287, + 226 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 287, + 226 + ], + "type": "text", + "content": "[3] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. IEEE transactions on pattern analysis and machine intelligence, 38(7):1425–1438, 2016. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 228, + 287, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 287, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 287, + 271 + ], + "type": "text", + "content": "[4] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Improving semantic embedding consistency by metric learning for zero-shot classification. In European Conference on Computer Vision, pages 730-746. Springer, 2016. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 273, + 287, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 287, + 327 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 287, + 327 + ], + "type": "text", + "content": "[5] Chris Burges, Tal Shaked, Erin Renshaw, Ari Lazier, Matt Deeds, Nicole Hamilton, and Greg Hullender. Learning to rank using gradient descent. In Proceedings of the 22nd international conference on Machine learning, pages 89-96, 2005. 
5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 329, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 329, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 329, + 287, + 373 + ], + "type": "text", + "content": "[6] Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. Synthesized classifiers for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5327-5336, 2016. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "type": "text", + "content": "[7] Zhi Chen, Yadan Luo, Sen Wang, Ruihong Qiu, Jingjing Li, and Zi Huang. Mitigating generation shifts for generalized zero-shot learning. In Proceedings of the 29th ACM International Conference on Multimedia, pages 844-852, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 431, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 431, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 431, + 287, + 485 + ], + "type": "text", + "content": "[8] Zhao-Min Chen, Xiu-Shen Wei, Peng Wang, and Yanwen Guo. Multi-label image recognition with graph convolutional networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5177-5186, 2019. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 487, + 287, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 487, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 53, + 487, + 287, + 531 + ], + "type": "text", + "content": "[9] Xing Cheng, Hezheng Lin, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Nian Shi, and Honglin Liu. Mltr: Multi-label classification with transformer. arXiv preprint arXiv:2106.06195, 2021. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 533, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 287, + 588 + ], + "type": "text", + "content": "[10] Tat-Seng Chua, Jinhui Tang, Richang Hong, Haojie Li, Zhiping Luo, and Yantao Zheng. Nus-wide: a real-world web image database from national university ofSingapore. In Proceedings of the ACM international conference on image and video retrieval, pages 1-9, 2009. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "text", + "content": "[11] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[12] Shay Deutsch, Soheil Kolouri, Kyungnam Kim, Yuri Owechko, and Stefano Soatto. Zero shot learning via multi-scale manifold regularization. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7112-7119, 2017. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 714 + ], + "type": "text", + "content": "[13] Thibaut Durand, Nazanin Mehrasa, and Greg Mori. Learning a deep convnet for multi-label classification with partial" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "labels. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 647-657, 2019. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 108, + 545, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 108, + 545, + 151 + ], + "spans": [ + { + "bbox": [ + 308, + 108, + 545, + 151 + ], + "type": "text", + "content": "[14] Lei Feng, Bo An, and Shuo He. Collaboration based multi-label learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3550-3557, 2019. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "text", + "content": "[15] Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. Advances in neural information processing systems, 26, 2013. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 199, + 545, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 199, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 308, + 199, + 545, + 243 + ], + "type": "text", + "content": "[16] Bin-Bin Gao and Hong-Yu Zhou. Learning to discover multi-class attentional regions for multi-label image recognition. IEEE Transactions on Image Processing, 30:5920-5932, 2021. 1, 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 245, + 545, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 245, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 308, + 245, + 545, + 288 + ], + "type": "text", + "content": "[17] Yunchao Gong, Yangqing Jia, Thomas Leung, Alexander Toshev, and Sergey Ioffe. Deep convolutional ranking for multilabel image annotation. arXiv preprint arXiv:1312.4894, 2013. 1, 2, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 290, + 545, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 290, + 545, + 345 + ], + "spans": [ + { + "bbox": [ + 308, + 290, + 545, + 345 + ], + "type": "text", + "content": "[18] Omkar Gune, Biplab Banerjee, Subhasis Chaudhuri, and Fabio Cuzzolin. Generalized zero-shot learning using generated proxy unseen samples and entropy separation. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4262-4270, 2020. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 347, + 545, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 347, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 308, + 347, + 545, + 390 + ], + "type": "text", + "content": "[19] Jingcai Guo and Song Guo. A novel perspective to zero-shot learning: Towards an alignment of manifold structures via semantic feature expansion. IEEE Transactions on Multimedia, 23:524-537, 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "text", + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 1, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 439, + 545, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 439, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 308, + 439, + 545, + 460 + ], + "type": "text", + "content": "[21] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 462, + 545, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 462, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 308, + 462, + 545, + 506 + ], + "type": "text", + "content": "[22] Dat Huynh and Ehsan Elhamifar. A shared multi-attention framework for multi-label zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8776–8786, 2020. 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 508, + 545, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 508, + 545, + 540 + ], + "spans": [ + { + "bbox": [ + 308, + 508, + 545, + 540 + ], + "type": "text", + "content": "[23] Jin-Hwa Kim, Jaehyun Jun, and Byoung-Tak Zhang. Bilinear attention networks. arXiv preprint arXiv:1805.07932, 2018. 5, 6, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 543, + 545, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 543, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 308, + 543, + 545, + 575 + ], + "type": "text", + "content": "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 578, + 545, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 578, + 545, + 610 + ], + "spans": [ + { + "bbox": [ + 308, + 578, + 545, + 610 + ], + "type": "text", + "content": "[25] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 545, + 656 + ], + "type": "text", + "content": "[26] Elyor Kodirov, Tao Xiang, and Shaogang Gong. Semantic autoencoder for zero-shot learning. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3174-3183, 2017. 2, 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[27] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Learning to detect unseen object classes by between-class attribute transfer. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 951-958. IEEE, 2009. 3" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "23867" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[28] Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. Attribute-based classification for zero-shot visual object categorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(3):453-465, 2014. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "text", + "content": "[29] Chung-Wei Lee, Wei Fang, Chih-Kuan Yeh, and Yu-Chiang Frank Wang. Multi-label zero-shot learning with structured knowledge graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1576–1585, 2018. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 176, + 287, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 176, + 287, + 231 + ], + "spans": [ + { + "bbox": [ + 48, + 176, + 287, + 231 + ], + "type": "text", + "content": "[30] Jingjing Li, Mengmeng Jing, Lei Zhu, Zhengming Ding, Ke Lu, and Yang Yang. Learning modality-invariant latent representations for generalized zero-shot learning. In Proceedings of the 28th ACM International Conference on Multimedia, pages 1348-1356, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 233, + 287, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 233, + 287, + 287 + ], + "spans": [ + { + "bbox": [ + 48, + 233, + 287, + 287 + ], + "type": "text", + "content": "[31] Yanan Li, Donghui Wang, Huanhang Hu, Yuetan Lin, and Yueting Zhuang. Zero-shot recognition using dual visual-semantic mapping paths. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3279–3287, 2017. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 290, + 287, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 290, + 287, + 344 + ], + "spans": [ + { + "bbox": [ + 48, + 290, + 287, + 344 + ], + "type": "text", + "content": "[32] Teng Long, Xing Xu, Youyou Li, Fumin Shen, Jingkuan Song, and Heng Tao Shen. Pseudo transfer with marginalized corrupted attribute for zero-shot learning. In Proceedings of the 26th ACM international conference on Multimedia, pages 1802-1810, 2018. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 347, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 347, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 48, + 347, + 287, + 390 + ], + "type": "text", + "content": "[33] Thomas Mensink, Efstratios Gavves, and Cees GM Snoek. Costa: Co-occurrence statistics for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2441-2448, 2014. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "text", + "content": "[34] Pedro Morgado and Nuno Vasconcelos. Semantically consistent regularization for zero-shot recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6060-6069, 2017. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 439, + 287, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 439, + 287, + 493 + ], + "spans": [ + { + "bbox": [ + 48, + 439, + 287, + 493 + ], + "type": "text", + "content": "[35] Sanath Narayan, Akshita Gupta, Salman Khan, Fahad Shahbaz Khan, Ling Shao, and Mubarak Shah. Discriminative region-based multi-label zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8731-8740, 2021. 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 496, + 287, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 496, + 287, + 551 + ], + "spans": [ + { + "bbox": [ + 48, + 496, + 287, + 551 + ], + "type": "text", + "content": "[36] Mohammad Norouzi, Tomas Mikolov, Samy Bengio, Yoram Singer, Jonathon Shlens, Andrea Frome, Greg S Corrado, and Jeffrey Dean. Zero-shot learning by convex combination of semantic embeddings. In 2nd International Conference on Learning Representations, ICLR 2014, 2014. 2, 3, 5, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 48, + 553, + 287, + 586 + ], + "type": "text", + "content": "[37] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 1, 4, 5, 6, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 588, + 287, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 588, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 48, + 588, + 287, + 632 + ], + "type": "text", + "content": "[38] Richard Socher, Milind Ganjoo, Christopher D Manning, and Andrew Ng. Zero-shot learning through cross-modal transfer. In Advances in neural information processing systems, pages 935-943, 2013. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 678 + ], + "type": "text", + "content": "[39] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich, et al. Going deeper with convolutions. Cvpr, 2015. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[40] Grigorios Tsoumakas and Ioannis Katakis. Multi-label classification: An overview. International Journal of Data Warehousing and Mining (IJDWM), 3(3):1-13, 2007. 8" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 546, + 609 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 546, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 546, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 546, + 127 + ], + "type": "text", + "content": "[41] Andreas Veit, Neil Alldrin, Gal Chechik, Ivan Krasin, Abhinav Gupta, and Serge Belongie. Learning from noisy large-scale datasets with minimal supervision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 839-847, 2017. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 546, + 183 + ], + "type": "text", + "content": "[42] Jiang Wang, Yi Yang, Junhua Mao, Zhiheng Huang, Chang Huang, and Wei Xu. Cnn-rnn: A unified framework for multi-label image classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2285-2294, 2016. 1, 2, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 185, + 546, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 185, + 546, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 546, + 228 + ], + "type": "text", + "content": "[43] Jason Weston, Samy Bengio, and Nicolas Usunier. Wsabie: Scaling up to large vocabulary image annotation. In Twenty-Second International Joint Conference on Artificial Intelligence, 2011. 2, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 230, + 546, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 230, + 546, + 284 + ], + "spans": [ + { + "bbox": [ + 308, + 230, + 546, + 284 + ], + "type": "text", + "content": "[44] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 69-77, 2016. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 285, + 546, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 285, + 546, + 328 + ], + "spans": [ + { + "bbox": [ + 308, + 285, + 546, + 328 + ], + "type": "text", + "content": "[45] Yongqin Xian, Bernt Schiele, and Zeynep Akata. Zero-shot learning-the good, the bad and the ugly. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4582-4591, 2017. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 331, + 546, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 546, + 373 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 546, + 373 + ], + "type": "text", + "content": "[46] Hsiang-Fu Yu, Prateek Jain, Purushottam Kar, and Inderjit Dhillon. Large-scale multi-label learning with missing labels. In International conference on machine learning, pages 593-601. PMLR, 2014. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 375, + 546, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 375, + 546, + 418 + ], + "spans": [ + { + "bbox": [ + 308, + 375, + 546, + 418 + ], + "type": "text", + "content": "[47] Chenrui Zhang, Xiaoqing Lyu, and Zhi Tang. Tgg: Transferable graph generation for zero-shot and few-shot learning. In Proceedings of the 27th ACM International Conference on Multimedia, pages 1641-1649, 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 420, + 546, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 420, + 546, + 463 + ], + "spans": [ + { + "bbox": [ + 308, + 420, + 546, + 463 + ], + "type": "text", + "content": "[48] Li Zhang, Tao Xiang, and Shaogang Gong. Learning a deep embedding model for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2021-2030, 2017. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 464, + 546, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 464, + 546, + 508 + ], + "spans": [ + { + "bbox": [ + 308, + 464, + 546, + 508 + ], + "type": "text", + "content": "[49] Yang Zhang, Boqing Gong, and Mubarak Shah. Fast zero-shot image tagging. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 5985-5994. IEEE, 2016. 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 510, + 546, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 510, + 546, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 510, + 546, + 553 + ], + "type": "text", + "content": "[50] Ziming Zhang and Venkatesh Saligrama. Zero-shot learning via joint latent similarity embedding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6034-6042, 2016. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 554, + 546, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 554, + 546, + 609 + ], + "spans": [ + { + "bbox": [ + 308, + 554, + 546, + 609 + ], + "type": "text", + "content": "[51] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 
3" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "23868" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_content_list.json b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..267a02203022c0a78abc52783ad7ff4e97564d36 --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_content_list.json @@ -0,0 +1,1685 @@ +[ + { + "type": "text", + "text": "1% VS 100%: Parameter-Efficient Low Rank Adapter for Dense Predictions", + "text_level": 1, + "bbox": [ + 98, + 130, + 870, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dongshuo Yin $^{1,2,\\dagger}$ , Yiran Yang $^{1,2,\\dagger}$ , Zhechao Wang $^{1,2}$ , Hongfeng Yu $^{1}$ , Kaiwen Wei $^{1,2}$ , Xian Sun $^{1,2,*}$ $^{1}$ Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences \n $^{2}$ School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences", + "bbox": [ + 99, + 178, + 875, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yindongshuo19, yangyiran19, wangzhechao21, weikaiwen19}@mails.ucas.ac.cn {yuhf, sunxian}@aircas.ac.cn", + "bbox": [ + 161, + 271, + 807, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 339, + 313, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fine-tuning large-scale pre-trained vision models to downstream tasks is a standard technique for achieving state-of-the-art performance on computer vision benchmarks. However, fine-tuning the whole model with millions of parameters is inefficient as it requires storing a samesized new model copy for each task. In this work, we propose LoRand, a method for fine-tuning large-scale vision models with a better trade-off between task performance and the number of trainable parameters. LoRand generates tiny adapter structures with low-rank synthesis while keeping the original backbone parameters fixed, resulting in high parameter sharing. To demonstrate LoRand's effectiveness, we implement extensive experiments on object detection, semantic segmentation, and instance segmentation tasks. By only training a small percentage (1% to 3%) of the pre-trained backbone parameters, LoRand achieves comparable performance to standard fine-tuning on COCO and ADE20K and outperforms fine-tuning in low-resource PASCAL VOC dataset.", + "bbox": [ + 75, + 371, + 473, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 686, + 209, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the rapid development of computer vision, parameters in deep models are surging. 
Giant models need to be trained with massive resources to achieve superior performance [3, 17, 47, 58], which is often unavailable to many academics and institutions. \"Pretrain & Finetuning\" paradigm is widely used to alleviate this dilemma. Teams with sufficient computation resources utilise enormous datasets [2, 9, 40, 50] to train superior backbones [4, 32, 40, 48] and optimise the models with ideal performances. Models pretrained in this way usually have a su", + "bbox": [ + 75, + 713, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/284244850d213821e678546c56bd87e129870b5533c18d88c83c8caed88f03ff.jpg", + "image_caption": [ + "Figure 1. Comparisons of trainable backbone parameters between our methods (red) and fine-tuning (black). In COCO, we achieve advanced performances and outperform most existing backbones with only $0.9\\sim 2.5\\mathrm{M}$ new backbone parameters (Cascade-RCNN is employed as the detector). The fine-tuning paradigm produces massive redundant backbone parameters, whereas our approach saves over $97\\%$ of hardware resources with competitive performances. The sizes of the circles intuitively compare the number of trainable parameters." + ], + "image_footnote": [], + "bbox": [ + 522, + 339, + 870, + 534 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "perior understanding of homogeneous data. After that, researchers with limited computational resources can transfer the understanding capabilities of the pre-trained models to downstream tasks with promising performances by finetuning [1,26,46,53].", + "bbox": [ + 496, + 688, + 890, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, the fine-tuned model will produce a new set of parameters as large as the pre-trained model. New parameters are independent of the pre-trained models and unshareable, which are very hardware intensive for cloud service providers [23, 49]. Figure 1 compares the parameter quantities of some remarkable backbones and their performances on the COCO [28] dataset. Recent advances in natural language processing (NLP) [30, 38] show that large pre-trained models trained with rich data have strong gener", + "bbox": [ + 496, + 763, + 892, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 93, + 875, + 222, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Equal contribution.", + "bbox": [ + 96, + 887, + 205, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20116", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b58c6b179031f202b13762129589750230c30d575d80af5f850f0a002de939e8.jpg", + "image_caption": [ + "Swin-Transformer Block" + ], + "image_footnote": [], + "bbox": [ + 122, + 87, + 251, + 318 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7edbd25abc95b7d7da6fcc332f29fee39ad6e9b53dd1dfb97cf12b8d90b381de.jpg", + "image_caption": [ + "LoRand Layer", + "Figure 2. 
Architecture of the adapter module and its integration with the Transformer. Left: We add two LoRand structures to each SwinBlock located behind the W/SW-MSA and MLP structures respectively. Right: LoRand contains two Multi-branch low-rank projections and nonlinearity. We include skip-connection to LoRand to enhance its robustness." + ], + "image_footnote": [], + "bbox": [ + 267, + 87, + 424, + 320 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "alisability, which means most parameters in the pre-trained models can be shared with the new tasks [22, 36, 37, 44, 59]. Moreover, recent literature demonstrates that the feature understanding of pre-trained models could be reduced when they are fine-tuned in low-resource situations [12, 36]. To tackle these issues, NLP researchers propose two new training paradigms based on pre-trained models: Adapter Tuning [22] and Prompt Tuning [30], both of which tune the new models by fixing the pre-trained parameters and adding a few trainable structures (less than $10\\%$ of the backbone). These paradigms create a new buzz in NLP and achieve impressive performances which can be competitive with finetuning [12, 22, 30, 36-38, 44, 59]. Advances in NLP also shed new light on computer vision. Jia et al. [24] propose Visual Prompt Tuning (VPT) and demonstrate that VPT can outperform fine-tuning on image classification tasks by training a small number of trainable parameters. Nevertheless, VPT shows weakness on more challenging dense predictions like semantic segmentation compared with finetuning [24].", + "bbox": [ + 75, + 458, + 468, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To find a parameter-efficient paradigm with promising performance in computer vision, we explore the potential of Adapter Tuning for visual dense predictions. We employ the advanced Swin Transformer [32] trained with ImageNet-22K [9] as the pre-trained model. After that, we add bottleneck adapter structures [22] behind each SwinBlock and freeze the original backbone parameters when training, but this approach cannot achieve comparable performance to fine-tuning as mentioned in [24]. In the experi", + "bbox": [ + 75, + 763, + 467, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "periments, we find that the models perform better with sparser adapter structures. To improve the performance of Adapter Tuning, we propose Low-Rank Adapter (LoRand) to reduce the adapter parameters, as shown in Figure 2. LoRand sparsely parameterizes the matrices in adapters by low-rank synthesis. Specifically, the projection matrix of the fully-connected layer (FC) in LoRand is a product of multiple low-rank matrices, which reduces FC parameters by more than $80\\%$ . We implement extensive experiments on object detection (PASCAL VOC [14]), semantic segmentation (ADE20K [62]), and instance segmentation (MS COCO [28]) to verify the capability of LoRand. Experimental results show that LoRand-Tuning is comparable to fine-tuning on multiple tasks with only $1.8\\%$ to $2.8\\%$ new backbone parameters, which suggests that the pre-trained backbone parameters can be fully shared. More interestingly, our method completely outperforms fine-tuning on the PASCAL VOC dataset, illustrating that LoRand-Tuning can reduce the impairment of fine-tuning on pre-trained models in low-resource configurations. Our method demonstrates that the LoRand-Tuning paradigm can substantially save storage resources and achieve competitive performances on most dense prediction tasks. 
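As a rough sketch of this low-rank synthesis idea (not the paper's exact LoRand module, whose multi-branch projection design and dimensions are not reproduced here), a fully-connected projection can be parameterized as the product of two low-rank factors; the rank and feature sizes below are illustrative assumptions:

import torch
import torch.nn as nn

class LowRankLinear(nn.Module):
    # The weight is synthesized as a product of two low-rank factors, W ~ A @ B,
    # so a d_out x d_in projection stores only rank * (d_in + d_out) weights.
    # Illustrative sketch only; LoRand itself uses multi-branch low-rank projections.
    def __init__(self, in_features, out_features, rank=8):
        super().__init__()
        self.A = nn.Parameter(torch.randn(out_features, rank) * 0.02)
        self.B = nn.Parameter(torch.randn(rank, in_features) * 0.02)
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        weight = self.A @ self.B  # materialize the (out, in) matrix, rank at most `rank`
        return nn.functional.linear(x, weight, self.bias)

With in_features = out_features = 768 and rank 8, the two factors hold roughly 12K weights instead of the roughly 590K of a dense layer, illustrating how a low-rank product can cut FC parameters by well over 80%.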
In summary, our contributions are three-fold:", + "bbox": [ + 496, + 90, + 890, + 452 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We demonstrate that visual pre-trained models are highly generalisable and shareable. With our training methods, new tasks require only a few trainable parameters to achieve performances comparable to finetuning, which can save massive hardware resources.", + "- We propose the LoRand structure for sparser adapters based on low-rank synthesis. We demonstrate that the backbone parameters in fine-tuning are highly redundant, which can be replaced by $1.8\\%$ to $2.8\\%$ additional parameters in LoRand.", + "- Extensive experiments on object detection, semantic segmentation, and instance segmentation show that LoRand-Tuning can achieve remarkable performances and reduce massive new parameters in challenging dense prediction tasks." + ], + "bbox": [ + 516, + 462, + 890, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 715, + 640, + 732 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Training Paradigms in NLP", + "text_level": 1, + "bbox": [ + 500, + 741, + 750, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Computer vision has been continuously inspired by NLP in recent years, including the visual transformer series [5,13,29,32] and self-supervised MAE series [15,19,60]. In fact, NLP is leading new training trends different from finetuning. Fine-tuning produces a new parameter set for each new task, which is parametrically inefficient for plenty of linguistic tasks [22,30]. To solve this problem, [30] and [22] have proposed \"Prompt Tuning\" and \"Adapter Tuning\" respectively, both of which fix all parameters of the backbone", + "bbox": [ + 496, + 763, + 890, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20117", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and plug a few tiny trainable structures (less than $10\\%$ of the backbone) to adapt the pre-trained model to the new tasks. \"Prompt tuning\" adds learnable parameters (also known as prompts) to the input or intermediate layers to change the input space of the new tasks. \"Prompts\" can motivate the model to remember knowledge learned in the previous tasks. \"Adapter tuning\" adds learnable bottleneck structures after each block to connect the pre-trained model with new tasks. Adapter and prompt demonstrate the coexistence of parameter efficiency and high performances in NLP, stimulating studies in CV. [24] proposes Visual Prompt Tuning (VPT) for image classification and semantic segmentation, but the performance of VPT on semantic segmentation is still far from fine-tuning. This phenomenon motivates us to explore whether adapter tuning can bring a new paradigm in computer vision with fewer parameters and better performances. In this work, we try to explore parameter-efficient and high-performance adapter structures.", + "bbox": [ + 75, + 90, + 472, + 363 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Adapter Tuning", + "text_level": 1, + "bbox": [ + 76, + 377, + 236, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Adapters have been widely studied in NLP. Houlsby et al. [22] first add a bottleneck adapter structure to the transformer blocks and fix the original backbone, which achieves comparable performances to fine-tuning. 
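For readers unfamiliar with this design, a minimal PyTorch-style sketch of such a bottleneck adapter is given below. This is our own illustrative code, not the implementation from [22] or from this paper; the module and argument names (BottleneckAdapter, dim, mid_dim) are assumptions made only for exposition.

```python
import torch
import torch.nn as nn

class BottleneckAdapter(nn.Module):
    # Houlsby-style bottleneck adapter: down-projection, non-linearity,
    # up-projection, and a residual (skip) connection around the module.
    def __init__(self, dim, mid_dim):
        super().__init__()
        self.down = nn.Linear(dim, mid_dim)
        self.up = nn.Linear(mid_dim, dim)
        self.act = nn.GELU()

    def forward(self, x):
        return x + self.up(self.act(self.down(x)))
```

During adapter tuning, only such modules (together with the decoder and head) receive gradients, while the pre-trained backbone weights stay frozen.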
Figure 3 illustrates the differences between fine-tuning and adaptertuning. [37,44,59] further reduce parameters in the adapter with closer performances to fine-tuning. [18,34,39] outperform fine-tuning on low-resource tasks, demonstrating that more parameters may not improve performance when finetuning pre-trained models [36]. In computer vision, [41] add convolutional adapters to the ResNet [20] and obtain competitive results in image classification. Adapter concept has also been applied in multimodal [33], vision-and-language [51], and domain adaptation [56], but these methods are only applicable under specific conditions. [7, 21, 25, 31] investigate the potential of adapter-tuning for visual classification. [8] apply the adapter structure to visual dense predictions without fixing any original parameters, which indeed trades more parameters for better performances.", + "bbox": [ + 75, + 401, + 472, + 690 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Low-rank Approximation", + "text_level": 1, + "bbox": [ + 76, + 702, + 312, + 718 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The low-rank approximation uses multiple low-dimensional tensors to approximate a larger tensor with higher dimensions. Tensor dimensions and sizes in machine learning are very large, so low-rank approximations are widely used in face recognition [61], distributed training [54], transfer learning [11], and cross-domain [10]. A $b \\times c$ matrix $M$ can be approximated with $N$ low-rank matrices $Q$ by the following equation:", + "bbox": [ + 75, + 727, + 468, + 849 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nM _ {b \\times c} = \\prod_ {i = 1} ^ {N} Q _ {r _ {i} \\times s _ {i}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 862, + 468, + 904 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/076b68aad349b00b6a3bfa1feb3c01031b3a22e132f2f3e1d5dbafcabaff3fd7.jpg", + "image_caption": [ + "Figure 3. Comparison between Adapter-Tuning and Fine-Tuning paradigms. Fine-Tuning tunes ( $\\mathcal{A}$ ) all parameters delivered by the pre-trained model. Adapter-Tuning freezes ( $\\mathcal{A}$ ) all structures and parameters in the pre-trained model and only trains ( $\\mathcal{A}$ ) the additional parameters in adapters. Parameters in the decoder and head are trainable in both paradigms." + ], + "image_footnote": [], + "bbox": [ + 506, + 90, + 888, + 287 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $N$ has different values depending on the approximation methods, we implement low-rank approximation of the adapter matrices by heuristic learning.", + "bbox": [ + 496, + 407, + 890, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 467, + 589, + 482 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we will elaborate on the proposed low-rank adapter (LoRand) in three parts: adapter tuning paradigm, LoRand, and parameter analysis.", + "bbox": [ + 496, + 492, + 890, + 539 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Adapter Tuning Paradigm", + "text_level": 1, + "bbox": [ + 500, + 547, + 738, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For dataset $D = \\{(x_{i},y_{i})\\}_{i = 1}^{N}$ , fine-tuning calculates the loss between inference results and labels according to the formula:", + "bbox": [ + 496, + 570, + 890, + 614 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL (D, \\theta) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 623, + 890, + 666 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $f_{\\theta}$ denotes the network forward function and loss represents the loss function. After that, $\\theta$ is optimized through", + "bbox": [ + 496, + 674, + 890, + 719 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\theta \\leftarrow \\underset {\\theta} {\\arg \\min } L (D, \\theta). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 617, + 719, + 890, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In adapter tuning paradigm, parameters consist of two parts, including parameters in adapter $\\theta_{A}$ and parameters in the original architecture $\\theta$ . Here, $\\theta$ is further divided into frozen part $\\theta_{F}$ and trainable part $\\theta_{T}$ , noted as $\\theta = \\{\\theta_{F},\\theta_{T}\\}$ . Let $\\Omega$ be all the trainable parameters, then $\\Omega = \\{\\theta_{A},\\theta_{T}\\}$ . The loss function and optimization formula in adapter can be written as:", + "bbox": [ + 496, + 750, + 890, + 853 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL \\left(D, \\theta_ {F}, \\Omega\\right) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta_ {F}, \\Omega} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 862, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20118", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1121da732dac8d417df762986ffe346f6c0ca9d44752793fbadaa91c64385b68.jpg", + "image_caption": [ + "Figure 4. Left: Multi-branch projection in LoRand. The down-projection $W^{D}$ and up-projection $W^{U}$ matrices are the summation of $\\alpha$ branches $W_{1}^{D}(W_{1}^{U})\\ldots W_{\\alpha}^{D}(W_{\\alpha}^{U})$ . $K_{i}$ in $i$ -th branch is shared between $W_{i}^{D}$ and $W_{i}^{U}$ . All the $P, Q,$ and $K$ are trainable, while all the $W$ matrices are calculated. Right: Comparisons of the same-sized projection matrices between LoRand and Adapter. $(m,n)$ in the table are typical values in SwinBlocks. LoRand has far fewer parameters than Adapter. With the same projection dimension, LoRand saves over 80% parameters of the Adapter in Swin Transformers. $(\\alpha ,\\beta)$ here are (2,8), the same as the experiments." + ], + "image_footnote": [], + "bbox": [ + 86, + 90, + 665, + 335 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/5eca91dd2736066fb84fb54d46ca61d14724c9686fdcea2d77de0ad60e1368a3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
(m,n)P_LoRandP_Adapter%
(96,48)4736921651.39%
(192,96)93443686425.35%
(384,192)1856014745612.59%
(768,384)369925898246.27%
…………
", + "bbox": [ + 669, + 148, + 880, + 279 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\Omega \\leftarrow \\underset {\\Omega} {\\arg \\min } L (D, \\theta_ {F}, \\Omega). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 441, + 468, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. LoRand", + "text_level": 1, + "bbox": [ + 76, + 474, + 176, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Before introducing LoRand, we first review the existing adapter structure. Conventional adapters are bottleneck structures containing a down-projection, an up-projection, and a non-linear activation function. Besides, adapters ensure the robustness of the model by adding residual [20] structures. Adapter layer can be formulated as follows:", + "bbox": [ + 75, + 497, + 468, + 589 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA ^ {l} = U ^ {l} \\left(G e L U (D ^ {l} (x))\\right) + x, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 595, + 468, + 616 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $U^l$ and $D^l$ represent the up and down projections in the $l$ -th adapter layer, and GeLU is the activation function. It is clear that the parameters in adapter come from the projections. The projection process can be written as:", + "bbox": [ + 75, + 623, + 468, + 684 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ny = W x + b, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 694, + 468, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "which means most adapter parameters are in $W$ .", + "bbox": [ + 76, + 719, + 398, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To reduce the adapter parameters, we propose a low-rank adapter (LoRand) structure to replace the $W$ in the projection structures. Figure 2 shows the simplified structure of LoRand. Here we approximate not a specific matrix $W$ but an ideal matrix $W_{best}$ that can transform the feature space of the pre-trained model into new tasks by heuristic learning. The approximation matrix $\\hat{W}$ has the same size as $W$ , but the low-rank design makes $\\hat{W}$ have far fewer free degrees than a common $W$ .", + "bbox": [ + 75, + 734, + 468, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we synthesize each $W$ by multiplying three low-rank matrices $P \\in \\mathbb{R}^{\\beta \\times m}$ , $K \\in \\mathbb{R}^{\\beta \\times \\beta}$ , $Q \\in \\mathbb{R}^{\\beta \\times n}$", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "that is:", + "bbox": [ + 500, + 444, + 547, + 455 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW = P ^ {T} K Q, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 645, + 458, + 890, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\beta \\ll \\min(m, n)$ ensuring that $P$ and $Q$ are low-rank matrices. $K$ can be regarded as a kernel matrix that controls the parameter size of LoRand.", + "bbox": [ + 498, + 483, + 890, + 527 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After that, we add multi-branch structures to LoRand to increase the robustness and stability of low-rank matrices, which is inspired by MoE [43] and adaboost [45,52]. 
Every $W$ consists of $\\alpha$ branches, that is:", + "bbox": [ + 498, + 529, + 890, + 589 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW = \\sum_ {i = 1} ^ {\\alpha} W _ {i} = \\sum_ {i = 1} ^ {\\alpha} P _ {i} ^ {T} K _ {i} Q _ {i}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 601, + 890, + 640 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition, we share the kernel matrix $K$ of the two projection layers within each branch. We hope the sharing mechanism can promote the coherence of two projection layers during training process. Besides, the shared $K$ also slightly reduces the number of LoRand parameters. Up to now, the $W^{U}$ and $W^{D}$ in a complete LoRand structure can be represented as:", + "bbox": [ + 496, + 651, + 893, + 756 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {U} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {U} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {U}\\right) ^ {T} K _ {i} Q _ {i} ^ {U}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 768, + 890, + 809 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {D} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {D} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {D}\\right) ^ {T} K _ {i} Q _ {i} ^ {D}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 821, + 890, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $K_{i}$ is shared in $W^{U}$ and $W^{D}$ . Figure 4 presents the detailed designs of the multi-branch projection.", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "20119", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Parameter Analysis", + "text_level": 1, + "bbox": [ + 76, + 90, + 266, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we will compare the parameters of Lo-Rand and typical adapter [22] with the same size of projection matrix.", + "bbox": [ + 76, + 114, + 468, + 159 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Adapter Let $m$ be the input dimension of the adapter and $n$ be the middle layer dimension after down projection. Then the number of parameters in each adapter is $2mn$ (ignoring the few biases). In general, adapter tuning places two adapter modules in each block, so the space complexity of all adapter parameters in $\\gamma$ blocks can be written as:", + "bbox": [ + 76, + 167, + 468, + 258 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nO (4 \\gamma m n). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 233, + 271, + 468, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LoRand According to section 3.2, each $W$ contains $\\alpha$ sets of $\\{P,Q,K\\}$ , that is:", + "bbox": [ + 76, + 306, + 468, + 339 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\alpha \\left(m \\beta + \\beta^ {2} + n \\beta\\right). \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 349, + 468, + 367 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Each LoRand consists of two $W$ and $\\alpha$ shared $K$ , so the parameter quantity of each LoRand is:", + "bbox": [ + 76, + 378, + 468, + 409 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n2 \\alpha (m \\beta + \\beta^ {2} + n \\beta) - \\alpha \\beta^ {2} = 2 \\alpha \\beta (m + n + \\beta / 2). 
\\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 419, + 468, + 438 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Each block has two LoRand structures, so the number of parameters in $\\gamma$ blocks is:", + "bbox": [ + 76, + 449, + 468, + 479 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n4 \\alpha \\beta \\gamma (m + n) + 2 \\alpha \\beta^ {2} \\gamma . \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 491, + 468, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As $\\alpha, \\beta, \\gamma \\ll \\min(m, n)$ , the space complexity here can be written as:", + "bbox": [ + 76, + 520, + 468, + 547 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nO \\left(4 \\alpha \\beta \\gamma (m + n)\\right). \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 550, + 468, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Comparison between Formulas 12 and 16 can be simplified as:", + "bbox": [ + 76, + 573, + 468, + 602 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nO (m n), \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 243, + 603, + 468, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 76, + 627, + 106, + 638 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nO (\\alpha \\beta (m + n)). \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 642, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given that $\\alpha, \\beta \\ll \\min(m, n)$ , the space complexity of LoRand is far lower than the typical adapter. The table in Figure 4 illustrates that LoRand saves most Adapter parameters with the same projecting dimension.", + "bbox": [ + 76, + 665, + 468, + 726 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 739, + 209, + 757 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate LoRand on multiple dense prediction tasks, including object detection, semantic segmentation, and instance segmentation. We also evaluate LoRand under low-resource conditions. We first describe our experimental setup in Section 4.1, including pre-trained backbones, baselines, LoRand settings, and downstream tasks. Then we present the main results of three benchmarks in Section 4.2. We also implement ablation study in Section 4.3 to investigate the impact of structural settings in LoRand.", + "bbox": [ + 76, + 763, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 500, + 90, + 689, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pretrained Backbones We conduct experiments on the advanced Swin Transformer [32] architectures. All backbones in this section are pre-trained by ImageNet-22k [9]. 
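As a concrete illustration of Sections 3.2 and 3.3, the following is a minimal PyTorch-style sketch of a LoRand projection layer: each weight matrix is synthesized as a sum over α branches of P_i^T K_i Q_i, with the kernel K_i shared between the down- and up-projections of a branch (Eqs. 8-11). This is our own sketch under stated assumptions (the class and argument names and the 0.02 initialisation scale are ours; biases are omitted, as in the parameter analysis of Section 3.3), not the authors' released implementation.

```python
import torch
import torch.nn as nn

class LoRandLayer(nn.Module):
    # Sketch of one LoRand module: multi-branch low-rank down/up projections
    # with a GELU non-linearity and a skip connection (cf. Figure 2).
    def __init__(self, m, n, alpha=2, beta=8):
        super().__init__()
        def factors(rows, cols):
            return nn.ParameterList(
                [nn.Parameter(0.02 * torch.randn(rows, cols)) for _ in range(alpha)])
        # K_i is shared by the down- and up-projection of branch i (Eqs. 10-11)
        self.K = factors(beta, beta)
        self.P_down, self.Q_down = factors(beta, m), factors(beta, n)
        self.P_up, self.Q_up = factors(beta, n), factors(beta, m)
        self.act = nn.GELU()

    def synthesize(self, P, Q):
        # W = sum_i P_i^T K_i Q_i  (Eq. 9)
        return sum(p.t() @ k @ q for p, k, q in zip(P, self.K, Q))

    def forward(self, x):
        w_down = self.synthesize(self.P_down, self.Q_down)  # shape (m, n)
        w_up = self.synthesize(self.P_up, self.Q_up)        # shape (n, m)
        return x + self.act(x @ w_down) @ w_up              # Eq. 6 with synthesized W
```

With m = 96, n = 48, α = 2 and β = 8, this layer holds 2αβ(m + n) + αβ² = 4736 trainable weights, matching the first row of the table in Figure 4, versus 2mn = 9216 for a plain adapter with the same projection sizes.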
Pre-trained models are provided by OpenMMLab [6].", + "bbox": [ + 498, + 122, + 890, + 184 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines We compare LoRand with three other common training methods:", + "bbox": [ + 498, + 215, + 890, + 247 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) FULL: update all parameters in the architecture.", + "(b) FIXED: fix pre-trained parameters in Swin and train other parts of the architecture (neck, head).", + "(c) ADAPTER: add two trainable adapter structures in each SwinBlock following [22], and freeze other parts of the backbone. We evaluate two forms of adapter with different middle layer dimensions $(D_{ML})$ :", + "- ADAPTER-B: $D_{ML}$ is a half of input dimension.", + "- ADAPTER-T: $D_{ML}$ is a quarter of input dimension." + ], + "bbox": [ + 498, + 258, + 890, + 425 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LoRand Settings We conducted experiments on three Lo-Rand variants, which have different branch numbers $\\alpha$ and kernel matrix dimensions $\\beta$ .", + "bbox": [ + 498, + 445, + 890, + 489 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- LoRand: $\\alpha = 2$ , $\\beta = 8$ (Standard).", + "- LoRand+: $\\alpha = 4, \\beta = 8$ .", + "- LoRand++: $\\alpha = 4, \\beta = 16$ ." + ], + "bbox": [ + 517, + 500, + 764, + 564 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Downstream Tasks We conducted experiments on COCO [28], ADE20K [62], and PASCAL VOC [14] benchmarks to widely evaluate LoRand's performance on main dense prediction tasks.", + "bbox": [ + 498, + 583, + 890, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "COCO 2017 [28] is the most commonly used dataset for object detection and instance segmentation, which contains 118K training and 5K validation images. We perform experiments on the validation set. For a fair comparison, all experiments performed on COCO employ Cascade MASK R-CNN [32] as the detector.", + "bbox": [ + 498, + 643, + 890, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ADE20K [62] is the most widely used semantic segmentation dataset, which contains 20K training and 2K validation images. We also conduct experiments on the ADE20K validation set and utilise UperNet [57] as the framework.", + "bbox": [ + 498, + 734, + 890, + 795 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PASCAL VOC 0712 [14] is also widely used in object detection, which contains about 16K training and 5K validation images. VOC 0712 is much smaller than the latest benchmarks, so we treat it as a low-resource case. We adopt Faster RCNN [42] as the detector for VOC 0712.", + "bbox": [ + 498, + 795, + 890, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "All our experiments are conducted with 8x NVIDIA Tesla V100 GPUs. The experiments on PASCAL VOC and", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20120", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/78ed1d7df5666048085291a177d60527e41079be4303c0fa2fdf632339889cc3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Swin-L (198M)Trained* Params%ΔFullExtra StructurePascal VOC (Faster RCNN)ADE20K (UperNet)
AP_BoxΔLoRandmIoUΔLoRand
Baselines
FULL198.58 M100.00 %-X84.43 %- 2.69 %53.25 %+ 1.34 %
FIXED0.00 M0.00 %- 100.00 %X85.19 %- 1.93 %32.21 %- 19.70 %
ADAPTER-B32.04 M16.13 %- 83.87 %80.93 %- 6.19 %46.23 %- 5.68 %
ADAPTER-T16.04 M8.08 %- 91.92 %78.10 %- 9.02 %43.51 %- 8.40 %
Our Methods
LORAND3.59 M1.84 %- 98.16 %87.12 %-50.67 %-
LORAND+7.19 M3.62 %- 96.38 %87.63 %+ 0.51 %51.13 %+ 0.46 %
LORAND++14.24 M7.17 %- 92.83 %88.11 %+ 0.99 %51.87 %+ 1.20 %
", + "bbox": [ + 86, + 88, + 885, + 291 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/4c26357a7976afde004ccdb1b5d7da94ceef118863e7dcbd3c2763f83235aa15.jpg", + "table_caption": [ + "Table 1. Results of baselines and our methods on Pascal VOC and ADE20K benchmarks. Swin-L is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones." + ], + "table_footnote": [], + "table_body": "
Swin-B (89M)Trained* Params%ΔFullExtra StructureCOCO (Cascade Mask R-CNN)
AP_BoxΔLoRandAP_MaskΔLoRand
Baselines
FULL89.14 M100.00 %-X51.90 %+0.80 %45.00 %+0.90 %
FIXED0.00 M0.00 %-100.00 %X15.30 %-35.80 %10.80 %-33.8 %
ADAPTER-B14.38 M16.13 %-83.87 %46.50 %-4.60 %40.20 %-3.90 %
ADAPTER-T7.20 M8.08 %-91.92 %43.20 %-7.90 %38.70 %-5.40 %
Our Methods
LORAND2.39 M2.76 %-97.24 %51.10 %-44.10 %-
LORAND+4.73 M5.31 %-94.69 %51.20 %+0.10 %44.30 %+0.20 %
LORAND++9.32 M10.46 %-89.54 %51.50 %+0.40 %44.40 %+0.30 %
", + "bbox": [ + 86, + 347, + 888, + 547 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Results of baselines and our methods on COCO benchmarks. Swin-B is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones.", + "bbox": [ + 75, + 558, + 892, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ADE20K are based on Swin-S, Swin-B, and Swin-L pretrained models. Limited by GPU memory, the COCO experiments are based on Swin-T, Swin-S, and Swin-B.", + "bbox": [ + 75, + 609, + 468, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Main Results", + "text_level": 1, + "bbox": [ + 76, + 665, + 217, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first compare the trainable backbone parameters and performance of these methods on three benchmarks in Tables 1 and 2. Table 1 shows the results of PASCAL VOC and ADE20K datasets based on Swin-L, and Table 2 shows the results of COCO based on Swin-B. From Tables 1 and 2, we can see that:", + "bbox": [ + 75, + 689, + 468, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "1) LoRand can effectively address the dilemma of fine-tuning in low-resource situations. Table 1 shows that FIXED outperforms FULL on the PASCAL VOC dataset, which implies that the powerful generalization ability of pre-trained model is severely weakened during fine-tuning. Fine-tuning with low-resource data reduces the feature understanding of pre-trained models, which leads to the poor performance on downstream tasks. LoRand avoids this dis", + "bbox": [ + 75, + 779, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "advantage by fixing the original parameters. More importantly, LoRand can absorb features from the new data by its smaller trainable structures. Table 1 indicates that LoRand outperforms FULL and FIXED by $2.69\\%$ and $1.93\\%$ on the low-resource dataset with only $1.84\\%$ trainable backbone parameters. LoRand+ and LoRand++ also outperform FULL by $3.2\\%$ and $3.68\\%$ with $3.62\\%$ and $7.17\\%$ backbone parameters. In fact, there are many other common computer vision datasets with similar volumes to the PASCAL VOC, including CUB-200-2011 [55], Oxford 102 Flowers [35], Stanford Cars [27], and Caltech-256 [16]. The prevalence of \"Pretrained & Finetuning\" leads us to focus more on giant benchmarks, but Table 1 suggests we need a better training paradigm to cope with many low-resource situations in industrial applications. LoRand-Tuning proves to be a competitive candidate who brings promising performance and parameter-efficient approaches to low-resource cases.", + "2) LoRand effectively balances the number of trainable backbone parameters and downstream task per" + ], + "bbox": [ + 496, + 609, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20121", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "formance. Tables 1 and 2 demonstrate that LoRand (standard) performs very closely to FULL on large benchmarks with only $1.84\\%$ to $2.76\\%$ trainable parameters. 
By tuning less than 3.6M backbone parameters, LoRand (standard) achieves $50.67\\%$ (mIOU) on ADE20K, and $51.10\\%$ $(\\mathrm{AP}_{\\mathrm{Box}})$ / $44.10\\%$ $(\\mathrm{AP}_{\\mathrm{Mask}})$ on COCO, which is only about $1.5\\%$ off on average compared to FULL. LoRand+ and LoRand++ further reduce the gap between these two paradigms to approximately $1\\%$ with slight parameter increases. For Swin-L, LoRand saves about 195M parameters per copy compared to FULL. For Swin-B, LoRand saves about $86\\mathrm{M}$ . These results are interesting, which means we do not have to spend plenty of hardware resources to store these redundant parameters. Industrial service providers deliver thousands of model training tasks every day. With LoRand-Tuning, millions of gigabytes per year for model storage could be saved.", + "bbox": [ + 76, + 90, + 472, + 347 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3) LoRand effectively broadens the potential of conventional parameter-efficient adapter structures in dense predictions. From the results, we can draw similar conclusions to [24] that the standard adapter [22] performs worse than fine-tuning on dense predictions. Tables 1 and 2 illustrate that the ADAPTER's performance is far from FULL, although it reduces $80\\%$ of trainable backbone parameters. Also adding new structures, LoRand achieves comparable performance to FULL by training fewer parameters than the ADAPTER. Overall, Tables 1 and 2 demonstrate the feasibility of parameter-efficient tuning paradigm in visual dense prediction tasks.", + "bbox": [ + 76, + 349, + 470, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparisons with other fine-tuned backbone. We then show the comparisons of LoRand with some other remarkable fine-tuned backbones in Table 3. Table 3a shows the results based on UperNet and ADE20K, and 3b shows the results based on Cascade MASK R-CNN and COCO. Table 3 shows that LoRand (based on Swin-Transformer) can outperform most existing fine-tuned backbones with less than 2M parameters. Compared to these backbones, LoRand not only presents more robust and superior results but also saves massive hardware resources in this era of parameter explosion. Specifically, LoRand (Swin-T) exceeds COCO by $1.9\\%$ $\\mathrm{(AP_{Box})}$ and $1.2\\%$ $\\mathrm{(AP_{Mask})}$ with 80.12M fewer new backbone parameters than ResNeXt-101-64. Similarly, LoRand (Swin-L) surpasses $5.82\\%$ (mIoU) on ADE20K with 40.41M fewer trainable backbone parameters than ResNet-101.", + "bbox": [ + 76, + 535, + 472, + 775 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparisons on different backbone scales. In addition to Swin-L and Swin-B, we also conduct extensive experiments on Swin-S and Swin-T. We illustrate the performance of baselines and LoRand on multiple backbones. Figure 5 shows the performance of the six methods on different backbone scales, which includes three Swin variants for each benchmark. As FIXED's performance on COCO and ADE20K is too low to display, we only show FIXED's re", + "bbox": [ + 76, + 779, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ac81b040aef3ed5169607b06418b5ad765aa8022c1df5b4c5231108821303a27.jpg", + "table_caption": [ + "(a) Comparisons between LoRand-Tuning and Fine-Tuning on COCO." + ], + "table_footnote": [ + "Table 3. Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K and COCO. We fine-tune multiple backbones and compare their performances with LoRand series. 
Architectures in (a) and (b) are Cascade Mask R-CNN and UperNet. Parameters in decoder and head are updated in both paradigms. * denotes the trainable parameters in backbones." + ], + "table_body": "
BackboneTrained Params*AP_BoxAP_Mask
Fine-Tuning Paradigm
ResNet-10144 M47.9 %41.5 %
ResNeXt-101-3240 M48.1 %41.6 %
ResNeXt-101-6481 M48.3 %41.7 %
DeiT-S22 M48.0 %41.4 %
Swin-T29 M50.5 %43.7 %
Swin-S50 M51.8 %44.7 %
Swin-B88 M51.9 %45.0 %
LoRand-Tuning
LoRand (Swin-T)0.88 M50.2 %42.9 %
LoRand (Swin-S)1.80 M50.7 %43.8 %
LoRand (Swin-B)2.39 M51.1 %44.3 %
(b) Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K.
BackboneTrained Params*mIoU
Fine-Tuning
ResNet-1812 M39.97 %
ResNet-5025 M42.78 %
ResNet-10144 M44.85 %
DeiT-S22 M44.01 %
Swin-S50 M49.30 %
Swin-B88 M51.60 %
Swin-L197 M53.25 %
LoRand-Tuning
LoRand (Swin-S)1.80 M47.33 %
LoRand (Swin-B)2.39 M49.62 %
LoRand (Swin-L)3.59 M50.67 %
", + "bbox": [ + 503, + 103, + 883, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "sults in the PASCAL VOC. Figure 5 indicates that the performance of most methods improves as the backbone scale gets larger. For the LoRand series, more parameters bring better performance, but it is still challenging to outperform FULL on large datasets. For the ADAPTER, ADAPTER-B performs better than ADAPTER-T, suggesting that adding extra parameters does help improve adapter-tuning performance. Experiments on Swin variants systematically", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20122", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4799d8629c462496de8ce6fadebc123984f57f3c6291d7d764ef3994d63168c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 119, + 90, + 852, + 114 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f1de32ed5f910da96337d105f6c2a278695e25f9594dff66acc215e4c8e21230.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 116, + 336, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cf498dcc0f0551d854e6089165108833da42aef14b8789312e63f10306302f53.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 117, + 607, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/780459435f7dcd851bc2a17b3a882ae4f0dd704c150286faeb4d32782ced5b5b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 117, + 875, + 255 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c8f8fb199094b4d44297a887101b9142ca0aa4a8533d9a78d2804c16a7a98a80.jpg", + "image_caption": [ + "Figure 5. Seven methods on different backbone scales. Figures show results on PASCAL VOC, COCO, and ADE20K from left to right. Swin-S, Swin-B, and Swin-L are employed as the pre-trained models for PASCAL VOC and ADE20K. Swin-T, Swin-S, and Swin-B are employed for COCO. FIXED's performances are so low on COCO and ADE20K that they reduce the intuitiveness of the other six methods, so FIXED is only presented in PASCAL VOC comparisons." + ], + "image_footnote": [], + "bbox": [ + 558, + 321, + 879, + 340 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8beae4f9a09d00f90cecc036a470adcd74773704abd7430320102b6b1e266291.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 345, + 339, + 487 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/128d7434bbe4dec4e4d11a3c91b0ddb00bf1ce980db749fe4ff84624fbeb0a25.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 345, + 607, + 487 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/80ef1c9114fff622842c4aebee06a2c60b1be5b51ab9208d703be709a5193c33.jpg", + "image_caption": [ + "Figure 6. Ablation Study for $\\alpha$ and $\\beta$ . $\\alpha$ ranges from 2, 4, 6, and $\\beta$ ranges from 4, 8, 16. Figures from left to right present experiments on three benchmarks respectively. We only present $\\mathrm{AP_{Box}}$ changes for COCO benchmark considering the strong correlation between the values of $\\mathrm{AP_{Box}}$ and $\\mathrm{AP_{Mask}}$ in COCO." 
+ ], + "image_footnote": [], + "bbox": [ + 622, + 345, + 877, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "demonstrate that LoRand can outperform both FULL and traditional adapter structures in low-resource cases and perform very closely to FULL in large benchmarks.", + "bbox": [ + 75, + 566, + 468, + 612 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 75, + 619, + 230, + 636 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we ablate two key hyperparameters in LoRand: the LoRand branch number $\\alpha$ and the kernel matrix dimension $\\beta$ . $\\alpha$ affects the distributed decision-making of LoRand, while $\\beta$ focuses on a single branch's learning capability and consistency.", + "bbox": [ + 75, + 643, + 468, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Several sets of ablation experiments are designed and implemented to investigate the effect of $\\alpha$ and $\\beta$ on the performance of LoRand. The ablation experiments were conducted on the same three benchmarks. In order to improve the upper limit of LoRand, our experiments are conducted on the largest backbone of each dataset (ADE20K/PASCAL VOC: Swin-L, COCO: Swin-B). The value sets of $\\alpha$ and $\\beta$ are $\\{2,4,6\\}$ and $\\{4,8,16\\}$ . Figure 6 shows the results of ablation studies on three datasets. In most cases, LoRand's performance increases slightly as $\\alpha$ and $\\beta$ become larger but hardly outperforms fine-tuning on large benchmarks. Besides, exponentially increasing the size of the LoRand does", + "bbox": [ + 73, + 719, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "not result in an equivalent performance improvement and even leads to a reduction ( $\\alpha = 6$ in VOC and COCO). Ablation studies demonstrate that larger LoRands have fewer gains both in parameter efficiency and performance. We have considered this trade-off when designing the LoRand standard, LoRand+, and LoRand++.", + "bbox": [ + 496, + 566, + 890, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 678, + 617, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper presents LoRand, a parameter-efficient low-rank adapter for dense predictions, which completely shares the feature understanding of advanced pre-trained models and effectively transfers it to downstream tasks. LoRand performs on par with fine-tuning in COCO instance segmentation, ADE20K semantic segmentation, and PASCAL VOC object detection with only $1\\%$ to $3\\%$ trainable backbone parameters. Moreover, LoRand effectively avoids the disadvantages of the fine-tuning paradigm and delivers better performance in low-resource situations. We hope that parameter-efficient LoRand can save massive redundant storage resources and facilitate a unified training paradigm for vision and language.", + "bbox": [ + 496, + 704, + 890, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20123", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Caisse Amisse, Mario Ernesto Jijón-Palma, and Jorge Antonio Silva Centeno. Fine-tuning deep learning models for pedestrian detection. 
*Boletim de Ciências Geólicas*, 27, 2021. 1", + "[2] Alexei Baevski, Sergey Edunov, Yinhan Liu, Luke Zettle-moyer, and Michael Auli. Cloze-driven pretraining of self-attention networks. arXiv preprint arXiv:1903.07785, 2019. 1", + "[3] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 1", + "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 1", + "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2", + "[6] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5", + "[7] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. arXiv preprint arXiv:2205.13535, 2022. 3", + "[8] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 3", + "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 2, 5", + "[10] Zhengming Ding and Yun Fu. Deep transfer low-rank coding for cross-domain learning. IEEE transactions on neural networks and learning systems, 30(6):1768-1779, 2018. 3", + "[11] Zhengming Ding, Ming Shao, and Yun Fu. Deep low-rank coding for transfer learning. In Twenty-Fourth International Joint Conference on Artificial Intelligence, 2015. 3", + "[12] Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. 2", + "[13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 2" + ], + "bbox": [ + 78, + 114, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Mark Everingham, SM Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes challenge: A retrospective. International journal of computer vision, 111(1):98-136, 2015. 2, 5", + "[15] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 2", + "[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007. 
6", + "[17] Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Yuan Yao, Ao Zhang, Liang Zhang, et al. Pre-trained models: Past, present and future. AI Open, 2:225-250, 2021. 1", + "[18] Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. Towards a unified view of parameter-efficient transfer learning. In International Conference on Learning Representations, 2021. 3", + "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2", + "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3, 4", + "[21] Xuehai He, Chunyuan Li, Pengchuan Zhang, Jianwei Yang, and Xin Eric Wang. Parameter-efficient fine-tuning for vision transformers. arXiv preprint arXiv:2203.16329, 2022. 3", + "[22] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 5, 7", + "[23] Fatsuma Jauro, Haruna Chiroma, Abdulsalam Y Gital, Mubarak Almutairi, M Abdulhamid Shafi'i, and Jemal H Abawajy. Deep learning architectures in emerging cloud computing architectures: Recent development, challenges and next research trend. Applied Soft Computing, 96:106582, 2020. 1", + "[24] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. arXiv preprint arXiv:2203.12119, 2022. 2, 3, 7", + "[25] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022. 3", + "[26] Christoph Käding, Erik Rodner, Alexander Freytag, and Joachim Denzler. Fine-tuning deep neural networks in continuous learning scenarios. In *Asian Conference on Computer Vision*, pages 588–605. Springer, 2016. 1", + "[27] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pages 554–561, 2013. 6" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20124", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 1, 2, 5", + "[29] Fanfan Liu, Haoran Wei, Wenzhe Zhao, Guozhen Li, Jingquan Peng, and Zihao Li. Wb-detr: Transformer-based detector without backbone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2979-2987, 2021. 2", + "[30] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586, 2021. 1, 2", + "[31] Yen-Cheng Liu, Chih-Yao Ma, Junjiao Tian, Zijian He, and Zsolt Kira. 
Polyhistor: Parameter-efficient multi-task adaptation for dense vision tasks. arXiv preprint arXiv:2210.03265, 2022. 3", + "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 1, 2, 5", + "[33] Cheng Long Li, Andong Lu, Ai Hua Zheng, Zhengzheng Tu, and Jin Tang. Multi-adapter rgbt tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pages 0-0, 2019. 3", + "[34] Yuning Mao, Lambert Mathias, Rui Hou, Amjad Alma-hairi, Hao Ma, Jiawei Han, Scott Yih, and Madian Khabsa. Unipelt: A unified framework for parameter-efficient language model tuning. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6253-6264, 2022. 3", + "[35] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pages 722-729. IEEE, 2008. 6", + "[36] Matthew E Peters, Sebastian Ruder, and Noah A Smith. To tune or not to tune? adapting pretrained representations to diverse tasks. arXiv preprint arXiv:1903.05987, 2019. 2, 3", + "[37] Jonas Pfeiffer, Aishwarya Kamath, Andreas Rückle, Kyunghyun Cho, and Iryna Gurevych. Adapterfusion: Nondestructive task composition for transfer learning. In 16th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2021, pages 487-503. Association for Computational Linguistics (ACL), 2021. 2, 3", + "[38] Jonas Pfeiffer, Andreas Rücklé, Clifton Poth, Aishwarya Kamath, Ivan Vulić, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. Adapterhub: A framework for adapting transformers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 46-54, 2020. 1, 2", + "[39] Jonathan Pilault, Christopher Pal, et al. Conditionally adaptive multi-task learning: Improving transfer learning in nlp using fewer parameters & less data. In International Conference on Learning Representations, 2020. 3" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[40] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67, 2020. 1", + "[41] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017. 3", + "[42] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 5", + "[43] Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, André Susano Pinto, Daniel Keysers, and Neil Houlsby. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34:8583-8595, 2021. 4", + "[44] Andreas Rücklé, Gregor Geigle, Max Glockner, Tilman Beck, Jonas Pfeiffer, Nils Reimers, and Iryna Gurevych. Adapterdrop: On the efficiency of adapters in transformers. 
In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7930-7946, 2021. 2, 3", + "[45] Omer Sagi and Lior Rokach. Ensemble learning: A survey. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery, 8(4):e1249, 2018. 4", + "[46] Chompunuch Sarasaen, Soumick Chatterjee, Mario Breitkopf, Georg Rose, Andreas Nurnberger, and Oliver Speck. Fine-tuning deep learning model parameters for improved super-resolution of dynamic mri with prior-knowledge. Artificial Intelligence in Medicine, 121:102196, 2021. 1", + "[47] Jaime Sevilla, Lennart Heim, Anson Ho, Tamay Besiroglu, Marius Hobbahn, and Pablo Villalobos. Compute trends across three eras of machine learning. arXiv preprint arXiv:2202.05924, 2022.1", + "[48] Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990, 2022. 1", + "[49] Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3645–3650, 2019. 1", + "[50] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1", + "[51] Yi-Lin Sung, Jaemin Cho, and Mohit Bansal. Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5227-5237, 2022. 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20125", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] B Thilagavathi, K Suthendran, and K Srujanraju. Evaluating the adaboost algorithm for biometric-based face recognition. In Data Engineering and Communication Technology, pages 669-678. Springer, 2021. 4", + "[53] Edna Chebet Too, Li Yujiang, Sam Njuki, and Liu Yingchun. A comparative study of fine-tuning deep learning models for plant disease identification. Computers and Electronics in Agriculture, 161:272-279, 2019. 1", + "[54] Thijs Vogels, Sai Praneeth Karimireddy, and Martin Jaggi. Practical low-rank communication compression in decentralized deep learning. Advances in Neural Information Processing Systems, 33:14171-14181, 2020. 3", + "[55] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 6", + "[56] Xudong Wang, Zhaowei Cai, Dashan Gao, and Nuno Vasconcelos. Towards universal object detection by domain attention. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7289-7298, 2019. 3", + "[57] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5", + "[58] Sha Yuan, Hanyu Zhao, Shuai Zhao, Jiahong Leng, Yangxiao Liang, Xiaozhi Wang, Jifan Yu, Xin Lv, Zhou Shao, Jiaao He, et al. 
A roadmap for big model. arXiv preprint arXiv:2203.14101, 2022. 1", + "[59] Aston Zhang, Yi Tay, SHUAI Zhang, Alvin Chan, Anh Tuan Luu, Siu Hui, and Jie Fu. Beyond fully-connected layers with quaternions: Parameterization of hypercomplex multiplications with $1/n$ parameters. In International Conference on Learning Representations, 2020. 2, 3", + "[60] Chaoning Zhang, Chenshuang Zhang, Junha Song, John Seon Keun Yi, Kang Zhang, and In So Kweon. A survey on masked autoencoder for self-supervised learning in vision and beyond. arXiv preprint arXiv:2208.00173, 2022. 2", + "[61] Jianwei Zhao, Yongbiao Lv, Zhenghua Zhou, and Feilong Cao. A novel deep learning algorithm for incomplete face recognition: Low-rank-recovery network. Neural Networks, 94:115-124, 2017. 3", + "[62] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 2, 5" + ], + "bbox": [ + 78, + 90, + 468, + 739 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "20126", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_model.json b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_model.json new file mode 100644 index 0000000000000000000000000000000000000000..618a9dfc210a4518b5f0be0a350e0cb97756c32c --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_model.json @@ -0,0 +1,2477 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.099, + 0.131, + 0.872, + 0.152 + ], + "angle": 0, + "content": "1% VS 100%: Parameter-Efficient Low Rank Adapter for Dense Predictions" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.179, + 0.877, + 0.27 + ], + "angle": 0, + "content": "Dongshuo Yin\\(^{1,2,\\dagger}\\), Yiran Yang\\(^{1,2,\\dagger}\\), Zhechao Wang\\(^{1,2}\\), Hongfeng Yu\\(^{1}\\), Kaiwen Wei\\(^{1,2}\\), Xian Sun\\(^{1,2,*}\\) \n\\(^{1}\\)Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences \n\\(^{2}\\)School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.272, + 0.808, + 0.305 + ], + "angle": 0, + "content": "{yindongshuo19, yangyiran19, wangzhechao21, weikaiwen19}@mails.ucas.ac.cn {yuhf, sunxian}@aircas.ac.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.34, + 0.314, + 0.356 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.372, + 0.474, + 0.658 + ], + "angle": 0, + "content": "Fine-tuning large-scale pre-trained vision models to downstream tasks is a standard technique for achieving state-of-the-art performance on computer vision benchmarks. 
However, fine-tuning the whole model with millions of parameters is inefficient as it requires storing a samesized new model copy for each task. In this work, we propose LoRand, a method for fine-tuning large-scale vision models with a better trade-off between task performance and the number of trainable parameters. LoRand generates tiny adapter structures with low-rank synthesis while keeping the original backbone parameters fixed, resulting in high parameter sharing. To demonstrate LoRand's effectiveness, we implement extensive experiments on object detection, semantic segmentation, and instance segmentation tasks. By only training a small percentage (1% to 3%) of the pre-trained backbone parameters, LoRand achieves comparable performance to standard fine-tuning on COCO and ADE20K and outperforms fine-tuning in low-resource PASCAL VOC dataset." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.688, + 0.21, + 0.703 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.714, + 0.47, + 0.866 + ], + "angle": 0, + "content": "With the rapid development of computer vision, parameters in deep models are surging. Giant models need to be trained with massive resources to achieve superior performance [3, 17, 47, 58], which is often unavailable to many academics and institutions. \"Pretrain & Finetuning\" paradigm is widely used to alleviate this dilemma. Teams with sufficient computation resources utilise enormous datasets [2, 9, 40, 50] to train superior backbones [4, 32, 40, 48] and optimise the models with ideal performances. Models pretrained in this way usually have a su" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.34, + 0.872, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.545, + 0.895, + 0.671 + ], + "angle": 0, + "content": "Figure 1. Comparisons of trainable backbone parameters between our methods (red) and fine-tuning (black). In COCO, we achieve advanced performances and outperform most existing backbones with only \\(0.9\\sim 2.5\\mathrm{M}\\) new backbone parameters (Cascade-RCNN is employed as the detector). The fine-tuning paradigm produces massive redundant backbone parameters, whereas our approach saves over \\(97\\%\\) of hardware resources with competitive performances. The sizes of the circles intuitively compare the number of trainable parameters." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.764 + ], + "angle": 0, + "content": "perior understanding of homogeneous data. After that, researchers with limited computational resources can transfer the understanding capabilities of the pre-trained models to downstream tasks with promising performances by finetuning [1,26,46,53]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.893, + 0.903 + ], + "angle": 0, + "content": "However, the fine-tuned model will produce a new set of parameters as large as the pre-trained model. New parameters are independent of the pre-trained models and unshareable, which are very hardware intensive for cloud service providers [23, 49]. Figure 1 compares the parameter quantities of some remarkable backbones and their performances on the COCO [28] dataset. Recent advances in natural language processing (NLP) [30, 38] show that large pre-trained models trained with rich data have strong gener" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.875, + 0.223, + 0.888 + ], + "angle": 0, + "content": "*Corresponding author." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.888, + 0.207, + 0.9 + ], + "angle": 0, + "content": "Equal contribution." + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.875, + 0.223, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20116" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.124, + 0.088, + 0.252, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.327, + 0.244, + 0.337 + ], + "angle": 0, + "content": "Swin-Transformer Block" + }, + { + "type": "image", + "bbox": [ + 0.268, + 0.088, + 0.426, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.327, + 0.382, + 0.337 + ], + "angle": 0, + "content": "LoRand Layer" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.349, + 0.47, + 0.432 + ], + "angle": 0, + "content": "Figure 2. Architecture of the adapter module and its integration with the Transformer. Left: We add two LoRand structures to each SwinBlock located behind the W/SW-MSA and MLP structures respectively. Right: LoRand contains two Multi-branch low-rank projections and nonlinearity. We include skip-connection to LoRand to enhance its robustness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.761 + ], + "angle": 0, + "content": "alisability, which means most parameters in the pre-trained models can be shared with the new tasks [22, 36, 37, 44, 59]. Moreover, recent literature demonstrates that the feature understanding of pre-trained models could be reduced when they are fine-tuned in low-resource situations [12, 36]. To tackle these issues, NLP researchers propose two new training paradigms based on pre-trained models: Adapter Tuning [22] and Prompt Tuning [30], both of which tune the new models by fixing the pre-trained parameters and adding a few trainable structures (less than \\(10\\%\\) of the backbone). These paradigms create a new buzz in NLP and achieve impressive performances which can be competitive with finetuning [12, 22, 30, 36-38, 44, 59]. Advances in NLP also shed new light on computer vision. Jia et al. [24] propose Visual Prompt Tuning (VPT) and demonstrate that VPT can outperform fine-tuning on image classification tasks by training a small number of trainable parameters. Nevertheless, VPT shows weakness on more challenging dense predictions like semantic segmentation compared with finetuning [24]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.468, + 0.903 + ], + "angle": 0, + "content": "To find a parameter-efficient paradigm with promising performance in computer vision, we explore the potential of Adapter Tuning for visual dense predictions. We employ the advanced Swin Transformer [32] trained with ImageNet-22K [9] as the pre-trained model. After that, we add bottleneck adapter structures [22] behind each SwinBlock and freeze the original backbone parameters when training, but this approach cannot achieve comparable performance to fine-tuning as mentioned in [24]. In the experi" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.453 + ], + "angle": 0, + "content": "periments, we find that the models perform better with sparser adapter structures. To improve the performance of Adapter Tuning, we propose Low-Rank Adapter (LoRand) to reduce the adapter parameters, as shown in Figure 2. 
LoRand sparsely parameterizes the matrices in adapters by low-rank synthesis. Specifically, the projection matrix of the fully-connected layer (FC) in LoRand is a product of multiple low-rank matrices, which reduces FC parameters by more than \\(80\\%\\). We implement extensive experiments on object detection (PASCAL VOC [14]), semantic segmentation (ADE20K [62]), and instance segmentation (MS COCO [28]) to verify the capability of LoRand. Experimental results show that LoRand-Tuning is comparable to fine-tuning on multiple tasks with only \\(1.8\\%\\) to \\(2.8\\%\\) new backbone parameters, which suggests that the pre-trained backbone parameters can be fully shared. More interestingly, our method completely outperforms fine-tuning on the PASCAL VOC dataset, illustrating that LoRand-Tuning can reduce the impairment of fine-tuning on pre-trained models in low-resource configurations. Our method demonstrates that the LoRand-Tuning paradigm can substantially save storage resources and achieve competitive performances on most dense prediction tasks. In summary, our contributions are three-fold:" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.463, + 0.892, + 0.539 + ], + "angle": 0, + "content": "- We demonstrate that visual pre-trained models are highly generalisable and shareable. With our training methods, new tasks require only a few trainable parameters to achieve performances comparable to finetuning, which can save massive hardware resources." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.548, + 0.892, + 0.623 + ], + "angle": 0, + "content": "- We propose the LoRand structure for sparser adapters based on low-rank synthesis. We demonstrate that the backbone parameters in fine-tuning are highly redundant, which can be replaced by \\(1.8\\%\\) to \\(2.8\\%\\) additional parameters in LoRand." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.632, + 0.892, + 0.707 + ], + "angle": 0, + "content": "- Extensive experiments on object detection, semantic segmentation, and instance segmentation show that LoRand-Tuning can achieve remarkable performances and reduce massive new parameters in challenging dense prediction tasks." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.463, + 0.892, + 0.707 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.717, + 0.641, + 0.733 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.742, + 0.75, + 0.759 + ], + "angle": 0, + "content": "2.1. Training Paradigms in NLP" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Computer vision has been continuously inspired by NLP in recent years, including the visual transformer series [5,13,29,32] and self-supervised MAE series [15,19,60]. In fact, NLP is leading new training trends different from finetuning. Fine-tuning produces a new parameter set for each new task, which is parametrically inefficient for plenty of linguistic tasks [22,30]. To solve this problem, [30] and [22] have proposed \"Prompt Tuning\" and \"Adapter Tuning\" respectively, both of which fix all parameters of the backbone" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20117" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.364 + ], + "angle": 0, + "content": "and plug a few tiny trainable structures (less than \\(10\\%\\) of the backbone) to adapt the pre-trained model to the new tasks. 
\"Prompt tuning\" adds learnable parameters (also known as prompts) to the input or intermediate layers to change the input space of the new tasks. \"Prompts\" can motivate the model to remember knowledge learned in the previous tasks. \"Adapter tuning\" adds learnable bottleneck structures after each block to connect the pre-trained model with new tasks. Adapter and prompt demonstrate the coexistence of parameter efficiency and high performances in NLP, stimulating studies in CV. [24] proposes Visual Prompt Tuning (VPT) for image classification and semantic segmentation, but the performance of VPT on semantic segmentation is still far from fine-tuning. This phenomenon motivates us to explore whether adapter tuning can bring a new paradigm in computer vision with fewer parameters and better performances. In this work, we try to explore parameter-efficient and high-performance adapter structures." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.378, + 0.238, + 0.394 + ], + "angle": 0, + "content": "2.2. Adapter Tuning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.473, + 0.691 + ], + "angle": 0, + "content": "Adapters have been widely studied in NLP. Houlsby et al. [22] first add a bottleneck adapter structure to the transformer blocks and fix the original backbone, which achieves comparable performances to fine-tuning. Figure 3 illustrates the differences between fine-tuning and adaptertuning. [37,44,59] further reduce parameters in the adapter with closer performances to fine-tuning. [18,34,39] outperform fine-tuning on low-resource tasks, demonstrating that more parameters may not improve performance when finetuning pre-trained models [36]. In computer vision, [41] add convolutional adapters to the ResNet [20] and obtain competitive results in image classification. Adapter concept has also been applied in multimodal [33], vision-and-language [51], and domain adaptation [56], but these methods are only applicable under specific conditions. [7, 21, 25, 31] investigate the potential of adapter-tuning for visual classification. [8] apply the adapter structure to visual dense predictions without fixing any original parameters, which indeed trades more parameters for better performances." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.703, + 0.313, + 0.719 + ], + "angle": 0, + "content": "2.3. Low-rank Approximation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.728, + 0.47, + 0.85 + ], + "angle": 0, + "content": "The low-rank approximation uses multiple low-dimensional tensors to approximate a larger tensor with higher dimensions. Tensor dimensions and sizes in machine learning are very large, so low-rank approximations are widely used in face recognition [61], distributed training [54], transfer learning [11], and cross-domain [10]. A \\( b \\times c \\) matrix \\( M \\) can be approximated with \\( N \\) low-rank matrices \\( Q \\) by the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.203, + 0.863, + 0.47, + 0.905 + ], + "angle": 0, + "content": "\\[\nM _ {b \\times c} = \\prod_ {i = 1} ^ {N} Q _ {r _ {i} \\times s _ {i}}, \\tag {1}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.091, + 0.889, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.3, + 0.892, + 0.384 + ], + "angle": 0, + "content": "Figure 3. Comparison between Adapter-Tuning and Fine-Tuning paradigms. Fine-Tuning tunes ( \\(\\mathcal{A}\\)) all parameters delivered by the pre-trained model. 
Adapter-Tuning freezes (\\(\\mathcal{A}\\)) all structures and parameters in the pre-trained model and only trains (\\(\\mathcal{A}\\)) the additional parameters in adapters. Parameters in the decoder and head are trainable in both paradigms." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.409, + 0.892, + 0.456 + ], + "angle": 0, + "content": "where \\(N\\) has different values depending on the approximation methods, we implement low-rank approximation of the adapter matrices by heuristic learning." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.468, + 0.591, + 0.483 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.493, + 0.892, + 0.54 + ], + "angle": 0, + "content": "In this section, we will elaborate on the proposed low-rank adapter (LoRand) in three parts: adapter tuning paradigm, LoRand, and parameter analysis." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.548, + 0.74, + 0.565 + ], + "angle": 0, + "content": "3.1. Adapter Tuning Paradigm" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.571, + 0.892, + 0.616 + ], + "angle": 0, + "content": "For dataset \\(D = \\{(x_{i},y_{i})\\}_{i = 1}^{N}\\), fine-tuning calculates the loss between inference results and labels according to the formula:" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.624, + 0.892, + 0.667 + ], + "angle": 0, + "content": "\\[\nL (D, \\theta) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.72 + ], + "angle": 0, + "content": "where \\( f_{\\theta} \\) denotes the network forward function and loss represents the loss function. After that, \\( \\theta \\) is optimized through" + }, + { + "type": "equation", + "bbox": [ + 0.619, + 0.72, + 0.891, + 0.745 + ], + "angle": 0, + "content": "\\[\n\\theta \\leftarrow \\underset {\\theta} {\\arg \\min } L (D, \\theta). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.854 + ], + "angle": 0, + "content": "In adapter tuning paradigm, parameters consist of two parts, including parameters in adapter \\( \\theta_{A} \\) and parameters in the original architecture \\( \\theta \\). Here, \\( \\theta \\) is further divided into frozen part \\( \\theta_{F} \\) and trainable part \\( \\theta_{T} \\), noted as \\( \\theta = \\{\\theta_{F},\\theta_{T}\\} \\). Let \\( \\Omega \\) be all the trainable parameters, then \\( \\Omega = \\{\\theta_{A},\\theta_{T}\\} \\). The loss function and optimization formula in adapter can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.863, + 0.892, + 0.904 + ], + "angle": 0, + "content": "\\[\nL \\left(D, \\theta_ {F}, \\Omega\\right) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta_ {F}, \\Omega} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {4}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20118" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.092, + 0.666, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.671, + 0.149, + 0.882, + 0.28 + ], + "angle": 0, + "content": "
(m,n)PLoRandPAdapter%
(96,48)4736921651.39%
(192,96)93443686425.35%
(384,192)1856014745612.59%
(768,384)369925898246.27%
……………………
" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.347, + 0.894, + 0.417 + ], + "angle": 0, + "content": "Figure 4. Left: Multi-branch projection in LoRand. The down-projection \\( W^{D} \\) and up-projection \\( W^{U} \\) matrices are the summation of \\( \\alpha \\) branches \\( W_{1}^{D}(W_{1}^{U})\\ldots W_{\\alpha}^{D}(W_{\\alpha}^{U}) \\). \\( K_{i} \\) in \\( i \\)-th branch is shared between \\( W_{i}^{D} \\) and \\( W_{i}^{U} \\). All the \\( P, Q, \\) and \\( K \\) are trainable, while all the \\( W \\) matrices are calculated. Right: Comparisons of the same-sized projection matrices between LoRand and Adapter. \\( (m,n) \\) in the table are typical values in SwinBlocks. LoRand has far fewer parameters than Adapter. With the same projection dimension, LoRand saves over 80% parameters of the Adapter in Swin Transformers. \\( (\\alpha ,\\beta) \\) here are (2,8), the same as the experiments." + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.443, + 0.47, + 0.468 + ], + "angle": 0, + "content": "\\[\n\\Omega \\leftarrow \\underset {\\Omega} {\\arg \\min } L (D, \\theta_ {F}, \\Omega). \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.475, + 0.178, + 0.49 + ], + "angle": 0, + "content": "3.2. LoRand" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.498, + 0.47, + 0.59 + ], + "angle": 0, + "content": "Before introducing LoRand, we first review the existing adapter structure. Conventional adapters are bottleneck structures containing a down-projection, an up-projection, and a non-linear activation function. Besides, adapters ensure the robustness of the model by adding residual [20] structures. Adapter layer can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.597, + 0.47, + 0.617 + ], + "angle": 0, + "content": "\\[\nA ^ {l} = U ^ {l} \\left(G e L U (D ^ {l} (x))\\right) + x, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.624, + 0.47, + 0.685 + ], + "angle": 0, + "content": "where \\( U^l \\) and \\( D^l \\) represent the up and down projections in the \\( l \\)-th adapter layer, and GeLU is the activation function. It is clear that the parameters in adapter come from the projections. The projection process can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.695, + 0.47, + 0.71 + ], + "angle": 0, + "content": "\\[\ny = W x + b, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.72, + 0.399, + 0.735 + ], + "angle": 0, + "content": "which means most adapter parameters are in \\( W \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.869 + ], + "angle": 0, + "content": "To reduce the adapter parameters, we propose a low-rank adapter (LoRand) structure to replace the \\(W\\) in the projection structures. Figure 2 shows the simplified structure of LoRand. Here we approximate not a specific matrix \\(W\\) but an ideal matrix \\(W_{best}\\) that can transform the feature space of the pre-trained model into new tasks by heuristic learning. The approximation matrix \\(\\hat{W}\\) has the same size as \\(W\\), but the low-rank design makes \\(\\hat{W}\\) have far fewer free degrees than a common \\(W\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Specifically, we synthesize each \\(W\\) by multiplying three low-rank matrices \\(P \\in \\mathbb{R}^{\\beta \\times m}\\), \\(K \\in \\mathbb{R}^{\\beta \\times \\beta}\\), \\(Q \\in \\mathbb{R}^{\\beta \\times n}\\)" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.445, + 0.549, + 0.457 + ], + "angle": 0, + "content": "that is:" + }, + { + "type": "equation", + "bbox": [ + 0.646, + 0.459, + 0.892, + 0.476 + ], + "angle": 0, + "content": "\\[\nW = P ^ {T} K Q, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.484, + 0.892, + 0.529 + ], + "angle": 0, + "content": "where \\(\\beta \\ll \\min(m, n)\\) ensuring that \\(P\\) and \\(Q\\) are low-rank matrices. \\(K\\) can be regarded as a kernel matrix that controls the parameter size of LoRand." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.53, + 0.892, + 0.59 + ], + "angle": 0, + "content": "After that, we add multi-branch structures to LoRand to increase the robustness and stability of low-rank matrices, which is inspired by MoE [43] and adaboost [45,52]. Every \\(W\\) consists of \\(\\alpha\\) branches, that is:" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.602, + 0.892, + 0.641 + ], + "angle": 0, + "content": "\\[\nW = \\sum_ {i = 1} ^ {\\alpha} W _ {i} = \\sum_ {i = 1} ^ {\\alpha} P _ {i} ^ {T} K _ {i} Q _ {i}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.652, + 0.894, + 0.757 + ], + "angle": 0, + "content": "In addition, we share the kernel matrix \\( K \\) of the two projection layers within each branch. We hope the sharing mechanism can promote the coherence of two projection layers during training process. Besides, the shared \\( K \\) also slightly reduces the number of LoRand parameters. Up to now, the \\( W^{U} \\) and \\( W^{D} \\) in a complete LoRand structure can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.77, + 0.892, + 0.81 + ], + "angle": 0, + "content": "\\[\nW ^ {U} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {U} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {U}\\right) ^ {T} K _ {i} Q _ {i} ^ {U}, \\tag {10}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.823, + 0.892, + 0.862 + ], + "angle": 0, + "content": "\\[\nW ^ {D} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {D} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {D}\\right) ^ {T} K _ {i} Q _ {i} ^ {D}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.87, + 0.892, + 0.902 + ], + "angle": 0, + "content": "where \\( K_{i} \\) is shared in \\( W^{U} \\) and \\( W^{D} \\). Figure 4 presents the detailed designs of the multi-branch projection." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20119" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.267, + 0.108 + ], + "angle": 0, + "content": "3.3. Parameter Analysis" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.115, + 0.47, + 0.16 + ], + "angle": 0, + "content": "In this section, we will compare the parameters of Lo-Rand and typical adapter [22] with the same size of projection matrix." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.168, + 0.47, + 0.259 + ], + "angle": 0, + "content": "Adapter Let \\( m \\) be the input dimension of the adapter and \\( n \\) be the middle layer dimension after down projection. Then the number of parameters in each adapter is \\( 2mn \\) (ignoring the few biases). 
In general, adapter tuning places two adapter modules in each block, so the space complexity of all adapter parameters in \\( \\gamma \\) blocks can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.235, + 0.272, + 0.469, + 0.288 + ], + "angle": 0, + "content": "\\[\nO (4 \\gamma m n). \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.308, + 0.47, + 0.34 + ], + "angle": 0, + "content": "LoRand According to section 3.2, each \\(W\\) contains \\(\\alpha\\) sets of \\(\\{P,Q,K\\}\\), that is:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.35, + 0.469, + 0.368 + ], + "angle": 0, + "content": "\\[\n\\alpha \\left(m \\beta + \\beta^ {2} + n \\beta\\right). \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.379, + 0.469, + 0.41 + ], + "angle": 0, + "content": "Each LoRand consists of two \\( W \\) and \\( \\alpha \\) shared \\( K \\), so the parameter quantity of each LoRand is:" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.42, + 0.469, + 0.439 + ], + "angle": 0, + "content": "\\[\n2 \\alpha (m \\beta + \\beta^ {2} + n \\beta) - \\alpha \\beta^ {2} = 2 \\alpha \\beta (m + n + \\beta / 2). \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.45, + 0.47, + 0.48 + ], + "angle": 0, + "content": "Each block has two LoRand structures, so the number of parameters in \\(\\gamma\\) blocks is:" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.492, + 0.469, + 0.509 + ], + "angle": 0, + "content": "\\[\n4 \\alpha \\beta \\gamma (m + n) + 2 \\alpha \\beta^ {2} \\gamma . \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.521, + 0.469, + 0.549 + ], + "angle": 0, + "content": "As \\(\\alpha, \\beta, \\gamma \\ll \\min(m, n)\\), the space complexity here can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.551, + 0.469, + 0.567 + ], + "angle": 0, + "content": "\\[\nO \\left(4 \\alpha \\beta \\gamma (m + n)\\right). \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.574, + 0.469, + 0.603 + ], + "angle": 0, + "content": "Comparison between Formulas 12 and 16 can be simplified as:" + }, + { + "type": "equation", + "bbox": [ + 0.244, + 0.604, + 0.469, + 0.62 + ], + "angle": 0, + "content": "\\[\nO (m n), \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.628, + 0.107, + 0.64 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.643, + 0.469, + 0.659 + ], + "angle": 0, + "content": "\\[\nO (\\alpha \\beta (m + n)). \\tag {18}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.666, + 0.469, + 0.727 + ], + "angle": 0, + "content": "Given that \\(\\alpha, \\beta \\ll \\min(m, n)\\), the space complexity of LoRand is far lower than the typical adapter. The table in Figure 4 illustrates that LoRand saves most Adapter parameters with the same projecting dimension." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.74, + 0.21, + 0.758 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.765, + 0.469, + 0.902 + ], + "angle": 0, + "content": "We evaluate LoRand on multiple dense prediction tasks, including object detection, semantic segmentation, and instance segmentation. We also evaluate LoRand under low-resource conditions. We first describe our experimental setup in Section 4.1, including pre-trained backbones, baselines, LoRand settings, and downstream tasks. Then we present the main results of three benchmarks in Section 4.2. 
We also implement ablation study in Section 4.3 to investigate the impact of structural settings in LoRand." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.691, + 0.108 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.123, + 0.892, + 0.185 + ], + "angle": 0, + "content": "Pretrained Backbones We conduct experiments on the advanced Swin Transformer [32] architectures. All backbones in this section are pre-trained by ImageNet-22k [9]. Pre-trained models are provided by OpenMMLab [6]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.217, + 0.892, + 0.248 + ], + "angle": 0, + "content": "Baselines We compare LoRand with three other common training methods:" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.26, + 0.85, + 0.275 + ], + "angle": 0, + "content": "(a) FULL: update all parameters in the architecture." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.285, + 0.892, + 0.315 + ], + "angle": 0, + "content": "(b) FIXED: fix pre-trained parameters in Swin and train other parts of the architecture (neck, head)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.325, + 0.892, + 0.386 + ], + "angle": 0, + "content": "(c) ADAPTER: add two trainable adapter structures in each SwinBlock following [22], and freeze other parts of the backbone. We evaluate two forms of adapter with different middle layer dimensions \\((D_{ML})\\):" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.39, + 0.864, + 0.406 + ], + "angle": 0, + "content": "- ADAPTER-B: \\(D_{ML}\\) is a half of input dimension." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.41, + 0.883, + 0.426 + ], + "angle": 0, + "content": "- ADAPTER-T: \\(D_{ML}\\) is a quarter of input dimension." + }, + { + "type": "list", + "bbox": [ + 0.499, + 0.26, + 0.892, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.446, + 0.892, + 0.491 + ], + "angle": 0, + "content": "LoRand Settings We conducted experiments on three Lo-Rand variants, which have different branch numbers \\(\\alpha\\) and kernel matrix dimensions \\(\\beta\\)." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.501, + 0.765, + 0.516 + ], + "angle": 0, + "content": "- LoRand: \\(\\alpha = 2\\), \\(\\beta = 8\\) (Standard)." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.526, + 0.701, + 0.541 + ], + "angle": 0, + "content": "- LoRand+: \\(\\alpha = 4, \\beta = 8\\)." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.551, + 0.719, + 0.565 + ], + "angle": 0, + "content": "- LoRand++: \\(\\alpha = 4, \\beta = 16\\)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.501, + 0.765, + 0.565 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.584, + 0.892, + 0.644 + ], + "angle": 0, + "content": "Downstream Tasks We conducted experiments on COCO [28], ADE20K [62], and PASCAL VOC [14] benchmarks to widely evaluate LoRand's performance on main dense prediction tasks." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.645, + 0.892, + 0.735 + ], + "angle": 0, + "content": "COCO 2017 [28] is the most commonly used dataset for object detection and instance segmentation, which contains 118K training and 5K validation images. We perform experiments on the validation set. For a fair comparison, all experiments performed on COCO employ Cascade MASK R-CNN [32] as the detector." 
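As a quick, self-contained check on the parameter analysis in Section 3.3 and on the ratios in the Figure 4 table, the per-module counts can be reproduced from the formulas behind Eqs. (12) and (14). This is our own back-of-the-envelope arithmetic for the standard (alpha, beta) = (2, 8) setting with biases ignored, not code from the paper.

```python
def adapter_params(m: int, n: int) -> int:
    # Conventional adapter: down (m -> n) and up (n -> m) projections, i.e. 2*m*n weights per module, biases ignored.
    return 2 * m * n

def lorand_params(m: int, n: int, alpha: int = 2, beta: int = 8) -> int:
    # Eq. (14): 2 * alpha * beta * (m + n + beta / 2), with each beta x beta kernel shared inside a branch.
    return int(2 * alpha * beta * (m + n + beta / 2))

for m, n in [(96, 48), (192, 96), (384, 192), (768, 384)]:
    p_l, p_a = lorand_params(m, n), adapter_params(m, n)
    print(f"(m,n)=({m},{n}): LoRand {p_l} vs Adapter {p_a} -> {100 * p_l / p_a:.2f}%")
# -> 51.39%, 25.35%, 12.59%, 6.27%, matching the Figure 4 table.
```

Under the same formula, the LoRand+ (alpha=4, beta=8) and LoRand++ (alpha=4, beta=16) settings listed above cost roughly 2x and 4x the standard LoRand per module, which lines up with the trainable-parameter columns reported in Tables 1 and 2.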
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.735, + 0.892, + 0.796 + ], + "angle": 0, + "content": "ADE20K [62] is the most widely used semantic segmentation dataset, which contains 20K training and 2K validation images. We also conduct experiments on the ADE20K validation set and utilise UperNet [57] as the framework." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.796, + 0.892, + 0.87 + ], + "angle": 0, + "content": "PASCAL VOC 0712 [14] is also widely used in object detection, which contains about 16K training and 5K validation images. VOC 0712 is much smaller than the latest benchmarks, so we treat it as a low-resource case. We adopt Faster RCNN [42] as the detector for VOC 0712." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "All our experiments are conducted with 8x NVIDIA Tesla V100 GPUs. The experiments on PASCAL VOC and" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "20120" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.088, + 0.089, + 0.887, + 0.292 + ], + "angle": 0, + "content": "
Swin-L (198M)Trained* Params%ΔFullExtra StructurePascal VOC (Faster RCNN)ADE20K (UperNet)
APBoxΔLoRandmIoUΔLoRand
Baselines
FULL198.58 M100.00 %-X84.43 %- 2.69 %53.25 %+ 1.34 %
FIXED0.00 M0.00 %- 100.00 %X85.19 %- 1.93 %32.21 %- 19.70 %
ADAPTER-B32.04 M16.13 %- 83.87 %80.93 %- 6.19 %46.23 %- 5.68 %
ADAPTER-T16.04 M8.08 %- 91.92 %78.10 %- 9.02 %43.51 %- 8.40 %
Our Methods
LORAND3.59 M1.84 %- 98.16 %87.12 %-50.67 %-
LORAND+7.19 M3.62 %- 96.38 %87.63 %+ 0.51 %51.13 %+ 0.46 %
LORAND++14.24 M7.17 %- 92.83 %88.11 %+ 0.99 %51.87 %+ 1.20 %
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.3, + 0.893, + 0.342 + ], + "angle": 0, + "content": "Table 1. Results of baselines and our methods on Pascal VOC and ADE20K benchmarks. Swin-L is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.348, + 0.889, + 0.549 + ], + "angle": 0, + "content": "
Swin-B (89M)Trained* Params%ΔFullExtra StructureCOCO (Cascade Mask R-CNN)
APBoxΔLoRandAPMaskΔLoRand
Baselines
FULL89.14 M100.00 %-X51.90 %+0.80 %45.00 %+0.90 %
FIXED0.00 M0.00 %-100.00 %X15.30 %-35.80 %10.80 %-33.8 %
ADAPTER-B14.38 M16.13 %-83.87 %46.50 %-4.60 %40.20 %-3.90 %
ADAPTER-T7.20 M8.08 %-91.92 %43.20 %-7.90 %38.70 %-5.40 %
Our Methods
LORAND2.39 M2.76 %-97.24 %51.10 %-44.10 %-
LORAND+4.73 M5.31 %-94.69 %51.20 %+0.10 %44.30 %+0.20 %
LORAND++9.32 M10.46 %-89.54 %51.50 %+0.40 %44.40 %+0.30 %
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.559, + 0.893, + 0.601 + ], + "angle": 0, + "content": "Table 2. Results of baselines and our methods on COCO benchmarks. Swin-B is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.61, + 0.47, + 0.656 + ], + "angle": 0, + "content": "ADE20K are based on Swin-S, Swin-B, and Swin-L pretrained models. Limited by GPU memory, the COCO experiments are based on Swin-T, Swin-S, and Swin-B." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.218, + 0.68 + ], + "angle": 0, + "content": "4.2. Main Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.69, + 0.47, + 0.779 + ], + "angle": 0, + "content": "We first compare the trainable backbone parameters and performance of these methods on three benchmarks in Tables 1 and 2. Table 1 shows the results of PASCAL VOC and ADE20K datasets based on Swin-L, and Table 2 shows the results of COCO based on Swin-B. From Tables 1 and 2, we can see that:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.901 + ], + "angle": 0, + "content": "1) LoRand can effectively address the dilemma of fine-tuning in low-resource situations. Table 1 shows that FIXED outperforms FULL on the PASCAL VOC dataset, which implies that the powerful generalization ability of pre-trained model is severely weakened during fine-tuning. Fine-tuning with low-resource data reduces the feature understanding of pre-trained models, which leads to the poor performance on downstream tasks. LoRand avoids this dis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.867 + ], + "angle": 0, + "content": "advantage by fixing the original parameters. More importantly, LoRand can absorb features from the new data by its smaller trainable structures. Table 1 indicates that LoRand outperforms FULL and FIXED by \\(2.69\\%\\) and \\(1.93\\%\\) on the low-resource dataset with only \\(1.84\\%\\) trainable backbone parameters. LoRand+ and LoRand++ also outperform FULL by \\(3.2\\%\\) and \\(3.68\\%\\) with \\(3.62\\%\\) and \\(7.17\\%\\) backbone parameters. In fact, there are many other common computer vision datasets with similar volumes to the PASCAL VOC, including CUB-200-2011 [55], Oxford 102 Flowers [35], Stanford Cars [27], and Caltech-256 [16]. The prevalence of \"Pretrained & Finetuning\" leads us to focus more on giant benchmarks, but Table 1 suggests we need a better training paradigm to cope with many low-resource situations in industrial applications. LoRand-Tuning proves to be a competitive candidate who brings promising performance and parameter-efficient approaches to low-resource cases." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "2) LoRand effectively balances the number of trainable backbone parameters and downstream task per" + }, + { + "type": "list", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20121" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.473, + 0.348 + ], + "angle": 0, + "content": "formance. 
Tables 1 and 2 demonstrate that LoRand (standard) performs very closely to FULL on large benchmarks with only \\(1.84\\%\\) to \\(2.76\\%\\) trainable parameters. By tuning less than 3.6M backbone parameters, LoRand (standard) achieves \\(50.67\\%\\) (mIOU) on ADE20K, and \\(51.10\\%\\) \\((\\mathrm{AP}_{\\mathrm{Box}})\\) / \\(44.10\\%\\) \\((\\mathrm{AP}_{\\mathrm{Mask}})\\) on COCO, which is only about \\(1.5\\%\\) off on average compared to FULL. LoRand+ and LoRand++ further reduce the gap between these two paradigms to approximately \\(1\\%\\) with slight parameter increases. For Swin-L, LoRand saves about 195M parameters per copy compared to FULL. For Swin-B, LoRand saves about \\(86\\mathrm{M}\\). These results are interesting, which means we do not have to spend plenty of hardware resources to store these redundant parameters. Industrial service providers deliver thousands of model training tasks every day. With LoRand-Tuning, millions of gigabytes per year for model storage could be saved." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.351, + 0.472, + 0.533 + ], + "angle": 0, + "content": "3) LoRand effectively broadens the potential of conventional parameter-efficient adapter structures in dense predictions. From the results, we can draw similar conclusions to [24] that the standard adapter [22] performs worse than fine-tuning on dense predictions. Tables 1 and 2 illustrate that the ADAPTER's performance is far from FULL, although it reduces \\(80\\%\\) of trainable backbone parameters. Also adding new structures, LoRand achieves comparable performance to FULL by training fewer parameters than the ADAPTER. Overall, Tables 1 and 2 demonstrate the feasibility of parameter-efficient tuning paradigm in visual dense prediction tasks." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.536, + 0.473, + 0.776 + ], + "angle": 0, + "content": "Comparisons with other fine-tuned backbone. We then show the comparisons of LoRand with some other remarkable fine-tuned backbones in Table 3. Table 3a shows the results based on UperNet and ADE20K, and 3b shows the results based on Cascade MASK R-CNN and COCO. Table 3 shows that LoRand (based on Swin-Transformer) can outperform most existing fine-tuned backbones with less than 2M parameters. Compared to these backbones, LoRand not only presents more robust and superior results but also saves massive hardware resources in this era of parameter explosion. Specifically, LoRand (Swin-T) exceeds COCO by \\(1.9\\%\\) \\(\\mathrm{(AP_{Box})}\\) and \\(1.2\\%\\) \\(\\mathrm{(AP_{Mask})}\\) with 80.12M fewer new backbone parameters than ResNeXt-101-64. Similarly, LoRand (Swin-L) surpasses \\(5.82\\%\\) (mIoU) on ADE20K with 40.41M fewer trainable backbone parameters than ResNet-101." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.78, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Comparisons on different backbone scales. In addition to Swin-L and Swin-B, we also conduct extensive experiments on Swin-S and Swin-T. We illustrate the performance of baselines and LoRand on multiple backbones. Figure 5 shows the performance of the six methods on different backbone scales, which includes three Swin variants for each benchmark. As FIXED's performance on COCO and ADE20K is too low to display, we only show FIXED's re" + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.089, + 0.879, + 0.102 + ], + "angle": 0, + "content": "(a) Comparisons between LoRand-Tuning and Fine-Tuning on COCO." 
+ }, + { + "type": "table", + "bbox": [ + 0.504, + 0.104, + 0.885, + 0.675 + ], + "angle": 0, + "content": "
BackboneTrained \nParams*APBoxAPMask
Fine-Tuning Paradigm
ResNet-10144 M47.9 %41.5 %
ResNeXt-101-3240 M48.1 %41.6 %
ResNeXt-101-6481 M48.3 %41.7 %
DeiT-S22 M48.0 %41.4 %
Swin-T29 M50.5 %43.7 %
Swin-S50 M51.8 %44.7 %
Swin-B88 M51.9 %45.0 %
LoRand-Tuning
LoRand (Swin-T)0.88 M50.2 %42.9 %
LoRand (Swin-S)1.80 M50.7 %43.8 %
LoRand (Swin-B)2.39 M51.1 %44.3 %
(b) Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K.
BackboneTrained Params*APMask
Fine-Tuning
ResNet-1812 M39.97 %
ResNet-5025 M42.78 %
ResNet-10144 M44.85 %
DeiT-S22 M44.01 %
Swin-S50 M49.30 %
Swin-B88 M51.60 %
Swin-L197 M53.25 %
LoRand-Tuning
LoRand (Swin-S)1.80 M47.33 %
LoRand (Swin-B)2.39 M49.62 %
LoRand (Swin-L)3.59 M50.67 %
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.498, + 0.675, + 0.892, + 0.759 + ], + "angle": 0, + "content": "Table 3. Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K and COCO. We fine-tune multiple backbones and compare their performances with LoRand series. Architectures in (a) and (b) are Cascade Mask R-CNN and UperNet. Parameters in decoder and head are updated in both paradigms. * denotes the trainable parameters in backbones." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "sults in the PASCAL VOC. Figure 5 indicates that the performance of most methods improves as the backbone scale gets larger. For the LoRand series, more parameters bring better performance, but it is still challenging to outperform FULL on large datasets. For the ADAPTER, ADAPTER-B performs better than ADAPTER-T, suggesting that adding extra parameters does help improve adapter-tuning performance. Experiments on Swin variants systematically" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20122" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.092, + 0.853, + 0.115 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.117, + 0.338, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.118, + 0.609, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.118, + 0.877, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.267, + 0.892, + 0.324 + ], + "angle": 0, + "content": "Figure 5. Seven methods on different backbone scales. Figures show results on PASCAL VOC, COCO, and ADE20K from left to right. Swin-S, Swin-B, and Swin-L are employed as the pre-trained models for PASCAL VOC and ADE20K. Swin-T, Swin-S, and Swin-B are employed for COCO. FIXED's performances are so low on COCO and ADE20K that they reduce the intuitiveness of the other six methods, so FIXED is only presented in PASCAL VOC comparisons." + }, + { + "type": "image", + "bbox": [ + 0.56, + 0.322, + 0.88, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.346, + 0.34, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.346, + 0.609, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.346, + 0.879, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.499, + 0.892, + 0.542 + ], + "angle": 0, + "content": "Figure 6. Ablation Study for \\(\\alpha\\) and \\(\\beta\\). \\(\\alpha\\) ranges from 2, 4, 6, and \\(\\beta\\) ranges from 4, 8, 16. Figures from left to right present experiments on three benchmarks respectively. We only present \\(\\mathrm{AP_{Box}}\\) changes for COCO benchmark considering the strong correlation between the values of \\(\\mathrm{AP_{Box}}\\) and \\(\\mathrm{AP_{Mask}}\\) in COCO." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.47, + 0.613 + ], + "angle": 0, + "content": "demonstrate that LoRand can outperform both FULL and traditional adapter structures in low-resource cases and perform very closely to FULL in large benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.621, + 0.231, + 0.637 + ], + "angle": 0, + "content": "4.3. 
Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.47, + 0.719 + ], + "angle": 0, + "content": "In this section, we ablate two key hyperparameters in LoRand: the LoRand branch number \\(\\alpha\\) and the kernel matrix dimension \\(\\beta\\). \\(\\alpha\\) affects the distributed decision-making of LoRand, while \\(\\beta\\) focuses on a single branch's learning capability and consistency." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Several sets of ablation experiments are designed and implemented to investigate the effect of \\(\\alpha\\) and \\(\\beta\\) on the performance of LoRand. The ablation experiments were conducted on the same three benchmarks. In order to improve the upper limit of LoRand, our experiments are conducted on the largest backbone of each dataset (ADE20K/PASCAL VOC: Swin-L, COCO: Swin-B). The value sets of \\(\\alpha\\) and \\(\\beta\\) are \\(\\{2,4,6\\}\\) and \\(\\{4,8,16\\}\\). Figure 6 shows the results of ablation studies on three datasets. In most cases, LoRand's performance increases slightly as \\(\\alpha\\) and \\(\\beta\\) become larger but hardly outperforms fine-tuning on large benchmarks. Besides, exponentially increasing the size of the LoRand does" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.892, + 0.658 + ], + "angle": 0, + "content": "not result in an equivalent performance improvement and even leads to a reduction (\\(\\alpha = 6\\) in VOC and COCO). Ablation studies demonstrate that larger LoRands have fewer gains both in parameter efficiency and performance. We have considered this trade-off when designing the LoRand standard, LoRand+, and LoRand++." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.679, + 0.619, + 0.695 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.903 + ], + "angle": 0, + "content": "This paper presents LoRand, a parameter-efficient low-rank adapter for dense predictions, which completely shares the feature understanding of advanced pre-trained models and effectively transfers it to downstream tasks. LoRand performs on par with fine-tuning in COCO instance segmentation, ADE20K semantic segmentation, and PASCAL VOC object detection with only \\(1\\%\\) to \\(3\\%\\) trainable backbone parameters. Moreover, LoRand effectively avoids the disadvantages of the fine-tuning paradigm and delivers better performance in low-resource situations. We hope that parameter-efficient LoRand can save massive redundant storage resources and facilitate a unified training paradigm for vision and language." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20123" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Caisse Amisse, Mario Ernesto Jijón-Palma, and Jorge Antonio Silva Centeno. Fine-tuning deep learning models for pedestrian detection. *Boletim de Ciências Geólicas*, 27, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.47, + 0.225 + ], + "angle": 0, + "content": "[2] Alexei Baevski, Sergey Edunov, Yinhan Liu, Luke Zettle-moyer, and Michael Auli. Cloze-driven pretraining of self-attention networks. arXiv preprint arXiv:1903.07785, 2019. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.228, + 0.47, + 0.297 + ], + "angle": 0, + "content": "[3] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.47, + 0.367 + ], + "angle": 0, + "content": "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.469, + 0.424 + ], + "angle": 0, + "content": "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.425, + 0.469, + 0.491 + ], + "angle": 0, + "content": "[6] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.469, + 0.549 + ], + "angle": 0, + "content": "[7] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. arXiv preprint arXiv:2205.13535, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.551, + 0.469, + 0.604 + ], + "angle": 0, + "content": "[8] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.469, + 0.662 + ], + "angle": 0, + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.663, + 0.469, + 0.704 + ], + "angle": 0, + "content": "[10] Zhengming Ding and Yun Fu. Deep transfer low-rank coding for cross-domain learning. IEEE transactions on neural networks and learning systems, 30(6):1768-1779, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.469, + 0.747 + ], + "angle": 0, + "content": "[11] Zhengming Ding, Ming Shao, and Yun Fu. Deep low-rank coding for transfer learning. In Twenty-Fourth International Joint Conference on Artificial Intelligence, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[12] Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[14] Mark Everingham, SM Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes challenge: A retrospective. International journal of computer vision, 111(1):98-136, 2015. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[15] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.892, + 0.289 + ], + "angle": 0, + "content": "[17] Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Yuan Yao, Ao Zhang, Liang Zhang, et al. Pre-trained models: Past, present and future. AI Open, 2:225-250, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.293, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[18] Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. Towards a unified view of parameter-efficient transfer learning. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.35, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.42, + 0.892, + 0.475 + ], + "angle": 0, + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[21] Xuehai He, Chunyuan Li, Pengchuan Zhang, Jianwei Yang, and Xin Eric Wang. Parameter-efficient fine-tuning for vision transformers. arXiv preprint arXiv:2203.16329, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.534, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[22] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 
2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.605, + 0.892, + 0.686 + ], + "angle": 0, + "content": "[23] Fatsuma Jauro, Haruna Chiroma, Abdulsalam Y Gital, Mubarak Almutairi, M Abdulhamid Shafi'i, and Jemal H Abawajy. Deep learning architectures in emerging cloud computing architectures: Recent development, challenges and next research trend. Applied Soft Computing, 96:106582, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.689, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[24] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. arXiv preprint arXiv:2203.12119, 2022. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[25] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[26] Christoph Käding, Erik Rodner, Alexander Freytag, and Joachim Denzler. Fine-tuning deep neural networks in continuous learning scenarios. In *Asian Conference on Computer Vision*, pages 588–605. Springer, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pages 554–561, 2013. 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20124" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[29] Fanfan Liu, Haoran Wei, Wenzhe Zhao, Guozhen Li, Jingquan Peng, and Zihao Li. Wb-detr: Transformer-based detector without backbone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2979-2987, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.471, + 0.303 + ], + "angle": 0, + "content": "[30] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.471, + 0.36 + ], + "angle": 0, + "content": "[31] Yen-Cheng Liu, Chih-Yao Ma, Junjiao Tian, Zijian He, and Zsolt Kira. Polyhistor: Parameter-efficient multi-task adaptation for dense vision tasks. arXiv preprint arXiv:2210.03265, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.47, + 0.432 + ], + "angle": 0, + "content": "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.471, + 0.489 + ], + "angle": 0, + "content": "[33] Cheng Long Li, Andong Lu, Ai Hua Zheng, Zhengzheng Tu, and Jin Tang. Multi-adapter rgbt tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pages 0-0, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.47, + 0.574 + ], + "angle": 0, + "content": "[34] Yuning Mao, Lambert Mathias, Rui Hou, Amjad Alma-hairi, Hao Ma, Jiawei Han, Scott Yih, and Madian Khabsa. Unipelt: A unified framework for parameter-efficient language model tuning. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6253-6264, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.47, + 0.631 + ], + "angle": 0, + "content": "[35] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pages 722-729. IEEE, 2008. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.47, + 0.674 + ], + "angle": 0, + "content": "[36] Matthew E Peters, Sebastian Ruder, and Noah A Smith. To tune or not to tune? adapting pretrained representations to diverse tasks. arXiv preprint arXiv:1903.05987, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.676, + 0.47, + 0.759 + ], + "angle": 0, + "content": "[37] Jonas Pfeiffer, Aishwarya Kamath, Andreas Rückle, Kyunghyun Cho, and Iryna Gurevych. Adapterfusion: Nondestructive task composition for transfer learning. In 16th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2021, pages 487-503. Association for Computational Linguistics (ACL), 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[38] Jonas Pfeiffer, Andreas Rücklé, Clifton Poth, Aishwarya Kamath, Ivan Vulić, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. Adapterhub: A framework for adapting transformers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 46-54, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[39] Jonathan Pilault, Christopher Pal, et al. Conditionally adaptive multi-task learning: Improving transfer learning in nlp using fewer parameters & less data. In International Conference on Learning Representations, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[40] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.216 + ], + "angle": 0, + "content": "[41] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.219, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[42] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.275, + 0.892, + 0.343 + ], + "angle": 0, + "content": "[43] Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, André Susano Pinto, Daniel Keysers, and Neil Houlsby. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34:8583-8595, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[44] Andreas Rücklé, Gregor Geigle, Max Glockner, Tilman Beck, Jonas Pfeiffer, Nils Reimers, and Iryna Gurevych. Adapterdrop: On the efficiency of adapters in transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7930-7946, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[45] Omer Sagi and Lior Rokach. Ensemble learning: A survey. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery, 8(4):e1249, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[46] Chompunuch Sarasaen, Soumick Chatterjee, Mario Breitkopf, Georg Rose, Andreas Nurnberger, and Oliver Speck. Fine-tuning deep learning model parameters for improved super-resolution of dynamic mri with prior-knowledge. Artificial Intelligence in Medicine, 121:102196, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.54, + 0.892, + 0.594 + ], + "angle": 0, + "content": "[47] Jaime Sevilla, Lennart Heim, Anson Ho, Tamay Besiroglu, Marius Hobbahn, and Pablo Villalobos. Compute trends across three eras of machine learning. arXiv preprint arXiv:2202.05924, 2022.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.596, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[48] Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[49] Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3645–3650, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.748, + 0.892, + 0.831 + ], + "angle": 0, + "content": "[50] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.899 + ], + "angle": 0, + "content": "[51] Yi-Lin Sung, Jaemin Cho, and Mohit Bansal. Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5227-5237, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20125" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[52] B Thilagavathi, K Suthendran, and K Srujanraju. Evaluating the adaboost algorithm for biometric-based face recognition. In Data Engineering and Communication Technology, pages 669-678. Springer, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[53] Edna Chebet Too, Li Yujiang, Sam Njuki, and Liu Yingchun. A comparative study of fine-tuning deep learning models for plant disease identification. Computers and Electronics in Agriculture, 161:272-279, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.47, + 0.261 + ], + "angle": 0, + "content": "[54] Thijs Vogels, Sai Praneeth Karimireddy, and Martin Jaggi. Practical low-rank communication compression in decentralized deep learning. Advances in Neural Information Processing Systems, 33:14171-14181, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.263, + 0.469, + 0.303 + ], + "angle": 0, + "content": "[55] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.469, + 0.373 + ], + "angle": 0, + "content": "[56] Xudong Wang, Zhaowei Cai, Dashan Gao, and Nuno Vasconcelos. Towards universal object detection by domain attention. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7289-7298, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.376, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[57] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.434, + 0.469, + 0.487 + ], + "angle": 0, + "content": "[58] Sha Yuan, Hanyu Zhao, Shuai Zhao, Jiahong Leng, Yangxiao Liang, Xiaozhi Wang, Jifan Yu, Xin Lv, Zhou Shao, Jiaao He, et al. A roadmap for big model. arXiv preprint arXiv:2203.14101, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.489, + 0.469, + 0.558 + ], + "angle": 0, + "content": "[59] Aston Zhang, Yi Tay, SHUAI Zhang, Alvin Chan, Anh Tuan Luu, Siu Hui, and Jie Fu. Beyond fully-connected layers with quaternions: Parameterization of hypercomplex multiplications with \\(1/n\\) parameters. In International Conference on Learning Representations, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.56, + 0.469, + 0.615 + ], + "angle": 0, + "content": "[60] Chaoning Zhang, Chenshuang Zhang, Junha Song, John Seon Keun Yi, Kang Zhang, and In So Kweon. A survey on masked autoencoder for self-supervised learning in vision and beyond. arXiv preprint arXiv:2208.00173, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.617, + 0.469, + 0.671 + ], + "angle": 0, + "content": "[61] Jianwei Zhao, Yongbiao Lv, Zhenghua Zhou, and Feilong Cao. A novel deep learning algorithm for incomplete face recognition: Low-rank-recovery network. 
Neural Networks, 94:115-124, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.673, + 0.469, + 0.741 + ], + "angle": 0, + "content": "[62] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20126" + } + ] +] \ No newline at end of file diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_origin.pdf b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ac4800dae9fccc636e4a09bd5bed7d75ca7a6a97 --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/3b75c6c9-33bc-4e41-9df3-2e14ac85ef59_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fc9f6430cecf54e04bc50637a348599005eb52434349086aeebda29c091e51e +size 996453 diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/full.md b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..038e9ad68dac7eb8fa5d5a1a1e26535e927a319a --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/full.md @@ -0,0 +1,359 @@ +# 1% VS 100%: Parameter-Efficient Low Rank Adapter for Dense Predictions + +Dongshuo Yin $^{1,2,\dagger}$ , Yiran Yang $^{1,2,\dagger}$ , Zhechao Wang $^{1,2}$ , Hongfeng Yu $^{1}$ , Kaiwen Wei $^{1,2}$ , Xian Sun $^{1,2,*}$ $^{1}$ Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences + $^{2}$ School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences + +{yindongshuo19, yangyiran19, wangzhechao21, weikaiwen19}@mails.ucas.ac.cn {yuhf, sunxian}@aircas.ac.cn + +# Abstract + +Fine-tuning large-scale pre-trained vision models to downstream tasks is a standard technique for achieving state-of-the-art performance on computer vision benchmarks. However, fine-tuning the whole model with millions of parameters is inefficient as it requires storing a samesized new model copy for each task. In this work, we propose LoRand, a method for fine-tuning large-scale vision models with a better trade-off between task performance and the number of trainable parameters. LoRand generates tiny adapter structures with low-rank synthesis while keeping the original backbone parameters fixed, resulting in high parameter sharing. To demonstrate LoRand's effectiveness, we implement extensive experiments on object detection, semantic segmentation, and instance segmentation tasks. By only training a small percentage (1% to 3%) of the pre-trained backbone parameters, LoRand achieves comparable performance to standard fine-tuning on COCO and ADE20K and outperforms fine-tuning in low-resource PASCAL VOC dataset. + +# 1. Introduction + +With the rapid development of computer vision, parameters in deep models are surging. 
Giant models need to be trained with massive resources to achieve superior performance [3, 17, 47, 58], which is often unavailable to many academics and institutions. "Pretrain & Finetuning" paradigm is widely used to alleviate this dilemma. Teams with sufficient computation resources utilise enormous datasets [2, 9, 40, 50] to train superior backbones [4, 32, 40, 48] and optimise the models with ideal performances. Models pretrained in this way usually have a su + +![](images/284244850d213821e678546c56bd87e129870b5533c18d88c83c8caed88f03ff.jpg) +Figure 1. Comparisons of trainable backbone parameters between our methods (red) and fine-tuning (black). In COCO, we achieve advanced performances and outperform most existing backbones with only $0.9\sim 2.5\mathrm{M}$ new backbone parameters (Cascade-RCNN is employed as the detector). The fine-tuning paradigm produces massive redundant backbone parameters, whereas our approach saves over $97\%$ of hardware resources with competitive performances. The sizes of the circles intuitively compare the number of trainable parameters. + +perior understanding of homogeneous data. After that, researchers with limited computational resources can transfer the understanding capabilities of the pre-trained models to downstream tasks with promising performances by finetuning [1,26,46,53]. + +However, the fine-tuned model will produce a new set of parameters as large as the pre-trained model. New parameters are independent of the pre-trained models and unshareable, which are very hardware intensive for cloud service providers [23, 49]. Figure 1 compares the parameter quantities of some remarkable backbones and their performances on the COCO [28] dataset. Recent advances in natural language processing (NLP) [30, 38] show that large pre-trained models trained with rich data have strong gener + +![](images/b58c6b179031f202b13762129589750230c30d575d80af5f850f0a002de939e8.jpg) +Swin-Transformer Block + +![](images/7edbd25abc95b7d7da6fcc332f29fee39ad6e9b53dd1dfb97cf12b8d90b381de.jpg) +LoRand Layer +Figure 2. Architecture of the adapter module and its integration with the Transformer. Left: We add two LoRand structures to each SwinBlock located behind the W/SW-MSA and MLP structures respectively. Right: LoRand contains two Multi-branch low-rank projections and nonlinearity. We include skip-connection to LoRand to enhance its robustness. + +alisability, which means most parameters in the pre-trained models can be shared with the new tasks [22, 36, 37, 44, 59]. Moreover, recent literature demonstrates that the feature understanding of pre-trained models could be reduced when they are fine-tuned in low-resource situations [12, 36]. To tackle these issues, NLP researchers propose two new training paradigms based on pre-trained models: Adapter Tuning [22] and Prompt Tuning [30], both of which tune the new models by fixing the pre-trained parameters and adding a few trainable structures (less than $10\%$ of the backbone). These paradigms create a new buzz in NLP and achieve impressive performances which can be competitive with finetuning [12, 22, 30, 36-38, 44, 59]. Advances in NLP also shed new light on computer vision. Jia et al. [24] propose Visual Prompt Tuning (VPT) and demonstrate that VPT can outperform fine-tuning on image classification tasks by training a small number of trainable parameters. Nevertheless, VPT shows weakness on more challenging dense predictions like semantic segmentation compared with finetuning [24]. 
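As a concrete reference for the adapter-tuning paradigm discussed above, the following PyTorch-style sketch shows a conventional bottleneck adapter and the parameter-freezing step. It is an illustrative re-implementation based on the description in [22] and Sec. 3.2 below, not code released with this work; the class name, the `mid_dim` argument, and the `freeze_backbone` helper are hypothetical.

```python
import torch
import torch.nn as nn


class BottleneckAdapter(nn.Module):
    """Conventional adapter [22]: down-projection, nonlinearity, up-projection, skip-connection."""

    def __init__(self, dim: int, mid_dim: int):
        super().__init__()
        self.down = nn.Linear(dim, mid_dim)  # D^l: project tokens into a small bottleneck
        self.up = nn.Linear(mid_dim, dim)    # U^l: project back to the block dimension
        self.act = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The residual branch keeps the frozen backbone's features intact at initialisation.
        return self.up(self.act(self.down(x))) + x


def freeze_backbone(backbone: nn.Module, adapters: nn.Module) -> None:
    """Adapter tuning: pre-trained weights are frozen; only adapters (plus decoder/head) are trained."""
    for p in backbone.parameters():
        p.requires_grad = False
    for p in adapters.parameters():
        p.requires_grad = True
```

In the baselines defined in Sec. 4.1, ADAPTER-B and ADAPTER-T correspond to setting this bottleneck width to one half and one quarter of the input dimension, respectively.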
+ +To find a parameter-efficient paradigm with promising performance in computer vision, we explore the potential of Adapter Tuning for visual dense predictions. We employ the advanced Swin Transformer [32] trained with ImageNet-22K [9] as the pre-trained model. After that, we add bottleneck adapter structures [22] behind each SwinBlock and freeze the original backbone parameters when training, but this approach cannot achieve comparable performance to fine-tuning as mentioned in [24]. In the experi + +periments, we find that the models perform better with sparser adapter structures. To improve the performance of Adapter Tuning, we propose Low-Rank Adapter (LoRand) to reduce the adapter parameters, as shown in Figure 2. LoRand sparsely parameterizes the matrices in adapters by low-rank synthesis. Specifically, the projection matrix of the fully-connected layer (FC) in LoRand is a product of multiple low-rank matrices, which reduces FC parameters by more than $80\%$ . We implement extensive experiments on object detection (PASCAL VOC [14]), semantic segmentation (ADE20K [62]), and instance segmentation (MS COCO [28]) to verify the capability of LoRand. Experimental results show that LoRand-Tuning is comparable to fine-tuning on multiple tasks with only $1.8\%$ to $2.8\%$ new backbone parameters, which suggests that the pre-trained backbone parameters can be fully shared. More interestingly, our method completely outperforms fine-tuning on the PASCAL VOC dataset, illustrating that LoRand-Tuning can reduce the impairment of fine-tuning on pre-trained models in low-resource configurations. Our method demonstrates that the LoRand-Tuning paradigm can substantially save storage resources and achieve competitive performances on most dense prediction tasks. In summary, our contributions are three-fold: + +- We demonstrate that visual pre-trained models are highly generalisable and shareable. With our training methods, new tasks require only a few trainable parameters to achieve performances comparable to finetuning, which can save massive hardware resources. +- We propose the LoRand structure for sparser adapters based on low-rank synthesis. We demonstrate that the backbone parameters in fine-tuning are highly redundant, which can be replaced by $1.8\%$ to $2.8\%$ additional parameters in LoRand. +- Extensive experiments on object detection, semantic segmentation, and instance segmentation show that LoRand-Tuning can achieve remarkable performances and reduce massive new parameters in challenging dense prediction tasks. + +# 2. Related Work + +# 2.1. Training Paradigms in NLP + +Computer vision has been continuously inspired by NLP in recent years, including the visual transformer series [5,13,29,32] and self-supervised MAE series [15,19,60]. In fact, NLP is leading new training trends different from finetuning. Fine-tuning produces a new parameter set for each new task, which is parametrically inefficient for plenty of linguistic tasks [22,30]. To solve this problem, [30] and [22] have proposed "Prompt Tuning" and "Adapter Tuning" respectively, both of which fix all parameters of the backbone + +and plug a few tiny trainable structures (less than $10\%$ of the backbone) to adapt the pre-trained model to the new tasks. "Prompt tuning" adds learnable parameters (also known as prompts) to the input or intermediate layers to change the input space of the new tasks. "Prompts" can motivate the model to remember knowledge learned in the previous tasks. 
"Adapter tuning" adds learnable bottleneck structures after each block to connect the pre-trained model with new tasks. Adapter and prompt demonstrate the coexistence of parameter efficiency and high performances in NLP, stimulating studies in CV. [24] proposes Visual Prompt Tuning (VPT) for image classification and semantic segmentation, but the performance of VPT on semantic segmentation is still far from fine-tuning. This phenomenon motivates us to explore whether adapter tuning can bring a new paradigm in computer vision with fewer parameters and better performances. In this work, we try to explore parameter-efficient and high-performance adapter structures. + +# 2.2. Adapter Tuning + +Adapters have been widely studied in NLP. Houlsby et al. [22] first add a bottleneck adapter structure to the transformer blocks and fix the original backbone, which achieves comparable performances to fine-tuning. Figure 3 illustrates the differences between fine-tuning and adaptertuning. [37,44,59] further reduce parameters in the adapter with closer performances to fine-tuning. [18,34,39] outperform fine-tuning on low-resource tasks, demonstrating that more parameters may not improve performance when finetuning pre-trained models [36]. In computer vision, [41] add convolutional adapters to the ResNet [20] and obtain competitive results in image classification. Adapter concept has also been applied in multimodal [33], vision-and-language [51], and domain adaptation [56], but these methods are only applicable under specific conditions. [7, 21, 25, 31] investigate the potential of adapter-tuning for visual classification. [8] apply the adapter structure to visual dense predictions without fixing any original parameters, which indeed trades more parameters for better performances. + +# 2.3. Low-rank Approximation + +The low-rank approximation uses multiple low-dimensional tensors to approximate a larger tensor with higher dimensions. Tensor dimensions and sizes in machine learning are very large, so low-rank approximations are widely used in face recognition [61], distributed training [54], transfer learning [11], and cross-domain [10]. A $b \times c$ matrix $M$ can be approximated with $N$ low-rank matrices $Q$ by the following equation: + +$$ +M _ {b \times c} = \prod_ {i = 1} ^ {N} Q _ {r _ {i} \times s _ {i}}, \tag {1} +$$ + +![](images/076b68aad349b00b6a3bfa1feb3c01031b3a22e132f2f3e1d5dbafcabaff3fd7.jpg) +Figure 3. Comparison between Adapter-Tuning and Fine-Tuning paradigms. Fine-Tuning tunes ( $\mathcal{A}$ ) all parameters delivered by the pre-trained model. Adapter-Tuning freezes ( $\mathcal{A}$ ) all structures and parameters in the pre-trained model and only trains ( $\mathcal{A}$ ) the additional parameters in adapters. Parameters in the decoder and head are trainable in both paradigms. + +where $N$ has different values depending on the approximation methods, we implement low-rank approximation of the adapter matrices by heuristic learning. + +# 3. Method + +In this section, we will elaborate on the proposed low-rank adapter (LoRand) in three parts: adapter tuning paradigm, LoRand, and parameter analysis. + +# 3.1. 
Adapter Tuning Paradigm + +For dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{N}$ , fine-tuning calculates the loss between inference results and labels according to the formula: + +$$ +L (D, \theta) = \sum_ {i = 1} ^ {N} \operatorname {l o s s} \left(f _ {\theta} \left(x _ {i}\right), y _ {i}\right), \tag {2} +$$ + +where $f_{\theta}$ denotes the network forward function and loss represents the loss function. After that, $\theta$ is optimized through + +$$ +\theta \leftarrow \underset {\theta} {\arg \min } L (D, \theta). \tag {3} +$$ + +In adapter tuning paradigm, parameters consist of two parts, including parameters in adapter $\theta_{A}$ and parameters in the original architecture $\theta$ . Here, $\theta$ is further divided into frozen part $\theta_{F}$ and trainable part $\theta_{T}$ , noted as $\theta = \{\theta_{F},\theta_{T}\}$ . Let $\Omega$ be all the trainable parameters, then $\Omega = \{\theta_{A},\theta_{T}\}$ . The loss function and optimization formula in adapter can be written as: + +$$ +L \left(D, \theta_ {F}, \Omega\right) = \sum_ {i = 1} ^ {N} \operatorname {l o s s} \left(f _ {\theta_ {F}, \Omega} \left(x _ {i}\right), y _ {i}\right), \tag {4} +$$ + +![](images/1121da732dac8d417df762986ffe346f6c0ca9d44752793fbadaa91c64385b68.jpg) +Figure 4. Left: Multi-branch projection in LoRand. The down-projection $W^{D}$ and up-projection $W^{U}$ matrices are the summation of $\alpha$ branches $W_{1}^{D}(W_{1}^{U})\ldots W_{\alpha}^{D}(W_{\alpha}^{U})$ . $K_{i}$ in $i$ -th branch is shared between $W_{i}^{D}$ and $W_{i}^{U}$ . All the $P, Q,$ and $K$ are trainable, while all the $W$ matrices are calculated. Right: Comparisons of the same-sized projection matrices between LoRand and Adapter. $(m,n)$ in the table are typical values in SwinBlocks. LoRand has far fewer parameters than Adapter. With the same projection dimension, LoRand saves over 80% parameters of the Adapter in Swin Transformers. $(\alpha ,\beta)$ here are (2,8), the same as the experiments. + +
| $(m,n)$ | $P_{LoRand}$ | $P_{Adapter}$ | % |
| --- | --- | --- | --- |
| (96, 48) | 4736 | 9216 | 51.39% |
| (192, 96) | 9344 | 36864 | 25.35% |
| (384, 192) | 18560 | 147456 | 12.59% |
| (768, 384) | 36992 | 589824 | 6.27% |
| … | … | … | … |
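The multi-branch low-rank projection of Figure 4, together with the per-module parameter counts in the table above, can be illustrated with a short PyTorch-style snippet. This is a hypothetical sketch written from Figures 2 and 4 and the formulas of Secs. 3.2 and 3.3 that follow (Eqs. (6)-(11) and (14)), not the released implementation; parameter initialisation and the orientation of the low-rank factors are assumptions.

```python
import torch
import torch.nn as nn


def _branch_params(branches: int, rows: int, cols: int) -> nn.ParameterList:
    # One low-rank factor per branch; the small random initialisation is an assumption.
    return nn.ParameterList(
        [nn.Parameter(0.02 * torch.randn(rows, cols)) for _ in range(branches)]
    )


class LoRandLayer(nn.Module):
    """Sketch of a LoRand layer: two projections synthesised as W = sum_i P_i^T K_i Q_i,
    with the kernel K_i shared between the down- and up-projection of each branch,
    followed by GELU and a skip-connection (cf. Figure 2)."""

    def __init__(self, dim: int, mid: int, branches: int = 2, rank: int = 8):
        super().__init__()
        self.p_down = _branch_params(branches, rank, dim)    # P_i^D in R^{beta x m}
        self.q_down = _branch_params(branches, rank, mid)    # Q_i^D in R^{beta x n}
        self.p_up = _branch_params(branches, rank, mid)      # P_i^U in R^{beta x n}
        self.q_up = _branch_params(branches, rank, dim)      # Q_i^U in R^{beta x m}
        self.kernels = _branch_params(branches, rank, rank)  # K_i, shared by W^D and W^U
        self.act = nn.GELU()

    @staticmethod
    def _synthesise(p, kernels, q):
        # Each projection matrix is the sum over branches of P_i^T K_i Q_i.
        return sum(pi.t() @ ki @ qi for pi, ki, qi in zip(p, kernels, q))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        w_down = self._synthesise(self.p_down, self.kernels, self.q_down)  # (dim, mid)
        w_up = self._synthesise(self.p_up, self.kernels, self.q_up)        # (mid, dim)
        return self.act(x @ w_down) @ w_up + x                             # bottleneck + skip-connection


def params_per_module(m: int, n: int, alpha: int = 2, beta: int = 8):
    lorand = 2 * alpha * beta * (m + n + beta / 2)  # per-LoRand count with shared kernels
    adapter = 2 * m * n                             # conventional adapter count (Sec. 3.3)
    return int(lorand), adapter


for m, n in [(96, 48), (192, 96), (384, 192), (768, 384)]:
    lo, ad = params_per_module(m, n)
    print(f"({m},{n}): LoRand={lo}, Adapter={ad}, ratio={lo / ad:.2%}")
```

With the standard setting $\alpha = 2$, $\beta = 8$, the loop reproduces the ratios listed in the table above (51.39%, 25.35%, 12.59%, 6.27%).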
+ +$$ +\Omega \leftarrow \underset {\Omega} {\arg \min } L (D, \theta_ {F}, \Omega). \tag {5} +$$ + +# 3.2. LoRand + +Before introducing LoRand, we first review the existing adapter structure. Conventional adapters are bottleneck structures containing a down-projection, an up-projection, and a non-linear activation function. Besides, adapters ensure the robustness of the model by adding residual [20] structures. Adapter layer can be formulated as follows: + +$$ +A ^ {l} = U ^ {l} \left(G e L U (D ^ {l} (x))\right) + x, \tag {6} +$$ + +where $U^l$ and $D^l$ represent the up and down projections in the $l$ -th adapter layer, and GeLU is the activation function. It is clear that the parameters in adapter come from the projections. The projection process can be written as: + +$$ +y = W x + b, \tag {7} +$$ + +which means most adapter parameters are in $W$ . + +To reduce the adapter parameters, we propose a low-rank adapter (LoRand) structure to replace the $W$ in the projection structures. Figure 2 shows the simplified structure of LoRand. Here we approximate not a specific matrix $W$ but an ideal matrix $W_{best}$ that can transform the feature space of the pre-trained model into new tasks by heuristic learning. The approximation matrix $\hat{W}$ has the same size as $W$ , but the low-rank design makes $\hat{W}$ have far fewer free degrees than a common $W$ . + +Specifically, we synthesize each $W$ by multiplying three low-rank matrices $P \in \mathbb{R}^{\beta \times m}$ , $K \in \mathbb{R}^{\beta \times \beta}$ , $Q \in \mathbb{R}^{\beta \times n}$ + +that is: + +$$ +W = P ^ {T} K Q, \tag {8} +$$ + +where $\beta \ll \min(m, n)$ ensuring that $P$ and $Q$ are low-rank matrices. $K$ can be regarded as a kernel matrix that controls the parameter size of LoRand. + +After that, we add multi-branch structures to LoRand to increase the robustness and stability of low-rank matrices, which is inspired by MoE [43] and adaboost [45,52]. Every $W$ consists of $\alpha$ branches, that is: + +$$ +W = \sum_ {i = 1} ^ {\alpha} W _ {i} = \sum_ {i = 1} ^ {\alpha} P _ {i} ^ {T} K _ {i} Q _ {i}. \tag {9} +$$ + +In addition, we share the kernel matrix $K$ of the two projection layers within each branch. We hope the sharing mechanism can promote the coherence of two projection layers during training process. Besides, the shared $K$ also slightly reduces the number of LoRand parameters. Up to now, the $W^{U}$ and $W^{D}$ in a complete LoRand structure can be represented as: + +$$ +W ^ {U} = \sum_ {i = 1} ^ {\alpha} W _ {i} ^ {U} = \sum_ {i = 1} ^ {\alpha} \left(P _ {i} ^ {U}\right) ^ {T} K _ {i} Q _ {i} ^ {U}, \tag {10} +$$ + +$$ +W ^ {D} = \sum_ {i = 1} ^ {\alpha} W _ {i} ^ {D} = \sum_ {i = 1} ^ {\alpha} \left(P _ {i} ^ {D}\right) ^ {T} K _ {i} Q _ {i} ^ {D}, \tag {11} +$$ + +where $K_{i}$ is shared in $W^{U}$ and $W^{D}$ . Figure 4 presents the detailed designs of the multi-branch projection. + +# 3.3. Parameter Analysis + +In this section, we will compare the parameters of Lo-Rand and typical adapter [22] with the same size of projection matrix. + +Adapter Let $m$ be the input dimension of the adapter and $n$ be the middle layer dimension after down projection. Then the number of parameters in each adapter is $2mn$ (ignoring the few biases). In general, adapter tuning places two adapter modules in each block, so the space complexity of all adapter parameters in $\gamma$ blocks can be written as: + +$$ +O (4 \gamma m n). 
\tag {12} +$$ + +LoRand According to section 3.2, each $W$ contains $\alpha$ sets of $\{P,Q,K\}$ , that is: + +$$ +\alpha \left(m \beta + \beta^ {2} + n \beta\right). \tag {13} +$$ + +Each LoRand consists of two $W$ and $\alpha$ shared $K$ , so the parameter quantity of each LoRand is: + +$$ +2 \alpha (m \beta + \beta^ {2} + n \beta) - \alpha \beta^ {2} = 2 \alpha \beta (m + n + \beta / 2). \tag {14} +$$ + +Each block has two LoRand structures, so the number of parameters in $\gamma$ blocks is: + +$$ +4 \alpha \beta \gamma (m + n) + 2 \alpha \beta^ {2} \gamma . \tag {15} +$$ + +As $\alpha, \beta, \gamma \ll \min(m, n)$ , the space complexity here can be written as: + +$$ +O \left(4 \alpha \beta \gamma (m + n)\right). \tag {16} +$$ + +Comparison between Formulas 12 and 16 can be simplified as: + +$$ +O (m n), \tag {17} +$$ + +and + +$$ +O (\alpha \beta (m + n)). \tag {18} +$$ + +Given that $\alpha, \beta \ll \min(m, n)$ , the space complexity of LoRand is far lower than the typical adapter. The table in Figure 4 illustrates that LoRand saves most Adapter parameters with the same projecting dimension. + +# 4. Experiments + +We evaluate LoRand on multiple dense prediction tasks, including object detection, semantic segmentation, and instance segmentation. We also evaluate LoRand under low-resource conditions. We first describe our experimental setup in Section 4.1, including pre-trained backbones, baselines, LoRand settings, and downstream tasks. Then we present the main results of three benchmarks in Section 4.2. We also implement ablation study in Section 4.3 to investigate the impact of structural settings in LoRand. + +# 4.1. Experimental Setup + +Pretrained Backbones We conduct experiments on the advanced Swin Transformer [32] architectures. All backbones in this section are pre-trained by ImageNet-22k [9]. Pre-trained models are provided by OpenMMLab [6]. + +Baselines We compare LoRand with three other common training methods: + +(a) FULL: update all parameters in the architecture. +(b) FIXED: fix pre-trained parameters in Swin and train other parts of the architecture (neck, head). +(c) ADAPTER: add two trainable adapter structures in each SwinBlock following [22], and freeze other parts of the backbone. We evaluate two forms of adapter with different middle layer dimensions $(D_{ML})$ : +- ADAPTER-B: $D_{ML}$ is a half of input dimension. +- ADAPTER-T: $D_{ML}$ is a quarter of input dimension. + +LoRand Settings We conducted experiments on three Lo-Rand variants, which have different branch numbers $\alpha$ and kernel matrix dimensions $\beta$ . + +- LoRand: $\alpha = 2$ , $\beta = 8$ (Standard). +- LoRand+: $\alpha = 4, \beta = 8$ . +- LoRand++: $\alpha = 4, \beta = 16$ . + +Downstream Tasks We conducted experiments on COCO [28], ADE20K [62], and PASCAL VOC [14] benchmarks to widely evaluate LoRand's performance on main dense prediction tasks. + +COCO 2017 [28] is the most commonly used dataset for object detection and instance segmentation, which contains 118K training and 5K validation images. We perform experiments on the validation set. For a fair comparison, all experiments performed on COCO employ Cascade MASK R-CNN [32] as the detector. + +ADE20K [62] is the most widely used semantic segmentation dataset, which contains 20K training and 2K validation images. We also conduct experiments on the ADE20K validation set and utilise UperNet [57] as the framework. 
+ +PASCAL VOC 0712 [14] is also widely used in object detection, which contains about 16K training and 5K validation images. VOC 0712 is much smaller than the latest benchmarks, so we treat it as a low-resource case. We adopt Faster RCNN [42] as the detector for VOC 0712. + +All our experiments are conducted with 8x NVIDIA Tesla V100 GPUs. The experiments on PASCAL VOC and + +
| Swin-L (198M) | Trained Params* | % | Δ Full | Extra Structure | Pascal VOC (Faster RCNN) AP$_{Box}$ | Δ LoRand | ADE20K (UperNet) mIoU | Δ LoRand |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| *Baselines* | | | | | | | | |
| FULL | 198.58 M | 100.00 % | - | ✗ | 84.43 % | -2.69 % | 53.25 % | +1.34 % |
| FIXED | 0.00 M | 0.00 % | -100.00 % | ✗ | 85.19 % | -1.93 % | 32.21 % | -19.70 % |
| ADAPTER-B | 32.04 M | 16.13 % | -83.87 % | ✓ | 80.93 % | -6.19 % | 46.23 % | -5.68 % |
| ADAPTER-T | 16.04 M | 8.08 % | -91.92 % | ✓ | 78.10 % | -9.02 % | 43.51 % | -8.40 % |
| *Our Methods* | | | | | | | | |
| LORAND | 3.59 M | 1.84 % | -98.16 % | ✓ | 87.12 % | - | 50.67 % | - |
| LORAND+ | 7.19 M | 3.62 % | -96.38 % | ✓ | 87.63 % | +0.51 % | 51.13 % | +0.46 % |
| LORAND++ | 14.24 M | 7.17 % | -92.83 % | ✓ | 88.11 % | +0.99 % | 51.87 % | +1.20 % |
+ +Table 1. Results of baselines and our methods on Pascal VOC and ADE20K benchmarks. Swin-L is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones. + +
| Swin-B (89M) | Trained Params* | % | Δ Full | Extra Structure | COCO (Cascade Mask R-CNN) AP$_{Box}$ | Δ LoRand | AP$_{Mask}$ | Δ LoRand |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| *Baselines* | | | | | | | | |
| FULL | 89.14 M | 100.00 % | - | ✗ | 51.90 % | +0.80 % | 45.00 % | +0.90 % |
| FIXED | 0.00 M | 0.00 % | -100.00 % | ✗ | 15.30 % | -35.80 % | 10.80 % | -33.8 % |
| ADAPTER-B | 14.38 M | 16.13 % | -83.87 % | ✓ | 46.50 % | -4.60 % | 40.20 % | -3.90 % |
| ADAPTER-T | 7.20 M | 8.08 % | -91.92 % | ✓ | 43.20 % | -7.90 % | 38.70 % | -5.40 % |
| *Our Methods* | | | | | | | | |
| LORAND | 2.39 M | 2.76 % | -97.24 % | ✓ | 51.10 % | - | 44.10 % | - |
| LORAND+ | 4.73 M | 5.31 % | -94.69 % | ✓ | 51.20 % | +0.10 % | 44.30 % | +0.20 % |
| LORAND++ | 9.32 M | 10.46 % | -89.54 % | ✓ | 51.50 % | +0.40 % | 44.40 % | +0.30 % |
+ +Table 2. Results of baselines and our methods on COCO benchmarks. Swin-B is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones. + +ADE20K are based on Swin-S, Swin-B, and Swin-L pretrained models. Limited by GPU memory, the COCO experiments are based on Swin-T, Swin-S, and Swin-B. + +# 4.2. Main Results + +We first compare the trainable backbone parameters and performance of these methods on three benchmarks in Tables 1 and 2. Table 1 shows the results of PASCAL VOC and ADE20K datasets based on Swin-L, and Table 2 shows the results of COCO based on Swin-B. From Tables 1 and 2, we can see that: + +1) LoRand can effectively address the dilemma of fine-tuning in low-resource situations. Table 1 shows that FIXED outperforms FULL on the PASCAL VOC dataset, which implies that the powerful generalization ability of pre-trained model is severely weakened during fine-tuning. Fine-tuning with low-resource data reduces the feature understanding of pre-trained models, which leads to the poor performance on downstream tasks. LoRand avoids this dis + +advantage by fixing the original parameters. More importantly, LoRand can absorb features from the new data by its smaller trainable structures. Table 1 indicates that LoRand outperforms FULL and FIXED by $2.69\%$ and $1.93\%$ on the low-resource dataset with only $1.84\%$ trainable backbone parameters. LoRand+ and LoRand++ also outperform FULL by $3.2\%$ and $3.68\%$ with $3.62\%$ and $7.17\%$ backbone parameters. In fact, there are many other common computer vision datasets with similar volumes to the PASCAL VOC, including CUB-200-2011 [55], Oxford 102 Flowers [35], Stanford Cars [27], and Caltech-256 [16]. The prevalence of "Pretrained & Finetuning" leads us to focus more on giant benchmarks, but Table 1 suggests we need a better training paradigm to cope with many low-resource situations in industrial applications. LoRand-Tuning proves to be a competitive candidate who brings promising performance and parameter-efficient approaches to low-resource cases. +2) LoRand effectively balances the number of trainable backbone parameters and downstream task per + +formance. Tables 1 and 2 demonstrate that LoRand (standard) performs very closely to FULL on large benchmarks with only $1.84\%$ to $2.76\%$ trainable parameters. By tuning less than 3.6M backbone parameters, LoRand (standard) achieves $50.67\%$ (mIOU) on ADE20K, and $51.10\%$ $(\mathrm{AP}_{\mathrm{Box}})$ / $44.10\%$ $(\mathrm{AP}_{\mathrm{Mask}})$ on COCO, which is only about $1.5\%$ off on average compared to FULL. LoRand+ and LoRand++ further reduce the gap between these two paradigms to approximately $1\%$ with slight parameter increases. For Swin-L, LoRand saves about 195M parameters per copy compared to FULL. For Swin-B, LoRand saves about $86\mathrm{M}$ . These results are interesting, which means we do not have to spend plenty of hardware resources to store these redundant parameters. Industrial service providers deliver thousands of model training tasks every day. With LoRand-Tuning, millions of gigabytes per year for model storage could be saved. + +3) LoRand effectively broadens the potential of conventional parameter-efficient adapter structures in dense predictions. From the results, we can draw similar conclusions to [24] that the standard adapter [22] performs worse than fine-tuning on dense predictions. 
Tables 1 and 2 illustrate that the ADAPTER's performance is far from FULL, although it reduces $80\%$ of trainable backbone parameters. Also adding new structures, LoRand achieves comparable performance to FULL by training fewer parameters than the ADAPTER. Overall, Tables 1 and 2 demonstrate the feasibility of parameter-efficient tuning paradigm in visual dense prediction tasks. + +Comparisons with other fine-tuned backbone. We then show the comparisons of LoRand with some other remarkable fine-tuned backbones in Table 3. Table 3a shows the results based on UperNet and ADE20K, and 3b shows the results based on Cascade MASK R-CNN and COCO. Table 3 shows that LoRand (based on Swin-Transformer) can outperform most existing fine-tuned backbones with less than 2M parameters. Compared to these backbones, LoRand not only presents more robust and superior results but also saves massive hardware resources in this era of parameter explosion. Specifically, LoRand (Swin-T) exceeds COCO by $1.9\%$ $\mathrm{(AP_{Box})}$ and $1.2\%$ $\mathrm{(AP_{Mask})}$ with 80.12M fewer new backbone parameters than ResNeXt-101-64. Similarly, LoRand (Swin-L) surpasses $5.82\%$ (mIoU) on ADE20K with 40.41M fewer trainable backbone parameters than ResNet-101. + +Comparisons on different backbone scales. In addition to Swin-L and Swin-B, we also conduct extensive experiments on Swin-S and Swin-T. We illustrate the performance of baselines and LoRand on multiple backbones. Figure 5 shows the performance of the six methods on different backbone scales, which includes three Swin variants for each benchmark. As FIXED's performance on COCO and ADE20K is too low to display, we only show FIXED's re + +(a) Comparisons between LoRand-Tuning and Fine-Tuning on COCO. + +
| Backbone | Trained Params* | AP$_{Box}$ | AP$_{Mask}$ |
| --- | --- | --- | --- |
| *Fine-Tuning Paradigm* | | | |
| ResNet-101 | 44 M | 47.9 % | 41.5 % |
| ResNeXt-101-32 | 40 M | 48.1 % | 41.6 % |
| ResNeXt-101-64 | 81 M | 48.3 % | 41.7 % |
| DeiT-S | 22 M | 48.0 % | 41.4 % |
| Swin-T | 29 M | 50.5 % | 43.7 % |
| Swin-S | 50 M | 51.8 % | 44.7 % |
| Swin-B | 88 M | 51.9 % | 45.0 % |
| *LoRand-Tuning* | | | |
| LoRand (Swin-T) | 0.88 M | 50.2 % | 42.9 % |
| LoRand (Swin-S) | 1.80 M | 50.7 % | 43.8 % |
| LoRand (Swin-B) | 2.39 M | 51.1 % | 44.3 % |
(b) Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K.
| Backbone | Trained Params* | mIoU |
| --- | --- | --- |
| *Fine-Tuning* | | |
| ResNet-18 | 12 M | 39.97 % |
| ResNet-50 | 25 M | 42.78 % |
| ResNet-101 | 44 M | 44.85 % |
| DeiT-S | 22 M | 44.01 % |
| Swin-S | 50 M | 49.30 % |
| Swin-B | 88 M | 51.60 % |
| Swin-L | 197 M | 53.25 % |
| *LoRand-Tuning* | | |
| LoRand (Swin-S) | 1.80 M | 47.33 % |
| LoRand (Swin-B) | 2.39 M | 49.62 % |
| LoRand (Swin-L) | 3.59 M | 50.67 % |
+ +Table 3. Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K and COCO. We fine-tune multiple backbones and compare their performances with LoRand series. Architectures in (a) and (b) are Cascade Mask R-CNN and UperNet. Parameters in decoder and head are updated in both paradigms. * denotes the trainable parameters in backbones. + +sults in the PASCAL VOC. Figure 5 indicates that the performance of most methods improves as the backbone scale gets larger. For the LoRand series, more parameters bring better performance, but it is still challenging to outperform FULL on large datasets. For the ADAPTER, ADAPTER-B performs better than ADAPTER-T, suggesting that adding extra parameters does help improve adapter-tuning performance. Experiments on Swin variants systematically + +![](images/4799d8629c462496de8ce6fadebc123984f57f3c6291d7d764ef3994d63168c8.jpg) + +![](images/f1de32ed5f910da96337d105f6c2a278695e25f9594dff66acc215e4c8e21230.jpg) + +![](images/cf498dcc0f0551d854e6089165108833da42aef14b8789312e63f10306302f53.jpg) + +![](images/780459435f7dcd851bc2a17b3a882ae4f0dd704c150286faeb4d32782ced5b5b.jpg) + +![](images/c8f8fb199094b4d44297a887101b9142ca0aa4a8533d9a78d2804c16a7a98a80.jpg) +Figure 5. Seven methods on different backbone scales. Figures show results on PASCAL VOC, COCO, and ADE20K from left to right. Swin-S, Swin-B, and Swin-L are employed as the pre-trained models for PASCAL VOC and ADE20K. Swin-T, Swin-S, and Swin-B are employed for COCO. FIXED's performances are so low on COCO and ADE20K that they reduce the intuitiveness of the other six methods, so FIXED is only presented in PASCAL VOC comparisons. + +![](images/8beae4f9a09d00f90cecc036a470adcd74773704abd7430320102b6b1e266291.jpg) + +![](images/128d7434bbe4dec4e4d11a3c91b0ddb00bf1ce980db749fe4ff84624fbeb0a25.jpg) + +![](images/80ef1c9114fff622842c4aebee06a2c60b1be5b51ab9208d703be709a5193c33.jpg) +Figure 6. Ablation Study for $\alpha$ and $\beta$ . $\alpha$ ranges from 2, 4, 6, and $\beta$ ranges from 4, 8, 16. Figures from left to right present experiments on three benchmarks respectively. We only present $\mathrm{AP_{Box}}$ changes for COCO benchmark considering the strong correlation between the values of $\mathrm{AP_{Box}}$ and $\mathrm{AP_{Mask}}$ in COCO. + +demonstrate that LoRand can outperform both FULL and traditional adapter structures in low-resource cases and perform very closely to FULL in large benchmarks. + +# 4.3. Ablation Study + +In this section, we ablate two key hyperparameters in LoRand: the LoRand branch number $\alpha$ and the kernel matrix dimension $\beta$ . $\alpha$ affects the distributed decision-making of LoRand, while $\beta$ focuses on a single branch's learning capability and consistency. + +Several sets of ablation experiments are designed and implemented to investigate the effect of $\alpha$ and $\beta$ on the performance of LoRand. The ablation experiments were conducted on the same three benchmarks. In order to improve the upper limit of LoRand, our experiments are conducted on the largest backbone of each dataset (ADE20K/PASCAL VOC: Swin-L, COCO: Swin-B). The value sets of $\alpha$ and $\beta$ are $\{2,4,6\}$ and $\{4,8,16\}$ . Figure 6 shows the results of ablation studies on three datasets. In most cases, LoRand's performance increases slightly as $\alpha$ and $\beta$ become larger but hardly outperforms fine-tuning on large benchmarks. 
Besides, exponentially increasing the size of the LoRand does + +not result in an equivalent performance improvement and even leads to a reduction ( $\alpha = 6$ in VOC and COCO). Ablation studies demonstrate that larger LoRands have fewer gains both in parameter efficiency and performance. We have considered this trade-off when designing the LoRand standard, LoRand+, and LoRand++. + +# 5. Conclusion + +This paper presents LoRand, a parameter-efficient low-rank adapter for dense predictions, which completely shares the feature understanding of advanced pre-trained models and effectively transfers it to downstream tasks. LoRand performs on par with fine-tuning in COCO instance segmentation, ADE20K semantic segmentation, and PASCAL VOC object detection with only $1\%$ to $3\%$ trainable backbone parameters. Moreover, LoRand effectively avoids the disadvantages of the fine-tuning paradigm and delivers better performance in low-resource situations. We hope that parameter-efficient LoRand can save massive redundant storage resources and facilitate a unified training paradigm for vision and language. + +# References + +[1] Caisse Amisse, Mario Ernesto Jijón-Palma, and Jorge Antonio Silva Centeno. Fine-tuning deep learning models for pedestrian detection. *Boletim de Ciências Geólicas*, 27, 2021. 1 +[2] Alexei Baevski, Sergey Edunov, Yinhan Liu, Luke Zettle-moyer, and Michael Auli. Cloze-driven pretraining of self-attention networks. arXiv preprint arXiv:1903.07785, 2019. 1 +[3] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021. 1 +[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 1 +[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2 +[6] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5 +[7] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. arXiv preprint arXiv:2205.13535, 2022. 3 +[8] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 3 +[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 2, 5 +[10] Zhengming Ding and Yun Fu. Deep transfer low-rank coding for cross-domain learning. IEEE transactions on neural networks and learning systems, 30(6):1768-1779, 2018. 3 +[11] Zhengming Ding, Ming Shao, and Yun Fu. Deep low-rank coding for transfer learning. In Twenty-Fourth International Joint Conference on Artificial Intelligence, 2015. 
3 +[12] Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. 2 +[13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 2 + +[14] Mark Everingham, SM Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes challenge: A retrospective. International journal of computer vision, 111(1):98-136, 2015. 2, 5 +[15] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 2 +[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007. 6 +[17] Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Yuan Yao, Ao Zhang, Liang Zhang, et al. Pre-trained models: Past, present and future. AI Open, 2:225-250, 2021. 1 +[18] Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. Towards a unified view of parameter-efficient transfer learning. In International Conference on Learning Representations, 2021. 3 +[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2 +[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3, 4 +[21] Xuehai He, Chunyuan Li, Pengchuan Zhang, Jianwei Yang, and Xin Eric Wang. Parameter-efficient fine-tuning for vision transformers. arXiv preprint arXiv:2203.16329, 2022. 3 +[22] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 5, 7 +[23] Fatsuma Jauro, Haruna Chiroma, Abdulsalam Y Gital, Mubarak Almutairi, M Abdulhamid Shafi'i, and Jemal H Abawajy. Deep learning architectures in emerging cloud computing architectures: Recent development, challenges and next research trend. Applied Soft Computing, 96:106582, 2020. 1 +[24] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. arXiv preprint arXiv:2203.12119, 2022. 2, 3, 7 +[25] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022. 3 +[26] Christoph Käding, Erik Rodner, Alexander Freytag, and Joachim Denzler. Fine-tuning deep neural networks in continuous learning scenarios. In *Asian Conference on Computer Vision*, pages 588–605. Springer, 2016. 1 +[27] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pages 554–561, 2013. 
6 + +[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 1, 2, 5 +[29] Fanfan Liu, Haoran Wei, Wenzhe Zhao, Guozhen Li, Jingquan Peng, and Zihao Li. Wb-detr: Transformer-based detector without backbone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2979-2987, 2021. 2 +[30] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586, 2021. 1, 2 +[31] Yen-Cheng Liu, Chih-Yao Ma, Junjiao Tian, Zijian He, and Zsolt Kira. Polyhistor: Parameter-efficient multi-task adaptation for dense vision tasks. arXiv preprint arXiv:2210.03265, 2022. 3 +[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 1, 2, 5 +[33] Cheng Long Li, Andong Lu, Ai Hua Zheng, Zhengzheng Tu, and Jin Tang. Multi-adapter rgbt tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pages 0-0, 2019. 3 +[34] Yuning Mao, Lambert Mathias, Rui Hou, Amjad Alma-hairi, Hao Ma, Jiawei Han, Scott Yih, and Madian Khabsa. Unipelt: A unified framework for parameter-efficient language model tuning. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6253-6264, 2022. 3 +[35] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pages 722-729. IEEE, 2008. 6 +[36] Matthew E Peters, Sebastian Ruder, and Noah A Smith. To tune or not to tune? adapting pretrained representations to diverse tasks. arXiv preprint arXiv:1903.05987, 2019. 2, 3 +[37] Jonas Pfeiffer, Aishwarya Kamath, Andreas Rückle, Kyunghyun Cho, and Iryna Gurevych. Adapterfusion: Nondestructive task composition for transfer learning. In 16th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2021, pages 487-503. Association for Computational Linguistics (ACL), 2021. 2, 3 +[38] Jonas Pfeiffer, Andreas Rücklé, Clifton Poth, Aishwarya Kamath, Ivan Vulić, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. Adapterhub: A framework for adapting transformers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 46-54, 2020. 1, 2 +[39] Jonathan Pilault, Christopher Pal, et al. Conditionally adaptive multi-task learning: Improving transfer learning in nlp using fewer parameters & less data. In International Conference on Learning Representations, 2020. 3 + +[40] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67, 2020. 1 +[41] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017. 
3 +[42] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 5 +[43] Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, André Susano Pinto, Daniel Keysers, and Neil Houlsby. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34:8583-8595, 2021. 4 +[44] Andreas Rücklé, Gregor Geigle, Max Glockner, Tilman Beck, Jonas Pfeiffer, Nils Reimers, and Iryna Gurevych. Adapterdrop: On the efficiency of adapters in transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7930-7946, 2021. 2, 3 +[45] Omer Sagi and Lior Rokach. Ensemble learning: A survey. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery, 8(4):e1249, 2018. 4 +[46] Chompunuch Sarasaen, Soumick Chatterjee, Mario Breitkopf, Georg Rose, Andreas Nurnberger, and Oliver Speck. Fine-tuning deep learning model parameters for improved super-resolution of dynamic mri with prior-knowledge. Artificial Intelligence in Medicine, 121:102196, 2021. 1 +[47] Jaime Sevilla, Lennart Heim, Anson Ho, Tamay Besiroglu, Marius Hobbahn, and Pablo Villalobos. Compute trends across three eras of machine learning. arXiv preprint arXiv:2202.05924, 2022.1 +[48] Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990, 2022. 1 +[49] Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3645–3650, 2019. 1 +[50] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1 +[51] Yi-Lin Sung, Jaemin Cho, and Mohit Bansal. Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5227-5237, 2022. 3 + +[52] B Thilagavathi, K Suthendran, and K Srujanraju. Evaluating the adaboost algorithm for biometric-based face recognition. In Data Engineering and Communication Technology, pages 669-678. Springer, 2021. 4 +[53] Edna Chebet Too, Li Yujiang, Sam Njuki, and Liu Yingchun. A comparative study of fine-tuning deep learning models for plant disease identification. Computers and Electronics in Agriculture, 161:272-279, 2019. 1 +[54] Thijs Vogels, Sai Praneeth Karimireddy, and Martin Jaggi. Practical low-rank communication compression in decentralized deep learning. Advances in Neural Information Processing Systems, 33:14171-14181, 2020. 3 +[55] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 6 +[56] Xudong Wang, Zhaowei Cai, Dashan Gao, and Nuno Vasconcelos. Towards universal object detection by domain attention. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7289-7298, 2019. 
3 +[57] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5 +[58] Sha Yuan, Hanyu Zhao, Shuai Zhao, Jiahong Leng, Yangxiao Liang, Xiaozhi Wang, Jifan Yu, Xin Lv, Zhou Shao, Jiaao He, et al. A roadmap for big model. arXiv preprint arXiv:2203.14101, 2022. 1 +[59] Aston Zhang, Yi Tay, SHUAI Zhang, Alvin Chan, Anh Tuan Luu, Siu Hui, and Jie Fu. Beyond fully-connected layers with quaternions: Parameterization of hypercomplex multiplications with $1/n$ parameters. In International Conference on Learning Representations, 2020. 2, 3 +[60] Chaoning Zhang, Chenshuang Zhang, Junha Song, John Seon Keun Yi, Kang Zhang, and In So Kweon. A survey on masked autoencoder for self-supervised learning in vision and beyond. arXiv preprint arXiv:2208.00173, 2022. 2 +[61] Jianwei Zhao, Yongbiao Lv, Zhenghua Zhou, and Feilong Cao. A novel deep learning algorithm for incomplete face recognition: Low-rank-recovery network. Neural Networks, 94:115-124, 2017. 3 +[62] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 2, 5 \ No newline at end of file diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/images.zip b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..aa053b3db07cc880eb9d421b8418710882dccefe --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32744eff491f2f5aef925b1001b27225c099f35c5b284cc56abde535e82036d4 +size 571629 diff --git a/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/layout.json b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9628dcc384a0cc1973525f0f578e1d57a580f963 --- /dev/null +++ b/2023/1% VS 100%_ Parameter-Efficient Low Rank Adapter for Dense Predictions/layout.json @@ -0,0 +1,10348 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 60, + 103, + 533, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 103, + 533, + 120 + ], + "spans": [ + { + "bbox": [ + 60, + 103, + 533, + 120 + ], + "type": "text", + "content": "1% VS 100%: Parameter-Efficient Low Rank Adapter for Dense Predictions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "spans": [ + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": "Dongshuo Yin" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1,2,\\dagger}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": ", Yiran Yang" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1,2,\\dagger}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": ", Zhechao Wang" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 61, + 141, + 536, + 
213 + ], + "type": "text", + "content": ", Hongfeng Yu" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": ", Kaiwen Wei" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": ", Xian Sun" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1,2,*}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": "Key Laboratory of Network Information System Technology, Aerospace Information Research Institute, Chinese Academy of Sciences \n" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 61, + 141, + 536, + 213 + ], + "type": "text", + "content": "School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 99, + 215, + 494, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 215, + 494, + 241 + ], + "spans": [ + { + "bbox": [ + 99, + 215, + 494, + 241 + ], + "type": "text", + "content": "{yindongshuo19, yangyiran19, wangzhechao21, weikaiwen19}@mails.ucas.ac.cn {yuhf, sunxian}@aircas.ac.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "spans": [ + { + "bbox": [ + 143, + 269, + 192, + 281 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 294, + 290, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 294, + 290, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 294, + 290, + 521 + ], + "type": "text", + "content": "Fine-tuning large-scale pre-trained vision models to downstream tasks is a standard technique for achieving state-of-the-art performance on computer vision benchmarks. However, fine-tuning the whole model with millions of parameters is inefficient as it requires storing a samesized new model copy for each task. In this work, we propose LoRand, a method for fine-tuning large-scale vision models with a better trade-off between task performance and the number of trainable parameters. LoRand generates tiny adapter structures with low-rank synthesis while keeping the original backbone parameters fixed, resulting in high parameter sharing. To demonstrate LoRand's effectiveness, we implement extensive experiments on object detection, semantic segmentation, and instance segmentation tasks. By only training a small percentage (1% to 3%) of the pre-trained backbone parameters, LoRand achieves comparable performance to standard fine-tuning on COCO and ADE20K and outperforms fine-tuning in low-resource PASCAL VOC dataset." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 544, + 128, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 128, + 556 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 128, + 556 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 565, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 565, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 565, + 287, + 685 + ], + "type": "text", + "content": "With the rapid development of computer vision, parameters in deep models are surging. Giant models need to be trained with massive resources to achieve superior performance [3, 17, 47, 58], which is often unavailable to many academics and institutions. \"Pretrain & Finetuning\" paradigm is widely used to alleviate this dilemma. Teams with sufficient computation resources utilise enormous datasets [2, 9, 40, 50] to train superior backbones [4, 32, 40, 48] and optimise the models with ideal performances. Models pretrained in this way usually have a su" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 320, + 269, + 533, + 423 + ], + "blocks": [ + { + "bbox": [ + 320, + 269, + 533, + 423 + ], + "lines": [ + { + "bbox": [ + 320, + 269, + 533, + 423 + ], + "spans": [ + { + "bbox": [ + 320, + 269, + 533, + 423 + ], + "type": "image", + "image_path": "284244850d213821e678546c56bd87e129870b5533c18d88c83c8caed88f03ff.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "lines": [ + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "type": "text", + "content": "Figure 1. Comparisons of trainable backbone parameters between our methods (red) and fine-tuning (black). In COCO, we achieve advanced performances and outperform most existing backbones with only " + }, + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "type": "inline_equation", + "content": "0.9\\sim 2.5\\mathrm{M}" + }, + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "type": "text", + "content": " new backbone parameters (Cascade-RCNN is employed as the detector). The fine-tuning paradigm produces massive redundant backbone parameters, whereas our approach saves over " + }, + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "type": "inline_equation", + "content": "97\\%" + }, + { + "bbox": [ + 304, + 431, + 547, + 531 + ], + "type": "text", + "content": " of hardware resources with competitive performances. The sizes of the circles intuitively compare the number of trainable parameters." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 545, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 605 + ], + "type": "text", + "content": "perior understanding of homogeneous data. After that, researchers with limited computational resources can transfer the understanding capabilities of the pre-trained models to downstream tasks with promising performances by finetuning [1,26,46,53]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 605, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 546, + 715 + ], + "type": "text", + "content": "However, the fine-tuned model will produce a new set of parameters as large as the pre-trained model. New parameters are independent of the pre-trained models and unshareable, which are very hardware intensive for cloud service providers [23, 49]. 
Figure 1 compares the parameter quantities of some remarkable backbones and their performances on the COCO [28] dataset. Recent advances in natural language processing (NLP) [30, 38] show that large pre-trained models trained with rich data have strong gener" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 693, + 136, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 693, + 136, + 703 + ], + "spans": [ + { + "bbox": [ + 57, + 693, + 136, + 703 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 59, + 703, + 126, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 126, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 126, + 712 + ], + "type": "text", + "content": "Equal contribution." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20116" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 69, + 154, + 252 + ], + "blocks": [ + { + "bbox": [ + 75, + 69, + 154, + 252 + ], + "lines": [ + { + "bbox": [ + 75, + 69, + 154, + 252 + ], + "spans": [ + { + "bbox": [ + 75, + 69, + 154, + 252 + ], + "type": "image", + "image_path": "b58c6b179031f202b13762129589750230c30d575d80af5f850f0a002de939e8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 258, + 149, + 266 + ], + "lines": [ + { + "bbox": [ + 78, + 258, + 149, + 266 + ], + "spans": [ + { + "bbox": [ + 78, + 258, + 149, + 266 + ], + "type": "text", + "content": "Swin-Transformer Block" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 164, + 69, + 260, + 254 + ], + "blocks": [ + { + "bbox": [ + 164, + 69, + 260, + 254 + ], + "lines": [ + { + "bbox": [ + 164, + 69, + 260, + 254 + ], + "spans": [ + { + "bbox": [ + 164, + 69, + 260, + 254 + ], + "type": "image", + "image_path": "7edbd25abc95b7d7da6fcc332f29fee39ad6e9b53dd1dfb97cf12b8d90b381de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 258, + 233, + 266 + ], + "lines": [ + { + "bbox": [ + 190, + 258, + 233, + 266 + ], + "spans": [ + { + "bbox": [ + 190, + 258, + 233, + 266 + ], + "type": "text", + "content": "LoRand Layer" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 276, + 287, + 342 + ], 
+ "lines": [ + { + "bbox": [ + 46, + 276, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 287, + 342 + ], + "type": "text", + "content": "Figure 2. Architecture of the adapter module and its integration with the Transformer. Left: We add two LoRand structures to each SwinBlock located behind the W/SW-MSA and MLP structures respectively. Right: LoRand contains two Multi-branch low-rank projections and nonlinearity. We include skip-connection to LoRand to enhance its robustness." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 363, + 287, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 602 + ], + "type": "text", + "content": "alisability, which means most parameters in the pre-trained models can be shared with the new tasks [22, 36, 37, 44, 59]. Moreover, recent literature demonstrates that the feature understanding of pre-trained models could be reduced when they are fine-tuned in low-resource situations [12, 36]. To tackle these issues, NLP researchers propose two new training paradigms based on pre-trained models: Adapter Tuning [22] and Prompt Tuning [30], both of which tune the new models by fixing the pre-trained parameters and adding a few trainable structures (less than " + }, + { + "bbox": [ + 46, + 363, + 287, + 602 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 363, + 287, + 602 + ], + "type": "text", + "content": " of the backbone). These paradigms create a new buzz in NLP and achieve impressive performances which can be competitive with finetuning [12, 22, 30, 36-38, 44, 59]. Advances in NLP also shed new light on computer vision. Jia et al. [24] propose Visual Prompt Tuning (VPT) and demonstrate that VPT can outperform fine-tuning on image classification tasks by training a small number of trainable parameters. Nevertheless, VPT shows weakness on more challenging dense predictions like semantic segmentation compared with finetuning [24]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 286, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 286, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 286, + 715 + ], + "type": "text", + "content": "To find a parameter-efficient paradigm with promising performance in computer vision, we explore the potential of Adapter Tuning for visual dense predictions. We employ the advanced Swin Transformer [32] trained with ImageNet-22K [9] as the pre-trained model. After that, we add bottleneck adapter structures [22] behind each SwinBlock and freeze the original backbone parameters when training, but this approach cannot achieve comparable performance to fine-tuning as mentioned in [24]. In the experi" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "text", + "content": "periments, we find that the models perform better with sparser adapter structures. To improve the performance of Adapter Tuning, we propose Low-Rank Adapter (LoRand) to reduce the adapter parameters, as shown in Figure 2. LoRand sparsely parameterizes the matrices in adapters by low-rank synthesis. 
Specifically, the projection matrix of the fully-connected layer (FC) in LoRand is a product of multiple low-rank matrices, which reduces FC parameters by more than " + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "text", + "content": ". We implement extensive experiments on object detection (PASCAL VOC [14]), semantic segmentation (ADE20K [62]), and instance segmentation (MS COCO [28]) to verify the capability of LoRand. Experimental results show that LoRand-Tuning is comparable to fine-tuning on multiple tasks with only " + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "inline_equation", + "content": "2.8\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 358 + ], + "type": "text", + "content": " new backbone parameters, which suggests that the pre-trained backbone parameters can be fully shared. More interestingly, our method completely outperforms fine-tuning on the PASCAL VOC dataset, illustrating that LoRand-Tuning can reduce the impairment of fine-tuning on pre-trained models in low-resource configurations. Our method demonstrates that the LoRand-Tuning paradigm can substantially save storage resources and achieve competitive performances on most dense prediction tasks. In summary, our contributions are three-fold:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 366, + 545, + 559 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 316, + 366, + 545, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 366, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 316, + 366, + 545, + 426 + ], + "type": "text", + "content": "- We demonstrate that visual pre-trained models are highly generalisable and shareable. With our training methods, new tasks require only a few trainable parameters to achieve performances comparable to finetuning, which can save massive hardware resources." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "text", + "content": "- We propose the LoRand structure for sparser adapters based on low-rank synthesis. We demonstrate that the backbone parameters in fine-tuning are highly redundant, which can be replaced by " + }, + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "inline_equation", + "content": "1.8\\%" + }, + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "inline_equation", + "content": "2.8\\%" + }, + { + "bbox": [ + 316, + 434, + 545, + 493 + ], + "type": "text", + "content": " additional parameters in LoRand." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 500, + 545, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 500, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 316, + 500, + 545, + 559 + ], + "type": "text", + "content": "- Extensive experiments on object detection, semantic segmentation, and instance segmentation show that LoRand-Tuning can achieve remarkable performances and reduce massive new parameters in challenging dense prediction tasks." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 567, + 392, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 567, + 392, + 580 + ], + "spans": [ + { + "bbox": [ + 306, + 567, + 392, + 580 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 587, + 459, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 587, + 459, + 601 + ], + "spans": [ + { + "bbox": [ + 306, + 587, + 459, + 601 + ], + "type": "text", + "content": "2.1. Training Paradigms in NLP" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 715 + ], + "type": "text", + "content": "Computer vision has been continuously inspired by NLP in recent years, including the visual transformer series [5,13,29,32] and self-supervised MAE series [15,19,60]. In fact, NLP is leading new training trends different from finetuning. Fine-tuning produces a new parameter set for each new task, which is parametrically inefficient for plenty of linguistic tasks [22,30]. To solve this problem, [30] and [22] have proposed \"Prompt Tuning\" and \"Adapter Tuning\" respectively, both of which fix all parameters of the backbone" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20117" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "text", + "content": "and plug a few tiny trainable structures (less than " + }, + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 72, + 289, + 288 + ], + "type": "text", + "content": " of the backbone) to adapt the pre-trained model to the new tasks. \"Prompt tuning\" adds learnable parameters (also known as prompts) to the input or intermediate layers to change the input space of the new tasks. \"Prompts\" can motivate the model to remember knowledge learned in the previous tasks. \"Adapter tuning\" adds learnable bottleneck structures after each block to connect the pre-trained model with new tasks. Adapter and prompt demonstrate the coexistence of parameter efficiency and high performances in NLP, stimulating studies in CV. [24] proposes Visual Prompt Tuning (VPT) for image classification and semantic segmentation, but the performance of VPT on semantic segmentation is still far from fine-tuning. 
This phenomenon motivates us to explore whether adapter tuning can bring a new paradigm in computer vision with fewer parameters and better performances. In this work, we try to explore parameter-efficient and high-performance adapter structures." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 299, + 145, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 145, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 145, + 312 + ], + "type": "text", + "content": "2.2. Adapter Tuning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 318, + 289, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 289, + 547 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 289, + 547 + ], + "type": "text", + "content": "Adapters have been widely studied in NLP. Houlsby et al. [22] first add a bottleneck adapter structure to the transformer blocks and fix the original backbone, which achieves comparable performances to fine-tuning. Figure 3 illustrates the differences between fine-tuning and adaptertuning. [37,44,59] further reduce parameters in the adapter with closer performances to fine-tuning. [18,34,39] outperform fine-tuning on low-resource tasks, demonstrating that more parameters may not improve performance when finetuning pre-trained models [36]. In computer vision, [41] add convolutional adapters to the ResNet [20] and obtain competitive results in image classification. Adapter concept has also been applied in multimodal [33], vision-and-language [51], and domain adaptation [56], but these methods are only applicable under specific conditions. [7, 21, 25, 31] investigate the potential of adapter-tuning for visual classification. [8] apply the adapter structure to visual dense predictions without fixing any original parameters, which indeed trades more parameters for better performances." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 556, + 191, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 556, + 191, + 569 + ], + "spans": [ + { + "bbox": [ + 47, + 556, + 191, + 569 + ], + "type": "text", + "content": "2.3. Low-rank Approximation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "spans": [ + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "content": "The low-rank approximation uses multiple low-dimensional tensors to approximate a larger tensor with higher dimensions. Tensor dimensions and sizes in machine learning are very large, so low-rank approximations are widely used in face recognition [61], distributed training [54], transfer learning [11], and cross-domain [10]. 
A " + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "inline_equation", + "content": "b \\times c" + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "content": " matrix " + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "content": " can be approximated with " + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "content": " low-rank matrices " + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 46, + 576, + 287, + 673 + ], + "type": "text", + "content": " by the following equation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 124, + 683, + 287, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 683, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 124, + 683, + 287, + 716 + ], + "type": "interline_equation", + "content": "M _ {b \\times c} = \\prod_ {i = 1} ^ {N} Q _ {r _ {i} \\times s _ {i}}, \\tag {1}", + "image_path": "241a60c5d99e7dcc59ff6c6234b90ce4b449508010f8846b945d142a0877d5b0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 72, + 544, + 228 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 544, + 228 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 544, + 228 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 544, + 228 + ], + "type": "image", + "image_path": "076b68aad349b00b6a3bfa1feb3c01031b3a22e132f2f3e1d5dbafcabaff3fd7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "lines": [ + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "text", + "content": "Figure 3. Comparison between Adapter-Tuning and Fine-Tuning paradigms. Fine-Tuning tunes ( " + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "text", + "content": ") all parameters delivered by the pre-trained model. Adapter-Tuning freezes (" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "text", + "content": ") all structures and parameters in the pre-trained model and only trains (" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 237, + 545, + 304 + ], + "type": "text", + "content": ") the additional parameters in adapters. Parameters in the decoder and head are trainable in both paradigms." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 323, + 545, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 545, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 545, + 361 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 323, + 545, + 361 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 323, + 545, + 361 + ], + "type": "text", + "content": " has different values depending on the approximation methods, we implement low-rank approximation of the adapter matrices by heuristic learning." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 370, + 361, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 370, + 361, + 382 + ], + "spans": [ + { + "bbox": [ + 306, + 370, + 361, + 382 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 390, + 545, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 545, + 427 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 545, + 427 + ], + "type": "text", + "content": "In this section, we will elaborate on the proposed low-rank adapter (LoRand) in three parts: adapter tuning paradigm, LoRand, and parameter analysis." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 434, + 452, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 434, + 452, + 447 + ], + "spans": [ + { + "bbox": [ + 306, + 434, + 452, + 447 + ], + "type": "text", + "content": "3.1. Adapter Tuning Paradigm" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 452, + 545, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 452, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 304, + 452, + 545, + 487 + ], + "type": "text", + "content": "For dataset " + }, + { + "bbox": [ + 304, + 452, + 545, + 487 + ], + "type": "inline_equation", + "content": "D = \\{(x_{i},y_{i})\\}_{i = 1}^{N}" + }, + { + "bbox": [ + 304, + 452, + 545, + 487 + ], + "type": "text", + "content": ", fine-tuning calculates the loss between inference results and labels according to the formula:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 357, + 494, + 545, + 528 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 494, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 357, + 494, + 545, + 528 + ], + "type": "interline_equation", + "content": "L (D, \\theta) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {2}", + "image_path": "a908b4f3ec719a4b50f64d8b59efb80ada366e3682dcde3274d518546de1dfec.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "text", + "content": " denotes the network forward function and loss represents the loss function. 
After that, " + }, + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 534, + 545, + 570 + ], + "type": "text", + "content": " is optimized through" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 378, + 570, + 545, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 570, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 378, + 570, + 545, + 590 + ], + "type": "interline_equation", + "content": "\\theta \\leftarrow \\underset {\\theta} {\\arg \\min } L (D, \\theta). \\tag {3}", + "image_path": "a1ff967715907acfb71bcf55cbbcce7a077b8f69748f094258a9e2dc511235b2.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": "In adapter tuning paradigm, parameters consist of two parts, including parameters in adapter " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta_{A}" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": " and parameters in the original architecture " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": " is further divided into frozen part " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta_{F}" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": " and trainable part " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta_{T}" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": ", noted as " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\theta = \\{\\theta_{F},\\theta_{T}\\}" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": " be all the trainable parameters, then " + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "inline_equation", + "content": "\\Omega = \\{\\theta_{A},\\theta_{T}\\}" + }, + { + "bbox": [ + 304, + 594, + 545, + 676 + ], + "type": "text", + "content": ". 
The loss function and optimization formula in adapter can be written as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 341, + 683, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 683, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 341, + 683, + 545, + 715 + ], + "type": "interline_equation", + "content": "L \\left(D, \\theta_ {F}, \\Omega\\right) = \\sum_ {i = 1} ^ {N} \\operatorname {l o s s} \\left(f _ {\\theta_ {F}, \\Omega} \\left(x _ {i}\\right), y _ {i}\\right), \\tag {4}", + "image_path": "9c6b244abc3c706086addabad5bc8b499349dfa0c98bfe209a2f4e5162275b92.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20118" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 72, + 407, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 72, + 407, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 72, + 407, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 407, + 266 + ], + "type": "image", + "image_path": "1121da732dac8d417df762986ffe346f6c0ca9d44752793fbadaa91c64385b68.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "lines": [ + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": "Figure 4. Left: Multi-branch projection in LoRand. The down-projection " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W^{D}" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " and up-projection " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W^{U}" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " matrices are the summation of " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " branches " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W_{1}^{D}(W_{1}^{U})\\ldots W_{\\alpha}^{D}(W_{\\alpha}^{U})" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "K_{i}" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": "-th branch is shared between " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W_{i}^{D}" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W_{i}^{U}" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": ". 
All the " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "P, Q," + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " are trainable, while all the " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " matrices are calculated. Right: Comparisons of the same-sized projection matrices between LoRand and Adapter. " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "(m,n)" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " in the table are typical values in SwinBlocks. LoRand has far fewer parameters than Adapter. With the same projection dimension, LoRand saves over 80% parameters of the Adapter in Swin Transformers. " + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "inline_equation", + "content": "(\\alpha ,\\beta)" + }, + { + "bbox": [ + 46, + 274, + 547, + 330 + ], + "type": "text", + "content": " here are (2,8), the same as the experiments." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 410, + 118, + 539, + 221 + ], + "blocks": [ + { + "bbox": [ + 410, + 118, + 539, + 221 + ], + "lines": [ + { + "bbox": [ + 410, + 118, + 539, + 221 + ], + "spans": [ + { + "bbox": [ + 410, + 118, + 539, + 221 + ], + "type": "table", + "html": "
(m,n)PLoRandPAdapter%
(96,48)4736921651.39%
(192,96)93443686425.35%
(384,192)1856014745612.59%
(768,384)369925898246.27%
……………………
", + "image_path": "5eca91dd2736066fb84fb54d46ca61d14724c9686fdcea2d77de0ad60e1368a3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 350, + 287, + 370 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 350, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 110, + 350, + 287, + 370 + ], + "type": "interline_equation", + "content": "\\Omega \\leftarrow \\underset {\\Omega} {\\arg \\min } L (D, \\theta_ {F}, \\Omega). \\tag {5}", + "image_path": "553ac397e2604b31b59c6e2070db373762cf4748bb3853c933e75ef959dea70d.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 376, + 108, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 376, + 108, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 376, + 108, + 388 + ], + "type": "text", + "content": "3.2. LoRand" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 394, + 287, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 394, + 287, + 467 + ], + "spans": [ + { + "bbox": [ + 46, + 394, + 287, + 467 + ], + "type": "text", + "content": "Before introducing LoRand, we first review the existing adapter structure. Conventional adapters are bottleneck structures containing a down-projection, an up-projection, and a non-linear activation function. Besides, adapters ensure the robustness of the model by adding residual [20] structures. Adapter layer can be formulated as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 100, + 472, + 287, + 488 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 472, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 100, + 472, + 287, + 488 + ], + "type": "interline_equation", + "content": "A ^ {l} = U ^ {l} \\left(G e L U (D ^ {l} (x))\\right) + x, \\tag {6}", + "image_path": "a06a8bd824cd42decb7405a70c23a5f7a34f24aececb631e2d42ce83643d6d2a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "spans": [ + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "inline_equation", + "content": "U^l" + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "inline_equation", + "content": "D^l" + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "text", + "content": " represent the up and down projections in the " + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 494, + 287, + 542 + ], + "type": "text", + "content": "-th adapter layer, and GeLU is the activation function. It is clear that the parameters in adapter come from the projections. 
The projection process can be written as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 137, + 550, + 287, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 550, + 287, + 562 + ], + "spans": [ + { + "bbox": [ + 137, + 550, + 287, + 562 + ], + "type": "interline_equation", + "content": "y = W x + b, \\tag {7}", + "image_path": "8173d8cedcf17d8f482c166a9043a39e5756c50400b9db206c4d2c147c568aa8.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 570, + 244, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 570, + 244, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 570, + 244, + 582 + ], + "type": "text", + "content": "which means most adapter parameters are in " + }, + { + "bbox": [ + 47, + 570, + 244, + 582 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 47, + 570, + 244, + 582 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": "To reduce the adapter parameters, we propose a low-rank adapter (LoRand) structure to replace the " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": " in the projection structures. Figure 2 shows the simplified structure of LoRand. Here we approximate not a specific matrix " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": " but an ideal matrix " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "W_{best}" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": " that can transform the feature space of the pre-trained model into new tasks by heuristic learning. The approximation matrix " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\hat{W}" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": " has the same size as " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": ", but the low-rank design makes " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "\\hat{W}" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": " have far fewer free degrees than a common " + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 46, + 582, + 287, + 688 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "Specifically, we synthesize each " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " by multiplying three low-rank matrices " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "P \\in \\mathbb{R}^{\\beta \\times m}" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "K \\in \\mathbb{R}^{\\beta \\times \\beta}" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "Q \\in \\mathbb{R}^{\\beta \\times n}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 352, + 335, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 352, + 335, + 361 + ], + "spans": [ + { + "bbox": [ + 306, + 352, + 335, + 361 + ], + "type": "text", + "content": "that is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 395, + 363, + 545, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 363, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 395, + 363, + 545, + 376 + ], + "type": "interline_equation", + "content": "W = P ^ {T} K Q, \\tag {8}", + "image_path": "d56b0d4f4ab39a406a2bb8c26234965f97dec513baf7d411a2604406acd37e8f.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "inline_equation", + "content": "\\beta \\ll \\min(m, n)" + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "content": " ensuring that " + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "content": " are low-rank matrices. " + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 305, + 383, + 545, + 418 + ], + "type": "text", + "content": " can be regarded as a kernel matrix that controls the parameter size of LoRand." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "content": "After that, we add multi-branch structures to LoRand to increase the robustness and stability of low-rank matrices, which is inspired by MoE [43] and adaboost [45,52]. 
Every " + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "content": " consists of " + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 305, + 419, + 545, + 467 + ], + "type": "text", + "content": " branches, that is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 363, + 476, + 545, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 476, + 545, + 507 + ], + "spans": [ + { + "bbox": [ + 363, + 476, + 545, + 507 + ], + "type": "interline_equation", + "content": "W = \\sum_ {i = 1} ^ {\\alpha} W _ {i} = \\sum_ {i = 1} ^ {\\alpha} P _ {i} ^ {T} K _ {i} Q _ {i}. \\tag {9}", + "image_path": "0e82a8eeefa1685029e870f344da0c1acf92190039fb3eb4088285a2b167b0d9.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "spans": [ + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "content": "In addition, we share the kernel matrix " + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "content": " of the two projection layers within each branch. We hope the sharing mechanism can promote the coherence of two projection layers during training process. Besides, the shared " + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "content": " also slightly reduces the number of LoRand parameters. 
Up to now, the " + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "inline_equation", + "content": "W^{U}" + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "inline_equation", + "content": "W^{D}" + }, + { + "bbox": [ + 304, + 516, + 547, + 599 + ], + "type": "text", + "content": " in a complete LoRand structure can be represented as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 348, + 609, + 545, + 641 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 609, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 348, + 609, + 545, + 641 + ], + "type": "interline_equation", + "content": "W ^ {U} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {U} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {U}\\right) ^ {T} K _ {i} Q _ {i} ^ {U}, \\tag {10}", + "image_path": "a66bf10a555f5edf23da6d1c4e3df32c46a8a27b64482c8cc2a53e9eb55958ad.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 347, + 651, + 545, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 651, + 545, + 682 + ], + "spans": [ + { + "bbox": [ + 347, + 651, + 545, + 682 + ], + "type": "interline_equation", + "content": "W ^ {D} = \\sum_ {i = 1} ^ {\\alpha} W _ {i} ^ {D} = \\sum_ {i = 1} ^ {\\alpha} \\left(P _ {i} ^ {D}\\right) ^ {T} K _ {i} Q _ {i} ^ {D}, \\tag {11}", + "image_path": "bbc79643e75c760e8ea53d4885c5651d0742bc37860e08b913d0f684a68ceba1.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "K_{i}" + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": " is shared in " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "W^{U}" + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "inline_equation", + "content": "W^{D}" + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": ". Figure 4 presents the detailed designs of the multi-branch projection." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20119" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 163, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 163, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 163, + 85 + ], + "type": "text", + "content": "3.3. Parameter Analysis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 287, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 287, + 126 + ], + "type": "text", + "content": "In this section, we will compare the parameters of Lo-Rand and typical adapter [22] with the same size of projection matrix." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "content": "Adapter Let " + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "content": " be the input dimension of the adapter and " + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "content": " be the middle layer dimension after down projection. Then the number of parameters in each adapter is " + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "inline_equation", + "content": "2mn" + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "content": " (ignoring the few biases). In general, adapter tuning places two adapter modules in each block, so the space complexity of all adapter parameters in " + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 133, + 287, + 205 + ], + "type": "text", + "content": " blocks can be written as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 215, + 287, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 215, + 287, + 228 + ], + "spans": [ + { + "bbox": [ + 143, + 215, + 287, + 228 + ], + "type": "interline_equation", + "content": "O (4 \\gamma m n). \\tag {12}", + "image_path": "8e7f87e8818a7ba73d4fb5a147393131144dbdd39e42ffe5d8be7191d7a6f887.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "spans": [ + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "text", + "content": "LoRand According to section 3.2, each " + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "text", + "content": " contains " + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "text", + "content": " sets of " + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "inline_equation", + "content": "\\{P,Q,K\\}" + }, + { + "bbox": [ + 47, + 243, + 287, + 269 + ], + "type": "text", + "content": ", that is:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 125, + 277, + 287, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 277, + 287, + 291 + ], + "spans": [ + { + "bbox": [ + 125, + 277, + 287, + 291 + ], + "type": "interline_equation", + "content": "\\alpha \\left(m \\beta + \\beta^ {2} + n \\beta\\right). 
\\tag {13}", + "image_path": "f04e56846dcfd8ce670b01b515faf66f057ae06400f44b3191f697bbcc27c413.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "text", + "content": "Each LoRand consists of two " + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "text", + "content": " shared " + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 300, + 287, + 324 + ], + "type": "text", + "content": ", so the parameter quantity of each LoRand is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 332, + 287, + 347 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 332, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 52, + 332, + 287, + 347 + ], + "type": "interline_equation", + "content": "2 \\alpha (m \\beta + \\beta^ {2} + n \\beta) - \\alpha \\beta^ {2} = 2 \\alpha \\beta (m + n + \\beta / 2). \\tag {14}", + "image_path": "37515e0adde0faadcb05ba8a03bcd5a7b0eff6b56d39cb5a7df72d1580b1f942.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 356, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 356, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 356, + 287, + 380 + ], + "type": "text", + "content": "Each block has two LoRand structures, so the number of parameters in " + }, + { + "bbox": [ + 47, + 356, + 287, + 380 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 47, + 356, + 287, + 380 + ], + "type": "text", + "content": " blocks is:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 113, + 389, + 287, + 403 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 389, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 113, + 389, + 287, + 403 + ], + "type": "interline_equation", + "content": "4 \\alpha \\beta \\gamma (m + n) + 2 \\alpha \\beta^ {2} \\gamma . \\tag {15}", + "image_path": "3b9c3aab62dbf74251875ab3115a9e222b9f54b0a4309ab7a2a7502aef8c3e19.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 412, + 287, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 287, + 434 + ], + "type": "text", + "content": "As " + }, + { + "bbox": [ + 47, + 412, + 287, + 434 + ], + "type": "inline_equation", + "content": "\\alpha, \\beta, \\gamma \\ll \\min(m, n)" + }, + { + "bbox": [ + 47, + 412, + 287, + 434 + ], + "type": "text", + "content": ", the space complexity here can be written as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 124, + 436, + 287, + 449 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 436, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 124, + 436, + 287, + 449 + ], + "type": "interline_equation", + "content": "O \\left(4 \\alpha \\beta \\gamma (m + n)\\right). 
\\tag {16}", + "image_path": "e7c58b0c08b9f024c2e5879147c63db53a9b509136bd630e8d41ccb529cbcb2f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 454, + 287, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 454, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 47, + 454, + 287, + 477 + ], + "type": "text", + "content": "Comparison between Formulas 12 and 16 can be simplified as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 149, + 478, + 287, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 478, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 149, + 478, + 287, + 491 + ], + "type": "interline_equation", + "content": "O (m n), \\tag {17}", + "image_path": "179cf88273c708ced07a0b984830650499ffef5d75044a4904d36c4af094d664.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 497, + 65, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 497, + 65, + 506 + ], + "spans": [ + { + "bbox": [ + 47, + 497, + 65, + 506 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 509, + 287, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 287, + 521 + ], + "type": "interline_equation", + "content": "O (\\alpha \\beta (m + n)). \\tag {18}", + "image_path": "2d825a1059227ac752f074b237ae273111b7388f7df48a288f4adb04bb001418.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 527, + 287, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 287, + 575 + ], + "type": "text", + "content": "Given that " + }, + { + "bbox": [ + 47, + 527, + 287, + 575 + ], + "type": "inline_equation", + "content": "\\alpha, \\beta \\ll \\min(m, n)" + }, + { + "bbox": [ + 47, + 527, + 287, + 575 + ], + "type": "text", + "content": ", the space complexity of LoRand is far lower than the typical adapter. The table in Figure 4 illustrates that LoRand saves most Adapter parameters with the same projecting dimension." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 586, + 128, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 128, + 600 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 128, + 600 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 287, + 714 + ], + "type": "text", + "content": "We evaluate LoRand on multiple dense prediction tasks, including object detection, semantic segmentation, and instance segmentation. We also evaluate LoRand under low-resource conditions. We first describe our experimental setup in Section 4.1, including pre-trained backbones, baselines, LoRand settings, and downstream tasks. Then we present the main results of three benchmarks in Section 4.2. We also implement ablation study in Section 4.3 to investigate the impact of structural settings in LoRand." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 72, + 422, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 422, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 422, + 85 + ], + "type": "text", + "content": "4.1. 
Experimental Setup" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 97, + 545, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 97, + 545, + 146 + ], + "spans": [ + { + "bbox": [ + 305, + 97, + 545, + 146 + ], + "type": "text", + "content": "Pretrained Backbones We conduct experiments on the advanced Swin Transformer [32] architectures. All backbones in this section are pre-trained by ImageNet-22k [9]. Pre-trained models are provided by OpenMMLab [6]." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 171, + 545, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 171, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 305, + 171, + 545, + 196 + ], + "type": "text", + "content": "Baselines We compare LoRand with three other common training methods:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 205, + 545, + 337 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 305, + 205, + 520, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 205, + 520, + 217 + ], + "spans": [ + { + "bbox": [ + 305, + 205, + 520, + 217 + ], + "type": "text", + "content": "(a) FULL: update all parameters in the architecture." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 225, + 545, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 225, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 305, + 225, + 545, + 249 + ], + "type": "text", + "content": "(b) FIXED: fix pre-trained parameters in Swin and train other parts of the architecture (neck, head)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 257, + 545, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 257, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 305, + 257, + 545, + 305 + ], + "type": "text", + "content": "(c) ADAPTER: add two trainable adapter structures in each SwinBlock following [22], and freeze other parts of the backbone. We evaluate two forms of adapter with different middle layer dimensions " + }, + { + "bbox": [ + 305, + 257, + 545, + 305 + ], + "type": "inline_equation", + "content": "(D_{ML})" + }, + { + "bbox": [ + 305, + 257, + 545, + 305 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 325, + 308, + 528, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 308, + 528, + 321 + ], + "spans": [ + { + "bbox": [ + 325, + 308, + 528, + 321 + ], + "type": "text", + "content": "- ADAPTER-B: " + }, + { + "bbox": [ + 325, + 308, + 528, + 321 + ], + "type": "inline_equation", + "content": "D_{ML}" + }, + { + "bbox": [ + 325, + 308, + 528, + 321 + ], + "type": "text", + "content": " is a half of input dimension." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 325, + 324, + 540, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 324, + 540, + 337 + ], + "spans": [ + { + "bbox": [ + 325, + 324, + 540, + 337 + ], + "type": "text", + "content": "- ADAPTER-T: " + }, + { + "bbox": [ + 325, + 324, + 540, + 337 + ], + "type": "inline_equation", + "content": "D_{ML}" + }, + { + "bbox": [ + 325, + 324, + 540, + 337 + ], + "type": "text", + "content": " is a quarter of input dimension." 
+ } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "text", + "content": "LoRand Settings We conducted experiments on three Lo-Rand variants, which have different branch numbers " + }, + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "text", + "content": " and kernel matrix dimensions " + }, + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 305, + 353, + 545, + 388 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 396, + 468, + 447 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "spans": [ + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "text", + "content": "- LoRand: " + }, + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "inline_equation", + "content": "\\alpha = 2" + }, + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "inline_equation", + "content": "\\beta = 8" + }, + { + "bbox": [ + 317, + 396, + 468, + 408 + ], + "type": "text", + "content": " (Standard)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 416, + 429, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 416, + 429, + 428 + ], + "spans": [ + { + "bbox": [ + 317, + 416, + 429, + 428 + ], + "type": "text", + "content": "- LoRand+: " + }, + { + "bbox": [ + 317, + 416, + 429, + 428 + ], + "type": "inline_equation", + "content": "\\alpha = 4, \\beta = 8" + }, + { + "bbox": [ + 317, + 416, + 429, + 428 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 436, + 440, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 436, + 440, + 447 + ], + "spans": [ + { + "bbox": [ + 317, + 436, + 440, + 447 + ], + "type": "text", + "content": "- LoRand++: " + }, + { + "bbox": [ + 317, + 436, + 440, + 447 + ], + "type": "inline_equation", + "content": "\\alpha = 4, \\beta = 16" + }, + { + "bbox": [ + 317, + 436, + 440, + 447 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 305, + 462, + 545, + 510 + ], + "type": "text", + "content": "Downstream Tasks We conducted experiments on COCO [28], ADE20K [62], and PASCAL VOC [14] benchmarks to widely evaluate LoRand's performance on main dense prediction tasks." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 305, + 510, + 545, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 510, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 305, + 510, + 545, + 582 + ], + "type": "text", + "content": "COCO 2017 [28] is the most commonly used dataset for object detection and instance segmentation, which contains 118K training and 5K validation images. We perform experiments on the validation set. 
For a fair comparison, all experiments performed on COCO employ Cascade MASK R-CNN [32] as the detector." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 305, + 582, + 545, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 582, + 545, + 630 + ], + "spans": [ + { + "bbox": [ + 305, + 582, + 545, + 630 + ], + "type": "text", + "content": "ADE20K [62] is the most widely used semantic segmentation dataset, which contains 20K training and 2K validation images. We also conduct experiments on the ADE20K validation set and utilise UperNet [57] as the framework." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 305, + 630, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 630, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 305, + 630, + 545, + 689 + ], + "type": "text", + "content": "PASCAL VOC 0712 [14] is also widely used in object detection, which contains about 16K training and 5K validation images. VOC 0712 is much smaller than the latest benchmarks, so we treat it as a low-resource case. We adopt Faster RCNN [42] as the detector for VOC 0712." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "All our experiments are conducted with 8x NVIDIA Tesla V100 GPUs. The experiments on PASCAL VOC and" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20120" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 70, + 542, + 231 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 542, + 231 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 542, + 231 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 542, + 231 + ], + "type": "table", + "html": "
Swin-L (198M)Trained* Params%ΔFullExtra StructurePascal VOC (Faster RCNN)ADE20K (UperNet)
APBoxΔLoRandmIoUΔLoRand
Baselines
FULL198.58 M100.00 %-X84.43 %- 2.69 %53.25 %+ 1.34 %
FIXED0.00 M0.00 %- 100.00 %X85.19 %- 1.93 %32.21 %- 19.70 %
ADAPTER-B32.04 M16.13 %- 83.87 %80.93 %- 6.19 %46.23 %- 5.68 %
ADAPTER-T16.04 M8.08 %- 91.92 %78.10 %- 9.02 %43.51 %- 8.40 %
Our Methods
LORAND3.59 M1.84 %- 98.16 %87.12 %-50.67 %-
LORAND+7.19 M3.62 %- 96.38 %87.63 %+ 0.51 %51.13 %+ 0.46 %
LORAND++14.24 M7.17 %- 92.83 %88.11 %+ 0.99 %51.87 %+ 1.20 %
", + "image_path": "78ed1d7df5666048085291a177d60527e41079be4303c0fa2fdf632339889cc3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 53, + 275, + 544, + 434 + ], + "blocks": [ + { + "bbox": [ + 46, + 237, + 546, + 270 + ], + "lines": [ + { + "bbox": [ + 46, + 237, + 546, + 270 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 546, + 270 + ], + "type": "text", + "content": "Table 1. Results of baselines and our methods on Pascal VOC and ADE20K benchmarks. Swin-L is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 275, + 544, + 434 + ], + "lines": [ + { + "bbox": [ + 53, + 275, + 544, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 275, + 544, + 434 + ], + "type": "table", + "html": "
Swin-B (89M)Trained* Params%ΔFullExtra StructureCOCO (Cascade Mask R-CNN)
APBoxΔLoRandAPMaskΔLoRand
Baselines
FULL89.14 M100.00 %-X51.90 %+0.80 %45.00 %+0.90 %
FIXED0.00 M0.00 %-100.00 %X15.30 %-35.80 %10.80 %-33.8 %
ADAPTER-B14.38 M16.13 %-83.87 %46.50 %-4.60 %40.20 %-3.90 %
ADAPTER-T7.20 M8.08 %-91.92 %43.20 %-7.90 %38.70 %-5.40 %
Our Methods
LORAND2.39 M2.76 %-97.24 %51.10 %-44.10 %-
LORAND+4.73 M5.31 %-94.69 %51.20 %+0.10 %44.30 %+0.20 %
LORAND++9.32 M10.46 %-89.54 %51.50 %+0.40 %44.40 %+0.30 %
", + "image_path": "4c26357a7976afde004ccdb1b5d7da94ceef118863e7dcbd3c2763f83235aa15.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 442, + 546, + 475 + ], + "lines": [ + { + "bbox": [ + 46, + 442, + 546, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 546, + 475 + ], + "type": "text", + "content": "Table 2. Results of baselines and our methods on COCO benchmarks. Swin-B is employed as the pre-trained model here. We present the numbers and percentages of trainable backbone parameters on the left and all the performances on the right. * denotes the trainable parameters in backbones." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 483, + 287, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 519 + ], + "type": "text", + "content": "ADE20K are based on Swin-S, Swin-B, and Swin-L pretrained models. Limited by GPU memory, the COCO experiments are based on Swin-T, Swin-S, and Swin-B." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 527, + 133, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 133, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 133, + 538 + ], + "type": "text", + "content": "4.2. Main Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 546, + 287, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 546, + 287, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 546, + 287, + 616 + ], + "type": "text", + "content": "We first compare the trainable backbone parameters and performance of these methods on three benchmarks in Tables 1 and 2. Table 1 shows the results of PASCAL VOC and ADE20K datasets based on Swin-L, and Table 2 shows the results of COCO based on Swin-B. From Tables 1 and 2, we can see that:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 713 + ], + "type": "text", + "content": "1) LoRand can effectively address the dilemma of fine-tuning in low-resource situations. Table 1 shows that FIXED outperforms FULL on the PASCAL VOC dataset, which implies that the powerful generalization ability of pre-trained model is severely weakened during fine-tuning. Fine-tuning with low-resource data reduces the feature understanding of pre-trained models, which leads to the poor performance on downstream tasks. LoRand avoids this dis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 483, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": "advantage by fixing the original parameters. More importantly, LoRand can absorb features from the new data by its smaller trainable structures. 
Table 1 indicates that LoRand outperforms FULL and FIXED by " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "2.69\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "1.93\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " on the low-resource dataset with only " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "1.84\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " trainable backbone parameters. LoRand+ and LoRand++ also outperform FULL by " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "3.68\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "3.62\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "inline_equation", + "content": "7.17\\%" + }, + { + "bbox": [ + 304, + 483, + 545, + 686 + ], + "type": "text", + "content": " backbone parameters. In fact, there are many other common computer vision datasets with similar volumes to the PASCAL VOC, including CUB-200-2011 [55], Oxford 102 Flowers [35], Stanford Cars [27], and Caltech-256 [16]. The prevalence of \"Pretrained & Finetuning\" leads us to focus more on giant benchmarks, but Table 1 suggests we need a better training paradigm to cope with many low-resource situations in industrial applications. LoRand-Tuning proves to be a competitive candidate who brings promising performance and parameter-efficient approaches to low-resource cases." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 714 + ], + "type": "text", + "content": "2) LoRand effectively balances the number of trainable backbone parameters and downstream task per" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20121" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": "formance. 
Tables 1 and 2 demonstrate that LoRand (standard) performs very closely to FULL on large benchmarks with only " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "1.84\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "2.76\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " trainable parameters. By tuning less than 3.6M backbone parameters, LoRand (standard) achieves " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "50.67\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " (mIOU) on ADE20K, and " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "51.10\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "(\\mathrm{AP}_{\\mathrm{Box}})" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " / " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "44.10\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "(\\mathrm{AP}_{\\mathrm{Mask}})" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " on COCO, which is only about " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " off on average compared to FULL. LoRand+ and LoRand++ further reduce the gap between these two paradigms to approximately " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": " with slight parameter increases. For Swin-L, LoRand saves about 195M parameters per copy compared to FULL. For Swin-B, LoRand saves about " + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "inline_equation", + "content": "86\\mathrm{M}" + }, + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": ". These results are interesting, which means we do not have to spend plenty of hardware resources to store these redundant parameters. Industrial service providers deliver thousands of model training tasks every day. With LoRand-Tuning, millions of gigabytes per year for model storage could be saved." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 277, + 288, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 277, + 288, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 277, + 288, + 422 + ], + "type": "text", + "content": "3) LoRand effectively broadens the potential of conventional parameter-efficient adapter structures in dense predictions. From the results, we can draw similar conclusions to [24] that the standard adapter [22] performs worse than fine-tuning on dense predictions. Tables 1 and 2 illustrate that the ADAPTER's performance is far from FULL, although it reduces " + }, + { + "bbox": [ + 47, + 277, + 288, + 422 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 47, + 277, + 288, + 422 + ], + "type": "text", + "content": " of trainable backbone parameters. 
Also adding new structures, LoRand achieves comparable performance to FULL by training fewer parameters than the ADAPTER. Overall, Tables 1 and 2 demonstrate the feasibility of parameter-efficient tuning paradigm in visual dense prediction tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "spans": [ + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "text", + "content": "Comparisons with other fine-tuned backbone. We then show the comparisons of LoRand with some other remarkable fine-tuned backbones in Table 3. Table 3a shows the results based on UperNet and ADE20K, and 3b shows the results based on Cascade MASK R-CNN and COCO. Table 3 shows that LoRand (based on Swin-Transformer) can outperform most existing fine-tuned backbones with less than 2M parameters. Compared to these backbones, LoRand not only presents more robust and superior results but also saves massive hardware resources in this era of parameter explosion. Specifically, LoRand (Swin-T) exceeds COCO by " + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "inline_equation", + "content": "\\mathrm{(AP_{Box})}" + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "inline_equation", + "content": "\\mathrm{(AP_{Mask})}" + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "text", + "content": " with 80.12M fewer new backbone parameters than ResNeXt-101-64. Similarly, LoRand (Swin-L) surpasses " + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "inline_equation", + "content": "5.82\\%" + }, + { + "bbox": [ + 47, + 424, + 289, + 614 + ], + "type": "text", + "content": " (mIoU) on ADE20K with 40.41M fewer trainable backbone parameters than ResNet-101." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 617, + 288, + 715 + ], + "type": "text", + "content": "Comparisons on different backbone scales. In addition to Swin-L and Swin-B, we also conduct extensive experiments on Swin-S and Swin-T. We illustrate the performance of baselines and LoRand on multiple backbones. Figure 5 shows the performance of the six methods on different backbone scales, which includes three Swin variants for each benchmark. As FIXED's performance on COCO and ADE20K is too low to display, we only show FIXED's re" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 308, + 82, + 541, + 534 + ], + "blocks": [ + { + "bbox": [ + 315, + 70, + 537, + 80 + ], + "lines": [ + { + "bbox": [ + 315, + 70, + 537, + 80 + ], + "spans": [ + { + "bbox": [ + 315, + 70, + 537, + 80 + ], + "type": "text", + "content": "(a) Comparisons between LoRand-Tuning and Fine-Tuning on COCO." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 82, + 541, + 534 + ], + "lines": [ + { + "bbox": [ + 308, + 82, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 308, + 82, + 541, + 534 + ], + "type": "table", + "html": "
BackboneTrained Params*APBoxAPMask
Fine-Tuning Paradigm
ResNet-10144 M47.9 %41.5 %
ResNeXt-101-3240 M48.1 %41.6 %
ResNeXt-101-6481 M48.3 %41.7 %
DeiT-S22 M48.0 %41.4 %
Swin-T29 M50.5 %43.7 %
Swin-S50 M51.8 %44.7 %
Swin-B88 M51.9 %45.0 %
LoRand-Tuning
LoRand (Swin-T)0.88 M50.2 %42.9 %
LoRand (Swin-S)1.80 M50.7 %43.8 %
LoRand (Swin-B)2.39 M51.1 %44.3 %
(b) Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K.
BackboneTrained Params*mIoU
Fine-Tuning
ResNet-1812 M39.97 %
ResNet-5025 M42.78 %
ResNet-10144 M44.85 %
DeiT-S22 M44.01 %
Swin-S50 M49.30 %
Swin-B88 M51.60 %
Swin-L197 M53.25 %
LoRand-Tuning
LoRand (Swin-S)1.80 M47.33 %
LoRand (Swin-B)2.39 M49.62 %
LoRand (Swin-L)3.59 M50.67 %
", + "image_path": "ac81b040aef3ed5169607b06418b5ad765aa8022c1df5b4c5231108821303a27.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 304, + 534, + 545, + 601 + ], + "lines": [ + { + "bbox": [ + 304, + 534, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 545, + 601 + ], + "type": "text", + "content": "Table 3. Comparisons between LoRand-Tuning and Fine-Tuning on ADE20K and COCO. We fine-tune multiple backbones and compare their performances with LoRand series. Architectures in (a) and (b) are Cascade Mask R-CNN and UperNet. Parameters in decoder and head are updated in both paradigms. * denotes the trainable parameters in backbones." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "sults in the PASCAL VOC. Figure 5 indicates that the performance of most methods improves as the backbone scale gets larger. For the LoRand series, more parameters bring better performance, but it is still challenging to outperform FULL on large datasets. For the ADAPTER, ADAPTER-B performs better than ADAPTER-T, suggesting that adding extra parameters does help improve adapter-tuning performance. Experiments on Swin variants systematically" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20122" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 72, + 522, + 91 + ], + "blocks": [ + { + "bbox": [ + 73, + 72, + 522, + 91 + ], + "lines": [ + { + "bbox": [ + 73, + 72, + 522, + 91 + ], + "spans": [ + { + "bbox": [ + 73, + 72, + 522, + 91 + ], + "type": "image", + "image_path": "4799d8629c462496de8ce6fadebc123984f57f3c6291d7d764ef3994d63168c8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 56, + 92, + 206, + 202 + ], + "blocks": [ + { + "bbox": [ + 56, + 92, + 206, + 202 + ], + "lines": [ + { + "bbox": [ + 56, + 92, + 206, + 202 + ], + "spans": [ + { + "bbox": [ + 56, + 92, + 206, + 202 + ], + "type": "image", + "image_path": "f1de32ed5f910da96337d105f6c2a278695e25f9594dff66acc215e4c8e21230.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 220, + 93, + 372, + 202 + ], + "blocks": [ + { + "bbox": [ + 220, + 93, + 372, + 202 + ], + "lines": [ + { + "bbox": [ + 220, + 93, + 372, + 202 + ], + "spans": [ + { + "bbox": [ + 220, + 93, + 372, + 202 + ], + "type": "image", + "image_path": "cf498dcc0f0551d854e6089165108833da42aef14b8789312e63f10306302f53.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 384, + 93, + 536, + 202 + ], + "blocks": [ + { + "bbox": [ + 384, + 93, + 536, + 202 + ], + "lines": [ + { + "bbox": [ + 384, + 93, + 536, + 202 + ], + "spans": [ + { + "bbox": [ + 384, + 93, + 536, + 202 + ], + "type": "image", + "image_path": 
"780459435f7dcd851bc2a17b3a882ae4f0dd704c150286faeb4d32782ced5b5b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 342, + 255, + 538, + 270 + ], + "blocks": [ + { + "bbox": [ + 45, + 211, + 545, + 256 + ], + "lines": [ + { + "bbox": [ + 45, + 211, + 545, + 256 + ], + "spans": [ + { + "bbox": [ + 45, + 211, + 545, + 256 + ], + "type": "text", + "content": "Figure 5. Seven methods on different backbone scales. Figures show results on PASCAL VOC, COCO, and ADE20K from left to right. Swin-S, Swin-B, and Swin-L are employed as the pre-trained models for PASCAL VOC and ADE20K. Swin-T, Swin-S, and Swin-B are employed for COCO. FIXED's performances are so low on COCO and ADE20K that they reduce the intuitiveness of the other six methods, so FIXED is only presented in PASCAL VOC comparisons." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 342, + 255, + 538, + 270 + ], + "lines": [ + { + "bbox": [ + 342, + 255, + 538, + 270 + ], + "spans": [ + { + "bbox": [ + 342, + 255, + 538, + 270 + ], + "type": "image", + "image_path": "c8f8fb199094b4d44297a887101b9142ca0aa4a8533d9a78d2804c16a7a98a80.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 56, + 274, + 208, + 386 + ], + "blocks": [ + { + "bbox": [ + 56, + 274, + 208, + 386 + ], + "lines": [ + { + "bbox": [ + 56, + 274, + 208, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 274, + 208, + 386 + ], + "type": "image", + "image_path": "8beae4f9a09d00f90cecc036a470adcd74773704abd7430320102b6b1e266291.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 216, + 274, + 372, + 386 + ], + "blocks": [ + { + "bbox": [ + 216, + 274, + 372, + 386 + ], + "lines": [ + { + "bbox": [ + 216, + 274, + 372, + 386 + ], + "spans": [ + { + "bbox": [ + 216, + 274, + 372, + 386 + ], + "type": "image", + "image_path": "128d7434bbe4dec4e4d11a3c91b0ddb00bf1ce980db749fe4ff84624fbeb0a25.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 381, + 274, + 537, + 387 + ], + "blocks": [ + { + "bbox": [ + 381, + 274, + 537, + 387 + ], + "lines": [ + { + "bbox": [ + 381, + 274, + 537, + 387 + ], + "spans": [ + { + "bbox": [ + 381, + 274, + 537, + 387 + ], + "type": "image", + "image_path": "80ef1c9114fff622842c4aebee06a2c60b1be5b51ab9208d703be709a5193c33.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "lines": [ + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "spans": [ + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": "Figure 6. Ablation Study for " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " ranges from 2, 4, 6, and " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " ranges from 4, 8, 16. Figures from left to right present experiments on three benchmarks respectively. We only present " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\mathrm{AP_{Box}}" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " changes for COCO benchmark considering the strong correlation between the values of " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\mathrm{AP_{Box}}" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "inline_equation", + "content": "\\mathrm{AP_{Mask}}" + }, + { + "bbox": [ + 45, + 395, + 545, + 429 + ], + "type": "text", + "content": " in COCO." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 449, + 287, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 287, + 485 + ], + "type": "text", + "content": "demonstrate that LoRand can outperform both FULL and traditional adapter structures in low-resource cases and perform very closely to FULL in large benchmarks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 491, + 141, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 491, + 141, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 491, + 141, + 504 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "content": "In this section, we ablate two key hyperparameters in LoRand: the LoRand branch number " + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "content": " and the kernel matrix dimension " + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "content": " affects the distributed decision-making of LoRand, while " + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 46, + 510, + 287, + 569 + ], + "type": "text", + "content": " focuses on a single branch's learning capability and consistency." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": "Several sets of ablation experiments are designed and implemented to investigate the effect of " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " on the performance of LoRand. The ablation experiments were conducted on the same three benchmarks. In order to improve the upper limit of LoRand, our experiments are conducted on the largest backbone of each dataset (ADE20K/PASCAL VOC: Swin-L, COCO: Swin-B). The value sets of " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\{2,4,6\\}" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\{4,8,16\\}" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": ". Figure 6 shows the results of ablation studies on three datasets. In most cases, LoRand's performance increases slightly as " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 45, + 570, + 288, + 714 + ], + "type": "text", + "content": " become larger but hardly outperforms fine-tuning on large benchmarks. Besides, exponentially increasing the size of the LoRand does" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 449, + 545, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 521 + ], + "type": "text", + "content": "not result in an equivalent performance improvement and even leads to a reduction (" + }, + { + "bbox": [ + 304, + 449, + 545, + 521 + ], + "type": "inline_equation", + "content": "\\alpha = 6" + }, + { + "bbox": [ + 304, + 449, + 545, + 521 + ], + "type": "text", + "content": " in VOC and COCO). Ablation studies demonstrate that larger LoRands have fewer gains both in parameter efficiency and performance. We have considered this trade-off when designing the LoRand standard, LoRand+, and LoRand++." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 537, + 378, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 537, + 378, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 537, + 378, + 550 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "text", + "content": "This paper presents LoRand, a parameter-efficient low-rank adapter for dense predictions, which completely shares the feature understanding of advanced pre-trained models and effectively transfers it to downstream tasks. LoRand performs on par with fine-tuning in COCO instance segmentation, ADE20K semantic segmentation, and PASCAL VOC object detection with only " + }, + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 304, + 558, + 545, + 715 + ], + "type": "text", + "content": " trainable backbone parameters. Moreover, LoRand effectively avoids the disadvantages of the fine-tuning paradigm and delivers better performance in low-resource situations. We hope that parameter-efficient LoRand can save massive redundant storage resources and facilitate a unified training paradigm for vision and language." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20123" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Caisse Amisse, Mario Ernesto Jijón-Palma, and Jorge Antonio Silva Centeno. Fine-tuning deep learning models for pedestrian detection. *Boletim de Ciências Geólicas*, 27, 2021. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 287, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 287, + 178 + ], + "type": "text", + "content": "[2] Alexei Baevski, Sergey Edunov, Yinhan Liu, Luke Zettle-moyer, and Michael Auli. Cloze-driven pretraining of self-attention networks. arXiv preprint arXiv:1903.07785, 2019. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 287, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 287, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 287, + 235 + ], + "type": "text", + "content": "[3] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. On the opportunities and risks of foundation models. 
arXiv preprint arXiv:2108.07258, 2021. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 287, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 287, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 287, + 290 + ], + "type": "text", + "content": "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 291, + 287, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 287, + 335 + ], + "type": "text", + "content": "[5] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 336, + 287, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 336, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 53, + 336, + 287, + 388 + ], + "type": "text", + "content": "[6] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "text", + "content": "[7] Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. Adaptformer: Adapting vision transformers for scalable visual recognition. arXiv preprint arXiv:2205.13535, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 436, + 287, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 287, + 478 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 287, + 478 + ], + "type": "text", + "content": "[8] Zhe Chen, Yuchen Duan, Wenhai Wang, Junjun He, Tong Lu, Jifeng Dai, and Yu Qiao. Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 480, + 287, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 287, + 524 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 287, + 524 + ], + "type": "text", + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 1, 2, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 557 + ], + "type": "text", + "content": "[10] Zhengming Ding and Yun Fu. Deep transfer low-rank coding for cross-domain learning. IEEE transactions on neural networks and learning systems, 30(6):1768-1779, 2018. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 591 + ], + "type": "text", + "content": "[11] Zhengming Ding, Ming Shao, and Yun Fu. Deep low-rank coding for transfer learning. In Twenty-Fourth International Joint Conference on Artificial Intelligence, 2015. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 592, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 645 + ], + "type": "text", + "content": "[12] Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah Smith. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. arXiv preprint arXiv:2002.06305, 2020. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 713 + ], + "type": "text", + "content": "[13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "text", + "content": "[14] Mark Everingham, SM Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes challenge: A retrospective. International journal of computer vision, 111(1):98-136, 2015. 2, 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 162 + ], + "type": "text", + "content": "[15] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 163, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 545, + 185 + ], + "type": "text", + "content": "[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 228 + ], + "type": "text", + "content": "[17] Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Yuan Yao, Ao Zhang, Liang Zhang, et al. Pre-trained models: Past, present and future. AI Open, 2:225-250, 2021. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 232, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 232, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 232, + 545, + 274 + ], + "type": "text", + "content": "[18] Junxian He, Chunting Zhou, Xuezhe Ma, Taylor Berg-Kirkpatrick, and Graham Neubig. Towards a unified view of parameter-efficient transfer learning. In International Conference on Learning Representations, 2021. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 277, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 277, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 308, + 277, + 545, + 330 + ], + "type": "text", + "content": "[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 545, + 376 + ], + "type": "text", + "content": "[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 3, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 377, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 420 + ], + "type": "text", + "content": "[21] Xuehai He, Chunyuan Li, Pengchuan Zhang, Jianwei Yang, and Xin Eric Wang. Parameter-efficient fine-tuning for vision transformers. arXiv preprint arXiv:2203.16329, 2022. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 422, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 545, + 477 + ], + "type": "text", + "content": "[22] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019. 2, 3, 5, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 479, + 545, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 479, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 308, + 479, + 545, + 543 + ], + "type": "text", + "content": "[23] Fatsuma Jauro, Haruna Chiroma, Abdulsalam Y Gital, Mubarak Almutairi, M Abdulhamid Shafi'i, and Jemal H Abawajy. Deep learning architectures in emerging cloud computing architectures: Recent development, challenges and next research trend. Applied Soft Computing, 96:106582, 2020. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 545, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 545, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 545, + 545, + 588 + ], + "type": "text", + "content": "[24] Menglin Jia, Luming Tang, Bor-Chun Chen, Claire Cardie, Serge Belongie, Bharath Hariharan, and Ser-Nam Lim. Visual prompt tuning. arXiv preprint arXiv:2203.12119, 2022. 
2, 3, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 622 + ], + "type": "text", + "content": "[25] Shibo Jie and Zhi-Hong Deng. Convolutional bypasses are better vision transformer adapters. arXiv preprint arXiv:2207.07039, 2022. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "text", + "content": "[26] Christoph Käding, Erik Rodner, Alexander Freytag, and Joachim Denzler. Fine-tuning deep neural networks in continuous learning scenarios. In *Asian Conference on Computer Vision*, pages 588–605. Springer, 2016. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[27] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In Proceedings of the IEEE international conference on computer vision workshops, pages 554–561, 2013. 6" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20124" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 1, 2, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "text", + "content": "[29] Fanfan Liu, Haoran Wei, Wenzhe Zhao, Guozhen Li, Jingquan Peng, and Zihao Li. Wb-detr: Transformer-based detector without backbone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2979-2987, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "text", + "content": "[30] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586, 2021. 
1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 285 + ], + "type": "text", + "content": "[31] Yen-Cheng Liu, Chih-Yao Ma, Junjiao Tian, Zijian He, and Zsolt Kira. Polyhistor: Parameter-efficient multi-task adaptation for dense vision tasks. arXiv preprint arXiv:2210.03265, 2022. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "type": "text", + "content": "[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 1, 2, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 343, + 288, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 288, + 387 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 288, + 387 + ], + "type": "text", + "content": "[33] Cheng Long Li, Andong Lu, Ai Hua Zheng, Zhengzheng Tu, and Jin Tang. Multi-adapter rgbt tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, pages 0-0, 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 454 + ], + "type": "text", + "content": "[34] Yuning Mao, Lambert Mathias, Rui Hou, Amjad Alma-hairi, Hao Ma, Jiawei Han, Scott Yih, and Madian Khabsa. Unipelt: A unified framework for parameter-efficient language model tuning. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6253-6264, 2022. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 499 + ], + "type": "text", + "content": "[35] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, pages 722-729. IEEE, 2008. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 501, + 287, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 287, + 533 + ], + "type": "text", + "content": "[36] Matthew E Peters, Sebastian Ruder, and Noah A Smith. To tune or not to tune? adapting pretrained representations to diverse tasks. arXiv preprint arXiv:1903.05987, 2019. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 535, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 535, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 535, + 287, + 601 + ], + "type": "text", + "content": "[37] Jonas Pfeiffer, Aishwarya Kamath, Andreas Rückle, Kyunghyun Cho, and Iryna Gurevych. Adapterfusion: Nondestructive task composition for transfer learning. 
In 16th Conference of the European Chapter of the Association for Computational Linguistics, EACL 2021, pages 487-503. Association for Computational Linguistics (ACL), 2021. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 602, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 668 + ], + "type": "text", + "content": "[38] Jonas Pfeiffer, Andreas Rücklé, Clifton Poth, Aishwarya Kamath, Ivan Vulić, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. Adapterhub: A framework for adapting transformers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 46-54, 2020. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[39] Jonathan Pilault, Christopher Pal, et al. Conditionally adaptive multi-task learning: Improving transfer learning in nlp using fewer parameters & less data. In International Conference on Learning Representations, 2020. 3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 127 + ], + "type": "text", + "content": "[40] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67, 2020. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 171 + ], + "type": "text", + "content": "[41] Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. Advances in neural information processing systems, 30, 2017. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 307, + 173, + 545, + 216 + ], + "type": "text", + "content": "[42] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 271 + ], + "type": "text", + "content": "[43] Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, André Susano Pinto, Daniel Keysers, and Neil Houlsby. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems, 34:8583-8595, 2021. 
4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 272, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 545, + 336 + ], + "type": "text", + "content": "[44] Andreas Rücklé, Gregor Geigle, Max Glockner, Tilman Beck, Jonas Pfeiffer, Nils Reimers, and Iryna Gurevych. Adapterdrop: On the efficiency of adapters in transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7930-7946, 2021. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 371 + ], + "type": "text", + "content": "[45] Omer Sagi and Lior Rokach. Ensemble learning: A survey. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery, 8(4):e1249, 2018. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 372, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 426 + ], + "type": "text", + "content": "[46] Chompunuch Sarasaen, Soumick Chatterjee, Mario Breitkopf, Georg Rose, Andreas Nurnberger, and Oliver Speck. Fine-tuning deep learning model parameters for improved super-resolution of dynamic mri with prior-knowledge. Artificial Intelligence in Medicine, 121:102196, 2021. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 427, + 545, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 427, + 545, + 470 + ], + "spans": [ + { + "bbox": [ + 308, + 427, + 545, + 470 + ], + "type": "text", + "content": "[47] Jaime Sevilla, Lennart Heim, Anson Ho, Tamay Besiroglu, Marius Hobbahn, and Pablo Villalobos. Compute trends across three eras of machine learning. arXiv preprint arXiv:2202.05924, 2022.1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 472, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 472, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 472, + 545, + 536 + ], + "type": "text", + "content": "[48] Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990, 2022. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 537, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 591 + ], + "type": "text", + "content": "[49] Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in nlp. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3645–3650, 2019. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 592, + 545, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 592, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 308, + 592, + 545, + 658 + ], + "type": "text", + "content": "[50] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. 
Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 712 + ], + "type": "text", + "content": "[51] Yi-Lin Sung, Jaemin Cho, and Mohit Bansal. Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5227-5237, 2022. 3" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "20125" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 586 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[52] B Thilagavathi, K Suthendran, and K Srujanraju. Evaluating the adaboost algorithm for biometric-based face recognition. In Data Engineering and Communication Technology, pages 669-678. Springer, 2021. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 162 + ], + "type": "text", + "content": "[53] Edna Chebet Too, Li Yujiang, Sam Njuki, and Liu Yingchun. A comparative study of fine-tuning deep learning models for plant disease identification. Computers and Electronics in Agriculture, 161:272-279, 2019. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 287, + 206 + ], + "type": "text", + "content": "[54] Thijs Vogels, Sai Praneeth Karimireddy, and Martin Jaggi. Practical low-rank communication compression in decentralized deep learning. Advances in Neural Information Processing Systems, 33:14171-14181, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 208, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 208, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 208, + 287, + 239 + ], + "type": "text", + "content": "[55] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The caltech-ucsd birds-200-2011 dataset. 2011. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 287, + 295 + ], + "type": "text", + "content": "[56] Xudong Wang, Zhaowei Cai, Dashan Gao, and Nuno Vasconcelos. Towards universal object detection by domain attention. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7289-7298, 2019. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 297, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 297, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 49, + 297, + 287, + 342 + ], + "type": "text", + "content": "[57] Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In Proceedings of the European conference on computer vision (ECCV), pages 418-434, 2018. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 343, + 287, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 343, + 287, + 385 + ], + "spans": [ + { + "bbox": [ + 49, + 343, + 287, + 385 + ], + "type": "text", + "content": "[58] Sha Yuan, Hanyu Zhao, Shuai Zhao, Jiahong Leng, Yangxiao Liang, Xiaozhi Wang, Jifan Yu, Xin Lv, Zhou Shao, Jiaao He, et al. A roadmap for big model. arXiv preprint arXiv:2203.14101, 2022. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 387, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 387, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 49, + 387, + 287, + 441 + ], + "type": "text", + "content": "[59] Aston Zhang, Yi Tay, SHUAI Zhang, Alvin Chan, Anh Tuan Luu, Siu Hui, and Jie Fu. Beyond fully-connected layers with quaternions: Parameterization of hypercomplex multiplications with " + }, + { + "bbox": [ + 49, + 387, + 287, + 441 + ], + "type": "inline_equation", + "content": "1/n" + }, + { + "bbox": [ + 49, + 387, + 287, + 441 + ], + "type": "text", + "content": " parameters. In International Conference on Learning Representations, 2020. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 443, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 443, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 49, + 443, + 287, + 487 + ], + "type": "text", + "content": "[60] Chaoning Zhang, Chenshuang Zhang, Junha Song, John Seon Keun Yi, Kang Zhang, and In So Kweon. A survey on masked autoencoder for self-supervised learning in vision and beyond. arXiv preprint arXiv:2208.00173, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 488, + 287, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 488, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 49, + 488, + 287, + 531 + ], + "type": "text", + "content": "[61] Jianwei Zhao, Yongbiao Lv, Zhenghua Zhou, and Feilong Cao. A novel deep learning algorithm for incomplete face recognition: Low-rank-recovery network. Neural Networks, 94:115-124, 2017. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 533, + 287, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 533, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 49, + 533, + 287, + 586 + ], + "type": "text", + "content": "[62] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. 
2, 5" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "20126" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_content_list.json b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..70d13111d26e6cee4355472a500e11ad9efb4507 --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_content_list.json @@ -0,0 +1,1570 @@ +[ + { + "type": "text", + "text": "1000 FPS HDR Video with a Spike-RGB Hybrid Camera", + "text_level": 1, + "bbox": [ + 197, + 130, + 772, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yakun Chang $^{1,2}$ Chu Zhou $^{3}$ Yuchen Hong $^{1,2}$ Liwen Hu $^{2}$ Chao Xu $^{3}$ Tiejun Huang $^{1,2}$ Boxin Shi $^{1,2*}$", + "bbox": [ + 86, + 178, + 893, + 198 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ National Key Laboratory for Multimedia Information Processing, School of Computer Science, Peking University", + "$^{2}$ National Engineering Research Center of Visual Technology, School of Computer Science, Peking University", + "$^{3}$ National Key Laboratory of General AI, School of Intelligence Science and Technology, Peking University {yakunchang, zhou_chu, huliwen, tjhuang, shiboxin}@pku.edu.cn yuchenhong.cn@gmail.com, xuchao@cis.pku.edu" + ], + "bbox": [ + 107, + 199, + 866, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 321, + 313, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Capturing high frame rate and high dynamic range (HFR&HDR) color videos in high-speed scenes with conventional frame-based cameras is very challenging. The increasing frame rate is usually guaranteed by using shorter exposure time so that the captured video is severely interfered by noise. Alternating exposures can alleviate the noise issue but sacrifice frame rate due to involving long-exposure frames. The neuromorphic spiking camera records high-speed scenes of high dynamic range without colors using a completely different sensing mechanism and visual representation. We introduce a hybrid camera system composed of a spiking and an alternating-exposure RGB camera to capture HFR&HDR scenes with high fidelity. Our insight is to bring each camera's superiority into full play. The spike frames, with accurate fast motion information encoded, are firstly reconstructed for motion representation, from which the spike-based optical flows guide the recovery of missing temporal information for long-exposure RGB images while retaining their reliable color appearances. With the strong temporal constraint estimated from spike trains, both missing and distorted colors cross RGB frames are recovered to generate time-consistent and HFR color frames. 
We collect a new Spike-RGB dataset that contains 300 sequences of synthetic data and 20 groups of real-world data to demonstrate 1000 FPS HDR videos outperforming HDR video reconstruction methods and commercial high-speed cameras.", + "bbox": [ + 75, + 354, + 473, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 763, + 209, + 780 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The spiking camera [17] and event camera [10] are neuromorphic sensors working differently from conventional frame-based digital cameras, which have many attractive characteristics, e.g., high-speed (perceiving scene", + "bbox": [ + 75, + 790, + 468, + 852 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/60240d2e429f7bbaa254aba2a45842b1ba35e1ca5ec1952b36f6954e438ed27c.jpg", + "image_caption": [ + "Figure 1. (a) We build a spike-RGB hybrid camera system to achieve 1000 FPS HDR video reconstruction1. (b) The RGB camera uses alternating-exposure mode with a frame rate of 60 FPS, where $t_s$ , $4t_s$ , and $12t_s$ are the short, middle, and long exposure in our setup, respectively. The sampling frequency of the spiking camera is $20000\\mathrm{Hz}$ ." + ], + "image_footnote": [], + "bbox": [ + 503, + 320, + 890, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "radiance changes at the microsecond level), high dynamic range (HDR, $\\geq 100$ dB). However, since they only record neuromorphic signals, i.e., spike trains [64] and event streams [25], which are less friendly to the human visual system and cannot be directly processed by CNN-based models for video frames [40, 41], preprocessing modules that convert neuromorphic signals into compatible formats are usually required when applying them to frame-based vision algorithms [61, 65]. In comparison with event streams, spike trains contain concrete textured information of scene radiances, which are more suitable for reconstructing high frame rate (HFR) videos [61-64]. However, since the spiking camera only encodes the absolute intensities of environments, colors are absent in the reconstructed video frames.", + "bbox": [ + 496, + 597, + 892, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "When capturing with a frame-based RGB camera, quality of recorded colors for each frame is determined by trading off the exposure time, ambient light, and target objects' moving speed [57]. For high-speed dynamic scenes, it often", + "bbox": [ + 496, + 810, + 893, + 872 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 862, + 222, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Project page: https://changyakun.github.io/1000FPS-HDR", + "bbox": [ + 78, + 876, + 467, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "The video result is available on our project page.", + "bbox": [ + 517, + 886, + 782, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "22180", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "requires to set shorter exposure time to guarantee a higher frame rate and avoid motion blur. In such a situation, since the exposure time is extremely short, the quality of video frames would be severely degenerated due to noise. Merging a burst of short-exposure images is a simple yet effective approach to reduce the noise level [8, 11], however, the color shift caused by noise is difficult to be corrected. Fusing alternating-exposure (using short, middle, and long exposures) RGB frames is commonly used for synthesizing well-exposed images [3, 19, 21]. However, they are not suitable for high-speed scenes. As illustrated in Fig. 1(b), given a sequence of alternating-exposure RGB images, the total time from the starting of the current exposure to the starting of the next frame, denoted by $T$ , is consistent for all frames, and it is composed of the exposure time $T_{\\mathrm{exp}}$ and interval time $T_{\\mathrm{itv}}$ (containing the readout and waiting time). It can be seen that the information during interval time is lost, and the frame rate they could achieve is thus limited to dozens of FPS. Another possible solution is to build a hybrid camera system to capture low frame rate (LFR) color sequence and high-speed neuromorphic signals simultaneously, then use the neuromorphic signals to interpolate [51, 52] and deblur [14, 18, 59] the RGB frames. However, the saturated regions are usually ignored, leaving the colors of the interpolated frames still unsatisfactory. HDR intensity map (does not contain any chromatic information) built from the neuromorphic signals can also be used to compensate the missing textures in the saturated regions [15]. But such an approach is not robust for scenes with large areas of saturated regions, due to the heavy reliance on the chrominance compensation network to hallucinate the color.", + "bbox": [ + 76, + 90, + 472, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose an all-in-one framework to reconstruct HRF (Fig. 1(a), at the level of 1000 FPS) color videos with high fidelity from the spike trains and a series of alternating-exposure frames captured by a Spike-RGB hybrid camera system simultaneously (Fig. 1(b)). 
To make full use of the color information in RGB images, we propose a three-stage strategy to deal with different situations using specific modules: (i) For the blurry middle- and long-exposure images, we design a spike guided deblurring module to recover the corresponding sharp images with faithful colors; (ii) for missing colors during the interval time, we design a spike guided interpolation module that exploits the abundant motion information (SC-Flow [16]) obtained from spike trains; (iii) for suppressing noise in short-exposure images and maintaining temporal consistency, we design a merging module, which exploits the variant of recurrent U-Net [42] as its backbone, to complete the HFR&HDR color video reconstruction process. To summarize, this paper makes contributions by proposing:", + "bbox": [ + 76, + 560, + 472, + 847 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- an all-in-one framework to reconstruct high-speed HDR color video by jointly fusing spike trains and a sequence of alternating-exposure frames;", + "bbox": [ + 93, + 854, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- a three-stage strategy fusing alternating exposures of RGB frames for the generation of well-exposure colors, via a recurrent convolution neural network for continuous frames interpolation guided by spike trains;", + "- a Spike-RGB hybrid camera system to demonstrate the applicability of the proposed method for capturing high-speed and high dynamic range scenes." + ], + "bbox": [ + 514, + 90, + 893, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Experimental results show that the proposed method outperforms the state-of-the-art HDR video reconstruction method [3] and commercial cameras with the slow-motion photography capability in reconstructing 1000 FPS HDR color videos on synthetic data and real-world data.", + "bbox": [ + 496, + 205, + 893, + 282 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 294, + 640, + 310 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "HDR image and video reconstruction. The most common way to reconstruct HDR images is to fuse a set of LDR images with bracketed exposures [7, 34]. Since the results for dynamic scenes often contain ghosting artifacts, image alignment [28, 45] and deep learning [20, 55] are employed to reconstruct sharp HDR images. To better reduce ghosting artifacts, Lee et al. [24] and Shaw et al. [46] apply the estimated motion information from a high frame rate sequence to facilitate the HDR image synthesis. Messikommer et al. [35] also achieve HDR reconstruction by combining bracketed-exposure RGB images and events. There are methods being designed for HDR reconstruction from a single image. These methods cannot recover the missing textures in clipped regions [9, 44]. Abhiram and Chan [1] reconstruct HDR images with a quanta image sensor (QIS). Han et al. [15] find that the reconstructed intensity maps from event streams and spike trains contain abundant textures saturated in LDR images. Therefore, they exploit intensity maps to guide HDR image restoration. For the capturing of HDR videos, many existing methods use specialized hardware, such as scanline exposure [13], per-pixel exposure [37], or multiple sensors [33, 50]. Due to the particularity of hardware, these methods are limited to narrow applications. 
Merging alternating-exposure image sequences is the most common yet effective way to reconstruct HDR videos [12, 19, 21, 22, 30, 31]. Recently, Chen et al. [3] propose a coarse-to-fine network that performs alignment and fusion sequentially both in the image and feature space. However, these methods can only deal with LFR videos with about 20-60 FPS.", + "bbox": [ + 496, + 319, + 893, + 770 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "HFR video reconstruction. There is plenty of data redundancy in capturing HFR videos directly by commercial high-speed cameras, e.g., the Phatom camera². Building a hybrid system with a high-resolution LFR camera and a low-resolution HFR camera, and utilizing HFR signals to reconstruct a sequence of sharp images from blurred images [2, 49] is a more data-efficient way for HFR video", + "bbox": [ + 496, + 772, + 893, + 878 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2https://www.phantomhighspeed.com/", + "bbox": [ + 514, + 886, + 785, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "22181", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/98c720d4cece126ecdfa4451934d7d939ae0de158ecc49548ba1c53ceaaf3e0d.jpg", + "image_caption": [ + "Figure 2. (a) The pipeline of the proposed solution. It contains three steps: Step $①$ spike preprocessing (Sec. 3.2), Step $②$ RGB frame processing (Sec. 3.3), and Step $③$ merging into HFR video (Sec. 3.4). Given the spike trains, we firstly estimate the optical flow from them as well as reconstruct spike frames. Secondly, we rectify the uneven brightness with a linear mapping function and use spike-guided deblurring (SG-deblur) to reconstruct sharp color frames. Finally, we use spike-guided frame interpolation (SG-interpolation) to recover the missing colors during $T_{\\mathrm{itv}}$ , and reconstruct time-consistent color frames. (b) and (c) show the detailed pipeline of SG-deblur and SG-interpolation." + ], + "image_footnote": [], + "bbox": [ + 83, + 89, + 519, + 305 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a3796d73d2d27d9cb39525bfe02cc066e9e00718bd42123a115adabad161f76c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 90, + 883, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "reconstruction. Li et al. [26] use a stereo pair of low-resolution HFR and high-resolution LFR cameras to calculate the fast motion and the depth map. Avinash et al. [38] compute optical flows between two existing frames by utilizing the content of auxiliary HFR videos. Jiang et al. [18] recover a sharp video sequence from a motion-blurred image by integrating the visual and temporal knowledge that is contained in the events. Xu et al. [54] achieve real-world event-based deblurring with a self-supervised learning method. Tulyakov et al. [52] propose the Time Lens that utilizes high-speed events to achieve video frame interpolation (VFI). Following that, Time Lens++ [51] further improves the performance. For the reason that real data are absent, Yu et al. [56] propose a weakly supervised method with the help of subpixel attention learning. 
Although the event-based interpolation realizes HFR video reconstruction [51, 52], the recovered quality of colors is usually unsatisfactory due to that single exposure cannot balance artifacts from noise and blur, we therefore propose to jointly fuse the high-speed spike signals and alternating-exposure RGB frames to achieve high-quality reconstruction.", + "bbox": [ + 75, + 409, + 472, + 726 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Approach", + "text_level": 1, + "bbox": [ + 76, + 739, + 184, + 756 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 763, + 186, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to reconstruct HFR&HDR videos from the binary spike trains $\\mathbb{S}(x,y) = \\{s(x,y,t)\\} (s(x,y,t) = 1$ if the accumulated photons reach a certain threshold, then the accumulator is reset and $s(x,y,t) = 0$ before the next spike is fired [17]) and LFR alternating-exposure RGB frames $\\mathbb{B} = \\{\\mathbf{B}_k\\} ^3$ , where $(x,y)$ denote the coordinates of spikes, $t$", + "bbox": [ + 75, + 786, + 468, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "denotes the timestamp, and $k$ denotes the index of an RGB image in the sequence. As shown in Fig. 2(a), to achieve this goal, we design a pipeline that consists of three steps:", + "bbox": [ + 500, + 409, + 890, + 454 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step ①: Spike preprocessing (Sec. 3.2). We estimate the optical flow $\\mathbf{F}_i$ and spike frames $\\mathbf{I}_i$ from the spike trains:", + "bbox": [ + 498, + 454, + 890, + 484 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {i} (x, y) = \\mathcal {S C} \\left(s \\left(x, y, t _ {i} \\rightarrow t _ {i + 1}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 496, + 890, + 513 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {I} _ {i} (x, y) = \\int_ {t _ {i} t _ {f} / 2} ^ {t _ {i} + t _ {f} / 2} s (x, y, t) d t, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 523, + 890, + 560 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathcal{SC}(\\cdot)$ denotes optical flow estimation with Hu et al.'s [16] method, $i$ and $t_i$ denote the index and timestamp of spike frames, and $t_f$ is the time window. In Sec. 3.2, we further super-resolve $\\mathbf{I}_i$ at the feature space.", + "bbox": [ + 496, + 566, + 890, + 627 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step ②: RGB frame preprocessing (Sec. 3.3). For the 60 FPS RGB images captured with alternating exposures, i.e., $t_s, 4t_s$ , and $12t_s$ , we firstly unify the uneven brightness with a linear mapping function. Then we conduct motion deblurring for $4t_s$ and $12t_s$ images. For the $t_s$ images, when $t_s$ is sufficiently short, i.e., 1 ms, we assume the short-exposure image is free from motion blur, and take $t_s$ as the reference time for the motion deblurring. Consequently, we can recover 4 and 12 sharp images from $4t_s$ and $12t_s$ images, respectively. As shown in Fig. 
2(b), we use $\\mathbf{B}^l$ to denote a blurry image, and the motion deblurring operation can be formulated as: $\\{\\mathbf{B}_j^l\\} = \\mathcal{R}(\\mathbf{B}^l, \\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\}, \\mathbf{B}^s)$ , where $j$ is the index of a recovered sharp image, $\\mathcal{R}(\\cdot)$ is sharp image reconstruction, $\\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\}$ is the corresponding spike frames, and $\\mathbf{B}^s$ is the nearest short-exposure RGB frame.", + "bbox": [ + 496, + 628, + 892, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step ③: Merging into HFR video (Sec. 3.4). Following Step ②, for the interval time $(T_{\\mathrm{itv}})$ that colors are not recorded, we bidirectionally query two nearest sharp RGB", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "3In this paper, we use $\\{\\cdot\\}$ to denote collections.", + "bbox": [ + 94, + 886, + 346, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "22182", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/69d8b311ce36a4530ecf232650dd28197899054c0313144d824fc179e70d4d2f.jpg", + "image_caption": [ + "warping", + "Figure 3. For the sake of increasing spatial resolution, we adopt flow-based warping to merge adjacent 5 spike frames." + ], + "image_footnote": [], + "bbox": [ + 78, + 99, + 153, + 146 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/cf39b78ad3ae1e85fd6294c3574fe337a9e139e2680711172548c5216ff96c0f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 158, + 90, + 233, + 146 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ae9042900c55487e39988804c1314cb26d945fa12eb7432a4fe8c52019a9b1ae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 235, + 90, + 310, + 146 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c8d6dedbab383c4cabfdbf4100b648119d41bdf85f6b86c070cb6ca3c7313a25.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 313, + 90, + 390, + 146 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/49bbef8a2cd1ba9b8519bf1120a392f74430894e19e52a34957f5c27c82657ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 90, + 468, + 146 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "images $\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}$ for each spike frame $\\mathbf{I}_i$ , and get the warped images $\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ with optical flow, where $+$ and $-$ denote the forward and backward warping, respectively. In Fig. 2(c), we provide an illustration of the interpolation procedure. Finally, as shown in Fig. 4, we reconstruct time-consistent color frames, and each frame $\\mathbf{C}_i$ is generated by merging the spike frame $\\mathbf{I}_i$ with $\\{\\mathbf{C}_i\\}_{1}, \\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ with the strong constraint of optical flow.", + "bbox": [ + 76, + 180, + 468, + 303 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Spike preprocessing", + "text_level": 1, + "bbox": [ + 76, + 313, + 267, + 329 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The optical flow estimation and spike frame reconstruction using in Eqn. (1) and Eqn. 
(2) are theoretically, yet the reconstructed frames practically have two issues: Since the integration time $t_f$ is very short, noise is relatively strong; the spatial resolution of the first generation spiking camera (VidarOne [17]) is much lower than the RGB camera. To reduce the noise and increase the spatial resolution, inspired by the burst-based super-resolution [4] and denoising [27] for conventional RGB images, it is feasible to merge a group of adjacent spike frames with the help of spatial alignment. Moreover, thanks to the continuous motion recording capability of spiking cameras, the optical flow [16] estimated from spike trains makes the alignment even more stable than RGB images. As illustrated in Fig. 3, we design a computationally efficient module for spike frames, which is formulated as: $\\hat{\\mathbf{I}}_i = \\{\\mathcal{W}_{\\mathbf{F}_{j\\to i}}(\\mathbf{I}_j)|j\\in \\mathcal{N}_i\\}$ , where $\\mathcal{W}_{\\mathcal{F}_{j\\to i}}(\\cdot)$ denotes the flow-based warping operation, $\\mathcal{N}_i$ denotes a collection of adjacent frames. Then, we feed $\\hat{\\mathbf{I}}_i$ to a set of convolutional layers, and we use PixelShuffle [47] to increase the spatial resolution while decreasing the channel of features. It should be noted that the method for spike frame reconstruction is not unique, which means users can choose other learning-based methods [61, 62, 64]. However, those deep learning models are relatively heavy, and less efficient as a submodule fitting to our pipeline.", + "bbox": [ + 76, + 337, + 470, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. RGB image preprocessing", + "text_level": 1, + "bbox": [ + 76, + 726, + 313, + 742 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RGB linear mapping. Following previous methods for HDR video reconstruction [3, 19, 21], we first unify the brightness of alternating-exposure RGB frames. Since we use an industrial camera (details in Sec. 3.5) that can acquire data without a nonlinear radiometric response function, the linearity of the captured frames is maintained. We find that the brightness of the frames can maintain a linear relationship with the duration of exposure time. Hence we use the global linear mapping to unify the frame brightness: $\\alpha \\cdot \\mathbf{B}_k(x,y)\\rightarrow \\mathbf{B}_k(x,y)$ , where $\\alpha$ denotes a linear scalar.", + "bbox": [ + 76, + 750, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Spike-guided deblurring. The physical model of the blurring process can be simply formulated as the average of a group of sharp images, i.e., $\\mathbf{B}^l (x,y) = \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)$ , where $N$ denotes the number of sharp images. However, due to the limited dynamic range of the RGB camera, that simplified equation does not hold in the clipped regions of real-world long-exposure frames. In general we should have: $\\mathbf{B}^l (x,y)\\leq \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)$ . 
Therefore, for reconstructing a sequence of sharp HDR images from $\\mathbf{B}^l$ , we divide it into two sub-tasks: (i) For the well-exposure regions, we use the sharp spike frames to guide motion deblurring; (ii) for the clipped regions where colors are lost, we compensate them with well-retained colors extracted from the adjacent short-exposure image $\\mathbf{B}^s$ .", + "bbox": [ + 496, + 90, + 890, + 304 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 2(b) shows the spike-guided deblurring (SG-deblur) from $\\mathbf{B}_l$ ( $\\mathbf{B}_l$ may be a middle- or long-exposure image). Similar to Xu et al. [54] that exploit event frames to motion deblurring, we first concatenate $\\mathbf{B}_l$ with $\\{\\mathbf{I}_l^j\\}$ , then extract shallow features and increase feature channels with PixelShuffle [47], which is followed by a set of residual dense blocks (RDBs) [60] and a decoder. To make the colors in over-exposure regions be compensated by the adjacent short-exposure RGB image $\\mathbf{B}_j^s$ , we warp the short-exposure image with the optical flow estimated from spike trains: $\\mathbf{B}_j^s = \\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\mathbf{B}^s)$ , where $\\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\cdot)$ denotes the warping operation from timestamp $t_s$ to the timestamp of $t_j$ . Subsequently, we extract features from $\\{\\mathbf{B}_l^{s\\rightarrow j}\\}$ and add residual links between them and the decoder. Finally, we obtain a sequence of sharp color images. Note that the SG-deblur for the middle- and long-exposure RGB images share the same architecture while the parameters are not shareable. SG-deblur outputs four images for both $4t_s$ and $12t_s$ frames. For the case of $12t_s$ frame, we interpolate the 4 frames to 12 frames with flow-based warping.", + "bbox": [ + 496, + 305, + 892, + 607 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, we briefly explain the reason why this event-based model [54] can be applied to a spike-based task. Both event streams and spike trains with the high-speed property have been used for motion deblurring and latent frame reconstruction [14,18,54]. It is necessary to convert them to event frames and spike frames, both of which belong to the category of 2D images. But event frames and spike frames have different physical meanings: Pixel values in an event frame reveal the residual (relatively sparse information) between two adjacent frames, while pixel values in a spike frame represent exactly the texture (relatively dense information) of the corresponding frame. Since both event frames and spike frames are 2D images and the spike frames have denser texture information, we can replace event frames in such a model with spike frames, so as to make the solution to the problem more well-posed.", + "bbox": [ + 496, + 608, + 892, + 849 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Merging into HFR video", + "text_level": 1, + "bbox": [ + 500, + 861, + 723, + 877 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RGB interpolation. Given each middle- and long-exposure", + "bbox": [ + 500, + 885, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "22183", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f70c457f89317c1a8dfbd75c2c3839b8139e7a74e3c29226b3c581376d4d252a.jpg", + "image_caption": [ + "Figure 4. 
Network architecture of the CNN-RNN-based merging module for reconstructing HFR&HDR videos from alternating-exposure RGB frames and HFR spike frames. This module outputs HDR color frames in a step-wise manner. We unroll the module for $M$ steps during training." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 890, + 314 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "frame, SG-deblur recovers 4 and 12 images. Therefore, the recovered RGB frames have a frame rate of $340^{4}$ FPS. But temporal distribution of them is quite uneven, e.g., there is no recovered color frame interval time $T_{\\mathrm{itv}}$ . Fortunately, the spike train contains continuous and dense texture information in the temporal domain. In Step ③, we use the SG-interpolation module to interpolate RGB frames into a sequence of uniformly distributed images. For each spike frame $\\mathbf{I}_i$ , we bidirectionally query its two nearest recovered RGB frames $\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}$ and interpolate two color frames $\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ with the optical flow estimated from spike trains. When $\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ are fed into our merging module, they are weighted by a linear coefficient $(\\oplus$ in Fig. 4) related to the distance between $t_i$ and $\\{t_+, t\\}$ , where $\\{t_+, t\\}$ denote the timestamp of $\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ .", + "bbox": [ + 75, + 364, + 472, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Merging module. The aforementioned modules reconstruct coarse HFR video frames, which need to be refined for smoothing over time. We build a CNN-RNN-based HFR&HDR video reconstruction network to merge the spike frames and RGB frames, which is shown in Fig. 4. The merging module consists of three encoders, i.e., $\\mathcal{E}_I$ , $\\mathcal{E}_B$ , and $\\mathcal{E}_C$ , which are respectively designed for feature extraction from the current spike frame $\\hat{\\mathbf{I}}_i$ , the interpolated RGB images $\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}$ , and the previously reconstructed image $\\mathbf{C}_{i-1}$ . In $\\mathcal{E}_I$ , we use PixelShuffle [47] to make the spatial resolution of spike features consistent with RGB features. The extracted features are denoted as $\\mathbf{E}_I$ , $\\{\\mathbf{E}_B, \\mathbf{E}_{B+}\\}$ , and $\\mathbf{E}_{C_i-1}$ , respectively.", + "bbox": [ + 75, + 598, + 468, + 795 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Considering the spike frames and RGB frames may not be perfectly aligned at pixel level for real-world data, we add deformable convolution layers [6] to improve the robustness to this issue. In order to output flicker-free color frames, we adopt two constraints in the merging module:", + "bbox": [ + 75, + 795, + 468, + 872 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/9754e7ba0b3f019bb54d91503f166a8e553e6c6c01ac31fde44a015920f2d53f.jpg", + "table_caption": [ + "Table 1. Details of the composition of the dataset (res. is the abbreviation of resolution)." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>data</td><td>RGB res.</td><td>spike res.</td><td>train/test</td><td>time</td></tr>
<tr><td>full-synthetic</td><td>500×800</td><td>250×400</td><td>80/20</td><td>0.1s</td></tr>
<tr><td>real-synthetic</td><td>600×800</td><td>250×400</td><td>160/40</td><td>0.101s</td></tr>
<tr><td>real-world</td><td>484×784</td><td>242×392</td><td>-/20</td><td>0.101s</td></tr></table>
", + "bbox": [ + 521, + 396, + 870, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(i) We add three ConvLSTM layers [48] to feed previous states forward in temporal domain; (ii) we feed $\\mathbf{E}_{C_i}$ into the current step and align it with the current features with flow-based warping. We then use a decoder to reversely map deep features to the current output HDR frame $\\mathbf{C}_i$ . We achieve the multi-module signal fusion by adding concatenation links between $\\{\\mathbf{E}_{C_i}$ , $\\mathbf{E}_B$ , $\\mathbf{E}_{B+}\\}$ and the decoder.", + "bbox": [ + 496, + 458, + 890, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 574, + 718, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Due to the setting of our method being different from existing HDR and video frame interpolation methods, there are no suitable datasets for training and testing our method. Therefore, we collect a new one with three components, whose details are summarized in Table 1 and sample images are provided in Fig. 5.", + "bbox": [ + 496, + 598, + 890, + 688 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Part 1: Full-synthetic data. This part of data is obtained by using the spike simulator proposed by Hu et al. [16]. We render 2000 RGB images with their computer graphics based solution as ground truth and generate 2000 spike planes (0.1 s). Since the photons arriving at the sensor follow Poisson probability distribution [43], we synthesize alternating-exposure 60 FPS RGB frames with a Poisson noise model. For the full synthetic data, we randomly select starting time of each group of training data. We randomly shift the RGB frames within 3 pixels to make the trained model more robust to the misalignment in real-world data.", + "bbox": [ + 496, + 689, + 892, + 854 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Part 2: Real-synthetic data. To reduce the domain gap between full-synthetic data and real-world data, we design a method to collect real-synthetic (the scenes are real while", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4From $60 = 20\\times 3$ to $340 = 20\\times (1 + 4 + 12)$", + "bbox": [ + 94, + 886, + 315, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "22184", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/176aa9dfa2ecaf20f74ee48cdeb45fed0d736431be6a2a3e803a4ccf3f70da7d.jpg", + "image_caption": [ + "Figure 5. Example frames from the proposed dataset. Each group shows three alternating-exposure RGB frames (left, from top to bottom rows) and the corresponding spike signals (right)." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 467, + 215 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the spike trains are synthetic) data, and we use this part of data to fine-tune our model. The RGB frames are captured with an alternating-exposure mode in slow-motion scenes. Then we synthesize blurry middle-exposure RGB frames by averaging 4 adjacent middle-exposure RGB images, and blurry long-exposure RGB frames are synthesized in a similar way. We synthesize spike trains from ground truth RGB frames with the integrate-and-fire methodology [61].", + "bbox": [ + 75, + 282, + 468, + 404 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Part 3: Real-world data. We build a Spike-RGB hybrid camera (Fig. 
6) to capture real-world data. The system is composed of an industrial camera (Basler acA800-510uc $^5$ ) with alternating exposure capability and a spiking camera [17]. There is a beam splitter in front of the two sensors. We conduct geometric calibration and time synchronization to align bimodal signals collected by them.", + "bbox": [ + 75, + 404, + 468, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Loss and training. The SG-deblur module and the merging module reconstruct images in the linear luminance domain, which covers a high dynamic range of pixel values. Following existing methods for HDR reconstruction, for the output images $\\mathbf{C}$ , we compress the range of pixel values by applying the following function proposed by Kalantari et al. [20]: $\\mathcal{T}(\\mathbf{C}) = \\log (1 + \\mu \\mathbf{C}) / \\log (1 + \\mu)$ , where $\\mathcal{T}(\\cdot)$ denotes the tone mapping operation and $\\mu$ denotes the amount of compression. For these two modules, we employ widely used $l_{1}$ loss, Structure similarity (SSIM) loss [53], and Learned Perceptual Image Patch Similarity (LPIPS) loss [58]. The total loss at step $i$ for both the motion deblurring and merging modules is", + "bbox": [ + 75, + 508, + 468, + 705 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} (i) = \\mathcal {L} _ {l _ {1}} (i) + \\beta_ {1} \\mathcal {L} _ {\\text {S S I M}} (i) + \\beta_ {2} \\mathcal {L} _ {\\text {L P I P S}} (i), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 717, + 468, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\beta_{1} = 1$ and $\\beta_{2} = 1$ . For spike-based optical flow estimation using [16], we fine-tune the parameters with full-synthetic data. During training, we resize the RGB images and spike frames to $512 \\times 800$ and $256 \\times 400$ . We implement our model with PyTorch, set the batch size to 4, and use ADAM optimizer during the training process. We first train the model on full-synthetic data. The SG-deblur module is trained with 50 epochs, before training the merging", + "bbox": [ + 75, + 744, + 468, + 867 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3a9785b5e9c025abd121037f9499ff36cfcf100a90bcaf5ae65255ec856a5815.jpg", + "image_caption": [ + "Figure 6. The prototype of our Spike-RGB imaging system composed of a spiking camera and an RGB camera." + ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 890, + 220 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "module. We unroll the merging module for $M$ steps, and we find $M = 4$ achieves a suitable balance between training time and recovery quality. The total loss for the unrolled $M$ steps is $\\mathcal{L}_{\\mathrm{merge}} = \\sum_{i=1}^{M} \\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)$ , where $\\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)$ denotes the total loss for the merging module at step $i$ . The initial learning rate for both two modules is 0.001, we decay it to $10^{-6}$ with a linear strategy. For the real-synthetic data, we fine-tune another group of parameters to reduce the gap between synthetic data and real-world data. We use one NVIDIA Tesla A100 for training, and the training procedure consumes about 30 hours.", + "bbox": [ + 498, + 272, + 890, + 439 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. 
Experiments", + "text_level": 1, + "bbox": [ + 500, + 453, + 633, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Quantitative Evaluation using Synthetic Data", + "text_level": 1, + "bbox": [ + 500, + 478, + 883, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Validation on full-synthetic data. Figure 8 shows a group of results on full-synthetic data. We can see that both the flying objects in the short-exposure image and the oversaturated clouds (see the regions marked by boxes) in the long-exposure image are recovered successfully. The results with rich textures and consistent colors show the feasibility of our proposed method.", + "bbox": [ + 498, + 502, + 890, + 607 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation on real-synthetic data. To the best of our knowledge, the proposed method is the first framework to reconstruct HFR&HDR videos with the combination of spike trains and alternating-exposure RGB frames. Therefore, it is unfair to compare our method with existing ones, i.e., Kalantari13 [21], Kalantari19 [19], and Chen21 $[3]^{6}$ , which are designed for low frame rate HDR videos.", + "bbox": [ + 498, + 608, + 890, + 713 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We choose a state-of-the-art HDR video reconstruction method Chen21 [3], which also uses alternating-exposure RGB frames (the closest setup to ours) as a reference. Figure 7 shows the reconstruction results on real-synthetic data of the proposed method and Chen21 [3]. Thanks to the complementary motion information provided by spike trains, the abundant color extracted from alternating-exposure RGB frames, and the accurate textures contained in spike frames, the proposed method is capable of reconstructing rich texture details with less motion blur. 
For ex", + "bbox": [ + 496, + 713, + 890, + 864 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "5https://www.baslerweb.com/en/products/camera/ area-scan-cameras/ace/aca800-510uc/", + "bbox": [ + 76, + 875, + 467, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "In this section, we use \"Last name of the first author+year\" as synonyms of methods for comparison.", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "22185", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/afa1aa45dd7684f40beb4726479a3aba9c3cd0d96ec6f4bf23778aeeefabdc8f.jpg", + "image_caption": [ + "short" + ], + "image_footnote": [], + "bbox": [ + 81, + 101, + 207, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/71cdc16eea019dcd1bd0dd0177ee5024f38cc34f796330d5d1eac074cf09b1e0.jpg", + "image_caption": [ + "middle", + "(a)" + ], + "image_footnote": [], + "bbox": [ + 215, + 101, + 344, + 271 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6162aa256a287f2f993030fdd823e1c193c5a5945396f892262b2ef1595ff2d3.jpg", + "image_caption": [ + "long" + ], + "image_footnote": [], + "bbox": [ + 349, + 101, + 477, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6eb5166094979d17c7be16571a725ef333c07be4f97c0f8c35d1361d0ff8d59e.jpg", + "image_caption": [ + "short" + ], + "image_footnote": [], + "bbox": [ + 491, + 101, + 619, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/18487dec66ddaa2019bfc213e532588aef073461a9d8d2d11543e80ebfbdbd84.jpg", + "image_caption": [ + "middle", + "(b)" + ], + "image_footnote": [], + "bbox": [ + 625, + 101, + 754, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/865b4f729ea98b545b1620ffe9198a6a878505d1040d35bbaf214b8fe6412abe.jpg", + "image_caption": [ + "long" + ], + "image_footnote": [], + "bbox": [ + 759, + 101, + 888, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fbb712d19253aafc1fd9d055053cde4360d158fb65c53da6d8203063df92c7ec.jpg", + "image_caption": [ + "Figure 7. Visual equality comparison of real-synthetic data between the proposed method and the state-of-the-art HDR video reconstruction method: Chen 21 [3]. We present two sets of results in (a) and (b). Please zoom-in electronic versions for better details, and watch the HFR videos on the project page.", + "Figure 8. Validation on the synthetic data." + ], + "image_footnote": [], + "bbox": [ + 81, + 335, + 467, + 489 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ample, in the long-exposure frame in the first row of (a), the building marked by a yellow box suffers from severe motion blur and overexposure. Chen21 [3] partially recovers the colors of this building, but it fails to remove the blurry artifacts. In the results generated by our method, the edges are sharp and the colors are vivid. In Fig. 7(b), the motions across RGB frames have a very large span, Chen21 [3] can only recover the corresponding LFR videos, while our method can reconstruct an HFR video with smooth motion.", + "bbox": [ + 75, + 532, + 468, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluate the reconstructed HDR in terms of PSNR, SSIM, HDR-VDP-2 [32], and HDR-VQM [36]. Table 2 clearly shows that our framework outperforms the state-of-the-art method [3] in all the metrics on the real-synthetic data in the condition of 60 FPS. 
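For reference, the tone mapping used throughout Sec. 3.5, $\mathcal{T}(\mathbf{C}) = \log(1 + \mu \mathbf{C}) / \log(1 + \mu)$ [20], is also the standard operator for evaluating HDR outputs in a compressed domain; whether the PSNR/SSIM values in Table 2 are computed on tone-mapped or linear frames is not stated in this excerpt. A minimal NumPy sketch of tone-mapped PSNR, assuming linear frames normalized to [0, 1] and $\mu = 5000$ (an assumed, commonly used value):

```python
import numpy as np

MU = 5000.0  # amount of compression mu; 5000 is a common choice, assumed here (value not given in the excerpt)

def mu_law_tonemap(c, mu=MU):
    """Range compression T(C) = log(1 + mu*C) / log(1 + mu) for linear HDR frames in [0, 1]."""
    return np.log1p(mu * c) / np.log1p(mu)

def tonemapped_psnr(pred, gt, mu=MU):
    """PSNR computed in the tone-mapped domain, a common convention for HDR evaluation."""
    mse = np.mean((mu_law_tonemap(pred, mu) - mu_law_tonemap(gt, mu)) ** 2)
    return 10.0 * np.log10(1.0 / max(float(mse), 1e-12))

# Toy usage with random linear-domain frames at the 512 x 800 working resolution from Sec. 3.5.
rng = np.random.default_rng(0)
gt = rng.random((512, 800, 3)).astype(np.float32)
pred = np.clip(gt + 0.01 * rng.standard_normal(gt.shape).astype(np.float32), 0.0, 1.0)
print(f"tone-mapped PSNR: {tonemapped_psnr(pred, gt):.2f} dB")
```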
And we achieve excellent performance in the condition of 1000 FPS. We designed ablation experiments and used them to demonstrate the effectiveness of the modules in our framework. For \"w/o I\", we simply stack the spike trains with a time window, and upsample them using bilinear interpolation; for \"w/o PS\", we replace PixelShuffle with a convolutional layer. The two groups of experiments verify the effectiveness of spike frame preprocessing in Step ①. For \"w/o F1\" and \"w/o F2\", we remove the flow-based interpolation in the deblurring module and the merging module. The two groups of ex", + "bbox": [ + 75, + 672, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/bd2a969c0528a92c9fb04d225b6e4660c1e3727866146a713c86bbb5a5091a3f.jpg", + "table_caption": [ + "Table 2. Quantitative results and ablation study on our realistic synthetic data. We sample 60 FPS videos from our results for the comparison with Chen21 [3]. $\\uparrow (\\downarrow)$ indicates larger (smaller) values are better." + ], + "table_footnote": [], + "table_body": "
Comparison with the state-of-th-art method
MethodPSNR↑SSIM↑HDR-VDP-2↑HDR-VQM↓FPS
Chen21 [3]18.460.69727.340.53660
Ours30.140.92160.140.093
Chen21 [3]////1000
Ours24.380.90347.790.120
Ablation study
w/o I23.150.88646.030.1431000
w/o PS23.980.88146.470.141
w/o F119.760.72338.950.314
w/o F218.040.71635.890.356
w/ t-loss22.410.86443.640.142
w/o DeConv24.310.89747.660.127
w/o DM19.010.71437.970.338
", + "bbox": [ + 503, + 393, + 890, + 563 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "periments verify the effectiveness of SC-Flow [16] based interpolation in Steps ② and ③. To further verify the effectiveness of deblurring module, we completely remove it in \"w/o DM\". For \"w/o DeConv\", we replace the deformable convolutional layers with traditional convolution layers. For \"w/ t-loss\", we remove the warping operation on $\\mathbf{C}_{i-1}$ and add the temporal consistent loss that is estimated by a pretrained optical flow model [23], which is widely used in video processing [5, 39]. Since the $\\mathbf{C}_{i-1}$ is warped by accurate optical flow $\\mathbf{F}_{i-1}$ and merged into the current step $i$ , our method fundamentally has a strong temporal consistent constraint for video processing. Thus, our merging module does not need this loss during training.", + "bbox": [ + 496, + 580, + 890, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Qualitative Evaluation using Real Data", + "text_level": 1, + "bbox": [ + 500, + 786, + 834, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In order to demonstrate the effectiveness of the proposed framework on real-world scenes, we collect 20 sets of real-world data, which are captured by our hybrid camera system shown in Fig. 6. We have compared our slow-motion capability with that of the commercial cameras. As shown in Fig. 9(a), the electric fan is moving at about 40 rounds", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "22186", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1f268f5b45e6511852f7c10d04b0e95083619bab47aae0ee46893577875ea9d0.jpg", + "image_caption": [ + "Figure 9. Visual quality comparison of real-world data between the proposed method and commercial cameras with the slow-motion capability. In (a), we show two adjacent frames for the video captured by smartphones that have slow-motion capability. The commercial cameras are not calibrated so their results are not strictly aligned with ours. (b) is the comparison with Phantom camera set to 1000 FPS." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 192, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/72f0059269a352e5fae6316d26d6d3997fa3fa7268ce9475787b3e6b04ccd5e3.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 196, + 89, + 305, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/87e87953be2aadf893e099d367271fb5ced7b1b9999ee6fc17190a8fe64abca6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 308, + 89, + 418, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/12ab643dce1827b18edc878212e142f247aa8711e1c782c9617d11b686395bd6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 90, + 547, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/70699cd7e7248946e810a0c7736901cdbac1ebc3551834ffa17d97f4ef9afb79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 553, + 90, + 666, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c3d3ddfd28c99162d50a607d2239a16a351121102d1537088a159bcc3e287777.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 666, + 90, + 888, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e93c85761807f3ca93c18962918da723c0e397456cdc1707f1e86ea56554f370.jpg", + "image_caption": [ + "Figure 10. Qualitative visualization of our method in a super fast scene: a balloon bursting. We select 38 frames from our results for showing." + ], + "image_footnote": [], + "bbox": [ + 78, + 287, + 468, + 487 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "per second. The short-exposure image is severely underexposed with less blurry artifacts, and the middle- and long-exposure images have severe blurring and oversaturated artifacts. With the accurate motion and texture information captured by the spiking camera, we have recovered temporally smooth video sequences. Four recovered images are shown for the middle- and long-exposure images. For the videos captured by iPhone 13 and Mi 10, the motions between frames are not continuous. And the electric fan captured by Mi 10 is deformed due to the rolling shutter. In Fig. 9(b), we compare our method with the Phantom7 camera set to 1000 FPS. Since the exposure time of the Phantom camera is extremely short, it fails to capture regions where scene radiance is weak.", + "bbox": [ + 75, + 547, + 468, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 773, + 194, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose an HFR&HDR video reconstruction method with a hybrid camera that is composed of an alternating-exposure RGB sensor and a spiking sensor. Extensive experiments on synthetic and real-world data demonstrate the superior performance of the proposed method.", + "bbox": [ + 76, + 800, + 468, + 875 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Discussion. (i) For super fast scenes, e.g., a balloon bursting, it is difficult to capture clear motions with a conventional RGB camera at 60 FPS. Therefore, the well-exposed color of the bursting balloon is not captured with the short exposure, which brings challenges to our reconstruction of accurate color. In our results, although the colors are somewhat distorted, we can still recover a smooth video sequence. 
Once the frame rate of the RGB camera is increased, e.g., 120 FPS, temporally smoother video with more accurate color is expected to be more reliably recovered. (ii) Since QIS [1, 29] share the same imaging model with the spiking camera, our method is ready to be applied to it. We show the simulation in supplementary material.", + "bbox": [ + 500, + 289, + 890, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitation and future work. Beam splitter is arguable for making a practical system on mobile devices. But when compact design is not a hard constraint, beam splitter has unique advantages in spatial alignment, that is why it is broadly adopted in building a hybrid prototype for HDR [15, 24, 33, 50]. Side-by-side arrangement with parallax unavoidably introduces occlusions and alignment issues, which is a promising direction to explore for our future work. Due to the low spatial resolution $(250\\times 400)$ of the current model we use is, we have to super-resolve the spike frames in feature space. If higher-resolution spike signals can be directly obtained, our method can achieve better visual quality. Besides, there is a domain gap between synthetic spike trains and real-captured spike trains since the noise of the spiking camera is more complex than the simulator. For time complexity, our approach is better suited as a post-processing module. The number of parameters is $45.7\\mathrm{M}$ and the time cost per frame is 0.371s with a single NVIDIA GeForce RTX 3090 graphics card. We hope to tackle these issues in the future work and achieve higher frame rate reconstruction.", + "bbox": [ + 498, + 486, + 892, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 811, + 658, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by National Key R&D Program of China (2021ZD0109803), National Natural Science Foundation of China under Grant No. 62088102, 62136001. Yakun Chang was also supported by China Postdoctoral Science Foundation (8206300710).", + "bbox": [ + 498, + 825, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "7Refer to footnote 2. Camera model: VEO 640, F/1.8, 85mm lens.", + "bbox": [ + 94, + 886, + 444, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "22187", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Gnanasambandam Abhiram and Chan Stanley H. HDR imaging with quanta image sensors: Theoretical limits and optimal reconstruction. IEEE Transactions on Computational Imaging, 6:1571-1585, 2020. 2, 8", + "[2] Moshe Ben-Ezra and Shree K Nayar. Motion deblurring using hybrid imaging. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 2", + "[3] Guanying Chen, Chaofeng Chen, Shi Guo, Zhetong Liang, Kwan-Yee K Wong, and Lei Zhang. HDR video reconstruction: A coarse-to-fine network and a real-world benchmark dataset. In Proc. of International Conference on Computer Vision, pages 2502-2511, 2021. 2, 4, 6, 7", + "[4] Wooyeong Cho, Sanghyeok Son, and Dae-Shik Kim. Weighted multi-kernel prediction network for burst image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 404-413, 2021. 4", + "[5] Jonghyun Choi, Kuk-Jin Yoon, et al. 
Learning to super resolve intensity images from events. In Proc. of Computer Vision and Pattern Recognition, pages 2768-2776, 2020. 7", + "[6] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proc. of International Conference on Computer Vision, pages 764-773, 2017. 5", + "[7] Paul E Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proc. of ACM SIGGRAPH, pages 1-10. 2008. 2", + "[8] Akshay Dudhane, Syed Waqas Zamir, Salman Khan, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Burst image restoration and enhancement. In Proc. of Computer Vision and Pattern Recognition, pages 5759-5768, 2022. 2", + "[9] Gabriel Eilertsen, Joel Kronander, Gyorgy Denes, Rafat K Mantiuk, and Jonas Unger. HDR image reconstruction from a single exposure using deep cnns. ACM Transactions on Graphics, 36(6):1-15, 2017. 2", + "[10] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1):154-180, 2020. 1", + "[11] Clément Godard, Kevin Matzen, and Matt Uytendaele. Deep burst denoising. In Proc. of European Conference on Computer Vision, pages 538-554, 2018. 2", + "[12] Yulia Gryaditskaya, Tania Pouli, Erik Reinhard, Karol Myszkowski, and Hans-Peter Seidel. Motion aware exposure bracketing for HDR video. In Computer Graphics Forum, volume 34, pages 119-130. Wiley Online Library, 2015. 2", + "[13] Saghi Hajisharif, Joel Kronander, and Jonas Unger. Adaptive dualiso HDR reconstruction. EURASIP Journal on Image and Video Processing, 2015(1):1-13, 2015. 2", + "[14] Jin Han, Yixin Yang, Chu Zhou, Chao Xu, and Boxin Shi. Evintsr-net: Event guided multiple latent frames reconstruction and super-resolution. In Proc. of International Conference on Computer Vision, pages 4882-4891, 2021. 2, 4", + "[15] Jin Han, Chu Zhou, Peiqi Duan, Yehui Tang, Chang Xu, Chao Xu, Tiejun Huang, and Boxin Shi. Neuromorphic cam-" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "era guided high dynamic range imaging. In Proc. of Computer Vision and Pattern Recognition, pages 1730-1739, 2020. 2, 8", + "[16] Liwen Hu, Rui Zhao, Ziluo Ding, Lei Ma, Boxin Shi, Ruiqin Xiong, and Tiejun Huang. Optical flow estimation for spiking camera. In Proc. of Computer Vision and Pattern Recognition, pages 17844-17853, 2022. 2, 3, 4, 5, 6, 7", + "[17] Tiejun Huang, Yajing Zheng, Zhaofei Yu, Rui Chen, Yuan Li, Ruiqin Xiong, Lei Ma, Junwei Zhao, Siwei Dong, Lin Zhu, et al. $1000 \\times$ faster camera and machine vision with ordinary devices. Engineering, 2022. 1, 3, 4, 6", + "[18] Zhe Jiang, Yu Zhang, Dongqing Zou, Jimmy Ren, Jiancheng Lv, and Yebin Liu. Learning event-based motion deblurring. In Proc. of Computer Vision and Pattern Recognition, pages 3320-3329, 2020. 2, 3, 4", + "[19] Nima Khademi Kalantari and Ravi Ramamoorthi. Deep HDR video from sequences with alternating exposures. In Computer graphics forum, volume 38, pages 193-205. Wiley Online Library, 2019. 2, 4, 6", + "[20] Nima Khademi Kalantari, Ravi Ramamoorthi, et al. Deep high dynamic range imaging of dynamic scenes. ACM Transactions on Graphics, 36(4):144-1, 2017. 2, 6", + "[21] Nima Khademi Kalantari, Eli Shechtman, Connelly Barnes, Soheil Darabi, Dan B Goldman, and Pradeep Sen. 
Patch-based high dynamic range video. ACM Transactions on Graphics, 32(6):202-1, 2013. 2, 4, 6", + "[22] Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High dynamic range video. ACM Transactions on Graphics, 22(3):319-325, 2003. 2", + "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proc. of European Conference on Computer Vision, pages 170-185, 2018. 7", + "[24] Byungju Lee and Byung Cheol Song. Multi-image high dynamic range algorithm using a hybrid camera. Signal Processing: Image Communication, 30:37-56, 2015. 2, 8", + "[25] Juan Antonio Lénero-Bardallo, Teresa Serrano-Gotarredona, and Bernabé Linares-Barranco. A 3.6 $\\mu$ s latency asynchronous frame-free event-driven dynamic-vision-sensor. IEEE Journal of Solid-State Circuits, 46(6):1443-1455, 2011. 1", + "[26] Feng Li, Jingyi Yu, and Jinxiang Chai. A hybrid camera for motion deblurring and depth map super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 3", + "[27] Ziwei Liu, Lu Yuan, Xiaou Tang, Matt Uytendaele, and Jian Sun. Fast burst images denoising. ACM Transactions on Graphics, 33(6):1-9, 2014. 4", + "[28] Kede Ma, Hui Li, Hongwei Yong, Zhou Wang, Deyu Meng, and Lei Zhang. Robust multi-exposure image fusion: A structural patch decomposition approach. IEEE Transactions on Image Processing, 26(5):2519-2532, 2017. 2", + "[29] Ulku Arin C Bruschini Claudio Charbon Edoardo Ma Sizhuo, Gupta Shantanu and Gupta Mohit. Quanta burst photography. ACM Transactions on Graphics, 39(4):79-1, 2020. 8" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "22188", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Stephen Mangiat and Jerry Gibson. High dynamic range video with ghost removal. In Applications of Digital Image Processing XXXIII, volume 7798, pages 307-314. SPIE, 2010. 2", + "[31] Stephen Mangiat and Jerry Gibson. Spatially adaptive filtering for registration artifact removal in HDR video. In Proc. of International Conference on Image Processing, pages 1317-1320. IEEE, 2011. 2", + "[32] Rafal Mantiuk, Kil Joong Kim, Allan G Rempel, and Wolfgang Heidrich. HDR-VDP-2: A calibrated visual metric for visibility and quality predictions in all luminance conditions. ACM Transactions on Graphics, 30(4):1-14, 2011. 7", + "[33] Morgan McGuire, Wojciech Matusik, Hanspeter Pfister, Billy Chen, John F Hughes, and Shree K Nayar. Optical splitting trees for high-precision monocular imaging. IEEE Computer Graphics and Applications, 27(2):32-42, 2007. 2, 8", + "[34] Tom Mertens, Jan Kautz, and Frank Van Reeth. Exposure fusion. In Pacific Conference on Computer Graphics and Applications, pages 382-390, 2007. 2", + "[35] Nico Messikommer, Stamatios Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-Bracket high dynamic range imaging with event cameras. In Proc. of Computer Vision and Pattern Recognition, pages 547–557, 2022. 2", + "[36] Manish Narwaria, Matthieu Perreira Da Silva, and Patrick Le Callet. HDR-VQM: An objective quality measure for high dynamic range video. Signal Processing: Image Communication, 35:46-60, 2015. 7", + "[37] Shree K Nayar and Tomoo Mitsunaga. High dynamic range imaging: Spatially varying pixel exposures. In Proc. 
of Computer Vision and Pattern Recognition, volume 1, pages 472-479. IEEE, 2000. 2", + "[38] Avinash Paliwal and Nima Khademi Kalantari. Deep slow motion video reconstruction with hybrid imaging system. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(7):1557-1569, 2020. 3", + "[39] Henri Rebecq, René Ranftl, Vladlen Koltun, and Davide Scaramuzza. High speed and high dynamic range video with an event camera. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):1964-1980, 2019. 7", + "[40] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proc. of Computer Vision and Pattern Recognition, pages 779-788, 2016. 1", + "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 1", + "[42] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234–241. Springer, 2015. 2", + "[43] Yash Sanghvi, Abhiram Gnanasambandam, and Stanley H Chan. Photon limited non-blind deblurring using algorithm" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "unrolling. IEEE Transactions on Computational Imaging, 2022.5", + "[44] Marcel Santana Santos, Tsang Ing Ren, and Nima Khademi Kalantari. Single image HDR reconstruction using a cnn with masked features and perceptual loss. arXiv preprint arXiv:2005.07335, 2020. 2", + "[45] Pradeep Sen, Nima Khademi Kalantari, Maziar Yaesoubi, Soheil Darabi, Dan B Goldman, and Eli Shechtman. Robust patch-based HDR reconstruction of dynamic scenes. ACM Transactions on Graphics, 31(6):203-1, 2012. 2", + "[46] Richard Shaw, Sibi Catley-Chandar, Ales Leonardis, and Eduardo Perez-Pellitero. HDR reconstruction from bracketed exposures and events. arXiv preprint arXiv:2203.14825, 2022. 2", + "[47] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proc. of Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 4, 5", + "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 5", + "[49] Yu-Wing Tai, Hao Du, Michael S Brown, and Stephen Lin. Image/video deblurring using a hybrid camera. In Proc. of Computer Vision and Pattern Recognition, pages 1-8, 2008. 2", + "[50] Michael D Tocci, Chris Kiser, Nora Tocci, and Pradeep Sen. A versatile HDR video production system. ACM Transactions on Graphics, 30(4):1-10, 2011. 2, 8", + "[51] Stepan Tulyakov, Alfredo Bochicchio, Daniel Gehrig, Stamatios Georgoulis, Yuanyou Li, and Davide Scaramuzza. Time Lens++: Event-based frame interpolation with parametric non-linear flow and multi-scale fusion. In Proc. of Computer Vision and Pattern Recognition, pages 17755-17764, 2022. 2, 3", + "[52] Stepan Tulyakov, Daniel Gehrig, Stamatios Georgoulis, Julius Erbach, Mathias Gehrig, Yuanyou Li, and Davide Scaramuzza. Time Lens: Event-based video frame interpolation. In Proc. 
of Computer Vision and Pattern Recognition, pages 16155-16164, 2021. 2, 3", + "[53] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6", + "[54] Fang Xu, Lei Yu, Bishan Wang, Wen Yang, Gui-Song Xia, Xu Jia, Zhendong Qiao, and Jianzhuang Liu. Motion deblurring with real events. In Proc. of International Conference on Computer Vision, pages 2583-2592, 2021. 3, 4", + "[55] Qingsen Yan, Lei Zhang, Yu Liu, Yu Zhu, Jinqiu Sun, Qinfeng Shi, and Yanning Zhang. Deep HDR imaging via a non-local network. IEEE Transactions on Image Processing, 29:4308-4322, 2020. 2", + "[56] Zhiyang Yu, Yu Zhang, Deyuan Liu, Dongqing Zou, Xijun Chen, Yebin Liu, and Jimmy S Ren. Training weakly supervised video frame interpolation with events. In Proc. of" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "22189", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "International Conference on Computer Vision, pages 14589-14598, 2021. 3", + "[57] Cheng Zhang, Shaolin Su, Yu Zhu, Qingsen Yan, Jinqiu Sun, and Yanning Zhang. Exploring and evaluating image restoration potential in dynamic scenes. In Proc. of Computer Vision and Pattern Recognition, pages 2067-2076, 2022. 1", + "[58] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proc. of Computer Vision and Pattern Recognition, pages 586-595, 2018. 6", + "[59] Xiang Zhang and Lei Yu. Unifying motion deblurring and frame interpolation with events. In Proc. of Computer Vision and Pattern Recognition, pages 17765-17774, 2022. 2", + "[60] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 4", + "[61] Jing Zhao, Ruiqin Xiong, Hangfan Liu, Jian Zhang, and Tiejun Huang. Spk2Imgnet: Learning to reconstruct dynamic scene from continuous spike stream. In Proc. of Computer Vision and Pattern Recognition, pages 11996-12005, 2021. 1, 4, 6", + "[62] Yajing Zheng, Lingxiao Zheng, Zhaofei Yu, Boxin Shi, Yonghong Tian, and Tiejun Huang. High-speed image reconstruction through short-term plasticity for spiking cameras. In Proc. of Computer Vision and Pattern Recognition, pages 6358-6367, 2021. 1, 4", + "[63] Lin Zhu, Siwei Dong, Tiejun Huang, and Yonghong Tian. A retina-inspired sampling method for visual texture reconstruction. In Proc. of International Conference on Multimedia and Expo. 1", + "[64] Lin Zhu, Siwei Dong, Jianing Li, Tiejun Huang, and Yonghong Tian. Retina-like visual image reconstruction via spiking neural model. In Proc. of Computer Vision and Pattern Recognition, pages 1438-1446, 2020. 1, 4", + "[65] Yunhao Zou, Yinqiang Zheng, Tsuyoshi Takatani, and Ying Fu. Learning to reconstruct high speed and high dynamic range videos from events. In Proc. of Computer Vision and Pattern Recognition, pages 2024-2033, 2021. 
1" + ], + "bbox": [ + 78, + 90, + 468, + 642 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "22190", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_model.json b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a072e50159768fa0dddfa6ebf2894dfa6cf5f7c3 --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_model.json @@ -0,0 +1,2455 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.131, + 0.773, + 0.154 + ], + "angle": 0, + "content": "1000 FPS HDR Video with a Spike-RGB Hybrid Camera" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.179, + 0.894, + 0.199 + ], + "angle": 0, + "content": "Yakun Chang\\(^{1,2}\\) Chu Zhou\\(^{3}\\) Yuchen Hong\\(^{1,2}\\) Liwen Hu\\(^{2}\\) Chao Xu\\(^{3}\\) Tiejun Huang\\(^{1,2}\\) Boxin Shi\\(^{1,2*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.2, + 0.867, + 0.217 + ], + "angle": 0, + "content": "\\(^{1}\\) National Key Laboratory for Multimedia Information Processing, School of Computer Science, Peking University" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.218, + 0.857, + 0.235 + ], + "angle": 0, + "content": "\\(^{2}\\) National Engineering Research Center of Visual Technology, School of Computer Science, Peking University" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.235, + 0.849, + 0.288 + ], + "angle": 0, + "content": "\\(^{3}\\) National Key Laboratory of General AI, School of Intelligence Science and Technology, Peking University {yakunchang, zhou_chu, huliwen, tjhuang, shiboxin}@pku.edu.cn yuchenhong.cn@gmail.com, xuchao@cis.pku.edu" + }, + { + "type": "list", + "bbox": [ + 0.108, + 0.2, + 0.867, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.322, + 0.314, + 0.339 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.355, + 0.474, + 0.749 + ], + "angle": 0, + "content": "Capturing high frame rate and high dynamic range (HFR&HDR) color videos in high-speed scenes with conventional frame-based cameras is very challenging. The increasing frame rate is usually guaranteed by using shorter exposure time so that the captured video is severely interfered by noise. Alternating exposures can alleviate the noise issue but sacrifice frame rate due to involving long-exposure frames. The neuromorphic spiking camera records high-speed scenes of high dynamic range without colors using a completely different sensing mechanism and visual representation. We introduce a hybrid camera system composed of a spiking and an alternating-exposure RGB camera to capture HFR&HDR scenes with high fidelity. Our insight is to bring each camera's superiority into full play. 
The spike frames, with accurate fast motion information encoded, are firstly reconstructed for motion representation, from which the spike-based optical flows guide the recovery of missing temporal information for long-exposure RGB images while retaining their reliable color appearances. With the strong temporal constraint estimated from spike trains, both missing and distorted colors cross RGB frames are recovered to generate time-consistent and HFR color frames. We collect a new Spike-RGB dataset that contains 300 sequences of synthetic data and 20 groups of real-world data to demonstrate 1000 FPS HDR videos outperforming HDR video reconstruction methods and commercial high-speed cameras." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.765, + 0.21, + 0.781 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.791, + 0.47, + 0.853 + ], + "angle": 0, + "content": "The spiking camera [17] and event camera [10] are neuromorphic sensors working differently from conventional frame-based digital cameras, which have many attractive characteristics, e.g., high-speed (perceiving scene" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.321, + 0.891, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.502, + 0.894, + 0.586 + ], + "angle": 0, + "content": "Figure 1. (a) We build a spike-RGB hybrid camera system to achieve 1000 FPS HDR video reconstruction1. (b) The RGB camera uses alternating-exposure mode with a frame rate of 60 FPS, where \\( t_s \\), \\( 4t_s \\), and \\( 12t_s \\) are the short, middle, and long exposure in our setup, respectively. The sampling frequency of the spiking camera is \\( 20000\\mathrm{Hz} \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.893, + 0.81 + ], + "angle": 0, + "content": "radiance changes at the microsecond level), high dynamic range (HDR, \\(\\geq 100\\) dB). However, since they only record neuromorphic signals, i.e., spike trains [64] and event streams [25], which are less friendly to the human visual system and cannot be directly processed by CNN-based models for video frames [40, 41], preprocessing modules that convert neuromorphic signals into compatible formats are usually required when applying them to frame-based vision algorithms [61, 65]. In comparison with event streams, spike trains contain concrete textured information of scene radiances, which are more suitable for reconstructing high frame rate (HFR) videos [61-64]. However, since the spiking camera only encodes the absolute intensities of environments, colors are absent in the reconstructed video frames." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.894, + 0.873 + ], + "angle": 0, + "content": "When capturing with a frame-based RGB camera, quality of recorded colors for each frame is determined by trading off the exposure time, ambient light, and target objects' moving speed [57]. For high-speed dynamic scenes, it often" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.863, + 0.223, + 0.876 + ], + "angle": 0, + "content": "*Corresponding author." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.877, + 0.468, + 0.899 + ], + "angle": 0, + "content": "Project page: https://changyakun.github.io/1000FPS-HDR" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.863, + 0.468, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.887, + 0.783, + 0.902 + ], + "angle": 0, + "content": "The video result is available on our project page." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22180" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.56 + ], + "angle": 0, + "content": "requires to set shorter exposure time to guarantee a higher frame rate and avoid motion blur. In such a situation, since the exposure time is extremely short, the quality of video frames would be severely degenerated due to noise. Merging a burst of short-exposure images is a simple yet effective approach to reduce the noise level [8, 11], however, the color shift caused by noise is difficult to be corrected. Fusing alternating-exposure (using short, middle, and long exposures) RGB frames is commonly used for synthesizing well-exposed images [3, 19, 21]. However, they are not suitable for high-speed scenes. As illustrated in Fig. 1(b), given a sequence of alternating-exposure RGB images, the total time from the starting of the current exposure to the starting of the next frame, denoted by \\( T \\), is consistent for all frames, and it is composed of the exposure time \\( T_{\\mathrm{exp}} \\) and interval time \\( T_{\\mathrm{itv}} \\) (containing the readout and waiting time). It can be seen that the information during interval time is lost, and the frame rate they could achieve is thus limited to dozens of FPS. Another possible solution is to build a hybrid camera system to capture low frame rate (LFR) color sequence and high-speed neuromorphic signals simultaneously, then use the neuromorphic signals to interpolate [51, 52] and deblur [14, 18, 59] the RGB frames. However, the saturated regions are usually ignored, leaving the colors of the interpolated frames still unsatisfactory. HDR intensity map (does not contain any chromatic information) built from the neuromorphic signals can also be used to compensate the missing textures in the saturated regions [15]. But such an approach is not robust for scenes with large areas of saturated regions, due to the heavy reliance on the chrominance compensation network to hallucinate the color." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.561, + 0.473, + 0.848 + ], + "angle": 0, + "content": "In this paper, we propose an all-in-one framework to reconstruct HRF (Fig. 1(a), at the level of 1000 FPS) color videos with high fidelity from the spike trains and a series of alternating-exposure frames captured by a Spike-RGB hybrid camera system simultaneously (Fig. 1(b)). 
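To make the capture timing concrete: Fig. 1(b) specifies 60 FPS alternating exposures of $t_s$, $4t_s$, and $12t_s$ with 20000 Hz spike sampling, and the paragraph above notes that whatever happens during the interval $T_{\mathrm{itv}} = T - T_{\mathrm{exp}}$ is never recorded in color. The sketch below counts how many spike samples fall inside each exposure versus the interval; $t_s = 1$ ms follows the example in Sec. 3.1, and the internal readout/waiting split of $T_{\mathrm{itv}}$ is not modelled.

```python
from dataclasses import dataclass

SPIKE_HZ = 20_000          # spiking-camera sampling frequency (Fig. 1(b))
RGB_FPS = 60               # RGB frame rate (Fig. 1(b))
T_SLOT = 1.0 / RGB_FPS     # T = T_exp + T_itv, identical for every frame
T_S = 1e-3                 # short exposure t_s; 1 ms as in the Sec. 3.1 example

@dataclass
class Slot:
    index: int
    exposure_s: float      # t_s, 4*t_s, or 12*t_s
    t_start: float

def exposure_schedule(n_slots: int):
    """Yield the alternating-exposure slots: short, middle, long, short, ..."""
    pattern = [T_S, 4 * T_S, 12 * T_S]
    for k in range(n_slots):
        yield Slot(k, pattern[k % 3], k * T_SLOT)

def spike_indices_during_exposure(slot: Slot) -> range:
    """Spike-sample indices covered by the exposure window of one RGB slot."""
    first = int(round(slot.t_start * SPIKE_HZ))
    last = int(round((slot.t_start + slot.exposure_s) * SPIKE_HZ))
    return range(first, last)

for slot in exposure_schedule(3):
    n_exp = len(spike_indices_during_exposure(slot))
    n_itv = int(round(T_SLOT * SPIKE_HZ)) - n_exp
    print(f"slot {slot.index}: T_exp = {slot.exposure_s * 1e3:4.1f} ms -> "
          f"{n_exp} spike samples during exposure, {n_itv} during T_itv (no color recorded)")
```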
To make full use of the color information in RGB images, we propose a three-stage strategy to deal with different situations using specific modules: (i) For the blurry middle- and long-exposure images, we design a spike guided deblurring module to recover the corresponding sharp images with faithful colors; (ii) for missing colors during the interval time, we design a spike guided interpolation module that exploits the abundant motion information (SC-Flow [16]) obtained from spike trains; (iii) for suppressing noise in short-exposure images and maintaining temporal consistency, we design a merging module, which exploits the variant of recurrent U-Net [42] as its backbone, to complete the HFR&HDR color video reconstruction process. To summarize, this paper makes contributions by proposing:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.856, + 0.472, + 0.903 + ], + "angle": 0, + "content": "- an all-in-one framework to reconstruct high-speed HDR color video by jointly fusing spike trains and a sequence of alternating-exposure frames;" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "- a three-stage strategy fusing alternating exposures of RGB frames for the generation of well-exposure colors, via a recurrent convolution neural network for continuous frames interpolation guided by spike trains;" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.155, + 0.894, + 0.201 + ], + "angle": 0, + "content": "- a Spike-RGB hybrid camera system to demonstrate the applicability of the proposed method for capturing high-speed and high dynamic range scenes." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.092, + 0.894, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.206, + 0.895, + 0.283 + ], + "angle": 0, + "content": "Experimental results show that the proposed method outperforms the state-of-the-art HDR video reconstruction method [3] and commercial cameras with the slow-motion photography capability in reconstructing 1000 FPS HDR color videos on synthetic data and real-world data." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.295, + 0.642, + 0.311 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.32, + 0.895, + 0.771 + ], + "angle": 0, + "content": "HDR image and video reconstruction. The most common way to reconstruct HDR images is to fuse a set of LDR images with bracketed exposures [7, 34]. Since the results for dynamic scenes often contain ghosting artifacts, image alignment [28, 45] and deep learning [20, 55] are employed to reconstruct sharp HDR images. To better reduce ghosting artifacts, Lee et al. [24] and Shaw et al. [46] apply the estimated motion information from a high frame rate sequence to facilitate the HDR image synthesis. Messikommer et al. [35] also achieve HDR reconstruction by combining bracketed-exposure RGB images and events. There are methods being designed for HDR reconstruction from a single image. These methods cannot recover the missing textures in clipped regions [9, 44]. Abhiram and Chan [1] reconstruct HDR images with a quanta image sensor (QIS). Han et al. [15] find that the reconstructed intensity maps from event streams and spike trains contain abundant textures saturated in LDR images. Therefore, they exploit intensity maps to guide HDR image restoration. 
For the capturing of HDR videos, many existing methods use specialized hardware, such as scanline exposure [13], per-pixel exposure [37], or multiple sensors [33, 50]. Due to the particularity of hardware, these methods are limited to narrow applications. Merging alternating-exposure image sequences is the most common yet effective way to reconstruct HDR videos [12, 19, 21, 22, 30, 31]. Recently, Chen et al. [3] propose a coarse-to-fine network that performs alignment and fusion sequentially both in the image and feature space. However, these methods can only deal with LFR videos with about 20-60 FPS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.773, + 0.895, + 0.88 + ], + "angle": 0, + "content": "HFR video reconstruction. There is plenty of data redundancy in capturing HFR videos directly by commercial high-speed cameras, e.g., the Phatom camera². Building a hybrid system with a high-resolution LFR camera and a low-resolution HFR camera, and utilizing HFR signals to reconstruct a sequence of sharp images from blurred images [2, 49] is a more data-efficient way for HFR video" + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.887, + 0.787, + 0.902 + ], + "angle": 0, + "content": "2https://www.phantomhighspeed.com/" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "22181" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.09, + 0.52, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.091, + 0.885, + 0.307 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.314, + 0.892, + 0.397 + ], + "angle": 0, + "content": "Figure 2. (a) The pipeline of the proposed solution. It contains three steps: Step \\(①\\) spike preprocessing (Sec. 3.2), Step \\(②\\) RGB frame processing (Sec. 3.3), and Step \\(③\\) merging into HFR video (Sec. 3.4). Given the spike trains, we firstly estimate the optical flow from them as well as reconstruct spike frames. Secondly, we rectify the uneven brightness with a linear mapping function and use spike-guided deblurring (SG-deblur) to reconstruct sharp color frames. Finally, we use spike-guided frame interpolation (SG-interpolation) to recover the missing colors during \\(T_{\\mathrm{itv}}\\), and reconstruct time-consistent color frames. (b) and (c) show the detailed pipeline of SG-deblur and SG-interpolation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.41, + 0.473, + 0.727 + ], + "angle": 0, + "content": "reconstruction. Li et al. [26] use a stereo pair of low-resolution HFR and high-resolution LFR cameras to calculate the fast motion and the depth map. Avinash et al. [38] compute optical flows between two existing frames by utilizing the content of auxiliary HFR videos. Jiang et al. [18] recover a sharp video sequence from a motion-blurred image by integrating the visual and temporal knowledge that is contained in the events. Xu et al. [54] achieve real-world event-based deblurring with a self-supervised learning method. Tulyakov et al. [52] propose the Time Lens that utilizes high-speed events to achieve video frame interpolation (VFI). Following that, Time Lens++ [51] further improves the performance. For the reason that real data are absent, Yu et al. [56] propose a weakly supervised method with the help of subpixel attention learning. 
Although the event-based interpolation realizes HFR video reconstruction [51, 52], the recovered quality of colors is usually unsatisfactory due to that single exposure cannot balance artifacts from noise and blur, we therefore propose to jointly fuse the high-speed spike signals and alternating-exposure RGB frames to achieve high-quality reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.74, + 0.185, + 0.757 + ], + "angle": 0, + "content": "3. Approach" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.765, + 0.187, + 0.779 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.787, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Our goal is to reconstruct HFR&HDR videos from the binary spike trains \\(\\mathbb{S}(x,y) = \\{s(x,y,t)\\} (s(x,y,t) = 1\\) if the accumulated photons reach a certain threshold, then the accumulator is reset and \\(s(x,y,t) = 0\\) before the next spike is fired [17]) and LFR alternating-exposure RGB frames \\(\\mathbb{B} = \\{\\mathbf{B}_k\\} ^3\\), where \\((x,y)\\) denote the coordinates of spikes, \\(t\\)" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.41, + 0.891, + 0.455 + ], + "angle": 0, + "content": "denotes the timestamp, and \\( k \\) denotes the index of an RGB image in the sequence. As shown in Fig. 2(a), to achieve this goal, we design a pipeline that consists of three steps:" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.455, + 0.891, + 0.486 + ], + "angle": 0, + "content": "Step ①: Spike preprocessing (Sec. 3.2). We estimate the optical flow \\(\\mathbf{F}_i\\) and spike frames \\(\\mathbf{I}_i\\) from the spike trains:" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.497, + 0.891, + 0.514 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {i} (x, y) = \\mathcal {S C} \\left(s \\left(x, y, t _ {i} \\rightarrow t _ {i + 1}\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.585, + 0.524, + 0.891, + 0.561 + ], + "angle": 0, + "content": "\\[\n\\mathbf {I} _ {i} (x, y) = \\int_ {t _ {i} t _ {f} / 2} ^ {t _ {i} + t _ {f} / 2} s (x, y, t) d t, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.892, + 0.628 + ], + "angle": 0, + "content": "where \\(\\mathcal{SC}(\\cdot)\\) denotes optical flow estimation with Hu et al.'s [16] method, \\(i\\) and \\(t_i\\) denote the index and timestamp of spike frames, and \\(t_f\\) is the time window. In Sec. 3.2, we further super-resolve \\(\\mathbf{I}_i\\) at the feature space." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.893, + 0.854 + ], + "angle": 0, + "content": "Step ②: RGB frame preprocessing (Sec. 3.3). For the 60 FPS RGB images captured with alternating exposures, i.e., \\( t_s, 4t_s \\), and \\( 12t_s \\), we firstly unify the uneven brightness with a linear mapping function. Then we conduct motion deblurring for \\( 4t_s \\) and \\( 12t_s \\) images. For the \\( t_s \\) images, when \\( t_s \\) is sufficiently short, i.e., 1 ms, we assume the short-exposure image is free from motion blur, and take \\( t_s \\) as the reference time for the motion deblurring. Consequently, we can recover 4 and 12 sharp images from \\( 4t_s \\) and \\( 12t_s \\) images, respectively. As shown in Fig. 
2(b), we use \\( \\mathbf{B}^l \\) to denote a blurry image, and the motion deblurring operation can be formulated as: \\( \\{\\mathbf{B}_j^l\\} = \\mathcal{R}(\\mathbf{B}^l, \\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\}, \\mathbf{B}^s) \\), where \\( j \\) is the index of a recovered sharp image, \\( \\mathcal{R}(\\cdot) \\) is sharp image reconstruction, \\( \\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\} \\) is the corresponding spike frames, and \\( \\mathbf{B}^s \\) is the nearest short-exposure RGB frame." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Step ③: Merging into HFR video (Sec. 3.4). Following Step ②, for the interval time \\((T_{\\mathrm{itv}})\\) that colors are not recorded, we bidirectionally query two nearest sharp RGB" + }, + { + "type": "page_footnote", + "bbox": [ + 0.095, + 0.887, + 0.348, + 0.901 + ], + "angle": 0, + "content": "3In this paper, we use \\(\\{\\cdot\\}\\) to denote collections." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22182" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.09, + 0.133, + 0.101 + ], + "angle": 0, + "content": "warping" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.101, + 0.155, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.159, + 0.091, + 0.234, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.236, + 0.091, + 0.312, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.091, + 0.391, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.091, + 0.469, + 0.147 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.148, + 0.47, + 0.174 + ], + "angle": 0, + "content": "Figure 3. For the sake of increasing spatial resolution, we adopt flow-based warping to merge adjacent 5 spike frames." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.181, + 0.47, + 0.304 + ], + "angle": 0, + "content": "images \\(\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}\\) for each spike frame \\(\\mathbf{I}_i\\), and get the warped images \\(\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\) with optical flow, where \\(+\\) and \\(-\\) denote the forward and backward warping, respectively. In Fig. 2(c), we provide an illustration of the interpolation procedure. Finally, as shown in Fig. 4, we reconstruct time-consistent color frames, and each frame \\(\\mathbf{C}_i\\) is generated by merging the spike frame \\(\\mathbf{I}_i\\) with \\(\\{\\mathbf{C}_i\\}_{1}, \\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\) with the strong constraint of optical flow." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.314, + 0.268, + 0.33 + ], + "angle": 0, + "content": "3.2. Spike preprocessing" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.338, + 0.471, + 0.716 + ], + "angle": 0, + "content": "The optical flow estimation and spike frame reconstruction using in Eqn. (1) and Eqn. (2) are theoretically, yet the reconstructed frames practically have two issues: Since the integration time \\( t_f \\) is very short, noise is relatively strong; the spatial resolution of the first generation spiking camera (VidarOne [17]) is much lower than the RGB camera. 
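The two issues noted above follow directly from Eq. (2): a spike frame is simply the count of binary spikes inside a short window $t_f$, so a small window keeps fast motion sharp but is photon-starved and noisy, while a larger window suppresses noise at the cost of motion blur. A minimal NumPy sketch, assuming the spike train is stored as a (T, H, W) array of 0/1 samples and using a Bernoulli firing model purely as a stand-in for the real integrate-and-fire sensor:

```python
import numpy as np

SPIKE_HZ = 20_000

def spike_frame(spikes: np.ndarray, t_idx: int, window: int) -> np.ndarray:
    """Eq. (2) up to a constant scale: count binary spikes in a `window`-sample neighbourhood of t_idx.
    spikes: (T, H, W) array of {0, 1}; returns an (H, W) frame normalised to [0, 1]."""
    lo = max(t_idx - window // 2, 0)
    hi = min(t_idx + window // 2, spikes.shape[0])
    return spikes[lo:hi].sum(axis=0).astype(np.float32) / max(hi - lo, 1)

# Toy spike train: brighter pixels fire more often (Bernoulli stand-in for the
# real integrate-and-fire sensor, kept small so the sketch runs quickly).
rng = np.random.default_rng(0)
radiance = np.tile(np.linspace(0.05, 0.95, 160, dtype=np.float32), (100, 1))   # (H, W) = (100, 160)
spikes = (rng.random((400, 100, 160)) < radiance).astype(np.uint8)             # 400 samples = 20 ms at 20 kHz

for win in (8, 64):
    rec = spike_frame(spikes, t_idx=200, window=win)
    print(f"window = {win:3d} samples -> mean abs. error vs. radiance: {np.abs(rec - radiance).mean():.4f}")
```

With the short window the reconstruction is sharper in time but noisier, which is exactly why Sec. 3.2 merges several flow-aligned spike frames instead of enlarging the window.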
To reduce the noise and increase the spatial resolution, inspired by the burst-based super-resolution [4] and denoising [27] for conventional RGB images, it is feasible to merge a group of adjacent spike frames with the help of spatial alignment. Moreover, thanks to the continuous motion recording capability of spiking cameras, the optical flow [16] estimated from spike trains makes the alignment even more stable than RGB images. As illustrated in Fig. 3, we design a computationally efficient module for spike frames, which is formulated as: \\( \\hat{\\mathbf{I}}_i = \\{\\mathcal{W}_{\\mathbf{F}_{j\\to i}}(\\mathbf{I}_j)|j\\in \\mathcal{N}_i\\} \\), where \\( \\mathcal{W}_{\\mathcal{F}_{j\\to i}}(\\cdot) \\) denotes the flow-based warping operation, \\( \\mathcal{N}_i \\) denotes a collection of adjacent frames. Then, we feed \\( \\hat{\\mathbf{I}}_i \\) to a set of convolutional layers, and we use PixelShuffle [47] to increase the spatial resolution while decreasing the channel of features. It should be noted that the method for spike frame reconstruction is not unique, which means users can choose other learning-based methods [61, 62, 64]. However, those deep learning models are relatively heavy, and less efficient as a submodule fitting to our pipeline." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.727, + 0.315, + 0.743 + ], + "angle": 0, + "content": "3.3. RGB image preprocessing" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.75, + 0.469, + 0.901 + ], + "angle": 0, + "content": "RGB linear mapping. Following previous methods for HDR video reconstruction [3, 19, 21], we first unify the brightness of alternating-exposure RGB frames. Since we use an industrial camera (details in Sec. 3.5) that can acquire data without a nonlinear radiometric response function, the linearity of the captured frames is maintained. We find that the brightness of the frames can maintain a linear relationship with the duration of exposure time. Hence we use the global linear mapping to unify the frame brightness: \\(\\alpha \\cdot \\mathbf{B}_k(x,y)\\rightarrow \\mathbf{B}_k(x,y)\\), where \\(\\alpha\\) denotes a linear scalar." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.305 + ], + "angle": 0, + "content": "Spike-guided deblurring. The physical model of the blurring process can be simply formulated as the average of a group of sharp images, i.e., \\(\\mathbf{B}^l (x,y) = \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)\\), where \\(N\\) denotes the number of sharp images. However, due to the limited dynamic range of the RGB camera, that simplified equation does not hold in the clipped regions of real-world long-exposure frames. In general we should have: \\(\\mathbf{B}^l (x,y)\\leq \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)\\). Therefore, for reconstructing a sequence of sharp HDR images from \\(\\mathbf{B}^l\\), we divide it into two sub-tasks: (i) For the well-exposure regions, we use the sharp spike frames to guide motion deblurring; (ii) for the clipped regions where colors are lost, we compensate them with well-retained colors extracted from the adjacent short-exposure image \\(\\mathbf{B}^s\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.306, + 0.893, + 0.608 + ], + "angle": 0, + "content": "Figure 2(b) shows the spike-guided deblurring (SG-deblur) from \\(\\mathbf{B}_l\\) (\\(\\mathbf{B}_l\\) may be a middle- or long-exposure image). Similar to Xu et al. 
[54] that exploit event frames to motion deblurring, we first concatenate \\(\\mathbf{B}_l\\) with \\(\\{\\mathbf{I}_l^j\\}\\), then extract shallow features and increase feature channels with PixelShuffle [47], which is followed by a set of residual dense blocks (RDBs) [60] and a decoder. To make the colors in over-exposure regions be compensated by the adjacent short-exposure RGB image \\(\\mathbf{B}_j^s\\), we warp the short-exposure image with the optical flow estimated from spike trains: \\(\\mathbf{B}_j^s = \\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\mathbf{B}^s)\\), where \\(\\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\cdot)\\) denotes the warping operation from timestamp \\(t_s\\) to the timestamp of \\(t_j\\). Subsequently, we extract features from \\(\\{\\mathbf{B}_l^{s\\rightarrow j}\\}\\) and add residual links between them and the decoder. Finally, we obtain a sequence of sharp color images. Note that the SG-deblur for the middle- and long-exposure RGB images share the same architecture while the parameters are not shareable. SG-deblur outputs four images for both \\(4t_s\\) and \\(12t_s\\) frames. For the case of \\(12t_s\\) frame, we interpolate the 4 frames to 12 frames with flow-based warping." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.609, + 0.893, + 0.851 + ], + "angle": 0, + "content": "Next, we briefly explain the reason why this event-based model [54] can be applied to a spike-based task. Both event streams and spike trains with the high-speed property have been used for motion deblurring and latent frame reconstruction [14,18,54]. It is necessary to convert them to event frames and spike frames, both of which belong to the category of 2D images. But event frames and spike frames have different physical meanings: Pixel values in an event frame reveal the residual (relatively sparse information) between two adjacent frames, while pixel values in a spike frame represent exactly the texture (relatively dense information) of the corresponding frame. Since both event frames and spike frames are 2D images and the spike frames have denser texture information, we can replace event frames in such a model with spike frames, so as to make the solution to the problem more well-posed." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.862, + 0.725, + 0.878 + ], + "angle": 0, + "content": "3.4. Merging into HFR video" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "RGB interpolation. Given each middle- and long-exposure" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22183" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.891, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.318, + 0.893, + 0.361 + ], + "angle": 0, + "content": "Figure 4. Network architecture of the CNN-RNN-based merging module for reconstructing HFR&HDR videos from alternating-exposure RGB frames and HFR spike frames. This module outputs HDR color frames in a step-wise manner. We unroll the module for \\(M\\) steps during training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.366, + 0.473, + 0.599 + ], + "angle": 0, + "content": "frame, SG-deblur recovers 4 and 12 images. Therefore, the recovered RGB frames have a frame rate of \\(340^{4}\\) FPS. But temporal distribution of them is quite uneven, e.g., there is no recovered color frame interval time \\(T_{\\mathrm{itv}}\\). 
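Several operations above and below (aligning the short-exposure image $\mathbf{B}^s$ to a latent timestamp, spreading the 4 recovered frames to 12, Step ③'s bidirectional interpolation, and the temporal-distance weighting $\oplus$ in the merging module) reduce to one primitive: backward warping a frame with the spike-based optical flow, optionally followed by a linear blend of the two warped neighbours. A minimal PyTorch sketch, assuming the flow is a per-pixel displacement in pixel units; this is an illustrative implementation, not the authors' code.

```python
import torch
import torch.nn.functional as F

def warp(img: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    """Backward-warp img (B, C, H, W) by a per-pixel displacement flow (B, 2, H, W), in pixels."""
    b, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.arange(h, dtype=img.dtype),
                            torch.arange(w, dtype=img.dtype), indexing="ij")
    grid_x = xs.unsqueeze(0) + flow[:, 0]                    # sampling locations in pixel units
    grid_y = ys.unsqueeze(0) + flow[:, 1]
    grid = torch.stack((2.0 * grid_x / (w - 1) - 1.0,        # normalise to [-1, 1] for grid_sample
                        2.0 * grid_y / (h - 1) - 1.0), dim=-1)
    return F.grid_sample(img, grid, mode="bilinear", padding_mode="border", align_corners=True)

def blend_neighbours(b_prev, b_next, flow_prev, flow_next, t, t_prev, t_next):
    """Warp the two nearest sharp RGB frames to time t and mix them by temporal distance."""
    w_next = (t - t_prev) / max(t_next - t_prev, 1e-6)       # 0 at t_prev, 1 at t_next
    return (1.0 - w_next) * warp(b_prev, flow_prev) + w_next * warp(b_next, flow_next)

# Toy usage: zero flow, blending two random frames a third of the way between them.
b_prev, b_next = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
zero_flow = torch.zeros(1, 2, 64, 64)
out = blend_neighbours(b_prev, b_next, zero_flow, zero_flow, t=3, t_prev=0, t_next=10)
print(out.shape)  # torch.Size([1, 3, 64, 64])
```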
Fortunately, the spike train contains continuous and dense texture information in the temporal domain. In Step ③, we use the SG-interpolation module to interpolate RGB frames into a sequence of uniformly distributed images. For each spike frame \\(\\mathbf{I}_i\\), we bidirectionally query its two nearest recovered RGB frames \\(\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}\\) and interpolate two color frames \\(\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\) with the optical flow estimated from spike trains. When \\(\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\) are fed into our merging module, they are weighted by a linear coefficient \\((\\oplus\\) in Fig. 4) related to the distance between \\(t_i\\) and \\(\\{t_+, t\\}\\), where \\(\\{t_+, t\\}\\) denote the timestamp of \\(\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.796 + ], + "angle": 0, + "content": "Merging module. The aforementioned modules reconstruct coarse HFR video frames, which need to be refined for smoothing over time. We build a CNN-RNN-based HFR&HDR video reconstruction network to merge the spike frames and RGB frames, which is shown in Fig. 4. The merging module consists of three encoders, i.e., \\(\\mathcal{E}_I\\), \\(\\mathcal{E}_B\\), and \\(\\mathcal{E}_C\\), which are respectively designed for feature extraction from the current spike frame \\(\\hat{\\mathbf{I}}_i\\), the interpolated RGB images \\(\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}\\), and the previously reconstructed image \\(\\mathbf{C}_{i-1}\\). In \\(\\mathcal{E}_I\\), we use PixelShuffle [47] to make the spatial resolution of spike features consistent with RGB features. The extracted features are denoted as \\(\\mathbf{E}_I\\), \\(\\{\\mathbf{E}_B, \\mathbf{E}_{B+}\\}\\), and \\(\\mathbf{E}_{C_i-1}\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.47, + 0.873 + ], + "angle": 0, + "content": "Considering the spike frames and RGB frames may not be perfectly aligned at pixel level for real-world data, we add deformable convolution layers [6] to improve the robustness to this issue. In order to output flicker-free color frames, we adopt two constraints in the merging module:" + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.364, + 0.892, + 0.391 + ], + "angle": 0, + "content": "Table 1. Details of the composition of the dataset (res. is the abbreviation of resolution)." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.397, + 0.871, + 0.456 + ], + "angle": 0, + "content": "
| data | RGB res. | spike res. | train/test | time |
| --- | --- | --- | --- | --- |
| full-synthetic | 500×800 | 250×400 | 80/20 | 0.1s |
| real-synthetic | 600×800 | 250×400 | 160/40 | 0.101s |
| real-world | 484×784 | 242×392 | -/20 | 0.101s |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.459, + 0.892, + 0.567 + ], + "angle": 0, + "content": "(i) We add three ConvLSTM layers [48] to feed previous states forward in temporal domain; (ii) we feed \\(\\mathbf{E}_{C_i}\\) into the current step and align it with the current features with flow-based warping. We then use a decoder to reversely map deep features to the current output HDR frame \\(\\mathbf{C}_i\\). We achieve the multi-module signal fusion by adding concatenation links between \\(\\{\\mathbf{E}_{C_i}\\), \\(\\mathbf{E}_B\\), \\(\\mathbf{E}_{B+}\\}\\) and the decoder." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.575, + 0.719, + 0.591 + ], + "angle": 0, + "content": "3.5. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.892, + 0.689 + ], + "angle": 0, + "content": "Due to the setting of our method being different from existing HDR and video frame interpolation methods, there are no suitable datasets for training and testing our method. Therefore, we collect a new one with three components, whose details are summarized in Table 1 and sample images are provided in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.893, + 0.856 + ], + "angle": 0, + "content": "Part 1: Full-synthetic data. This part of data is obtained by using the spike simulator proposed by Hu et al. [16]. We render 2000 RGB images with their computer graphics based solution as ground truth and generate 2000 spike planes (0.1 s). Since the photons arriving at the sensor follow Poisson probability distribution [43], we synthesize alternating-exposure 60 FPS RGB frames with a Poisson noise model. For the full synthetic data, we randomly select starting time of each group of training data. We randomly shift the RGB frames within 3 pixels to make the trained model more robust to the misalignment in real-world data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Part 2: Real-synthetic data. To reduce the domain gap between full-synthetic data and real-world data, we design a method to collect real-synthetic (the scenes are real while" + }, + { + "type": "page_footnote", + "bbox": [ + 0.095, + 0.887, + 0.316, + 0.9 + ], + "angle": 0, + "content": "4From \\(60 = 20\\times 3\\) to \\(340 = 20\\times (1 + 4 + 12)\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "22184" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.468, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.23, + 0.47, + 0.272 + ], + "angle": 0, + "content": "Figure 5. Example frames from the proposed dataset. Each group shows three alternating-exposure RGB frames (left, from top to bottom rows) and the corresponding spike signals (right)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.283, + 0.47, + 0.405 + ], + "angle": 0, + "content": "the spike trains are synthetic) data, and we use this part of data to fine-tune our model. The RGB frames are captured with an alternating-exposure mode in slow-motion scenes. Then we synthesize blurry middle-exposure RGB frames by averaging 4 adjacent middle-exposure RGB images, and blurry long-exposure RGB frames are synthesized in a similar way. We synthesize spike trains from ground truth RGB frames with the integrate-and-fire methodology [61]." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.405, + 0.47, + 0.51 + ], + "angle": 0, + "content": "Part 3: Real-world data. We build a Spike-RGB hybrid camera (Fig. 6) to capture real-world data. The system is composed of an industrial camera (Basler acA800-510uc\\(^5\\)) with alternating exposure capability and a spiking camera [17]. There is a beam splitter in front of the two sensors. We conduct geometric calibration and time synchronization to align bimodal signals collected by them." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.51, + 0.47, + 0.707 + ], + "angle": 0, + "content": "Loss and training. The SG-deblur module and the merging module reconstruct images in the linear luminance domain, which covers a high dynamic range of pixel values. Following existing methods for HDR reconstruction, for the output images \\(\\mathbf{C}\\), we compress the range of pixel values by applying the following function proposed by Kalantari et al. [20]: \\(\\mathcal{T}(\\mathbf{C}) = \\log (1 + \\mu \\mathbf{C}) / \\log (1 + \\mu)\\), where \\(\\mathcal{T}(\\cdot)\\) denotes the tone mapping operation and \\(\\mu\\) denotes the amount of compression. For these two modules, we employ widely used \\(l_{1}\\) loss, Structure similarity (SSIM) loss [53], and Learned Perceptual Image Patch Similarity (LPIPS) loss [58]. The total loss at step \\(i\\) for both the motion deblurring and merging modules is" + }, + { + "type": "equation", + "bbox": [ + 0.098, + 0.718, + 0.469, + 0.736 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} (i) = \\mathcal {L} _ {l _ {1}} (i) + \\beta_ {1} \\mathcal {L} _ {\\text {S S I M}} (i) + \\beta_ {2} \\mathcal {L} _ {\\text {L P I P S}} (i), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.746, + 0.47, + 0.868 + ], + "angle": 0, + "content": "where \\(\\beta_{1} = 1\\) and \\(\\beta_{2} = 1\\). For spike-based optical flow estimation using [16], we fine-tune the parameters with full-synthetic data. During training, we resize the RGB images and spike frames to \\(512 \\times 800\\) and \\(256 \\times 400\\). We implement our model with PyTorch, set the batch size to 4, and use ADAM optimizer during the training process. We first train the model on full-synthetic data. The SG-deblur module is trained with 50 epochs, before training the merging" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.892, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.233, + 0.892, + 0.261 + ], + "angle": 0, + "content": "Figure 6. The prototype of our Spike-RGB imaging system composed of a spiking camera and an RGB camera." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.273, + 0.892, + 0.44 + ], + "angle": 0, + "content": "module. We unroll the merging module for \\(M\\) steps, and we find \\(M = 4\\) achieves a suitable balance between training time and recovery quality. The total loss for the unrolled \\(M\\) steps is \\(\\mathcal{L}_{\\mathrm{merge}} = \\sum_{i=1}^{M} \\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)\\), where \\(\\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)\\) denotes the total loss for the merging module at step \\(i\\). The initial learning rate for both two modules is 0.001, we decay it to \\(10^{-6}\\) with a linear strategy. For the real-synthetic data, we fine-tune another group of parameters to reduce the gap between synthetic data and real-world data. 
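As a concrete reference for the tone mapping T(C) and the training objective of Eqn. (3) described above, the following is a minimal PyTorch-style sketch (the paper states the model is implemented in PyTorch, but this is not the authors' code). The value mu = 5000 follows the common choice of Kalantari et al. [20] and is an assumption here, since the paper does not state it; `ssim_fn` and `lpips_fn` are placeholders for off-the-shelf SSIM [53] and LPIPS [58] implementations.

```python
import math
import torch
import torch.nn.functional as F

def mu_law_tonemap(c, mu=5000.0):
    # T(C) = log(1 + mu * C) / log(1 + mu), applied to the linear-domain HDR output
    return torch.log(1.0 + mu * c) / math.log(1.0 + mu)

def step_loss(pred, target, ssim_fn, lpips_fn, beta1=1.0, beta2=1.0):
    # Eqn. (3): L_total = L_l1 + beta1 * L_SSIM + beta2 * L_LPIPS, on tonemapped images
    p, t = mu_law_tonemap(pred), mu_law_tonemap(target)
    return (F.l1_loss(p, t)
            + beta1 * (1.0 - ssim_fn(p, t))      # ssim_fn returns a similarity in [0, 1]
            + beta2 * lpips_fn(p, t).mean())

def merging_loss(step_preds, step_targets, ssim_fn, lpips_fn):
    # Total loss of the merging module: sum of per-step losses over the M unrolled steps (M = 4)
    return sum(step_loss(p, t, ssim_fn, lpips_fn)
               for p, t in zip(step_preds, step_targets))
```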
We use one NVIDIA Tesla A100 for training, and the training procedure consumes about 30 hours." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.454, + 0.634, + 0.471 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.479, + 0.884, + 0.495 + ], + "angle": 0, + "content": "4.1. Quantitative Evaluation using Synthetic Data" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.503, + 0.892, + 0.608 + ], + "angle": 0, + "content": "Validation on full-synthetic data. Figure 8 shows a group of results on full-synthetic data. We can see that both the flying objects in the short-exposure image and the oversaturated clouds (see the regions marked by boxes) in the long-exposure image are recovered successfully. The results with rich textures and consistent colors show the feasibility of our proposed method." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.609, + 0.892, + 0.714 + ], + "angle": 0, + "content": "Evaluation on real-synthetic data. To the best of our knowledge, the proposed method is the first framework to reconstruct HFR&HDR videos with the combination of spike trains and alternating-exposure RGB frames. Therefore, it is unfair to compare our method with existing ones, i.e., Kalantari13 [21], Kalantari19 [19], and Chen21 \\([3]^{6}\\), which are designed for low frame rate HDR videos." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.714, + 0.892, + 0.865 + ], + "angle": 0, + "content": "We choose a state-of-the-art HDR video reconstruction method Chen21 [3], which also uses alternating-exposure RGB frames (the closest setup to ours) as a reference. Figure 7 shows the reconstruction results on real-synthetic data of the proposed method and Chen21 [3]. Thanks to the complementary motion information provided by spike trains, the abundant color extracted from alternating-exposure RGB frames, and the accurate textures contained in spike frames, the proposed method is capable of reconstructing rich texture details with less motion blur. For ex" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.875, + 0.468, + 0.9 + ], + "angle": 0, + "content": "5https://www.baslerweb.com/en/products/camera/ area-scan-cameras/ace/aca800-510uc/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this section, we use \"Last name of the first author+year\" as synonyms of methods for comparison." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "22185" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.124, + 0.09, + 0.157, + 0.1 + ], + "angle": 0, + "content": "short" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.102, + 0.209, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.257, + 0.09, + 0.299, + 0.1 + ], + "angle": 0, + "content": "middle" + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.102, + 0.345, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.401, + 0.09, + 0.428, + 0.101 + ], + "angle": 0, + "content": "long" + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.102, + 0.478, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.273, + 0.298, + 0.285 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.091, + 0.566, + 0.1 + ], + "angle": 0, + "content": "short" + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.102, + 0.62, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.666, + 0.091, + 0.708, + 0.101 + ], + "angle": 0, + "content": "middle" + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.102, + 0.755, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.811, + 0.091, + 0.838, + 0.102 + ], + "angle": 0, + "content": "long" + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.102, + 0.889, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.688, + 0.272, + 0.708, + 0.285 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.285, + 0.892, + 0.326 + ], + "angle": 0, + "content": "Figure 7. Visual equality comparison of real-synthetic data between the proposed method and the state-of-the-art HDR video reconstruction method: Chen 21 [3]. We present two sets of results in (a) and (b). Please zoom-in electronic versions for better details, and watch the HFR videos on the project page." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.336, + 0.468, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.147, + 0.501, + 0.398, + 0.514 + ], + "angle": 0, + "content": "Figure 8. Validation on the synthetic data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.534, + 0.469, + 0.669 + ], + "angle": 0, + "content": "ample, in the long-exposure frame in the first row of (a), the building marked by a yellow box suffers from severe motion blur and overexposure. Chen21 [3] partially recovers the colors of this building, but it fails to remove the blurry artifacts. In the results generated by our method, the edges are sharp and the colors are vivid. In Fig. 7(b), the motions across RGB frames have a very large span, Chen21 [3] can only recover the corresponding LFR videos, while our method can reconstruct an HFR video with smooth motion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We evaluate the reconstructed HDR in terms of PSNR, SSIM, HDR-VDP-2 [32], and HDR-VQM [36]. Table 2 clearly shows that our framework outperforms the state-of-the-art method [3] in all the metrics on the real-synthetic data in the condition of 60 FPS. And we achieve excellent performance in the condition of 1000 FPS. 
We designed ablation experiments and used them to demonstrate the effectiveness of the modules in our framework. For \"w/o I\", we simply stack the spike trains with a time window, and upsample them using bilinear interpolation; for \"w/o PS\", we replace PixelShuffle with a convolutional layer. The two groups of experiments verify the effectiveness of spike frame preprocessing in Step ①. For \"w/o F1\" and \"w/o F2\", we remove the flow-based interpolation in the deblurring module and the merging module. The two groups of ex" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.336, + 0.892, + 0.39 + ], + "angle": 0, + "content": "Table 2. Quantitative results and ablation study on our realistic synthetic data. We sample 60 FPS videos from our results for the comparison with Chen21 [3]. \\(\\uparrow (\\downarrow)\\) indicates larger (smaller) values are better." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.394, + 0.892, + 0.564 + ], + "angle": 0, + "content": "
Comparison with the state-of-the-art method

| Method | PSNR↑ | SSIM↑ | HDR-VDP2↑ | HDR-VQM↓ | FPS |
| --- | --- | --- | --- | --- | --- |
| Chen21 [3] | 18.46 | 0.697 | 27.34 | 0.536 | 60 |
| Ours | 30.14 | 0.921 | 60.14 | 0.093 | 60 |
| Chen21 [3] | / | / | / | / | 1000 |
| Ours | 24.38 | 0.903 | 47.79 | 0.120 | 1000 |

Ablation study (1000 FPS)

| Method | PSNR↑ | SSIM↑ | HDR-VDP2↑ | HDR-VQM↓ |
| --- | --- | --- | --- | --- |
| w/o I | 23.15 | 0.886 | 46.03 | 0.143 |
| w/o PS | 23.98 | 0.881 | 46.47 | 0.141 |
| w/o F1 | 19.76 | 0.723 | 38.95 | 0.314 |
| w/o F2 | 18.04 | 0.716 | 35.89 | 0.356 |
| w/ t-loss | 22.41 | 0.864 | 43.64 | 0.142 |
| w/o DeConv | 24.31 | 0.897 | 47.66 | 0.127 |
| w/o DM | 19.01 | 0.714 | 37.97 | 0.338 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.892, + 0.778 + ], + "angle": 0, + "content": "periments verify the effectiveness of SC-Flow [16] based interpolation in Steps ② and ③. To further verify the effectiveness of deblurring module, we completely remove it in \"w/o DM\". For \"w/o DeConv\", we replace the deformable convolutional layers with traditional convolution layers. For \"w/ t-loss\", we remove the warping operation on \\(\\mathbf{C}_{i-1}\\) and add the temporal consistent loss that is estimated by a pretrained optical flow model [23], which is widely used in video processing [5, 39]. Since the \\(\\mathbf{C}_{i-1}\\) is warped by accurate optical flow \\(\\mathbf{F}_{i-1}\\) and merged into the current step \\(i\\), our method fundamentally has a strong temporal consistent constraint for video processing. Thus, our merging module does not need this loss during training." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.836, + 0.803 + ], + "angle": 0, + "content": "4.2. Qualitative Evaluation using Real Data" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In order to demonstrate the effectiveness of the proposed framework on real-world scenes, we collect 20 sets of real-world data, which are captured by our hybrid camera system shown in Fig. 6. We have compared our slow-motion capability with that of the commercial cameras. As shown in Fig. 9(a), the electric fan is moving at about 40 rounds" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22186" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.194, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.09, + 0.306, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.31, + 0.09, + 0.419, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.091, + 0.549, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.294, + 0.221, + 0.317, + 0.234 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.554, + 0.091, + 0.667, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.091, + 0.89, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.726, + 0.221, + 0.748, + 0.234 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.236, + 0.893, + 0.278 + ], + "angle": 0, + "content": "Figure 9. Visual quality comparison of real-world data between the proposed method and commercial cameras with the slow-motion capability. In (a), we show two adjacent frames for the video captured by smartphones that have slow-motion capability. The commercial cameras are not calibrated so their results are not strictly aligned with ours. (b) is the comparison with Phantom camera set to 1000 FPS." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.289, + 0.47, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.489, + 0.47, + 0.531 + ], + "angle": 0, + "content": "Figure 10. Qualitative visualization of our method in a super fast scene: a balloon bursting. We select 38 frames from our results for showing." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.548, + 0.47, + 0.76 + ], + "angle": 0, + "content": "per second. The short-exposure image is severely underexposed with less blurry artifacts, and the middle- and long-exposure images have severe blurring and oversaturated artifacts. With the accurate motion and texture information captured by the spiking camera, we have recovered temporally smooth video sequences. Four recovered images are shown for the middle- and long-exposure images. For the videos captured by iPhone 13 and Mi 10, the motions between frames are not continuous. And the electric fan captured by Mi 10 is deformed due to the rolling shutter. In Fig. 9(b), we compare our method with the Phantom7 camera set to 1000 FPS. Since the exposure time of the Phantom camera is extremely short, it fails to capture regions where scene radiance is weak." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.775, + 0.196, + 0.79 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.801, + 0.469, + 0.876 + ], + "angle": 0, + "content": "We propose an HFR&HDR video reconstruction method with a hybrid camera that is composed of an alternating-exposure RGB sensor and a spiking sensor. Extensive experiments on synthetic and real-world data demonstrate the superior performance of the proposed method." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.29, + 0.892, + 0.486 + ], + "angle": 0, + "content": "Discussion. (i) For super fast scenes, e.g., a balloon bursting, it is difficult to capture clear motions with a conventional RGB camera at 60 FPS. Therefore, the well-exposed color of the bursting balloon is not captured with the short exposure, which brings challenges to our reconstruction of accurate color. In our results, although the colors are somewhat distorted, we can still recover a smooth video sequence. Once the frame rate of the RGB camera is increased, e.g., 120 FPS, temporally smoother video with more accurate color is expected to be more reliably recovered. (ii) Since QIS [1, 29] share the same imaging model with the spiking camera, our method is ready to be applied to it. We show the simulation in supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.487, + 0.893, + 0.802 + ], + "angle": 0, + "content": "Limitation and future work. Beam splitter is arguable for making a practical system on mobile devices. But when compact design is not a hard constraint, beam splitter has unique advantages in spatial alignment, that is why it is broadly adopted in building a hybrid prototype for HDR [15, 24, 33, 50]. Side-by-side arrangement with parallax unavoidably introduces occlusions and alignment issues, which is a promising direction to explore for our future work. Due to the low spatial resolution \\((250\\times 400)\\) of the current model we use is, we have to super-resolve the spike frames in feature space. If higher-resolution spike signals can be directly obtained, our method can achieve better visual quality. Besides, there is a domain gap between synthetic spike trains and real-captured spike trains since the noise of the spiking camera is more complex than the simulator. For time complexity, our approach is better suited as a post-processing module. The number of parameters is \\(45.7\\mathrm{M}\\) and the time cost per frame is 0.371s with a single NVIDIA GeForce RTX 3090 graphics card. We hope to tackle these issues in the future work and achieve higher frame rate reconstruction." 
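The synthetic spike trains used for training follow an idealized integrate-and-fire model (cf. the sampling model in Sec. 3.1 and the synthesis methodology of [61]), which does not model the more complex noise of the real spiking camera; that idealization is one source of the domain gap noted above. Below is a minimal Python sketch of the principle; `gain` and `threshold` are illustrative assumptions, not the simulator's actual parameters.

```python
import numpy as np

def simulate_spikes(frames, gain=0.02, threshold=1.0):
    """Idealized integrate-and-fire spike synthesis from linear-intensity frames.

    frames: (T, H, W) array in [0, 1], one frame per spike timestamp (e.g., 20000 Hz).
    Returns a binary (T, H, W) spike train.
    """
    acc = np.zeros(frames.shape[1:], dtype=np.float64)
    spikes = np.zeros(frames.shape, dtype=np.uint8)
    for t in range(frames.shape[0]):
        acc += gain * frames[t]     # accumulate incoming intensity at this timestamp
        fired = acc >= threshold    # pixels whose accumulator reaches the firing threshold
        spikes[t][fired] = 1        # emit a spike ...
        acc[fired] -= threshold     # ... and reset the accumulator (subtracting the threshold;
                                    #     resetting to zero is an equally simple variant)
    return spikes
```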
+ }, + { + "type": "title", + "bbox": [ + 0.501, + 0.812, + 0.66, + 0.827 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.826, + 0.892, + 0.9 + ], + "angle": 0, + "content": "This work was supported by National Key R&D Program of China (2021ZD0109803), National Natural Science Foundation of China under Grant No. 62088102, 62136001. Yakun Chang was also supported by China Postdoctoral Science Foundation (8206300710)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.446, + 0.9 + ], + "angle": 0, + "content": "7Refer to footnote 2. Camera model: VEO 640, F/1.8, 85mm lens." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22187" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Gnanasambandam Abhiram and Chan Stanley H. HDR imaging with quanta image sensors: Theoretical limits and optimal reconstruction. IEEE Transactions on Computational Imaging, 6:1571-1585, 2020. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.471, + 0.214 + ], + "angle": 0, + "content": "[2] Moshe Ben-Ezra and Shree K Nayar. Motion deblurring using hybrid imaging. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.471, + 0.285 + ], + "angle": 0, + "content": "[3] Guanying Chen, Chaofeng Chen, Shi Guo, Zhetong Liang, Kwan-Yee K Wong, and Lei Zhang. HDR video reconstruction: A coarse-to-fine network and a real-world benchmark dataset. In Proc. of International Conference on Computer Vision, pages 2502-2511, 2021. 2, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.287, + 0.471, + 0.343 + ], + "angle": 0, + "content": "[4] Wooyeong Cho, Sanghyeok Son, and Dae-Shik Kim. Weighted multi-kernel prediction network for burst image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 404-413, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.344, + 0.471, + 0.385 + ], + "angle": 0, + "content": "[5] Jonghyun Choi, Kuk-Jin Yoon, et al. Learning to super resolve intensity images from events. In Proc. of Computer Vision and Pattern Recognition, pages 2768-2776, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.387, + 0.471, + 0.442 + ], + "angle": 0, + "content": "[6] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proc. of International Conference on Computer Vision, pages 764-773, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.472, + 0.486 + ], + "angle": 0, + "content": "[7] Paul E Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proc. of ACM SIGGRAPH, pages 1-10. 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.471, + 0.543 + ], + "angle": 0, + "content": "[8] Akshay Dudhane, Syed Waqas Zamir, Salman Khan, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Burst image restoration and enhancement. In Proc. of Computer Vision and Pattern Recognition, pages 5759-5768, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.545, + 0.471, + 0.599 + ], + "angle": 0, + "content": "[9] Gabriel Eilertsen, Joel Kronander, Gyorgy Denes, Rafat K Mantiuk, and Jonas Unger. HDR image reconstruction from a single exposure using deep cnns. ACM Transactions on Graphics, 36(6):1-15, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.602, + 0.471, + 0.671 + ], + "angle": 0, + "content": "[10] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1):154-180, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.673, + 0.471, + 0.714 + ], + "angle": 0, + "content": "[11] Clément Godard, Kevin Matzen, and Matt Uytendaele. Deep burst denoising. In Proc. of European Conference on Computer Vision, pages 538-554, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.716, + 0.471, + 0.771 + ], + "angle": 0, + "content": "[12] Yulia Gryaditskaya, Tania Pouli, Erik Reinhard, Karol Myszkowski, and Hans-Peter Seidel. Motion aware exposure bracketing for HDR video. In Computer Graphics Forum, volume 34, pages 119-130. Wiley Online Library, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.773, + 0.471, + 0.814 + ], + "angle": 0, + "content": "[13] Saghi Hajisharif, Joel Kronander, and Jonas Unger. Adaptive dualiso HDR reconstruction. EURASIP Journal on Image and Video Processing, 2015(1):1-13, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.816, + 0.471, + 0.871 + ], + "angle": 0, + "content": "[14] Jin Han, Yixin Yang, Chu Zhou, Chao Xu, and Boxin Shi. Evintsr-net: Event guided multiple latent frames reconstruction and super-resolution. In Proc. of International Conference on Computer Vision, pages 4882-4891, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[15] Jin Han, Chu Zhou, Peiqi Duan, Yehui Tang, Chang Xu, Chao Xu, Tiejun Huang, and Boxin Shi. Neuromorphic cam-" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.116, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "era guided high dynamic range imaging. In Proc. of Computer Vision and Pattern Recognition, pages 1730-1739, 2020. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.892, + 0.192 + ], + "angle": 0, + "content": "[16] Liwen Hu, Rui Zhao, Ziluo Ding, Lei Ma, Boxin Shi, Ruiqin Xiong, and Tiejun Huang. Optical flow estimation for spiking camera. In Proc. of Computer Vision and Pattern Recognition, pages 17844-17853, 2022. 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.194, + 0.892, + 0.25 + ], + "angle": 0, + "content": "[17] Tiejun Huang, Yajing Zheng, Zhaofei Yu, Rui Chen, Yuan Li, Ruiqin Xiong, Lei Ma, Junwei Zhao, Siwei Dong, Lin Zhu, et al. \\(1000 \\times\\) faster camera and machine vision with ordinary devices. Engineering, 2022. 1, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.252, + 0.892, + 0.307 + ], + "angle": 0, + "content": "[18] Zhe Jiang, Yu Zhang, Dongqing Zou, Jimmy Ren, Jiancheng Lv, and Yebin Liu. Learning event-based motion deblurring. In Proc. of Computer Vision and Pattern Recognition, pages 3320-3329, 2020. 
2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.309, + 0.892, + 0.365 + ], + "angle": 0, + "content": "[19] Nima Khademi Kalantari and Ravi Ramamoorthi. Deep HDR video from sequences with alternating exposures. In Computer graphics forum, volume 38, pages 193-205. Wiley Online Library, 2019. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.367, + 0.892, + 0.409 + ], + "angle": 0, + "content": "[20] Nima Khademi Kalantari, Ravi Ramamoorthi, et al. Deep high dynamic range imaging of dynamic scenes. ACM Transactions on Graphics, 36(4):144-1, 2017. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.411, + 0.892, + 0.466 + ], + "angle": 0, + "content": "[21] Nima Khademi Kalantari, Eli Shechtman, Connelly Barnes, Soheil Darabi, Dan B Goldman, and Pradeep Sen. Patch-based high dynamic range video. ACM Transactions on Graphics, 32(6):202-1, 2013. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.511 + ], + "angle": 0, + "content": "[22] Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High dynamic range video. ACM Transactions on Graphics, 22(3):319-325, 2003. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.513, + 0.892, + 0.569 + ], + "angle": 0, + "content": "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proc. of European Conference on Computer Vision, pages 170-185, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.571, + 0.892, + 0.613 + ], + "angle": 0, + "content": "[24] Byungju Lee and Byung Cheol Song. Multi-image high dynamic range algorithm using a hybrid camera. Signal Processing: Image Communication, 30:37-56, 2015. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.615, + 0.892, + 0.683 + ], + "angle": 0, + "content": "[25] Juan Antonio Lénero-Bardallo, Teresa Serrano-Gotarredona, and Bernabé Linares-Barranco. A 3.6 \\(\\mu\\)s latency asynchronous frame-free event-driven dynamic-vision-sensor. IEEE Journal of Solid-State Circuits, 46(6):1443-1455, 2011. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.686, + 0.892, + 0.741 + ], + "angle": 0, + "content": "[26] Feng Li, Jingyi Yu, and Jinxiang Chai. A hybrid camera for motion deblurring and depth map super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.744, + 0.892, + 0.785 + ], + "angle": 0, + "content": "[27] Ziwei Liu, Lu Yuan, Xiaou Tang, Matt Uytendaele, and Jian Sun. Fast burst images denoising. ACM Transactions on Graphics, 33(6):1-9, 2014. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[28] Kede Ma, Hui Li, Hongwei Yong, Zhou Wang, Deyu Meng, and Lei Zhang. Robust multi-exposure image fusion: A structural patch decomposition approach. IEEE Transactions on Image Processing, 26(5):2519-2532, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[29] Ulku Arin C Bruschini Claudio Charbon Edoardo Ma Sizhuo, Gupta Shantanu and Gupta Mohit. Quanta burst photography. ACM Transactions on Graphics, 39(4):79-1, 2020. 
8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22188" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[30] Stephen Mangiat and Jerry Gibson. High dynamic range video with ghost removal. In Applications of Digital Image Processing XXXIII, volume 7798, pages 307-314. SPIE, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.472, + 0.205 + ], + "angle": 0, + "content": "[31] Stephen Mangiat and Jerry Gibson. Spatially adaptive filtering for registration artifact removal in HDR video. In Proc. of International Conference on Image Processing, pages 1317-1320. IEEE, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.262 + ], + "angle": 0, + "content": "[32] Rafal Mantiuk, Kil Joong Kim, Allan G Rempel, and Wolfgang Heidrich. HDR-VDP-2: A calibrated visual metric for visibility and quality predictions in all luminance conditions. ACM Transactions on Graphics, 30(4):1-14, 2011. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.471, + 0.331 + ], + "angle": 0, + "content": "[33] Morgan McGuire, Wojciech Matusik, Hanspeter Pfister, Billy Chen, John F Hughes, and Shree K Nayar. Optical splitting trees for high-precision monocular imaging. IEEE Computer Graphics and Applications, 27(2):32-42, 2007. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.471, + 0.376 + ], + "angle": 0, + "content": "[34] Tom Mertens, Jan Kautz, and Frank Van Reeth. Exposure fusion. In Pacific Conference on Computer Graphics and Applications, pages 382-390, 2007. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.47, + 0.459 + ], + "angle": 0, + "content": "[35] Nico Messikommer, Stamatios Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-Bracket high dynamic range imaging with event cameras. In Proc. of Computer Vision and Pattern Recognition, pages 547–557, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.47, + 0.516 + ], + "angle": 0, + "content": "[36] Manish Narwaria, Matthieu Perreira Da Silva, and Patrick Le Callet. HDR-VQM: An objective quality measure for high dynamic range video. Signal Processing: Image Communication, 35:46-60, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.518, + 0.47, + 0.572 + ], + "angle": 0, + "content": "[37] Shree K Nayar and Tomoo Mitsunaga. High dynamic range imaging: Spatially varying pixel exposures. In Proc. of Computer Vision and Pattern Recognition, volume 1, pages 472-479. IEEE, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.47, + 0.63 + ], + "angle": 0, + "content": "[38] Avinash Paliwal and Nima Khademi Kalantari. Deep slow motion video reconstruction with hybrid imaging system. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(7):1557-1569, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.47, + 0.687 + ], + "angle": 0, + "content": "[39] Henri Rebecq, René Ranftl, Vladlen Koltun, and Davide Scaramuzza. High speed and high dynamic range video with an event camera. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):1964-1980, 2019. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.47, + 0.744 + ], + "angle": 0, + "content": "[40] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proc. of Computer Vision and Pattern Recognition, pages 779-788, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.469, + 0.801 + ], + "angle": 0, + "content": "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.47, + 0.871 + ], + "angle": 0, + "content": "[42] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234–241. Springer, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[43] Yash Sanghvi, Abhiram Gnanasambandam, and Stanley H Chan. Photon limited non-blind deblurring using algorithm" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "unrolling. IEEE Transactions on Computational Imaging, 2022.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[44] Marcel Santana Santos, Tsang Ing Ren, and Nima Khademi Kalantari. Single image HDR reconstruction using a cnn with masked features and perceptual loss. arXiv preprint arXiv:2005.07335, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.177, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[45] Pradeep Sen, Nima Khademi Kalantari, Maziar Yaesoubi, Soheil Darabi, Dan B Goldman, and Eli Shechtman. Robust patch-based HDR reconstruction of dynamic scenes. ACM Transactions on Graphics, 31(6):203-1, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.233, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[46] Richard Shaw, Sibi Catley-Chandar, Ales Leonardis, and Eduardo Perez-Pellitero. HDR reconstruction from bracketed exposures and events. arXiv preprint arXiv:2203.14825, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.288, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[47] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proc. of Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.371, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.892, + 0.495 + ], + "angle": 0, + "content": "[49] Yu-Wing Tai, Hao Du, Michael S Brown, and Stephen Lin. Image/video deblurring using a hybrid camera. In Proc. of Computer Vision and Pattern Recognition, pages 1-8, 2008. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.497, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[50] Michael D Tocci, Chris Kiser, Nora Tocci, and Pradeep Sen. A versatile HDR video production system. ACM Transactions on Graphics, 30(4):1-10, 2011. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.539, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[51] Stepan Tulyakov, Alfredo Bochicchio, Daniel Gehrig, Stamatios Georgoulis, Yuanyou Li, and Davide Scaramuzza. Time Lens++: Event-based frame interpolation with parametric non-linear flow and multi-scale fusion. In Proc. of Computer Vision and Pattern Recognition, pages 17755-17764, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.622, + 0.892, + 0.692 + ], + "angle": 0, + "content": "[52] Stepan Tulyakov, Daniel Gehrig, Stamatios Georgoulis, Julius Erbach, Mathias Gehrig, Yuanyou Li, and Davide Scaramuzza. Time Lens: Event-based video frame interpolation. In Proc. of Computer Vision and Pattern Recognition, pages 16155-16164, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.692, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[53] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.748, + 0.892, + 0.803 + ], + "angle": 0, + "content": "[54] Fang Xu, Lei Yu, Bishan Wang, Wen Yang, Gui-Song Xia, Xu Jia, Zhendong Qiao, and Jianzhuang Liu. Motion deblurring with real events. In Proc. of International Conference on Computer Vision, pages 2583-2592, 2021. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.804, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[55] Qingsen Yan, Lei Zhang, Yu Liu, Yu Zhu, Jinqiu Sun, Qinfeng Shi, and Yanning Zhang. Deep HDR imaging via a non-local network. IEEE Transactions on Image Processing, 29:4308-4322, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[56] Zhiyang Yu, Yu Zhang, Deyuan Liu, Dongqing Zou, Xijun Chen, Yebin Liu, and Jimmy S Ren. Training weakly supervised video frame interpolation with events. In Proc. of" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "22189" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.111, + 0.092, + 0.469, + 0.119 + ], + "angle": 0, + "content": "International Conference on Computer Vision, pages 14589-14598, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[57] Cheng Zhang, Shaolin Su, Yu Zhu, Qingsen Yan, Jinqiu Sun, and Yanning Zhang. Exploring and evaluating image restoration potential in dynamic scenes. In Proc. of Computer Vision and Pattern Recognition, pages 2067-2076, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[58] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proc. of Computer Vision and Pattern Recognition, pages 586-595, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.469, + 0.276 + ], + "angle": 0, + "content": "[59] Xiang Zhang and Lei Yu. 
Unifying motion deblurring and frame interpolation with events. In Proc. of Computer Vision and Pattern Recognition, pages 17765-17774, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.469, + 0.332 + ], + "angle": 0, + "content": "[60] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.334, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[61] Jing Zhao, Ruiqin Xiong, Hangfan Liu, Jian Zhang, and Tiejun Huang. Spk2Imgnet: Learning to reconstruct dynamic scene from continuous spike stream. In Proc. of Computer Vision and Pattern Recognition, pages 11996-12005, 2021. 1, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.469, + 0.473 + ], + "angle": 0, + "content": "[62] Yajing Zheng, Lingxiao Zheng, Zhaofei Yu, Boxin Shi, Yonghong Tian, and Tiejun Huang. High-speed image reconstruction through short-term plasticity for spiking cameras. In Proc. of Computer Vision and Pattern Recognition, pages 6358-6367, 2021. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.469, + 0.53 + ], + "angle": 0, + "content": "[63] Lin Zhu, Siwei Dong, Tiejun Huang, and Yonghong Tian. A retina-inspired sampling method for visual texture reconstruction. In Proc. of International Conference on Multimedia and Expo. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.532, + 0.469, + 0.587 + ], + "angle": 0, + "content": "[64] Lin Zhu, Siwei Dong, Jianing Li, Tiejun Huang, and Yonghong Tian. Retina-like visual image reconstruction via spiking neural model. In Proc. of Computer Vision and Pattern Recognition, pages 1438-1446, 2020. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.589, + 0.469, + 0.643 + ], + "angle": 0, + "content": "[65] Yunhao Zou, Yinqiang Zheng, Tsuyoshi Takatani, and Ying Fu. Learning to reconstruct high speed and high dynamic range videos from events. In Proc. of Computer Vision and Pattern Recognition, pages 2024-2033, 2021. 
1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "22190" + } + ] +] \ No newline at end of file diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_origin.pdf b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..97ba0ec7a8de334753f8d1d2583ad2494996a4ca --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/1c93f555-c37f-43ed-866a-0e7c5d4458e6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91e1a22f7e48868eb2309c15ab691c0a8088f654066cc8bb02d1682ac9dea8e4 +size 8925125 diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/full.md b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/full.md new file mode 100644 index 0000000000000000000000000000000000000000..dde030dc8e3bbf6bbfacc4d4b1b2d3541f00ba1e --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/full.md @@ -0,0 +1,300 @@ +# 1000 FPS HDR Video with a Spike-RGB Hybrid Camera + +Yakun Chang $^{1,2}$ Chu Zhou $^{3}$ Yuchen Hong $^{1,2}$ Liwen Hu $^{2}$ Chao Xu $^{3}$ Tiejun Huang $^{1,2}$ Boxin Shi $^{1,2*}$ + +$^{1}$ National Key Laboratory for Multimedia Information Processing, School of Computer Science, Peking University +$^{2}$ National Engineering Research Center of Visual Technology, School of Computer Science, Peking University +$^{3}$ National Key Laboratory of General AI, School of Intelligence Science and Technology, Peking University {yakunchang, zhou_chu, huliwen, tjhuang, shiboxin}@pku.edu.cn yuchenhong.cn@gmail.com, xuchao@cis.pku.edu + +# Abstract + +Capturing high frame rate and high dynamic range (HFR&HDR) color videos in high-speed scenes with conventional frame-based cameras is very challenging. The increasing frame rate is usually guaranteed by using shorter exposure time so that the captured video is severely interfered by noise. Alternating exposures can alleviate the noise issue but sacrifice frame rate due to involving long-exposure frames. The neuromorphic spiking camera records high-speed scenes of high dynamic range without colors using a completely different sensing mechanism and visual representation. We introduce a hybrid camera system composed of a spiking and an alternating-exposure RGB camera to capture HFR&HDR scenes with high fidelity. Our insight is to bring each camera's superiority into full play. The spike frames, with accurate fast motion information encoded, are firstly reconstructed for motion representation, from which the spike-based optical flows guide the recovery of missing temporal information for long-exposure RGB images while retaining their reliable color appearances. With the strong temporal constraint estimated from spike trains, both missing and distorted colors cross RGB frames are recovered to generate time-consistent and HFR color frames. We collect a new Spike-RGB dataset that contains 300 sequences of synthetic data and 20 groups of real-world data to demonstrate 1000 FPS HDR videos outperforming HDR video reconstruction methods and commercial high-speed cameras. + +# 1. 
Introduction + +The spiking camera [17] and event camera [10] are neuromorphic sensors working differently from conventional frame-based digital cameras, which have many attractive characteristics, e.g., high-speed (perceiving scene + +![](images/60240d2e429f7bbaa254aba2a45842b1ba35e1ca5ec1952b36f6954e438ed27c.jpg) +Figure 1. (a) We build a spike-RGB hybrid camera system to achieve 1000 FPS HDR video reconstruction1. (b) The RGB camera uses alternating-exposure mode with a frame rate of 60 FPS, where $t_s$ , $4t_s$ , and $12t_s$ are the short, middle, and long exposure in our setup, respectively. The sampling frequency of the spiking camera is $20000\mathrm{Hz}$ . + +radiance changes at the microsecond level), high dynamic range (HDR, $\geq 100$ dB). However, since they only record neuromorphic signals, i.e., spike trains [64] and event streams [25], which are less friendly to the human visual system and cannot be directly processed by CNN-based models for video frames [40, 41], preprocessing modules that convert neuromorphic signals into compatible formats are usually required when applying them to frame-based vision algorithms [61, 65]. In comparison with event streams, spike trains contain concrete textured information of scene radiances, which are more suitable for reconstructing high frame rate (HFR) videos [61-64]. However, since the spiking camera only encodes the absolute intensities of environments, colors are absent in the reconstructed video frames. + +When capturing with a frame-based RGB camera, quality of recorded colors for each frame is determined by trading off the exposure time, ambient light, and target objects' moving speed [57]. For high-speed dynamic scenes, it often + +requires to set shorter exposure time to guarantee a higher frame rate and avoid motion blur. In such a situation, since the exposure time is extremely short, the quality of video frames would be severely degenerated due to noise. Merging a burst of short-exposure images is a simple yet effective approach to reduce the noise level [8, 11], however, the color shift caused by noise is difficult to be corrected. Fusing alternating-exposure (using short, middle, and long exposures) RGB frames is commonly used for synthesizing well-exposed images [3, 19, 21]. However, they are not suitable for high-speed scenes. As illustrated in Fig. 1(b), given a sequence of alternating-exposure RGB images, the total time from the starting of the current exposure to the starting of the next frame, denoted by $T$ , is consistent for all frames, and it is composed of the exposure time $T_{\mathrm{exp}}$ and interval time $T_{\mathrm{itv}}$ (containing the readout and waiting time). It can be seen that the information during interval time is lost, and the frame rate they could achieve is thus limited to dozens of FPS. Another possible solution is to build a hybrid camera system to capture low frame rate (LFR) color sequence and high-speed neuromorphic signals simultaneously, then use the neuromorphic signals to interpolate [51, 52] and deblur [14, 18, 59] the RGB frames. However, the saturated regions are usually ignored, leaving the colors of the interpolated frames still unsatisfactory. HDR intensity map (does not contain any chromatic information) built from the neuromorphic signals can also be used to compensate the missing textures in the saturated regions [15]. 
However, such an approach is not robust for scenes with large areas of saturated regions, since it relies heavily on a chrominance compensation network to hallucinate the color.

In this paper, we propose an all-in-one framework to reconstruct HFR (Fig. 1(a), at the level of 1000 FPS) color videos with high fidelity from the spike trains and a series of alternating-exposure frames captured simultaneously by a Spike-RGB hybrid camera system (Fig. 1(b)). To make full use of the color information in RGB images, we propose a three-stage strategy that deals with different situations using specific modules: (i) For the blurry middle- and long-exposure images, we design a spike-guided deblurring module to recover the corresponding sharp images with faithful colors; (ii) for missing colors during the interval time, we design a spike-guided interpolation module that exploits the abundant motion information (SC-Flow [16]) obtained from spike trains; (iii) for suppressing noise in short-exposure images and maintaining temporal consistency, we design a merging module, which adopts a variant of the recurrent U-Net [42] as its backbone, to complete the HFR&HDR color video reconstruction process. To summarize, this paper makes contributions by proposing:

- an all-in-one framework to reconstruct high-speed HDR color videos by jointly fusing spike trains and a sequence of alternating-exposure frames;
- a three-stage strategy that fuses alternating-exposure RGB frames to generate well-exposed colors, via a recurrent convolutional neural network for continuous frame interpolation guided by spike trains;
- a Spike-RGB hybrid camera system to demonstrate the applicability of the proposed method for capturing high-speed and high dynamic range scenes.

Experimental results show that the proposed method outperforms the state-of-the-art HDR video reconstruction method [3] and commercial cameras with slow-motion photography capability in reconstructing 1000 FPS HDR color videos on synthetic and real-world data.

# 2. Related Work

HDR image and video reconstruction. The most common way to reconstruct HDR images is to fuse a set of LDR images with bracketed exposures [7, 34]. Since the results for dynamic scenes often contain ghosting artifacts, image alignment [28, 45] and deep learning [20, 55] are employed to reconstruct sharp HDR images. To further reduce ghosting artifacts, Lee et al. [24] and Shaw et al. [46] apply motion information estimated from a high frame rate sequence to facilitate HDR image synthesis. Messikommer et al. [35] also achieve HDR reconstruction by combining bracketed-exposure RGB images and events. There are also methods designed for HDR reconstruction from a single image; however, they cannot recover the missing textures in clipped regions [9, 44]. Abhiram and Chan [1] reconstruct HDR images with a quanta image sensor (QIS). Han et al. [15] find that the intensity maps reconstructed from event streams and spike trains contain abundant textures that are saturated in LDR images, and therefore exploit the intensity maps to guide HDR image restoration. For capturing HDR videos, many existing methods rely on specialized hardware, such as scanline exposure [13], per-pixel exposure [37], or multiple sensors [33, 50]; due to this hardware particularity, they are limited to narrow applications. Merging alternating-exposure image sequences is the most common yet effective way to reconstruct HDR videos [12, 19, 21, 22, 30, 31]. Recently, Chen et al.
[3] propose a coarse-to-fine network that performs alignment and fusion sequentially in both the image and feature spaces. However, these methods can only deal with LFR videos at about 20-60 FPS. + +HFR video reconstruction. There is plenty of data redundancy when capturing HFR videos directly with commercial high-speed cameras, e.g., the Phantom camera². Building a hybrid system with a high-resolution LFR camera and a low-resolution HFR camera, and utilizing the HFR signals to reconstruct a sequence of sharp images from blurred images [2, 49], is a more data-efficient way for HFR video reconstruction. Li et al. [26] use a stereo pair of low-resolution HFR and high-resolution LFR cameras to calculate the fast motion and the depth map. Paliwal and Kalantari [38] compute optical flows between two existing frames by utilizing the content of auxiliary HFR videos. Jiang et al. [18] recover a sharp video sequence from a motion-blurred image by integrating the visual and temporal knowledge contained in events. Xu et al. [54] achieve real-world event-based deblurring with a self-supervised learning method. Tulyakov et al. [52] propose Time Lens, which utilizes high-speed events to achieve video frame interpolation (VFI); Time Lens++ [51] further improves the performance. Since real data are absent, Yu et al. [56] propose a weakly supervised method with the help of subpixel attention learning. Although event-based interpolation realizes HFR video reconstruction [51, 52], the recovered quality of colors is usually unsatisfactory because a single exposure cannot balance the artifacts from noise and blur. We therefore propose to jointly fuse the high-speed spike signals and alternating-exposure RGB frames to achieve high-quality reconstruction. + +![](images/98c720d4cece126ecdfa4451934d7d939ae0de158ecc49548ba1c53ceaaf3e0d.jpg) +Figure 2. (a) The pipeline of the proposed solution. It contains three steps: Step $①$ spike preprocessing (Sec. 3.2), Step $②$ RGB frame processing (Sec. 3.3), and Step $③$ merging into HFR video (Sec. 3.4). Given the spike trains, we first estimate the optical flow from them and reconstruct spike frames. Then we rectify the uneven brightness with a linear mapping function and use spike-guided deblurring (SG-deblur) to reconstruct sharp color frames. Finally, we use spike-guided frame interpolation (SG-interpolation) to recover the missing colors during $T_{\mathrm{itv}}$ and reconstruct time-consistent color frames. (b) and (c) show the detailed pipelines of SG-deblur and SG-interpolation. + +![](images/a3796d73d2d27d9cb39525bfe02cc066e9e00718bd42123a115adabad161f76c.jpg) + +# 3. Approach + +# 3.1. Overview + +Our goal is to reconstruct HFR&HDR videos from the binary spike trains $\mathbb{S}(x,y) = \{s(x,y,t)\}$ ( $s(x,y,t) = 1$ if the accumulated photons reach a certain threshold, after which the accumulator is reset, and $s(x,y,t) = 0$ before the next spike is fired [17]) and LFR alternating-exposure RGB frames $\mathbb{B} = \{\mathbf{B}_k\} ^3$ , where $(x,y)$ denote the coordinates of spikes, $t$ denotes the timestamp, and $k$ denotes the index of an RGB image in the sequence. As shown in Fig. 2(a), to achieve this goal, we design a pipeline that consists of three steps: + +Step ①: Spike preprocessing (Sec. 3.2).
We estimate the optical flow $\mathbf{F}_i$ and the spike frames $\mathbf{I}_i$ from the spike trains: + +$$ +\mathbf{F}_{i}(x, y) = \mathcal{SC}\left(s\left(x, y, t_{i} \rightarrow t_{i+1}\right)\right), \tag{1} +$$ + +$$ +\mathbf{I}_{i}(x, y) = \int_{t_{i}-t_{f}/2}^{t_{i}+t_{f}/2} s(x, y, t)\,dt, \tag{2} +$$ + +where $\mathcal{SC}(\cdot)$ denotes optical flow estimation with Hu et al.'s [16] method, $i$ and $t_i$ denote the index and timestamp of a spike frame, and $t_f$ is the time window. In Sec. 3.2, we further super-resolve $\mathbf{I}_i$ in the feature space. + +Step ②: RGB frame preprocessing (Sec. 3.3). For the 60 FPS RGB images captured with alternating exposures, i.e., $t_s, 4t_s$ , and $12t_s$ , we first unify the uneven brightness with a linear mapping function. Then we conduct motion deblurring for the $4t_s$ and $12t_s$ images. For the $t_s$ images, when $t_s$ is sufficiently short, i.e., 1 ms, we assume the short-exposure image is free from motion blur, and take $t_s$ as the reference time for the motion deblurring. Consequently, we can recover 4 and 12 sharp images from the $4t_s$ and $12t_s$ images, respectively. As shown in Fig. 2(b), we use $\mathbf{B}^l$ to denote a blurry image, and the motion deblurring operation can be formulated as: $\{\mathbf{B}_j^l\} = \mathcal{R}(\mathbf{B}^l, \{\mathbf{I}_j | j \in \mathcal{N}_l\}, \mathbf{B}^s)$ , where $j$ is the index of a recovered sharp image, $\mathcal{R}(\cdot)$ is the sharp image reconstruction, $\{\mathbf{I}_j | j \in \mathcal{N}_l\}$ are the corresponding spike frames, and $\mathbf{B}^s$ is the nearest short-exposure RGB frame. + +Step ③: Merging into HFR video (Sec. 3.4). Following Step ②, for the interval time $T_{\mathrm{itv}}$ during which colors are not recorded, we bidirectionally query the two nearest sharp RGB images $\{\mathbf{B}_i^+, \mathbf{B}_i^-\}$ for each spike frame $\mathbf{I}_i$ , and get the warped images $\{\hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ with optical flow, where $+$ and $-$ denote the forward and backward warping, respectively. In Fig. 2(c), we provide an illustration of the interpolation procedure. Finally, as shown in Fig. 4, we reconstruct time-consistent color frames, and each frame $\mathbf{C}_i$ is generated by merging the spike frame $\mathbf{I}_i$ with $\{\mathbf{C}_{i-1}, \hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ under the strong constraint of optical flow. + +![](images/69d8b311ce36a4530ecf232650dd28197899054c0313144d824fc179e70d4d2f.jpg) +Figure 3. To increase the spatial resolution, we adopt flow-based warping to merge 5 adjacent spike frames. + +![](images/cf39b78ad3ae1e85fd6294c3574fe337a9e139e2680711172548c5216ff96c0f.jpg) + +![](images/ae9042900c55487e39988804c1314cb26d945fa12eb7432a4fe8c52019a9b1ae.jpg) + +![](images/c8d6dedbab383c4cabfdbf4100b648119d41bdf85f6b86c070cb6ca3c7313a25.jpg) + +![](images/49bbef8a2cd1ba9b8519bf1120a392f74430894e19e52a34957f5c27c82657ef.jpg) + +# 3.2. Spike preprocessing + +The optical flow estimation and spike frame reconstruction in Eqn. (1) and Eqn. (2) are straightforward in theory, yet the reconstructed frames have two practical issues: since the integration time $t_f$ is very short, the noise is relatively strong; and the spatial resolution of the first-generation spiking camera (VidarOne [17]) is much lower than that of the RGB camera.
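As a concrete reference for Eqn. (2), the minimal sketch below accumulates a binary spike tensor over a centered time window to form a spike frame. The array layout (time × height × width) and the toy window length are assumptions for illustration, not the authors' implementation.

```python
import numpy as np

def spike_frame(spikes: np.ndarray, t_i: int, t_f: int) -> np.ndarray:
    """Discrete version of Eqn. (2): sum the binary spikes s(x, y, t) over a
    window of t_f samples centered at index t_i; `spikes` has shape [T, H, W]."""
    lo = max(t_i - t_f // 2, 0)
    hi = min(t_i + t_f // 2, spikes.shape[0])
    return spikes[lo:hi].sum(axis=0).astype(np.float32)

# Toy usage: 200 samples (10 ms at 20 kHz) of a 250x400 spike train.
rng = np.random.default_rng(0)
spikes = (rng.random((200, 250, 400)) < 0.1).astype(np.uint8)
frame = spike_frame(spikes, t_i=100, t_f=40)   # 2 ms window
frame /= max(float(frame.max()), 1.0)          # normalize for visualization
```

With only tens of samples in the window, such a frame is noisy and stays at the spiking camera's native resolution, which is exactly the pair of issues addressed next.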
To reduce the noise and increase the spatial resolution, inspired by burst-based super-resolution [4] and denoising [27] for conventional RGB images, it is feasible to merge a group of adjacent spike frames with the help of spatial alignment. Moreover, thanks to the continuous motion recording capability of spiking cameras, the optical flow [16] estimated from spike trains makes the alignment even more stable than alignment based on RGB images. As illustrated in Fig. 3, we design a computationally efficient module for spike frames, which is formulated as: $\hat{\mathbf{I}}_i = \{\mathcal{W}_{\mathbf{F}_{j\to i}}(\mathbf{I}_j)\,|\,j\in \mathcal{N}_i\}$ , where $\mathcal{W}_{\mathbf{F}_{j\to i}}(\cdot)$ denotes the flow-based warping operation and $\mathcal{N}_i$ denotes the collection of adjacent frames. Then, we feed $\hat{\mathbf{I}}_i$ to a set of convolutional layers, and we use PixelShuffle [47] to increase the spatial resolution while decreasing the number of feature channels. It should be noted that the method for spike frame reconstruction is not unique; users can choose other learning-based methods [61, 62, 64]. However, those deep learning models are relatively heavy and less efficient as submodules in our pipeline. + +# 3.3. RGB image preprocessing + +RGB linear mapping. Following previous methods for HDR video reconstruction [3, 19, 21], we first unify the brightness of the alternating-exposure RGB frames. Since we use an industrial camera (details in Sec. 3.5) that can acquire data without a nonlinear radiometric response function, the linearity of the captured frames is maintained. We find that the brightness of the frames maintains a linear relationship with the exposure duration. Hence we use a global linear mapping to unify the frame brightness: $\alpha \cdot \mathbf{B}_k(x,y)\rightarrow \mathbf{B}_k(x,y)$ , where $\alpha$ denotes a linear scalar. + +Spike-guided deblurring. The physical model of the blurring process can be simply formulated as the average of a group of sharp images, i.e., $\mathbf{B}^l (x,y) = \frac{1}{N}\sum_{j = 1}^{N}\mathbf{B}_j^l (x,y)$ , where $N$ denotes the number of sharp images. However, due to the limited dynamic range of the RGB camera, this simplified equation does not hold in the clipped regions of real-world long-exposure frames. In general we should have: $\mathbf{B}^l (x,y)\leq \frac{1}{N}\sum_{j = 1}^{N}\mathbf{B}_j^l (x,y)$ . Therefore, to reconstruct a sequence of sharp HDR images from $\mathbf{B}^l$ , we divide the problem into two sub-tasks: (i) For the well-exposed regions, we use the sharp spike frames to guide motion deblurring; (ii) for the clipped regions where colors are lost, we compensate them with well-retained colors extracted from the adjacent short-exposure image $\mathbf{B}^s$ . + +Figure 2(b) shows the spike-guided deblurring (SG-deblur) from $\mathbf{B}^l$ ( $\mathbf{B}^l$ may be a middle- or long-exposure image). Similar to Xu et al. [54], who exploit event frames for motion deblurring, we first concatenate $\mathbf{B}^l$ with the corresponding spike frames $\{\mathbf{I}_j \,|\, j \in \mathcal{N}_l\}$ , then extract shallow features and increase the feature channels with PixelShuffle [47], which is followed by a set of residual dense blocks (RDBs) [60] and a decoder.
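Flow-based warping $\mathcal{W}_{\mathbf{F}}(\cdot)$ is used repeatedly in this pipeline (merging adjacent spike frames above, and compensating clipped regions with a warped short-exposure frame next). The following is a minimal PyTorch sketch of backward warping with `grid_sample`; the flow convention (displacements in pixels) is an assumption for illustration rather than the authors' exact implementation.

```python
import torch
import torch.nn.functional as F

def warp(image: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    """Backward-warp `image` [B, C, H, W] with `flow` [B, 2, H, W] in pixels,
    where flow[:, 0] is the horizontal and flow[:, 1] the vertical displacement."""
    b, _, h, w = image.shape
    ys, xs = torch.meshgrid(
        torch.arange(h, dtype=image.dtype, device=image.device),
        torch.arange(w, dtype=image.dtype, device=image.device),
        indexing="ij",
    )
    x = xs.unsqueeze(0) + flow[:, 0]                  # sampling coordinates
    y = ys.unsqueeze(0) + flow[:, 1]
    grid = torch.stack((2.0 * x / (w - 1) - 1.0,      # normalize to [-1, 1]
                        2.0 * y / (h - 1) - 1.0), dim=-1)
    return F.grid_sample(image, grid, mode="bilinear",
                         padding_mode="border", align_corners=True)

# Toy usage: shift a random "spike frame" by two pixels.
img = torch.rand(1, 1, 250, 400)
flow = torch.zeros(1, 2, 250, 400)
flow[:, 0] = 2.0
warped = warp(img, flow)
```

The same operation, driven by the spike-based optical flow, is what makes the spike frames, the short-exposure colors, and the previously reconstructed frame spatially comparable before fusion.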
To compensate the colors in the over-exposed regions with the adjacent short-exposure RGB image $\mathbf{B}^s$ , we warp the short-exposure image with the optical flow estimated from spike trains: $\mathbf{B}^{s\rightarrow j} = \mathcal{W}_{\mathbf{F}_{s\rightarrow j}}(\mathbf{B}^s)$ , where $\mathcal{W}_{\mathbf{F}_{s\rightarrow j}}(\cdot)$ denotes the warping operation from timestamp $t_s$ to timestamp $t_j$ . Subsequently, we extract features from $\{\mathbf{B}^{s\rightarrow j}\}$ and add residual links between them and the decoder. Finally, we obtain a sequence of sharp color images. Note that the SG-deblur for the middle- and long-exposure RGB images shares the same architecture, but the parameters are not shared. SG-deblur outputs four images for both the $4t_s$ and $12t_s$ frames; for a $12t_s$ frame, we further interpolate the 4 frames to 12 frames with flow-based warping. + +Next, we briefly explain why this event-based model [54] can be applied to a spike-based task. Both event streams and spike trains, which share the high-speed property, have been used for motion deblurring and latent frame reconstruction [14, 18, 54]. It is necessary to convert them to event frames and spike frames, both of which belong to the category of 2D images. But event frames and spike frames have different physical meanings: pixel values in an event frame reveal the residual (relatively sparse information) between two adjacent frames, while pixel values in a spike frame represent exactly the texture (relatively dense information) of the corresponding frame. Since both event frames and spike frames are 2D images and spike frames carry denser texture information, we can replace the event frames in such a model with spike frames, which makes the problem better posed. + +# 3.4. Merging into HFR video + +RGB interpolation. Given each middle- and long-exposure frame, SG-deblur recovers 4 and 12 images, respectively. Therefore, the recovered RGB frames have a frame rate of 340 FPS. But their temporal distribution is quite uneven, e.g., there is no recovered color frame during the interval time $T_{\mathrm{itv}}$ . Fortunately, the spike train contains continuous and dense texture information in the temporal domain. In Step ③, we use the SG-interpolation module to interpolate the RGB frames into a sequence of uniformly distributed images. For each spike frame $\mathbf{I}_i$ , we bidirectionally query its two nearest recovered RGB frames $\{\mathbf{B}_i^+, \mathbf{B}_i^-\}$ and interpolate two color frames $\{\hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ with the optical flow estimated from spike trains. When $\{\hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ are fed into our merging module, they are weighted by a linear coefficient ( $\oplus$ in Fig. 4) related to the distance between $t_i$ and $\{t_+, t_-\}$ , where $\{t_+, t_-\}$ denote the timestamps of $\{\hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ . + +![](images/f70c457f89317c1a8dfbd75c2c3839b8139e7a74e3c29226b3c581376d4d252a.jpg) +Figure 4. Network architecture of the CNN-RNN-based merging module for reconstructing HFR&HDR videos from alternating-exposure RGB frames and HFR spike frames. This module outputs HDR color frames in a step-wise manner. We unroll the module for $M$ steps during training. + +Merging module. The aforementioned modules reconstruct coarse HFR video frames, which need to be refined to be smooth over time. We build a CNN-RNN-based HFR&HDR video reconstruction network to merge the spike frames and RGB frames, which is shown in Fig. 4.
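Before detailing the encoders of the merging module, here is a small sketch of one plausible form of the distance-based linear weighting ( $\oplus$ in Fig. 4) applied to the two interpolated frames described above. The coefficient is only described as "related to the distance between $t_i$ and $\{t_+, t_-\}$", so the inverse-distance form below is an assumption.

```python
import torch

def blend_bidirectional(b_plus: torch.Tensor, b_minus: torch.Tensor,
                        t_i: float, t_plus: float, t_minus: float,
                        eps: float = 1e-6) -> torch.Tensor:
    """Blend the two warped color frames with weights inversely proportional to
    the temporal distance between the target time t_i and the source timestamps."""
    d_plus = abs(t_i - t_plus) + eps
    d_minus = abs(t_i - t_minus) + eps
    w_plus = d_minus / (d_plus + d_minus)    # the closer source gets the larger weight
    w_minus = d_plus / (d_plus + d_minus)
    return w_plus * b_plus + w_minus * b_minus

# Toy usage at t_i = 3 ms, with sources recovered at 1 ms and 9 ms.
b_plus, b_minus = torch.rand(1, 3, 256, 400), torch.rand(1, 3, 256, 400)
blended = blend_bidirectional(b_plus, b_minus, t_i=3e-3, t_plus=1e-3, t_minus=9e-3)
```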
The merging module consists of three encoders, i.e., $\mathcal{E}_I$ , $\mathcal{E}_B$ , and $\mathcal{E}_C$ , which are respectively designed for feature extraction from the current spike frame $\hat{\mathbf{I}}_i$ , the interpolated RGB images $\{\hat{\mathbf{B}}_i^+, \hat{\mathbf{B}}_i^-\}$ , and the previously reconstructed image $\mathbf{C}_{i-1}$ . In $\mathcal{E}_I$ , we use PixelShuffle [47] to make the spatial resolution of the spike features consistent with that of the RGB features. The extracted features are denoted as $\mathbf{E}_I$ , $\{\mathbf{E}_{B+}, \mathbf{E}_{B-}\}$ , and $\mathbf{E}_{C_{i-1}}$ , respectively. + +Considering that the spike frames and RGB frames may not be perfectly aligned at the pixel level for real-world data, we add deformable convolution layers [6] to improve the robustness to this issue. In order to output flicker-free color frames, we adopt two constraints in the merging module: + +Table 1. Details of the composition of the dataset (res. is the abbreviation of resolution).
| data | RGB res. | spike res. | train/test | time |
| --- | --- | --- | --- | --- |
| full-synthetic | 500×800 | 250×400 | 80/20 | 0.1 s |
| real-synthetic | 600×800 | 250×400 | 160/40 | 0.101 s |
| real-world | 484×784 | 242×392 | -/20 | 0.101 s |
+ +(i) We add three ConvLSTM layers [48] to feed previous states forward in temporal domain; (ii) we feed $\mathbf{E}_{C_i}$ into the current step and align it with the current features with flow-based warping. We then use a decoder to reversely map deep features to the current output HDR frame $\mathbf{C}_i$ . We achieve the multi-module signal fusion by adding concatenation links between $\{\mathbf{E}_{C_i}$ , $\mathbf{E}_B$ , $\mathbf{E}_{B+}\}$ and the decoder. + +# 3.5. Implementation Details + +Due to the setting of our method being different from existing HDR and video frame interpolation methods, there are no suitable datasets for training and testing our method. Therefore, we collect a new one with three components, whose details are summarized in Table 1 and sample images are provided in Fig. 5. + +Part 1: Full-synthetic data. This part of data is obtained by using the spike simulator proposed by Hu et al. [16]. We render 2000 RGB images with their computer graphics based solution as ground truth and generate 2000 spike planes (0.1 s). Since the photons arriving at the sensor follow Poisson probability distribution [43], we synthesize alternating-exposure 60 FPS RGB frames with a Poisson noise model. For the full synthetic data, we randomly select starting time of each group of training data. We randomly shift the RGB frames within 3 pixels to make the trained model more robust to the misalignment in real-world data. + +Part 2: Real-synthetic data. To reduce the domain gap between full-synthetic data and real-world data, we design a method to collect real-synthetic (the scenes are real while + +![](images/176aa9dfa2ecaf20f74ee48cdeb45fed0d736431be6a2a3e803a4ccf3f70da7d.jpg) +Figure 5. Example frames from the proposed dataset. Each group shows three alternating-exposure RGB frames (left, from top to bottom rows) and the corresponding spike signals (right). + +the spike trains are synthetic) data, and we use this part of data to fine-tune our model. The RGB frames are captured with an alternating-exposure mode in slow-motion scenes. Then we synthesize blurry middle-exposure RGB frames by averaging 4 adjacent middle-exposure RGB images, and blurry long-exposure RGB frames are synthesized in a similar way. We synthesize spike trains from ground truth RGB frames with the integrate-and-fire methodology [61]. + +Part 3: Real-world data. We build a Spike-RGB hybrid camera (Fig. 6) to capture real-world data. The system is composed of an industrial camera (Basler acA800-510uc $^5$ ) with alternating exposure capability and a spiking camera [17]. There is a beam splitter in front of the two sensors. We conduct geometric calibration and time synchronization to align bimodal signals collected by them. + +Loss and training. The SG-deblur module and the merging module reconstruct images in the linear luminance domain, which covers a high dynamic range of pixel values. Following existing methods for HDR reconstruction, for the output images $\mathbf{C}$ , we compress the range of pixel values by applying the following function proposed by Kalantari et al. [20]: $\mathcal{T}(\mathbf{C}) = \log (1 + \mu \mathbf{C}) / \log (1 + \mu)$ , where $\mathcal{T}(\cdot)$ denotes the tone mapping operation and $\mu$ denotes the amount of compression. For these two modules, we employ widely used $l_{1}$ loss, Structure similarity (SSIM) loss [53], and Learned Perceptual Image Patch Similarity (LPIPS) loss [58]. 
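As a concrete reference for the objective just described, the sketch below shows the μ-law compression $\mathcal{T}(\cdot)$ from Kalantari et al. [20] and one way to assemble the three loss terms; they are combined into the total loss given next in Eqn. (3). The value of $\mu$ and the `ssim_fn`/`lpips_fn` stand-ins are assumptions for illustration, not the authors' exact implementation.

```python
import math
import torch
import torch.nn.functional as F

def tonemap(c: torch.Tensor, mu: float = 5000.0) -> torch.Tensor:
    """mu-law range compression T(C) = log(1 + mu*C) / log(1 + mu); `c` is a
    linear-domain HDR frame normalized to [0, 1]. mu = 5000 is an assumed value."""
    return torch.log1p(mu * c) / math.log(1.0 + mu)

def step_loss(pred: torch.Tensor, gt: torch.Tensor, ssim_fn, lpips_fn,
              beta1: float = 1.0, beta2: float = 1.0) -> torch.Tensor:
    """Per-step objective: l1 + beta1 * SSIM loss + beta2 * LPIPS loss on
    tone-mapped frames. `ssim_fn` returns a similarity in [0, 1]; `lpips_fn`
    returns a perceptual distance (plug in any implementation you prefer)."""
    p, g = tonemap(pred), tonemap(gt)
    l1 = F.l1_loss(p, g)
    l_ssim = 1.0 - ssim_fn(p, g)      # turn similarity into a loss
    l_lpips = lpips_fn(p, g).mean()
    return l1 + beta1 * l_ssim + beta2 * l_lpips
```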
The total loss at step $i$ for both the motion deblurring and merging modules is + +$$ +\mathcal{L}_{\mathrm{total}}(i) = \mathcal{L}_{l_1}(i) + \beta_1 \mathcal{L}_{\mathrm{SSIM}}(i) + \beta_2 \mathcal{L}_{\mathrm{LPIPS}}(i), \tag{3} +$$ + +where $\beta_{1} = 1$ and $\beta_{2} = 1$ . For spike-based optical flow estimation using [16], we fine-tune the parameters with full-synthetic data. During training, we resize the RGB images and spike frames to $512 \times 800$ and $256 \times 400$ . We implement our model with PyTorch, set the batch size to 4, and use the ADAM optimizer during the training process. We first train the model on full-synthetic data. The SG-deblur module is trained for 50 epochs before training the merging module. We unroll the merging module for $M$ steps, and we find $M = 4$ achieves a suitable balance between training time and recovery quality. The total loss for the unrolled $M$ steps is $\mathcal{L}_{\mathrm{merge}} = \sum_{i=1}^{M} \mathcal{L}_{\mathrm{total}}^{\mathrm{M}}(i)$ , where $\mathcal{L}_{\mathrm{total}}^{\mathrm{M}}(i)$ denotes the total loss for the merging module at step $i$ . The initial learning rate for both modules is 0.001, and we decay it to $10^{-6}$ with a linear schedule. For the real-synthetic data, we fine-tune another group of parameters to reduce the gap between synthetic data and real-world data. We use one NVIDIA Tesla A100 for training, and the training procedure consumes about 30 hours. + +![](images/3a9785b5e9c025abd121037f9499ff36cfcf100a90bcaf5ae65255ec856a5815.jpg) +Figure 6. The prototype of our Spike-RGB imaging system composed of a spiking camera and an RGB camera. + +# 4. Experiments + +# 4.1. Quantitative Evaluation using Synthetic Data + +Validation on full-synthetic data. Figure 8 shows a group of results on full-synthetic data. We can see that both the flying objects in the short-exposure image and the oversaturated clouds (see the regions marked by boxes) in the long-exposure image are recovered successfully. The results, with rich textures and consistent colors, show the feasibility of our proposed method. + +Evaluation on real-synthetic data. To the best of our knowledge, the proposed method is the first framework to reconstruct HFR&HDR videos by combining spike trains and alternating-exposure RGB frames. Therefore, it is unfair to compare our method with existing ones, i.e., Kalantari13 [21], Kalantari19 [19], and Chen21 $[3]^{6}$ , which are designed for low frame rate HDR videos. + +We choose the state-of-the-art HDR video reconstruction method Chen21 [3], which also uses alternating-exposure RGB frames (the closest setup to ours), as a reference. Figure 7 shows the reconstruction results of the proposed method and Chen21 [3] on real-synthetic data. Thanks to the complementary motion information provided by spike trains, the abundant color extracted from alternating-exposure RGB frames, and the accurate textures contained in spike frames, the proposed method is capable of reconstructing rich texture details with less motion blur.
![](images/afa1aa45dd7684f40beb4726479a3aba9c3cd0d96ec6f4bf23778aeeefabdc8f.jpg) +short + +![](images/71cdc16eea019dcd1bd0dd0177ee5024f38cc34f796330d5d1eac074cf09b1e0.jpg) +middle +(a) + +![](images/6162aa256a287f2f993030fdd823e1c193c5a5945396f892262b2ef1595ff2d3.jpg) +long + +![](images/6eb5166094979d17c7be16571a725ef333c07be4f97c0f8c35d1361d0ff8d59e.jpg) +short + +![](images/18487dec66ddaa2019bfc213e532588aef073461a9d8d2d11543e80ebfbdbd84.jpg) +middle +(b) + +![](images/865b4f729ea98b545b1620ffe9198a6a878505d1040d35bbaf214b8fe6412abe.jpg) +long + +![](images/fbb712d19253aafc1fd9d055053cde4360d158fb65c53da6d8203063df92c7ec.jpg) +Figure 7. Visual quality comparison on real-synthetic data between the proposed method and the state-of-the-art HDR video reconstruction method Chen21 [3]. We present two sets of results in (a) and (b). Please zoom in on the electronic version for better details, and watch the HFR videos on the project page. +Figure 8. Validation on the synthetic data. + +For example, in the long-exposure frame in the first row of (a), the building marked by a yellow box suffers from severe motion blur and overexposure. Chen21 [3] partially recovers the colors of this building, but it fails to remove the blurry artifacts. In the results generated by our method, the edges are sharp and the colors are vivid. In Fig. 7(b), the motions across RGB frames have a very large span; Chen21 [3] can only recover the corresponding LFR video, while our method can reconstruct an HFR video with smooth motion. + +We evaluate the reconstructed HDR videos in terms of PSNR, SSIM, HDR-VDP-2 [32], and HDR-VQM [36]. Table 2 clearly shows that our framework outperforms the state-of-the-art method [3] in all the metrics on the real-synthetic data at 60 FPS, and we achieve excellent performance at 1000 FPS. We design ablation experiments to demonstrate the effectiveness of the modules in our framework. For "w/o I", we simply stack the spike trains with a time window and upsample them using bilinear interpolation; for "w/o PS", we replace PixelShuffle with a convolutional layer. The two groups of experiments verify the effectiveness of the spike frame preprocessing in Step ①. For "w/o F1" and "w/o F2", we remove the flow-based interpolation in the deblurring module and the merging module. The two groups of ex + +Table 2. Quantitative results and ablation study on our real-synthetic data. We sample 60 FPS videos from our results for the comparison with Chen21 [3]. $\uparrow (\downarrow)$ indicates larger (smaller) values are better.
| Method | PSNR↑ | SSIM↑ | HDR-VDP-2↑ | HDR-VQM↓ | FPS |
| --- | --- | --- | --- | --- | --- |
| *Comparison with the state-of-the-art method* | | | | | |
| Chen21 [3] | 18.46 | 0.697 | 27.34 | 0.536 | 60 |
| Ours | 30.14 | 0.921 | 60.14 | 0.093 | 60 |
| Chen21 [3] | / | / | / | / | 1000 |
| Ours | 24.38 | 0.903 | 47.79 | 0.120 | 1000 |
| *Ablation study* | | | | | |
| w/o I | 23.15 | 0.886 | 46.03 | 0.143 | 1000 |
| w/o PS | 23.98 | 0.881 | 46.47 | 0.141 | 1000 |
| w/o F1 | 19.76 | 0.723 | 38.95 | 0.314 | 1000 |
| w/o F2 | 18.04 | 0.716 | 35.89 | 0.356 | 1000 |
| w/ t-loss | 22.41 | 0.864 | 43.64 | 0.142 | 1000 |
| w/o DeConv | 24.31 | 0.897 | 47.66 | 0.127 | 1000 |
| w/o DM | 19.01 | 0.714 | 37.97 | 0.338 | 1000 |
+ +periments verify the effectiveness of SC-Flow [16] based interpolation in Steps ② and ③. To further verify the effectiveness of deblurring module, we completely remove it in "w/o DM". For "w/o DeConv", we replace the deformable convolutional layers with traditional convolution layers. For "w/ t-loss", we remove the warping operation on $\mathbf{C}_{i-1}$ and add the temporal consistent loss that is estimated by a pretrained optical flow model [23], which is widely used in video processing [5, 39]. Since the $\mathbf{C}_{i-1}$ is warped by accurate optical flow $\mathbf{F}_{i-1}$ and merged into the current step $i$ , our method fundamentally has a strong temporal consistent constraint for video processing. Thus, our merging module does not need this loss during training. + +# 4.2. Qualitative Evaluation using Real Data + +In order to demonstrate the effectiveness of the proposed framework on real-world scenes, we collect 20 sets of real-world data, which are captured by our hybrid camera system shown in Fig. 6. We have compared our slow-motion capability with that of the commercial cameras. As shown in Fig. 9(a), the electric fan is moving at about 40 rounds + +![](images/1f268f5b45e6511852f7c10d04b0e95083619bab47aae0ee46893577875ea9d0.jpg) +Figure 9. Visual quality comparison of real-world data between the proposed method and commercial cameras with the slow-motion capability. In (a), we show two adjacent frames for the video captured by smartphones that have slow-motion capability. The commercial cameras are not calibrated so their results are not strictly aligned with ours. (b) is the comparison with Phantom camera set to 1000 FPS. + +![](images/72f0059269a352e5fae6316d26d6d3997fa3fa7268ce9475787b3e6b04ccd5e3.jpg) +(a) + +![](images/87e87953be2aadf893e099d367271fb5ced7b1b9999ee6fc17190a8fe64abca6.jpg) + +![](images/12ab643dce1827b18edc878212e142f247aa8711e1c782c9617d11b686395bd6.jpg) + +![](images/70699cd7e7248946e810a0c7736901cdbac1ebc3551834ffa17d97f4ef9afb79.jpg) + +![](images/c3d3ddfd28c99162d50a607d2239a16a351121102d1537088a159bcc3e287777.jpg) +(b) + +![](images/e93c85761807f3ca93c18962918da723c0e397456cdc1707f1e86ea56554f370.jpg) +Figure 10. Qualitative visualization of our method in a super fast scene: a balloon bursting. We select 38 frames from our results for showing. + +per second. The short-exposure image is severely underexposed with less blurry artifacts, and the middle- and long-exposure images have severe blurring and oversaturated artifacts. With the accurate motion and texture information captured by the spiking camera, we have recovered temporally smooth video sequences. Four recovered images are shown for the middle- and long-exposure images. For the videos captured by iPhone 13 and Mi 10, the motions between frames are not continuous. And the electric fan captured by Mi 10 is deformed due to the rolling shutter. In Fig. 9(b), we compare our method with the Phantom7 camera set to 1000 FPS. Since the exposure time of the Phantom camera is extremely short, it fails to capture regions where scene radiance is weak. + +# 5. Conclusion + +We propose an HFR&HDR video reconstruction method with a hybrid camera that is composed of an alternating-exposure RGB sensor and a spiking sensor. Extensive experiments on synthetic and real-world data demonstrate the superior performance of the proposed method. + +Discussion. (i) For super fast scenes, e.g., a balloon bursting, it is difficult to capture clear motions with a conventional RGB camera at 60 FPS. 
Therefore, the well-exposed color of the bursting balloon is not captured by the short exposure, which makes it challenging for our method to reconstruct accurate colors. In our results, although the colors are somewhat distorted, we can still recover a smooth video sequence. Once the frame rate of the RGB camera is increased, e.g., to 120 FPS, a temporally smoother video with more accurate colors is expected to be recovered more reliably. (ii) Since QIS [1, 29] shares the same imaging model as the spiking camera, our method is ready to be applied to it. We show a simulation in the supplementary material. + +Limitation and future work. The use of a beam splitter is debatable for building a practical system on mobile devices. But when a compact design is not a hard constraint, a beam splitter has unique advantages in spatial alignment, which is why it is broadly adopted in building hybrid prototypes for HDR [15, 24, 33, 50]. A side-by-side arrangement with parallax unavoidably introduces occlusions and alignment issues, which is a promising direction to explore in our future work. Due to the low spatial resolution $(250\times 400)$ of the spiking camera we use, we have to super-resolve the spike frames in the feature space. If higher-resolution spike signals can be obtained directly, our method can achieve better visual quality. Besides, there is a domain gap between synthetic spike trains and real-captured spike trains, since the noise of the spiking camera is more complex than that of the simulator. In terms of time complexity, our approach is better suited as a post-processing module: the number of parameters is $45.7\mathrm{M}$ and the time cost per frame is 0.371 s on a single NVIDIA GeForce RTX 3090 graphics card. We hope to tackle these issues in future work and achieve higher frame rate reconstruction. + +# Acknowledgement + +This work was supported by the National Key R&D Program of China (2021ZD0109803) and the National Natural Science Foundation of China under Grants No. 62088102 and 62136001. Yakun Chang was also supported by the China Postdoctoral Science Foundation (8206300710). + +# References + +[1] Abhiram Gnanasambandam and Stanley H. Chan. HDR imaging with quanta image sensors: Theoretical limits and optimal reconstruction. IEEE Transactions on Computational Imaging, 6:1571-1585, 2020. 2, 8 +[2] Moshe Ben-Ezra and Shree K Nayar. Motion deblurring using hybrid imaging. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 2 +[3] Guanying Chen, Chaofeng Chen, Shi Guo, Zhetong Liang, Kwan-Yee K Wong, and Lei Zhang. HDR video reconstruction: A coarse-to-fine network and a real-world benchmark dataset. In Proc. of International Conference on Computer Vision, pages 2502-2511, 2021. 2, 4, 6, 7 +[4] Wooyeong Cho, Sanghyeok Son, and Dae-Shik Kim. Weighted multi-kernel prediction network for burst image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 404-413, 2021. 4 +[5] Jonghyun Choi, Kuk-Jin Yoon, et al. Learning to super resolve intensity images from events. In Proc. of Computer Vision and Pattern Recognition, pages 2768-2776, 2020. 7 +[6] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proc. of International Conference on Computer Vision, pages 764-773, 2017. 5 +[7] Paul E Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proc. of ACM SIGGRAPH, pages 1-10. 2008. 2 +[8] Akshay Dudhane, Syed Waqas Zamir, Salman Khan, Fahad Shahbaz Khan, and Ming-Hsuan Yang.
Burst image restoration and enhancement. In Proc. of Computer Vision and Pattern Recognition, pages 5759-5768, 2022. 2 +[9] Gabriel Eilertsen, Joel Kronander, Gyorgy Denes, Rafat K Mantiuk, and Jonas Unger. HDR image reconstruction from a single exposure using deep cnns. ACM Transactions on Graphics, 36(6):1-15, 2017. 2 +[10] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1):154-180, 2020. 1 +[11] Clément Godard, Kevin Matzen, and Matt Uytendaele. Deep burst denoising. In Proc. of European Conference on Computer Vision, pages 538-554, 2018. 2 +[12] Yulia Gryaditskaya, Tania Pouli, Erik Reinhard, Karol Myszkowski, and Hans-Peter Seidel. Motion aware exposure bracketing for HDR video. In Computer Graphics Forum, volume 34, pages 119-130. Wiley Online Library, 2015. 2 +[13] Saghi Hajisharif, Joel Kronander, and Jonas Unger. Adaptive dualiso HDR reconstruction. EURASIP Journal on Image and Video Processing, 2015(1):1-13, 2015. 2 +[14] Jin Han, Yixin Yang, Chu Zhou, Chao Xu, and Boxin Shi. Evintsr-net: Event guided multiple latent frames reconstruction and super-resolution. In Proc. of International Conference on Computer Vision, pages 4882-4891, 2021. 2, 4 +[15] Jin Han, Chu Zhou, Peiqi Duan, Yehui Tang, Chang Xu, Chao Xu, Tiejun Huang, and Boxin Shi. Neuromorphic cam- + +era guided high dynamic range imaging. In Proc. of Computer Vision and Pattern Recognition, pages 1730-1739, 2020. 2, 8 +[16] Liwen Hu, Rui Zhao, Ziluo Ding, Lei Ma, Boxin Shi, Ruiqin Xiong, and Tiejun Huang. Optical flow estimation for spiking camera. In Proc. of Computer Vision and Pattern Recognition, pages 17844-17853, 2022. 2, 3, 4, 5, 6, 7 +[17] Tiejun Huang, Yajing Zheng, Zhaofei Yu, Rui Chen, Yuan Li, Ruiqin Xiong, Lei Ma, Junwei Zhao, Siwei Dong, Lin Zhu, et al. $1000 \times$ faster camera and machine vision with ordinary devices. Engineering, 2022. 1, 3, 4, 6 +[18] Zhe Jiang, Yu Zhang, Dongqing Zou, Jimmy Ren, Jiancheng Lv, and Yebin Liu. Learning event-based motion deblurring. In Proc. of Computer Vision and Pattern Recognition, pages 3320-3329, 2020. 2, 3, 4 +[19] Nima Khademi Kalantari and Ravi Ramamoorthi. Deep HDR video from sequences with alternating exposures. In Computer graphics forum, volume 38, pages 193-205. Wiley Online Library, 2019. 2, 4, 6 +[20] Nima Khademi Kalantari, Ravi Ramamoorthi, et al. Deep high dynamic range imaging of dynamic scenes. ACM Transactions on Graphics, 36(4):144-1, 2017. 2, 6 +[21] Nima Khademi Kalantari, Eli Shechtman, Connelly Barnes, Soheil Darabi, Dan B Goldman, and Pradeep Sen. Patch-based high dynamic range video. ACM Transactions on Graphics, 32(6):202-1, 2013. 2, 4, 6 +[22] Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High dynamic range video. ACM Transactions on Graphics, 22(3):319-325, 2003. 2 +[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proc. of European Conference on Computer Vision, pages 170-185, 2018. 7 +[24] Byungju Lee and Byung Cheol Song. Multi-image high dynamic range algorithm using a hybrid camera. Signal Processing: Image Communication, 30:37-56, 2015. 2, 8 +[25] Juan Antonio Lénero-Bardallo, Teresa Serrano-Gotarredona, and Bernabé Linares-Barranco. 
A 3.6 $\mu$ s latency asynchronous frame-free event-driven dynamic-vision-sensor. IEEE Journal of Solid-State Circuits, 46(6):1443-1455, 2011. 1 +[26] Feng Li, Jingyi Yu, and Jinxiang Chai. A hybrid camera for motion deblurring and depth map super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 3 +[27] Ziwei Liu, Lu Yuan, Xiaou Tang, Matt Uytendaele, and Jian Sun. Fast burst images denoising. ACM Transactions on Graphics, 33(6):1-9, 2014. 4 +[28] Kede Ma, Hui Li, Hongwei Yong, Zhou Wang, Deyu Meng, and Lei Zhang. Robust multi-exposure image fusion: A structural patch decomposition approach. IEEE Transactions on Image Processing, 26(5):2519-2532, 2017. 2 +[29] Ulku Arin C Bruschini Claudio Charbon Edoardo Ma Sizhuo, Gupta Shantanu and Gupta Mohit. Quanta burst photography. ACM Transactions on Graphics, 39(4):79-1, 2020. 8 + +[30] Stephen Mangiat and Jerry Gibson. High dynamic range video with ghost removal. In Applications of Digital Image Processing XXXIII, volume 7798, pages 307-314. SPIE, 2010. 2 +[31] Stephen Mangiat and Jerry Gibson. Spatially adaptive filtering for registration artifact removal in HDR video. In Proc. of International Conference on Image Processing, pages 1317-1320. IEEE, 2011. 2 +[32] Rafal Mantiuk, Kil Joong Kim, Allan G Rempel, and Wolfgang Heidrich. HDR-VDP-2: A calibrated visual metric for visibility and quality predictions in all luminance conditions. ACM Transactions on Graphics, 30(4):1-14, 2011. 7 +[33] Morgan McGuire, Wojciech Matusik, Hanspeter Pfister, Billy Chen, John F Hughes, and Shree K Nayar. Optical splitting trees for high-precision monocular imaging. IEEE Computer Graphics and Applications, 27(2):32-42, 2007. 2, 8 +[34] Tom Mertens, Jan Kautz, and Frank Van Reeth. Exposure fusion. In Pacific Conference on Computer Graphics and Applications, pages 382-390, 2007. 2 +[35] Nico Messikommer, Stamatios Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-Bracket high dynamic range imaging with event cameras. In Proc. of Computer Vision and Pattern Recognition, pages 547–557, 2022. 2 +[36] Manish Narwaria, Matthieu Perreira Da Silva, and Patrick Le Callet. HDR-VQM: An objective quality measure for high dynamic range video. Signal Processing: Image Communication, 35:46-60, 2015. 7 +[37] Shree K Nayar and Tomoo Mitsunaga. High dynamic range imaging: Spatially varying pixel exposures. In Proc. of Computer Vision and Pattern Recognition, volume 1, pages 472-479. IEEE, 2000. 2 +[38] Avinash Paliwal and Nima Khademi Kalantari. Deep slow motion video reconstruction with hybrid imaging system. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(7):1557-1569, 2020. 3 +[39] Henri Rebecq, René Ranftl, Vladlen Koltun, and Davide Scaramuzza. High speed and high dynamic range video with an event camera. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):1964-1980, 2019. 7 +[40] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proc. of Computer Vision and Pattern Recognition, pages 779-788, 2016. 1 +[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 1 +[42] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. 
In International Conference on Medical image computing and computer-assisted intervention, pages 234–241. Springer, 2015. 2 +[43] Yash Sanghvi, Abhiram Gnanasambandam, and Stanley H Chan. Photon limited non-blind deblurring using algorithm + +unrolling. IEEE Transactions on Computational Imaging, 2022.5 +[44] Marcel Santana Santos, Tsang Ing Ren, and Nima Khademi Kalantari. Single image HDR reconstruction using a cnn with masked features and perceptual loss. arXiv preprint arXiv:2005.07335, 2020. 2 +[45] Pradeep Sen, Nima Khademi Kalantari, Maziar Yaesoubi, Soheil Darabi, Dan B Goldman, and Eli Shechtman. Robust patch-based HDR reconstruction of dynamic scenes. ACM Transactions on Graphics, 31(6):203-1, 2012. 2 +[46] Richard Shaw, Sibi Catley-Chandar, Ales Leonardis, and Eduardo Perez-Pellitero. HDR reconstruction from bracketed exposures and events. arXiv preprint arXiv:2203.14825, 2022. 2 +[47] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proc. of Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 4, 5 +[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 5 +[49] Yu-Wing Tai, Hao Du, Michael S Brown, and Stephen Lin. Image/video deblurring using a hybrid camera. In Proc. of Computer Vision and Pattern Recognition, pages 1-8, 2008. 2 +[50] Michael D Tocci, Chris Kiser, Nora Tocci, and Pradeep Sen. A versatile HDR video production system. ACM Transactions on Graphics, 30(4):1-10, 2011. 2, 8 +[51] Stepan Tulyakov, Alfredo Bochicchio, Daniel Gehrig, Stamatios Georgoulis, Yuanyou Li, and Davide Scaramuzza. Time Lens++: Event-based frame interpolation with parametric non-linear flow and multi-scale fusion. In Proc. of Computer Vision and Pattern Recognition, pages 17755-17764, 2022. 2, 3 +[52] Stepan Tulyakov, Daniel Gehrig, Stamatios Georgoulis, Julius Erbach, Mathias Gehrig, Yuanyou Li, and Davide Scaramuzza. Time Lens: Event-based video frame interpolation. In Proc. of Computer Vision and Pattern Recognition, pages 16155-16164, 2021. 2, 3 +[53] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6 +[54] Fang Xu, Lei Yu, Bishan Wang, Wen Yang, Gui-Song Xia, Xu Jia, Zhendong Qiao, and Jianzhuang Liu. Motion deblurring with real events. In Proc. of International Conference on Computer Vision, pages 2583-2592, 2021. 3, 4 +[55] Qingsen Yan, Lei Zhang, Yu Liu, Yu Zhu, Jinqiu Sun, Qinfeng Shi, and Yanning Zhang. Deep HDR imaging via a non-local network. IEEE Transactions on Image Processing, 29:4308-4322, 2020. 2 +[56] Zhiyang Yu, Yu Zhang, Deyuan Liu, Dongqing Zou, Xijun Chen, Yebin Liu, and Jimmy S Ren. Training weakly supervised video frame interpolation with events. In Proc. of + +International Conference on Computer Vision, pages 14589-14598, 2021. 3 +[57] Cheng Zhang, Shaolin Su, Yu Zhu, Qingsen Yan, Jinqiu Sun, and Yanning Zhang. Exploring and evaluating image restoration potential in dynamic scenes. In Proc. of Computer Vision and Pattern Recognition, pages 2067-2076, 2022. 1 +[58] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. 
The unreasonable effectiveness of deep features as a perceptual metric. In Proc. of Computer Vision and Pattern Recognition, pages 586-595, 2018. 6 +[59] Xiang Zhang and Lei Yu. Unifying motion deblurring and frame interpolation with events. In Proc. of Computer Vision and Pattern Recognition, pages 17765-17774, 2022. 2 +[60] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 4 +[61] Jing Zhao, Ruiqin Xiong, Hangfan Liu, Jian Zhang, and Tiejun Huang. Spk2Imgnet: Learning to reconstruct dynamic scene from continuous spike stream. In Proc. of Computer Vision and Pattern Recognition, pages 11996-12005, 2021. 1, 4, 6 +[62] Yajing Zheng, Lingxiao Zheng, Zhaofei Yu, Boxin Shi, Yonghong Tian, and Tiejun Huang. High-speed image reconstruction through short-term plasticity for spiking cameras. In Proc. of Computer Vision and Pattern Recognition, pages 6358-6367, 2021. 1, 4 +[63] Lin Zhu, Siwei Dong, Tiejun Huang, and Yonghong Tian. A retina-inspired sampling method for visual texture reconstruction. In Proc. of International Conference on Multimedia and Expo. 1 +[64] Lin Zhu, Siwei Dong, Jianing Li, Tiejun Huang, and Yonghong Tian. Retina-like visual image reconstruction via spiking neural model. In Proc. of Computer Vision and Pattern Recognition, pages 1438-1446, 2020. 1, 4 +[65] Yunhao Zou, Yinqiang Zheng, Tsuyoshi Takatani, and Ying Fu. Learning to reconstruct high speed and high dynamic range videos from events. In Proc. of Computer Vision and Pattern Recognition, pages 2024-2033, 2021. 1 \ No newline at end of file diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/images.zip b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..48faef29099edb6ee57c5eaa181b9499de9d4231 --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fb4d904a16199adc8818509f125a1ad971e7f16008abe3d80a503131d0d2040 +size 778787 diff --git a/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/layout.json b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f7727829d50c746382c04b57a74db1dac56fafd9 --- /dev/null +++ b/2023/1000 FPS HDR Video With a Spike-RGB Hybrid Camera/layout.json @@ -0,0 +1,10363 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 121, + 103, + 473, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 103, + 473, + 121 + ], + "spans": [ + { + "bbox": [ + 121, + 103, + 473, + 121 + ], + "type": "text", + "content": "1000 FPS HDR Video with a Spike-RGB Hybrid Camera" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": "Yakun Chang" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Chu Zhou" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Yuchen Hong" + }, + { + 
"bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Liwen Hu" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Chao Xu" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Tiejun Huang" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "text", + "content": " Boxin Shi" + }, + { + "bbox": [ + 53, + 141, + 547, + 157 + ], + "type": "inline_equation", + "content": "^{1,2*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 158, + 530, + 228 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 66, + 158, + 530, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 158, + 530, + 171 + ], + "spans": [ + { + "bbox": [ + 66, + 158, + 530, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 66, + 158, + 530, + 171 + ], + "type": "text", + "content": " National Key Laboratory for Multimedia Information Processing, School of Computer Science, Peking University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 172, + 524, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 172, + 524, + 186 + ], + "spans": [ + { + "bbox": [ + 78, + 172, + 524, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 78, + 172, + 524, + 186 + ], + "type": "text", + "content": " National Engineering Research Center of Visual Technology, School of Computer Science, Peking University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 186, + 519, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 186, + 519, + 228 + ], + "spans": [ + { + "bbox": [ + 83, + 186, + 519, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 83, + 186, + 519, + 228 + ], + "type": "text", + "content": " National Key Laboratory of General AI, School of Intelligence Science and Technology, Peking University {yakunchang, zhou_chu, huliwen, tjhuang, shiboxin}@pku.edu.cn yuchenhong.cn@gmail.com, xuchao@cis.pku.edu" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "spans": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 281, + 290, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 290, + 593 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 290, + 593 + ], + "type": "text", + "content": "Capturing high frame rate and high dynamic range (HFR&HDR) color videos in high-speed scenes with conventional frame-based cameras is very challenging. The increasing frame rate is usually guaranteed by using shorter exposure time so that the captured video is severely interfered by noise. Alternating exposures can alleviate the noise issue but sacrifice frame rate due to involving long-exposure frames. 
The neuromorphic spiking camera records high-speed scenes of high dynamic range without colors using a completely different sensing mechanism and visual representation. We introduce a hybrid camera system composed of a spiking and an alternating-exposure RGB camera to capture HFR&HDR scenes with high fidelity. Our insight is to bring each camera's superiority into full play. The spike frames, with accurate fast motion information encoded, are firstly reconstructed for motion representation, from which the spike-based optical flows guide the recovery of missing temporal information for long-exposure RGB images while retaining their reliable color appearances. With the strong temporal constraint estimated from spike trains, both missing and distorted colors cross RGB frames are recovered to generate time-consistent and HFR color frames. We collect a new Spike-RGB dataset that contains 300 sequences of synthetic data and 20 groups of real-world data to demonstrate 1000 FPS HDR videos outperforming HDR video reconstruction methods and commercial high-speed cameras." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 605, + 128, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 128, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 128, + 618 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 626, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 626, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 626, + 287, + 675 + ], + "type": "text", + "content": "The spiking camera [17] and event camera [10] are neuromorphic sensors working differently from conventional frame-based digital cameras, which have many attractive characteristics, e.g., high-speed (perceiving scene" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 308, + 254, + 545, + 392 + ], + "blocks": [ + { + "bbox": [ + 308, + 254, + 545, + 392 + ], + "lines": [ + { + "bbox": [ + 308, + 254, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 545, + 392 + ], + "type": "image", + "image_path": "60240d2e429f7bbaa254aba2a45842b1ba35e1ca5ec1952b36f6954e438ed27c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "lines": [ + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "spans": [ + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "text", + "content": "Figure 1. (a) We build a spike-RGB hybrid camera system to achieve 1000 FPS HDR video reconstruction1. (b) The RGB camera uses alternating-exposure mode with a frame rate of 60 FPS, where " + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "inline_equation", + "content": "4t_s" + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "text", + "content": " are the short, middle, and long exposure in our setup, respectively. 
The sampling frequency of the spiking camera is " + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "inline_equation", + "content": "20000\\mathrm{Hz}" + }, + { + "bbox": [ + 305, + 397, + 547, + 464 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 473, + 546, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 546, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 546, + 641 + ], + "type": "text", + "content": "radiance changes at the microsecond level), high dynamic range (HDR, " + }, + { + "bbox": [ + 304, + 473, + 546, + 641 + ], + "type": "inline_equation", + "content": "\\geq 100" + }, + { + "bbox": [ + 304, + 473, + 546, + 641 + ], + "type": "text", + "content": " dB). However, since they only record neuromorphic signals, i.e., spike trains [64] and event streams [25], which are less friendly to the human visual system and cannot be directly processed by CNN-based models for video frames [40, 41], preprocessing modules that convert neuromorphic signals into compatible formats are usually required when applying them to frame-based vision algorithms [61, 65]. In comparison with event streams, spike trains contain concrete textured information of scene radiances, which are more suitable for reconstructing high frame rate (HFR) videos [61-64]. However, since the spiking camera only encodes the absolute intensities of environments, colors are absent in the reconstructed video frames." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 642, + 547, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 691 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 691 + ], + "type": "text", + "content": "When capturing with a frame-based RGB camera, quality of recorded colors for each frame is determined by trading off the exposure time, ambient light, and target objects' moving speed [57]. For high-speed dynamic scenes, it often" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 683, + 136, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 683, + 136, + 693 + ], + "spans": [ + { + "bbox": [ + 58, + 683, + 136, + 693 + ], + "type": "text", + "content": "*Corresponding author." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 694, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 694, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 694, + 286, + 712 + ], + "type": "text", + "content": "Project page: https://changyakun.github.io/1000FPS-HDR" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 702, + 479, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 702, + 479, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 702, + 479, + 714 + ], + "type": "text", + "content": "The video result is available on our project page." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22180" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "text", + "content": "requires to set shorter exposure time to guarantee a higher frame rate and avoid motion blur. In such a situation, since the exposure time is extremely short, the quality of video frames would be severely degenerated due to noise. Merging a burst of short-exposure images is a simple yet effective approach to reduce the noise level [8, 11], however, the color shift caused by noise is difficult to be corrected. Fusing alternating-exposure (using short, middle, and long exposures) RGB frames is commonly used for synthesizing well-exposed images [3, 19, 21]. However, they are not suitable for high-speed scenes. As illustrated in Fig. 1(b), given a sequence of alternating-exposure RGB images, the total time from the starting of the current exposure to the starting of the next frame, denoted by " + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "text", + "content": ", is consistent for all frames, and it is composed of the exposure time " + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{exp}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "text", + "content": " and interval time " + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{itv}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 443 + ], + "type": "text", + "content": " (containing the readout and waiting time). It can be seen that the information during interval time is lost, and the frame rate they could achieve is thus limited to dozens of FPS. Another possible solution is to build a hybrid camera system to capture low frame rate (LFR) color sequence and high-speed neuromorphic signals simultaneously, then use the neuromorphic signals to interpolate [51, 52] and deblur [14, 18, 59] the RGB frames. However, the saturated regions are usually ignored, leaving the colors of the interpolated frames still unsatisfactory. HDR intensity map (does not contain any chromatic information) built from the neuromorphic signals can also be used to compensate the missing textures in the saturated regions [15]. 
But such an approach is not robust for scenes with large areas of saturated regions, due to the heavy reliance on the chrominance compensation network to hallucinate the color." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 444, + 289, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 444, + 289, + 671 + ], + "spans": [ + { + "bbox": [ + 47, + 444, + 289, + 671 + ], + "type": "text", + "content": "In this paper, we propose an all-in-one framework to reconstruct HRF (Fig. 1(a), at the level of 1000 FPS) color videos with high fidelity from the spike trains and a series of alternating-exposure frames captured by a Spike-RGB hybrid camera system simultaneously (Fig. 1(b)). To make full use of the color information in RGB images, we propose a three-stage strategy to deal with different situations using specific modules: (i) For the blurry middle- and long-exposure images, we design a spike guided deblurring module to recover the corresponding sharp images with faithful colors; (ii) for missing colors during the interval time, we design a spike guided interpolation module that exploits the abundant motion information (SC-Flow [16]) obtained from spike trains; (iii) for suppressing noise in short-exposure images and maintaining temporal consistency, we design a merging module, which exploits the variant of recurrent U-Net [42] as its backbone, to complete the HFR&HDR color video reconstruction process. To summarize, this paper makes contributions by proposing:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 57, + 677, + 288, + 715 + ], + "type": "text", + "content": "- an all-in-one framework to reconstruct high-speed HDR color video by jointly fusing spike trains and a sequence of alternating-exposure frames;" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 315, + 72, + 547, + 159 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 315, + 72, + 545, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 545, + 120 + ], + "type": "text", + "content": "- a three-stage strategy fusing alternating exposures of RGB frames for the generation of well-exposure colors, via a recurrent convolution neural network for continuous frames interpolation guided by spike trains;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 315, + 122, + 547, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 122, + 547, + 159 + ], + "spans": [ + { + "bbox": [ + 315, + 122, + 547, + 159 + ], + "type": "text", + "content": "- a Spike-RGB hybrid camera system to demonstrate the applicability of the proposed method for capturing high-speed and high dynamic range scenes." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 163, + 547, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 163, + 547, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 163, + 547, + 224 + ], + "type": "text", + "content": "Experimental results show that the proposed method outperforms the state-of-the-art HDR video reconstruction method [3] and commercial cameras with the slow-motion photography capability in reconstructing 1000 FPS HDR color videos on synthetic data and real-world data." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 233, + 392, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 233, + 392, + 246 + ], + "spans": [ + { + "bbox": [ + 306, + 233, + 392, + 246 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 253, + 547, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 253, + 547, + 610 + ], + "spans": [ + { + "bbox": [ + 304, + 253, + 547, + 610 + ], + "type": "text", + "content": "HDR image and video reconstruction. The most common way to reconstruct HDR images is to fuse a set of LDR images with bracketed exposures [7, 34]. Since the results for dynamic scenes often contain ghosting artifacts, image alignment [28, 45] and deep learning [20, 55] are employed to reconstruct sharp HDR images. To better reduce ghosting artifacts, Lee et al. [24] and Shaw et al. [46] apply the estimated motion information from a high frame rate sequence to facilitate the HDR image synthesis. Messikommer et al. [35] also achieve HDR reconstruction by combining bracketed-exposure RGB images and events. There are methods being designed for HDR reconstruction from a single image. These methods cannot recover the missing textures in clipped regions [9, 44]. Abhiram and Chan [1] reconstruct HDR images with a quanta image sensor (QIS). Han et al. [15] find that the reconstructed intensity maps from event streams and spike trains contain abundant textures saturated in LDR images. Therefore, they exploit intensity maps to guide HDR image restoration. For the capturing of HDR videos, many existing methods use specialized hardware, such as scanline exposure [13], per-pixel exposure [37], or multiple sensors [33, 50]. Due to the particularity of hardware, these methods are limited to narrow applications. Merging alternating-exposure image sequences is the most common yet effective way to reconstruct HDR videos [12, 19, 21, 22, 30, 31]. Recently, Chen et al. [3] propose a coarse-to-fine network that performs alignment and fusion sequentially both in the image and feature space. However, these methods can only deal with LFR videos with about 20-60 FPS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 612, + 547, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 612, + 547, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 612, + 547, + 696 + ], + "type": "text", + "content": "HFR video reconstruction. There is plenty of data redundancy in capturing HFR videos directly by commercial high-speed cameras, e.g., the Phatom camera². 
Building a hybrid system with a high-resolution LFR camera and a low-resolution HFR camera, and utilizing HFR signals to reconstruct a sequence of sharp images from blurred images [2, 49] is a more data-efficient way for HFR video" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 702, + 481, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 481, + 714 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 481, + 714 + ], + "type": "text", + "content": "2https://www.phantomhighspeed.com/" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "22181" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 71, + 318, + 242 + ], + "blocks": [ + { + "bbox": [ + 51, + 71, + 318, + 242 + ], + "lines": [ + { + "bbox": [ + 51, + 71, + 318, + 242 + ], + "spans": [ + { + "bbox": [ + 51, + 71, + 318, + 242 + ], + "type": "image", + "image_path": "98c720d4cece126ecdfa4451934d7d939ae0de158ecc49548ba1c53ceaaf3e0d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "lines": [ + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "text", + "content": "Figure 2. (a) The pipeline of the proposed solution. It contains three steps: Step " + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "text", + "content": " spike preprocessing (Sec. 3.2), Step " + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "inline_equation", + "content": "②" + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "text", + "content": " RGB frame processing (Sec. 3.3), and Step " + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "inline_equation", + "content": "③" + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "text", + "content": " merging into HFR video (Sec. 3.4). Given the spike trains, we firstly estimate the optical flow from them as well as reconstruct spike frames. Secondly, we rectify the uneven brightness with a linear mapping function and use spike-guided deblurring (SG-deblur) to reconstruct sharp color frames. Finally, we use spike-guided frame interpolation (SG-interpolation) to recover the missing colors during " + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{itv}}" + }, + { + "bbox": [ + 46, + 248, + 545, + 314 + ], + "type": "text", + "content": ", and reconstruct time-consistent color frames. (b) and (c) show the detailed pipeline of SG-deblur and SG-interpolation." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 322, + 72, + 541, + 243 + ], + "blocks": [ + { + "bbox": [ + 322, + 72, + 541, + 243 + ], + "lines": [ + { + "bbox": [ + 322, + 72, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 322, + 72, + 541, + 243 + ], + "type": "image", + "image_path": "a3796d73d2d27d9cb39525bfe02cc066e9e00718bd42123a115adabad161f76c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 324, + 289, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 324, + 289, + 575 + ], + "spans": [ + { + "bbox": [ + 46, + 324, + 289, + 575 + ], + "type": "text", + "content": "reconstruction. Li et al. [26] use a stereo pair of low-resolution HFR and high-resolution LFR cameras to calculate the fast motion and the depth map. Avinash et al. [38] compute optical flows between two existing frames by utilizing the content of auxiliary HFR videos. Jiang et al. [18] recover a sharp video sequence from a motion-blurred image by integrating the visual and temporal knowledge that is contained in the events. Xu et al. [54] achieve real-world event-based deblurring with a self-supervised learning method. Tulyakov et al. [52] propose the Time Lens that utilizes high-speed events to achieve video frame interpolation (VFI). Following that, Time Lens++ [51] further improves the performance. For the reason that real data are absent, Yu et al. [56] propose a weakly supervised method with the help of subpixel attention learning. Although the event-based interpolation realizes HFR video reconstruction [51, 52], the recovered quality of colors is usually unsatisfactory due to that single exposure cannot balance artifacts from noise and blur, we therefore propose to jointly fuse the high-speed spike signals and alternating-exposure RGB frames to achieve high-quality reconstruction." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 586, + 113, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 113, + 599 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 113, + 599 + ], + "type": "text", + "content": "3. Approach" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 605, + 114, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 114, + 616 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 114, + 616 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "content": "Our goal is to reconstruct HFR&HDR videos from the binary spike trains " + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "inline_equation", + "content": "\\mathbb{S}(x,y) = \\{s(x,y,t)\\} (s(x,y,t) = 1" + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "content": " if the accumulated photons reach a certain threshold, then the accumulator is reset and " + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "inline_equation", + "content": "s(x,y,t) = 0" + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "content": " before the next spike is fired [17]) and LFR alternating-exposure RGB frames " + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "inline_equation", + "content": "\\mathbb{B} = \\{\\mathbf{B}_k\\} ^3" + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "text", + "content": " denote the coordinates of spikes, " + }, + { + "bbox": [ + 46, + 623, + 287, + 696 + ], + "type": "inline_equation", + "content": "t" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 324, + 545, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 324, + 545, + 360 + ], + "spans": [ + { + "bbox": [ + 306, + 324, + 545, + 360 + ], + "type": "text", + "content": "denotes the timestamp, and " + }, + { + "bbox": [ + 306, + 324, + 545, + 360 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 306, + 324, + 545, + 360 + ], + "type": "text", + "content": " denotes the index of an RGB image in the sequence. As shown in Fig. 2(a), to achieve this goal, we design a pipeline that consists of three steps:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "text", + "content": "Step ①: Spike preprocessing (Sec. 3.2). 
We estimate the optical flow " + }, + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_i" + }, + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "text", + "content": " and spike frames " + }, + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 305, + 360, + 545, + 384 + ], + "type": "text", + "content": " from the spike trains:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 353, + 393, + 545, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 393, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 353, + 393, + 545, + 407 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {i} (x, y) = \\mathcal {S C} \\left(s \\left(x, y, t _ {i} \\rightarrow t _ {i + 1}\\right)\\right), \\tag {1}", + "image_path": "9bfb7b4871ca16e9951b8500393932367e85f3318979fe52ef0aca1f006da68c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 358, + 415, + 545, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 415, + 545, + 444 + ], + "spans": [ + { + "bbox": [ + 358, + 415, + 545, + 444 + ], + "type": "interline_equation", + "content": "\\mathbf {I} _ {i} (x, y) = \\int_ {t _ {i} t _ {f} / 2} ^ {t _ {i} + t _ {f} / 2} s (x, y, t) d t, \\tag {2}", + "image_path": "bd668944d5a52cda57901dcc0225606336bc385948d4e7853df3c57ce6cdaf38.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathcal{SC}(\\cdot)" + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": " denotes optical flow estimation with Hu et al.'s [16] method, " + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": " denote the index and timestamp of spike frames, and " + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "inline_equation", + "content": "t_f" + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": " is the time window. In Sec. 3.2, we further super-resolve " + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 304, + 449, + 545, + 497 + ], + "type": "text", + "content": " at the feature space." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": "Step ②: RGB frame preprocessing (Sec. 3.3). 
For the 60 FPS RGB images captured with alternating exposures, i.e., " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "t_s, 4t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": ", we firstly unify the uneven brightness with a linear mapping function. Then we conduct motion deblurring for " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "4t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " images. For the " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " images, when " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " is sufficiently short, i.e., 1 ms, we assume the short-exposure image is free from motion blur, and take " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " as the reference time for the motion deblurring. Consequently, we can recover 4 and 12 sharp images from " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "4t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " images, respectively. As shown in Fig. 
2(b), we use " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^l" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " to denote a blurry image, and the motion deblurring operation can be formulated as: " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_j^l\\} = \\mathcal{R}(\\mathbf{B}^l, \\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\}, \\mathbf{B}^s)" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " is the index of a recovered sharp image, " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{R}(\\cdot)" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " is sharp image reconstruction, " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_j | j \\in \\mathcal{N}_l\\}" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " is the corresponding spike frames, and " + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^s" + }, + { + "bbox": [ + 304, + 498, + 546, + 676 + ], + "type": "text", + "content": " is the nearest short-exposure RGB frame." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Step ③: Merging into HFR video (Sec. 3.4). Following Step ②, for the interval time " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "(T_{\\mathrm{itv}})" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " that colors are not recorded, we bidirectionally query two nearest sharp RGB" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 212, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 212, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 212, + 713 + ], + "type": "text", + "content": "3In this paper, we use " + }, + { + "bbox": [ + 58, + 702, + 212, + 713 + ], + "type": "inline_equation", + "content": "\\{\\cdot\\}" + }, + { + "bbox": [ + 58, + 702, + 212, + 713 + ], + "type": "text", + "content": " to denote collections." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22182" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 79, + 94, + 116 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 81, + 79 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 81, + 79 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 81, + 79 + ], + "type": "text", + "content": "warping" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 48, + 79, + 94, + 116 + ], + "lines": [ + { + "bbox": [ + 48, + 79, + 94, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 79, + 94, + 116 + ], + "type": "image", + "image_path": "69d8b311ce36a4530ecf232650dd28197899054c0313144d824fc179e70d4d2f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 117, + 287, + 137 + ], + "lines": [ + { + "bbox": [ + 47, + 117, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 47, + 117, + 287, + 137 + ], + "type": "text", + "content": "Figure 3. For the sake of increasing spatial resolution, we adopt flow-based warping to merge adjacent 5 spike frames." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 97, + 72, + 143, + 116 + ], + "blocks": [ + { + "bbox": [ + 97, + 72, + 143, + 116 + ], + "lines": [ + { + "bbox": [ + 97, + 72, + 143, + 116 + ], + "spans": [ + { + "bbox": [ + 97, + 72, + 143, + 116 + ], + "type": "image", + "image_path": "cf39b78ad3ae1e85fd6294c3574fe337a9e139e2680711172548c5216ff96c0f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 144, + 72, + 190, + 116 + ], + "blocks": [ + { + "bbox": [ + 144, + 72, + 190, + 116 + ], + "lines": [ + { + "bbox": [ + 144, + 72, + 190, + 116 + ], + "spans": [ + { + "bbox": [ + 144, + 72, + 190, + 116 + ], + "type": "image", + "image_path": "ae9042900c55487e39988804c1314cb26d945fa12eb7432a4fe8c52019a9b1ae.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 192, + 72, + 239, + 116 + ], + "blocks": [ + { + "bbox": [ + 192, + 72, + 239, + 116 + ], + "lines": [ + { + "bbox": [ + 192, + 72, + 239, + 116 + ], + "spans": [ + { + "bbox": [ + 192, + 72, + 239, + 116 + ], + "type": "image", + "image_path": "c8d6dedbab383c4cabfdbf4100b648119d41bdf85f6b86c070cb6ca3c7313a25.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 240, + 72, + 287, + 116 + ], + "blocks": [ + { + "bbox": [ + 240, + 72, + 287, + 116 + ], + "lines": [ + { + "bbox": [ + 240, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 240, + 72, + 287, + 116 + ], + "type": "image", + "image_path": "49bbef8a2cd1ba9b8519bf1120a392f74430894e19e52a34957f5c27c82657ef.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": "images " + }, + { + 
"bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " for each spike frame " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": ", and get the warped images " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " with optical flow, where " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "+" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "-" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " denote the forward and backward warping, respectively. In Fig. 2(c), we provide an illustration of the interpolation procedure. Finally, as shown in Fig. 4, we reconstruct time-consistent color frames, and each frame " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_i" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " is generated by merging the spike frame " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{C}_i\\}_{1}, \\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 47, + 143, + 287, + 240 + ], + "type": "text", + "content": " with the strong constraint of optical flow." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 248, + 164, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 248, + 164, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 248, + 164, + 261 + ], + "type": "text", + "content": "3.2. Spike preprocessing" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": "The optical flow estimation and spike frame reconstruction using in Eqn. (1) and Eqn. (2) are theoretically, yet the reconstructed frames practically have two issues: Since the integration time " + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "inline_equation", + "content": "t_f" + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": " is very short, noise is relatively strong; the spatial resolution of the first generation spiking camera (VidarOne [17]) is much lower than the RGB camera. To reduce the noise and increase the spatial resolution, inspired by the burst-based super-resolution [4] and denoising [27] for conventional RGB images, it is feasible to merge a group of adjacent spike frames with the help of spatial alignment. 
Moreover, thanks to the continuous motion recording capability of spiking cameras, the optical flow [16] estimated from spike trains makes the alignment even more stable than RGB images. As illustrated in Fig. 3, we design a computationally efficient module for spike frames, which is formulated as: " + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_i = \\{\\mathcal{W}_{\\mathbf{F}_{j\\to i}}(\\mathbf{I}_j)|j\\in \\mathcal{N}_i\\}" + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\mathcal{F}_{j\\to i}}(\\cdot)" + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": " denotes the flow-based warping operation, " + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_i" + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": " denotes a collection of adjacent frames. Then, we feed " + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_i" + }, + { + "bbox": [ + 47, + 267, + 288, + 567 + ], + "type": "text", + "content": " to a set of convolutional layers, and we use PixelShuffle [47] to increase the spatial resolution while decreasing the channel of features. It should be noted that the method for spike frame reconstruction is not unique, which means users can choose other learning-based methods [61, 62, 64]. However, those deep learning models are relatively heavy, and less efficient as a submodule fitting to our pipeline." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 575, + 192, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 192, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 192, + 588 + ], + "type": "text", + "content": "3.3. RGB image preprocessing" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "text", + "content": "RGB linear mapping. Following previous methods for HDR video reconstruction [3, 19, 21], we first unify the brightness of alternating-exposure RGB frames. Since we use an industrial camera (details in Sec. 3.5) that can acquire data without a nonlinear radiometric response function, the linearity of the captured frames is maintained. We find that the brightness of the frames can maintain a linear relationship with the duration of exposure time. Hence we use the global linear mapping to unify the frame brightness: " + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\alpha \\cdot \\mathbf{B}_k(x,y)\\rightarrow \\mathbf{B}_k(x,y)" + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 47, + 594, + 287, + 713 + ], + "type": "text", + "content": " denotes a linear scalar." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": "Spike-guided deblurring. The physical model of the blurring process can be simply formulated as the average of a group of sharp images, i.e., " + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^l (x,y) = \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)" + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": " denotes the number of sharp images. However, due to the limited dynamic range of the RGB camera, that simplified equation does not hold in the clipped regions of real-world long-exposure frames. In general we should have: " + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^l (x,y)\\leq \\frac{1}{N}\\sum_{j = 1}^{N}\\mathbf{B}_j^l (x,y)" + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": ". Therefore, for reconstructing a sequence of sharp HDR images from " + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^l" + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": ", we divide it into two sub-tasks: (i) For the well-exposure regions, we use the sharp spike frames to guide motion deblurring; (ii) for the clipped regions where colors are lost, we compensate them with well-retained colors extracted from the adjacent short-exposure image " + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{B}^s" + }, + { + "bbox": [ + 304, + 72, + 545, + 241 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": "Figure 2(b) shows the spike-guided deblurring (SG-deblur) from " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_l" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_l" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " may be a middle- or long-exposure image). Similar to Xu et al. [54] that exploit event frames to motion deblurring, we first concatenate " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_l" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_l^j\\}" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": ", then extract shallow features and increase feature channels with PixelShuffle [47], which is followed by a set of residual dense blocks (RDBs) [60] and a decoder. 
To make the colors in over-exposure regions be compensated by the adjacent short-exposure RGB image " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_j^s" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": ", we warp the short-exposure image with the optical flow estimated from spike trains: " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_j^s = \\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\mathbf{B}^s)" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_{\\mathbf{F}_{s\\rightarrow j}}(\\cdot)" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " denotes the warping operation from timestamp " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "t_s" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " to the timestamp of " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "t_j" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": ". Subsequently, we extract features from " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_l^{s\\rightarrow j}\\}" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " and add residual links between them and the decoder. Finally, we obtain a sequence of sharp color images. Note that the SG-deblur for the middle- and long-exposure RGB images share the same architecture while the parameters are not shareable. SG-deblur outputs four images for both " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "4t_s" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " frames. For the case of " + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "inline_equation", + "content": "12t_s" + }, + { + "bbox": [ + 304, + 242, + 546, + 481 + ], + "type": "text", + "content": " frame, we interpolate the 4 frames to 12 frames with flow-based warping." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 482, + 546, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 546, + 673 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 546, + 673 + ], + "type": "text", + "content": "Next, we briefly explain the reason why this event-based model [54] can be applied to a spike-based task. Both event streams and spike trains with the high-speed property have been used for motion deblurring and latent frame reconstruction [14,18,54]. It is necessary to convert them to event frames and spike frames, both of which belong to the category of 2D images. But event frames and spike frames have different physical meanings: Pixel values in an event frame reveal the residual (relatively sparse information) between two adjacent frames, while pixel values in a spike frame represent exactly the texture (relatively dense information) of the corresponding frame. 
Since both event frames and spike frames are 2D images and the spike frames have denser texture information, we can replace event frames in such a model with spike frames, so as to make the solution to the problem more well-posed." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 682, + 443, + 695 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 682, + 443, + 695 + ], + "spans": [ + { + "bbox": [ + 306, + 682, + 443, + 695 + ], + "type": "text", + "content": "3.4. Merging into HFR video" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "content": "RGB interpolation. Given each middle- and long-exposure" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22183" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 545, + 249 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 545, + 249 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 545, + 249 + ], + "type": "image", + "image_path": "f70c457f89317c1a8dfbd75c2c3839b8139e7a74e3c29226b3c581376d4d252a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 251, + 546, + 285 + ], + "lines": [ + { + "bbox": [ + 46, + 251, + 546, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 546, + 285 + ], + "type": "text", + "content": "Figure 4. Network architecture of the CNN-RNN-based merging module for reconstructing HFR&HDR videos from alternating-exposure RGB frames and HFR spike frames. This module outputs HDR color frames in a step-wise manner. We unroll the module for " + }, + { + "bbox": [ + 46, + 251, + 546, + 285 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 251, + 546, + 285 + ], + "type": "text", + "content": " steps during training." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "spans": [ + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": "frame, SG-deblur recovers 4 and 12 images. Therefore, the recovered RGB frames have a frame rate of " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "340^{4}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " FPS. But temporal distribution of them is quite uneven, e.g., there is no recovered color frame interval time " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{itv}}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": ". Fortunately, the spike train contains continuous and dense texture information in the temporal domain. In Step ③, we use the SG-interpolation module to interpolate RGB frames into a sequence of uniformly distributed images. 
For each spike frame " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_i" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": ", we bidirectionally query its two nearest recovered RGB frames " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{B}_i^+, \\mathbf{B}_i\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " and interpolate two color frames " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " with the optical flow estimated from spike trains. When " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " are fed into our merging module, they are weighted by a linear coefficient " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "(\\oplus" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " in Fig. 4) related to the distance between " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{t_+, t\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{t_+, t\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": " denote the timestamp of " + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 46, + 289, + 289, + 474 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": "Merging module. The aforementioned modules reconstruct coarse HFR video frames, which need to be refined for smoothing over time. We build a CNN-RNN-based HFR&HDR video reconstruction network to merge the spike frames and RGB frames, which is shown in Fig. 4. 
The merging module consists of three encoders, i.e., " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_I" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_B" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_C" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", which are respectively designed for feature extraction from the current spike frame " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_i" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", the interpolated RGB images " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\{\\hat{\\mathbf{B}}_i^+, \\hat{\\mathbf{B}}_i\\}" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", and the previously reconstructed image " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{i-1}" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ". In " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_I" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", we use PixelShuffle [47] to make the spatial resolution of spike features consistent with RGB features. The extracted features are denoted as " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_I" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{E}_B, \\mathbf{E}_{B+}\\}" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{C_i-1}" + }, + { + "bbox": [ + 46, + 474, + 287, + 630 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 630, + 287, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 287, + 691 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 287, + 691 + ], + "type": "text", + "content": "Considering the spike frames and RGB frames may not be perfectly aligned at pixel level for real-world data, we add deformable convolution layers [6] to improve the robustness to this issue. In order to output flicker-free color frames, we adopt two constraints in the merging module:" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 319, + 314, + 533, + 361 + ], + "blocks": [ + { + "bbox": [ + 306, + 288, + 545, + 309 + ], + "lines": [ + { + "bbox": [ + 306, + 288, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 288, + 545, + 309 + ], + "type": "text", + "content": "Table 1. Details of the composition of the dataset (res. is the abbreviation of resolution)." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 314, + 533, + 361 + ], + "lines": [ + { + "bbox": [ + 319, + 314, + 533, + 361 + ], + "spans": [ + { + "bbox": [ + 319, + 314, + 533, + 361 + ], + "type": "table", + "html": "
dataRGB res.spike res.train/testtime
full-synthetic500×800250×40080/200.1s
real-synthetic600×800250×400160/400.101s
real-world484×784242×392-/200.101s
", + "image_path": "9754e7ba0b3f019bb54d91503f166a8e553e6c6c01ac31fde44a015920f2d53f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": "(i) We add three ConvLSTM layers [48] to feed previous states forward in temporal domain; (ii) we feed " + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{C_i}" + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": " into the current step and align it with the current features with flow-based warping. We then use a decoder to reversely map deep features to the current output HDR frame " + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_i" + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": ". We achieve the multi-module signal fusion by adding concatenation links between " + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{E}_{C_i}" + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_B" + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_{B+}\\}" + }, + { + "bbox": [ + 304, + 363, + 545, + 449 + ], + "type": "text", + "content": " and the decoder." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 455, + 440, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 455, + 440, + 468 + ], + "spans": [ + { + "bbox": [ + 306, + 455, + 440, + 468 + ], + "type": "text", + "content": "3.5. Implementation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 545, + 545 + ], + "type": "text", + "content": "Due to the setting of our method being different from existing HDR and video frame interpolation methods, there are no suitable datasets for training and testing our method. Therefore, we collect a new one with three components, whose details are summarized in Table 1 and sample images are provided in Fig. 5." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 546, + 546, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 546, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 546, + 677 + ], + "type": "text", + "content": "Part 1: Full-synthetic data. This part of data is obtained by using the spike simulator proposed by Hu et al. [16]. We render 2000 RGB images with their computer graphics based solution as ground truth and generate 2000 spike planes (0.1 s). Since the photons arriving at the sensor follow Poisson probability distribution [43], we synthesize alternating-exposure 60 FPS RGB frames with a Poisson noise model. For the full synthetic data, we randomly select starting time of each group of training data. 
We randomly shift the RGB frames within 3 pixels to make the trained model more robust to the misalignment in real-world data." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Part 2: Real-synthetic data. To reduce the domain gap between full-synthetic data and real-world data, we design a method to collect real-synthetic (the scenes are real while" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "type": "text", + "content": "4From " + }, + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "type": "inline_equation", + "content": "60 = 20\\times 3" + }, + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 58, + 702, + 193, + 712 + ], + "type": "inline_equation", + "content": "340 = 20\\times (1 + 4 + 12)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "22184" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 286, + 171 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 286, + 171 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 286, + 171 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 286, + 171 + ], + "type": "image", + "image_path": "176aa9dfa2ecaf20f74ee48cdeb45fed0d736431be6a2a3e803a4ccf3f70da7d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 182, + 287, + 215 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 287, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 287, + 215 + ], + "type": "text", + "content": "Figure 5. Example frames from the proposed dataset. Each group shows three alternating-exposure RGB frames (left, from top to bottom rows) and the corresponding spike signals (right)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 224, + 287, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 224, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 287, + 320 + ], + "type": "text", + "content": "the spike trains are synthetic) data, and we use this part of data to fine-tune our model. The RGB frames are captured with an alternating-exposure mode in slow-motion scenes. Then we synthesize blurry middle-exposure RGB frames by averaging 4 adjacent middle-exposure RGB images, and blurry long-exposure RGB frames are synthesized in a similar way. We synthesize spike trains from ground truth RGB frames with the integrate-and-fire methodology [61]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 320, + 287, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 320, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 46, + 320, + 287, + 403 + ], + "type": "text", + "content": "Part 3: Real-world data. We build a Spike-RGB hybrid camera (Fig. 
6) to capture real-world data. The system is composed of an industrial camera (Basler acA800-510uc" + }, + { + "bbox": [ + 46, + 320, + 287, + 403 + ], + "type": "inline_equation", + "content": "^5" + }, + { + "bbox": [ + 46, + 320, + 287, + 403 + ], + "type": "text", + "content": ") with alternating exposure capability and a spiking camera [17]. There is a beam splitter in front of the two sensors. We conduct geometric calibration and time synchronization to align bimodal signals collected by them." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": "Loss and training. The SG-deblur module and the merging module reconstruct images in the linear luminance domain, which covers a high dynamic range of pixel values. Following existing methods for HDR reconstruction, for the output images " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": ", we compress the range of pixel values by applying the following function proposed by Kalantari et al. [20]: " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\mathbf{C}) = \\log (1 + \\mu \\mathbf{C}) / \\log (1 + \\mu)" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(\\cdot)" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": " denotes the tone mapping operation and " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": " denotes the amount of compression. For these two modules, we employ widely used " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": " loss, Structure similarity (SSIM) loss [53], and Learned Perceptual Image Patch Similarity (LPIPS) loss [58]. 
The total loss at step " + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 403, + 287, + 559 + ], + "type": "text", + "content": " for both the motion deblurring and merging modules is" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 568, + 287, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 568, + 287, + 582 + ], + "spans": [ + { + "bbox": [ + 59, + 568, + 287, + 582 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} (i) = \\mathcal {L} _ {l _ {1}} (i) + \\beta_ {1} \\mathcal {L} _ {\\text {S S I M}} (i) + \\beta_ {2} \\mathcal {L} _ {\\text {L P I P S}} (i), \\tag {3}", + "image_path": "c4cfdd21fb315d57e0bdaa0ef1a313caf5b595120ef0cc189d8288cd6e44e81b.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 1" + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 1" + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": ". For spike-based optical flow estimation using [16], we fine-tune the parameters with full-synthetic data. During training, we resize the RGB images and spike frames to " + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "inline_equation", + "content": "512 \\times 800" + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "inline_equation", + "content": "256 \\times 400" + }, + { + "bbox": [ + 46, + 590, + 287, + 687 + ], + "type": "text", + "content": ". We implement our model with PyTorch, set the batch size to 4, and use ADAM optimizer during the training process. We first train the model on full-synthetic data. The SG-deblur module is trained with 50 epochs, before training the merging" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 545, + 175 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 545, + 175 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 545, + 175 + ], + "type": "image", + "image_path": "3a9785b5e9c025abd121037f9499ff36cfcf100a90bcaf5ae65255ec856a5815.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 184, + 545, + 206 + ], + "lines": [ + { + "bbox": [ + 305, + 184, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 305, + 184, + 545, + 206 + ], + "type": "text", + "content": "Figure 6. The prototype of our Spike-RGB imaging system composed of a spiking camera and an RGB camera." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": "module. 
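A minimal PyTorch sketch of the tone mapping and the per-step loss of Eq. (3) might look as follows; it assumes the losses are computed on tone-mapped outputs as in [20], uses the third-party pytorch_msssim and lpips packages for the SSIM and LPIPS terms, and picks mu = 5000 only as a placeholder since the compression amount is not specified here:

import torch
from pytorch_msssim import ssim   # third-party SSIM implementation
import lpips                      # perceptual metric of Zhang et al. [58]

MU = 5000.0                                        # compression amount mu (placeholder value)
lpips_fn = lpips.LPIPS(net='alex')                 # pretrained LPIPS network

def tonemap(c, mu=MU):
    # T(C) = log(1 + mu * C) / log(1 + mu), applied to linear-domain outputs C
    return torch.log(1.0 + mu * c) / torch.log(torch.tensor(1.0 + mu))

def total_loss(pred, gt, beta1=1.0, beta2=1.0):
    # Eq. (3): l1 term + beta1 * SSIM term + beta2 * LPIPS term, with beta1 = beta2 = 1
    tp, tg = tonemap(pred), tonemap(gt)            # pred, gt: (B, 3, H, W) in [0, 1] (assumed)
    l1 = torch.abs(tp - tg).mean()
    l_ssim = 1.0 - ssim(tp, tg, data_range=1.0)    # SSIM measures similarity, so use 1 - SSIM
    l_lpips = lpips_fn(tp * 2.0 - 1.0, tg * 2.0 - 1.0).mean()  # LPIPS expects [-1, 1] inputs
    return l1 + beta1 * l_ssim + beta2 * l_lpips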
We unroll the merging module for " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": " steps, and we find " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "M = 4" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": " achieves a suitable balance between training time and recovery quality. The total loss for the unrolled " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": " steps is " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{merge}} = \\sum_{i=1}^{M} \\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{total}}^{\\mathrm{M}}(i)" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": " denotes the total loss for the merging module at step " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": ". The initial learning rate for both two modules is 0.001, we decay it to " + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "inline_equation", + "content": "10^{-6}" + }, + { + "bbox": [ + 305, + 216, + 545, + 348 + ], + "type": "text", + "content": " with a linear strategy. For the real-synthetic data, we fine-tune another group of parameters to reduce the gap between synthetic data and real-world data. We use one NVIDIA Tesla A100 for training, and the training procedure consumes about 30 hours." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 359, + 388, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 359, + 388, + 373 + ], + "spans": [ + { + "bbox": [ + 306, + 359, + 388, + 373 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 379, + 541, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 379, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 306, + 379, + 541, + 392 + ], + "type": "text", + "content": "4.1. Quantitative Evaluation using Synthetic Data" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 398, + 545, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 398, + 545, + 481 + ], + "spans": [ + { + "bbox": [ + 305, + 398, + 545, + 481 + ], + "type": "text", + "content": "Validation on full-synthetic data. Figure 8 shows a group of results on full-synthetic data. We can see that both the flying objects in the short-exposure image and the oversaturated clouds (see the regions marked by boxes) in the long-exposure image are recovered successfully. The results with rich textures and consistent colors show the feasibility of our proposed method." 
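Accumulating the per-step loss over the M = 4 unrolled steps, i.e. L_merge = sum_{i=1}^{M} L_total^M(i), can be sketched as below; merging_module and its call signature are hypothetical placeholders for the paper's merging network, and total_loss is the Eq. (3) helper sketched earlier:

def merge_loss(merging_module, inputs, gt_frames, M=4):
    # Unroll the merging module for M steps and sum the per-step loss (L_merge).
    loss, prev = 0.0, None
    for i in range(M):
        pred = merging_module(inputs, prev, step=i)    # hypothetical interface: one refinement step
        loss = loss + total_loss(pred, gt_frames[i])   # supervise every unrolled step with Eq. (3)
        prev = pred                                    # previous reconstruction feeds the next step
    return loss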
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 482, + 545, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 482, + 545, + 565 + ], + "spans": [ + { + "bbox": [ + 305, + 482, + 545, + 565 + ], + "type": "text", + "content": "Evaluation on real-synthetic data. To the best of our knowledge, the proposed method is the first framework to reconstruct HFR&HDR videos with the combination of spike trains and alternating-exposure RGB frames. Therefore, it is unfair to compare our method with existing ones, i.e., Kalantari13 [21], Kalantari19 [19], and Chen21 " + }, + { + "bbox": [ + 305, + 482, + 545, + 565 + ], + "type": "inline_equation", + "content": "[3]^{6}" + }, + { + "bbox": [ + 305, + 482, + 545, + 565 + ], + "type": "text", + "content": ", which are designed for low frame rate HDR videos." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 565, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 565, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 565, + 545, + 685 + ], + "type": "text", + "content": "We choose a state-of-the-art HDR video reconstruction method Chen21 [3], which also uses alternating-exposure RGB frames (the closest setup to ours) as a reference. Figure 7 shows the reconstruction results on real-synthetic data of the proposed method and Chen21 [3]. Thanks to the complementary motion information provided by spike trains, the abundant color extracted from alternating-exposure RGB frames, and the accurate textures contained in spike frames, the proposed method is capable of reconstructing rich texture details with less motion blur. For ex" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "type": "text", + "content": "5https://www.baslerweb.com/en/products/camera/ area-scan-cameras/ace/aca800-510uc/" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "In this section, we use \"Last name of the first author+year\" as synonyms of methods for comparison." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22185" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 80, + 127, + 216 + ], + "blocks": [ + { + "bbox": [ + 75, + 71, + 96, + 79 + ], + "lines": [ + { + "bbox": [ + 75, + 71, + 96, + 79 + ], + "spans": [ + { + "bbox": [ + 75, + 71, + 96, + 79 + ], + "type": "text", + "content": "short" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 80, + 127, + 216 + ], + "lines": [ + { + "bbox": [ + 50, + 80, + 127, + 216 + ], + "spans": [ + { + "bbox": [ + 50, + 80, + 127, + 216 + ], + "type": "image", + "image_path": "afa1aa45dd7684f40beb4726479a3aba9c3cd0d96ec6f4bf23778aeeefabdc8f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 132, + 80, + 211, + 215 + ], + "blocks": [ + { + "bbox": [ + 157, + 71, + 182, + 79 + ], + "lines": [ + { + "bbox": [ + 157, + 71, + 182, + 79 + ], + "spans": [ + { + "bbox": [ + 157, + 71, + 182, + 79 + ], + "type": "text", + "content": "middle" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 80, + 211, + 215 + ], + "lines": [ + { + "bbox": [ + 132, + 80, + 211, + 215 + ], + "spans": [ + { + "bbox": [ + 132, + 80, + 211, + 215 + ], + "type": "image", + "image_path": "71cdc16eea019dcd1bd0dd0177ee5024f38cc34f796330d5d1eac074cf09b1e0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 216, + 182, + 225 + ], + "lines": [ + { + "bbox": [ + 170, + 216, + 182, + 225 + ], + "spans": [ + { + "bbox": [ + 170, + 216, + 182, + 225 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 214, + 80, + 292, + 216 + ], + "blocks": [ + { + "bbox": [ + 245, + 71, + 261, + 79 + ], + "lines": [ + { + "bbox": [ + 245, + 71, + 261, + 79 + ], + "spans": [ + { + "bbox": [ + 245, + 71, + 261, + 79 + ], + "type": "text", + "content": "long" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 214, + 80, + 292, + 216 + ], + "lines": [ + { + "bbox": [ + 214, + 80, + 292, + 216 + ], + "spans": [ + { + "bbox": [ + 214, + 80, + 292, + 216 + ], + "type": "image", + "image_path": "6162aa256a287f2f993030fdd823e1c193c5a5945396f892262b2ef1595ff2d3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 301, + 80, + 379, + 216 + ], + "blocks": [ + { + "bbox": [ + 326, + 72, + 346, + 79 + ], + "lines": [ + { + "bbox": [ + 326, + 72, + 346, + 79 + ], + "spans": [ + { + "bbox": [ + 326, + 72, + 346, + 79 + ], + "type": "text", + "content": "short" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 301, + 80, + 379, + 216 + ], + "lines": [ + { + "bbox": [ + 301, + 80, + 379, + 216 + ], + "spans": [ + { + "bbox": [ + 301, + 80, + 379, + 216 + ], + "type": "image", + "image_path": "6eb5166094979d17c7be16571a725ef333c07be4f97c0f8c35d1361d0ff8d59e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 383, + 80, + 462, + 216 + ], + "blocks": [ + { + "bbox": [ + 407, + 72, + 433, + 79 + ], + "lines": [ + { + "bbox": [ + 407, + 72, + 433, + 79 + ], + "spans": [ + { + "bbox": [ + 407, + 72, + 433, + 79 + ], + "type": "text", + "content": "middle" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 383, + 80, + 462, + 216 + ], + "lines": [ + { + "bbox": [ + 383, + 80, + 462, + 216 + ], + "spans": [ + { + "bbox": [ + 383, + 80, + 462, + 216 + ], + "type": "image", + "image_path": "18487dec66ddaa2019bfc213e532588aef073461a9d8d2d11543e80ebfbdbd84.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 421, + 215, + 433, + 225 + ], + "lines": [ + { + "bbox": [ + 421, + 215, + 433, + 225 + ], + "spans": [ + { + "bbox": [ + 421, + 215, + 433, + 225 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 465, + 80, + 544, + 216 + ], + "blocks": [ + { + "bbox": [ + 496, + 72, + 512, + 80 + ], + "lines": [ + { + "bbox": [ + 496, + 72, + 512, + 80 + ], + "spans": [ + { + "bbox": [ + 496, + 72, + 512, + 80 + ], + "type": "text", + "content": "long" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 465, + 80, + 544, + 216 + ], + "lines": [ + { + "bbox": [ + 465, + 80, + 544, + 216 + ], + "spans": [ + { + "bbox": [ + 465, + 80, + 544, + 216 + ], + "type": "image", + "image_path": "865b4f729ea98b545b1620ffe9198a6a878505d1040d35bbaf214b8fe6412abe.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 50, + 266, + 286, + 388 + ], + "blocks": [ + { + "bbox": [ + 46, + 225, + 545, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 225, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 545, + 258 + ], + "type": "text", + "content": "Figure 7. Visual equality comparison of real-synthetic data between the proposed method and the state-of-the-art HDR video reconstruction method: Chen 21 [3]. We present two sets of results in (a) and (b). Please zoom-in electronic versions for better details, and watch the HFR videos on the project page." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 266, + 286, + 388 + ], + "lines": [ + { + "bbox": [ + 50, + 266, + 286, + 388 + ], + "spans": [ + { + "bbox": [ + 50, + 266, + 286, + 388 + ], + "type": "image", + "image_path": "fbb712d19253aafc1fd9d055053cde4360d158fb65c53da6d8203063df92c7ec.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 89, + 396, + 243, + 407 + ], + "lines": [ + { + "bbox": [ + 89, + 396, + 243, + 407 + ], + "spans": [ + { + "bbox": [ + 89, + 396, + 243, + 407 + ], + "type": "text", + "content": "Figure 8. Validation on the synthetic data." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 422, + 287, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 287, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 287, + 529 + ], + "type": "text", + "content": "ample, in the long-exposure frame in the first row of (a), the building marked by a yellow box suffers from severe motion blur and overexposure. 
Chen21 [3] partially recovers the colors of this building, but it fails to remove the blurry artifacts. In the results generated by our method, the edges are sharp and the colors are vivid. In Fig. 7(b), the motions across RGB frames have a very large span, Chen21 [3] can only recover the corresponding LFR videos, while our method can reconstruct an HFR video with smooth motion." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "content": "We evaluate the reconstructed HDR in terms of PSNR, SSIM, HDR-VDP-2 [32], and HDR-VQM [36]. Table 2 clearly shows that our framework outperforms the state-of-the-art method [3] in all the metrics on the real-synthetic data in the condition of 60 FPS. And we achieve excellent performance in the condition of 1000 FPS. We designed ablation experiments and used them to demonstrate the effectiveness of the modules in our framework. For \"w/o I\", we simply stack the spike trains with a time window, and upsample them using bilinear interpolation; for \"w/o PS\", we replace PixelShuffle with a convolutional layer. The two groups of experiments verify the effectiveness of spike frame preprocessing in Step ①. For \"w/o F1\" and \"w/o F2\", we remove the flow-based interpolation in the deblurring module and the merging module. The two groups of ex" + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 308, + 312, + 545, + 446 + ], + "blocks": [ + { + "bbox": [ + 305, + 266, + 545, + 308 + ], + "lines": [ + { + "bbox": [ + 305, + 266, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 305, + 266, + 545, + 308 + ], + "type": "text", + "content": "Table 2. Quantitative results and ablation study on our realistic synthetic data. We sample 60 FPS videos from our results for the comparison with Chen21 [3]. " + }, + { + "bbox": [ + 305, + 266, + 545, + 308 + ], + "type": "inline_equation", + "content": "\\uparrow (\\downarrow)" + }, + { + "bbox": [ + 305, + 266, + 545, + 308 + ], + "type": "text", + "content": " indicates larger (smaller) values are better." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 312, + 545, + 446 + ], + "lines": [ + { + "bbox": [ + 308, + 312, + 545, + 446 + ], + "spans": [ + { + "bbox": [ + 308, + 312, + 545, + 446 + ], + "type": "table", + "html": "
<table><tr><td>Comparison with the state-of-the-art method</td></tr>
<tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>HDR-VDP-2↑</td><td>HDR-VQM↓</td><td>FPS</td></tr>
<tr><td>Chen21 [3]</td><td>18.46</td><td>0.697</td><td>27.34</td><td>0.536</td><td>60</td></tr>
<tr><td>Ours</td><td>30.14</td><td>0.921</td><td>60.14</td><td>0.093</td><td></td></tr>
<tr><td>Chen21 [3]</td><td>/</td><td>/</td><td>/</td><td>/</td><td>1000</td></tr>
<tr><td>Ours</td><td>24.38</td><td>0.903</td><td>47.79</td><td>0.120</td><td></td></tr>
<tr><td>Ablation study</td></tr>
<tr><td>w/o I</td><td>23.15</td><td>0.886</td><td>46.03</td><td>0.143</td><td>1000</td></tr>
<tr><td>w/o PS</td><td>23.98</td><td>0.881</td><td>46.47</td><td>0.141</td><td></td></tr>
<tr><td>w/o F1</td><td>19.76</td><td>0.723</td><td>38.95</td><td>0.314</td><td></td></tr>
<tr><td>w/o F2</td><td>18.04</td><td>0.716</td><td>35.89</td><td>0.356</td><td></td></tr>
<tr><td>w/ t-loss</td><td>22.41</td><td>0.864</td><td>43.64</td><td>0.142</td><td></td></tr>
<tr><td>w/o DeConv</td><td>24.31</td><td>0.897</td><td>47.66</td><td>0.127</td><td></td></tr>
<tr><td>w/o DM</td><td>19.01</td><td>0.714</td><td>37.97</td><td>0.338</td><td></td></tr></table>
", + "image_path": "bd2a969c0528a92c9fb04d225b6e4660c1e3727866146a713c86bbb5a5091a3f.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "content": "periments verify the effectiveness of SC-Flow [16] based interpolation in Steps ② and ③. To further verify the effectiveness of deblurring module, we completely remove it in \"w/o DM\". For \"w/o DeConv\", we replace the deformable convolutional layers with traditional convolution layers. For \"w/ t-loss\", we remove the warping operation on " + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{i-1}" + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "content": " and add the temporal consistent loss that is estimated by a pretrained optical flow model [23], which is widely used in video processing [5, 39]. Since the " + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{i-1}" + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "content": " is warped by accurate optical flow " + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{i-1}" + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "content": " and merged into the current step " + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 460, + 545, + 616 + ], + "type": "text", + "content": ", our method fundamentally has a strong temporal consistent constraint for video processing. Thus, our merging module does not need this loss during training." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 623, + 511, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 511, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 511, + 635 + ], + "type": "text", + "content": "4.2. Qualitative Evaluation using Real Data" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "In order to demonstrate the effectiveness of the proposed framework on real-world scenes, we collect 20 sets of real-world data, which are captured by our hybrid camera system shown in Fig. 6. We have compared our slow-motion capability with that of the commercial cameras. As shown in Fig. 
9(a), the electric fan is moving at about 40 rounds" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "22186" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 118, + 175 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 118, + 175 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 118, + 175 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 118, + 175 + ], + "type": "image", + "image_path": "1f268f5b45e6511852f7c10d04b0e95083619bab47aae0ee46893577875ea9d0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 186, + 546, + 220 + ], + "lines": [ + { + "bbox": [ + 46, + 186, + 546, + 220 + ], + "spans": [ + { + "bbox": [ + 46, + 186, + 546, + 220 + ], + "type": "text", + "content": "Figure 9. Visual quality comparison of real-world data between the proposed method and commercial cameras with the slow-motion capability. In (a), we show two adjacent frames for the video captured by smartphones that have slow-motion capability. The commercial cameras are not calibrated so their results are not strictly aligned with ours. (b) is the comparison with Phantom camera set to 1000 FPS." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 120, + 71, + 187, + 175 + ], + "blocks": [ + { + "bbox": [ + 120, + 71, + 187, + 175 + ], + "lines": [ + { + "bbox": [ + 120, + 71, + 187, + 175 + ], + "spans": [ + { + "bbox": [ + 120, + 71, + 187, + 175 + ], + "type": "image", + "image_path": "72f0059269a352e5fae6316d26d6d3997fa3fa7268ce9475787b3e6b04ccd5e3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 175, + 194, + 185 + ], + "lines": [ + { + "bbox": [ + 179, + 175, + 194, + 185 + ], + "spans": [ + { + "bbox": [ + 179, + 175, + 194, + 185 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 189, + 71, + 256, + 175 + ], + "blocks": [ + { + "bbox": [ + 189, + 71, + 256, + 175 + ], + "lines": [ + { + "bbox": [ + 189, + 71, + 256, + 175 + ], + "spans": [ + { + "bbox": [ + 189, + 71, + 256, + 175 + ], + "type": "image", + "image_path": "87e87953be2aadf893e099d367271fb5ced7b1b9999ee6fc17190a8fe64abca6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 257, + 72, + 335, + 175 + ], + "blocks": [ + { + "bbox": [ + 257, + 72, + 335, + 175 + ], + "lines": [ + { + "bbox": [ + 257, + 72, + 335, + 175 + ], + "spans": [ + { + "bbox": [ + 257, + 72, + 335, + 175 + ], + "type": "image", + "image_path": "12ab643dce1827b18edc878212e142f247aa8711e1c782c9617d11b686395bd6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 339, + 72, + 408, + 175 + ], + "blocks": [ + { + "bbox": [ + 339, + 72, + 408, + 175 + ], + "lines": [ + { + "bbox": [ + 339, + 72, + 408, + 175 + ], + "spans": [ + { + "bbox": [ + 339, + 72, + 408, + 175 + ], + "type": "image", + "image_path": 
"70699cd7e7248946e810a0c7736901cdbac1ebc3551834ffa17d97f4ef9afb79.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 408, + 72, + 544, + 175 + ], + "blocks": [ + { + "bbox": [ + 408, + 72, + 544, + 175 + ], + "lines": [ + { + "bbox": [ + 408, + 72, + 544, + 175 + ], + "spans": [ + { + "bbox": [ + 408, + 72, + 544, + 175 + ], + "type": "image", + "image_path": "c3d3ddfd28c99162d50a607d2239a16a351121102d1537088a159bcc3e287777.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 444, + 175, + 457, + 185 + ], + "lines": [ + { + "bbox": [ + 444, + 175, + 457, + 185 + ], + "spans": [ + { + "bbox": [ + 444, + 175, + 457, + 185 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 48, + 228, + 287, + 386 + ], + "blocks": [ + { + "bbox": [ + 48, + 228, + 287, + 386 + ], + "lines": [ + { + "bbox": [ + 48, + 228, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 287, + 386 + ], + "type": "image", + "image_path": "e93c85761807f3ca93c18962918da723c0e397456cdc1707f1e86ea56554f370.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 387, + 287, + 420 + ], + "lines": [ + { + "bbox": [ + 47, + 387, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 47, + 387, + 287, + 420 + ], + "type": "text", + "content": "Figure 10. Qualitative visualization of our method in a super fast scene: a balloon bursting. We select 38 frames from our results for showing." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 434, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 434, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 434, + 287, + 601 + ], + "type": "text", + "content": "per second. The short-exposure image is severely underexposed with less blurry artifacts, and the middle- and long-exposure images have severe blurring and oversaturated artifacts. With the accurate motion and texture information captured by the spiking camera, we have recovered temporally smooth video sequences. Four recovered images are shown for the middle- and long-exposure images. For the videos captured by iPhone 13 and Mi 10, the motions between frames are not continuous. And the electric fan captured by Mi 10 is deformed due to the rolling shutter. In Fig. 9(b), we compare our method with the Phantom7 camera set to 1000 FPS. Since the exposure time of the Phantom camera is extremely short, it fails to capture regions where scene radiance is weak." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 613, + 119, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 613, + 119, + 625 + ], + "spans": [ + { + "bbox": [ + 47, + 613, + 119, + 625 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 634, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 287, + 693 + ], + "type": "text", + "content": "We propose an HFR&HDR video reconstruction method with a hybrid camera that is composed of an alternating-exposure RGB sensor and a spiking sensor. 
Extensive experiments on synthetic and real-world data demonstrate the superior performance of the proposed method." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 229, + 545, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 229, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 306, + 229, + 545, + 384 + ], + "type": "text", + "content": "Discussion. (i) For super-fast scenes, e.g., a bursting balloon, it is difficult to capture clear motion with a conventional RGB camera at 60 FPS. Therefore, the well-exposed color of the bursting balloon is not captured with the short exposure, which makes it challenging to reconstruct accurate color. In our results, although the colors are somewhat distorted, we can still recover a smooth video sequence. Once the frame rate of the RGB camera is increased, e.g., to 120 FPS, a temporally smoother video with more accurate color can be recovered more reliably. (ii) Since QIS [1, 29] shares the same imaging model as the spiking camera, our method can be readily applied to it. We show the simulation in the supplementary material." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "spans": [ + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "text", + "content": "Limitation and future work. Whether a beam splitter is suitable for a practical system on mobile devices is arguable. However, when a compact design is not a hard constraint, a beam splitter has unique advantages in spatial alignment, which is why it is broadly adopted for building hybrid HDR prototypes [15, 24, 33, 50]. A side-by-side arrangement with parallax unavoidably introduces occlusion and alignment issues, and handling them is a promising direction for our future work. Due to the low spatial resolution " + }, + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "inline_equation", + "content": "(250\\times 400)" + }, + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "text", + "content": " of the spiking camera model we currently use, we have to super-resolve the spike frames in feature space. If higher-resolution spike signals can be obtained directly, our method can achieve better visual quality. Besides, there is a domain gap between synthetic and real-captured spike trains, since the noise of the spiking camera is more complex than that of the simulator. In terms of time complexity, our approach is better suited as a post-processing module. The number of parameters is " + }, + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "inline_equation", + "content": "45.7\\mathrm{M}" + }, + { + "bbox": [ + 305, + 385, + 546, + 635 + ], + "type": "text", + "content": " and the time cost per frame is 0.371 s on a single NVIDIA GeForce RTX 3090 graphics card. We hope to tackle these issues in future work and achieve higher frame rate reconstruction."
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 643, + 403, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 643, + 403, + 654 + ], + "spans": [ + { + "bbox": [ + 306, + 643, + 403, + 654 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 545, + 712 + ], + "type": "text", + "content": "This work was supported by National Key R&D Program of China (2021ZD0109803), National Natural Science Foundation of China under Grant No. 62088102, 62136001. Yakun Chang was also supported by China Postdoctoral Science Foundation (8206300710)." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 272, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 272, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 272, + 712 + ], + "type": "text", + "content": "7Refer to footnote 2. Camera model: VEO 640, F/1.8, 85mm lens." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22187" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Gnanasambandam Abhiram and Chan Stanley H. HDR imaging with quanta image sensors: Theoretical limits and optimal reconstruction. IEEE Transactions on Computational Imaging, 6:1571-1585, 2020. 2, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 169 + ], + "type": "text", + "content": "[2] Moshe Ben-Ezra and Shree K Nayar. Motion deblurring using hybrid imaging. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 288, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 288, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 288, + 225 + ], + "type": "text", + "content": "[3] Guanying Chen, Chaofeng Chen, Shi Guo, Zhetong Liang, Kwan-Yee K Wong, and Lei Zhang. HDR video reconstruction: A coarse-to-fine network and a real-world benchmark dataset. In Proc. of International Conference on Computer Vision, pages 2502-2511, 2021. 
2, 4, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 227, + 288, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 227, + 288, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 227, + 288, + 271 + ], + "type": "text", + "content": "[4] Wooyeong Cho, Sanghyeok Son, and Dae-Shik Kim. Weighted multi-kernel prediction network for burst image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 404-413, 2021. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 272, + 288, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 288, + 304 + ], + "type": "text", + "content": "[5] Jonghyun Choi, Kuk-Jin Yoon, et al. Learning to super resolve intensity images from events. In Proc. of Computer Vision and Pattern Recognition, pages 2768-2776, 2020. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 306, + 288, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 306, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 306, + 288, + 350 + ], + "type": "text", + "content": "[6] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proc. of International Conference on Computer Vision, pages 764-773, 2017. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 351, + 288, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 288, + 384 + ], + "type": "text", + "content": "[7] Paul E Debevec and Jitendra Malik. Recovering high dynamic range radiance maps from photographs. In Proc. of ACM SIGGRAPH, pages 1-10. 2008. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 288, + 430 + ], + "type": "text", + "content": "[8] Akshay Dudhane, Syed Waqas Zamir, Salman Khan, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Burst image restoration and enhancement. In Proc. of Computer Vision and Pattern Recognition, pages 5759-5768, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 431, + 288, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 431, + 288, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 431, + 288, + 474 + ], + "type": "text", + "content": "[9] Gabriel Eilertsen, Joel Kronander, Gyorgy Denes, Rafat K Mantiuk, and Jonas Unger. HDR image reconstruction from a single exposure using deep cnns. ACM Transactions on Graphics, 36(6):1-15, 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 476, + 288, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 476, + 288, + 531 + ], + "spans": [ + { + "bbox": [ + 48, + 476, + 288, + 531 + ], + "type": "text", + "content": "[10] Guillermo Gallego, Tobi Delbrück, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew J Davison, Jörg Conradt, Kostas Daniilidis, et al. Event-based vision: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1):154-180, 2020. 
1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 288, + 565 + ], + "type": "text", + "content": "[11] Clément Godard, Kevin Matzen, and Matt Uytendaele. Deep burst denoising. In Proc. of European Conference on Computer Vision, pages 538-554, 2018. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 610 + ], + "type": "text", + "content": "[12] Yulia Gryaditskaya, Tania Pouli, Erik Reinhard, Karol Myszkowski, and Hans-Peter Seidel. Motion aware exposure bracketing for HDR video. In Computer Graphics Forum, volume 34, pages 119-130. Wiley Online Library, 2015. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 612, + 288, + 644 + ], + "type": "text", + "content": "[13] Saghi Hajisharif, Joel Kronander, and Jonas Unger. Adaptive dualiso HDR reconstruction. EURASIP Journal on Image and Video Processing, 2015(1):1-13, 2015. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 646, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 288, + 689 + ], + "type": "text", + "content": "[14] Jin Han, Yixin Yang, Chu Zhou, Chao Xu, and Boxin Shi. Evintsr-net: Event guided multiple latent frames reconstruction and super-resolution. In Proc. of International Conference on Computer Vision, pages 4882-4891, 2021. 2, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "text", + "content": "[15] Jin Han, Chu Zhou, Peiqi Duan, Yehui Tang, Chang Xu, Chao Xu, Tiejun Huang, and Boxin Shi. Neuromorphic cam-" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "era guided high dynamic range imaging. In Proc. of Computer Vision and Pattern Recognition, pages 1730-1739, 2020. 2, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 108, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 545, + 152 + ], + "type": "text", + "content": "[16] Liwen Hu, Rui Zhao, Ziluo Ding, Lei Ma, Boxin Shi, Ruiqin Xiong, and Tiejun Huang. Optical flow estimation for spiking camera. In Proc. of Computer Vision and Pattern Recognition, pages 17844-17853, 2022. 
2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "text", + "content": "[17] Tiejun Huang, Yajing Zheng, Zhaofei Yu, Rui Chen, Yuan Li, Ruiqin Xiong, Lei Ma, Junwei Zhao, Siwei Dong, Lin Zhu, et al. " + }, + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "inline_equation", + "content": "1000 \\times" + }, + { + "bbox": [ + 307, + 153, + 545, + 198 + ], + "type": "text", + "content": " faster camera and machine vision with ordinary devices. Engineering, 2022. 1, 3, 4, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 199, + 545, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 199, + 545, + 243 + ], + "spans": [ + { + "bbox": [ + 307, + 199, + 545, + 243 + ], + "type": "text", + "content": "[18] Zhe Jiang, Yu Zhang, Dongqing Zou, Jimmy Ren, Jiancheng Lv, and Yebin Liu. Learning event-based motion deblurring. In Proc. of Computer Vision and Pattern Recognition, pages 3320-3329, 2020. 2, 3, 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 244, + 545, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 244, + 545, + 289 + ], + "spans": [ + { + "bbox": [ + 307, + 244, + 545, + 289 + ], + "type": "text", + "content": "[19] Nima Khademi Kalantari and Ravi Ramamoorthi. Deep HDR video from sequences with alternating exposures. In Computer graphics forum, volume 38, pages 193-205. Wiley Online Library, 2019. 2, 4, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 290, + 545, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 290, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 307, + 290, + 545, + 323 + ], + "type": "text", + "content": "[20] Nima Khademi Kalantari, Ravi Ramamoorthi, et al. Deep high dynamic range imaging of dynamic scenes. ACM Transactions on Graphics, 36(4):144-1, 2017. 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 325, + 545, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 325, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 545, + 369 + ], + "type": "text", + "content": "[21] Nima Khademi Kalantari, Eli Shechtman, Connelly Barnes, Soheil Darabi, Dan B Goldman, and Pradeep Sen. Patch-based high dynamic range video. ACM Transactions on Graphics, 32(6):202-1, 2013. 2, 4, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 371, + 545, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 404 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 404 + ], + "type": "text", + "content": "[22] Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High dynamic range video. ACM Transactions on Graphics, 22(3):319-325, 2003. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 307, + 406, + 545, + 450 + ], + "type": "text", + "content": "[23] Wei-Sheng Lai, Jia-Bin Huang, Oliver Wang, Eli Shechtman, Ersin Yumer, and Ming-Hsuan Yang. Learning blind video temporal consistency. In Proc. of European Conference on Computer Vision, pages 170-185, 2018. 
7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 452, + 545, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 452, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 307, + 452, + 545, + 485 + ], + "type": "text", + "content": "[24] Byungju Lee and Byung Cheol Song. Multi-image high dynamic range algorithm using a hybrid camera. Signal Processing: Image Communication, 30:37-56, 2015. 2, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 487, + 545, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 487, + 545, + 540 + ], + "spans": [ + { + "bbox": [ + 307, + 487, + 545, + 540 + ], + "type": "text", + "content": "[25] Juan Antonio Lénero-Bardallo, Teresa Serrano-Gotarredona, and Bernabé Linares-Barranco. A 3.6 " + }, + { + "bbox": [ + 307, + 487, + 545, + 540 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 307, + 487, + 545, + 540 + ], + "type": "text", + "content": "s latency asynchronous frame-free event-driven dynamic-vision-sensor. IEEE Journal of Solid-State Circuits, 46(6):1443-1455, 2011. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 543, + 545, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 543, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 307, + 543, + 545, + 586 + ], + "type": "text", + "content": "[26] Feng Li, Jingyi Yu, and Jinxiang Chai. A hybrid camera for motion deblurring and depth map super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 589, + 545, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 589, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 307, + 589, + 545, + 621 + ], + "type": "text", + "content": "[27] Ziwei Liu, Lu Yuan, Xiaou Tang, Matt Uytendaele, and Jian Sun. Fast burst images denoising. ACM Transactions on Graphics, 33(6):1-9, 2014. 4" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 624, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 667 + ], + "type": "text", + "content": "[28] Kede Ma, Hui Li, Hongwei Yong, Zhou Wang, Deyu Meng, and Lei Zhang. Robust multi-exposure image fusion: A structural patch decomposition approach. IEEE Transactions on Image Processing, 26(5):2519-2532, 2017. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "text", + "content": "[29] Ulku Arin C Bruschini Claudio Charbon Edoardo Ma Sizhuo, Gupta Shantanu and Gupta Mohit. Quanta burst photography. ACM Transactions on Graphics, 39(4):79-1, 2020. 
8" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "22188" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[30] Stephen Mangiat and Jerry Gibson. High dynamic range video with ghost removal. In Applications of Digital Image Processing XXXIII, volume 7798, pages 307-314. SPIE, 2010. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 288, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 288, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 288, + 162 + ], + "type": "text", + "content": "[31] Stephen Mangiat and Jerry Gibson. Spatially adaptive filtering for registration artifact removal in HDR video. In Proc. of International Conference on Image Processing, pages 1317-1320. IEEE, 2011. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "type": "text", + "content": "[32] Rafal Mantiuk, Kil Joong Kim, Allan G Rempel, and Wolfgang Heidrich. HDR-VDP-2: A calibrated visual metric for visibility and quality predictions in all luminance conditions. ACM Transactions on Graphics, 30(4):1-14, 2011. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 288, + 262 + ], + "type": "text", + "content": "[33] Morgan McGuire, Wojciech Matusik, Hanspeter Pfister, Billy Chen, John F Hughes, and Shree K Nayar. Optical splitting trees for high-precision monocular imaging. IEEE Computer Graphics and Applications, 27(2):32-42, 2007. 2, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 288, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 288, + 297 + ], + "type": "text", + "content": "[34] Tom Mertens, Jan Kautz, and Frank Van Reeth. Exposure fusion. In Pacific Conference on Computer Graphics and Applications, pages 382-390, 2007. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 287, + 363 + ], + "type": "text", + "content": "[35] Nico Messikommer, Stamatios Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-Bracket high dynamic range imaging with event cameras. In Proc. of Computer Vision and Pattern Recognition, pages 547–557, 2022. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 408 + ], + "type": "text", + "content": "[36] Manish Narwaria, Matthieu Perreira Da Silva, and Patrick Le Callet. HDR-VQM: An objective quality measure for high dynamic range video. Signal Processing: Image Communication, 35:46-60, 2015. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 410, + 287, + 453 + ], + "type": "text", + "content": "[37] Shree K Nayar and Tomoo Mitsunaga. High dynamic range imaging: Spatially varying pixel exposures. In Proc. of Computer Vision and Pattern Recognition, volume 1, pages 472-479. IEEE, 2000. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 287, + 498 + ], + "type": "text", + "content": "[38] Avinash Paliwal and Nima Khademi Kalantari. Deep slow motion video reconstruction with hybrid imaging system. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(7):1557-1569, 2020. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 500, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 287, + 544 + ], + "type": "text", + "content": "[39] Henri Rebecq, René Ranftl, Vladlen Koltun, and Davide Scaramuzza. High speed and high dynamic range video with an event camera. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(6):1964-1980, 2019. 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 589 + ], + "type": "text", + "content": "[40] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proc. of Computer Vision and Pattern Recognition, pages 779-788, 2016. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 634 + ], + "type": "text", + "content": "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 689 + ], + "type": "text", + "content": "[42] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234–241. Springer, 2015. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[43] Yash Sanghvi, Abhiram Gnanasambandam, and Stanley H Chan. Photon limited non-blind deblurring using algorithm" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 94 + ], + "type": "text", + "content": "unrolling. IEEE Transactions on Computational Imaging, 2022.5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 139 + ], + "type": "text", + "content": "[44] Marcel Santana Santos, Tsang Ing Ren, and Nima Khademi Kalantari. Single image HDR reconstruction using a cnn with masked features and perceptual loss. arXiv preprint arXiv:2005.07335, 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 183 + ], + "type": "text", + "content": "[45] Pradeep Sen, Nima Khademi Kalantari, Maziar Yaesoubi, Soheil Darabi, Dan B Goldman, and Eli Shechtman. Robust patch-based HDR reconstruction of dynamic scenes. ACM Transactions on Graphics, 31(6):203-1, 2012. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 184, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 184, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 307, + 184, + 545, + 227 + ], + "type": "text", + "content": "[46] Richard Shaw, Sibi Catley-Chandar, Ales Leonardis, and Eduardo Perez-Pellitero. HDR reconstruction from bracketed exposures and events. arXiv preprint arXiv:2203.14825, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 228, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 545, + 293 + ], + "type": "text", + "content": "[47] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proc. of Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 4, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 293, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 293, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 307, + 293, + 545, + 348 + ], + "type": "text", + "content": "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. Proc. of Advances in Neural Information Processing Systems, 28, 2015. 
5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 349, + 545, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 545, + 392 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 545, + 392 + ], + "type": "text", + "content": "[49] Yu-Wing Tai, Hao Du, Michael S Brown, and Stephen Lin. Image/video deblurring using a hybrid camera. In Proc. of Computer Vision and Pattern Recognition, pages 1-8, 2008. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 393, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 393, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 393, + 545, + 426 + ], + "type": "text", + "content": "[50] Michael D Tocci, Chris Kiser, Nora Tocci, and Pradeep Sen. A versatile HDR video production system. ACM Transactions on Graphics, 30(4):1-10, 2011. 2, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 545, + 491 + ], + "type": "text", + "content": "[51] Stepan Tulyakov, Alfredo Bochicchio, Daniel Gehrig, Stamatios Georgoulis, Yuanyou Li, and Davide Scaramuzza. Time Lens++: Event-based frame interpolation with parametric non-linear flow and multi-scale fusion. In Proc. of Computer Vision and Pattern Recognition, pages 17755-17764, 2022. 2, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 492, + 545, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 492, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 307, + 492, + 545, + 548 + ], + "type": "text", + "content": "[52] Stepan Tulyakov, Daniel Gehrig, Stamatios Georgoulis, Julius Erbach, Mathias Gehrig, Yuanyou Li, and Davide Scaramuzza. Time Lens: Event-based video frame interpolation. In Proc. of Computer Vision and Pattern Recognition, pages 16155-16164, 2021. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 591 + ], + "type": "text", + "content": "[53] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 592, + 545, + 635 + ], + "type": "text", + "content": "[54] Fang Xu, Lei Yu, Bishan Wang, Wen Yang, Gui-Song Xia, Xu Jia, Zhendong Qiao, and Jianzhuang Liu. Motion deblurring with real events. In Proc. of International Conference on Computer Vision, pages 2583-2592, 2021. 3, 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 636, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 636, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 636, + 545, + 679 + ], + "type": "text", + "content": "[55] Qingsen Yan, Lei Zhang, Yu Liu, Yu Zhu, Jinqiu Sun, Qinfeng Shi, and Yanning Zhang. Deep HDR imaging via a non-local network. IEEE Transactions on Image Processing, 29:4308-4322, 2020. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[56] Zhiyang Yu, Yu Zhang, Deyuan Liu, Dongqing Zou, Xijun Chen, Yebin Liu, and Jimmy S Ren. Training weakly supervised video frame interpolation with events. In Proc. of" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "22189" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 509 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 94 + ], + "type": "text", + "content": "International Conference on Computer Vision, pages 14589-14598, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[57] Cheng Zhang, Shaolin Su, Yu Zhu, Qingsen Yan, Jinqiu Sun, and Yanning Zhang. Exploring and evaluating image restoration potential in dynamic scenes. In Proc. of Computer Vision and Pattern Recognition, pages 2067-2076, 2022. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 184 + ], + "type": "text", + "content": "[58] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proc. of Computer Vision and Pattern Recognition, pages 586-595, 2018. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "type": "text", + "content": "[59] Xiang Zhang and Lei Yu. Unifying motion deblurring and frame interpolation with events. In Proc. of Computer Vision and Pattern Recognition, pages 17765-17774, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 262 + ], + "type": "text", + "content": "[60] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proc. of Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 
4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 264, + 287, + 319 + ], + "type": "text", + "content": "[61] Jing Zhao, Ruiqin Xiong, Hangfan Liu, Jian Zhang, and Tiejun Huang. Spk2Imgnet: Learning to reconstruct dynamic scene from continuous spike stream. In Proc. of Computer Vision and Pattern Recognition, pages 11996-12005, 2021. 1, 4, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 287, + 374 + ], + "type": "text", + "content": "[62] Yajing Zheng, Lingxiao Zheng, Zhaofei Yu, Boxin Shi, Yonghong Tian, and Tiejun Huang. High-speed image reconstruction through short-term plasticity for spiking cameras. In Proc. of Computer Vision and Pattern Recognition, pages 6358-6367, 2021. 1, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 376, + 287, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 287, + 419 + ], + "type": "text", + "content": "[63] Lin Zhu, Siwei Dong, Tiejun Huang, and Yonghong Tian. A retina-inspired sampling method for visual texture reconstruction. In Proc. of International Conference on Multimedia and Expo. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 421, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 421, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 421, + 287, + 464 + ], + "type": "text", + "content": "[64] Lin Zhu, Siwei Dong, Jianing Li, Tiejun Huang, and Yonghong Tian. Retina-like visual image reconstruction via spiking neural model. In Proc. of Computer Vision and Pattern Recognition, pages 1438-1446, 2020. 1, 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 466, + 287, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 287, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 287, + 509 + ], + "type": "text", + "content": "[65] Yunhao Zou, Yinqiang Zheng, Tsuyoshi Takatani, and Ying Fu. Learning to reconstruct high speed and high dynamic range videos from events. In Proc. of Computer Vision and Pattern Recognition, pages 2024-2033, 2021. 
1" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "22190" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_content_list.json b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c0294f449b059bcc203fc024e1025dc4cbf511ce --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_content_list.json @@ -0,0 +1,1580 @@ +[ + { + "type": "text", + "text": "2PCNet: Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection", + "text_level": 1, + "bbox": [ + 186, + 130, + 784, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mikhail Kennerley $^{1,2}$ , Jian-Gang Wang $^{2}$ , Bharadwaj Veeravalli $^{1}$ , and Robby T. Tan $^{1}$ $^{1}$ National University of Singapore, Department of Electrical and Computer Engineering \n $^{2}$ Institute for Infocomm Research, A*STAR \nmikhailk@u.nus.edu, jgwang@i2r.a-star.edu.sg, elebv@nus.edu.sg, robby.tan@nus.edu.sg", + "bbox": [ + 114, + 214, + 857, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 325, + 313, + 343 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Object detection at night is a challenging problem due to the absence of night image annotations. Despite several domain adaptation methods, achieving high-precision results remains an issue. False-positive error propagation is still observed in methods using the well-established student-teacher framework, particularly for small-scale and low-light objects. This paper proposes a two-phase consistency unsupervised domain adaptation network, 2PCNet, to address these issues. The network employs high-confidence bounding-box predictions from the teacher in the first phase and appends them to the student's region proposals for the teacher to re-evaluate in the second phase, resulting in a combination of high and low confidence pseudo-labels. The night images and pseudo-labels are scaled-down before being used as input to the student, providing stronger small-scale pseudo-labels. To address errors that arise from low-light regions and other night-related attributes in images, we propose a night-specific augmentation pipeline called NightAug. This pipeline involves applying random augmentations, such as glare, blur, and noise, to daytime images. Experiments on publicly available datasets demonstrate that our method achieves superior results to state-of-the-art methods by $20\\%$ , and to supervised models trained directly on the target data.", + "bbox": [ + 76, + 358, + 473, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 747, + 209, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nighttime object detection is critical in many applications. However, the requirement of annotated data by supervised methods is impractical, since night data with annotations is few, and supervised methods are generally prone to overfitting to the training data. Among other reasons, this scarcity is due to poor lighting conditions which makes nighttime images hard to annotate. Hence, methods that", + "bbox": [ + 75, + 773, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5263fe911ce1f0a867c50db1a23864721ae578206f0864dbf948d76be7debed0.jpg", + "image_caption": [ + "DA Faster-RCNN" + ], + "image_footnote": [], + "bbox": [ + 506, + 327, + 696, + 410 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2e63e3fe63bff5506f7606feab4b1ad88b1cbdc1709be968f5f988018d6f6771.jpg", + "image_caption": [ + "UMT" + ], + "image_footnote": [], + "bbox": [ + 697, + 328, + 885, + 410 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/350b7bd91271c7942627218e0872edb02ac1007c41555ba9accbebbb4a8b99d5.jpg", + "image_caption": [ + "AT", + "Figure 1. Qualitative results of state-of-the-art DA methods, DA Faster-RCNN [3], UMT [7], Adaptive Teacher (AT) [15] and our method 2PCNet on the BDD100K [36] dataset. Unlike the SOTA methods, our method is able to detect dark and small scale objects with minimal additional false positive predictions." + ], + "image_footnote": [], + "bbox": [ + 508, + 424, + 696, + 508 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/53f055484860341fb6f8bbfbf09bbdc6bb26422bdf984c4ac17b1fca1945835d.jpg", + "image_caption": [ + "2PCNet (Ours)" + ], + "image_footnote": [], + "bbox": [ + 697, + 424, + 885, + 508 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "do not assume the availability of the annotations are more advantageous. Domain adaptation (DA) is an efficient solution to this problem by allowing the use of readily available annotated source daytime datasets.", + "bbox": [ + 496, + 638, + 890, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A few domain adaptation methods have been proposed, e.g., adversarial learning which uses image and instance level classifiers [3] and similar concepts [22, 32]. However, these methods isolate the domain adaptation task purely towards the feature extractor, and suppress features of the target data for the sake of domain invariance. Recent unsupervised domain adaptation methods exploit the studentteacher framework (e.g. [1,7,11,15]). Since the student initially learns from the supervised loss, there is a bias towards the source data. Augmentation [7, 11] and adversarial learning [15] have been proposed to address this problem. Unfortunately, particularly for day-to-night unsupervised domain adaptation, these methods suffer from a large num", + "bbox": [ + 496, + 704, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1www.github.com/mercarill/2pcnet", + "bbox": [ + 94, + 886, + 339, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "11484", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ber of inaccurate pseudo-labels produced by the teacher. In our investigation, the problem is notably due to insufficient knowledge of small scale features in the nighttime domain, which are then propagated through the learning process between the teacher and student, resulting in poor object detection performance.", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the problem, in this paper, we present 2PC-Net, a two-phase consistency unsupervised domain adaptation network for nighttime object detection. Our 2PCNet merges the bounding-boxes of highly-confident pseudolabels, which are predicted in phase one, together with regions proposed by the student's region proposal network (RPN). The merged proposals are then used by the teacher to generate a new set of pseudo-labels in phase two. This provides a combination of high and low confidence pseudolabels. These pseudo-labels are then matched with predictions generated by the student. We can then utilise a weighted consistency loss to ensure that a higher weightage of our unsupervised loss is based on stronger pseudo-labels, yet allow for weaker pseudo-labels to influence the training.", + "bbox": [ + 75, + 181, + 470, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Equipped with this two-phase strategy, we address the problem of errors from small-scale objects. We devise a student-scaling technique, where night images and their pseudo-labels for the student are deliberately scaled down. In order to generate accurate pseudo-labels, images to the teacher remain at their full scale. This results in the pseudolabels of larger objects, which are easier to predict, to be scaled down to smaller objects, allowing for an increase in small scale performance of the student.", + "bbox": [ + 75, + 395, + 468, + 530 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Nighttime images suffer from multiple complications not found in daytime scenes such as dark regions, glare, prominent noise, prominent blur, imbalanced lighting, etc. All these cause a problem, since the student, which was trained on daytime images, is much more biased towards the daytime domain's characteristics. To mitigate this problem, we propose NightAug, a set of random nighttime specific augmentations. NightAug includes adding artificial glare, noise, blur, etc. that mimic the night conditions to daytime images. With NightAug we are able to reduce the bias of the student network towards the source data without resulting in adversarial learning or compute-intensive translations. Overall, using 2PCNet, we can see the qualitative improvements of our result in Figure 1. In summary, the contributions of this paper are as follows:", + "bbox": [ + 75, + 532, + 470, + 758 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present 2PCNet, a two-phase consistency approach for student-teacher learning. 
2PCNet takes advantage of highly confident teacher labels augmented with less confident regions, which are proposed by the scaled student. This strategy produces a sharp reduction of the error propagation in the learning process.", + "- To address the bias of the student towards the source domain, we propose NightAug, a random night spe" + ], + "bbox": [ + 94, + 771, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cific augmentation pipeline to shift the characteristics of daytime images toward nighttime.", + "bbox": [ + 529, + 90, + 890, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- The effectiveness of our approach has been verified by comparing it with the state-of-the-art domain adaptation approaches. An improvement of $+7.9\\mathrm{AP}(+20\\%)$ and $+10.2\\mathrm{AP}(26\\%)$ over the SOTA on BDD100K and SHIFT has been achieved, respectively.", + "bbox": [ + 517, + 128, + 893, + 204 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 227, + 640, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unsupervised Domain Adaptation (UDA) Unsupervised domain adaptation aims to learn transferable features to reduce the discrepancy between a labelled source and unlabelled target domain. Previous works minimised the distance metric (MMD) [16-18] and considered intra-class and inter-class discrepancy [12, 13]. Adversarial feature learning involved adding an adversarial classifier to play the min-max game between the domain discriminator and feature extractors to generate a domain invariant feature map [27, 28, 37]. These methods have been applied to image classification. Our work focuses on object detection, which is more complex as it involves identifying multiple bounding boxes and associated classes in each image.", + "bbox": [ + 496, + 253, + 890, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "UDA for Object Detection Object detection with UDA is a recent challenge due to the complexities of identifying multiple objects in an image. DA-Faster RCNN [3] integrated adversarial learning with image and instance level classifiers, and several approaches have been proposed to improve on this method by introducing scale-awareness [4], class specific discriminators [31], and re-purposing the task-specific classifier as a discriminator [2]. The Mean Teacher (MT) framework [26] has been adopted in semi-supervised methods, such as UMT [7], which incorporates CycleGAN [39] augmented images; AT [15], which combines the student-teacher framework with adversarial learning; and TDD [11], which uses dual student-teacher networks with style transfer.", + "bbox": [ + 496, + 457, + 890, + 670 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Nighttime UDA The majority of research on unsupervised domain adaptation (UDA) in nighttime scenarios has focused on semantic segmentation [5, 8, 9, 14, 23, 29, 33]. Translation and style transformation techniques are commonly used to reduce the domain gap between the source and target domains in these methods [8,29,33]. 
Some UDA-based techniques for nighttime also utilise paired-images to generate a shared feature space [23], while others use an intermediate domain such as twilight to reduce the domain gap during unsupervised learning [5].", + "bbox": [ + 496, + 688, + 890, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Nighttime tracking has also been investigated where adversarial transformers are used to close the domain gap [35]. However, there is a gap in research when it comes to applying UDA techniques in the object detection task for night-", + "bbox": [ + 496, + 840, + 890, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "11485", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d79fc3147efb095c9d9a480464ef2004c703c1cc7d2c0b76ce09ed2f1902d44e.jpg", + "image_caption": [ + "Figure 2. Overview of our proposed framework, 2PCNet. 2PCNet consists of: A student network is trained on both the labelled daytime image, which has been augmented with NightAug, and unlabelled nighttime images. A teacher network which is the exponential moving average (EMA) of the student and provides matched pseudo-labels for unsupervised loss. The match pseudo-labels are the predictions of the teacher (phase two) using the RPN proposals of the student, which in turn was guided by the high confidence pseudo-labels of the teacher (phase one)." + ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 890, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "time scenarios. Therefore, we explore the application of UDA techniques in object detection under low-light and nighttime conditions.", + "bbox": [ + 75, + 520, + 470, + 565 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Proposed Method", + "text_level": 1, + "bbox": [ + 76, + 583, + 250, + 601 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathbf{D}_s$ be the daytime source data. $\\mathbf{D}_s = \\{I_s, C_s, B_s\\}$ , where the variables refer to the image, class label and bounding-box label, respectively. Index $s$ indicates the daytime source. The night target data is represented by $\\mathbf{D}_t$ , where $\\mathbf{D}_t = \\{I_t\\}$ as we do not have the target labels available to us. Index $t$ indicates the nighttime target.", + "bbox": [ + 75, + 609, + 468, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The architecture of our 2PCNet is shown in Figure 2. Our 2PCNet consists of a student and a teacher network. The student is a multi-domain network trained on both labelled daytime images, augmented with NightAug, and unlabelled nighttime images. The teacher focuses on night images to produce pseudo-labels for the student and is the exponential moving average (EMA) of the student. After an initial pretraining phase, the teacher begins producing pseudo-labels, which allows the student to initialise the feature extractor and detector.", + "bbox": [ + 75, + 702, + 470, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During each iteration, in phase one of 2PCNet, the teacher produces pseudo-labels from the night images. These pseudo-labels are filtered through a confidence", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "threshold. This is to ensure only high-confidence pseudolabels are given to the student. The bounding-boxes from the pseudo-labels are then combined with the region proposals generated by the student's RPN. 
The merged region proposals are then used to generate predictions from the student's RoI network. In phase two, the teacher utilises the same merged region proposals to generate a matched set of pseudo-labels, where each pseudo-label has its corresponding prediction obtained from the student.", + "bbox": [ + 496, + 520, + 892, + 655 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As mentioned earlier, our student network is initialised by pretraining for a set number of iterations. This is done with supervised loss on the augmented daytime images:", + "bbox": [ + 496, + 657, + 890, + 705 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\sup } = L _ {\\operatorname {r p n}} \\left(B _ {s}, I _ {s}\\right) + L _ {\\operatorname {r o i}} \\left(B _ {s}, C _ {s}, I _ {s}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 724, + 890, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $L_{\\mathrm{rpn}}$ represents the loss from the RPN, which consists of an objectness and bounding-box regression loss. $L_{\\mathrm{roi}}$ represents the loss from the detector network, consisting of a classification and bounding-box regression loss.", + "bbox": [ + 496, + 761, + 890, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Once the pretraining is completed, the student's weights are then transferred over to the teacher. In the succeeding iterations, the teacher's weights are the exponential moving average (EMA) of the student's. The matched pseudo-labels generated by the teacher, $\\{C_p^*, B_p^*\\}$ , are then used to guide", + "bbox": [ + 496, + 825, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "11486", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c67cd0ceb3844b100df47930828639c1f30a507ba48e45e37211aff677e01841.jpg", + "image_caption": [ + "Figure 3. (Left to Right, Top to Bottom) Ground truth bounding boxes, bounding boxes predicted by the teacher with non-maximal suppression (NMS) and thresholding $(B_{p})$ , bounding boxes predicted by the student $(B_{\\mathrm{student}})$ which is guided by $B_{p}$ , and the bounding boxes predicted by the teacher $(B_{p}^{*})$ for the consistency loss." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 282, + 267 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/19d2798780ffd2eaa87261a168fdf6db1a82fe1e65293999ac403404e0f935dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 90, + 482, + 267 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the unsupervised loss, defined as:", + "bbox": [ + 76, + 393, + 299, + 407 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {u n s u p}} = L _ {\\text {r p n}} ^ {\\text {o b j}} \\left(C _ {p} ^ {*}; I _ {t}\\right) + L _ {\\text {c o n s}} \\left(C _ {p} ^ {*}; I _ {t}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 422, + 468, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $L_{\\mathrm{rpn}}^{\\mathrm{obj}}$ is the objectness loss of the RPN and $L_{\\mathrm{cons}}$ is the weighted KL-Divergence loss from the predicted outputs which we will further explain in the next section.", + "bbox": [ + 75, + 454, + 468, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. 
Two-Phase Consistency", + "text_level": 1, + "bbox": [ + 76, + 512, + 292, + 527 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Due to the large domain gap between daytime source images and nighttime target images, the teacher is unable to produce high quality pseudo-labels. This generally occurs in the whole scene, but particularly for regions with strong night characteristics, e.g., low-light, glare, uneven lighting, etc. The teacher produces confident pseudo-labels only for regions that share more similarities to the daytime, since it is biased towards the daytime domain. This bias poses a problem for methods that employ a hard-threshold to filter pseudo-labels for categorical cross-entropy loss [7, 15, 26]. The remaining pseudo-labels contain only easy samples with daytime attributes. Consequently, the student does not learn from harder (e.g. darker) areas.", + "bbox": [ + 75, + 537, + 468, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As a result of minimal knowledge of the hard samples (i.e., areas with a high level of nighttime attributes), the teacher begins to predict highly confident yet incorrect pseudo-labels. As the teacher provides these incorrect pseudo-labels to the student, a viscous cycle starts where the teacher in turn is updated with incorrect knowledge. Consequently, the error continues to propagate through training. In our case, these errors notably occur in dark/glare regions and as small scale objects.", + "bbox": [ + 75, + 733, + 468, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address the problem of error propagation, we design a two-phase approach that combines high confidence", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "pseudo-labels together with their less confident counterparts. This combination allows for the high accuracy of confident-labels with the additional knowledge of less confident labels to be distilled onto the student. In phase one, the unlabelled nighttime image, $I_{t}$ , is used as an input for the teacher to generate pseudo-labels. These pseudo-labels are filtered with a threshold to retain only high-confidence pseudo-labels, $(C_p, B_p)$ . The bounding-box of the pseudolabels, $B_{p}$ , is then used as an input to the student. $B_{p}$ is concatenated to the region proposals generated by the student RPN module:", + "bbox": [ + 496, + 90, + 890, + 256 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP ^ {*} = \\operatorname {R P N} _ {\\text {s t u d e n t}} \\left(I _ {t}\\right) \\neq B _ {p}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 268, + 890, + 285 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $P^{*}$ is the combined region proposals, which are then used as an input to the student's RoI module to predict the classes, $C_{\\mathrm{student}}$ , and bounding-box, $B_{\\mathrm{student}}$ , of each region proposal.", + "bbox": [ + 496, + 295, + 890, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Phase two begins by using the same combined region proposals, $P^{*}$ , generated in phase one as an input to the teachers RoI module to generate a matched set of pseudolabels:", + "bbox": [ + 496, + 356, + 890, + 415 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{C _ {p} ^ {*}, B _ {p} ^ {*} \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*}\\right). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 428, + 890, + 446 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The difference between $C_p$ and $C_p^*$ is that $C_p^*$ is derived from the same region proposals as that of the student predictions $C_{\\mathrm{student}}$ . This allows us to compare $C_{\\mathrm{student}}$ and $C_p^*$ directly:", + "bbox": [ + 496, + 455, + 890, + 518 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\{C _ {\\text {s t u d e n t}} (n), B _ {\\text {s t u d e n t}} (n) \\right\\} = \\operatorname {R o I} _ {\\text {s t u d e n t}} \\left(P ^ {*} (n)\\right), \\tag {5} \\\\ \\left\\{C _ {p} ^ {*} (n), B _ {p} ^ {*} (n) \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*} (n)\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 527, + 890, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $n = \\{1,2,\\dots,N\\}$ and $N$ is the number of region proposals in $P^*$ . This operation ensures that the knowledge of highly confident predictions generated by the teacher is distilled through to the student. In addition, information from less confident predictions can also be learnt. However, we are still required to penalise less confident samples and thus employ weighed KL-Divergence to be used as our consistency loss:", + "bbox": [ + 496, + 571, + 890, + 693 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {c o n s}} = \\alpha \\operatorname {K L} \\left(C _ {\\text {s t u d e n t}}, C _ {p} ^ {*}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 705, + 890, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha$ is the highest confidence of $C_p^*$ expressed as $\\alpha = \\max(C_p^*)$ ; KL() is the KL-divergence function. Note that, pseudo-bounding boxes are not used to generate unsupervised loss, as the confidence score of each pseudo-label represents the class information rather than the bounding box. The outputs of each segment of our two-phase approach are shown in Figure 3.", + "bbox": [ + 496, + 732, + 890, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Student-Scaling", + "text_level": 1, + "bbox": [ + 500, + 847, + 658, + 863 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In our investigation, we have found that scales of objects have a strong influence on object detection at night. This", + "bbox": [ + 496, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "11487", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Single Augmentation - NightAug" + ], + "code_body": "imgClean $\\leftarrow$ img \nif randFloat $\\geq 0.5$ then randFloat $\\leftarrow 0.8*$ randFloat $+0.2$ img $\\leftarrow$ augmentation(img, randval) prob $\\leftarrow 0.4$ while randFloat $\\geq$ prob do $x\\gets$ randInt(img.shape[1],2) $y\\gets$ randInt(img.shape[2],2) img[x,y] $\\leftarrow$ imgClean[x,y] prob $\\leftarrow$ prob +0.1 end while \nend if", + "bbox": [ + 91, + 109, + 369, + 291 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "is due to the features of smaller objects being easily overwhelmed by glare or noise. To allow the student to overcome this, we apply scaling augmentation to the student's inputs which includes both the image and the pseudo-labels generated by the teacher. 
As training proceeds, we follow a schedule to increase the scale of the student augmentation until it equals to that of the original image. By iteratively increasing the scale we allow the student to focus on smaller features earlier in the training process. This process encourages the teacher to make more accurate predictions on smaller scale objects in the later stages of training. In turn, accurate small scale pseudo-labels allow for the increase in the scale of the student's inputs with minimal errors due to scale.", + "bbox": [ + 75, + 320, + 467, + 530 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To ensure the knowledge of the previous scales is not forgotten, a gaussian function for the scaling factor is applied. The norm of the Gaussian function is obtained from the schedule values. To prevent additional noise due to pseudo-labels being too small, labels that has an area below a threshold are removed.", + "bbox": [ + 75, + 532, + 467, + 622 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. NightAug", + "text_level": 1, + "bbox": [ + 76, + 633, + 187, + 650 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Night images suffer from a range of complications that are not present in daytime scenes. This causes a problem in the student-teacher framework, where the student would be biased towards the source domain. Previous methods have attempted to address this, but have either required compute-intensive translations [7, 11] or adding additional domain classifiers to the framework [15] which complicates training. We propose NightAug, a nighttime specific augmentation pipeline that is compute-light and does not require training. NightAug consists of a series of augmentations with the aim of steering the characteristics of daytime images to resemble that of a nighttime image.", + "bbox": [ + 75, + 657, + 467, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The defining features of nighttime images are that they are darker and have lower contrast than daytime images. In addition the signal-to-night ratio (SNR) could be higher due to the properties of digital cameras such as luminance and", + "bbox": [ + 75, + 839, + 467, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/44477bcfe46dd0b404ebc19a5eabcfb95708b6ebf6fb6883592a6a5e4c257b7b.jpg", + "image_caption": [ + "Figure 4. NightAug: Original image (top-left) and images with random augmentations from: gaussian blur, gamma correction, brightness, contrast, glare, gaussian noise and random cut-outs." + ], + "image_footnote": [], + "bbox": [ + 501, + 89, + 696, + 260 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/28b82fb10b3a24991f39d715a100a831e46d1756277035c6f05cde59023c911f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 697, + 89, + 890, + 260 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "colour noise. Glare and glow from street lamps and headlights are also present in nighttime images. Additionally, images may be out-of-focus due to the cameras inability to detect reference points to focus on in dark environments.", + "bbox": [ + 496, + 349, + 890, + 411 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Keeping in mind the properties of nighttime images, our NightAug includes random; brightness, contrast, gamma, gaussian noise, gaussian blur augmentations and random glare insertion. The augmentations are randomly applied to the images and are also random in intensity. 
This randomness results in a wider variance of images that are exposed to the student leading to more robust training [30]. To further increase the variance of the images, at each augmentation step, random segments of the image will ignore the application of that augmentation. This allows for the representation where different areas of nighttime images may be unevenly lighted. This uneven lighting affects the above characteristics of the local region.", + "bbox": [ + 496, + 416, + 890, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A single augmentation flow of NightAug is demonstrated in Algorithm 1. Samples of an image processed with NightAug are shown in Figure 4. Each augmentation has a set probability of being applied, with the strength of the augmentation being random. Random regions of the augmented image may then be replaced with that of the original image. The probability of this region replacement reduces with each iteration.", + "bbox": [ + 496, + 617, + 890, + 737 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Overall Loss Our total loss can be represented as:", + "bbox": [ + 500, + 773, + 843, + 789 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {t o t a l}} = L _ {\\sup } + \\lambda L _ {\\text {u n s u p}}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 814, + 890, + 830 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda$ represents a weight factor for the unsupervised loss, and is set experimentally. $L_{\\mathrm{sup}}, L_{\\mathrm{unsup}}$ refer to Eq. (1) and Eq. (2), respectively.", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "11488", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/6ad51ceac8b2659525cb3f8303090968c0631920eb8a8bf84214e9d2a8c2bb84.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | AP | Pedestrian | Rider | Car | Truck | Bus | Motorcycle | Bicycle | Traffic Light | Traffic Sign
Lower-Bound | 41.1 | 50.0 | 28.9 | 66.6 | 47.8 | 47.5 | 32.8 | 39.5 | 41.0 | 56.5
Upper-Bound | 46.2 | 52.1 | 35.0 | 73.6 | 53.5 | 54.8 | 36.0 | 41.8 | 52.2 | 63.3
DA F-RCNN [3] | 41.3 | 50.4 | 30.3 | 66.3 | 46.8 | 48.3 | 32.6 | 41.4 | 41.0 | 56.2
TDD [11] | 34.6 | 43.1 | 20.7 | 68.4 | 33.3 | 35.6 | 16.5 | 25.9 | 43.1 | 59.5
UMT [7] | 36.2 | 46.5 | 26.1 | 46.8 | 44.0 | 46.3 | 28.2 | 40.2 | 31.6 | 52.7
AT [15] | 38.5 | 42.3 | 30.4 | 60.8 | 48.9 | 52.1 | 34.5 | 42.7 | 29.1 | 43.9
2PCNet (Ours) | 46.4 | 54.4 | 30.8 | 73.1 | 53.8 | 55.2 | 37.5 | 44.5 | 49.4 | 65.2
", + "bbox": [ + 78, + 88, + 893, + 250 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a67e50c51805e5d3272d20f75a585101e458778aa663a8acedc2e157070dc842.jpg", + "table_caption": [ + "Table 1. Results of day-to-night domain adaptation on the BDD100K dataset, the Average Precision (AP) of all classes are reported. Faster RCNN detector with ResNet-50 feature extractor is used for all experiments to ensure a fair comparison. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively. The lower-bound provides a baseline without any domain adaptation while the upper-bound is fully supervised, the case where labelled target night data is available." + ], + "table_footnote": [], + "table_body": "
Method | APcoco | Car | Bus | Truck
Lower-Bound | 22.1 | 37.5 | 29.8 | 30.7
Upper-Bound | 23.9 | 42.0 | 33.8 | 35.0
FDA [34] | 22.6 | 38.5 | 37.2 | 23.2
ForkGAN [38] | 22.9 | 41.2 | 33.3 | 32.1
2PCNet (Ours) | 23.5 | 40.7 | 38.2 | 35.0
", + "bbox": [ + 78, + 340, + 470, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Comparison of our framework, 2PCNet, with image-to-image (I2I) translation methods. Conducted on the BDD100K dataset. ForkGan and FDA are used for comparison. Reported $AP_{coco}$ is the averaged AP over IoUs 0.5 to 0.95.", + "bbox": [ + 75, + 465, + 468, + 522 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 540, + 209, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Baselines", + "text_level": 1, + "bbox": [ + 76, + 565, + 184, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate our method, we compare our approach with SOTA methods in domain adaptation for object detection. These include DA-Faster RCNN [3], TDD [11], UMT [7], AT [15] as well as a non-DA baseline Faster-RCNN [21]. Faster-RCNN is used as both our lower and upper-bound, where it is trained on labelled source and target data respectively. We additionally compare our approach with image-to-image translation methods, ForkGAN [38] and FDA [34]. Translation methods are trained on Faster RCNN with both the daytime and translated images.", + "bbox": [ + 75, + 590, + 468, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Datasets", + "text_level": 1, + "bbox": [ + 76, + 755, + 179, + 768 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The majority of existing nighttime datasets either focuses on semantic segmentation which do not provide labels for object detection [5, 23, 24], or contains very few classes [19, 20]. BDD100K [36] was selected as it provides object detection labels which includes a wide range of classes (10). It also has a large number of images compared to other DA datasets covering daytime, nighttime and other adverse conditions.", + "bbox": [ + 75, + 779, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The SHIFT [25] dataset is a recent simulated driving dataset that contains scenes in various environments. A continuous shift of these environments is available. SHIFT contains 6 class labels that share similarities to the BDD100K classes. For our evaluation, we use images with the 'day' and 'night' label as our source and target data respectively. We further ensure that the weather tag is 'clear' to isolate other weather conditions from the evaluation.", + "bbox": [ + 496, + 343, + 890, + 465 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Implementation", + "text_level": 1, + "bbox": [ + 498, + 477, + 660, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following previous SOTA methods, we employ Faster-RCNN [21] as our base detection model and ResNet-50 [10] pretrained on ImageNet [6] as our feature extractor. All images are scaled by resizing its shorter side to 600 pixels. For student-scaling we set a schedule for (0.57, 0.64, 0.71, 0.78, 0.85, 0.92) of the maximum iterations at scales (0.5, 0.6, 0.7, 0.8, 0.9, 1.0). Loss hyperparameters are set at $\\lambda = 0.3$ and the rate smooth coefficient parameter of the EMA is 0.9996. A confidence threshold of 0.8 for phase one of Two-Phase Consistency. For the initial pretraining of the student model, we train the student for 50k and 20k iterations on the source images, for BDD100K and SHIFT respectively. Supervised inputs are daytime images with and without NightAug. 
We then copy the weights to the teacher and continue training with the addition of unsupervised loss for an additional 50k iterations. The learning rate is kept at 0.04 throughout training. Our network is trained on 3 RTX3090 GPUs with a batch-size of 6 source and 6 target images.", + "bbox": [ + 496, + 501, + 892, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Comparison to SOTA", + "text_level": 1, + "bbox": [ + 498, + 800, + 700, + 816 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison on BDD100K We compare our method against the SOTA on real driving scenes and evaluating their domain adaptation performance on nighttime images, the results of this experiment can be seen on Table 1. The results show that our method achieves the highest perfor", + "bbox": [ + 496, + 824, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "11489", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/260a68fcdf8a6dfda8ed4d951a9e734559f34114dc70c808af48e92e2eeabd0c.jpg", + "image_caption": [ + "Figure 5. Qualitative results of Faster RCNN, Adaptive Teacher (AT) and our method on the SHIFT dataset with the ground-truth on the far right. We can observe that Faster RCNN is not able to detect objects due to absence of domain adaptation, while AT has a large number of small false positive bounding boxes compared to our method which closely resembles that of the ground-truth." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 243, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fa1c9a5062df328949db62faac6c53ac246b0a74b845931868e4ae6e10da8b1d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 245, + 90, + 405, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/118d34ec49e84b21fdf24b602e2d317adbad87c2f07af0c8f63bcb2c7fe80499.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 90, + 565, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/85899240049ff1fc50d9929c44f154e9f808a51c616d22cd1432f4ba874464f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 566, + 90, + 725, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/52216da15ee92e9d03f9f345c2ca5f962ae1e5bd73a17eec5bd7ac074b3d2650.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 725, + 90, + 887, + 335 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/826634909662486b1ab343b5849c94145be733bcd7b369139f600ddc99f42a01.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | AP | Per. | Car | Truck | Bus | Mcy. | Bcy.
Lower-Bound | 41.6 | 40.4 | 44.5 | 49.9 | 53.7 | 14.3 | 46.7
Upper-Bound | 47.0 | 49.7 | 51.5 | 56.0 | 53.6 | 19.2 | 52.4
DA FR [3] | 43.7 | 43.0 | 48.8 | 47.8 | 52.1 | 19.9 | 55.8
UMT [7] | 31.1 | 7.7 | 47.5 | 18.4 | 46.8 | 16.6 | 49.2
AT [15] | 38.9 | 25.8 | 33.0 | 54.7 | 49.5 | 20.7 | 52.3
2PCNet (Ours) | 49.1 | 51.4 | 54.6 | 54.8 | 56.6 | 23.9 | 54.2
", + "bbox": [ + 78, + 414, + 470, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Results of Day-to-Night domain adaptation on the SHIFT dataset. The Average Precision (AP) of all classes. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively.", + "bbox": [ + 75, + 546, + 468, + 602 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mance with an AP of 46.4. $20.5\\%$ higher than that of the SOTA student-teacher methods and above that of the upper-bound. We have observed in experiments that student-teacher methods underperforms with an AP below that of the lower-bound due to the error-propagation from noisy pseudo-labels. The result of the error is small false positive detections as seen in Figure 1. Our method does not suffer from the same allowing for higher performance. We can also observe that our method performs well across all classes. Even when compared with the upper-bound, 2PC-Net achieves higher AP on the majority of classes. This indicates that our method is able to generalise well across large and small classes.", + "bbox": [ + 75, + 625, + 468, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The comparison with image-to-image translation methods is shown in Table 2. Translation methods do not suffer from the error propagation problem as it is trained on Faster RCNN without a teacher. Even so, we can see that our method outperforms SOTA adverse vision translation", + "bbox": [ + 75, + 825, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "methods.", + "bbox": [ + 500, + 419, + 563, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison on SHIFT To further compare our method with SOTA we evaluate on the SHIFT simulation dataset. Due to the nature of the simulated data, many nighttime image characteristics that we have previously mention is not exhibited in this data such as blurriness, noise and glare.", + "bbox": [ + 496, + 444, + 890, + 520 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The results of this experiments are shown in Table 3. We can observe that previous SOTA methods that use the student-teacher framework perform worse than the lower-bound. The sub-par performance is again due to the error-propagation problem. AT performs better than UMT due to ATs inclusion of adversarial learning. However, adversarial learning is not enough to mitigate this problem. We can see that the performance of DA FRCNN outperforms both the SOTA student-teacher methods as it would not be affected by error-propagation. It is however, still largely below the upper-bound performance. 2PCNet outperforms these previous methods as well as the upperbound. We achieve an improvement of $+10.2$ AP over previous SOTA student-teacher methods and $+2.1$ AP over that of the upper-bound.", + "bbox": [ + 496, + 520, + 892, + 733 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 743, + 663, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To demonstrate the effectiveness of each of our components, we train several models for 100K iterations and evaluate them on the BDD100K dataset. 
We present our findings in Table 4.", + "bbox": [ + 496, + 768, + 890, + 828 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Two-Phase Consistency We can observe in Table 4 that the addition of Two-Phase Consistency (C) demonstrated a wide performance gap when compared to the Mean-Teacher baseline, +13.5 AP (43%). This improvement in AP ex", + "bbox": [ + 496, + 839, + 892, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "11490", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/808250427ec14cf82c8ac96e883ffd23fd2c6af9f58f5aeface3c8be797b87c7.jpg", + "image_caption": [ + "Figure 6. Training curve on BDD100K dataset ablation study. We show the overall AP training curve as well as the AP of large, medium and small objects. MT represents the base Mean Teacher framework. It can be seen that at all scales, the absence of Two-Phase Consistency (C) results in a sharp drop during training. We can also see that with the inclusion of NightAug (NA) and student-scaling (SS) the gradient of the curve increases. We note that the inclusion of a domain classifier (DC) reduces the performance at all scales." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 287, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b6705329c2169a0416d90e84e815498b1f0bfb53c6481a403cfb48ec48081d07.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 290, + 90, + 486, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/35530a358b9d008ad8b061628ac5c0ae9f9d1b58dc0f9a7ef67087c3e74bd327.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 488, + 90, + 684, + 215 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6e1f6579437a259fb07456dba92adee7672ddce723d1501354e0dc90cdc44b74.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 90, + 887, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ists across large, medium and small objects. While the performance of MT is initially strong, it rapidly begins to decline; which can be observed in Figure 6. This drop in performance is due to the error propagation of noisy pseudolabels. The experimental results show that Two-Phase Consistency is able to provide a solution. This ensures that highly confident pseudo-labels are bounded by less confident pseudo-label enabling a balance of knowledge into the student.", + "bbox": [ + 75, + 310, + 472, + 446 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "NightAug We benched marked the effectiveness of NightAug in our framework as shown in Table 4. The inclusion of NightAug increases the detection performance of small objects with an increase of $5\\%$ . Additionally, the gradient of the training performance remains steep as seen in Figure 6. The positive gradient is displayed most strongly for APm and APs where objects are more prone to nighttime specific complications.", + "bbox": [ + 75, + 455, + 468, + 578 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Student-Scaling Our final component, student-scaling, is included into the framework and the results can be seen in Table 4. We can observe that student-scaling is able to boost the performance of small object detection by $6\\%$ . This boost in performance is due to the student network focusing on smaller object earlier in the training process. 
We note that the performance of large objects have dropped by $1 - 2\\%$ ; however when referring to the training curves in Figure 6, API remains steep. As the initial focus is on smaller objects, less time is allocated to larger objects during training. This can be mitigated by lengthening training resulting in more iterations for larger objects.", + "bbox": [ + 75, + 588, + 470, + 770 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Domain Classifier To conclude our study, we included a domain classifier into our network. Adversarial learning is a widely used DA technique; however when added into 2PCNet, a performance drop across all scales can be seen. This drop is shown in Table 4. The suppression of nighttime features is suspected to be the cause. Suppression is present as the adversarial loss guides the feature extractor to maintain domain invariance. By suppressing nighttime fea", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4ee03232bc2b517cbeda29e935f6a541aad701501640833e034d50932f217a5a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<td colspan="4">Methods</td></tr><tr><td>C</td><td>NA</td><td>SS</td><td>DC</td><td>AP</td><td>APl</td><td>APm</td><td>APs</td></tr><tr>
<td>✓</td><td>✓</td><td>✓</td><td></td><td>46.4</td><td>41.7</td><td>25.8</td><td>9.1</td></tr><tr>
<td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>44.5</td><td>41.6</td><td>25.0</td><td>8.3</td></tr><tr>
<td>✓</td><td>✓</td><td></td><td></td><td>45.8</td><td>42.2</td><td>25.7</td><td>8.6</td></tr><tr>
<td>✓</td><td></td><td></td><td></td><td>45.2</td><td>42.9</td><td>25.7</td><td>8.2</td></tr><tr>
<td></td><td></td><td></td><td></td><td>31.7</td><td>30.4</td><td>16.5</td><td>4.8</td></tr></table>
", + "bbox": [ + 513, + 308, + 880, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablation studies on the BDD100K dataset. The last row represents the base Mean-Teacher network. Methods are referred to as, C: Two-Phase Consistency, NA: NightAug, SS: StudentScaling, DC: Domain Classifier. API, APm, and APs represent the AP of large, medium and small objects respectively.", + "bbox": [ + 496, + 441, + 893, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tures, the teacher has less information to distil to the student. This is demonstrated in Figure 6 where the domain classifier (dotted purple) initially performs well. But as training continues, our method (solid red) is able to surpass its performance.", + "bbox": [ + 496, + 530, + 890, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 623, + 617, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our proposed framework, 2PCNet, presents a novel solution to the challenges of day-to-night domain adaptive object detection. With our Two-Phase Consistency approach, we are able to effectively leverage high and low confidence knowledge for the student, while mitigating error propagation commonly present in previous student-teacher methods. We further address issues arising from small scale and dark objects through the use of student-scaling and NightAug, respectively. Experimental results on the e BDD100K [36] and SHIFT [25] datasets demonstrate that 2PCNet outperforms existing state-of-the-art methods. Overall, our proposed framework provides an effective and efficient solution for day-to-night domain adaptive object detection.", + "bbox": [ + 496, + 648, + 890, + 847 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements This work is partially supported by MOE2019-T2-1-130.", + "bbox": [ + 500, + 869, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "11491", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Qi Cai, Yingwei Pan, Chong-Wah Ngo, Xinmei Tian, Lingyu Duan, and Ting Yao. Exploring object relation in mean teacher for cross-domain detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11449-11458, 2019. 1", + "[2] Lin Chen, Huaian Chen, Zhixiang Wei, Xin Jin, Xiao Tan, Yi Jin, and Enhong Chen. Reusing the task-specific classifier as a discriminator: Discriminator-free adversarial domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7171-7180, 2022. 2", + "[3] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3339-3348, 2018. 1, 2, 6, 7", + "[4] Yuhua Chen, Haoran Wang, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Scale-aware domain adaptive faster r-cnn. International Journal of Computer Vision, page 2223-2243, 2021. 2", + "[5] Dengxin Dai and Luc Van Gool. Dark model adaptation: Semantic image segmentation from daytime to nighttime. In International Conference on Intelligent Transportation Systems (ITSC), pages 3819-3824, 2018. 
2, 6", + "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009. 6", + "[7] Jinhong Deng, Wen Li, Yuhua Chen, and Lixin Duan. Unbiased mean teacher for cross-domain object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4089-4099, 2021. 1, 2, 4, 5, 6, 7", + "[8] Xueqing Deng, Peng Wang, Xiaochen Lian, and Shawn Newsam. NightLab: A Dual-Level Architecture With Hardness Detection for Segmentation at Night. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16938-16948, 2022. 2", + "[9] Huan Gao, Jichang Guo, Guoli Wang, and Qian Zhang. Cross-Domain Correlation Distillation for Unsupervised Domain Adaptation in Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9913-9923, 2022. 2", + "[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 6", + "[11] Mengzhe He, Yali Wang, Jiaxi Wu, Yiru Wang, Hanqing Li, Bo Li, Weihao Gan, Wei Wu, and Yu Qiao. Cross domain object detection by target-perceived dual branch distillation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9560-9570, 2022. 1, 2, 5, 6", + "[12] Guoliang Kang, Lu Jiang, Yunchao Wei, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for single- and multi-source domain adaptation. IEEE Transactions on Pattern Analysis amp; Machine Intelligence, pages 1793–1804, 2022. 2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4888-4897, 2019. 2", + "[14] Attila Lengyel, Sourav Garg, Michael Milford, and Jan C. van Gemert. Zero-shot day-night domain adaptation with a physics prior. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 4379-4389, 2021. 2", + "[15] Yu-Jhe Li, Xiaoliang Dai, Chih-Yao Ma, Yen-Cheng Liu, Kan Chen, Bichen Wu, Zijian He, Kris Kitani, and Peter Vajda. Cross-domain adaptive teacher for object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7571-7580, 2022. 1, 2, 4, 5, 6, 7", + "[16] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael I. Jordan. Learning transferable features with deep adaptation networks. In International Conference on International Conference on Machine Learning, page 97-105, 2015. 2", + "[17] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Unsupervised domain adaptation with residual transfer networks. In International Conference on Neural Information Processing Systems, page 136-144, 2016. 2", + "[18] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Deep transfer learning with joint adaptation networks. In International Conference on Machine Learning, page 2208-2217, 2017. 2", + "[19] Igor Morawski, Yu-An Chen, Yu-Sheng Lin, and Winston H. Hsu. Nod: Taking a closer look at detection under extreme low-light conditions with night object detection dataset. In British Machine Vision Conference, (BMVC), 2021. 
6", + "[20] Lukás Neumann, Michelle Karg, Shanshan Zhang, Christian Scharfenberger, Ericiegert, Sarah Mistr, Olga Prokofyeva, Robert Thiel, Andrea Vedaldi, Andrew Zisserman, and Bernt Schiele. Nightowls: A pedestrians at night dataset. In Asian Conference on Computer Vision (ACCV), pages 691-705, 2018. 6", + "[21] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In International Conference on Neural Information Processing Systems, page 91-99, 2015. 6", + "[22] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6949–6958, 2019. 1", + "[23] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Guided curriculum model adaptation and uncertainty-aware evaluation for semantic nighttime image segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7373-7382, 2019. 2, 6", + "[24] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for semantic driving scene understanding. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 10745-10755, 2021. 6", + "[25] Tao Sun, Mattia Segu, Janis Postels, Yuxuan Wang, Luc Van Gool, Bernt Schiele, Federico Tombari, and Fisher Yu. Shift:" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "11492", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "A synthetic driving dataset for continuous multi-task domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21339-21350, 2022. 6, 8", + "[26] Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In International Conference on Neural Information Processing Systems, page 1195–1204, 2017. 2, 4", + "[27] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2962-2971, 2017. 2", + "[28] Sinan Wang, Xinyang Chen, Yunbo Wang, Mingsheng Long, and Jianmin Wang. Progressive adversarial networks for fine-grained domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9210-9219, 2020. 2", + "[29] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15769–15778, 2021. 2", + "[30] Qizhe Xie, Minh-Thang Luong, Eduard Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2020. 5", + "[31] Chang-Dong Xu, Xingjie Zhao, Xin Jin, and Xiu-Shen Wei. Exploring categorical regularization for domain adaptive object detection. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11721-11730, 2020. 2", + "[32] Minghao Xu, Hang Wang, Bingbing Ni, Qi Tian, and Wenjun Zhang. Cross-domain detection via graph-induced prototype alignment. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12352-12361, 2020. 
1", + "[33] Qi Xu, Yinan Ma, Jing Wu, Chengnian Long, and Xiaolin Huang. CDAda: A Curriculum Domain Adaptation for Nighttime Semantic Segmentation. In IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), pages 2962-2971, 2021. 2", + "[34] Yanchao Yang and Stefano Soatto. FDA: Fourier domain adaptation for semantic segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4084-4094, 2020. 6", + "[35] Junjie Ye, Changhong Fu, Guangze Zheng, Danda Pani Paudel, and Guang Chen. Unsupervised Domain Adaptation for Nighttime Aerial Tracking. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8896-8905, 2022. 2", + "[36] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2633-2642, 2020. 1, 6, 8" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Weichen Zhang, Wanli Ouyang, Wen Li, and Dong Xu. Collaborative and adversarial network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3801-3809, 2018. 2", + "[38] Ziqiang Zheng, Yang Wu, Xinran Nicole Han, and Jianbo Shi. Forkgan: Seeing into the rainy night. In European Conference on Computer Vision (ECCV), 2020. 6", + "[39] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2242-2251, 2017. 2" + ], + "bbox": [ + 501, + 92, + 890, + 258 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "11493", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_model.json b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4a2b6b152bc2269ab08739549a75be84afe25f9f --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_model.json @@ -0,0 +1,2046 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.131, + 0.785, + 0.177 + ], + "angle": 0, + "content": "2PCNet: Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.215, + 0.859, + 0.284 + ], + "angle": 0, + "content": "Mikhail Kennerley\\(^{1,2}\\), Jian-Gang Wang\\(^{2}\\), Bharadwaj Veeravalli\\(^{1}\\), and Robby T. 
Tan\\(^{1}\\) \n\\(^{1}\\)National University of Singapore, Department of Electrical and Computer Engineering \n\\(^{2}\\)Institute for Infocomm Research, A*STAR \nmikhailk@u.nus.edu, jgwang@i2r.a-star.edu.sg, elebv@nus.edu.sg, robby.tan@nus.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.327, + 0.314, + 0.344 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.359, + 0.474, + 0.722 + ], + "angle": 0, + "content": "Object detection at night is a challenging problem due to the absence of night image annotations. Despite several domain adaptation methods, achieving high-precision results remains an issue. False-positive error propagation is still observed in methods using the well-established student-teacher framework, particularly for small-scale and low-light objects. This paper proposes a two-phase consistency unsupervised domain adaptation network, 2PCNet, to address these issues. The network employs high-confidence bounding-box predictions from the teacher in the first phase and appends them to the student's region proposals for the teacher to re-evaluate in the second phase, resulting in a combination of high and low confidence pseudo-labels. The night images and pseudo-labels are scaled-down before being used as input to the student, providing stronger small-scale pseudo-labels. To address errors that arise from low-light regions and other night-related attributes in images, we propose a night-specific augmentation pipeline called NightAug. This pipeline involves applying random augmentations, such as glare, blur, and noise, to daytime images. Experiments on publicly available datasets demonstrate that our method achieves superior results to state-of-the-art methods by \\(20\\%\\), and to supervised models trained directly on the target data." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.748, + 0.21, + 0.765 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.774, + 0.47, + 0.88 + ], + "angle": 0, + "content": "Nighttime object detection is critical in many applications. However, the requirement of annotated data by supervised methods is impractical, since night data with annotations is few, and supervised methods are generally prone to overfitting to the training data. Among other reasons, this scarcity is due to poor lighting conditions which makes nighttime images hard to annotate. Hence, methods that" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.328, + 0.697, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.411, + 0.657, + 0.422 + ], + "angle": 0, + "content": "DA Faster-RCNN" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.329, + 0.886, + 0.411 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.776, + 0.411, + 0.806, + 0.422 + ], + "angle": 0, + "content": "UMT" + }, + { + "type": "image", + "bbox": [ + 0.509, + 0.425, + 0.697, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.51, + 0.611, + 0.519 + ], + "angle": 0, + "content": "AT" + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.425, + 0.886, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.509, + 0.838, + 0.521 + ], + "angle": 0, + "content": "2PCNet (Ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.534, + 0.892, + 0.604 + ], + "angle": 0, + "content": "Figure 1. 
Qualitative results of state-of-the-art DA methods, DA Faster-RCNN [3], UMT [7], Adaptive Teacher (AT) [15] and our method 2PCNet on the BDD100K [36] dataset. Unlike the SOTA methods, our method is able to detect dark and small scale objects with minimal additional false positive predictions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.639, + 0.892, + 0.701 + ], + "angle": 0, + "content": "do not assume the availability of the annotations are more advantageous. Domain adaptation (DA) is an efficient solution to this problem by allowing the use of readily available annotated source daytime datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.893, + 0.901 + ], + "angle": 0, + "content": "A few domain adaptation methods have been proposed, e.g., adversarial learning which uses image and instance level classifiers [3] and similar concepts [22, 32]. However, these methods isolate the domain adaptation task purely towards the feature extractor, and suppress features of the target data for the sake of domain invariance. Recent unsupervised domain adaptation methods exploit the studentteacher framework (e.g. [1,7,11,15]). Since the student initially learns from the supervised loss, there is a bias towards the source data. Augmentation [7, 11] and adversarial learning [15] have been proposed to address this problem. Unfortunately, particularly for day-to-night unsupervised domain adaptation, these methods suffer from a large num" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.341, + 0.9 + ], + "angle": 0, + "content": "1www.github.com/mercarill/2pcnet" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11484" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "ber of inaccurate pseudo-labels produced by the teacher. In our investigation, the problem is notably due to insufficient knowledge of small scale features in the nighttime domain, which are then propagated through the learning process between the teacher and student, resulting in poor object detection performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.183, + 0.471, + 0.395 + ], + "angle": 0, + "content": "To address the problem, in this paper, we present 2PC-Net, a two-phase consistency unsupervised domain adaptation network for nighttime object detection. Our 2PCNet merges the bounding-boxes of highly-confident pseudolabels, which are predicted in phase one, together with regions proposed by the student's region proposal network (RPN). The merged proposals are then used by the teacher to generate a new set of pseudo-labels in phase two. This provides a combination of high and low confidence pseudolabels. These pseudo-labels are then matched with predictions generated by the student. We can then utilise a weighted consistency loss to ensure that a higher weightage of our unsupervised loss is based on stronger pseudo-labels, yet allow for weaker pseudo-labels to influence the training." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.396, + 0.47, + 0.531 + ], + "angle": 0, + "content": "Equipped with this two-phase strategy, we address the problem of errors from small-scale objects. We devise a student-scaling technique, where night images and their pseudo-labels for the student are deliberately scaled down. In order to generate accurate pseudo-labels, images to the teacher remain at their full scale. 
This results in the pseudolabels of larger objects, which are easier to predict, to be scaled down to smaller objects, allowing for an increase in small scale performance of the student." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.533, + 0.471, + 0.76 + ], + "angle": 0, + "content": "Nighttime images suffer from multiple complications not found in daytime scenes such as dark regions, glare, prominent noise, prominent blur, imbalanced lighting, etc. All these cause a problem, since the student, which was trained on daytime images, is much more biased towards the daytime domain's characteristics. To mitigate this problem, we propose NightAug, a set of random nighttime specific augmentations. NightAug includes adding artificial glare, noise, blur, etc. that mimic the night conditions to daytime images. With NightAug we are able to reduce the bias of the student network towards the source data without resulting in adversarial learning or compute-intensive translations. Overall, using 2PCNet, we can see the qualitative improvements of our result in Figure 1. In summary, the contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.772, + 0.47, + 0.864 + ], + "angle": 0, + "content": "- We present 2PCNet, a two-phase consistency approach for student-teacher learning. 2PCNet takes advantage of highly confident teacher labels augmented with less confident regions, which are proposed by the scaled student. This strategy produces a sharp reduction of the error propagation in the learning process." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "- To address the bias of the student towards the source domain, we propose NightAug, a random night spe" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.772, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "cific augmentation pipeline to shift the characteristics of daytime images toward nighttime." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.129, + 0.894, + 0.205 + ], + "angle": 0, + "content": "- The effectiveness of our approach has been verified by comparing it with the state-of-the-art domain adaptation approaches. An improvement of \\(+7.9\\mathrm{AP}(+20\\%)\\) and \\(+10.2\\mathrm{AP}(26\\%)\\) over the SOTA on BDD100K and SHIFT has been achieved, respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.228, + 0.642, + 0.244 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.254, + 0.892, + 0.451 + ], + "angle": 0, + "content": "Unsupervised Domain Adaptation (UDA) Unsupervised domain adaptation aims to learn transferable features to reduce the discrepancy between a labelled source and unlabelled target domain. Previous works minimised the distance metric (MMD) [16-18] and considered intra-class and inter-class discrepancy [12, 13]. Adversarial feature learning involved adding an adversarial classifier to play the min-max game between the domain discriminator and feature extractors to generate a domain invariant feature map [27, 28, 37]. These methods have been applied to image classification. Our work focuses on object detection, which is more complex as it involves identifying multiple bounding boxes and associated classes in each image." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.458, + 0.892, + 0.671 + ], + "angle": 0, + "content": "UDA for Object Detection Object detection with UDA is a recent challenge due to the complexities of identifying multiple objects in an image. DA-Faster RCNN [3] integrated adversarial learning with image and instance level classifiers, and several approaches have been proposed to improve on this method by introducing scale-awareness [4], class specific discriminators [31], and re-purposing the task-specific classifier as a discriminator [2]. The Mean Teacher (MT) framework [26] has been adopted in semi-supervised methods, such as UMT [7], which incorporates CycleGAN [39] augmented images; AT [15], which combines the student-teacher framework with adversarial learning; and TDD [11], which uses dual student-teacher networks with style transfer." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.841 + ], + "angle": 0, + "content": "Nighttime UDA The majority of research on unsupervised domain adaptation (UDA) in nighttime scenarios has focused on semantic segmentation [5, 8, 9, 14, 23, 29, 33]. Translation and style transformation techniques are commonly used to reduce the domain gap between the source and target domains in these methods [8,29,33]. Some UDA-based techniques for nighttime also utilise paired-images to generate a shared feature space [23], while others use an intermediate domain such as twilight to reduce the domain gap during unsupervised learning [5]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Nighttime tracking has also been investigated where adversarial transformers are used to close the domain gap [35]. However, there is a gap in research when it comes to applying UDA techniques in the object detection task for night-" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11485" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.891, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.424, + 0.893, + 0.496 + ], + "angle": 0, + "content": "Figure 2. Overview of our proposed framework, 2PCNet. 2PCNet consists of: A student network is trained on both the labelled daytime image, which has been augmented with NightAug, and unlabelled nighttime images. A teacher network which is the exponential moving average (EMA) of the student and provides matched pseudo-labels for unsupervised loss. The match pseudo-labels are the predictions of the teacher (phase two) using the RPN proposals of the student, which in turn was guided by the high confidence pseudo-labels of the teacher (phase one)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.521, + 0.471, + 0.566 + ], + "angle": 0, + "content": "time scenarios. Therefore, we explore the application of UDA techniques in object detection under low-light and nighttime conditions." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.584, + 0.251, + 0.602 + ], + "angle": 0, + "content": "3. Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.61, + 0.47, + 0.702 + ], + "angle": 0, + "content": "Let \\(\\mathbf{D}_s\\) be the daytime source data. \\(\\mathbf{D}_s = \\{I_s, C_s, B_s\\}\\), where the variables refer to the image, class label and bounding-box label, respectively. Index \\(s\\) indicates the daytime source. 
The night target data is represented by \\(\\mathbf{D}_t\\), where \\(\\mathbf{D}_t = \\{I_t\\}\\) as we do not have the target labels available to us. Index \\(t\\) indicates the nighttime target." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.703, + 0.471, + 0.853 + ], + "angle": 0, + "content": "The architecture of our 2PCNet is shown in Figure 2. Our 2PCNet consists of a student and a teacher network. The student is a multi-domain network trained on both labelled daytime images, augmented with NightAug, and unlabelled nighttime images. The teacher focuses on night images to produce pseudo-labels for the student and is the exponential moving average (EMA) of the student. After an initial pretraining phase, the teacher begins producing pseudo-labels, which allows the student to initialise the feature extractor and detector." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "During each iteration, in phase one of 2PCNet, the teacher produces pseudo-labels from the night images. These pseudo-labels are filtered through a confidence" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.893, + 0.656 + ], + "angle": 0, + "content": "threshold. This is to ensure only high-confidence pseudolabels are given to the student. The bounding-boxes from the pseudo-labels are then combined with the region proposals generated by the student's RPN. The merged region proposals are then used to generate predictions from the student's RoI network. In phase two, the teacher utilises the same merged region proposals to generate a matched set of pseudo-labels, where each pseudo-label has its corresponding prediction obtained from the student." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.706 + ], + "angle": 0, + "content": "As mentioned earlier, our student network is initialised by pretraining for a set number of iterations. This is done with supervised loss on the augmented daytime images:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.726, + 0.892, + 0.743 + ], + "angle": 0, + "content": "\\[\nL _ {\\sup } = L _ {\\operatorname {r p n}} \\left(B _ {s}, I _ {s}\\right) + L _ {\\operatorname {r o i}} \\left(B _ {s}, C _ {s}, I _ {s}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.762, + 0.891, + 0.823 + ], + "angle": 0, + "content": "where \\( L_{\\mathrm{rpn}} \\) represents the loss from the RPN, which consists of an objectness and bounding-box regression loss. \\( L_{\\mathrm{roi}} \\) represents the loss from the detector network, consisting of a classification and bounding-box regression loss." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Once the pretraining is completed, the student's weights are then transferred over to the teacher. In the succeeding iterations, the teacher's weights are the exponential moving average (EMA) of the student's. 
The matched pseudo-labels generated by the teacher, \\(\\{C_p^*, B_p^*\\}\\), are then used to guide" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11486" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.283, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.092, + 0.483, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.282, + 0.47, + 0.364 + ], + "angle": 0, + "content": "Figure 3. (Left to Right, Top to Bottom) Ground truth bounding boxes, bounding boxes predicted by the teacher with non-maximal suppression (NMS) and thresholding \\((B_{p})\\), bounding boxes predicted by the student \\((B_{\\mathrm{student}})\\) which is guided by \\(B_{p}\\), and the bounding boxes predicted by the teacher \\((B_{p}^{*})\\) for the consistency loss." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.394, + 0.3, + 0.409 + ], + "angle": 0, + "content": "the unsupervised loss, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.424, + 0.469, + 0.443 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {u n s u p}} = L _ {\\text {r p n}} ^ {\\text {o b j}} \\left(C _ {p} ^ {*}; I _ {t}\\right) + L _ {\\text {c o n s}} \\left(C _ {p} ^ {*}; I _ {t}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.455, + 0.469, + 0.502 + ], + "angle": 0, + "content": "where \\( L_{\\mathrm{rpn}}^{\\mathrm{obj}} \\) is the objectness loss of the RPN and \\( L_{\\mathrm{cons}} \\) is the weighted KL-Divergence loss from the predicted outputs which we will further explain in the next section." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.513, + 0.294, + 0.529 + ], + "angle": 0, + "content": "3.1. Two-Phase Consistency" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.538, + 0.47, + 0.734 + ], + "angle": 0, + "content": "Due to the large domain gap between daytime source images and nighttime target images, the teacher is unable to produce high quality pseudo-labels. This generally occurs in the whole scene, but particularly for regions with strong night characteristics, e.g., low-light, glare, uneven lighting, etc. The teacher produces confident pseudo-labels only for regions that share more similarities to the daytime, since it is biased towards the daytime domain. This bias poses a problem for methods that employ a hard-threshold to filter pseudo-labels for categorical cross-entropy loss [7, 15, 26]. The remaining pseudo-labels contain only easy samples with daytime attributes. Consequently, the student does not learn from harder (e.g. darker) areas." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.87 + ], + "angle": 0, + "content": "As a result of minimal knowledge of the hard samples (i.e., areas with a high level of nighttime attributes), the teacher begins to predict highly confident yet incorrect pseudo-labels. As the teacher provides these incorrect pseudo-labels to the student, a viscous cycle starts where the teacher in turn is updated with incorrect knowledge. Consequently, the error continues to propagate through training. In our case, these errors notably occur in dark/glare regions and as small scale objects." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "To address the problem of error propagation, we design a two-phase approach that combines high confidence" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.257 + ], + "angle": 0, + "content": "pseudo-labels together with their less confident counterparts. This combination allows for the high accuracy of confident-labels with the additional knowledge of less confident labels to be distilled onto the student. In phase one, the unlabelled nighttime image, \\( I_{t} \\), is used as an input for the teacher to generate pseudo-labels. These pseudo-labels are filtered with a threshold to retain only high-confidence pseudo-labels, \\( (C_p, B_p) \\). The bounding-box of the pseudolabels, \\( B_{p} \\), is then used as an input to the student. \\( B_{p} \\) is concatenated to the region proposals generated by the student RPN module:" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.269, + 0.892, + 0.286 + ], + "angle": 0, + "content": "\\[\nP ^ {*} = \\operatorname {R P N} _ {\\text {s t u d e n t}} \\left(I _ {t}\\right) \\neq B _ {p}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.296, + 0.891, + 0.357 + ], + "angle": 0, + "content": "where \\( P^{*} \\) is the combined region proposals, which are then used as an input to the student's RoI module to predict the classes, \\( C_{\\mathrm{student}} \\), and bounding-box, \\( B_{\\mathrm{student}} \\), of each region proposal." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.416 + ], + "angle": 0, + "content": "Phase two begins by using the same combined region proposals, \\( P^{*} \\), generated in phase one as an input to the teachers RoI module to generate a matched set of pseudolabels:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.429, + 0.891, + 0.448 + ], + "angle": 0, + "content": "\\[\n\\left\\{C _ {p} ^ {*}, B _ {p} ^ {*} \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*}\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.456, + 0.892, + 0.52 + ], + "angle": 0, + "content": "The difference between \\( C_p \\) and \\( C_p^* \\) is that \\( C_p^* \\) is derived from the same region proposals as that of the student predictions \\( C_{\\mathrm{student}} \\). This allows us to compare \\( C_{\\mathrm{student}} \\) and \\( C_p^* \\) directly:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.528, + 0.892, + 0.565 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\{C _ {\\text {s t u d e n t}} (n), B _ {\\text {s t u d e n t}} (n) \\right\\} = \\operatorname {R o I} _ {\\text {s t u d e n t}} \\left(P ^ {*} (n)\\right), \\tag {5} \\\\ \\left\\{C _ {p} ^ {*} (n), B _ {p} ^ {*} (n) \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*} (n)\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.573, + 0.892, + 0.694 + ], + "angle": 0, + "content": "where \\( n = \\{1,2,\\dots,N\\} \\) and \\( N \\) is the number of region proposals in \\( P^* \\). This operation ensures that the knowledge of highly confident predictions generated by the teacher is distilled through to the student. In addition, information from less confident predictions can also be learnt. 
However, we are still required to penalise less confident samples and thus employ weighed KL-Divergence to be used as our consistency loss:" + }, + { + "type": "equation", + "bbox": [ + 0.596, + 0.706, + 0.891, + 0.723 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {c o n s}} = \\alpha \\operatorname {K L} \\left(C _ {\\text {s t u d e n t}}, C _ {p} ^ {*}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.733, + 0.892, + 0.84 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is the highest confidence of \\(C_p^*\\) expressed as \\(\\alpha = \\max(C_p^*)\\); KL() is the KL-divergence function. Note that, pseudo-bounding boxes are not used to generate unsupervised loss, as the confidence score of each pseudo-label represents the class information rather than the bounding box. The outputs of each segment of our two-phase approach are shown in Figure 3." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.66, + 0.864 + ], + "angle": 0, + "content": "3.2. Student-Scaling" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In our investigation, we have found that scales of objects have a strong influence on object detection at night. This" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "11487" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.081, + 0.092, + 0.388, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 Single Augmentation - NightAug" + }, + { + "type": "algorithm", + "bbox": [ + 0.092, + 0.111, + 0.37, + 0.292 + ], + "angle": 0, + "content": "imgClean \\(\\leftarrow\\) img \nif randFloat \\(\\geq 0.5\\) then randFloat \\(\\leftarrow 0.8*\\) randFloat \\(+0.2\\) img \\(\\leftarrow\\) augmentation(img, randval) prob \\(\\leftarrow 0.4\\) while randFloat \\(\\geq\\) prob do \\(x\\gets\\) randInt(img.shape[1],2) \\(y\\gets\\) randInt(img.shape[2],2) img[x,y] \\(\\leftarrow\\) imgClean[x,y] prob \\(\\leftarrow\\) prob +0.1 end while \nend if" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.321, + 0.468, + 0.531 + ], + "angle": 0, + "content": "is due to the features of smaller objects being easily overwhelmed by glare or noise. To allow the student to overcome this, we apply scaling augmentation to the student's inputs which includes both the image and the pseudo-labels generated by the teacher. As training proceeds, we follow a schedule to increase the scale of the student augmentation until it equals to that of the original image. By iteratively increasing the scale we allow the student to focus on smaller features earlier in the training process. This process encourages the teacher to make more accurate predictions on smaller scale objects in the later stages of training. In turn, accurate small scale pseudo-labels allow for the increase in the scale of the student's inputs with minimal errors due to scale." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.533, + 0.468, + 0.623 + ], + "angle": 0, + "content": "To ensure the knowledge of the previous scales is not forgotten, a gaussian function for the scaling factor is applied. The norm of the Gaussian function is obtained from the schedule values. To prevent additional noise due to pseudo-labels being too small, labels that has an area below a threshold are removed." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.635, + 0.188, + 0.651 + ], + "angle": 0, + "content": "3.3. 
NightAug" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.468, + 0.84 + ], + "angle": 0, + "content": "Night images suffer from a range of complications that are not present in daytime scenes. This causes a problem in the student-teacher framework, where the student would be biased towards the source domain. Previous methods have attempted to address this, but have either required compute-intensive translations [7, 11] or adding additional domain classifiers to the framework [15] which complicates training. We propose NightAug, a nighttime specific augmentation pipeline that is compute-light and does not require training. NightAug consists of a series of augmentations with the aim of steering the characteristics of daytime images to resemble that of a nighttime image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.468, + 0.901 + ], + "angle": 0, + "content": "The defining features of nighttime images are that they are darker and have lower contrast than daytime images. In addition the signal-to-night ratio (SNR) could be higher due to the properties of digital cameras such as luminance and" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.09, + 0.697, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.698, + 0.09, + 0.892, + 0.261 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.271, + 0.892, + 0.314 + ], + "angle": 0, + "content": "Figure 4. NightAug: Original image (top-left) and images with random augmentations from: gaussian blur, gamma correction, brightness, contrast, glare, gaussian noise and random cut-outs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.351, + 0.892, + 0.412 + ], + "angle": 0, + "content": "colour noise. Glare and glow from street lamps and headlights are also present in nighttime images. Additionally, images may be out-of-focus due to the cameras inability to detect reference points to focus on in dark environments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.417, + 0.892, + 0.613 + ], + "angle": 0, + "content": "Keeping in mind the properties of nighttime images, our NightAug includes random; brightness, contrast, gamma, gaussian noise, gaussian blur augmentations and random glare insertion. The augmentations are randomly applied to the images and are also random in intensity. This randomness results in a wider variance of images that are exposed to the student leading to more robust training [30]. To further increase the variance of the images, at each augmentation step, random segments of the image will ignore the application of that augmentation. This allows for the representation where different areas of nighttime images may be unevenly lighted. This uneven lighting affects the above characteristics of the local region." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.618, + 0.892, + 0.738 + ], + "angle": 0, + "content": "A single augmentation flow of NightAug is demonstrated in Algorithm 1. Samples of an image processed with NightAug are shown in Figure 4. Each augmentation has a set probability of being applied, with the strength of the augmentation being random. Random regions of the augmented image may then be replaced with that of the original image. The probability of this region replacement reduces with each iteration." 
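A minimal, dependency-light sketch of one NightAug-style pass follows, for readers who want to see the recipe above as code. It is not the authors' released implementation: the probabilities, parameter ranges, glare model and function names are assumptions; only the overall idea — randomly applied night-like corruptions followed by reverting random regions to the clean image with a decaying probability — follows Algorithm 1 and Figure 4. Gaussian blur is omitted to keep the sketch NumPy-only.

```python
import numpy as np

rng = np.random.default_rng()

def _revert_random_regions(aug, clean, start_prob=0.4):
    # Paste back random clean patches to mimic unevenly lit night scenes;
    # the chance of another revert decays by 0.1 each pass, as in Algorithm 1.
    prob = start_prob
    while rng.random() >= prob:
        h, w = aug.shape[:2]
        y0, y1 = np.sort(rng.integers(0, h, size=2))
        x0, x1 = np.sort(rng.integers(0, w, size=2))
        aug[y0:y1, x0:x1] = clean[y0:y1, x0:x1]
        prob += 0.1
    return aug

def night_aug(img):
    """img: float32 RGB array in [0, 1]; returns a night-styled copy."""
    out = img.astype(np.float32).copy()
    clean = out.copy()
    if rng.random() < 0.5:  # darken with a random gamma
        out = out ** rng.uniform(1.5, 3.0)
    if rng.random() < 0.5:  # lower brightness and contrast
        out = np.clip(rng.uniform(0.4, 0.9) * (out - 0.5) + rng.uniform(0.15, 0.4), 0.0, 1.0)
    if rng.random() < 0.5:  # additive sensor noise
        out = np.clip(out + rng.normal(0.0, rng.uniform(0.01, 0.05), out.shape), 0.0, 1.0)
    if rng.random() < 0.3:  # crude Gaussian glare blob around a random point
        h, w = out.shape[:2]
        yy, xx = np.mgrid[0:h, 0:w]
        cy, cx = rng.integers(0, h), rng.integers(0, w)
        sigma = 0.1 * max(h, w)
        blob = np.exp(-((yy - cy) ** 2 + (xx - cx) ** 2) / (2.0 * sigma ** 2))
        out = np.clip(out + blob[..., None] * rng.uniform(0.3, 0.8), 0.0, 1.0)
    return _revert_random_regions(out, clean)
```

In the training loop, such an augmentation would be applied only to the labelled daytime images fed to the student; the teacher's nighttime inputs are left untouched.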
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.775, + 0.844, + 0.79 + ], + "angle": 0, + "content": "Overall Loss Our total loss can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.815, + 0.891, + 0.832 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {t o t a l}} = L _ {\\sup } + \\lambda L _ {\\text {u n s u p}}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\lambda\\) represents a weight factor for the unsupervised loss, and is set experimentally. \\(L_{\\mathrm{sup}}, L_{\\mathrm{unsup}}\\) refer to Eq. (1) and Eq. (2), respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.956 + ], + "angle": 0, + "content": "11488" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.894, + 0.251 + ], + "angle": 0, + "content": "
<td>Method</td><td>AP</td><td>Pedestrian</td><td>Rider</td><td>Car</td><td>Truck</td><td>Bus</td><td>Motorcycle</td><td>Bicycle</td><td>Traffic Light</td><td>Traffic Sign</td></tr><tr>
<td>Lower-Bound</td><td>41.1</td><td>50.0</td><td>28.9</td><td>66.6</td><td>47.8</td><td>47.5</td><td>32.8</td><td>39.5</td><td>41.0</td><td>56.5</td></tr><tr>
<td>Upper-Bound</td><td>46.2</td><td>52.1</td><td>35.0</td><td>73.6</td><td>53.5</td><td>54.8</td><td>36.0</td><td>41.8</td><td>52.2</td><td>63.3</td></tr><tr>
<td>DA F-RCNN [3]</td><td>41.3</td><td>50.4</td><td>30.3</td><td>66.3</td><td>46.8</td><td>48.3</td><td>32.6</td><td>41.4</td><td>41.0</td><td>56.2</td></tr><tr>
<td>TDD [11]</td><td>34.6</td><td>43.1</td><td>20.7</td><td>68.4</td><td>33.3</td><td>35.6</td><td>16.5</td><td>25.9</td><td>43.1</td><td>59.5</td></tr><tr>
<td>UMT [7]</td><td>36.2</td><td>46.5</td><td>26.1</td><td>46.8</td><td>44.0</td><td>46.3</td><td>28.2</td><td>40.2</td><td>31.6</td><td>52.7</td></tr><tr>
<td>AT [15]</td><td>38.5</td><td>42.3</td><td>30.4</td><td>60.8</td><td>48.9</td><td>52.1</td><td>34.5</td><td>42.7</td><td>29.1</td><td>43.9</td></tr><tr>
<td>2PCNet (Ours)</td><td>46.4</td><td>54.4</td><td>30.8</td><td>73.1</td><td>53.8</td><td>55.2</td><td>37.5</td><td>44.5</td><td>49.4</td><td>65.2</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.262, + 0.893, + 0.318 + ], + "angle": 0, + "content": "Table 1. Results of day-to-night domain adaptation on the BDD100K dataset, the Average Precision (AP) of all classes are reported. Faster RCNN detector with ResNet-50 feature extractor is used for all experiments to ensure a fair comparison. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively. The lower-bound provides a baseline without any domain adaptation while the upper-bound is fully supervised, the case where labelled target night data is available." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.342, + 0.472, + 0.455 + ], + "angle": 0, + "content": "
<td>Method</td><td>APcoco</td><td>Car</td><td>Bus</td><td>Truck</td></tr><tr>
<td>Lower-Bound</td><td>22.1</td><td>37.5</td><td>29.8</td><td>30.7</td></tr><tr>
<td>Upper-Bound</td><td>23.9</td><td>42.0</td><td>33.8</td><td>35.0</td></tr><tr>
<td>FDA [34]</td><td>22.6</td><td>38.5</td><td>37.2</td><td>23.2</td></tr><tr>
<td>ForkGAN [38]</td><td>22.9</td><td>41.2</td><td>33.3</td><td>32.1</td></tr><tr>
<td>2PCNet (Ours)</td><td>23.5</td><td>40.7</td><td>38.2</td><td>35.0</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.467, + 0.47, + 0.523 + ], + "angle": 0, + "content": "Table 2. Comparison of our framework, 2PCNet, with image-to-image (I2I) translation methods. Conducted on the BDD100K dataset. ForkGan and FDA are used for comparison. Reported \\(AP_{coco}\\) is the averaged AP over IoUs 0.5 to 0.95." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.541, + 0.21, + 0.558 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.566, + 0.186, + 0.581 + ], + "angle": 0, + "content": "4.1. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.591, + 0.47, + 0.743 + ], + "angle": 0, + "content": "To evaluate our method, we compare our approach with SOTA methods in domain adaptation for object detection. These include DA-Faster RCNN [3], TDD [11], UMT [7], AT [15] as well as a non-DA baseline Faster-RCNN [21]. Faster-RCNN is used as both our lower and upper-bound, where it is trained on labelled source and target data respectively. We additionally compare our approach with image-to-image translation methods, ForkGAN [38] and FDA [34]. Translation methods are trained on Faster RCNN with both the daytime and translated images." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.756, + 0.18, + 0.77 + ], + "angle": 0, + "content": "4.2. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.47, + 0.901 + ], + "angle": 0, + "content": "The majority of existing nighttime datasets either focuses on semantic segmentation which do not provide labels for object detection [5, 23, 24], or contains very few classes [19, 20]. BDD100K [36] was selected as it provides object detection labels which includes a wide range of classes (10). It also has a large number of images compared to other DA datasets covering daytime, nighttime and other adverse conditions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.344, + 0.892, + 0.466 + ], + "angle": 0, + "content": "The SHIFT [25] dataset is a recent simulated driving dataset that contains scenes in various environments. A continuous shift of these environments is available. SHIFT contains 6 class labels that share similarities to the BDD100K classes. For our evaluation, we use images with the 'day' and 'night' label as our source and target data respectively. We further ensure that the weather tag is 'clear' to isolate other weather conditions from the evaluation." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.478, + 0.661, + 0.495 + ], + "angle": 0, + "content": "4.3. Implementation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.502, + 0.893, + 0.789 + ], + "angle": 0, + "content": "Following previous SOTA methods, we employ Faster-RCNN [21] as our base detection model and ResNet-50 [10] pretrained on ImageNet [6] as our feature extractor. All images are scaled by resizing its shorter side to 600 pixels. For student-scaling we set a schedule for (0.57, 0.64, 0.71, 0.78, 0.85, 0.92) of the maximum iterations at scales (0.5, 0.6, 0.7, 0.8, 0.9, 1.0). Loss hyperparameters are set at \\(\\lambda = 0.3\\) and the rate smooth coefficient parameter of the EMA is 0.9996. A confidence threshold of 0.8 for phase one of Two-Phase Consistency. For the initial pretraining of the student model, we train the student for 50k and 20k iterations on the source images, for BDD100K and SHIFT respectively. Supervised inputs are daytime images with and without NightAug. 
We then copy the weights to the teacher and continue training with the addition of unsupervised loss for an additional 50k iterations. The learning rate is kept at 0.04 throughout training. Our network is trained on 3 RTX3090 GPUs with a batch-size of 6 source and 6 target images." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.801, + 0.702, + 0.817 + ], + "angle": 0, + "content": "4.4. Comparison to SOTA" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Comparison on BDD100K We compare our method against the SOTA on real driving scenes and evaluating their domain adaptation performance on nighttime images, the results of this experiment can be seen on Table 1. The results show that our method achieves the highest perfor" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11489" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.244, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.092, + 0.406, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.407, + 0.092, + 0.566, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.567, + 0.092, + 0.726, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.727, + 0.092, + 0.888, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.349, + 0.895, + 0.392 + ], + "angle": 0, + "content": "Figure 5. Qualitative results of Faster RCNN, Adaptive Teacher (AT) and our method on the SHIFT dataset with the ground-truth on the far right. We can observe that Faster RCNN is not able to detect objects due to absence of domain adaptation, while AT has a large number of small false positive bounding boxes compared to our method which closely resembles that of the ground-truth." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.415, + 0.472, + 0.535 + ], + "angle": 0, + "content": "
<td>Method</td><td>AP</td><td>Per.</td><td>Car</td><td>Truck</td><td>Bus</td><td>Mcy.</td><td>Bcy.</td></tr><tr>
<td>Lower-Bound</td><td>41.6</td><td>40.4</td><td>44.5</td><td>49.9</td><td>53.7</td><td>14.3</td><td>46.7</td></tr><tr>
<td>Upper-Bound</td><td>47.0</td><td>49.7</td><td>51.5</td><td>56.0</td><td>53.6</td><td>19.2</td><td>52.4</td></tr><tr>
<td>DA FR [3]</td><td>43.7</td><td>43.0</td><td>48.8</td><td>47.8</td><td>52.1</td><td>19.9</td><td>55.8</td></tr><tr>
<td>UMT [7]</td><td>31.1</td><td>7.7</td><td>47.5</td><td>18.4</td><td>46.8</td><td>16.6</td><td>49.2</td></tr><tr>
<td>AT [15]</td><td>38.9</td><td>25.8</td><td>33.0</td><td>54.7</td><td>49.5</td><td>20.7</td><td>52.3</td></tr><tr>
<td>2PCNet (Ours)</td><td>49.1</td><td>51.4</td><td>54.6</td><td>54.8</td><td>56.6</td><td>23.9</td><td>54.2</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.547, + 0.47, + 0.603 + ], + "angle": 0, + "content": "Table 3. Results of day-to-night domain adaptation on the SHIFT dataset. The Average Precision (AP) of all classes is reported. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.626, + 0.47, + 0.822 + ], + "angle": 0, + "content": "mance with an AP of 46.4, \\(20.5\\%\\) higher than that of the SOTA student-teacher methods and above that of the upper-bound. We have observed in experiments that student-teacher methods underperform, with an AP below that of the lower-bound, due to the error propagation from noisy pseudo-labels. The result of this error is small false-positive detections, as seen in Figure 1. Our method does not suffer from the same issue, allowing for higher performance. We can also observe that our method performs well across all classes. Even when compared with the upper-bound, 2PCNet achieves higher AP on the majority of classes. This indicates that our method is able to generalise well across large and small classes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.47, + 0.901 + ], + "angle": 0, + "content": "The comparison with image-to-image translation methods is shown in Table 2. Translation methods do not suffer from the error-propagation problem as they are trained on Faster RCNN without a teacher. Even so, we can see that our method outperforms SOTA adverse vision translation" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.42, + 0.565, + 0.433 + ], + "angle": 0, + "content": "methods." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.445, + 0.892, + 0.521 + ], + "angle": 0, + "content": "Comparison on SHIFT To further compare our method with the SOTA, we evaluate on the SHIFT simulation dataset. Due to the nature of the simulated data, many of the nighttime image characteristics that we have previously mentioned, such as blurriness, noise and glare, are not exhibited in this data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.893, + 0.734 + ], + "angle": 0, + "content": "The results of these experiments are shown in Table 3. We can observe that previous SOTA methods that use the student-teacher framework perform worse than the lower-bound. The sub-par performance is again due to the error-propagation problem. AT performs better than UMT due to AT's inclusion of adversarial learning. However, adversarial learning is not enough to mitigate this problem. We can see that DA FRCNN outperforms both SOTA student-teacher methods as it is not affected by error propagation. It is, however, still largely below the upper-bound performance. 2PCNet outperforms these previous methods as well as the upper-bound. We achieve an improvement of \\(+10.2\\) AP over previous SOTA student-teacher methods and \\(+2.1\\) AP over that of the upper-bound." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.744, + 0.665, + 0.759 + ], + "angle": 0, + "content": "4.5. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.769, + 0.892, + 0.829 + ], + "angle": 0, + "content": "To demonstrate the effectiveness of each of our components, we train several models for 100K iterations and evaluate them on the BDD100K dataset. We present our findings in Table 4."
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Two-Phase Consistency We can observe in Table 4 that the addition of Two-Phase Consistency (C) demonstrates a wide performance gap when compared to the Mean-Teacher baseline: +13.5 AP (43%). This improvement in AP ex" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11490" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.288, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.091, + 0.487, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.49, + 0.091, + 0.686, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.091, + 0.888, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.229, + 0.895, + 0.286 + ], + "angle": 0, + "content": "Figure 6. Training curves for the BDD100K dataset ablation study. We show the overall AP training curve as well as the AP of large, medium and small objects. MT represents the base Mean Teacher framework. It can be seen that at all scales, the absence of Two-Phase Consistency (C) results in a sharp drop during training. We can also see that with the inclusion of NightAug (NA) and student-scaling (SS), the gradient of the curve increases. We note that the inclusion of a domain classifier (DC) reduces the performance at all scales." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.473, + 0.448 + ], + "angle": 0, + "content": "ists across large, medium and small objects. While the performance of MT is initially strong, it rapidly begins to decline, as can be observed in Figure 6. This drop in performance is due to the error propagation of noisy pseudo-labels. The experimental results show that Two-Phase Consistency is able to provide a solution. This ensures that highly confident pseudo-labels are bounded by less confident pseudo-labels, enabling a balanced transfer of knowledge to the student." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.457, + 0.47, + 0.579 + ], + "angle": 0, + "content": "NightAug We benchmarked the effectiveness of NightAug in our framework as shown in Table 4. The inclusion of NightAug increases the detection performance of small objects by \\(5\\%\\). Additionally, the gradient of the training performance remains steep as seen in Figure 6. The positive gradient is displayed most strongly for APm and APs, where objects are more prone to nighttime-specific complications." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.589, + 0.471, + 0.771 + ], + "angle": 0, + "content": "Student-Scaling Our final component, student-scaling, is included in the framework and the results can be seen in Table 4. We can observe that student-scaling is able to boost the performance of small object detection by \\(6\\%\\). This boost in performance is due to the student network focusing on smaller objects earlier in the training process. We note that the performance of large objects has dropped by \\(1 - 2\\%\\); however, when referring to the training curves in Figure 6, APl remains steep. As the initial focus is on smaller objects, less time is allocated to larger objects during training. This can be mitigated by lengthening training, resulting in more iterations for larger objects."
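Before the domain-classifier ablation, a brief sketch of the two mechanisms being ablated above may help: the weighted consistency loss of Eq. (6) and the EMA teacher update. This is a PyTorch-style illustration, not the authors' implementation; applying the weight per proposal and taking the teacher distribution as the KL target are one reasonable reading of Eq. (6), the helper names are invented, and 0.9996 is the EMA keep rate reported in Sec. 4.3.

```python
import torch
import torch.nn.functional as F

def weighted_kl_consistency(student_logits, teacher_logits):
    # Both tensors are [N, C] class logits for the SAME merged proposals P*.
    p_teacher = F.softmax(teacher_logits, dim=1)          # soft pseudo-labels C_p*
    log_p_student = F.log_softmax(student_logits, dim=1)  # student predictions C_student
    kl = (p_teacher * (p_teacher.clamp_min(1e-8).log() - log_p_student)).sum(dim=1)
    alpha = p_teacher.max(dim=1).values                   # teacher confidence per proposal
    return (alpha * kl).mean()

@torch.no_grad()
def ema_update(teacher, student, keep_rate=0.9996):
    # teacher <- keep_rate * teacher + (1 - keep_rate) * student
    for t_p, s_p in zip(teacher.parameters(), student.parameters()):
        t_p.mul_(keep_rate).add_(s_p.detach(), alpha=1.0 - keep_rate)
```

Because the loss is computed only over the merged proposals P*, every student prediction has a matched teacher pseudo-label, and low-confidence proposals still contribute to training, just with a smaller weight.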
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Domain Classifier To conclude our study, we included a domain classifier into our network. Adversarial learning is a widely used DA technique; however when added into 2PCNet, a performance drop across all scales can be seen. This drop is shown in Table 4. The suppression of nighttime features is suspected to be the cause. Suppression is present as the adversarial loss guides the feature extractor to maintain domain invariance. By suppressing nighttime fea" + }, + { + "type": "table", + "bbox": [ + 0.514, + 0.309, + 0.882, + 0.433 + ], + "angle": 0, + "content": "
Methods
CNASSDCAPAPIAPmAPs
46.441.725.89.1
44.541.625.08.3
45.842.225.78.6
45.242.925.78.2
31.730.416.54.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.443, + 0.894, + 0.514 + ], + "angle": 0, + "content": "Table 4. Ablation studies on the BDD100K dataset. The last row represents the base Mean-Teacher network. Methods are referred to as, C: Two-Phase Consistency, NA: NightAug, SS: StudentScaling, DC: Domain Classifier. API, APm, and APs represent the AP of large, medium and small objects respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.531, + 0.892, + 0.605 + ], + "angle": 0, + "content": "tures, the teacher has less information to distil to the student. This is demonstrated in Figure 6 where the domain classifier (dotted purple) initially performs well. But as training continues, our method (solid red) is able to surpass its performance." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.624, + 0.619, + 0.639 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.65, + 0.892, + 0.848 + ], + "angle": 0, + "content": "Our proposed framework, 2PCNet, presents a novel solution to the challenges of day-to-night domain adaptive object detection. With our Two-Phase Consistency approach, we are able to effectively leverage high and low confidence knowledge for the student, while mitigating error propagation commonly present in previous student-teacher methods. We further address issues arising from small scale and dark objects through the use of student-scaling and NightAug, respectively. Experimental results on the e BDD100K [36] and SHIFT [25] datasets demonstrate that 2PCNet outperforms existing state-of-the-art methods. Overall, our proposed framework provides an effective and efficient solution for day-to-night domain adaptive object detection." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements This work is partially supported by MOE2019-T2-1-130." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "11491" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Qi Cai, Yingwei Pan, Chong-Wah Ngo, Xinmei Tian, Lingyu Duan, and Ting Yao. Exploring object relation in mean teacher for cross-domain detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11449-11458, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.255 + ], + "angle": 0, + "content": "[2] Lin Chen, Huaian Chen, Zhixiang Wei, Xin Jin, Xiao Tan, Yi Jin, and Enhong Chen. Reusing the task-specific classifier as a discriminator: Discriminator-free adversarial domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7171-7180, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.323 + ], + "angle": 0, + "content": "[3] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3339-3348, 2018. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.47, + 0.38 + ], + "angle": 0, + "content": "[4] Yuhua Chen, Haoran Wang, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Scale-aware domain adaptive faster r-cnn. 
International Journal of Computer Vision, page 2223-2243, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.47, + 0.437 + ], + "angle": 0, + "content": "[5] Dengxin Dai and Luc Van Gool. Dark model adaptation: Semantic image segmentation from daytime to nighttime. In International Conference on Intelligent Transportation Systems (ITSC), pages 3819-3824, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.439, + 0.47, + 0.493 + ], + "angle": 0, + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.47, + 0.563 + ], + "angle": 0, + "content": "[7] Jinhong Deng, Wen Li, Yuhua Chen, and Lixin Duan. Unbiased mean teacher for cross-domain object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4089-4099, 2021. 1, 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.565, + 0.47, + 0.633 + ], + "angle": 0, + "content": "[8] Xueqing Deng, Peng Wang, Xiaochen Lian, and Shawn Newsam. NightLab: A Dual-Level Architecture With Hardness Detection for Segmentation at Night. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16938-16948, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.635, + 0.47, + 0.703 + ], + "angle": 0, + "content": "[9] Huan Gao, Jichang Guo, Guoli Wang, and Qian Zhang. Cross-Domain Correlation Distillation for Unsupervised Domain Adaptation in Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9913-9923, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.76 + ], + "angle": 0, + "content": "[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[11] Mengzhe He, Yali Wang, Jiaxi Wu, Yiru Wang, Hanqing Li, Bo Li, Weihao Gan, Wei Wu, and Yu Qiao. Cross domain object detection by target-perceived dual branch distillation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9560-9570, 2022. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[12] Guoliang Kang, Lu Jiang, Yunchao Wei, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for single- and multi-source domain adaptation. IEEE Transactions on Pattern Analysis amp; Machine Intelligence, pages 1793–1804, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[13] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4888-4897, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[14] Attila Lengyel, Sourav Garg, Michael Milford, and Jan C. van Gemert. Zero-shot day-night domain adaptation with a physics prior. 
In IEEE/CVF International Conference on Computer Vision (ICCV), pages 4379-4389, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.3 + ], + "angle": 0, + "content": "[15] Yu-Jhe Li, Xiaoliang Dai, Chih-Yao Ma, Yen-Cheng Liu, Kan Chen, Bichen Wu, Zijian He, Kris Kitani, and Peter Vajda. Cross-domain adaptive teacher for object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7571-7580, 2022. 1, 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.36 + ], + "angle": 0, + "content": "[16] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael I. Jordan. Learning transferable features with deep adaptation networks. In International Conference on International Conference on Machine Learning, page 97-105, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[17] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Unsupervised domain adaptation with residual transfer networks. In International Conference on Neural Information Processing Systems, page 136-144, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.474 + ], + "angle": 0, + "content": "[18] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Deep transfer learning with joint adaptation networks. In International Conference on Machine Learning, page 2208-2217, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[19] Igor Morawski, Yu-An Chen, Yu-Sheng Lin, and Winston H. Hsu. Nod: Taking a closer look at detection under extreme low-light conditions with night object detection dataset. In British Machine Vision Conference, (BMVC), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.613 + ], + "angle": 0, + "content": "[20] Lukás Neumann, Michelle Karg, Shanshan Zhang, Christian Scharfenberger, Ericiegert, Sarah Mistr, Olga Prokofyeva, Robert Thiel, Andrea Vedaldi, Andrew Zisserman, and Bernt Schiele. Nightowls: A pedestrians at night dataset. In Asian Conference on Computer Vision (ACCV), pages 691-705, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.617, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[21] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In International Conference on Neural Information Processing Systems, page 91-99, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.73 + ], + "angle": 0, + "content": "[22] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6949–6958, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[23] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Guided curriculum model adaptation and uncertainty-aware evaluation for semantic nighttime image segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7373-7382, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[24] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for semantic driving scene understanding. 
In IEEE/CVF International Conference on Computer Vision (ICCV), pages 10745-10755, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[25] Tao Sun, Mattia Segu, Janis Postels, Yuxuan Wang, Luc Van Gool, Bernt Schiele, Federico Tombari, and Fisher Yu. Shift:" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "11492" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "A synthetic driving dataset for continuous multi-task domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21339-21350, 2022. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.15, + 0.47, + 0.219 + ], + "angle": 0, + "content": "[26] Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In International Conference on Neural Information Processing Systems, page 1195–1204, 2017. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.47, + 0.276 + ], + "angle": 0, + "content": "[27] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2962-2971, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.47, + 0.346 + ], + "angle": 0, + "content": "[28] Sinan Wang, Xinyang Chen, Yunbo Wang, Mingsheng Long, and Jianmin Wang. Progressive adversarial networks for fine-grained domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9210-9219, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.47, + 0.417 + ], + "angle": 0, + "content": "[29] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15769–15778, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.42, + 0.47, + 0.475 + ], + "angle": 0, + "content": "[30] Qizhe Xie, Minh-Thang Luong, Eduard Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.478, + 0.469, + 0.544 + ], + "angle": 0, + "content": "[31] Chang-Dong Xu, Xingjie Zhao, Xin Jin, and Xiu-Shen Wei. Exploring categorical regularization for domain adaptive object detection. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11721-11730, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.548, + 0.469, + 0.616 + ], + "angle": 0, + "content": "[32] Minghao Xu, Hang Wang, Bingbing Ni, Qi Tian, and Wenjun Zhang. Cross-domain detection via graph-induced prototype alignment. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12352-12361, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.469, + 0.687 + ], + "angle": 0, + "content": "[33] Qi Xu, Yinan Ma, Jing Wu, Chengnian Long, and Xiaolin Huang. CDAda: A Curriculum Domain Adaptation for Nighttime Semantic Segmentation. 
In IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), pages 2962-2971, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.69, + 0.469, + 0.744 + ], + "angle": 0, + "content": "[34] Yanchao Yang and Stefano Soatto. FDA: Fourier domain adaptation for semantic segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4084-4094, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[35] Junjie Ye, Changhong Fu, Guangze Zheng, Danda Pani Paudel, and Guang Chen. Unsupervised Domain Adaptation for Nighttime Aerial Tracking. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8896-8905, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[36] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2633-2642, 2020. 1, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[37] Weichen Zhang, Wanli Ouyang, Wen Li, and Dong Xu. Collaborative and adversarial network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3801-3809, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[38] Ziqiang Zheng, Yang Wu, Xinran Nicole Han, and Jianbo Shi. Forkgan: Seeing into the rainy night. In European Conference on Computer Vision (ECCV), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[39] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2242-2251, 2017. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.956 + ], + "angle": 0, + "content": "11493" + } + ] +] \ No newline at end of file diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_origin.pdf b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a0ac62748c663e3270b4e76ac40a66cb9fdef1c4 --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/818b1ea7-c7c2-488e-9c91-78c9a94fffa2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec973672e8927fd04aab49af7ca2fc3783a2bb46fd7bfc9ea4bbbc7782e96a77 +size 1846162 diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/full.md b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b808ed248d69b91bd8a231b8097200b04b14003b --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/full.md @@ -0,0 +1,301 @@ +# 2PCNet: Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection + +Mikhail Kennerley $^{1,2}$ , Jian-Gang Wang $^{2}$ , Bharadwaj Veeravalli $^{1}$ , and Robby T. Tan $^{1}$ $^{1}$ National University of Singapore, Department of Electrical and Computer Engineering + $^{2}$ Institute for Infocomm Research, A*STAR +mikhailk@u.nus.edu, jgwang@i2r.a-star.edu.sg, elebv@nus.edu.sg, robby.tan@nus.edu.sg + +# Abstract + +Object detection at night is a challenging problem due to the absence of night image annotations. Despite several domain adaptation methods, achieving high-precision results remains an issue. False-positive error propagation is still observed in methods using the well-established student-teacher framework, particularly for small-scale and low-light objects. This paper proposes a two-phase consistency unsupervised domain adaptation network, 2PCNet, to address these issues. The network employs high-confidence bounding-box predictions from the teacher in the first phase and appends them to the student's region proposals for the teacher to re-evaluate in the second phase, resulting in a combination of high and low confidence pseudo-labels. The night images and pseudo-labels are scaled-down before being used as input to the student, providing stronger small-scale pseudo-labels. To address errors that arise from low-light regions and other night-related attributes in images, we propose a night-specific augmentation pipeline called NightAug. This pipeline involves applying random augmentations, such as glare, blur, and noise, to daytime images. Experiments on publicly available datasets demonstrate that our method achieves superior results to state-of-the-art methods by $20\%$ , and to supervised models trained directly on the target data. + +# 1. Introduction + +Nighttime object detection is critical in many applications. 
However, the requirement of annotated data by supervised methods is impractical, since night data with annotations is few, and supervised methods are generally prone to overfitting to the training data. Among other reasons, this scarcity is due to poor lighting conditions which makes nighttime images hard to annotate. Hence, methods that + +![](images/5263fe911ce1f0a867c50db1a23864721ae578206f0864dbf948d76be7debed0.jpg) +DA Faster-RCNN + +![](images/2e63e3fe63bff5506f7606feab4b1ad88b1cbdc1709be968f5f988018d6f6771.jpg) +UMT + +![](images/350b7bd91271c7942627218e0872edb02ac1007c41555ba9accbebbb4a8b99d5.jpg) +AT +Figure 1. Qualitative results of state-of-the-art DA methods, DA Faster-RCNN [3], UMT [7], Adaptive Teacher (AT) [15] and our method 2PCNet on the BDD100K [36] dataset. Unlike the SOTA methods, our method is able to detect dark and small scale objects with minimal additional false positive predictions. + +![](images/53f055484860341fb6f8bbfbf09bbdc6bb26422bdf984c4ac17b1fca1945835d.jpg) +2PCNet (Ours) + +do not assume the availability of the annotations are more advantageous. Domain adaptation (DA) is an efficient solution to this problem by allowing the use of readily available annotated source daytime datasets. + +A few domain adaptation methods have been proposed, e.g., adversarial learning which uses image and instance level classifiers [3] and similar concepts [22, 32]. However, these methods isolate the domain adaptation task purely towards the feature extractor, and suppress features of the target data for the sake of domain invariance. Recent unsupervised domain adaptation methods exploit the studentteacher framework (e.g. [1,7,11,15]). Since the student initially learns from the supervised loss, there is a bias towards the source data. Augmentation [7, 11] and adversarial learning [15] have been proposed to address this problem. Unfortunately, particularly for day-to-night unsupervised domain adaptation, these methods suffer from a large num + +ber of inaccurate pseudo-labels produced by the teacher. In our investigation, the problem is notably due to insufficient knowledge of small scale features in the nighttime domain, which are then propagated through the learning process between the teacher and student, resulting in poor object detection performance. + +To address the problem, in this paper, we present 2PC-Net, a two-phase consistency unsupervised domain adaptation network for nighttime object detection. Our 2PCNet merges the bounding-boxes of highly-confident pseudolabels, which are predicted in phase one, together with regions proposed by the student's region proposal network (RPN). The merged proposals are then used by the teacher to generate a new set of pseudo-labels in phase two. This provides a combination of high and low confidence pseudolabels. These pseudo-labels are then matched with predictions generated by the student. We can then utilise a weighted consistency loss to ensure that a higher weightage of our unsupervised loss is based on stronger pseudo-labels, yet allow for weaker pseudo-labels to influence the training. + +Equipped with this two-phase strategy, we address the problem of errors from small-scale objects. We devise a student-scaling technique, where night images and their pseudo-labels for the student are deliberately scaled down. In order to generate accurate pseudo-labels, images to the teacher remain at their full scale. 
This results in the pseudolabels of larger objects, which are easier to predict, to be scaled down to smaller objects, allowing for an increase in small scale performance of the student. + +Nighttime images suffer from multiple complications not found in daytime scenes such as dark regions, glare, prominent noise, prominent blur, imbalanced lighting, etc. All these cause a problem, since the student, which was trained on daytime images, is much more biased towards the daytime domain's characteristics. To mitigate this problem, we propose NightAug, a set of random nighttime specific augmentations. NightAug includes adding artificial glare, noise, blur, etc. that mimic the night conditions to daytime images. With NightAug we are able to reduce the bias of the student network towards the source data without resulting in adversarial learning or compute-intensive translations. Overall, using 2PCNet, we can see the qualitative improvements of our result in Figure 1. In summary, the contributions of this paper are as follows: + +- We present 2PCNet, a two-phase consistency approach for student-teacher learning. 2PCNet takes advantage of highly confident teacher labels augmented with less confident regions, which are proposed by the scaled student. This strategy produces a sharp reduction of the error propagation in the learning process. +- To address the bias of the student towards the source domain, we propose NightAug, a random night spe + +cific augmentation pipeline to shift the characteristics of daytime images toward nighttime. + +- The effectiveness of our approach has been verified by comparing it with the state-of-the-art domain adaptation approaches. An improvement of $+7.9\mathrm{AP}(+20\%)$ and $+10.2\mathrm{AP}(26\%)$ over the SOTA on BDD100K and SHIFT has been achieved, respectively. + +# 2. Related Work + +Unsupervised Domain Adaptation (UDA) Unsupervised domain adaptation aims to learn transferable features to reduce the discrepancy between a labelled source and unlabelled target domain. Previous works minimised the distance metric (MMD) [16-18] and considered intra-class and inter-class discrepancy [12, 13]. Adversarial feature learning involved adding an adversarial classifier to play the min-max game between the domain discriminator and feature extractors to generate a domain invariant feature map [27, 28, 37]. These methods have been applied to image classification. Our work focuses on object detection, which is more complex as it involves identifying multiple bounding boxes and associated classes in each image. + +UDA for Object Detection Object detection with UDA is a recent challenge due to the complexities of identifying multiple objects in an image. DA-Faster RCNN [3] integrated adversarial learning with image and instance level classifiers, and several approaches have been proposed to improve on this method by introducing scale-awareness [4], class specific discriminators [31], and re-purposing the task-specific classifier as a discriminator [2]. The Mean Teacher (MT) framework [26] has been adopted in semi-supervised methods, such as UMT [7], which incorporates CycleGAN [39] augmented images; AT [15], which combines the student-teacher framework with adversarial learning; and TDD [11], which uses dual student-teacher networks with style transfer. + +Nighttime UDA The majority of research on unsupervised domain adaptation (UDA) in nighttime scenarios has focused on semantic segmentation [5, 8, 9, 14, 23, 29, 33]. 
Translation and style transformation techniques are commonly used to reduce the domain gap between the source and target domains in these methods [8,29,33]. Some UDA-based techniques for nighttime also utilise paired-images to generate a shared feature space [23], while others use an intermediate domain such as twilight to reduce the domain gap during unsupervised learning [5]. + +Nighttime tracking has also been investigated where adversarial transformers are used to close the domain gap [35]. However, there is a gap in research when it comes to applying UDA techniques in the object detection task for night- + +![](images/d79fc3147efb095c9d9a480464ef2004c703c1cc7d2c0b76ce09ed2f1902d44e.jpg) +Figure 2. Overview of our proposed framework, 2PCNet. 2PCNet consists of: A student network is trained on both the labelled daytime image, which has been augmented with NightAug, and unlabelled nighttime images. A teacher network which is the exponential moving average (EMA) of the student and provides matched pseudo-labels for unsupervised loss. The match pseudo-labels are the predictions of the teacher (phase two) using the RPN proposals of the student, which in turn was guided by the high confidence pseudo-labels of the teacher (phase one). + +time scenarios. Therefore, we explore the application of UDA techniques in object detection under low-light and nighttime conditions. + +# 3. Proposed Method + +Let $\mathbf{D}_s$ be the daytime source data. $\mathbf{D}_s = \{I_s, C_s, B_s\}$ , where the variables refer to the image, class label and bounding-box label, respectively. Index $s$ indicates the daytime source. The night target data is represented by $\mathbf{D}_t$ , where $\mathbf{D}_t = \{I_t\}$ as we do not have the target labels available to us. Index $t$ indicates the nighttime target. + +The architecture of our 2PCNet is shown in Figure 2. Our 2PCNet consists of a student and a teacher network. The student is a multi-domain network trained on both labelled daytime images, augmented with NightAug, and unlabelled nighttime images. The teacher focuses on night images to produce pseudo-labels for the student and is the exponential moving average (EMA) of the student. After an initial pretraining phase, the teacher begins producing pseudo-labels, which allows the student to initialise the feature extractor and detector. + +During each iteration, in phase one of 2PCNet, the teacher produces pseudo-labels from the night images. These pseudo-labels are filtered through a confidence + +threshold. This is to ensure only high-confidence pseudolabels are given to the student. The bounding-boxes from the pseudo-labels are then combined with the region proposals generated by the student's RPN. The merged region proposals are then used to generate predictions from the student's RoI network. In phase two, the teacher utilises the same merged region proposals to generate a matched set of pseudo-labels, where each pseudo-label has its corresponding prediction obtained from the student. + +As mentioned earlier, our student network is initialised by pretraining for a set number of iterations. This is done with supervised loss on the augmented daytime images: + +$$ +L _ {\sup } = L _ {\operatorname {r p n}} \left(B _ {s}, I _ {s}\right) + L _ {\operatorname {r o i}} \left(B _ {s}, C _ {s}, I _ {s}\right), \tag {1} +$$ + +where $L_{\mathrm{rpn}}$ represents the loss from the RPN, which consists of an objectness and bounding-box regression loss. 
$L_{\mathrm{roi}}$ represents the loss from the detector network, consisting of a classification and bounding-box regression loss. + +Once the pretraining is completed, the student's weights are then transferred over to the teacher. In the succeeding iterations, the teacher's weights are the exponential moving average (EMA) of the student's. The matched pseudo-labels generated by the teacher, $\{C_p^*, B_p^*\}$, are then used to guide the unsupervised loss, defined as: + +$$ +L_{\text{unsup}} = L_{\text{rpn}}^{\text{obj}}\left(C_{p}^{*}; I_{t}\right) + L_{\text{cons}}\left(C_{p}^{*}; I_{t}\right), \tag{2} +$$ + +where $L_{\mathrm{rpn}}^{\mathrm{obj}}$ is the objectness loss of the RPN and $L_{\mathrm{cons}}$ is the weighted KL-divergence loss on the predicted outputs, which we explain further in the next section. + +![](images/c67cd0ceb3844b100df47930828639c1f30a507ba48e45e37211aff677e01841.jpg) + +![](images/19d2798780ffd2eaa87261a168fdf6db1a82fe1e65293999ac403404e0f935dc.jpg) +Figure 3. (Left to Right, Top to Bottom) Ground truth bounding boxes, bounding boxes predicted by the teacher with non-maximal suppression (NMS) and thresholding $(B_{p})$, bounding boxes predicted by the student $(B_{\mathrm{student}})$ which is guided by $B_{p}$, and the bounding boxes predicted by the teacher $(B_{p}^{*})$ for the consistency loss. + +# 3.1. Two-Phase Consistency + +Due to the large domain gap between daytime source images and nighttime target images, the teacher is unable to produce high-quality pseudo-labels. This generally occurs over the whole scene, but particularly in regions with strong night characteristics, e.g., low light, glare, and uneven lighting. The teacher produces confident pseudo-labels only for regions that share more similarities with the daytime, since it is biased towards the daytime domain. This bias poses a problem for methods that employ a hard threshold to filter pseudo-labels for a categorical cross-entropy loss [7, 15, 26]. The remaining pseudo-labels contain only easy samples with daytime attributes. Consequently, the student does not learn from harder (e.g., darker) areas. + +As a result of minimal knowledge of the hard samples (i.e., areas with a high level of nighttime attributes), the teacher begins to predict highly confident yet incorrect pseudo-labels. As the teacher provides these incorrect pseudo-labels to the student, a vicious cycle starts where the teacher in turn is updated with incorrect knowledge. Consequently, the error continues to propagate through training. In our case, these errors notably occur in dark/glare regions and on small-scale objects. + +To address the problem of error propagation, we design a two-phase approach that combines high-confidence pseudo-labels together with their less confident counterparts. This combination allows both the high accuracy of confident labels and the additional knowledge of less confident labels to be distilled onto the student. In phase one, the unlabelled nighttime image, $I_{t}$, is used as an input for the teacher to generate pseudo-labels. These pseudo-labels are filtered with a threshold to retain only high-confidence pseudo-labels, $(C_p, B_p)$. The bounding-boxes of the pseudo-labels, $B_{p}$, are then used as an input to the student.
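As a rough illustration of the teacher EMA update and the phase-one confidence filtering just described, the sketch below updates teacher weights as a moving average of the student's and keeps only high-confidence teacher detections. The 0.9996 smoothing coefficient and the 0.8 threshold follow the implementation details reported in Section 4.3; the function and variable names are placeholders rather than the authors' actual code.

```python
import torch

@torch.no_grad()
def ema_update(teacher, student, keep_rate=0.9996):
    """Teacher weights as the exponential moving average (EMA) of the student's."""
    for t_param, s_param in zip(teacher.parameters(), student.parameters()):
        t_param.mul_(keep_rate).add_(s_param, alpha=1.0 - keep_rate)

def filter_pseudo_labels(boxes, scores, classes, conf_thresh=0.8):
    """Phase one: retain only high-confidence teacher detections (C_p, B_p)."""
    keep = scores >= conf_thresh
    return boxes[keep], scores[keep], classes[keep]

if __name__ == "__main__":
    # Dummy teacher detections for a single night image.
    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 20.0, 20.0]])
    scores = torch.tensor([0.95, 0.42])
    classes = torch.tensor([1, 3])
    b_p, s_p, c_p = filter_pseudo_labels(boxes, scores, classes)
    print(b_p.shape[0], "high-confidence pseudo-label(s) retained")
```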
$B_{p}$ is concatenated to the region proposals generated by the student RPN module: + +$$ +P^{*} = \operatorname{RPN}_{\text{student}}\left(I_{t}\right) \cup B_{p}, \tag{3} +$$ + +where $P^{*}$ is the combined set of region proposals, which is then used as an input to the student's RoI module to predict the class, $C_{\mathrm{student}}$, and bounding-box, $B_{\mathrm{student}}$, of each region proposal. + +Phase two begins by using the same combined region proposals, $P^{*}$, generated in phase one as an input to the teacher's RoI module to generate a matched set of pseudo-labels: + +$$ +\left\{C_{p}^{*}, B_{p}^{*}\right\} = \operatorname{RoI}_{\text{teacher}}\left(P^{*}\right). \tag{4} +$$ + +The difference between $C_p$ and $C_p^*$ is that $C_p^*$ is derived from the same region proposals as the student predictions $C_{\mathrm{student}}$. This allows us to compare $C_{\mathrm{student}}$ and $C_p^*$ directly: + +$$ +\begin{array}{l} \left\{C_{\text{student}}(n), B_{\text{student}}(n)\right\} = \operatorname{RoI}_{\text{student}}\left(P^{*}(n)\right), \\ \left\{C_{p}^{*}(n), B_{p}^{*}(n)\right\} = \operatorname{RoI}_{\text{teacher}}\left(P^{*}(n)\right), \end{array} \tag{5} +$$ + +where $n = \{1,2,\dots,N\}$ and $N$ is the number of region proposals in $P^*$. This operation ensures that the knowledge of highly confident predictions generated by the teacher is distilled through to the student. In addition, information from less confident predictions can also be learnt. However, we still need to penalise less confident samples, and we therefore employ a weighted KL-divergence as our consistency loss: + +$$ +L_{\text{cons}} = \alpha \operatorname{KL}\left(C_{\text{student}}, C_{p}^{*}\right), \tag{6} +$$ + +where $\alpha$ is the highest confidence of $C_p^*$, expressed as $\alpha = \max(C_p^*)$, and $\operatorname{KL}(\cdot)$ is the KL-divergence function. Note that pseudo bounding-boxes are not used to generate the unsupervised loss, as the confidence score of each pseudo-label represents the class information rather than the bounding box. The outputs of each segment of our two-phase approach are shown in Figure 3. + +# 3.2. Student-Scaling + +In our investigation, we have found that the scales of objects have a strong influence on object detection at night. This + +Algorithm 1 Single Augmentation - NightAug +imgClean $\leftarrow$ img +if randFloat $\geq 0.5$ then + randFloat $\leftarrow 0.8 \times \mathrm{randFloat} + 0.2$ + img $\leftarrow$ augmentation(img, randFloat) + prob $\leftarrow 0.4$ + while randFloat $\geq$ prob do + $x \gets$ randInt(img.shape[1], 2) + $y \gets$ randInt(img.shape[2], 2) + img[x, y] $\leftarrow$ imgClean[x, y] + prob $\leftarrow \mathrm{prob} + 0.1$ + end while +end if + +is due to the features of smaller objects being easily overwhelmed by glare or noise. To allow the student to overcome this, we apply a scaling augmentation to the student's inputs, which include both the image and the pseudo-labels generated by the teacher. As training proceeds, we follow a schedule to increase the scale of the student augmentation until it equals that of the original image. By iteratively increasing the scale we allow the student to focus on smaller features earlier in the training process. This process encourages the teacher to make more accurate predictions on smaller scale objects in the later stages of training.
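Returning briefly to the consistency objective above, the following is a minimal sketch of the weighted KL-divergence loss in Eq. (6): the student's and teacher's class predictions for the same merged proposals $P^{*}$ are compared, and each proposal's divergence is scaled by the teacher's peak confidence $\alpha$. The tensor shapes and the direction of the KL term are assumptions made for illustration, not the authors' exact implementation.

```python
import torch
import torch.nn.functional as F

def consistency_loss(student_logits, teacher_logits):
    """Sketch of Eq. (6): per-proposal KL divergence against the teacher's
    matched class distribution, weighted by the teacher's highest confidence.

    Both inputs are assumed to have shape (N, num_classes), where N is the
    number of merged region proposals P* shared by the student and teacher.
    """
    student_log_prob = F.log_softmax(student_logits, dim=-1)
    teacher_prob = F.softmax(teacher_logits, dim=-1)
    # alpha: the teacher's highest class confidence for each proposal (Eq. 6).
    alpha = teacher_prob.max(dim=-1).values.detach()
    # F.kl_div expects log-probabilities as input and probabilities as target.
    kl = F.kl_div(student_log_prob, teacher_prob, reduction="none").sum(dim=-1)
    return (alpha * kl).mean()

if __name__ == "__main__":
    # 4 merged proposals, 10 foreground classes + background.
    s = torch.randn(4, 11, requires_grad=True)
    t = torch.randn(4, 11)
    loss = consistency_loss(s, t)
    loss.backward()
    print(float(loss))
```

Because $\alpha$ is taken from the teacher, low-confidence (typically harder, darker) proposals still contribute to the loss, only with a smaller weight.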
In turn, accurate small scale pseudo-labels allow for the increase in the scale of the student's inputs with minimal errors due to scale. + +To ensure the knowledge of the previous scales is not forgotten, a Gaussian function for the scaling factor is applied. The norm of the Gaussian function is obtained from the schedule values. To prevent additional noise due to pseudo-labels being too small, labels that have an area below a threshold are removed. + +# 3.3. NightAug + +Night images suffer from a range of complications that are not present in daytime scenes. This causes a problem in the student-teacher framework, where the student would be biased towards the source domain. Previous methods have attempted to address this, but have either required compute-intensive translations [7, 11] or added additional domain classifiers to the framework [15], which complicates training. We propose NightAug, a nighttime-specific augmentation pipeline that is compute-light and does not require training. NightAug consists of a series of augmentations with the aim of steering the characteristics of daytime images to resemble those of a nighttime image. + +The defining features of nighttime images are that they are darker and have lower contrast than daytime images. In addition, the signal-to-noise ratio (SNR) could be lower due to the properties of digital cameras such as luminance and colour noise. Glare and glow from street lamps and headlights are also present in nighttime images. Additionally, images may be out-of-focus due to the camera's inability to detect reference points to focus on in dark environments. + +![](images/44477bcfe46dd0b404ebc19a5eabcfb95708b6ebf6fb6883592a6a5e4c257b7b.jpg) + +![](images/28b82fb10b3a24991f39d715a100a831e46d1756277035c6f05cde59023c911f.jpg) +Figure 4. NightAug: Original image (top-left) and images with random augmentations from: Gaussian blur, gamma correction, brightness, contrast, glare, Gaussian noise and random cut-outs. + +Keeping in mind the properties of nighttime images, our NightAug includes random brightness, contrast, gamma, Gaussian noise, and Gaussian blur augmentations, as well as random glare insertion. The augmentations are randomly applied to the images and are also random in intensity. This randomness results in a wider variance of images that are exposed to the student, leading to more robust training [30]. To further increase the variance of the images, at each augmentation step, random segments of the image will ignore the application of that augmentation. This reflects that different areas of nighttime images may be unevenly lit, and this uneven lighting affects the above characteristics of the local region. + +A single augmentation flow of NightAug is demonstrated in Algorithm 1. Samples of an image processed with NightAug are shown in Figure 4. Each augmentation has a set probability of being applied, with the strength of the augmentation being random. Random regions of the augmented image may then be replaced with that of the original image. The probability of this region replacement reduces with each iteration. + +Overall Loss Our total loss can be represented as: + +$$ +L_{\text{total}} = L_{\text{sup}} + \lambda L_{\text{unsup}}, \tag{7} +$$ + +where $\lambda$ represents a weight factor for the unsupervised loss, and is set experimentally. $L_{\mathrm{sup}}$ and $L_{\mathrm{unsup}}$ refer to Eq. (1) and Eq. (2), respectively.
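To make the NightAug pipeline of Algorithm 1 and Figure 4 more concrete, below is a small sketch of one randomised augmentation step with region-wise reversion to the clean image, so that the effect is spatially uneven. The particular operation, strength range, and region-sampling scheme are illustrative assumptions rather than the exact procedure used in the paper.

```python
import random
import torch

def nightaug_step(img, augmentation, p_apply=0.5):
    """One NightAug-style step (cf. Algorithm 1).

    img: float tensor of shape (C, H, W) in [0, 1].
    augmentation: callable (img, strength) -> img, e.g. a brightness or blur op.
    """
    clean = img.clone()
    if random.random() >= p_apply:
        return img  # skip this augmentation for this image
    strength = 0.8 * random.random() + 0.2  # random strength in [0.2, 1.0]
    img = augmentation(img, strength)
    prob = 0.4
    # Revert a few random rectangular regions back to the clean image; the
    # chance of reverting another region shrinks as prob grows.
    while random.random() >= prob:
        _, h, w = img.shape
        y0, y1 = sorted(random.sample(range(h), 2))
        x0, x1 = sorted(random.sample(range(w), 2))
        img[:, y0:y1, x0:x1] = clean[:, y0:y1, x0:x1]
        prob += 0.1
    return img

def darken(img, strength):
    """A crude stand-in for a brightness/gamma augmentation."""
    return img * (1.0 - 0.7 * strength)

if __name__ == "__main__":
    dummy_day_image = torch.rand(3, 64, 64)
    night_like = nightaug_step(dummy_day_image, darken)
    print(night_like.shape, float(night_like.mean()))
```

Chaining several such steps (brightness, contrast, gamma, noise, blur, glare), each applied with its own probability and random strength, yields the kind of varied, unevenly lit images shown in Figure 4.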

| Method | AP | Pedestrian | Rider | Car | Truck | Bus | Motorcycle | Bicycle | Traffic Light | Traffic Sign |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Lower-Bound | 41.1 | 50.0 | 28.9 | 66.6 | 47.8 | 47.5 | 32.8 | 39.5 | 41.0 | 56.5 |
| Upper-Bound | 46.2 | 52.1 | 35.0 | 73.6 | 53.5 | 54.8 | 36.0 | 41.8 | 52.2 | 63.3 |
| DA F-RCNN [3] | 41.3 | 50.4 | 30.3 | 66.3 | 46.8 | 48.3 | 32.6 | 41.4 | 41.0 | 56.2 |
| TDD [11] | 34.6 | 43.1 | 20.7 | 68.4 | 33.3 | 35.6 | 16.5 | 25.9 | 43.1 | 59.5 |
| UMT [7] | 36.2 | 46.5 | 26.1 | 46.8 | 44.0 | 46.3 | 28.2 | 40.2 | 31.6 | 52.7 |
| AT [15] | 38.5 | 42.3 | 30.4 | 60.8 | 48.9 | 52.1 | 34.5 | 42.7 | 29.1 | 43.9 |
| 2PCNet (Ours) | 46.4 | 54.4 | 30.8 | 73.1 | 53.8 | 55.2 | 37.5 | 44.5 | 49.4 | 65.2 |
+ +Table 1. Results of day-to-night domain adaptation on the BDD100K dataset, the Average Precision (AP) of all classes are reported. Faster RCNN detector with ResNet-50 feature extractor is used for all experiments to ensure a fair comparison. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively. The lower-bound provides a baseline without any domain adaptation while the upper-bound is fully supervised, the case where labelled target night data is available. + +

| Method | $AP_{coco}$ | Car | Bus | Truck |
| --- | --- | --- | --- | --- |
| Lower-Bound | 22.1 | 37.5 | 29.8 | 30.7 |
| Upper-Bound | 23.9 | 42.0 | 33.8 | 35.0 |
| FDA [34] | 22.6 | 38.5 | 37.2 | 23.2 |
| ForkGAN [38] | 22.9 | 41.2 | 33.3 | 32.1 |
| 2PCNet (Ours) | 23.5 | 40.7 | 38.2 | 35.0 |
+ +Table 2. Comparison of our framework, 2PCNet, with image-to-image (I2I) translation methods. Conducted on the BDD100K dataset. ForkGan and FDA are used for comparison. Reported $AP_{coco}$ is the averaged AP over IoUs 0.5 to 0.95. + +# 4. Experiments + +# 4.1. Baselines + +To evaluate our method, we compare our approach with SOTA methods in domain adaptation for object detection. These include DA-Faster RCNN [3], TDD [11], UMT [7], AT [15] as well as a non-DA baseline Faster-RCNN [21]. Faster-RCNN is used as both our lower and upper-bound, where it is trained on labelled source and target data respectively. We additionally compare our approach with image-to-image translation methods, ForkGAN [38] and FDA [34]. Translation methods are trained on Faster RCNN with both the daytime and translated images. + +# 4.2. Datasets + +The majority of existing nighttime datasets either focuses on semantic segmentation which do not provide labels for object detection [5, 23, 24], or contains very few classes [19, 20]. BDD100K [36] was selected as it provides object detection labels which includes a wide range of classes (10). It also has a large number of images compared to other DA datasets covering daytime, nighttime and other adverse conditions. + +The SHIFT [25] dataset is a recent simulated driving dataset that contains scenes in various environments. A continuous shift of these environments is available. SHIFT contains 6 class labels that share similarities to the BDD100K classes. For our evaluation, we use images with the 'day' and 'night' label as our source and target data respectively. We further ensure that the weather tag is 'clear' to isolate other weather conditions from the evaluation. + +# 4.3. Implementation + +Following previous SOTA methods, we employ Faster-RCNN [21] as our base detection model and ResNet-50 [10] pretrained on ImageNet [6] as our feature extractor. All images are scaled by resizing its shorter side to 600 pixels. For student-scaling we set a schedule for (0.57, 0.64, 0.71, 0.78, 0.85, 0.92) of the maximum iterations at scales (0.5, 0.6, 0.7, 0.8, 0.9, 1.0). Loss hyperparameters are set at $\lambda = 0.3$ and the rate smooth coefficient parameter of the EMA is 0.9996. A confidence threshold of 0.8 for phase one of Two-Phase Consistency. For the initial pretraining of the student model, we train the student for 50k and 20k iterations on the source images, for BDD100K and SHIFT respectively. Supervised inputs are daytime images with and without NightAug. We then copy the weights to the teacher and continue training with the addition of unsupervised loss for an additional 50k iterations. The learning rate is kept at 0.04 throughout training. Our network is trained on 3 RTX3090 GPUs with a batch-size of 6 source and 6 target images. + +# 4.4. Comparison to SOTA + +Comparison on BDD100K We compare our method against the SOTA on real driving scenes and evaluating their domain adaptation performance on nighttime images, the results of this experiment can be seen on Table 1. The results show that our method achieves the highest perfor + +![](images/260a68fcdf8a6dfda8ed4d951a9e734559f34114dc70c808af48e92e2eeabd0c.jpg) +Figure 5. Qualitative results of Faster RCNN, Adaptive Teacher (AT) and our method on the SHIFT dataset with the ground-truth on the far right. 
We can observe that Faster RCNN is not able to detect objects due to absence of domain adaptation, while AT has a large number of small false positive bounding boxes compared to our method which closely resembles that of the ground-truth. + +![](images/fa1c9a5062df328949db62faac6c53ac246b0a74b845931868e4ae6e10da8b1d.jpg) + +![](images/118d34ec49e84b21fdf24b602e2d317adbad87c2f07af0c8f63bcb2c7fe80499.jpg) + +![](images/85899240049ff1fc50d9929c44f154e9f808a51c616d22cd1432f4ba874464f8.jpg) + +![](images/52216da15ee92e9d03f9f345c2ca5f962ae1e5bd73a17eec5bd7ac074b3d2650.jpg) + +

| Method | AP | Per. | Car | Truck | Bus | Mcy. | Bcy. |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Lower-Bound | 41.6 | 40.4 | 44.5 | 49.9 | 53.7 | 14.3 | 46.7 |
| Upper-Bound | 47.0 | 49.7 | 51.5 | 56.0 | 53.6 | 19.2 | 52.4 |
| DA FR [3] | 43.7 | 43.0 | 48.8 | 47.8 | 52.1 | 19.9 | 55.8 |
| UMT [7] | 31.1 | 7.7 | 47.5 | 18.4 | 46.8 | 16.6 | 49.2 |
| AT [15] | 38.9 | 25.8 | 33.0 | 54.7 | 49.5 | 20.7 | 52.3 |
| 2PCNet (Ours) | 49.1 | 51.4 | 54.6 | 54.8 | 56.6 | 23.9 | 54.2 |
+ +Table 3. Results of Day-to-Night domain adaptation on the SHIFT dataset. The Average Precision (AP) of all classes. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively. + +mance with an AP of 46.4. $20.5\%$ higher than that of the SOTA student-teacher methods and above that of the upper-bound. We have observed in experiments that student-teacher methods underperforms with an AP below that of the lower-bound due to the error-propagation from noisy pseudo-labels. The result of the error is small false positive detections as seen in Figure 1. Our method does not suffer from the same allowing for higher performance. We can also observe that our method performs well across all classes. Even when compared with the upper-bound, 2PC-Net achieves higher AP on the majority of classes. This indicates that our method is able to generalise well across large and small classes. + +The comparison with image-to-image translation methods is shown in Table 2. Translation methods do not suffer from the error propagation problem as it is trained on Faster RCNN without a teacher. Even so, we can see that our method outperforms SOTA adverse vision translation + +methods. + +Comparison on SHIFT To further compare our method with SOTA we evaluate on the SHIFT simulation dataset. Due to the nature of the simulated data, many nighttime image characteristics that we have previously mention is not exhibited in this data such as blurriness, noise and glare. + +The results of this experiments are shown in Table 3. We can observe that previous SOTA methods that use the student-teacher framework perform worse than the lower-bound. The sub-par performance is again due to the error-propagation problem. AT performs better than UMT due to ATs inclusion of adversarial learning. However, adversarial learning is not enough to mitigate this problem. We can see that the performance of DA FRCNN outperforms both the SOTA student-teacher methods as it would not be affected by error-propagation. It is however, still largely below the upper-bound performance. 2PCNet outperforms these previous methods as well as the upperbound. We achieve an improvement of $+10.2$ AP over previous SOTA student-teacher methods and $+2.1$ AP over that of the upper-bound. + +# 4.5. Ablation Studies + +To demonstrate the effectiveness of each of our components, we train several models for 100K iterations and evaluate them on the BDD100K dataset. We present our findings in Table 4. + +Two-Phase Consistency We can observe in Table 4 that the addition of Two-Phase Consistency (C) demonstrated a wide performance gap when compared to the Mean-Teacher baseline, +13.5 AP (43%). This improvement in AP ex + +![](images/808250427ec14cf82c8ac96e883ffd23fd2c6af9f58f5aeface3c8be797b87c7.jpg) +Figure 6. Training curve on BDD100K dataset ablation study. We show the overall AP training curve as well as the AP of large, medium and small objects. MT represents the base Mean Teacher framework. It can be seen that at all scales, the absence of Two-Phase Consistency (C) results in a sharp drop during training. We can also see that with the inclusion of NightAug (NA) and student-scaling (SS) the gradient of the curve increases. We note that the inclusion of a domain classifier (DC) reduces the performance at all scales. 
+ +![](images/b6705329c2169a0416d90e84e815498b1f0bfb53c6481a403cfb48ec48081d07.jpg) + +![](images/35530a358b9d008ad8b061628ac5c0ae9f9d1b58dc0f9a7ef67087c3e74bd327.jpg) + +![](images/6e1f6579437a259fb07456dba92adee7672ddce723d1501354e0dc90cdc44b74.jpg) + +ists across large, medium and small objects. While the performance of MT is initially strong, it rapidly begins to decline, as can be observed in Figure 6. This drop in performance is due to the error propagation of noisy pseudo-labels. The experimental results show that Two-Phase Consistency is able to provide a solution. It ensures that highly confident pseudo-labels are accompanied by less confident pseudo-labels, enabling a balanced transfer of knowledge to the student. + +NightAug We benchmarked the effectiveness of NightAug in our framework as shown in Table 4. The inclusion of NightAug increases the detection performance of small objects with an increase of $5\%$. Additionally, the gradient of the training performance remains steep as seen in Figure 6. The positive gradient is displayed most strongly for APm and APs, where objects are more prone to nighttime-specific complications. + +Student-Scaling Our final component, student-scaling, is included in the framework and the results can be seen in Table 4. We can observe that student-scaling is able to boost the performance of small object detection by $6\%$. This boost in performance is due to the student network focusing on smaller objects earlier in the training process. We note that the performance of large objects has dropped by $1 - 2\%$; however, when referring to the training curves in Figure 6, APl remains steep. As the initial focus is on smaller objects, less time is allocated to larger objects during training. This can be mitigated by lengthening training, resulting in more iterations for larger objects. + +Domain Classifier To conclude our study, we included a domain classifier in our network. Adversarial learning is a widely used DA technique; however, when added to 2PCNet, a performance drop across all scales can be seen. This drop is shown in Table 4. The suppression of nighttime features is suspected to be the cause. Suppression is present as the adversarial loss guides the feature extractor to maintain domain invariance. By suppressing nighttime fea

| C | NA | SS | DC | AP | APl | APm | APs |
| --- | --- | --- | --- | --- | --- | --- | --- |
| ✓ | ✓ | ✓ |  | 46.4 | 41.7 | 25.8 | 9.1 |
| ✓ | ✓ | ✓ | ✓ | 44.5 | 41.6 | 25.0 | 8.3 |
| ✓ | ✓ |  |  | 45.8 | 42.2 | 25.7 | 8.6 |
| ✓ |  |  |  | 45.2 | 42.9 | 25.7 | 8.2 |
|  |  |  |  | 31.7 | 30.4 | 16.5 | 4.8 |
+ +Table 4. Ablation studies on the BDD100K dataset. The last row represents the base Mean-Teacher network. Methods are referred to as, C: Two-Phase Consistency, NA: NightAug, SS: StudentScaling, DC: Domain Classifier. API, APm, and APs represent the AP of large, medium and small objects respectively. + +tures, the teacher has less information to distil to the student. This is demonstrated in Figure 6 where the domain classifier (dotted purple) initially performs well. But as training continues, our method (solid red) is able to surpass its performance. + +# 5. Conclusion + +Our proposed framework, 2PCNet, presents a novel solution to the challenges of day-to-night domain adaptive object detection. With our Two-Phase Consistency approach, we are able to effectively leverage high and low confidence knowledge for the student, while mitigating error propagation commonly present in previous student-teacher methods. We further address issues arising from small scale and dark objects through the use of student-scaling and NightAug, respectively. Experimental results on the e BDD100K [36] and SHIFT [25] datasets demonstrate that 2PCNet outperforms existing state-of-the-art methods. Overall, our proposed framework provides an effective and efficient solution for day-to-night domain adaptive object detection. + +Acknowledgements This work is partially supported by MOE2019-T2-1-130. + +# References + +[1] Qi Cai, Yingwei Pan, Chong-Wah Ngo, Xinmei Tian, Lingyu Duan, and Ting Yao. Exploring object relation in mean teacher for cross-domain detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11449-11458, 2019. 1 +[2] Lin Chen, Huaian Chen, Zhixiang Wei, Xin Jin, Xiao Tan, Yi Jin, and Enhong Chen. Reusing the task-specific classifier as a discriminator: Discriminator-free adversarial domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7171-7180, 2022. 2 +[3] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3339-3348, 2018. 1, 2, 6, 7 +[4] Yuhua Chen, Haoran Wang, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Scale-aware domain adaptive faster r-cnn. International Journal of Computer Vision, page 2223-2243, 2021. 2 +[5] Dengxin Dai and Luc Van Gool. Dark model adaptation: Semantic image segmentation from daytime to nighttime. In International Conference on Intelligent Transportation Systems (ITSC), pages 3819-3824, 2018. 2, 6 +[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009. 6 +[7] Jinhong Deng, Wen Li, Yuhua Chen, and Lixin Duan. Unbiased mean teacher for cross-domain object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4089-4099, 2021. 1, 2, 4, 5, 6, 7 +[8] Xueqing Deng, Peng Wang, Xiaochen Lian, and Shawn Newsam. NightLab: A Dual-Level Architecture With Hardness Detection for Segmentation at Night. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16938-16948, 2022. 2 +[9] Huan Gao, Jichang Guo, Guoli Wang, and Qian Zhang. Cross-Domain Correlation Distillation for Unsupervised Domain Adaptation in Nighttime Semantic Segmentation. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9913-9923, 2022. 2 +[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 6 +[11] Mengzhe He, Yali Wang, Jiaxi Wu, Yiru Wang, Hanqing Li, Bo Li, Weihao Gan, Wei Wu, and Yu Qiao. Cross domain object detection by target-perceived dual branch distillation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9560-9570, 2022. 1, 2, 5, 6 +[12] Guoliang Kang, Lu Jiang, Yunchao Wei, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for single- and multi-source domain adaptation. IEEE Transactions on Pattern Analysis amp; Machine Intelligence, pages 1793–1804, 2022. 2 + +[13] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4888-4897, 2019. 2 +[14] Attila Lengyel, Sourav Garg, Michael Milford, and Jan C. van Gemert. Zero-shot day-night domain adaptation with a physics prior. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 4379-4389, 2021. 2 +[15] Yu-Jhe Li, Xiaoliang Dai, Chih-Yao Ma, Yen-Cheng Liu, Kan Chen, Bichen Wu, Zijian He, Kris Kitani, and Peter Vajda. Cross-domain adaptive teacher for object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7571-7580, 2022. 1, 2, 4, 5, 6, 7 +[16] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael I. Jordan. Learning transferable features with deep adaptation networks. In International Conference on International Conference on Machine Learning, page 97-105, 2015. 2 +[17] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Unsupervised domain adaptation with residual transfer networks. In International Conference on Neural Information Processing Systems, page 136-144, 2016. 2 +[18] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Deep transfer learning with joint adaptation networks. In International Conference on Machine Learning, page 2208-2217, 2017. 2 +[19] Igor Morawski, Yu-An Chen, Yu-Sheng Lin, and Winston H. Hsu. Nod: Taking a closer look at detection under extreme low-light conditions with night object detection dataset. In British Machine Vision Conference, (BMVC), 2021. 6 +[20] Lukás Neumann, Michelle Karg, Shanshan Zhang, Christian Scharfenberger, Ericiegert, Sarah Mistr, Olga Prokofyeva, Robert Thiel, Andrea Vedaldi, Andrew Zisserman, and Bernt Schiele. Nightowls: A pedestrians at night dataset. In Asian Conference on Computer Vision (ACCV), pages 691-705, 2018. 6 +[21] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In International Conference on Neural Information Processing Systems, page 91-99, 2015. 6 +[22] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6949–6958, 2019. 1 +[23] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Guided curriculum model adaptation and uncertainty-aware evaluation for semantic nighttime image segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7373-7382, 2019. 2, 6 +[24] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. 
ACDC: The adverse conditions dataset with correspondences for semantic driving scene understanding. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 10745-10755, 2021. 6
+[25] Tao Sun, Mattia Segu, Janis Postels, Yuxuan Wang, Luc Van Gool, Bernt Schiele, Federico Tombari, and Fisher Yu. Shift: A synthetic driving dataset for continuous multi-task domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21339-21350, 2022. 6, 8
+[26] Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In International Conference on Neural Information Processing Systems, pages 1195-1204, 2017. 2, 4
+[27] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2962-2971, 2017. 2
+[28] Sinan Wang, Xinyang Chen, Yunbo Wang, Mingsheng Long, and Jianmin Wang. Progressive adversarial networks for fine-grained domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9210-9219, 2020. 2
+[29] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15769-15778, 2021. 2
+[30] Qizhe Xie, Minh-Thang Luong, Eduard Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2020. 5
+[31] Chang-Dong Xu, Xingjie Zhao, Xin Jin, and Xiu-Shen Wei. Exploring categorical regularization for domain adaptive object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11721-11730, 2020. 2
+[32] Minghao Xu, Hang Wang, Bingbing Ni, Qi Tian, and Wenjun Zhang. Cross-domain detection via graph-induced prototype alignment. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12352-12361, 2020. 1
+[33] Qi Xu, Yinan Ma, Jing Wu, Chengnian Long, and Xiaolin Huang. CDAda: A Curriculum Domain Adaptation for Nighttime Semantic Segmentation. In IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), pages 2962-2971, 2021. 2
+[34] Yanchao Yang and Stefano Soatto. FDA: Fourier domain adaptation for semantic segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4084-4094, 2020. 6
+[35] Junjie Ye, Changhong Fu, Guangze Zheng, Danda Pani Paudel, and Guang Chen. Unsupervised Domain Adaptation for Nighttime Aerial Tracking. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8896-8905, 2022. 2
+[36] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2633-2642, 2020. 1, 6, 8 +
+[37] Weichen Zhang, Wanli Ouyang, Wen Li, and Dong Xu. Collaborative and adversarial network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3801-3809, 2018. 2
+[38] Ziqiang Zheng, Yang Wu, Xinran Nicole Han, and Jianbo Shi. Forkgan: Seeing into the rainy night. In European Conference on Computer Vision (ECCV), 2020.
6 +[39] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2242-2251, 2017. 2 \ No newline at end of file diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/images.zip b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..9fc9735a63308a52b5ebba4ac6e8349c9f9c1a91 --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e4c69267e408322b621e0cd75a0e75d498d76cf6fc69438c87320b387844b03 +size 525176 diff --git a/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/layout.json b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f0c6010293c1e8c7e6a8fd7d870afead165fe5f2 --- /dev/null +++ b/2023/2PCNet_ Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection/layout.json @@ -0,0 +1,7671 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 114, + 103, + 480, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 103, + 480, + 140 + ], + "spans": [ + { + "bbox": [ + 114, + 103, + 480, + 140 + ], + "type": "text", + "content": "2PCNet: Two-Phase Consistency Training for Day-to-Night Unsupervised Domain Adaptive Object Detection" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": "Mikhail Kennerley" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": ", Jian-Gang Wang" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": ", Bharadwaj Veeravalli" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": ", and Robby T. 
Tan" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": "National University of Singapore, Department of Electrical and Computer Engineering \n" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 170, + 525, + 224 + ], + "type": "text", + "content": "Institute for Infocomm Research, A*STAR \nmikhailk@u.nus.edu, jgwang@i2r.a-star.edu.sg, elebv@nus.edu.sg, robby.tan@nus.edu.sg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "spans": [ + { + "bbox": [ + 143, + 258, + 192, + 272 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "spans": [ + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": "Object detection at night is a challenging problem due to the absence of night image annotations. Despite several domain adaptation methods, achieving high-precision results remains an issue. False-positive error propagation is still observed in methods using the well-established student-teacher framework, particularly for small-scale and low-light objects. This paper proposes a two-phase consistency unsupervised domain adaptation network, 2PCNet, to address these issues. The network employs high-confidence bounding-box predictions from the teacher in the first phase and appends them to the student's region proposals for the teacher to re-evaluate in the second phase, resulting in a combination of high and low confidence pseudo-labels. The night images and pseudo-labels are scaled-down before being used as input to the student, providing stronger small-scale pseudo-labels. To address errors that arise from low-light regions and other night-related attributes in images, we propose a night-specific augmentation pipeline called NightAug. This pipeline involves applying random augmentations, such as glare, blur, and noise, to daytime images. Experiments on publicly available datasets demonstrate that our method achieves superior results to state-of-the-art methods by " + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 47, + 284, + 290, + 571 + ], + "type": "text", + "content": ", and to supervised models trained directly on the target data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 592, + 128, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 592, + 128, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 592, + 128, + 605 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 613, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 696 + ], + "type": "text", + "content": "Nighttime object detection is critical in many applications. 
However, the requirement of annotated data by supervised methods is impractical, since night data with annotations is few, and supervised methods are generally prone to overfitting to the training data. Among other reasons, this scarcity is due to poor lighting conditions which makes nighttime images hard to annotate. Hence, methods that" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 259, + 426, + 325 + ], + "blocks": [ + { + "bbox": [ + 310, + 259, + 426, + 325 + ], + "lines": [ + { + "bbox": [ + 310, + 259, + 426, + 325 + ], + "spans": [ + { + "bbox": [ + 310, + 259, + 426, + 325 + ], + "type": "image", + "image_path": "5263fe911ce1f0a867c50db1a23864721ae578206f0864dbf948d76be7debed0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 325, + 402, + 334 + ], + "lines": [ + { + "bbox": [ + 336, + 325, + 402, + 334 + ], + "spans": [ + { + "bbox": [ + 336, + 325, + 402, + 334 + ], + "type": "text", + "content": "DA Faster-RCNN" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 427, + 260, + 542, + 325 + ], + "blocks": [ + { + "bbox": [ + 427, + 260, + 542, + 325 + ], + "lines": [ + { + "bbox": [ + 427, + 260, + 542, + 325 + ], + "spans": [ + { + "bbox": [ + 427, + 260, + 542, + 325 + ], + "type": "image", + "image_path": "2e63e3fe63bff5506f7606feab4b1ad88b1cbdc1709be968f5f988018d6f6771.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 474, + 325, + 493, + 334 + ], + "lines": [ + { + "bbox": [ + 474, + 325, + 493, + 334 + ], + "spans": [ + { + "bbox": [ + 474, + 325, + 493, + 334 + ], + "type": "text", + "content": "UMT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 311, + 336, + 426, + 403 + ], + "blocks": [ + { + "bbox": [ + 311, + 336, + 426, + 403 + ], + "lines": [ + { + "bbox": [ + 311, + 336, + 426, + 403 + ], + "spans": [ + { + "bbox": [ + 311, + 336, + 426, + 403 + ], + "type": "image", + "image_path": "350b7bd91271c7942627218e0872edb02ac1007c41555ba9accbebbb4a8b99d5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 403, + 373, + 411 + ], + "lines": [ + { + "bbox": [ + 364, + 403, + 373, + 411 + ], + "spans": [ + { + "bbox": [ + 364, + 403, + 373, + 411 + ], + "type": "text", + "content": "AT" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 422, + 545, + 478 + ], + "lines": [ + { + "bbox": [ + 306, + 422, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 306, + 422, + 545, + 478 + ], + "type": "text", + "content": "Figure 1. Qualitative results of state-of-the-art DA methods, DA Faster-RCNN [3], UMT [7], Adaptive Teacher (AT) [15] and our method 2PCNet on the BDD100K [36] dataset. Unlike the SOTA methods, our method is able to detect dark and small scale objects with minimal additional false positive predictions." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 427, + 336, + 542, + 403 + ], + "blocks": [ + { + "bbox": [ + 427, + 336, + 542, + 403 + ], + "lines": [ + { + "bbox": [ + 427, + 336, + 542, + 403 + ], + "spans": [ + { + "bbox": [ + 427, + 336, + 542, + 403 + ], + "type": "image", + "image_path": "53f055484860341fb6f8bbfbf09bbdc6bb26422bdf984c4ac17b1fca1945835d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 403, + 512, + 412 + ], + "lines": [ + { + "bbox": [ + 454, + 403, + 512, + 412 + ], + "spans": [ + { + "bbox": [ + 454, + 403, + 512, + 412 + ], + "type": "text", + "content": "2PCNet (Ours)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 506, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 555 + ], + "type": "text", + "content": "do not assume the availability of the annotations are more advantageous. Domain adaptation (DA) is an efficient solution to this problem by allowing the use of readily available annotated source daytime datasets." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": "A few domain adaptation methods have been proposed, e.g., adversarial learning which uses image and instance level classifiers [3] and similar concepts [22, 32]. However, these methods isolate the domain adaptation task purely towards the feature extractor, and suppress features of the target data for the sake of domain invariance. Recent unsupervised domain adaptation methods exploit the studentteacher framework (e.g. [1,7,11,15]). Since the student initially learns from the supervised loss, there is a bias towards the source data. Augmentation [7, 11] and adversarial learning [15] have been proposed to address this problem. Unfortunately, particularly for day-to-night unsupervised domain adaptation, these methods suffer from a large num" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 208, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 208, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 208, + 712 + ], + "type": "text", + "content": "1www.github.com/mercarill/2pcnet" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11484" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "ber of inaccurate pseudo-labels produced by the teacher. In our investigation, the problem is notably due to insufficient knowledge of small scale features in the nighttime domain, which are then propagated through the learning process between the teacher and student, resulting in poor object detection performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 288, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 288, + 312 + ], + "type": "text", + "content": "To address the problem, in this paper, we present 2PC-Net, a two-phase consistency unsupervised domain adaptation network for nighttime object detection. Our 2PCNet merges the bounding-boxes of highly-confident pseudolabels, which are predicted in phase one, together with regions proposed by the student's region proposal network (RPN). The merged proposals are then used by the teacher to generate a new set of pseudo-labels in phase two. This provides a combination of high and low confidence pseudolabels. These pseudo-labels are then matched with predictions generated by the student. We can then utilise a weighted consistency loss to ensure that a higher weightage of our unsupervised loss is based on stronger pseudo-labels, yet allow for weaker pseudo-labels to influence the training." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 313, + 287, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 313, + 287, + 420 + ], + "spans": [ + { + "bbox": [ + 46, + 313, + 287, + 420 + ], + "type": "text", + "content": "Equipped with this two-phase strategy, we address the problem of errors from small-scale objects. We devise a student-scaling technique, where night images and their pseudo-labels for the student are deliberately scaled down. In order to generate accurate pseudo-labels, images to the teacher remain at their full scale. This results in the pseudolabels of larger objects, which are easier to predict, to be scaled down to smaller objects, allowing for an increase in small scale performance of the student." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 422, + 288, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 288, + 601 + ], + "type": "text", + "content": "Nighttime images suffer from multiple complications not found in daytime scenes such as dark regions, glare, prominent noise, prominent blur, imbalanced lighting, etc. 
All these cause a problem, since the student, which was trained on daytime images, is much more biased towards the daytime domain's characteristics. To mitigate this problem, we propose NightAug, a set of random nighttime specific augmentations. NightAug includes adding artificial glare, noise, blur, etc. that mimic the night conditions to daytime images. With NightAug we are able to reduce the bias of the student network towards the source data without resulting in adversarial learning or compute-intensive translations. Overall, using 2PCNet, we can see the qualitative improvements of our result in Figure 1. In summary, the contributions of this paper are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 611, + 287, + 714 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 58, + 611, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 611, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 58, + 611, + 287, + 684 + ], + "type": "text", + "content": "- We present 2PCNet, a two-phase consistency approach for student-teacher learning. 2PCNet takes advantage of highly confident teacher labels augmented with less confident regions, which are proposed by the scaled student. This strategy produces a sharp reduction of the error propagation in the learning process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 58, + 689, + 287, + 714 + ], + "type": "text", + "content": "- To address the bias of the student towards the source domain, we propose NightAug, a random night spe" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 324, + 72, + 545, + 96 + ], + "type": "text", + "content": "cific augmentation pipeline to shift the characteristics of daytime images toward nighttime." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "text", + "content": "- The effectiveness of our approach has been verified by comparing it with the state-of-the-art domain adaptation approaches. An improvement of " + }, + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "inline_equation", + "content": "+7.9\\mathrm{AP}(+20\\%)" + }, + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "inline_equation", + "content": "+10.2\\mathrm{AP}(26\\%)" + }, + { + "bbox": [ + 317, + 102, + 547, + 162 + ], + "type": "text", + "content": " over the SOTA on BDD100K and SHIFT has been achieved, respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 180, + 392, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 180, + 392, + 193 + ], + "spans": [ + { + "bbox": [ + 306, + 180, + 392, + 193 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 201, + 545, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 201, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 304, + 201, + 545, + 357 + ], + "type": "text", + "content": "Unsupervised Domain Adaptation (UDA) Unsupervised domain adaptation aims to learn transferable features to reduce the discrepancy between a labelled source and unlabelled target domain. Previous works minimised the distance metric (MMD) [16-18] and considered intra-class and inter-class discrepancy [12, 13]. Adversarial feature learning involved adding an adversarial classifier to play the min-max game between the domain discriminator and feature extractors to generate a domain invariant feature map [27, 28, 37]. These methods have been applied to image classification. Our work focuses on object detection, which is more complex as it involves identifying multiple bounding boxes and associated classes in each image." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 362, + 545, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 362, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 362, + 545, + 531 + ], + "type": "text", + "content": "UDA for Object Detection Object detection with UDA is a recent challenge due to the complexities of identifying multiple objects in an image. DA-Faster RCNN [3] integrated adversarial learning with image and instance level classifiers, and several approaches have been proposed to improve on this method by introducing scale-awareness [4], class specific discriminators [31], and re-purposing the task-specific classifier as a discriminator [2]. The Mean Teacher (MT) framework [26] has been adopted in semi-supervised methods, such as UMT [7], which incorporates CycleGAN [39] augmented images; AT [15], which combines the student-teacher framework with adversarial learning; and TDD [11], which uses dual student-teacher networks with style transfer." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 545, + 545, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 666 + ], + "type": "text", + "content": "Nighttime UDA The majority of research on unsupervised domain adaptation (UDA) in nighttime scenarios has focused on semantic segmentation [5, 8, 9, 14, 23, 29, 33]. Translation and style transformation techniques are commonly used to reduce the domain gap between the source and target domains in these methods [8,29,33]. Some UDA-based techniques for nighttime also utilise paired-images to generate a shared feature space [23], while others use an intermediate domain such as twilight to reduce the domain gap during unsupervised learning [5]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 714 + ], + "type": "text", + "content": "Nighttime tracking has also been investigated where adversarial transformers are used to close the domain gap [35]. 
However, there is a gap in research when it comes to applying UDA techniques in the object detection task for night-" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11485" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 545, + 327 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 327 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 327 + ], + "type": "image", + "image_path": "d79fc3147efb095c9d9a480464ef2004c703c1cc7d2c0b76ce09ed2f1902d44e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 335, + 546, + 392 + ], + "lines": [ + { + "bbox": [ + 45, + 335, + 546, + 392 + ], + "spans": [ + { + "bbox": [ + 45, + 335, + 546, + 392 + ], + "type": "text", + "content": "Figure 2. Overview of our proposed framework, 2PCNet. 2PCNet consists of: A student network is trained on both the labelled daytime image, which has been augmented with NightAug, and unlabelled nighttime images. A teacher network which is the exponential moving average (EMA) of the student and provides matched pseudo-labels for unsupervised loss. The match pseudo-labels are the predictions of the teacher (phase two) using the RPN proposals of the student, which in turn was guided by the high confidence pseudo-labels of the teacher (phase one)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 412, + 288, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 412, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 288, + 448 + ], + "type": "text", + "content": "time scenarios. Therefore, we explore the application of UDA techniques in object detection under low-light and nighttime conditions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 462, + 153, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 462, + 153, + 476 + ], + "spans": [ + { + "bbox": [ + 47, + 462, + 153, + 476 + ], + "type": "text", + "content": "3. Proposed Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_s" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": " be the daytime source data. " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_s = \\{I_s, C_s, B_s\\}" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": ", where the variables refer to the image, class label and bounding-box label, respectively. Index " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": " indicates the daytime source. 
The night target data is represented by " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t = \\{I_t\\}" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": " as we do not have the target labels available to us. Index " + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 483, + 287, + 555 + ], + "type": "text", + "content": " indicates the nighttime target." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 556, + 288, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 288, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 288, + 675 + ], + "type": "text", + "content": "The architecture of our 2PCNet is shown in Figure 2. Our 2PCNet consists of a student and a teacher network. The student is a multi-domain network trained on both labelled daytime images, augmented with NightAug, and unlabelled nighttime images. The teacher focuses on night images to produce pseudo-labels for the student and is the exponential moving average (EMA) of the student. After an initial pretraining phase, the teacher begins producing pseudo-labels, which allows the student to initialise the feature extractor and detector." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "During each iteration, in phase one of 2PCNet, the teacher produces pseudo-labels from the night images. These pseudo-labels are filtered through a confidence" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 412, + 546, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 546, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 546, + 519 + ], + "type": "text", + "content": "threshold. This is to ensure only high-confidence pseudolabels are given to the student. The bounding-boxes from the pseudo-labels are then combined with the region proposals generated by the student's RPN. The merged region proposals are then used to generate predictions from the student's RoI network. In phase two, the teacher utilises the same merged region proposals to generate a matched set of pseudo-labels, where each pseudo-label has its corresponding prediction obtained from the student." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 521, + 545, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 559 + ], + "type": "text", + "content": "As mentioned earlier, our student network is initialised by pretraining for a set number of iterations. 
This is done with supervised loss on the augmented daytime images:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 341, + 574, + 545, + 588 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 574, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 341, + 574, + 545, + 588 + ], + "type": "interline_equation", + "content": "L _ {\\sup } = L _ {\\operatorname {r p n}} \\left(B _ {s}, I _ {s}\\right) + L _ {\\operatorname {r o i}} \\left(B _ {s}, C _ {s}, I _ {s}\\right), \\tag {1}", + "image_path": "306a9504eb926626003e71ba6a6b5fe2bd2c5df59723bc7b7dfdde4dbf2613e6.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{rpn}}" + }, + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "text", + "content": " represents the loss from the RPN, which consists of an objectness and bounding-box regression loss. " + }, + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{roi}}" + }, + { + "bbox": [ + 304, + 603, + 545, + 651 + ], + "type": "text", + "content": " represents the loss from the detector network, consisting of a classification and bounding-box regression loss." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": "Once the pretraining is completed, the student's weights are then transferred over to the teacher. In the succeeding iterations, the teacher's weights are the exponential moving average (EMA) of the student's. The matched pseudo-labels generated by the teacher, " + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\{C_p^*, B_p^*\\}" + }, + { + "bbox": [ + 304, + 654, + 545, + 715 + ], + "type": "text", + "content": ", are then used to guide" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11486" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 173, + 212 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 173, + 212 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 173, + 212 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 173, + 212 + ], + "type": "image", + "image_path": "c67cd0ceb3844b100df47930828639c1f30a507ba48e45e37211aff677e01841.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "lines": [ + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "text", + "content": "Figure 3. 
(Left to Right, Top to Bottom) Ground truth bounding boxes, bounding boxes predicted by the teacher with non-maximal suppression (NMS) and thresholding " + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "inline_equation", + "content": "(B_{p})" + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "text", + "content": ", bounding boxes predicted by the student " + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "inline_equation", + "content": "(B_{\\mathrm{student}})" + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "text", + "content": " which is guided by " + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "inline_equation", + "content": "B_{p}" + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "text", + "content": ", and the bounding boxes predicted by the teacher " + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "inline_equation", + "content": "(B_{p}^{*})" + }, + { + "bbox": [ + 46, + 223, + 287, + 288 + ], + "type": "text", + "content": " for the consistency loss." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 173, + 72, + 295, + 212 + ], + "blocks": [ + { + "bbox": [ + 173, + 72, + 295, + 212 + ], + "lines": [ + { + "bbox": [ + 173, + 72, + 295, + 212 + ], + "spans": [ + { + "bbox": [ + 173, + 72, + 295, + 212 + ], + "type": "image", + "image_path": "19d2798780ffd2eaa87261a168fdf6db1a82fe1e65293999ac403404e0f935dc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 312, + 183, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 312, + 183, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 312, + 183, + 323 + ], + "type": "text", + "content": "the unsupervised loss, defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 335, + 287, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 335, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 83, + 335, + 287, + 350 + ], + "type": "interline_equation", + "content": "L _ {\\text {u n s u p}} = L _ {\\text {r p n}} ^ {\\text {o b j}} \\left(C _ {p} ^ {*}; I _ {t}\\right) + L _ {\\text {c o n s}} \\left(C _ {p} ^ {*}; I _ {t}\\right), \\tag {2}", + "image_path": "2cbc64337f3cd039384fb33cd14913116a8602bcb8f0628845c99a5041676930.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{rpn}}^{\\mathrm{obj}}" + }, + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "text", + "content": " is the objectness loss of the RPN and " + }, + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{cons}}" + }, + { + "bbox": [ + 46, + 360, + 287, + 397 + ], + "type": "text", + "content": " is the weighted KL-Divergence loss from the predicted outputs which we will further explain in the next section." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 406, + 179, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 406, + 179, + 418 + ], + "spans": [ + { + "bbox": [ + 47, + 406, + 179, + 418 + ], + "type": "text", + "content": "3.1. Two-Phase Consistency" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 426, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 581 + ], + "type": "text", + "content": "Due to the large domain gap between daytime source images and nighttime target images, the teacher is unable to produce high quality pseudo-labels. This generally occurs in the whole scene, but particularly for regions with strong night characteristics, e.g., low-light, glare, uneven lighting, etc. The teacher produces confident pseudo-labels only for regions that share more similarities to the daytime, since it is biased towards the daytime domain. This bias poses a problem for methods that employ a hard-threshold to filter pseudo-labels for categorical cross-entropy loss [7, 15, 26]. The remaining pseudo-labels contain only easy samples with daytime attributes. Consequently, the student does not learn from harder (e.g. darker) areas." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 581, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 689 + ], + "type": "text", + "content": "As a result of minimal knowledge of the hard samples (i.e., areas with a high level of nighttime attributes), the teacher begins to predict highly confident yet incorrect pseudo-labels. As the teacher provides these incorrect pseudo-labels to the student, a viscous cycle starts where the teacher in turn is updated with incorrect knowledge. Consequently, the error continues to propagate through training. In our case, these errors notably occur in dark/glare regions and as small scale objects." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "To address the problem of error propagation, we design a two-phase approach that combines high confidence" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": "pseudo-labels together with their less confident counterparts. This combination allows for the high accuracy of confident-labels with the additional knowledge of less confident labels to be distilled onto the student. In phase one, the unlabelled nighttime image, " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", is used as an input for the teacher to generate pseudo-labels. These pseudo-labels are filtered with a threshold to retain only high-confidence pseudo-labels, " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "(C_p, B_p)" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ". 
The bounding-box of the pseudolabels, " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "B_{p}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", is then used as an input to the student. " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "B_{p}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": " is concatenated to the region proposals generated by the student RPN module:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 369, + 213, + 545, + 226 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 213, + 545, + 226 + ], + "spans": [ + { + "bbox": [ + 369, + 213, + 545, + 226 + ], + "type": "interline_equation", + "content": "P ^ {*} = \\operatorname {R P N} _ {\\text {s t u d e n t}} \\left(I _ {t}\\right) \\neq B _ {p}, \\tag {3}", + "image_path": "47daf230186e962985d72328cb85b5ba27fdd8b75ede00c44c0e87c3ff1318e5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "inline_equation", + "content": "P^{*}" + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "text", + "content": " is the combined region proposals, which are then used as an input to the student's RoI module to predict the classes, " + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{student}}" + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "text", + "content": ", and bounding-box, " + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "inline_equation", + "content": "B_{\\mathrm{student}}" + }, + { + "bbox": [ + 304, + 234, + 545, + 282 + ], + "type": "text", + "content": ", of each region proposal." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 282, + 545, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 545, + 329 + ], + "type": "text", + "content": "Phase two begins by using the same combined region proposals, " + }, + { + "bbox": [ + 304, + 282, + 545, + 329 + ], + "type": "inline_equation", + "content": "P^{*}" + }, + { + "bbox": [ + 304, + 282, + 545, + 329 + ], + "type": "text", + "content": ", generated in phase one as an input to the teachers RoI module to generate a matched set of pseudolabels:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 366, + 339, + 545, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 339, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 366, + 339, + 545, + 354 + ], + "type": "interline_equation", + "content": "\\left\\{C _ {p} ^ {*}, B _ {p} ^ {*} \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*}\\right). 
\\tag {4}", + "image_path": "e9bf6f1e1664dfe9dbb0c9a7bf5fe519d90ffc936226fc4b0c0030434c6d490b.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": "The difference between " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_p" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_p^*" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": " is that " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_p^*" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": " is derived from the same region proposals as that of the student predictions " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{student}}" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": ". This allows us to compare " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{student}}" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "inline_equation", + "content": "C_p^*" + }, + { + "bbox": [ + 304, + 361, + 545, + 411 + ], + "type": "text", + "content": " directly:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 418, + 545, + 447 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 418, + 545, + 447 + ], + "spans": [ + { + "bbox": [ + 318, + 418, + 545, + 447 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\{C _ {\\text {s t u d e n t}} (n), B _ {\\text {s t u d e n t}} (n) \\right\\} = \\operatorname {R o I} _ {\\text {s t u d e n t}} \\left(P ^ {*} (n)\\right), \\tag {5} \\\\ \\left\\{C _ {p} ^ {*} (n), B _ {p} ^ {*} (n) \\right\\} = \\operatorname {R o I} _ {\\text {t e a c h e r}} \\left(P ^ {*} (n)\\right), \\\\ \\end{array}", + "image_path": "94cab8a4e17e960bd842298ec53f28abe35cfce19cc5d96a3e5ed2de8a97263d.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "inline_equation", + "content": "n = \\{1,2,\\dots,N\\}" + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "text", + "content": " is the number of region proposals in " + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "inline_equation", + "content": "P^*" + }, + { + "bbox": [ + 304, + 453, + 545, + 549 + ], + "type": "text", + "content": ". This operation ensures that the knowledge of highly confident predictions generated by the teacher is distilled through to the student. 
In addition, information from less confident predictions can also be learnt. However, we are still required to penalise less confident samples and thus employ weighed KL-Divergence to be used as our consistency loss:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 364, + 559, + 545, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 559, + 545, + 572 + ], + "spans": [ + { + "bbox": [ + 364, + 559, + 545, + 572 + ], + "type": "interline_equation", + "content": "L _ {\\text {c o n s}} = \\alpha \\operatorname {K L} \\left(C _ {\\text {s t u d e n t}}, C _ {p} ^ {*}\\right), \\tag {6}", + "image_path": "075ae2518d63007de4b74b6531b8ff02df7f3580df89be49c699de995a8fbec3.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "text", + "content": " is the highest confidence of " + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "inline_equation", + "content": "C_p^*" + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "text", + "content": " expressed as " + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\alpha = \\max(C_p^*)" + }, + { + "bbox": [ + 304, + 580, + 545, + 665 + ], + "type": "text", + "content": "; KL() is the KL-divergence function. Note that, pseudo-bounding boxes are not used to generate unsupervised loss, as the confidence score of each pseudo-label represents the class information rather than the bounding box. The outputs of each segment of our two-phase approach are shown in Figure 3." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 403, + 684 + ], + "type": "text", + "content": "3.2. Student-Scaling" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 689, + 545, + 713 + ], + "type": "text", + "content": "In our investigation, we have found that scales of objects have a strong influence on object detection at night. 
This" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "11487" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 56, + 87, + 226, + 231 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 237, + 84 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 237, + 84 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 237, + 84 + ], + "type": "text", + "content": "Algorithm 1 Single Augmentation - NightAug" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "lines": [ + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "spans": [ + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": "imgClean " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " img \nif randFloat " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\geq 0.5" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " then randFloat " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow 0.8*" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " randFloat " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "+0.2" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " img " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " augmentation(img, randval) prob " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow 0.4" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " while randFloat " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\geq" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " prob do " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "x\\gets" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " randInt(img.shape[1],2) " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "y\\gets" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " randInt(img.shape[2],2) img[x,y] " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " imgClean[x,y] prob " + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 56, + 87, + 226, + 231 + ], + "type": "text", + "content": " prob +0.1 end while \nend if" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 46, + 254, + 286, + 420 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 46, + 254, + 286, + 420 + ], + "spans": [ + { + "bbox": [ + 46, + 254, + 286, + 420 + ], + "type": "text", + "content": "is due to the features of smaller objects being easily overwhelmed by glare or noise. To allow the student to overcome this, we apply scaling augmentation to the student's inputs which includes both the image and the pseudo-labels generated by the teacher. As training proceeds, we follow a schedule to increase the scale of the student augmentation until it equals to that of the original image. By iteratively increasing the scale we allow the student to focus on smaller features earlier in the training process. This process encourages the teacher to make more accurate predictions on smaller scale objects in the later stages of training. In turn, accurate small scale pseudo-labels allow for the increase in the scale of the student's inputs with minimal errors due to scale." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 422, + 286, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 422, + 286, + 493 + ], + "spans": [ + { + "bbox": [ + 46, + 422, + 286, + 493 + ], + "type": "text", + "content": "To ensure the knowledge of the previous scales is not forgotten, a gaussian function for the scaling factor is applied. The norm of the Gaussian function is obtained from the schedule values. To prevent additional noise due to pseudo-labels being too small, labels that has an area below a threshold are removed." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 502, + 115, + 515 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 115, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 115, + 515 + ], + "type": "text", + "content": "3.3. NightAug" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 521, + 286, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 286, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 286, + 665 + ], + "type": "text", + "content": "Night images suffer from a range of complications that are not present in daytime scenes. This causes a problem in the student-teacher framework, where the student would be biased towards the source domain. Previous methods have attempted to address this, but have either required compute-intensive translations [7, 11] or adding additional domain classifiers to the framework [15] which complicates training. We propose NightAug, a nighttime specific augmentation pipeline that is compute-light and does not require training. NightAug consists of a series of augmentations with the aim of steering the characteristics of daytime images to resemble that of a nighttime image." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 286, + 713 + ], + "type": "text", + "content": "The defining features of nighttime images are that they are darker and have lower contrast than daytime images. 
In addition the signal-to-night ratio (SNR) could be higher due to the properties of digital cameras such as luminance and" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 71, + 426, + 206 + ], + "blocks": [ + { + "bbox": [ + 307, + 71, + 426, + 206 + ], + "lines": [ + { + "bbox": [ + 307, + 71, + 426, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 71, + 426, + 206 + ], + "type": "image", + "image_path": "44477bcfe46dd0b404ebc19a5eabcfb95708b6ebf6fb6883592a6a5e4c257b7b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 214, + 545, + 248 + ], + "lines": [ + { + "bbox": [ + 305, + 214, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 305, + 214, + 545, + 248 + ], + "type": "text", + "content": "Figure 4. NightAug: Original image (top-left) and images with random augmentations from: gaussian blur, gamma correction, brightness, contrast, glare, gaussian noise and random cut-outs." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 427, + 71, + 545, + 206 + ], + "blocks": [ + { + "bbox": [ + 427, + 71, + 545, + 206 + ], + "lines": [ + { + "bbox": [ + 427, + 71, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 427, + 71, + 545, + 206 + ], + "type": "image", + "image_path": "28b82fb10b3a24991f39d715a100a831e46d1756277035c6f05cde59023c911f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 277, + 545, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 545, + 326 + ], + "type": "text", + "content": "colour noise. Glare and glow from street lamps and headlights are also present in nighttime images. Additionally, images may be out-of-focus due to the cameras inability to detect reference points to focus on in dark environments." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 330, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 330, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 304, + 330, + 545, + 485 + ], + "type": "text", + "content": "Keeping in mind the properties of nighttime images, our NightAug includes random; brightness, contrast, gamma, gaussian noise, gaussian blur augmentations and random glare insertion. The augmentations are randomly applied to the images and are also random in intensity. This randomness results in a wider variance of images that are exposed to the student leading to more robust training [30]. To further increase the variance of the images, at each augmentation step, random segments of the image will ignore the application of that augmentation. This allows for the representation where different areas of nighttime images may be unevenly lighted. This uneven lighting affects the above characteristics of the local region." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 489, + 545, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 489, + 545, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 489, + 545, + 584 + ], + "type": "text", + "content": "A single augmentation flow of NightAug is demonstrated in Algorithm 1. Samples of an image processed with NightAug are shown in Figure 4. Each augmentation has a set probability of being applied, with the strength of the augmentation being random. 
Random regions of the augmented image may then be replaced with that of the original image. The probability of this region replacement reduces with each iteration." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 613, + 516, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 613, + 516, + 625 + ], + "spans": [ + { + "bbox": [ + 306, + 613, + 516, + 625 + ], + "type": "text", + "content": "Overall Loss Our total loss can be represented as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 372, + 645, + 545, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 645, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 372, + 645, + 545, + 658 + ], + "type": "interline_equation", + "content": "L _ {\\text {t o t a l}} = L _ {\\sup } + \\lambda L _ {\\text {u n s u p}}, \\tag {7}", + "image_path": "14ee4cf56fb86e05d5956e58cbabe4ab0124516ccfa89f1cc768b9a39a5d289c.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " represents a weight factor for the unsupervised loss, and is set experimentally. " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{sup}}, L_{\\mathrm{unsup}}" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " refer to Eq. (1) and Eq. (2), respectively." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11488" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 547, + 198 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 547, + 198 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 547, + 198 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 547, + 198 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>AP</td><td>Pedestrian</td><td>Rider</td><td>Car</td><td>Truck</td><td>Bus</td><td>Motorcycle</td><td>Bicycle</td><td>Traffic Light</td><td>Traffic Sign</td></tr>
<tr><td>Lower-Bound</td><td>41.1</td><td>50.0</td><td>28.9</td><td>66.6</td><td>47.8</td><td>47.5</td><td>32.8</td><td>39.5</td><td>41.0</td><td>56.5</td></tr>
<tr><td>Upper-Bound</td><td>46.2</td><td>52.1</td><td>35.0</td><td>73.6</td><td>53.5</td><td>54.8</td><td>36.0</td><td>41.8</td><td>52.2</td><td>63.3</td></tr>
<tr><td>DA F-RCNN [3]</td><td>41.3</td><td>50.4</td><td>30.3</td><td>66.3</td><td>46.8</td><td>48.3</td><td>32.6</td><td>41.4</td><td>41.0</td><td>56.2</td></tr>
<tr><td>TDD [11]</td><td>34.6</td><td>43.1</td><td>20.7</td><td>68.4</td><td>33.3</td><td>35.6</td><td>16.5</td><td>25.9</td><td>43.1</td><td>59.5</td></tr>
<tr><td>UMT [7]</td><td>36.2</td><td>46.5</td><td>26.1</td><td>46.8</td><td>44.0</td><td>46.3</td><td>28.2</td><td>40.2</td><td>31.6</td><td>52.7</td></tr>
<tr><td>AT [15]</td><td>38.5</td><td>42.3</td><td>30.4</td><td>60.8</td><td>48.9</td><td>52.1</td><td>34.5</td><td>42.7</td><td>29.1</td><td>43.9</td></tr>
<tr><td>2PCNet (Ours)</td><td>46.4</td><td>54.4</td><td>30.8</td><td>73.1</td><td>53.8</td><td>55.2</td><td>37.5</td><td>44.5</td><td>49.4</td><td>65.2</td></tr></table>
", + "image_path": "6ad51ceac8b2659525cb3f8303090968c0631920eb8a8bf84214e9d2a8c2bb84.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 48, + 270, + 288, + 360 + ], + "blocks": [ + { + "bbox": [ + 46, + 207, + 546, + 251 + ], + "lines": [ + { + "bbox": [ + 46, + 207, + 546, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 207, + 546, + 251 + ], + "type": "text", + "content": "Table 1. Results of day-to-night domain adaptation on the BDD100K dataset, the Average Precision (AP) of all classes are reported. Faster RCNN detector with ResNet-50 feature extractor is used for all experiments to ensure a fair comparison. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively. The lower-bound provides a baseline without any domain adaptation while the upper-bound is fully supervised, the case where labelled target night data is available." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 270, + 288, + 360 + ], + "lines": [ + { + "bbox": [ + 48, + 270, + 288, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 270, + 288, + 360 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>APcoco</td><td>Car</td><td>Bus</td><td>Truck</td></tr>
<tr><td>Lower-Bound</td><td>22.1</td><td>37.5</td><td>29.8</td><td>30.7</td></tr>
<tr><td>Upper-Bound</td><td>23.9</td><td>42.0</td><td>33.8</td><td>35.0</td></tr>
<tr><td>FDA [34]</td><td>22.6</td><td>38.5</td><td>37.2</td><td>23.2</td></tr>
<tr><td>ForkGAN [38]</td><td>22.9</td><td>41.2</td><td>33.3</td><td>32.1</td></tr>
<tr><td>2PCNet (Ours)</td><td>23.5</td><td>40.7</td><td>38.2</td><td>35.0</td></tr></table>
", + "image_path": "a67e50c51805e5d3272d20f75a585101e458778aa663a8acedc2e157070dc842.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 369, + 287, + 414 + ], + "lines": [ + { + "bbox": [ + 46, + 369, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 46, + 369, + 287, + 414 + ], + "type": "text", + "content": "Table 2. Comparison of our framework, 2PCNet, with image-to-image (I2I) translation methods. Conducted on the BDD100K dataset. ForkGan and FDA are used for comparison. Reported " + }, + { + "bbox": [ + 46, + 369, + 287, + 414 + ], + "type": "inline_equation", + "content": "AP_{coco}" + }, + { + "bbox": [ + 46, + 369, + 287, + 414 + ], + "type": "text", + "content": " is the averaged AP over IoUs 0.5 to 0.95." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 428, + 128, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 428, + 128, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 428, + 128, + 441 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 448, + 113, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 448, + 113, + 460 + ], + "spans": [ + { + "bbox": [ + 47, + 448, + 113, + 460 + ], + "type": "text", + "content": "4.1. Baselines" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 468, + 287, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 287, + 588 + ], + "type": "text", + "content": "To evaluate our method, we compare our approach with SOTA methods in domain adaptation for object detection. These include DA-Faster RCNN [3], TDD [11], UMT [7], AT [15] as well as a non-DA baseline Faster-RCNN [21]. Faster-RCNN is used as both our lower and upper-bound, where it is trained on labelled source and target data respectively. We additionally compare our approach with image-to-image translation methods, ForkGAN [38] and FDA [34]. Translation methods are trained on Faster RCNN with both the daytime and translated images." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 598, + 110, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 598, + 110, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 598, + 110, + 609 + ], + "type": "text", + "content": "4.2. Datasets" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 287, + 713 + ], + "type": "text", + "content": "The majority of existing nighttime datasets either focuses on semantic segmentation which do not provide labels for object detection [5, 23, 24], or contains very few classes [19, 20]. BDD100K [36] was selected as it provides object detection labels which includes a wide range of classes (10). It also has a large number of images compared to other DA datasets covering daytime, nighttime and other adverse conditions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 272, + 545, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 272, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 304, + 272, + 545, + 369 + ], + "type": "text", + "content": "The SHIFT [25] dataset is a recent simulated driving dataset that contains scenes in various environments. 
A continuous shift of these environments is available. SHIFT contains 6 class labels that share similarities to the BDD100K classes. For our evaluation, we use images with the 'day' and 'night' label as our source and target data respectively. We further ensure that the weather tag is 'clear' to isolate other weather conditions from the evaluation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 378, + 404, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 378, + 404, + 392 + ], + "spans": [ + { + "bbox": [ + 305, + 378, + 404, + 392 + ], + "type": "text", + "content": "4.3. Implementation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 397, + 546, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 397, + 546, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 397, + 546, + 624 + ], + "type": "text", + "content": "Following previous SOTA methods, we employ Faster-RCNN [21] as our base detection model and ResNet-50 [10] pretrained on ImageNet [6] as our feature extractor. All images are scaled by resizing its shorter side to 600 pixels. For student-scaling we set a schedule for (0.57, 0.64, 0.71, 0.78, 0.85, 0.92) of the maximum iterations at scales (0.5, 0.6, 0.7, 0.8, 0.9, 1.0). Loss hyperparameters are set at " + }, + { + "bbox": [ + 304, + 397, + 546, + 624 + ], + "type": "inline_equation", + "content": "\\lambda = 0.3" + }, + { + "bbox": [ + 304, + 397, + 546, + 624 + ], + "type": "text", + "content": " and the rate smooth coefficient parameter of the EMA is 0.9996. A confidence threshold of 0.8 for phase one of Two-Phase Consistency. For the initial pretraining of the student model, we train the student for 50k and 20k iterations on the source images, for BDD100K and SHIFT respectively. Supervised inputs are daytime images with and without NightAug. We then copy the weights to the teacher and continue training with the addition of unsupervised loss for an additional 50k iterations. The learning rate is kept at 0.04 throughout training. Our network is trained on 3 RTX3090 GPUs with a batch-size of 6 source and 6 target images." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 634, + 429, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 634, + 429, + 647 + ], + "spans": [ + { + "bbox": [ + 305, + 634, + 429, + 647 + ], + "type": "text", + "content": "4.4. Comparison to SOTA" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 545, + 713 + ], + "type": "text", + "content": "Comparison on BDD100K We compare our method against the SOTA on real driving scenes and evaluating their domain adaptation performance on nighttime images, the results of this experiment can be seen on Table 1. 
The results show that our method achieves the highest perfor" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11489" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 149, + 266 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 149, + 266 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 149, + 266 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 149, + 266 + ], + "type": "image", + "image_path": "260a68fcdf8a6dfda8ed4d951a9e734559f34114dc70c808af48e92e2eeabd0c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 276, + 547, + 310 + ], + "lines": [ + { + "bbox": [ + 46, + 276, + 547, + 310 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 547, + 310 + ], + "type": "text", + "content": "Figure 5. Qualitative results of Faster RCNN, Adaptive Teacher (AT) and our method on the SHIFT dataset with the ground-truth on the far right. We can observe that Faster RCNN is not able to detect objects due to absence of domain adaptation, while AT has a large number of small false positive bounding boxes compared to our method which closely resembles that of the ground-truth." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 150, + 72, + 248, + 266 + ], + "blocks": [ + { + "bbox": [ + 150, + 72, + 248, + 266 + ], + "lines": [ + { + "bbox": [ + 150, + 72, + 248, + 266 + ], + "spans": [ + { + "bbox": [ + 150, + 72, + 248, + 266 + ], + "type": "image", + "image_path": "fa1c9a5062df328949db62faac6c53ac246b0a74b845931868e4ae6e10da8b1d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 249, + 72, + 346, + 266 + ], + "blocks": [ + { + "bbox": [ + 249, + 72, + 346, + 266 + ], + "lines": [ + { + "bbox": [ + 249, + 72, + 346, + 266 + ], + "spans": [ + { + "bbox": [ + 249, + 72, + 346, + 266 + ], + "type": "image", + "image_path": "118d34ec49e84b21fdf24b602e2d317adbad87c2f07af0c8f63bcb2c7fe80499.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 347, + 72, + 444, + 266 + ], + "blocks": [ + { + "bbox": [ + 347, + 72, + 444, + 266 + ], + "lines": [ + { + "bbox": [ + 347, + 72, + 444, + 266 + ], + "spans": [ + { + "bbox": [ + 347, + 72, + 444, + 266 + ], + "type": "image", + "image_path": "85899240049ff1fc50d9929c44f154e9f808a51c616d22cd1432f4ba874464f8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 444, + 72, + 543, + 266 + ], + "blocks": [ + { + "bbox": [ + 444, + 72, + 543, + 266 + ], + "lines": [ + { + "bbox": [ + 444, + 72, + 543, + 266 + ], + "spans": [ + { + "bbox": [ + 444, + 72, + 543, + 266 + ], + "type": "image", + "image_path": "52216da15ee92e9d03f9f345c2ca5f962ae1e5bd73a17eec5bd7ac074b3d2650.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 48, + 328, + 288, + 423 + ], + "blocks": [ + { + "bbox": [ + 48, + 328, + 288, + 423 + ], + "lines": [ + { + "bbox": [ + 48, + 
328, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 48, + 328, + 288, + 423 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>AP</td><td>Per.</td><td>Car</td><td>Truck</td><td>Bus</td><td>Mcy.</td><td>Bcy.</td></tr>
<tr><td>Lower-Bound</td><td>41.6</td><td>40.4</td><td>44.5</td><td>49.9</td><td>53.7</td><td>14.3</td><td>46.7</td></tr>
<tr><td>Upper-Bound</td><td>47.0</td><td>49.7</td><td>51.5</td><td>56.0</td><td>53.6</td><td>19.2</td><td>52.4</td></tr>
<tr><td>DA FR [3]</td><td>43.7</td><td>43.0</td><td>48.8</td><td>47.8</td><td>52.1</td><td>19.9</td><td>55.8</td></tr>
<tr><td>UMT [7]</td><td>31.1</td><td>7.7</td><td>47.5</td><td>18.4</td><td>46.8</td><td>16.6</td><td>49.2</td></tr>
<tr><td>AT [15]</td><td>38.9</td><td>25.8</td><td>33.0</td><td>54.7</td><td>49.5</td><td>20.7</td><td>52.3</td></tr>
<tr><td>2PCNet (Ours)</td><td>49.1</td><td>51.4</td><td>54.6</td><td>54.8</td><td>56.6</td><td>23.9</td><td>54.2</td></tr></table>
", + "image_path": "826634909662486b1ab343b5849c94145be733bcd7b369139f600ddc99f42a01.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 433, + 287, + 477 + ], + "lines": [ + { + "bbox": [ + 46, + 433, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 433, + 287, + 477 + ], + "type": "text", + "content": "Table 3. Results of Day-to-Night domain adaptation on the SHIFT dataset. The Average Precision (AP) of all classes. Faster RCNN is used as the lower-bound and upper-bound and is trained on labelled daytime and nighttime data respectively." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 495, + 287, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 495, + 287, + 651 + ], + "spans": [ + { + "bbox": [ + 46, + 495, + 287, + 651 + ], + "type": "text", + "content": "mance with an AP of 46.4. " + }, + { + "bbox": [ + 46, + 495, + 287, + 651 + ], + "type": "inline_equation", + "content": "20.5\\%" + }, + { + "bbox": [ + 46, + 495, + 287, + 651 + ], + "type": "text", + "content": " higher than that of the SOTA student-teacher methods and above that of the upper-bound. We have observed in experiments that student-teacher methods underperforms with an AP below that of the lower-bound due to the error-propagation from noisy pseudo-labels. The result of the error is small false positive detections as seen in Figure 1. Our method does not suffer from the same allowing for higher performance. We can also observe that our method performs well across all classes. Even when compared with the upper-bound, 2PC-Net achieves higher AP on the majority of classes. This indicates that our method is able to generalise well across large and small classes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "content": "The comparison with image-to-image translation methods is shown in Table 2. Translation methods do not suffer from the error propagation problem as it is trained on Faster RCNN without a teacher. Even so, we can see that our method outperforms SOTA adverse vision translation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 332, + 345, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 332, + 345, + 342 + ], + "spans": [ + { + "bbox": [ + 306, + 332, + 345, + 342 + ], + "type": "text", + "content": "methods." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 352, + 545, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 352, + 545, + 412 + ], + "spans": [ + { + "bbox": [ + 304, + 352, + 545, + 412 + ], + "type": "text", + "content": "Comparison on SHIFT To further compare our method with SOTA we evaluate on the SHIFT simulation dataset. Due to the nature of the simulated data, many nighttime image characteristics that we have previously mention is not exhibited in this data such as blurriness, noise and glare." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "text", + "content": "The results of this experiments are shown in Table 3. 
We can observe that previous SOTA methods that use the student-teacher framework perform worse than the lower-bound. The sub-par performance is again due to the error-propagation problem. AT performs better than UMT due to ATs inclusion of adversarial learning. However, adversarial learning is not enough to mitigate this problem. We can see that the performance of DA FRCNN outperforms both the SOTA student-teacher methods as it would not be affected by error-propagation. It is however, still largely below the upper-bound performance. 2PCNet outperforms these previous methods as well as the upperbound. We achieve an improvement of " + }, + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "inline_equation", + "content": "+10.2" + }, + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "text", + "content": " AP over previous SOTA student-teacher methods and " + }, + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "inline_equation", + "content": "+2.1" + }, + { + "bbox": [ + 304, + 412, + 546, + 581 + ], + "type": "text", + "content": " AP over that of the upper-bound." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 589, + 406, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 589, + 406, + 601 + ], + "spans": [ + { + "bbox": [ + 306, + 589, + 406, + 601 + ], + "type": "text", + "content": "4.5. Ablation Studies" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 609, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 609, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 609, + 545, + 656 + ], + "type": "text", + "content": "To demonstrate the effectiveness of each of our components, we train several models for 100K iterations and evaluate them on the BDD100K dataset. We present our findings in Table 4." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 546, + 713 + ], + "type": "text", + "content": "Two-Phase Consistency We can observe in Table 4 that the addition of Two-Phase Consistency (C) demonstrated a wide performance gap when compared to the Mean-Teacher baseline, +13.5 AP (43%). This improvement in AP ex" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "11490" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 176, + 171 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 176, + 171 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 176, + 171 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 176, + 171 + ], + "type": "image", + "image_path": "808250427ec14cf82c8ac96e883ffd23fd2c6af9f58f5aeface3c8be797b87c7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 181, + 547, + 226 + ], + "lines": [ + { + "bbox": [ + 46, + 181, + 547, + 226 + ], + "spans": [ + { + "bbox": [ + 46, + 181, + 547, + 226 + ], + "type": "text", + "content": "Figure 6. Training curve on BDD100K dataset ablation study. We show the overall AP training curve as well as the AP of large, medium and small objects. 
MT represents the base Mean Teacher framework. It can be seen that at all scales, the absence of Two-Phase Consistency (C) results in a sharp drop during training. We can also see that with the inclusion of NightAug (NA) and student-scaling (SS) the gradient of the curve increases. We note that the inclusion of a domain classifier (DC) reduces the performance at all scales." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 178, + 72, + 298, + 171 + ], + "blocks": [ + { + "bbox": [ + 178, + 72, + 298, + 171 + ], + "lines": [ + { + "bbox": [ + 178, + 72, + 298, + 171 + ], + "spans": [ + { + "bbox": [ + 178, + 72, + 298, + 171 + ], + "type": "image", + "image_path": "b6705329c2169a0416d90e84e815498b1f0bfb53c6481a403cfb48ec48081d07.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 299, + 72, + 419, + 171 + ], + "blocks": [ + { + "bbox": [ + 299, + 72, + 419, + 171 + ], + "lines": [ + { + "bbox": [ + 299, + 72, + 419, + 171 + ], + "spans": [ + { + "bbox": [ + 299, + 72, + 419, + 171 + ], + "type": "image", + "image_path": "35530a358b9d008ad8b061628ac5c0ae9f9d1b58dc0f9a7ef67087c3e74bd327.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 421, + 72, + 543, + 171 + ], + "blocks": [ + { + "bbox": [ + 421, + 72, + 543, + 171 + ], + "lines": [ + { + "bbox": [ + 421, + 72, + 543, + 171 + ], + "spans": [ + { + "bbox": [ + 421, + 72, + 543, + 171 + ], + "type": "image", + "image_path": "6e1f6579437a259fb07456dba92adee7672ddce723d1501354e0dc90cdc44b74.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 246, + 289, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 289, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 289, + 354 + ], + "type": "text", + "content": "ists across large, medium and small objects. While the performance of MT is initially strong, it rapidly begins to decline; which can be observed in Figure 6. This drop in performance is due to the error propagation of noisy pseudolabels. The experimental results show that Two-Phase Consistency is able to provide a solution. This ensures that highly confident pseudo-labels are bounded by less confident pseudo-label enabling a balance of knowledge into the student." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 361, + 287, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 361, + 287, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 287, + 458 + ], + "type": "text", + "content": "NightAug We benched marked the effectiveness of NightAug in our framework as shown in Table 4. The inclusion of NightAug increases the detection performance of small objects with an increase of " + }, + { + "bbox": [ + 46, + 361, + 287, + 458 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 46, + 361, + 287, + 458 + ], + "type": "text", + "content": ". Additionally, the gradient of the training performance remains steep as seen in Figure 6. The positive gradient is displayed most strongly for APm and APs where objects are more prone to nighttime specific complications." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "spans": [ + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "text", + "content": "Student-Scaling Our final component, student-scaling, is included into the framework and the results can be seen in Table 4. We can observe that student-scaling is able to boost the performance of small object detection by " + }, + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "text", + "content": ". This boost in performance is due to the student network focusing on smaller object earlier in the training process. We note that the performance of large objects have dropped by " + }, + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "inline_equation", + "content": "1 - 2\\%" + }, + { + "bbox": [ + 46, + 466, + 288, + 610 + ], + "type": "text", + "content": "; however when referring to the training curves in Figure 6, API remains steep. As the initial focus is on smaller objects, less time is allocated to larger objects during training. This can be mitigated by lengthening training resulting in more iterations for larger objects." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "Domain Classifier To conclude our study, we included a domain classifier into our network. Adversarial learning is a widely used DA technique; however when added into 2PCNet, a performance drop across all scales can be seen. This drop is shown in Table 4. The suppression of nighttime features is suspected to be the cause. Suppression is present as the adversarial loss guides the feature extractor to maintain domain invariance. By suppressing nighttime fea" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 314, + 244, + 539, + 342 + ], + "blocks": [ + { + "bbox": [ + 314, + 244, + 539, + 342 + ], + "lines": [ + { + "bbox": [ + 314, + 244, + 539, + 342 + ], + "spans": [ + { + "bbox": [ + 314, + 244, + 539, + 342 + ], + "type": "table", + "html": "
<table><tr><td colspan="4">Methods</td><td></td><td></td><td></td><td></td></tr>
<tr><td>C</td><td>NA</td><td>SS</td><td>DC</td><td>AP</td><td>API</td><td>APm</td><td>APs</td></tr>
<tr><td></td><td></td><td></td><td></td><td>46.4</td><td>41.7</td><td>25.8</td><td>9.1</td></tr>
<tr><td></td><td></td><td></td><td></td><td>44.5</td><td>41.6</td><td>25.0</td><td>8.3</td></tr>
<tr><td></td><td></td><td></td><td></td><td>45.8</td><td>42.2</td><td>25.7</td><td>8.6</td></tr>
<tr><td></td><td></td><td></td><td></td><td>45.2</td><td>42.9</td><td>25.7</td><td>8.2</td></tr>
<tr><td></td><td></td><td></td><td></td><td>31.7</td><td>30.4</td><td>16.5</td><td>4.8</td></tr></table>
", + "image_path": "4ee03232bc2b517cbeda29e935f6a541aad701501640833e034d50932f217a5a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 350, + 547, + 407 + ], + "lines": [ + { + "bbox": [ + 304, + 350, + 547, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 547, + 407 + ], + "type": "text", + "content": "Table 4. Ablation studies on the BDD100K dataset. The last row represents the base Mean-Teacher network. Methods are referred to as, C: Two-Phase Consistency, NA: NightAug, SS: StudentScaling, DC: Domain Classifier. API, APm, and APs represent the AP of large, medium and small objects respectively." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 420, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 420, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 420, + 545, + 479 + ], + "type": "text", + "content": "tures, the teacher has less information to distil to the student. This is demonstrated in Figure 6 where the domain classifier (dotted purple) initially performs well. But as training continues, our method (solid red) is able to surpass its performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "spans": [ + { + "bbox": [ + 306, + 494, + 378, + 506 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 514, + 545, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 671 + ], + "type": "text", + "content": "Our proposed framework, 2PCNet, presents a novel solution to the challenges of day-to-night domain adaptive object detection. With our Two-Phase Consistency approach, we are able to effectively leverage high and low confidence knowledge for the student, while mitigating error propagation commonly present in previous student-teacher methods. We further address issues arising from small scale and dark objects through the use of student-scaling and NightAug, respectively. Experimental results on the e BDD100K [36] and SHIFT [25] datasets demonstrate that 2PCNet outperforms existing state-of-the-art methods. Overall, our proposed framework provides an effective and efficient solution for day-to-night domain adaptive object detection." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgements This work is partially supported by MOE2019-T2-1-130." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "11491" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Qi Cai, Yingwei Pan, Chong-Wah Ngo, Xinmei Tian, Lingyu Duan, and Ting Yao. Exploring object relation in mean teacher for cross-domain detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11449-11458, 2019. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 201 + ], + "type": "text", + "content": "[2] Lin Chen, Huaian Chen, Zhixiang Wei, Xin Jin, Xiao Tan, Yi Jin, and Enhong Chen. Reusing the task-specific classifier as a discriminator: Discriminator-free adversarial domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7171-7180, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 255 + ], + "type": "text", + "content": "[3] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain adaptive faster r-cnn for object detection in the wild. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3339-3348, 2018. 1, 2, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "text", + "content": "[4] Yuhua Chen, Haoran Wang, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Scale-aware domain adaptive faster r-cnn. International Journal of Computer Vision, page 2223-2243, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 302, + 287, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 287, + 346 + ], + "type": "text", + "content": "[5] Dengxin Dai and Luc Van Gool. Dark model adaptation: Semantic image segmentation from daytime to nighttime. In International Conference on Intelligent Transportation Systems (ITSC), pages 3819-3824, 2018. 
2, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 347, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 287, + 390 + ], + "type": "text", + "content": "[6] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 248-255, 2009. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 287, + 445 + ], + "type": "text", + "content": "[7] Jinhong Deng, Wen Li, Yuhua Chen, and Lixin Duan. Unbiased mean teacher for cross-domain object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4089-4099, 2021. 1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 287, + 501 + ], + "type": "text", + "content": "[8] Xueqing Deng, Peng Wang, Xiaochen Lian, and Shawn Newsam. NightLab: A Dual-Level Architecture With Hardness Detection for Segmentation at Night. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16938-16948, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 502, + 287, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 502, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 53, + 502, + 287, + 556 + ], + "type": "text", + "content": "[9] Huan Gao, Jichang Guo, Guoli Wang, and Qian Zhang. Cross-Domain Correlation Distillation for Unsupervised Domain Adaptation in Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9913-9923, 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "type": "text", + "content": "[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 603, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 287, + 657 + ], + "type": "text", + "content": "[11] Mengzhe He, Yali Wang, Jiaxi Wu, Yiru Wang, Hanqing Li, Bo Li, Weihao Gan, Wei Wu, and Yu Qiao. Cross domain object detection by target-perceived dual branch distillation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9560-9570, 2022. 1, 2, 5, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[12] Guoliang Kang, Lu Jiang, Yunchao Wei, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for single- and multi-source domain adaptation. 
IEEE Transactions on Pattern Analysis amp; Machine Intelligence, pages 1793–1804, 2022. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 126 + ], + "type": "text", + "content": "[13] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4888-4897, 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 173 + ], + "type": "text", + "content": "[14] Attila Lengyel, Sourav Garg, Michael Milford, and Jan C. van Gemert. Zero-shot day-night domain adaptation with a physics prior. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 4379-4389, 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 545, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 237 + ], + "type": "text", + "content": "[15] Yu-Jhe Li, Xiaoliang Dai, Chih-Yao Ma, Yen-Cheng Liu, Kan Chen, Bichen Wu, Zijian He, Kris Kitani, and Peter Vajda. Cross-domain adaptive teacher for object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7571-7580, 2022. 1, 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "text", + "content": "[16] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael I. Jordan. Learning transferable features with deep adaptation networks. In International Conference on International Conference on Machine Learning, page 97-105, 2015. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 330 + ], + "type": "text", + "content": "[17] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Unsupervised domain adaptation with residual transfer networks. In International Conference on Neural Information Processing Systems, page 136-144, 2016. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 332, + 545, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 375 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 375 + ], + "type": "text", + "content": "[18] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I. Jordan. Deep transfer learning with joint adaptation networks. In International Conference on Machine Learning, page 2208-2217, 2017. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 376, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 420 + ], + "type": "text", + "content": "[19] Igor Morawski, Yu-An Chen, Yu-Sheng Lin, and Winston H. Hsu. Nod: Taking a closer look at detection under extreme low-light conditions with night object detection dataset. In British Machine Vision Conference, (BMVC), 2021. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 422, + 545, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 485 + ], + "type": "text", + "content": "[20] Lukás Neumann, Michelle Karg, Shanshan Zhang, Christian Scharfenberger, Ericiegert, Sarah Mistr, Olga Prokofyeva, Robert Thiel, Andrea Vedaldi, Andrew Zisserman, and Bernt Schiele. Nightowls: A pedestrians at night dataset. In Asian Conference on Computer Vision (ACCV), pages 691-705, 2018. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 488, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 488, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 488, + 545, + 533 + ], + "type": "text", + "content": "[21] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In International Conference on Neural Information Processing Systems, page 91-99, 2015. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "text", + "content": "[22] Kuniaki Saito, Yoshitaka Ushiku, Tatsuya Harada, and Kate Saenko. Strong-weak distribution alignment for adaptive object detection. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6949–6958, 2019. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 633 + ], + "type": "text", + "content": "[23] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Guided curriculum model adaptation and uncertainty-aware evaluation for semantic nighttime image segmentation. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 7373-7382, 2019. 2, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "text", + "content": "[24] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for semantic driving scene understanding. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 10745-10755, 2021. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "text", + "content": "[25] Tao Sun, Mattia Segu, Janis Postels, Yuxuan Wang, Luc Van Gool, Bernt Schiele, Federico Tombari, and Fisher Yu. 
Shift:" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "11492" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "A synthetic driving dataset for continuous multi-task domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21339-21350, 2022. 6, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "type": "text", + "content": "[26] Antti Tarvainen and Harri Valpola. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In International Conference on Neural Information Processing Systems, page 1195–1204, 2017. 2, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 218 + ], + "type": "text", + "content": "[27] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2962-2971, 2017. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 274 + ], + "type": "text", + "content": "[28] Sinan Wang, Xinyang Chen, Yunbo Wang, Mingsheng Long, and Jianmin Wang. Progressive adversarial networks for fine-grained domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9210-9219, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 276, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 330 + ], + "type": "text", + "content": "[29] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15769–15778, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 332, + 287, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 332, + 287, + 376 + ], + "spans": [ + { + "bbox": [ + 48, + 332, + 287, + 376 + ], + "type": "text", + "content": "[30] Qizhe Xie, Minh-Thang Luong, Eduard Hovy, and Quoc V. Le. Self-training with noisy student improves imagenet classification. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2020. 
5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 378, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 378, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 48, + 378, + 287, + 430 + ], + "type": "text", + "content": "[31] Chang-Dong Xu, Xingjie Zhao, Xin Jin, and Xiu-Shen Wei. Exploring categorical regularization for domain adaptive object detection. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11721-11730, 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 487 + ], + "type": "text", + "content": "[32] Minghao Xu, Hang Wang, Bingbing Ni, Qi Tian, and Wenjun Zhang. Cross-domain detection via graph-induced prototype alignment. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12352-12361, 2020. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 544 + ], + "type": "text", + "content": "[33] Qi Xu, Yinan Ma, Jing Wu, Chengnian Long, and Xiaolin Huang. CDAda: A Curriculum Domain Adaptation for Nighttime Semantic Segmentation. In IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), pages 2962-2971, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 546, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 546, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 48, + 546, + 287, + 589 + ], + "type": "text", + "content": "[34] Yanchao Yang and Stefano Soatto. FDA: Fourier domain adaptation for semantic segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4084-4094, 2020. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 645 + ], + "type": "text", + "content": "[35] Junjie Ye, Changhong Fu, Guangze Zheng, Danda Pani Paudel, and Guang Chen. Unsupervised Domain Adaptation for Nighttime Aerial Tracking. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8896-8905, 2022. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 712 + ], + "type": "text", + "content": "[36] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2633-2642, 2020. 1, 6, 8" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 205 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[37] Weichen Zhang, Wanli Ouyang, Wen Li, and Dong Xu. 
Collaborative and adversarial network for unsupervised domain adaptation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3801-3809, 2018. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "text", + "content": "[38] Ziqiang Zheng, Yang Wu, Xinran Nicole Han, and Jianbo Shi. Forkgan: Seeing into the rainy night. In European Conference on Computer Vision (ECCV), 2020. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 152, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 205 + ], + "type": "text", + "content": "[39] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE/CVF International Conference on Computer Vision (ICCV), pages 2242-2251, 2017. 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "11493" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_content_list.json b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3d501c882614bde17b3145a6438a313498ca13c6 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_content_list.json @@ -0,0 +1,1989 @@ +[ + { + "type": "text", + "text": "3D Cinemagraphy from a Single Image", + "text_level": 1, + "bbox": [ + 285, + 130, + 684, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingyi Li $^{1,3}$ Zhiguo Cao $^{1}$ Huiqiang Sun $^{1}$ Jianming Zhang $^{2}$ Ke Xian $^{3*}$ Guosheng Lin $^{3}$ $^{1}$ Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology \n $^{2}$ Adobe Research $^{3}$ S-Lab, Nanyang Technological University \n{xingyi.li, zgcao, shq1031}@hust.edu.cn, jianmzha@adobe.com, {ke.xian, gslin}@ntu.edu.sg \nhttps://xingyi-li.github.io/3d-cinemagraphy", + "bbox": [ + 94, + 178, + 872, + 287 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/77e45a8a8ae99f7e5122bf60ba50833109c19c4ad4f2de0de9320aca1eab8f4b.jpg", + "image_caption": [ + "Figure 1. Given a single still image, our method can synthesize videos with plausible animation of the scene while allowing camera movements. Here, we showcase four 3D cinematographs with various camera trajectories. Besides real-world photos (the left two examples), our method can also generalize to paintings (the third one) and synthetic images generated by Stable Diffusion [47] (the rightmost one). To see the effect of 3D cinematography, readers are encouraged to view with Adobe Acrobat or KDE Okular." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 314, + 290, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/afaecbba82dd6bd57e0f587e50178780a6b79bc215a6dbcec1887dd14bf9fa3b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 292, + 315, + 504, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/812c489801e67547cea0e4abdc1db516b991152c6dc20e97d1f533d9d77e7022.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 315, + 741, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9e76c7973b4c0d6ac5f0c7109de2752b00aa644105db1a7c8f97243477dba918.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 743, + 315, + 888, + 422 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 512, + 313, + 527 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present 3D Cinemagography, a new technique that marries 2D image animation with 3D photography. Given a single still image as input, our goal is to generate a video that contains both visual content animation and camera motion. We empirically find that naively combining existing 2D image animation and 3D photography methods leads to obvious artifacts or inconsistent animation. Our key insight is that representing and animating the scene in 3D space offers a natural solution to this task. To this end, we first convert the input image into feature-based layered depth images using predicted depth values, followed by unprojecting them to a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion into the 3D scene flow. Finally, to resolve the problem of hole emergence as points move forward, we propose to bidirectionally displace the point cloud as per the scene flow and synthesize novel views by separately projecting them into target image planes and blending the results. Extensive experiments demonstrate the effectiveness of our method. A user study is also conducted to validate the compelling rendering results of our method.", + "bbox": [ + 75, + 544, + 472, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 512, + 630, + 527 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nowadays, since people can easily take images using smartphone cameras, the number of online photos has increased drastically. However, with the rise of online video-sharing platforms such as YouTube and TikTok, people are no longer content with static images as they have grown accustomed to watching videos. It would be great if we could animate those still images and synthesize videos for a better experience. These living images, termed cinematographs, have already been created and gained rapid popularity online [1, 71]. Although cinematographs may engage people with the content for longer than a regular photo, they usually fail to deliver an immersive sense of 3D to audiences. This is because cinematographs are usually based on a static camera and fail to produce parallax effects. We are therefore motivated to explore ways of animating the photos and moving around the cameras at the same time. As shown in Fig. 
1, this will bring many still images to life and provide a drastically vivid experience.", + "bbox": [ + 496, + 537, + 890, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we are interested in making the first step towards 3D cinematography that allows both realistic animation of the scene and camera motions with compelling parallax effects from a single image. There are plenty of attempts to tackle either of the two problems. Single-image animation methods [12, 19, 35] manage to produce a real-", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 886, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4595", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "istic animated video from a single image, but they usually operate in 2D space, and therefore they cannot create camera movement effects. Classic novel view synthesis methods [5, 6, 9, 14, 25] and recent implicit neural representations [37, 40, 58] entail densely captured views as input to render unseen camera perspectives. Single-shot novel view synthesis approaches [21, 39, 52, 66] exhibit the potential for generating novel camera trajectories of the scene from a single image. Nonetheless, these methods usually hypothesize that the observed scene is static without moving elements. Directly combining existing state-of-the-art solutions of single-image animation and novel view synthesis yields visual artifacts or inconsistent animation.", + "bbox": [ + 75, + 90, + 472, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above challenges, we present a novel framework that solves the joint task of image animation and novel view synthesis. This framework can be trained to create 3D cinematographs from a single still image. Our key intuition is that handling this new task in 3D space would naturally enable both animation and moving cameras simultaneously. With this in mind, we first represent the scene as feature-based layered depth images (LDIs) [50] and unproject the feature LDIs into a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion to 3D scene flow using depth values predicted by DPT [45]. Next, we animate the point cloud according to the scene flow. To resolve the problem of hole emergence as points move forward, we are inspired by prior works [3, 19, 38] and propose a 3D symmetric animation technique to bidirectionally displace point clouds, which can effectively fill in those unknown regions. Finally, we synthesize novel views at time $t$ by rendering point clouds into target image planes and blending the results. In this manner, our proposed method can automatically create 3D cinematographs from a single image. 
Moreover, our framework is highly extensible, e.g., we can augment our motion estimator with user-defined masks and flow hints for accurate flow estimation and controllable animation.", + "bbox": [ + 75, + 287, + 472, + 648 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our main contributions are:", + "bbox": [ + 96, + 650, + 364, + 664 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a new task of creating 3D cinematographs from single images. To this end, we propose a novel framework that jointly learns to solve the task of image animation and novel view synthesis in 3D space.", + "- We design a 3D symmetric animation technique to address the hole problem as points move forward.", + "- Our framework is flexible and customized. We can achieve controllable animation by augmenting our motion estimator with user-defined masks and flow hints." + ], + "bbox": [ + 96, + 674, + 468, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 844, + 218, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Single-image animation. Different kinds of methods have been explored to animate still images. Some works [8, 22]", + "bbox": [ + 75, + 869, + 468, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "focus on animating certain objects via physical simulation but may not be easily applied to more general cases of inthe-wild photos. Given driving videos as guidance, there are plenty of methods that attempt to perform motion transfer on static objects with either a priori knowledge of moving objects [7, 11, 33, 46, 55] or in an unsupervised manner [53, 54, 56]. They entail reference videos to drive the motion of static objects, and thus do not suit our task. Recent advances in generative models have attracted much attention and motivated the community to develop realistic image and video synthesis methods. Many works [31, 32, 34, 51, 69] are based on generative adversarial networks (GANs) and operate transformations in latent space to generate plausible appearance changes and movements. Nonetheless, it is non-trial to allow for explicit control over those latent codes and to animate input imagery in a disentangled manner. As diffusion models [17, 59] improve by leaps and bounds, several diffusion-based works [16, 18, 57] attempt to generate realistic videos from text or images. However, these methods are time-consuming and expensive in terms of computation. Here we focus on methods that utilize learned motion priors to convert a still image into an animated video texture [12, 13, 19, 29, 35]. In particular, Holynski et al. [19] first synthesize the optical flow of the input image via a motion estimation network, then obtain future frames using the estimated flow field. This method renders plausible animation of fluid elements in the input image but suffers from producing camera motions with parallax.", + "bbox": [ + 496, + 90, + 893, + 515 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel view synthesis from a single image. Novel view synthesis allows for rendering unseen camera perspectives from 2D images and their corresponding camera poses. Recent impressive synthesis results may credit to implicit neural representations [37, 40, 58]. Nevertheless, these methods usually assume dense views as input, which is not always available in most cases. 
Moreover, they focus on the task of interpolation given multiple views rather than extrapolation. As such, we instead turn to methods aiming at handling single input. Among them, a number of works [15, 26, 28, 62, 63, 70, 72] infer the 3D structure of scenes by learning to predict a scene representation from a single image. These methods are usually trained end-to-end but suffer from generalizing to in-the-wild photos. Most relevant to our work are those approaches [39, 52, 66] that apply depth estimation [45, 65, 67, 68] followed by inpainting occluded regions. For example, 3D Photo [52] estimates monocular depth maps and uses the representation of layered depth images (LDIs) [43, 50], in which context-aware color and depth inpainting are performed. To enable fine-grained detail modeling, SLIDE [21] decomposes the scene into foreground and background via a soft-layering scheme. However, unlike our approach, these methods usually assume the scene is static by default, which largely lessens the sense of reality, especially when some elements such as", + "bbox": [ + 496, + 523, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4596", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6c4949f4da65374cd84740c97a37b56fee1a8822d40c3c37efaec7f81ec19bb8.jpg", + "image_caption": [ + "Figure 2. An overview of our method. Given a single still image as input, we first predict a dense depth map. To represent the scene in 3D space, we separate the input image into several layers according to depth discontinuities and apply context-aware inpainting, yielding layered depth images (LDIs) $\\mathcal{L}$ . We then use a 2D feature extractor to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs $\\mathcal{F}$ . Subsequently, we lift feature LDIs into 3D space using corresponding depth values to obtain a feature point cloud $\\mathcal{P}$ . To animate the scene, we estimate a 2D motion field from the input image and apply Euler integration to generate forward and backward displacement fields $F_{0\\rightarrow t}$ and $F_{0\\rightarrow t - N}$ . We then augment displacement fields with estimated depth values to obtain 3D scene flow fields. Next, we bidirectionally displace the feature point cloud $\\mathcal{P}$ as per the scene flow and separately project them into target image planes to obtain $\\mathbf{F}_f$ and $\\mathbf{F}_b$ . Finally, we blend them together and pass the result through our image decoder to synthesize a novel view at time $t$ ." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 890, + 281 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "a creek or smoke are also captured in the input image.", + "bbox": [ + 75, + 433, + 433, + 448 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Space-time view synthesis. Space-time view synthesis is the task of rendering novel camera perspectives for dynamic scenes in terms of space and time [30]. Most of the prior works [2, 4, 27] rely on synchronized multi-view videos as input, which prevents their wide applicability. To mitigate this requirement, many neural rendering approaches [30, 41, 44] manage to show promising space-time view synthesis results from monocular videos. They usually train each new scene independently, and thus cannot directly handle in-the-wild inputs. 
Most related to our work, 3D Moments [64] introduces a novel 3D photography effect where cinematic camera motion and frame interpolation are simultaneously performed. However, this method demands near-duplicate photos as input and is unable to control the animation results. Instead, we show that our method can animate still images while enabling camera motion with 3D parallax. Moreover, we can also extend our system so that users are allowed to interactively control how the photos are animated by providing user-defined masks and flow hints.", + "bbox": [ + 75, + 448, + 472, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 746, + 169, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 771, + 187, + 786 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a single still image, our goal is to synthesize plausible animation of the scene and simultaneously enable camera motion. The output of our method is a realistic cinematograph with compelling parallax effects. Fig. 2 schematically illustrates our pipeline. Our method starts by estimating a motion field and a depth map from the input image. We then separate the RGBD input into several layers", + "bbox": [ + 75, + 794, + 468, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "as per depth discontinuities and inpaint occluded regions, followed by extracting 2D feature maps for each layer, resulting in feature LDIs [50]. To enable scene animation, we lift the 2D motion to 3D scene flow and unproject feature LDIs into a feature point cloud using their corresponding depth values. Thereafter, we bidirectionally animate the point cloud with scene flow using our 3D symmetric animation technique. We end up rendering them into two animated feature maps and composite the results to synthesize novel views at time $t$ .", + "bbox": [ + 496, + 433, + 893, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Motion Estimation", + "text_level": 1, + "bbox": [ + 500, + 599, + 681, + 616 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To animate a still image, we wish to estimate the corresponding motion field for the observed scene. Generally, the motion we witness in the real world is extremely complicated as it is time-varying and many events such as occlusion and collision could occur. Intuitively, we could directly adopt prior optical flow estimation methods [10, 20, 60, 61] to accomplish this. However, it is not trivial since they usually take a pair of images as input to compute optical flow. Endo et al. [12] instead propose to learn and predict the motion in a recurrent manner, but this kind of approach is prone to large distortions in the long term. To simplify this, we follow Holynski et al. [19] and assume that a time-invariant and constant-velocity motion field, termed Eulerian flow field, can well approximate the bulk of real-world motions, e.g., water, smoke, and clouds. 
Formally, we denote $M$ as the Eulerian flow field of the scene, which suggests that", + "bbox": [ + 496, + 626, + 893, + 869 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nF _ {t \\rightarrow t + 1} (\\cdot) = M (\\cdot), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 883, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4597", + "bbox": [ + 480, + 944, + 514, + 957 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $F_{t\\rightarrow t + 1}(\\cdot)$ represents the optical flow map from frame $t$ to frame $t + 1$ . This defines how each pixel in the current frame will move in the future. Specifically, we can obtain the next frame via Euler integration:", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {x} _ {t + 1} = \\mathbf {x} _ {t} + M (\\mathbf {x} _ {t}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 166, + 468, + 181 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{x}_t$ represents the coordinates of a pixel $\\mathbf{x}_t$ at time $t$ . Since the optical flow between consecutive frames is identical, we can easily deduce the displacement field by recursively applying:", + "bbox": [ + 75, + 195, + 468, + 255 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nF _ {0 \\rightarrow t} (\\mathbf {x} _ {0}) = F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0}) + M (\\mathbf {x} _ {0} + F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0})), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 88, + 268, + 468, + 286 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $F_{0\\rightarrow t}(\\cdot)$ denotes the displacement field from time 0 to time $t$ , which describes the course of each pixel in the input image across future frames. To estimate the Eulerian flow field, we adopt an image-to-image translation network as our motion estimator, which is able to map an RGB image to the optical flow.", + "bbox": [ + 75, + 297, + 468, + 388 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. 3D Scene Representation", + "text_level": 1, + "bbox": [ + 76, + 401, + 303, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "One common disadvantage of previous single-image animation methods [12, 19, 29] is that they usually operate in 2D space via a deep image warping technique, which prevents them from creating parallax effects. Instead, to enable camera motion, we propose to lift our workspace into 3D and thus resort to 3D scene representation.", + "bbox": [ + 75, + 425, + 468, + 516 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We start by estimating the underlying geometry of the scene using the state-of-the-art monocular depth estimator DPT [45], which can predict reasonable dense depth maps for in-the-wild photos. Following Wang et al. [64], we then convert the RGBD input into an LDI representation [50] by separating it into several layers as per depth discontinuities and inpainting occluded regions. Specifically, we first divide the depth range of the source depth map into multiple intervals using agglomerative clustering [36], followed by creating layered depth images $\\mathcal{L} = \\{\\mathbf{C}_l,\\mathbf{D}_l\\}_{l = 1}^L$ . Next, we inpaint occluded regions of each color and depth layer by applying the pretrained inpainting model from 3D Photo [52]. 
To improve rendering quality and reduce artifacts, we also introduce a 2D feature extraction network to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs $\\mathcal{F} = \\{\\mathbf{F}_l,\\mathbf{D}_l\\}_{l = 1}^L$ . Finally, in order to enable animation in 3D space, we unproject feature LDIs into 3D via their corresponding inpainted depth layers, yielding a feature point cloud $\\mathcal{P} = \\{(\\mathbf{X}_i,\\mathbf{f}_i)\\}$ , where $\\mathbf{X}_i$ and $\\mathbf{f}_i$ are 3D coordinates and the feature vector for each 3D point respectively.", + "bbox": [ + 75, + 517, + 468, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Point Cloud Animation and Rendering", + "text_level": 1, + "bbox": [ + 76, + 845, + 410, + 862 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We now have the estimated displacement fields $F_{0\\rightarrow t}$ and the feature point cloud $\\mathcal{P}$ . Our next step is to animate this", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4420e0d6b8fcb14bf3c3b500c14fac36e9318d4891d4d320681be1555ce24c18.jpg", + "image_caption": [ + "Figure 3. 3D symmetric animation. To address the hole issue, we borrow textural information from the point cloud that moves in the opposite direction and integrate both of the animated point clouds to feasibly fill in the missing regions (the red and blue regions)." + ], + "image_footnote": [], + "bbox": [ + 513, + 90, + 880, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "point cloud over time. To bridge the gap between 2D displacement fields and 3D scene representation, we first augment the displacement fields with estimated depth values to lift them into 3D scene flow. In other words, we now have a function of time $t$ and the coordinates of a 3D point that returns a corresponding 3D translation vector that can shift this 3D point accordingly. Thus, for time $t$ , we then move each 3D point by computing its destination as its original position plus a corresponding 3D translation vector, i.e., $\\mathcal{P}(t) = \\{(\\mathbf{X}_i(t),\\mathbf{f}_i)\\}$ . Intuitively, this process indeed animates the point cloud from one time to another. However, we empirically find that as points move forward, increasingly large holes emerge. This frequently happens when points leave their original locations without any points filling in those unknown regions.", + "bbox": [ + 496, + 349, + 890, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D symmetric animation. To resolve this, inspired by prior works [3, 19, 38], we propose a 3D symmetric animation technique that leverages bidirectionally displaced point clouds to complement each other. With 3D symmetric animation, we can borrow textural information from point clouds that move in the opposite direction and integrate both of the animated point clouds to feasibly fill in missing regions. Specifically, we directly replace the original Eulerian flow field $M$ with $-M$ and recursively apply Eq. (3) to generate a reversed displacement field. Similarly, we then lift this 2D displacement field to obtain inverse scene flow, which is employed to produce point clouds with backward movements. As illustrated in Fig. 
3, for time $t$ , to fill in holes, we respectively apply $F_{0\\rightarrow t}$ and $F_{0\\rightarrow t - N}$ to draw associated scene flow fields and use them to move the point cloud, resulting in $\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}$ and $\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}$ , where $N$ is the number of frames.", + "bbox": [ + 496, + 579, + 890, + 837 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Neural rendering. We now have two bidirectionally animated feature point clouds. Our final step is to render them into animated feature maps and composite the results for synthesizing novel views at time $t$ . In particu", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4598", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "lar, given camera poses and intrinsics, we use a differentiable point-based renderer [66] to splat feature point clouds $\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}$ and $\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}$ separately into the target image plane. This process yields 2D feature maps $\\mathbf{F}_f$ and $\\mathbf{F}_b$ along with depth maps $\\mathbf{D}_f$ , $\\mathbf{D}_b$ and alpha maps $\\alpha_{f},\\alpha_{b}$ . Next, we wish to fuse $\\mathbf{F}_f$ and $\\mathbf{F}_b$ into one feature map $\\mathbf{F}_t$ . Inspired by prior work [64], our intuition is three-fold: 1) to enable endless and seamless looping, we should assign the weight of the two feature maps based on time so as to guarantee that the first and last frame of the synthesized video are identical; 2) the weight map should favor pixel locations with smaller depth values, in the sense that it is impossible to see objects behind those objects closer to the eye; 3) to avoid missing regions as much as possible, we should greatly increase the contribution of those pixel locations that can fill in holes. With this in mind, we formulate the weight map as follows:", + "bbox": [ + 76, + 90, + 472, + 349 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {W} _ {t} = \\frac {\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}}}{\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}} + \\frac {t}{N} \\cdot \\boldsymbol {\\alpha} _ {b} \\cdot e ^ {- \\mathbf {D} _ {b}}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 356, + 468, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $N$ is the number of frames. Therefore, we can integrate $\\mathbf{F}_f$ and $\\mathbf{F}_b$ via:", + "bbox": [ + 76, + 401, + 468, + 433 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {F} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {F} _ {b}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 441, + 468, + 459 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We also obtain the merged depth map $\\mathbf{D}_t$ :", + "bbox": [ + 76, + 468, + 356, + 484 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {D} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {D} _ {b}. 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 494, + 468, + 511 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we employ an image decoder network to map the 2D feature map $\\mathbf{F}_t$ and depth map $\\mathbf{D}_t$ to a novel view at time $t$ . Repeating this method, we are able to synthesize a realistic cinematograph with compelling parallax effects.", + "bbox": [ + 76, + 521, + 468, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Training", + "text_level": 1, + "bbox": [ + 76, + 590, + 181, + 606 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This section describes our training scheme. In general, we train our image-to-image translation network, 2D feature extraction network, and image decoder network in a two-stage manner.", + "bbox": [ + 76, + 613, + 468, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training dataset. We use the training set from Holynski et al. [19] as our training dataset. This dataset comprises short video clips of fluid motion that are extracted from longer stock-footage videos. We use the first frames of each video clip and the corresponding ground truth motion fields estimated by a pretrained optical flow network [60] as motion estimation pairs to train our motion estimation network. To develop animation ability, we randomly sample training data from fluid motion video clips. For novel view synthesis training, we require multi-view supervision of the same scene, which is not available in the training set. Instead, we use 3D Photo [52] to generate pseudo ground truth novel views for training.", + "bbox": [ + 75, + 674, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Two-stage training. Our model is trained in a two-stage manner. Specifically, we first train our motion estimation", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "network using motion estimation pairs. To train the motion estimation network, we minimize GAN loss, GAN feature matching loss [49], and endpoint error as follows:", + "bbox": [ + 498, + 90, + 890, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {M o t i o n}} = \\mathcal {L} _ {\\text {G A N}} + 1 0 \\mathcal {L} _ {\\text {F M}} + \\mathcal {L} _ {\\text {E P E}}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 143, + 890, + 161 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the second stage, we freeze the motion estimation network and train the feature extraction network and image decoder network. Our model simultaneously learns to render novel views and animate scenes. For novel view synthesis, we set $t = 0$ and use pseudo ground truth novel views to supervise our model. We randomly sample target viewpoints of scenes and require the model to synthesize them. For animation, we train our model on training triplets (start frame, middle frame, end frame) sampled from fluid motion video clips. In particular, we render the middle frame from both directions using $F_{0\\rightarrow t}$ and $F_{0\\rightarrow t - N}$ without changing the camera poses and intrinsics. Besides GAN loss and GAN feature matching loss [49], we also enforce VGG perceptual loss [23, 73] and $l_{1}$ loss between synthesized and ground truth images. 
The overall loss is as follows:", + "bbox": [ + 496, + 169, + 892, + 395 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {A n i m a t i o n}} = \\mathcal {L} _ {G A N} + 1 0 \\mathcal {L} _ {F M} + \\mathcal {L} _ {l _ {1}} + \\mathcal {L} _ {V G G}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 404, + 890, + 420 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 429, + 633, + 445 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 453, + 718, + 469 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our motion estimator is a U-Net [48] based generator with 16 convolutional layers, and we replace Batch Normalization with SPADE [42]. For the feature extraction network and image decoder network, we follow the network architectures from Wang et al. [64]. We adopt the multi-scale discriminator used in SPADE [42] during training.", + "bbox": [ + 496, + 476, + 890, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our model is trained using the Adam optimizer [24]. We conduct all experiments on a single NVIDIA GeForce RTX 3090 GPU. We train the motion estimation network for around $120k$ iterations with a batch size of 16. We set the generator learning rate to $5 \\times 10^{-4}$ and the discriminator learning rate to $2 \\times 10^{-3}$ . For the animation training stage, we train the feature extraction network and image decoder network for around $250k$ iterations with a learning rate starting at $1 \\times 10^{-4}$ and then decaying exponentially.", + "bbox": [ + 496, + 566, + 890, + 704 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Baselines", + "text_level": 1, + "bbox": [ + 500, + 710, + 607, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In principle, to evaluate our method, we are required to compare it against current state-of-the-art models. However, to our knowledge, we are the first to tackle the novel task of synthesizing a realistic cinematograph with compelling parallax effects from a single image. As a result, we cannot directly compare to previous works. Instead, we consider forming the following baselines to verify the superiority of our method:", + "bbox": [ + 496, + 734, + 890, + 853 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2D animation $\\rightarrow$ novel view synthesis. One might consider 2D image animation $\\rightarrow$ single-shot novel view synthesis: first employing a 2D image animation method, then", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4599", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c73aa6fbda032b3f48e03479b2b356fc2ed002a2f565478dc012b95cf53ed609.jpg", + "table_caption": [ + "Table 1. Quantitative comparisons against all baselines on the validation set from Holynski et al. [19]. The better approach favors higher PSNR and SSIM but lower LPIPS. The best performance is in bold." + ], + "table_footnote": [], + "table_body": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
2D Anim. [19] → NVS [52] | 21.12 | 0.633 | 0.286
NVS [52] → 2D Anim. [19] | 21.97 | 0.697 | 0.276
NVS [52] → 2D Anim. [19] + MA | 22.47 | 0.718 | 0.261
Naive PC Anim. | 19.46 | 0.647 | 0.243
Naive PC Anim. + 3DSA | 20.49 | 0.660 | 0.237
Ours | 23.33 | 0.776 | 0.197
", + "bbox": [ + 84, + 156, + 465, + 243 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a single-shot novel view synthesis method. Specifically, we first adopt a state-of-the-art image animation method [19] to produce an animated looping video. We then apply DPT [45] to estimate geometry and utilize 3D Photo [52] to generate novel views for each frame.", + "bbox": [ + 75, + 263, + 467, + 337 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Novel view synthesis $\\rightarrow$ 2D animation. It also appears to be feasible that we first render novel views of scenes by 3D Photo [52] and then use the image animation method [19] to animate each viewpoint. Note that motion estimation should be performed for each frame as viewpoints have changed. However, we empirically find that this usually results in varying motion fields across the video. To mitigate this, we further propose using the moving average technique to smooth estimated motions for each frame. This results in novel view synthesis $\\rightarrow$ 2D animation + MA.", + "bbox": [ + 75, + 338, + 467, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Naive point cloud animation. Intuitively, we may also consider directly unprojecting pixels into 3D space and subsequently moving and rendering the RGB point cloud. Specifically, given a single input image, we first predict the depth map using DPT [45] and estimate 2D optical flow. We then lift the pixels and optical flow into 3D space to form RGB point clouds and scene flow. Finally, we animate RGB point clouds over time according to the scene flow and project these point clouds into target viewpoints. This baseline also faces a similar issue: as time goes by, large holes gradually appear. One might also employ our 3D symmetric animation technique to further enhance this baseline, i.e., naive point cloud animation + 3DSA.", + "bbox": [ + 75, + 489, + 467, + 685 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Results", + "text_level": 1, + "bbox": [ + 76, + 695, + 171, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation dataset. Since Holynski et al. [19] only provide a single image for each scene in the test set, we use the validation set from Holynski et al. [19] to evaluate our method and baselines. The validation set consists of 31 unique scenes with 162 samples of ground truth video clips captured by static cameras.", + "bbox": [ + 75, + 718, + 467, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Experimental setup. For evaluation, we render novel views of the ground truth videos in 4 different trajectories, resulting in 240 ground truth frames for each sample. This process does not involve inpainting, thus ground truth frames may contain holes. Only considering valid pixels when calculating metrics, we compare the predicted images", + "bbox": [ + 75, + 810, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/419e802d8d544b0e190dd2d8a7434862507b11f2f806342b10b0f78cbb050539.jpg", + "table_caption": [ + "Table 2. User study. Pairwise comparison results indicate that users prefer our method as more realistic and immersive." + ], + "table_footnote": [], + "table_body": "
Comparison | Human preference
2D Anim. [19] → NVS [52] / Ours | 12.5% / 87.5%
NVS [52] → 2D Anim. [19] / Ours | 3.9% / 96.1%
NVS [52] → 2D Anim. [19] + MA / Ours | 6.1% / 93.9%
Naive PC Anim. / Ours | 7.6% / 92.4%
Naive PC Anim. + 3DSA / Ours | 8.6% / 91.4%
3D Photo [52] / Ours | 10.5% / 89.5%
Holynski et al. [19] / Ours | 29.9% / 70.1%
", + "bbox": [ + 506, + 128, + 887, + 232 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a327edaaefee6126932d4bc20cd18ef6720c1d7fbd532db6aa0b7225a7eb8400.jpg", + "table_caption": [ + "Table 3. Ablation study on each component of our method." + ], + "table_footnote": [], + "table_body": "
 | PSNR↑ | SSIM↑ | LPIPS↓
w/o features | 21.50 | 0.674 | 0.228
w/o inpainting | 22.86 | 0.763 | 0.216
w/o 3D symmetric animation | 22.99 | 0.768 | 0.199
Full model | 23.33 | 0.776 | 0.197
", + "bbox": [ + 506, + 266, + 887, + 339 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "with the ground truth frames at the same time and viewpoint. For a fair comparison, all methods utilize the depth maps estimated by DPT [45]. Since we focus on comparing rendering quality, all methods use ground truth optical flows, except that NVS $[52] \\rightarrow 2\\mathrm{D}$ Anim. [19] and NVS $[52] \\rightarrow 2\\mathrm{D}$ Anim. [19] + MA have to estimate optical flows for each frame apart from the first frame. We adopt PSNR, SSIM, and LPIPS [73] as our evaluation metrics.", + "bbox": [ + 496, + 353, + 890, + 474 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative comparisons. As shown in Table 1, our method outperforms all baselines across all metrics by a large margin. This result implies that our method achieves better perceptual quality and produces more realistic renderings, which demonstrates the superiority and effectiveness of our method.", + "bbox": [ + 496, + 476, + 890, + 565 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative comparisons. We showcase the visual comparisons in Fig. 4. One can observe that our method presents photorealistic results while other comparative baselines produce more or less visual artifacts. 2D Anim. [19] $\\rightarrow$ NVS [52] intends to generate stripped flickering artifacts. This is because 2D Anim. [19] $\\rightarrow$ NVS [52] predicts the depth map for each animated frame, leading to frequent changes in the 3D structure of the scene and inconsistent inpainting. NVS [52] $\\rightarrow$ 2D Anim. [19] and NVS [52] $\\rightarrow$ 2D Anim. [19] + MA show jelly-like effects as optical flow should be estimated for each novel view. This results in varying motion fields across the video and thus inconsistent animation. Although Naive PC Anim. and Naive PC Anim. + 3DSA also lift the workspace into 3D, they are often prone to produce noticeable holes inevitably. One reason for this is that they do not perform inpainting. Note that some artifacts are difficult to observe when only scanning static figures.", + "bbox": [ + 496, + 566, + 890, + 838 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Controllable animation. Our method is able to create 3D cinematographs from a single image automatically. Further, we show that our framework is also highly extensible. 
For example, we can involve masks and flow hints as extra in", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4600", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2b9a3b5024c703ad6a3950082d8e7607af5f5941a49d58059b4a31d7dd23adbb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 90, + 348, + 137 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4334070062cd3b6f3f61c4716f983ef356853d2f0c87bbdebda3acd11bb58f24.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 138, + 348, + 185 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fa5cce382bbf43a37a88b618101d5ca4327f0c17d97b77d52c1b05d1eb0d96a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 186, + 348, + 232 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a43c4c8a6d6d40a35c63778061a2597ce60ed6e21c39b83bae9c84489a4a2453.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 233, + 348, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3b3d6ff5aef130dad9ac707fe69750ca678b5d366cde49ca651a5492e0252ded.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 281, + 348, + 327 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8629ab41f0c9305323db490922edf6a4e47ba58c4307ccdbbf4ed438de1a7caf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 104, + 329, + 348, + 375 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/651ff5c4abb3c8701b53eb6ed325a498989ab107ab1f61f94d7dcf2814b29bd4.jpg", + "image_caption": [ + "Figure 4. Qualitative comparisons against all baselines on the validation set from Holynski et al. [19]. Our method produces compelling results while other comparative alternatives suffer from visual artifacts. (a) 2D animation $[19] \\rightarrow$ novel view synthesis [52], (b) novel view synthesis $[52] \\rightarrow 2\\mathrm{D}$ animation [19], (c) novel view synthesis $[52] \\rightarrow 2\\mathrm{D}$ animation $[19] +$ moving average, (d) naive point cloud animation, (e) naive point cloud animation $+3\\mathrm{D}$ symmetric animation, (f) our method, and (g) pseudo ground truth." 
+ ], + "image_footnote": [], + "bbox": [ + 104, + 377, + 348, + 422 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/331c677dbb65d9c8fcb4bd343646ea093a83117d483dd86880bdff02a28080be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 90, + 617, + 137 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9bdd6efad93b53201e3059bdb622697f8cdffc9b39353571ef3a60943de609c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 138, + 617, + 185 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/40ee3763d929b45715967b510caf5198dd53ef3dd243ee301bd4af3a6b2dfe69.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 186, + 617, + 232 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/58cf468364f685b31d3b6dc459e6806ee5bf50a14d606d81ff407aa0e4aa475b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 233, + 617, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/101fdfc66c8e7d9d70c1b1878b53351507d8b82b79bc9d25898fcac07cb054bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 281, + 617, + 327 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7da3049f12d0abc772d8572f44294c7bb2c968998b0b4142dbcafbfd9e767d1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 329, + 617, + 375 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e3a2e79cca12412e125ac511395c5ac0c931e84fa031a1b4a2c001f842b53cc3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 352, + 377, + 617, + 422 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c7dae67b88b3dd693c188e1d35a4f93844c49e9bfc52632fe5e3999eef2772e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 90, + 885, + 137 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/907677b32cddcac887b489a70cb9d39cdbac34a0f1d53f82b35a8ac9442b405a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 138, + 885, + 185 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/384c5b775982f07cf8871ad81a2107da7dc170eacb047dd6a682bca81edff17e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 186, + 885, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/34793c795d25aff587522c040118594855e2b0d7894388abee62f1ff292d4184.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 234, + 885, + 280 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/802d9c0882d76105fca960886c7495e67b212f93ee128b99e8cf9b908e1cfb0b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 281, + 885, + 327 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2ae7891102eef16b2f71dff01c593fe55bd91fd679a83adce1cb01993d75e9e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 329, + 885, + 375 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/28ace664d6be5ce608c5751704199d8f3a9466423168636eaef72a15f9386770.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 620, + 377, + 885, + 422 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6171cac29139afec89e9c37141a91e034a08b298fda2f7c0eaed46e54430c695.jpg", + "image_caption": [ + "Input", + "Figure 5. Controllable animation. 
By changing the masks and motion hints, our method can interactively control the animation." + ], + "image_footnote": [], + "bbox": [ + 84, + 513, + 272, + 595 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6512544ff9e2a4c89e35e5e0410b46730857ebefa3586d32891f5e604b655193.jpg", + "image_caption": [ + "Masks & motion hints" + ], + "image_footnote": [], + "bbox": [ + 272, + 513, + 369, + 595 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/438519e393db1e5206e2ed44c6a00dce4244e7b410324bda96632ea60ed34f4f.jpg", + "image_caption": [ + "Motion fields" + ], + "image_footnote": [], + "bbox": [ + 370, + 513, + 462, + 595 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "puts to augment our motion estimator. This brings two advantages: (1) more accurate flow estimation; (2) interactive and controllable animation. As shown in Fig. 5, we can control the animation of the scene by providing various masks and motion hints to obtain different motion fields.", + "bbox": [ + 75, + 672, + 468, + 747 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Generalizing on in-the-wild photos. To further demonstrate the generalization of our method, we also test our method on in-the-wild photos. We first create hemagraphs with camera motions on the test set from Holynski et al. [19], where, for each scene, only a single image is provided. We then select some online images at random to test our method. To accurately estimate motion fields, we provide masks and flow hints as extra inputs to our motion estimator. As shown in Fig. 6, our method produces reasonable results for in-the-wild inputs while other comparative", + "bbox": [ + 75, + 750, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "alternatives yield visual artifacts or inconsistent animation.", + "bbox": [ + 500, + 513, + 887, + 529 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. User Study", + "text_level": 1, + "bbox": [ + 500, + 539, + 620, + 556 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further conduct a user study to investigate how our method performs in the view of humans when compared with all baselines, 3D Photo [52], and Holynski et al. [19]. Specifically, we collect 50 photos from the test set of Holynski et al. [19] and the Internet. We use different approaches to generate videos with identical settings. During the study, we show each participant an input image and two animated videos generated by our method and a randomly selected approach in random order. 108 volunteers are invited to choose the method with better perceptual quality and realism, or none if it is hard to judge. We report the results in Table 2, which points out that our method surpasses alternative methods by a large margin in terms of the sense of reality and immersion.", + "bbox": [ + 496, + 563, + 890, + 775 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 786, + 653, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To validate the effect of each component, we conduct an ablation study on the validation set from Holynski et al. [19] and show the results in Table 3. 
One can observe: i) 3D symmetric animation technique matters because it allows us to leverage bidirectionally displaced point clouds to complement each other and feasibly fill in missing regions; ii)", + "bbox": [ + 496, + 810, + 892, + 902 + ], + "page_idx": 6 + }, + { + "type": "aside_text", + "text": "(a) (b) (c) (d) (e) (f) (g)", + "bbox": [ + 83, + 109, + 98, + 405 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4601", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f5a81ab7b712b7b960749e52d83c52d407103fbd4cba6a5547a0585890164b7f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 92, + 305, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f15f25732e6450fed411f18299fa2ac6ecbc699da2e7d96cfb0faf5db0e8912f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 93, + 454, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a537a67f5067021e134728ef88014fc757aca63dbd184b9ce803199d7a572b23.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 93, + 598, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f483a064c98f128c705ec1bc2c49ec7771c09ad5d6d3ff2748f7dfdf6c5faac3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 93, + 741, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1e9333e1c031bf1016858079d90cecb56c3910c4816f1f5dfbadc6eae3e00671.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 93, + 883, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/35fe3eb972a1809130e0d10285fae79b16cfae42c84a92aa0e99afc7e9e6d151.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 193, + 303, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/686417ad0b34fe4e82290b22a0e1390705cdca8f1343846e18101e735e69e82b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 193, + 454, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dcf5994d108ca3af8318dd9cc05c36f7662a96438da8eda2332971c021c5c1a5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 193, + 598, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/14082932986a7cd6976251e28ce1a025d3f3ceb34249bedd0533c9af6210f144.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 193, + 741, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/775feb0956c44f1ecb7d5ede298bf1672b0ee9545e39db49209de96c71c87d05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 193, + 883, + 291 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4c808cd46b96d136f16ed5f2968847e9994cd67834dccc178c612fc00d07b773.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 83, + 292, + 303, + 391 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dcfb77c70003a8e89315db6e2dcfb87597df19aba88455302bbe198b0bec233b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 310, + 292, + 454, + 391 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6a531b7a7972d6c3ea66dcb96e2e9eae2e9490a239bd9485594f1196377c4c03.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 292, + 598, + 391 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": 
"images/bcb1951dc15289fde8623f0ec65bfd45bfa6221e67742d9aa58aea42471f36da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 292, + 741, + 391 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c2570be568c72ca139a833bfa69595dcb84907a38a2e3c8bbcb572f90d24ec02.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 741, + 292, + 883, + 391 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d4b52e205f17b8e3e3f0f48959728534632f8588920125c9a895982d9acf98b7.jpg", + "image_caption": [ + "Input" + ], + "image_footnote": [], + "bbox": [ + 83, + 393, + 303, + 489 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/62b672b63224ab57a02712918e31b9ad7cbf7e898bbf05728e8068d8b69d1f60.jpg", + "image_caption": [ + "2D Anim. $\\rightarrow$ NVS" + ], + "image_footnote": [], + "bbox": [ + 310, + 393, + 454, + 489 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/76439cae103eb89c3da25766dd92b3002d79d9ee8b1183b89e558399ea883dcb.jpg", + "image_caption": [ + "$\\mathrm{NVS}\\rightarrow 2\\mathrm{D}$ Anim. $^+$ MA" + ], + "image_footnote": [], + "bbox": [ + 455, + 393, + 598, + 489 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a0b8de3a8fac3435b38d3675b9502a511377a4f0588daf62921d5ec67ea165a9.jpg", + "image_caption": [ + "Naive PC Anim. + 3DSA", + "Figure 6. Visual comparisons on the test set from Holynski et al. [19] and in-the-wild photos. Our method consistently produces more realistic rendering with fewer visual artifacts as opposed to other baselines." + ], + "image_footnote": [], + "bbox": [ + 599, + 393, + 741, + 489 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bf29a9be76599a0ec19a90418f4e817bc7da3c7e836f8d67fd5f6c4c015ca424.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 741, + 393, + 883, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "introducing inpainting when constructing 3D geometry can improve the performance as this allows our model to produce plausible structures around depth discontinuities and fill in holes; iii) switching from directly using RGB colors to features in 3D scene representation significantly improves the rendering quality and reduces artifacts.", + "bbox": [ + 75, + 568, + 468, + 659 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 676, + 194, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduce a novel task of creating 3D cinematographs from single images. To this end, we present a simple yet effective method that makes a connection between image animation and novel view synthesis. We show that our method produces plausible animation of the scene while allowing camera movements. Our framework is flexible and customized. For accurate motion estimation and controllable animation, we can further include masks and flow hints as extra input for the motion estimator. Therefore, users can control how the scene is animated. Furthermore, our method generalizes well to in-the-wild photos, even like paintings or synthetic images generated by diffusion models. We conduct extensive experiments to ver", + "bbox": [ + 75, + 704, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ify the effectiveness and superiority of our method. A user study also demonstrates that our method generates realistic 3D cinematographs. 
We hope that our work can bring 3D cinematography into the sight of a broader community and motivate further research.", + "bbox": [ + 496, + 568, + 890, + 642 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations and future work. Our method may not work well when the depth prediction module estimates erroneous geometry from the input image, e.g., thin structures. In addition, inappropriate motion fields will sometimes lead to undesirable results, e.g., some regions are mistakenly identified as frozen. As we take the first step towards 3D cinematography, in this paper, we focus on handling common moving elements, i.e., fluids. In other words, our method may not apply to more complex motions, e.g., cyclic motion. We leave this for our future work.", + "bbox": [ + 496, + 643, + 892, + 792 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s). This work is also supported by Adobe Gift and the Ministry of Education, Singapore, under its Academic Research Fund Tier 2 (MOE-T2EP20220-0007) and Tier 1 (RG14/22).", + "bbox": [ + 496, + 795, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4602", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Jiamin Bai, Aseem Agarwala, Maneesh Agrawala, and Ravi Ramamoorthi. Automatic cinemagraph portraits. In Computer Graphics Forum, volume 32, pages 17-25. Wiley Online Library, 2013. 1", + "[2] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5366-5375, 2020. 3", + "[3] Wenbo Bao, Wei-Sheng Lai, Chao Ma, Xiaoyun Zhang, Zhiyong Gao, and Ming-Hsuan Yang. Depth-aware video frame interpolation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3703-3712, 2019. 2, 4", + "[4] Mojtaba Bemana, Karol Myszkowski, Hans-Peter Seidel, and Tobias Ritschel. X-Fields: Implicit neural view-, light- and time-image interpolation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020. 3", + "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 2", + "[6] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2", + "[7] Caroline Chan, Shiry Ginosar, Tinghui Zhou, and Alexei A Efros. Everybody dance now. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5933-5942, 2019. 2", + "[8] Yung-Yu Chuang, Dan B Goldman, Ke Colin Zheng, Brian Curless, David H Salesin, and Richard Szeliski. Animating pictures with stochastic motion textures. ACM Transactions on Graphics (TOG), 24(3):853-860, 2005. 2", + "[9] Paul E Debevec, Camillo J Taylor, and Jitendra Malik. 
Modeling and rendering architecture from photographs: A hybrid geometry-and image-based approach. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 11-20, 1996. 2", + "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. FlowNet: Learning optical flow with convolutional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 2758-2766, 2015. 3", + "[11] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. HeadGAN: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14398-14407, 2021. 2", + "[12] Yuki Endo, Yoshihiro Kanamori, and Shigeru Kuriyama. Animating Landscape: Self-supervised learning of decoupled motion and appearance for single-image video synthesis. ACM Transactions on Graphics (Proceedings of ACM" + ], + "bbox": [ + 76, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "SIGGRAPH Asia 2019), 38(6):175:1-175:19, 2019. 1, 2, 3, 4", + "[13] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2", + "[14] Steven J Gortler, Radek Grzeszcuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 43-54, 1996. 2", + "[15] Yuxuan Han, Ruicheng Wang, and Jiaolong Yang. Single-view synthesis in the wild with learned adaptive multiplane images. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022. 2", + "[16] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen Video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2", + "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[18] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 2", + "[19] Aleksander Holynski, Brian L. Curless, Steven M. Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5810-5819, 2021. 1, 2, 3, 4, 5, 6, 7, 8", + "[20] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. FlowNet 2.0: Evolution of optical flow estimation with deep networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2462-2470, 2017. 3", + "[21] Varun Jampani, Huiwen Chang, Kyle Sargent, Abhishek Kar, Richard Tucker, Michael Krainin, Dominik Kaeser, William T Freeman, David Salesin, Brian Curless, and Ce Liu. SLIDE: Single image 3d photography with soft layering and depth-aware inpainting. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2", + "[22] Wei-Cih Jhou and Wen-Huang Cheng. Animating still landscape photographs through cloud motion creation. IEEE Transactions on Multimedia, 18(1):4-13, 2015. 2", + "[23] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. 
Perceptual losses for real-time style transfer and super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 694–711. Springer, 2016. 5", + "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5", + "[25] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 2", + "[26] Jiaxin Li, Zijian Feng, Qi She, Henghui Ding, Changhu Wang, and Gim Hee Lee. MINE: Towards continuous depth spi with nerf for novel view synthesis. In Proceedings of" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4603", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12578-12588, 2021. 2", + "[27] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5521-5531, 2022. 3", + "[28] Xingyi Li, Chaoyi Hong, Yiran Wang, Zhiguo Cao, Ke Xian, and Guosheng Lin. Symmnerf: Learning to explore symmetry prior for single-view view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), pages 1726-1742, 2022. 2", + "[29] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Flow-grounded spatial-temporal video prediction from still images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 600-615, 2018. 2, 4", + "[30] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3", + "[31] Chieh Hubert Lin, Yen-Chi Cheng, Hsin-Ying Lee, Sergey Tulyakov, and Ming-Hsuan Yang. InfinityGAN: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2", + "[32] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14458-14467, 2021. 2", + "[33] Wen Liu, Zhixin Piao, Jie Min, Wenhan Luo, Lin Ma, and Shenghua Gao. Liquid Warping GAN: A unified framework for human motion imitation, appearance transfer and novel view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5904-5913, 2019. 2", + "[34] Elizaveta Logacheva, Roman Suvorov, Oleg Khomenko, Anton Mashikhin, and Victor Lempitsky. DeepLandscape: Adversarial modeling of landscape videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 256-272. Springer, 2020. 2", + "[35] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3667-3676, 2022. 1, 2", + "[36] Oded Maimon and Lior Rokach. Data mining and knowledge discovery handbook. 2005. 4", + "[37] Ben Mildenhall, Pratul P. 
Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2", + "[38] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of the IEEE/CVF" + ], + "bbox": [ + 78, + 92, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition (CVPR), pages 5437-5446, 2020. 2, 4", + "[39] Simon Niklaus, Long Mai, Jimei Yang, and Feng Liu. 3dken burns effect from a single image. ACM Transactions on Graphics (ToG), 38(6):1-15, 2019. 2", + "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2", + "[41] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3", + "[42] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2337-2346, 2019. 5", + "[43] Juewen Peng, Jianming Zhang, Xianrui Luo, Hao Lu, Ke Xian, and Zhiguo Cao. Mpib: An mpi-based bokeh rendering framework for realistic partial occlusion effects. In Proceedings of the European Conference on Computer Vision (ECCV), pages 590-607. Springer, 2022. 2", + "[44] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 3", + "[45] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12179-12188, 2021. 2, 4, 6", + "[46] Yurui Ren, Xiaoming Yu, Junming Chen, Thomas H Li, and Ge Li. Deep image spatial transformation for person image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7690-7699, 2020. 2", + "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 1", + "[48] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI), pages 234–241. Springer, 2015. 5", + "[49] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Advances in Neural Information Processing Systems (NeurIPS), 2016. 5", + "[50] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 231–242, 1998. 
2, 3, 4" + ], + "bbox": [ + 503, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4604", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[51] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4570-4580, 2019. 2", + "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4, 5, 6, 7", + "[53] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. Animating arbitrary objects via deep motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2377-2386, 2019. 2", + "[54] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2", + "[55] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3408-3416, 2018. 2", + "[56] Aliaksandr Siarohin, Oliver J Woodford, Jian Ren, Mengei Chai, and Sergey Tulyakov. Motion representations for articulated animation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13653-13662, 2021. 2", + "[57] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-A-Video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.2", + "[58] Vincent Sitzmann, Michael Zollhoefer, and Gordon Wetzstein. Scene Representation Networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2", + "[59] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning (ICML), pages 2256-2265. PMLR, 2015. 2", + "[60] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. PWC-Net: Cnns for optical flow using pyramid, warping, and cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934–8943, 2018. 3, 5", + "[61] Zachary Teed and Jia Deng. RAFT: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419, 2020. 3", + "[62] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 551-560, 2020. 2" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Shubham Tulsiani, Richard Tucker, and Noah Snavely. Layer-structured 3d scene inference via view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), pages 302–317, 2018. 2", + "[64] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 
3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 4, 5", + "[65] Yiran Wang, Zhiyu Pan, Xingyi Li, Zhiguo Cao, Ke Xian, and Jianming Zhang. Less is more: Consistent video depth estimation with masked frames modeling. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), pages 6347-6358, 2022. 2", + "[66] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. SynSin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7467-7477, 2020. 2, 5", + "[67] Ke Xian, Chunhua Shen, Zhiguo Cao, Hao Lu, Yang Xiao, Ruibo Li, and Zhenbo Luo. Monocular relative depth perception with web stereo data supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 311-320, 2018. 2", + "[68] Ke Xian, Jianming Zhang, Oliver Wang, Long Mai, Zhe Lin, and Zhiguo Cao. Structure-guided ranking loss for single image depth prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 611-620, 2020. 2", + "[69] Wei Xiong, Wenhan Luo, Lin Ma, Wei Liu, and Jiebo Luo. Learning to generate time-lapse videos using multi-stage dynamic generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2364-2373, 2018. 2", + "[70] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Humphrey Shi, and Zhangyang Wang. SinNeRF: Training neural radiance fields on complex scenes from a single image. In Proceedings of the European Conference on Computer Vision (ECCV), pages 736-753. Springer, 2022. 2", + "[71] Hang Yan, Yebin Liu, and Yasutaka Furukawa. Turning an urban scene video into a cinematograph. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 394-402, 2017. 1", + "[72] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4578-4587, 2021. 2", + "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 586-595, 2018. 5, 6" + ], + "bbox": [ + 501, + 92, + 893, + 839 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "4605", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_model.json b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..26dd5d2edb2e3bab8f8dd749eb799fcb0dc4cbb2 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_model.json @@ -0,0 +1,2807 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.287, + 0.131, + 0.685, + 0.154 + ], + "angle": 0, + "content": "3D Cinemagraphy from a Single Image" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.179, + 0.874, + 0.288 + ], + "angle": 0, + "content": "Xingyi Li\\(^{1,3}\\) Zhiguo Cao\\(^{1}\\) Huiqiang Sun\\(^{1}\\) Jianming Zhang\\(^{2}\\) Ke Xian\\(^{3*}\\) Guosheng Lin\\(^{3}\\) \n\\(^{1}\\)Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology \n\\(^{2}\\)Adobe Research \\(^{3}\\)S-Lab, Nanyang Technological University \n{xingyi.li, zgcao, shq1031}@hust.edu.cn, jianmzha@adobe.com, {ke.xian, gslin}@ntu.edu.sg \nhttps://xingyi-li.github.io/3d-cinemagraphy" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.315, + 0.291, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.316, + 0.506, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.316, + 0.742, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.744, + 0.316, + 0.89, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.434, + 0.893, + 0.492 + ], + "angle": 0, + "content": "Figure 1. Given a single still image, our method can synthesize videos with plausible animation of the scene while allowing camera movements. Here, we showcase four 3D cinematographs with various camera trajectories. Besides real-world photos (the left two examples), our method can also generalize to paintings (the third one) and synthetic images generated by Stable Diffusion [47] (the rightmost one). To see the effect of 3D cinematography, readers are encouraged to view with Adobe Acrobat or KDE Okular." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.513, + 0.314, + 0.529 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.545, + 0.473, + 0.864 + ], + "angle": 0, + "content": "We present 3D Cinemagography, a new technique that marries 2D image animation with 3D photography. Given a single still image as input, our goal is to generate a video that contains both visual content animation and camera motion. We empirically find that naively combining existing 2D image animation and 3D photography methods leads to obvious artifacts or inconsistent animation. Our key insight is that representing and animating the scene in 3D space offers a natural solution to this task. To this end, we first convert the input image into feature-based layered depth images using predicted depth values, followed by unprojecting them to a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion into the 3D scene flow. Finally, to resolve the problem of hole emergence as points move forward, we propose to bidirectionally displace the point cloud as per the scene flow and synthesize novel views by separately projecting them into target image planes and blending the results. Extensive experiments demonstrate the effectiveness of our method. A user study is also conducted to validate the compelling rendering results of our method." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.513, + 0.631, + 0.529 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.539, + 0.892, + 0.81 + ], + "angle": 0, + "content": "Nowadays, since people can easily take images using smartphone cameras, the number of online photos has increased drastically. However, with the rise of online video-sharing platforms such as YouTube and TikTok, people are no longer content with static images as they have grown accustomed to watching videos. It would be great if we could animate those still images and synthesize videos for a better experience. These living images, termed cinematographs, have already been created and gained rapid popularity online [1, 71]. Although cinematographs may engage people with the content for longer than a regular photo, they usually fail to deliver an immersive sense of 3D to audiences. This is because cinematographs are usually based on a static camera and fail to produce parallax effects. We are therefore motivated to explore ways of animating the photos and moving around the cameras at the same time. As shown in Fig. 1, this will bring many still images to life and provide a drastically vivid experience." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this paper, we are interested in making the first step towards 3D cinematography that allows both realistic animation of the scene and camera motions with compelling parallax effects from a single image. There are plenty of attempts to tackle either of the two problems. Single-image animation methods [12, 19, 35] manage to produce a real-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.222, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "4595" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.288 + ], + "angle": 0, + "content": "istic animated video from a single image, but they usually operate in 2D space, and therefore they cannot create camera movement effects. Classic novel view synthesis methods [5, 6, 9, 14, 25] and recent implicit neural representations [37, 40, 58] entail densely captured views as input to render unseen camera perspectives. Single-shot novel view synthesis approaches [21, 39, 52, 66] exhibit the potential for generating novel camera trajectories of the scene from a single image. Nonetheless, these methods usually hypothesize that the observed scene is static without moving elements. Directly combining existing state-of-the-art solutions of single-image animation and novel view synthesis yields visual artifacts or inconsistent animation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.288, + 0.473, + 0.65 + ], + "angle": 0, + "content": "To address the above challenges, we present a novel framework that solves the joint task of image animation and novel view synthesis. This framework can be trained to create 3D cinematographs from a single still image. Our key intuition is that handling this new task in 3D space would naturally enable both animation and moving cameras simultaneously. With this in mind, we first represent the scene as feature-based layered depth images (LDIs) [50] and unproject the feature LDIs into a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion to 3D scene flow using depth values predicted by DPT [45]. Next, we animate the point cloud according to the scene flow. 
To resolve the problem of hole emergence as points move forward, we are inspired by prior works [3, 19, 38] and propose a 3D symmetric animation technique to bidirectionally displace point clouds, which can effectively fill in those unknown regions. Finally, we synthesize novel views at time \\( t \\) by rendering point clouds into target image planes and blending the results. In this manner, our proposed method can automatically create 3D cinematographs from a single image. Moreover, our framework is highly extensible, e.g., we can augment our motion estimator with user-defined masks and flow hints for accurate flow estimation and controllable animation." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.651, + 0.365, + 0.665 + ], + "angle": 0, + "content": "In summary, our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.675, + 0.469, + 0.737 + ], + "angle": 0, + "content": "- We propose a new task of creating 3D cinematographs from single images. To this end, we propose a novel framework that jointly learns to solve the task of image animation and novel view synthesis in 3D space." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.746, + 0.469, + 0.777 + ], + "angle": 0, + "content": "- We design a 3D symmetric animation technique to address the hole problem as points move forward." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.786, + 0.469, + 0.831 + ], + "angle": 0, + "content": "- Our framework is flexible and customized. We can achieve controllable animation by augmenting our motion estimator with user-defined masks and flow hints." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.675, + 0.469, + 0.831 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.845, + 0.22, + 0.86 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Single-image animation. Different kinds of methods have been explored to animate still images. Some works [8, 22]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.516 + ], + "angle": 0, + "content": "focus on animating certain objects via physical simulation but may not be easily applied to more general cases of inthe-wild photos. Given driving videos as guidance, there are plenty of methods that attempt to perform motion transfer on static objects with either a priori knowledge of moving objects [7, 11, 33, 46, 55] or in an unsupervised manner [53, 54, 56]. They entail reference videos to drive the motion of static objects, and thus do not suit our task. Recent advances in generative models have attracted much attention and motivated the community to develop realistic image and video synthesis methods. Many works [31, 32, 34, 51, 69] are based on generative adversarial networks (GANs) and operate transformations in latent space to generate plausible appearance changes and movements. Nonetheless, it is non-trial to allow for explicit control over those latent codes and to animate input imagery in a disentangled manner. As diffusion models [17, 59] improve by leaps and bounds, several diffusion-based works [16, 18, 57] attempt to generate realistic videos from text or images. However, these methods are time-consuming and expensive in terms of computation. Here we focus on methods that utilize learned motion priors to convert a still image into an animated video texture [12, 13, 19, 29, 35]. In particular, Holynski et al. 
[19] first synthesize the optical flow of the input image via a motion estimation network, then obtain future frames using the estimated flow field. This method renders plausible animation of fluid elements in the input image but suffers from producing camera motions with parallax." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Novel view synthesis from a single image. Novel view synthesis allows for rendering unseen camera perspectives from 2D images and their corresponding camera poses. Recent impressive synthesis results may credit to implicit neural representations [37, 40, 58]. Nevertheless, these methods usually assume dense views as input, which is not always available in most cases. Moreover, they focus on the task of interpolation given multiple views rather than extrapolation. As such, we instead turn to methods aiming at handling single input. Among them, a number of works [15, 26, 28, 62, 63, 70, 72] infer the 3D structure of scenes by learning to predict a scene representation from a single image. These methods are usually trained end-to-end but suffer from generalizing to in-the-wild photos. Most relevant to our work are those approaches [39, 52, 66] that apply depth estimation [45, 65, 67, 68] followed by inpainting occluded regions. For example, 3D Photo [52] estimates monocular depth maps and uses the representation of layered depth images (LDIs) [43, 50], in which context-aware color and depth inpainting are performed. To enable fine-grained detail modeling, SLIDE [21] decomposes the scene into foreground and background via a soft-layering scheme. However, unlike our approach, these methods usually assume the scene is static by default, which largely lessens the sense of reality, especially when some elements such as" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4596" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.892, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.296, + 0.895, + 0.409 + ], + "angle": 0, + "content": "Figure 2. An overview of our method. Given a single still image as input, we first predict a dense depth map. To represent the scene in 3D space, we separate the input image into several layers according to depth discontinuities and apply context-aware inpainting, yielding layered depth images (LDIs) \\(\\mathcal{L}\\). We then use a 2D feature extractor to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs \\(\\mathcal{F}\\). Subsequently, we lift feature LDIs into 3D space using corresponding depth values to obtain a feature point cloud \\(\\mathcal{P}\\). To animate the scene, we estimate a 2D motion field from the input image and apply Euler integration to generate forward and backward displacement fields \\(F_{0\\rightarrow t}\\) and \\(F_{0\\rightarrow t - N}\\). We then augment displacement fields with estimated depth values to obtain 3D scene flow fields. Next, we bidirectionally displace the feature point cloud \\(\\mathcal{P}\\) as per the scene flow and separately project them into target image planes to obtain \\(\\mathbf{F}_f\\) and \\(\\mathbf{F}_b\\). Finally, we blend them together and pass the result through our image decoder to synthesize a novel view at time \\(t\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.434, + 0.434, + 0.449 + ], + "angle": 0, + "content": "a creek or smoke are also captured in the input image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.449, + 0.473, + 0.736 + ], + "angle": 0, + "content": "Space-time view synthesis. Space-time view synthesis is the task of rendering novel camera perspectives for dynamic scenes in terms of space and time [30]. Most of the prior works [2, 4, 27] rely on synchronized multi-view videos as input, which prevents their wide applicability. To mitigate this requirement, many neural rendering approaches [30, 41, 44] manage to show promising space-time view synthesis results from monocular videos. They usually train each new scene independently, and thus cannot directly handle in-the-wild inputs. Most related to our work, 3D Moments [64] introduces a novel 3D photography effect where cinematic camera motion and frame interpolation are simultaneously performed. However, this method demands near-duplicate photos as input and is unable to control the animation results. Instead, we show that our method can animate still images while enabling camera motion with 3D parallax. Moreover, we can also extend our system so that users are allowed to interactively control how the photos are animated by providing user-defined masks and flow hints." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.747, + 0.17, + 0.763 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.188, + 0.787 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Given a single still image, our goal is to synthesize plausible animation of the scene and simultaneously enable camera motion. The output of our method is a realistic cinematograph with compelling parallax effects. Fig. 2 schematically illustrates our pipeline. Our method starts by estimating a motion field and a depth map from the input image. We then separate the RGBD input into several layers" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.434, + 0.895, + 0.584 + ], + "angle": 0, + "content": "as per depth discontinuities and inpaint occluded regions, followed by extracting 2D feature maps for each layer, resulting in feature LDIs [50]. To enable scene animation, we lift the 2D motion to 3D scene flow and unproject feature LDIs into a feature point cloud using their corresponding depth values. Thereafter, we bidirectionally animate the point cloud with scene flow using our 3D symmetric animation technique. We end up rendering them into two animated feature maps and composite the results to synthesize novel views at time \\( t \\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.601, + 0.682, + 0.617 + ], + "angle": 0, + "content": "3.2. Motion Estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.895, + 0.87 + ], + "angle": 0, + "content": "To animate a still image, we wish to estimate the corresponding motion field for the observed scene. Generally, the motion we witness in the real world is extremely complicated as it is time-varying and many events such as occlusion and collision could occur. Intuitively, we could directly adopt prior optical flow estimation methods [10, 20, 60, 61] to accomplish this. However, it is not trivial since they usually take a pair of images as input to compute optical flow. Endo et al. 
[12] instead propose to learn and predict the motion in a recurrent manner, but this kind of approach is prone to large distortions in the long term. To simplify this, we follow Holynski et al. [19] and assume that a time-invariant and constant-velocity motion field, termed Eulerian flow field, can well approximate the bulk of real-world motions, e.g., water, smoke, and clouds. Formally, we denote \\( M \\) as the Eulerian flow field of the scene, which suggests that" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.885, + 0.892, + 0.902 + ], + "angle": 0, + "content": "\\[\nF _ {t \\rightarrow t + 1} (\\cdot) = M (\\cdot), \\tag {1}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "4597" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "where \\( F_{t\\rightarrow t + 1}(\\cdot) \\) represents the optical flow map from frame \\( t \\) to frame \\( t + 1 \\). This defines how each pixel in the current frame will move in the future. Specifically, we can obtain the next frame via Euler integration:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.167, + 0.469, + 0.182 + ], + "angle": 0, + "content": "\\[\n\\mathbf {x} _ {t + 1} = \\mathbf {x} _ {t} + M (\\mathbf {x} _ {t}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.196, + 0.469, + 0.256 + ], + "angle": 0, + "content": "where \\(\\mathbf{x}_t\\) represents the coordinates of a pixel \\(\\mathbf{x}_t\\) at time \\(t\\). Since the optical flow between consecutive frames is identical, we can easily deduce the displacement field by recursively applying:" + }, + { + "type": "equation", + "bbox": [ + 0.089, + 0.27, + 0.469, + 0.287 + ], + "angle": 0, + "content": "\\[\nF _ {0 \\rightarrow t} (\\mathbf {x} _ {0}) = F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0}) + M (\\mathbf {x} _ {0} + F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0})), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.299, + 0.469, + 0.39 + ], + "angle": 0, + "content": "where \\( F_{0\\rightarrow t}(\\cdot) \\) denotes the displacement field from time 0 to time \\( t \\), which describes the course of each pixel in the input image across future frames. To estimate the Eulerian flow field, we adopt an image-to-image translation network as our motion estimator, which is able to map an RGB image to the optical flow." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.402, + 0.305, + 0.418 + ], + "angle": 0, + "content": "3.3. 3D Scene Representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.426, + 0.469, + 0.517 + ], + "angle": 0, + "content": "One common disadvantage of previous single-image animation methods [12, 19, 29] is that they usually operate in 2D space via a deep image warping technique, which prevents them from creating parallax effects. Instead, to enable camera motion, we propose to lift our workspace into 3D and thus resort to 3D scene representation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.469, + 0.835 + ], + "angle": 0, + "content": "We start by estimating the underlying geometry of the scene using the state-of-the-art monocular depth estimator DPT [45], which can predict reasonable dense depth maps for in-the-wild photos. Following Wang et al. [64], we then convert the RGBD input into an LDI representation [50] by separating it into several layers as per depth discontinuities and inpainting occluded regions. 
Specifically, we first divide the depth range of the source depth map into multiple intervals using agglomerative clustering [36], followed by creating layered depth images \\(\\mathcal{L} = \\{\\mathbf{C}_l,\\mathbf{D}_l\\}_{l = 1}^L\\). Next, we inpaint occluded regions of each color and depth layer by applying the pretrained inpainting model from 3D Photo [52]. To improve rendering quality and reduce artifacts, we also introduce a 2D feature extraction network to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs \\(\\mathcal{F} = \\{\\mathbf{F}_l,\\mathbf{D}_l\\}_{l = 1}^L\\). Finally, in order to enable animation in 3D space, we unproject feature LDIs into 3D via their corresponding inpainted depth layers, yielding a feature point cloud \\(\\mathcal{P} = \\{(\\mathbf{X}_i,\\mathbf{f}_i)\\}\\), where \\(\\mathbf{X}_i\\) and \\(\\mathbf{f}_i\\) are 3D coordinates and the feature vector for each 3D point respectively." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.411, + 0.863 + ], + "angle": 0, + "content": "3.4. Point Cloud Animation and Rendering" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We now have the estimated displacement fields \\(F_{0\\rightarrow t}\\) and the feature point cloud \\(\\mathcal{P}\\). Our next step is to animate this" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.092, + 0.882, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.275, + 0.892, + 0.331 + ], + "angle": 0, + "content": "Figure 3. 3D symmetric animation. To address the hole issue, we borrow textural information from the point cloud that moves in the opposite direction and integrate both of the animated point clouds to feasibly fill in the missing regions (the red and blue regions)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.35, + 0.892, + 0.576 + ], + "angle": 0, + "content": "point cloud over time. To bridge the gap between 2D displacement fields and 3D scene representation, we first augment the displacement fields with estimated depth values to lift them into 3D scene flow. In other words, we now have a function of time \\( t \\) and the coordinates of a 3D point that returns a corresponding 3D translation vector that can shift this 3D point accordingly. Thus, for time \\( t \\), we then move each 3D point by computing its destination as its original position plus a corresponding 3D translation vector, i.e., \\( \\mathcal{P}(t) = \\{(\\mathbf{X}_i(t),\\mathbf{f}_i)\\} \\). Intuitively, this process indeed animates the point cloud from one time to another. However, we empirically find that as points move forward, increasingly large holes emerge. This frequently happens when points leave their original locations without any points filling in those unknown regions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.58, + 0.892, + 0.838 + ], + "angle": 0, + "content": "3D symmetric animation. To resolve this, inspired by prior works [3, 19, 38], we propose a 3D symmetric animation technique that leverages bidirectionally displaced point clouds to complement each other. With 3D symmetric animation, we can borrow textural information from point clouds that move in the opposite direction and integrate both of the animated point clouds to feasibly fill in missing regions. Specifically, we directly replace the original Eulerian flow field \\(M\\) with \\(-M\\) and recursively apply Eq. 
(3) to generate a reversed displacement field. Similarly, we then lift this 2D displacement field to obtain inverse scene flow, which is employed to produce point clouds with backward movements. As illustrated in Fig. 3, for time \\(t\\), to fill in holes, we respectively apply \\(F_{0\\rightarrow t}\\) and \\(F_{0\\rightarrow t - N}\\) to draw associated scene flow fields and use them to move the point cloud, resulting in \\(\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}\\) and \\(\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}\\), where \\(N\\) is the number of frames." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Neural rendering. We now have two bidirectionally animated feature point clouds. Our final step is to render them into animated feature maps and composite the results for synthesizing novel views at time \\( t \\). In particu" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4598" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.35 + ], + "angle": 0, + "content": "lar, given camera poses and intrinsics, we use a differentiable point-based renderer [66] to splat feature point clouds \\(\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}\\) and \\(\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}\\) separately into the target image plane. This process yields 2D feature maps \\(\\mathbf{F}_f\\) and \\(\\mathbf{F}_b\\) along with depth maps \\(\\mathbf{D}_f\\), \\(\\mathbf{D}_b\\) and alpha maps \\(\\alpha_{f},\\alpha_{b}\\). Next, we wish to fuse \\(\\mathbf{F}_f\\) and \\(\\mathbf{F}_b\\) into one feature map \\(\\mathbf{F}_t\\). Inspired by prior work [64], our intuition is three-fold: 1) to enable endless and seamless looping, we should assign the weight of the two feature maps based on time so as to guarantee that the first and last frame of the synthesized video are identical; 2) the weight map should favor pixel locations with smaller depth values, in the sense that it is impossible to see objects behind those objects closer to the eye; 3) to avoid missing regions as much as possible, we should greatly increase the contribution of those pixel locations that can fill in holes. With this in mind, we formulate the weight map as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.357, + 0.47, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathbf {W} _ {t} = \\frac {\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}}}{\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}} + \\frac {t}{N} \\cdot \\boldsymbol {\\alpha} _ {b} \\cdot e ^ {- \\mathbf {D} _ {b}}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.402, + 0.47, + 0.434 + ], + "angle": 0, + "content": "where \\(N\\) is the number of frames. Therefore, we can integrate \\(\\mathbf{F}_f\\) and \\(\\mathbf{F}_b\\) via:" + }, + { + "type": "equation", + "bbox": [ + 0.162, + 0.443, + 0.469, + 0.46 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {F} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {F} _ {b}. 
\\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.469, + 0.357, + 0.485 + ], + "angle": 0, + "content": "We also obtain the merged depth map \\(\\mathbf{D}_t\\):" + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.495, + 0.469, + 0.512 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {D} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {D} _ {b}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.522, + 0.47, + 0.582 + ], + "angle": 0, + "content": "Finally, we employ an image decoder network to map the 2D feature map \\(\\mathbf{F}_t\\) and depth map \\(\\mathbf{D}_t\\) to a novel view at time \\(t\\). Repeating this method, we are able to synthesize a realistic cinematograph with compelling parallax effects." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.591, + 0.182, + 0.607 + ], + "angle": 0, + "content": "3.5. Training" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.614, + 0.47, + 0.674 + ], + "angle": 0, + "content": "This section describes our training scheme. In general, we train our image-to-image translation network, 2D feature extraction network, and image decoder network in a two-stage manner." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.469, + 0.87 + ], + "angle": 0, + "content": "Training dataset. We use the training set from Holynski et al. [19] as our training dataset. This dataset comprises short video clips of fluid motion that are extracted from longer stock-footage videos. We use the first frames of each video clip and the corresponding ground truth motion fields estimated by a pretrained optical flow network [60] as motion estimation pairs to train our motion estimation network. To develop animation ability, we randomly sample training data from fluid motion video clips. For novel view synthesis training, we require multi-view supervision of the same scene, which is not available in the training set. Instead, we use 3D Photo [52] to generate pseudo ground truth novel views for training." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Two-stage training. Our model is trained in a two-stage manner. Specifically, we first train our motion estimation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.138 + ], + "angle": 0, + "content": "network using motion estimation pairs. To train the motion estimation network, we minimize GAN loss, GAN feature matching loss [49], and endpoint error as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.145, + 0.892, + 0.162 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {M o t i o n}} = \\mathcal {L} _ {\\text {G A N}} + 1 0 \\mathcal {L} _ {\\text {F M}} + \\mathcal {L} _ {\\text {E P E}}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.17, + 0.893, + 0.396 + ], + "angle": 0, + "content": "In the second stage, we freeze the motion estimation network and train the feature extraction network and image decoder network. Our model simultaneously learns to render novel views and animate scenes. For novel view synthesis, we set \\( t = 0 \\) and use pseudo ground truth novel views to supervise our model. We randomly sample target viewpoints of scenes and require the model to synthesize them. For animation, we train our model on training triplets (start frame, middle frame, end frame) sampled from fluid motion video clips. 
In particular, we render the middle frame from both directions using \\( F_{0\\rightarrow t} \\) and \\( F_{0\\rightarrow t - N} \\) without changing the camera poses and intrinsics. Besides GAN loss and GAN feature matching loss [49], we also enforce VGG perceptual loss [23, 73] and \\( l_{1} \\) loss between synthesized and ground truth images. The overall loss is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.405, + 0.892, + 0.421 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {A n i m a t i o n}} = \\mathcal {L} _ {G A N} + 1 0 \\mathcal {L} _ {F M} + \\mathcal {L} _ {l _ {1}} + \\mathcal {L} _ {V G G}. \\tag {8}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.43, + 0.634, + 0.446 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.454, + 0.719, + 0.47 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.477, + 0.892, + 0.568 + ], + "angle": 0, + "content": "Our motion estimator is a U-Net [48] based generator with 16 convolutional layers, and we replace Batch Normalization with SPADE [42]. For the feature extraction network and image decoder network, we follow the network architectures from Wang et al. [64]. We adopt the multi-scale discriminator used in SPADE [42] during training." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.892, + 0.705 + ], + "angle": 0, + "content": "Our model is trained using the Adam optimizer [24]. We conduct all experiments on a single NVIDIA GeForce RTX 3090 GPU. We train the motion estimation network for around \\(120k\\) iterations with a batch size of 16. We set the generator learning rate to \\(5 \\times 10^{-4}\\) and the discriminator learning rate to \\(2 \\times 10^{-3}\\). For the animation training stage, we train the feature extraction network and image decoder network for around \\(250k\\) iterations with a learning rate starting at \\(1 \\times 10^{-4}\\) and then decaying exponentially." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.712, + 0.608, + 0.726 + ], + "angle": 0, + "content": "4.2. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.892, + 0.854 + ], + "angle": 0, + "content": "In principle, to evaluate our method, we are required to compare it against current state-of-the-art models. However, to our knowledge, we are the first to tackle the novel task of synthesizing a realistic cinematograph with compelling parallax effects from a single image. As a result, we cannot directly compare to previous works. Instead, we consider forming the following baselines to verify the superiority of our method:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "2D animation \\(\\rightarrow\\) novel view synthesis. One might consider 2D image animation \\(\\rightarrow\\) single-shot novel view synthesis: first employing a 2D image animation method, then" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4599" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.472, + 0.145 + ], + "angle": 0, + "content": "Table 1. Quantitative comparisons against all baselines on the validation set from Holynski et al. [19]. The better approach favors higher PSNR and SSIM but lower LPIPS. The best performance is in bold." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.157, + 0.466, + 0.244 + ], + "angle": 0, + "content": "
MethodPSNR↑SSIM↑LPIPS↓
2D Anim. [19] → NVS [52]21.120.6330.286
NV5 [52] → 2D Anim. [19]21.970.6970.276
NV5 [52] → 2D Anim. [19] + MA22.470.7180.261
Naive PC Anim.19.460.6470.243
Naive PC Anim. + 3DSA20.490.6600.237
Ours23.330.7760.197
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.264, + 0.468, + 0.338 + ], + "angle": 0, + "content": "a single-shot novel view synthesis method. Specifically, we first adopt a state-of-the-art image animation method [19] to produce an animated looping video. We then apply DPT [45] to estimate geometry and utilize 3D Photo [52] to generate novel views for each frame." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.339, + 0.468, + 0.489 + ], + "angle": 0, + "content": "Novel view synthesis \\(\\rightarrow\\) 2D animation. It also appears to be feasible that we first render novel views of scenes by 3D Photo [52] and then use the image animation method [19] to animate each viewpoint. Note that motion estimation should be performed for each frame as viewpoints have changed. However, we empirically find that this usually results in varying motion fields across the video. To mitigate this, we further propose using the moving average technique to smooth estimated motions for each frame. This results in novel view synthesis \\(\\rightarrow\\) 2D animation + MA." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.491, + 0.468, + 0.686 + ], + "angle": 0, + "content": "Naive point cloud animation. Intuitively, we may also consider directly unprojecting pixels into 3D space and subsequently moving and rendering the RGB point cloud. Specifically, given a single input image, we first predict the depth map using DPT [45] and estimate 2D optical flow. We then lift the pixels and optical flow into 3D space to form RGB point clouds and scene flow. Finally, we animate RGB point clouds over time according to the scene flow and project these point clouds into target viewpoints. This baseline also faces a similar issue: as time goes by, large holes gradually appear. One might also employ our 3D symmetric animation technique to further enhance this baseline, i.e., naive point cloud animation + 3DSA." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.696, + 0.172, + 0.71 + ], + "angle": 0, + "content": "4.3. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.468, + 0.809 + ], + "angle": 0, + "content": "Evaluation dataset. Since Holynski et al. [19] only provide a single image for each scene in the test set, we use the validation set from Holynski et al. [19] to evaluate our method and baselines. The validation set consists of 31 unique scenes with 162 samples of ground truth video clips captured by static cameras." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Experimental setup. For evaluation, we render novel views of the ground truth videos in 4 different trajectories, resulting in 240 ground truth frames for each sample. This process does not involve inpainting, thus ground truth frames may contain holes. Only considering valid pixels when calculating metrics, we compare the predicted images" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 2. User study. Pairwise comparison results indicate that users prefer our method as more realistic and immersive." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.129, + 0.888, + 0.233 + ], + "angle": 0, + "content": "
ComparisonHuman preference
2D Anim. [19] → NVS [52] / Ours12.5% / 87.5%
NVS [52] → 2D Anim. [19] / Ours3.9% / 96.1%
NVS [52] → 2D Anim. [19] + MA / Ours6.1% / 93.9%
Naive PC Anim. / Ours7.6% / 92.4%
Naive PC Anim. + 3DSA / Ours8.6% / 91.4%
3D Photo [52] / Ours10.5% / 89.5%
Holynski et al. [19] / Ours29.9% / 70.1%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.242, + 0.88, + 0.257 + ], + "angle": 0, + "content": "Table 3. Ablation study on each component of our method." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.267, + 0.888, + 0.34 + ], + "angle": 0, + "content": "
PSNR↑SSIM↑LPIPS↓
w/o features21.500.6740.228
w/o inpainting22.860.7630.216
w/o 3D symmetric animation22.990.7680.199
Full model23.330.7760.197
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.354, + 0.892, + 0.475 + ], + "angle": 0, + "content": "with the ground truth frames at the same time and viewpoint. For a fair comparison, all methods utilize the depth maps estimated by DPT [45]. Since we focus on comparing rendering quality, all methods use ground truth optical flows, except that NVS \\([52] \\rightarrow 2\\mathrm{D}\\) Anim. [19] and NVS \\([52] \\rightarrow 2\\mathrm{D}\\) Anim. [19] + MA have to estimate optical flows for each frame apart from the first frame. We adopt PSNR, SSIM, and LPIPS [73] as our evaluation metrics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.477, + 0.892, + 0.566 + ], + "angle": 0, + "content": "Quantitative comparisons. As shown in Table 1, our method outperforms all baselines across all metrics by a large margin. This result implies that our method achieves better perceptual quality and produces more realistic renderings, which demonstrates the superiority and effectiveness of our method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.892, + 0.839 + ], + "angle": 0, + "content": "Qualitative comparisons. We showcase the visual comparisons in Fig. 4. One can observe that our method presents photorealistic results while other comparative baselines produce more or less visual artifacts. 2D Anim. [19] \\(\\rightarrow\\) NVS [52] intends to generate stripped flickering artifacts. This is because 2D Anim. [19] \\(\\rightarrow\\) NVS [52] predicts the depth map for each animated frame, leading to frequent changes in the 3D structure of the scene and inconsistent inpainting. NVS [52] \\(\\rightarrow\\) 2D Anim. [19] and NVS [52] \\(\\rightarrow\\) 2D Anim. [19] + MA show jelly-like effects as optical flow should be estimated for each novel view. This results in varying motion fields across the video and thus inconsistent animation. Although Naive PC Anim. and Naive PC Anim. + 3DSA also lift the workspace into 3D, they are often prone to produce noticeable holes inevitably. One reason for this is that they do not perform inpainting. Note that some artifacts are difficult to observe when only scanning static figures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Controllable animation. Our method is able to create 3D cinematographs from a single image automatically. Further, we show that our framework is also highly extensible. 
For example, we can involve masks and flow hints as extra in" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4600" + } + ], + [ + { + "type": "aside_text", + "bbox": [ + 0.084, + 0.111, + 0.099, + 0.406 + ], + "angle": 0, + "content": "(a) (b) (c) (d) (e) (f) (g)" + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.092, + 0.349, + 0.138 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.14, + 0.349, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.187, + 0.349, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.234, + 0.349, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.282, + 0.349, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.33, + 0.349, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.105, + 0.378, + 0.349, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.092, + 0.618, + 0.138 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.14, + 0.618, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.187, + 0.618, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.234, + 0.618, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.282, + 0.618, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.33, + 0.618, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.378, + 0.618, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.092, + 0.886, + 0.138 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.14, + 0.886, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.187, + 0.886, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.235, + 0.886, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.282, + 0.886, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.33, + 0.886, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.622, + 0.378, + 0.886, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.439, + 0.895, + 0.496 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparisons against all baselines on the validation set from Holynski et al. [19]. Our method produces compelling results while other comparative alternatives suffer from visual artifacts. (a) 2D animation \\([19] \\rightarrow\\) novel view synthesis [52], (b) novel view synthesis \\([52] \\rightarrow 2\\mathrm{D}\\) animation [19], (c) novel view synthesis \\([52] \\rightarrow 2\\mathrm{D}\\) animation \\([19] +\\) moving average, (d) naive point cloud animation, (e) naive point cloud animation \\(+3\\mathrm{D}\\) symmetric animation, (f) our method, and (g) pseudo ground truth." 
+ }, + { + "type": "image", + "bbox": [ + 0.085, + 0.514, + 0.273, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.165, + 0.607, + 0.194, + 0.619 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.514, + 0.37, + 0.597 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.601, + 0.355, + 0.623 + ], + "angle": 0, + "content": "Masks & motion hints" + }, + { + "type": "image", + "bbox": [ + 0.371, + 0.514, + 0.464, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.395, + 0.601, + 0.435, + 0.622 + ], + "angle": 0, + "content": "Motion fields" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.636, + 0.47, + 0.664 + ], + "angle": 0, + "content": "Figure 5. Controllable animation. By changing the masks and motion hints, our method can interactively control the animation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.469, + 0.748 + ], + "angle": 0, + "content": "puts to augment our motion estimator. This brings two advantages: (1) more accurate flow estimation; (2) interactive and controllable animation. As shown in Fig. 5, we can control the animation of the scene by providing various masks and motion hints to obtain different motion fields." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Generalizing on in-the-wild photos. To further demonstrate the generalization of our method, we also test our method on in-the-wild photos. We first create hemagraphs with camera motions on the test set from Holynski et al. [19], where, for each scene, only a single image is provided. We then select some online images at random to test our method. To accurately estimate motion fields, we provide masks and flow hints as extra inputs to our motion estimator. As shown in Fig. 6, our method produces reasonable results for in-the-wild inputs while other comparative" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.515, + 0.888, + 0.53 + ], + "angle": 0, + "content": "alternatives yield visual artifacts or inconsistent animation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.54, + 0.622, + 0.557 + ], + "angle": 0, + "content": "4.4. User Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.564, + 0.892, + 0.776 + ], + "angle": 0, + "content": "We further conduct a user study to investigate how our method performs in the view of humans when compared with all baselines, 3D Photo [52], and Holynski et al. [19]. Specifically, we collect 50 photos from the test set of Holynski et al. [19] and the Internet. We use different approaches to generate videos with identical settings. During the study, we show each participant an input image and two animated videos generated by our method and a randomly selected approach in random order. 108 volunteers are invited to choose the method with better perceptual quality and realism, or none if it is hard to judge. We report the results in Table 2, which points out that our method surpasses alternative methods by a large margin in terms of the sense of reality and immersion." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.654, + 0.803 + ], + "angle": 0, + "content": "4.5. 
Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.893, + 0.903 + ], + "angle": 0, + "content": "To validate the effect of each component, we conduct an ablation study on the validation set from Holynski et al. [19] and show the results in Table 3. One can observe: i) 3D symmetric animation technique matters because it allows us to leverage bidirectionally displaced point clouds to complement each other and feasibly fill in missing regions; ii)" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "4601" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.093, + 0.306, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.094, + 0.455, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.094, + 0.599, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.094, + 0.742, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.094, + 0.885, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.194, + 0.305, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.194, + 0.455, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.194, + 0.599, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.194, + 0.742, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.194, + 0.885, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.294, + 0.305, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.294, + 0.455, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.294, + 0.599, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.294, + 0.742, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.294, + 0.885, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.394, + 0.305, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.394, + 0.455, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.394, + 0.599, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.394, + 0.742, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.743, + 0.394, + 0.885, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.181, + 0.5, + 0.21, + 0.512 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image_caption", + "bbox": [ + 0.335, + 0.499, + 0.429, + 0.511 + ], + "angle": 0, + "content": "2D Anim. \\(\\rightarrow\\) NVS" + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.5, + 0.586, + 0.512 + ], + "angle": 0, + "content": "\\(\\mathrm{NVS}\\rightarrow 2\\mathrm{D}\\) Anim. \\(^+\\) MA" + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.499, + 0.732, + 0.51 + ], + "angle": 0, + "content": "Naive PC Anim. 
+ 3DSA" + }, + { + "type": "image_caption", + "bbox": [ + 0.802, + 0.5, + 0.827, + 0.51 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.528, + 0.892, + 0.556 + ], + "angle": 0, + "content": "Figure 6. Visual comparisons on the test set from Holynski et al. [19] and in-the-wild photos. Our method consistently produces more realistic rendering with fewer visual artifacts as opposed to other baselines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.47, + 0.66 + ], + "angle": 0, + "content": "introducing inpainting when constructing 3D geometry can improve the performance as this allows our model to produce plausible structures around depth discontinuities and fill in holes; iii) switching from directly using RGB colors to features in 3D scene representation significantly improves the rendering quality and reduces artifacts." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.678, + 0.196, + 0.693 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In this paper, we introduce a novel task of creating 3D cinematographs from single images. To this end, we present a simple yet effective method that makes a connection between image animation and novel view synthesis. We show that our method produces plausible animation of the scene while allowing camera movements. Our framework is flexible and customized. For accurate motion estimation and controllable animation, we can further include masks and flow hints as extra input for the motion estimator. Therefore, users can control how the scene is animated. Furthermore, our method generalizes well to in-the-wild photos, even like paintings or synthetic images generated by diffusion models. We conduct extensive experiments to ver" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.643 + ], + "angle": 0, + "content": "ify the effectiveness and superiority of our method. A user study also demonstrates that our method generates realistic 3D cinematographs. We hope that our work can bring 3D cinematography into the sight of a broader community and motivate further research." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.893, + 0.794 + ], + "angle": 0, + "content": "Limitations and future work. Our method may not work well when the depth prediction module estimates erroneous geometry from the input image, e.g., thin structures. In addition, inappropriate motion fields will sometimes lead to undesirable results, e.g., some regions are mistakenly identified as frozen. As we take the first step towards 3D cinematography, in this paper, we focus on handling common moving elements, i.e., fluids. In other words, our method may not apply to more complex motions, e.g., cyclic motion. We leave this for our future work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s). This work is also supported by Adobe Gift and the Ministry of Education, Singapore, under its Academic Research Fund Tier 2 (MOE-T2EP20220-0007) and Tier 1 (RG14/22)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4602" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Jiamin Bai, Aseem Agarwala, Maneesh Agrawala, and Ravi Ramamoorthi. Automatic cinemagraph portraits. In Computer Graphics Forum, volume 32, pages 17-25. Wiley Online Library, 2013. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.243 + ], + "angle": 0, + "content": "[2] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5366-5375, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.471, + 0.314 + ], + "angle": 0, + "content": "[3] Wenbo Bao, Wei-Sheng Lai, Chao Ma, Xiaoyun Zhang, Zhiyong Gao, and Ming-Hsuan Yang. Depth-aware video frame interpolation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3703-3712, 2019. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.316, + 0.47, + 0.372 + ], + "angle": 0, + "content": "[4] Mojtaba Bemana, Karol Myszkowski, Hans-Peter Seidel, and Tobias Ritschel. X-Fields: Implicit neural view-, light- and time-image interpolation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.373, + 0.47, + 0.442 + ], + "angle": 0, + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.445, + 0.47, + 0.5 + ], + "angle": 0, + "content": "[6] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.502, + 0.47, + 0.558 + ], + "angle": 0, + "content": "[7] Caroline Chan, Shiry Ginosar, Tinghui Zhou, and Alexei A Efros. Everybody dance now. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5933-5942, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.56, + 0.47, + 0.615 + ], + "angle": 0, + "content": "[8] Yung-Yu Chuang, Dan B Goldman, Ke Colin Zheng, Brian Curless, David H Salesin, and Richard Szeliski. Animating pictures with stochastic motion textures. ACM Transactions on Graphics (TOG), 24(3):853-860, 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.617, + 0.47, + 0.687 + ], + "angle": 0, + "content": "[9] Paul E Debevec, Camillo J Taylor, and Jitendra Malik. Modeling and rendering architecture from photographs: A hybrid geometry-and image-based approach. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 11-20, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.689, + 0.47, + 0.772 + ], + "angle": 0, + "content": "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. 
FlowNet: Learning optical flow with convolutional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 2758-2766, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.774, + 0.47, + 0.843 + ], + "angle": 0, + "content": "[11] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. HeadGAN: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14398-14407, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.845, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[12] Yuki Endo, Yoshihiro Kanamori, and Shigeru Kuriyama. Animating Landscape: Self-supervised learning of decoupled motion and appearance for single-image video synthesis. ACM Transactions on Graphics (Proceedings of ACM" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.116, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "SIGGRAPH Asia 2019), 38(6):175:1-175:19, 2019. 1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.163 + ], + "angle": 0, + "content": "[13] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.892, + 0.22 + ], + "angle": 0, + "content": "[14] Steven J Gortler, Radek Grzeszcuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 43-54, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.222, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[15] Yuxuan Han, Ruicheng Wang, and Jiaolong Yang. Single-view synthesis in the wild with learned adaptive multiplane images. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.279, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[16] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen Video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.892, + 0.39 + ], + "angle": 0, + "content": "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.392, + 0.892, + 0.433 + ], + "angle": 0, + "content": "[18] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.435, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[19] Aleksander Holynski, Brian L. Curless, Steven M. Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5810-5819, 2021. 1, 2, 3, 4, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.892, + 0.575 + ], + "angle": 0, + "content": "[20] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. 
FlowNet 2.0: Evolution of optical flow estimation with deep networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2462-2470, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.577, + 0.892, + 0.672 + ], + "angle": 0, + "content": "[21] Varun Jampani, Huiwen Chang, Kyle Sargent, Abhishek Kar, Richard Tucker, Michael Krainin, Dominik Kaeser, William T Freeman, David Salesin, Brian Curless, and Ce Liu. SLIDE: Single image 3d photography with soft layering and depth-aware inpainting. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.674, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[22] Wei-Cih Jhou and Wen-Huang Cheng. Animating still landscape photographs through cloud motion creation. IEEE Transactions on Multimedia, 18(1):4-13, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.717, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[23] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 694–711. Springer, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.774, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[25] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[26] Jiaxin Li, Zijian Feng, Qi She, Henghui Ding, Changhu Wang, and Gim Hee Lee. MINE: Towards continuous depth spi with nerf for novel view synthesis. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4603" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.469, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12578-12588, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.216 + ], + "angle": 0, + "content": "[27] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5521-5531, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.218, + 0.469, + 0.285 + ], + "angle": 0, + "content": "[28] Xingyi Li, Chaoyi Hong, Yiran Wang, Zhiguo Cao, Ke Xian, and Guosheng Lin. Symmnerf: Learning to explore symmetry prior for single-view view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), pages 1726-1742, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.288, + 0.469, + 0.356 + ], + "angle": 0, + "content": "[29] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. 
Flow-grounded spatial-temporal video prediction from still images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 600-615, 2018. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.358, + 0.469, + 0.425 + ], + "angle": 0, + "content": "[30] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.427, + 0.469, + 0.482 + ], + "angle": 0, + "content": "[31] Chieh Hubert Lin, Yen-Chi Cheng, Hsin-Ying Lee, Sergey Tulyakov, and Ming-Hsuan Yang. InfinityGAN: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.484, + 0.469, + 0.564 + ], + "angle": 0, + "content": "[32] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14458-14467, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.567, + 0.469, + 0.648 + ], + "angle": 0, + "content": "[33] Wen Liu, Zhixin Piao, Jie Min, Wenhan Luo, Lin Ma, and Shenghua Gao. Liquid Warping GAN: A unified framework for human motion imitation, appearance transfer and novel view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5904-5913, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.65, + 0.469, + 0.718 + ], + "angle": 0, + "content": "[34] Elizaveta Logacheva, Roman Suvorov, Oleg Khomenko, Anton Mashikhin, and Victor Lempitsky. DeepLandscape: Adversarial modeling of landscape videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 256-272. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.72, + 0.47, + 0.774 + ], + "angle": 0, + "content": "[35] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3667-3676, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.776, + 0.469, + 0.802 + ], + "angle": 0, + "content": "[36] Oded Maimon and Lior Rokach. Data mining and knowledge discovery handbook. 2005. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.804, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[37] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.874, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[38] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of the IEEE/CVF" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition (CVPR), pages 5437-5446, 2020. 
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.161 + ], + "angle": 0, + "content": "[39] Simon Niklaus, Long Mai, Jimei Yang, and Feng Liu. 3dken burns effect from a single image. ACM Transactions on Graphics (ToG), 38(6):1-15, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.164, + 0.892, + 0.244 + ], + "angle": 0, + "content": "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[41] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.892, + 0.383 + ], + "angle": 0, + "content": "[42] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2337-2346, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.454 + ], + "angle": 0, + "content": "[43] Juewen Peng, Jianming Zhang, Xianrui Luo, Hao Lu, Ke Xian, and Zhiguo Cao. Mpib: An mpi-based bokeh rendering framework for realistic partial occlusion effects. In Proceedings of the European Conference on Computer Vision (ECCV), pages 590-607. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.456, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[44] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.526, + 0.892, + 0.579 + ], + "angle": 0, + "content": "[45] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12179-12188, 2021. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.892, + 0.648 + ], + "angle": 0, + "content": "[46] Yurui Ren, Xiaoming Yu, Junming Chen, Thomas H Li, and Ge Li. Deep image spatial transformation for person image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7690-7699, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.651, + 0.892, + 0.719 + ], + "angle": 0, + "content": "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.721, + 0.892, + 0.788 + ], + "angle": 0, + "content": "[48] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. 
In International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI), pages 234–241. Springer, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[49] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Advances in Neural Information Processing Systems (NeurIPS), 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[50] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 231–242, 1998. 2, 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4604" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.16 + ], + "angle": 0, + "content": "[51] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4570-4580, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.472, + 0.304 + ], + "angle": 0, + "content": "[53] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. Animating arbitrary objects via deep motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2377-2386, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.472, + 0.362 + ], + "angle": 0, + "content": "[54] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.363, + 0.472, + 0.431 + ], + "angle": 0, + "content": "[55] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3408-3416, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.472, + 0.503 + ], + "angle": 0, + "content": "[56] Aliaksandr Siarohin, Oliver J Woodford, Jian Ren, Mengei Chai, and Sergey Tulyakov. Motion representations for articulated animation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13653-13662, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.504, + 0.472, + 0.573 + ], + "angle": 0, + "content": "[57] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-A-Video: Text-to-video generation without text-video data. 
arXiv preprint arXiv:2209.14792, 2022.2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.575, + 0.472, + 0.644 + ], + "angle": 0, + "content": "[58] Vincent Sitzmann, Michael Zollhoefer, and Gordon Wetzstein. Scene Representation Networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.472, + 0.715 + ], + "angle": 0, + "content": "[59] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning (ICML), pages 2256-2265. PMLR, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.472, + 0.786 + ], + "angle": 0, + "content": "[60] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. PWC-Net: Cnns for optical flow using pyramid, warping, and cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934–8943, 2018. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[61] Zachary Teed and Jia Deng. RAFT: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[62] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 551-560, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.148 + ], + "angle": 0, + "content": "[63] Shubham Tulsiani, Richard Tucker, and Noah Snavely. Layer-structured 3d scene inference via view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), pages 302–317, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.218 + ], + "angle": 0, + "content": "[64] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.22, + 0.893, + 0.289 + ], + "angle": 0, + "content": "[65] Yiran Wang, Zhiyu Pan, Xingyi Li, Zhiguo Cao, Ke Xian, and Jianming Zhang. Less is more: Consistent video depth estimation with masked frames modeling. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), pages 6347-6358, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.29, + 0.893, + 0.359 + ], + "angle": 0, + "content": "[66] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. SynSin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7467-7477, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.36, + 0.894, + 0.431 + ], + "angle": 0, + "content": "[67] Ke Xian, Chunhua Shen, Zhiguo Cao, Hao Lu, Yang Xiao, Ruibo Li, and Zhenbo Luo. Monocular relative depth perception with web stereo data supervision. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 311-320, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.431, + 0.893, + 0.5 + ], + "angle": 0, + "content": "[68] Ke Xian, Jianming Zhang, Oliver Wang, Long Mai, Zhe Lin, and Zhiguo Cao. Structure-guided ranking loss for single image depth prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 611-620, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.501, + 0.894, + 0.572 + ], + "angle": 0, + "content": "[69] Wei Xiong, Wenhan Luo, Lin Ma, Wei Liu, and Jiebo Luo. Learning to generate time-lapse videos using multi-stage dynamic generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2364-2373, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.572, + 0.893, + 0.642 + ], + "angle": 0, + "content": "[70] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Humphrey Shi, and Zhangyang Wang. SinNeRF: Training neural radiance fields on complex scenes from a single image. In Proceedings of the European Conference on Computer Vision (ECCV), pages 736-753. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.642, + 0.894, + 0.698 + ], + "angle": 0, + "content": "[71] Hang Yan, Yebin Liu, and Yasutaka Furukawa. Turning an urban scene video into a cinematograph. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 394-402, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.699, + 0.893, + 0.768 + ], + "angle": 0, + "content": "[72] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4578-4587, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.769, + 0.894, + 0.84 + ], + "angle": 0, + "content": "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 586-595, 2018. 
5, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.84 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4605" + } + ] +] \ No newline at end of file diff --git a/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_origin.pdf b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..28ee64bb0c43281749767b2d3dd61ce903b2ef36 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/822e0c52-d8c7-4a4e-8a84-1a2d57dbe08f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a0b2d4b5b4f5477703001e9c8d05108a54d6ac13bb620d122e1779f23c8b3ae +size 4704233 diff --git a/2023/3D Cinemagraphy From a Single Image/full.md b/2023/3D Cinemagraphy From a Single Image/full.md new file mode 100644 index 0000000000000000000000000000000000000000..94e514b9ea9fafcdf5b2f450d2293145611a2528 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/full.md @@ -0,0 +1,394 @@ +# 3D Cinemagraphy from a Single Image + +Xingyi Li $^{1,3}$ Zhiguo Cao $^{1}$ Huiqiang Sun $^{1}$ Jianming Zhang $^{2}$ Ke Xian $^{3*}$ Guosheng Lin $^{3}$ $^{1}$ Key Laboratory of Image Processing and Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology + $^{2}$ Adobe Research $^{3}$ S-Lab, Nanyang Technological University +{xingyi.li, zgcao, shq1031}@hust.edu.cn, jianmzha@adobe.com, {ke.xian, gslin}@ntu.edu.sg +https://xingyi-li.github.io/3d-cinemagraphy + +![](images/77e45a8a8ae99f7e5122bf60ba50833109c19c4ad4f2de0de9320aca1eab8f4b.jpg) +Figure 1. Given a single still image, our method can synthesize videos with plausible animation of the scene while allowing camera movements. Here, we showcase four 3D cinematographs with various camera trajectories. Besides real-world photos (the left two examples), our method can also generalize to paintings (the third one) and synthetic images generated by Stable Diffusion [47] (the rightmost one). To see the effect of 3D cinematography, readers are encouraged to view with Adobe Acrobat or KDE Okular. + +![](images/afaecbba82dd6bd57e0f587e50178780a6b79bc215a6dbcec1887dd14bf9fa3b.jpg) + +![](images/812c489801e67547cea0e4abdc1db516b991152c6dc20e97d1f533d9d77e7022.jpg) + +![](images/9e76c7973b4c0d6ac5f0c7109de2752b00aa644105db1a7c8f97243477dba918.jpg) + +# Abstract + +We present 3D Cinemagography, a new technique that marries 2D image animation with 3D photography. Given a single still image as input, our goal is to generate a video that contains both visual content animation and camera motion. We empirically find that naively combining existing 2D image animation and 3D photography methods leads to obvious artifacts or inconsistent animation. Our key insight is that representing and animating the scene in 3D space offers a natural solution to this task. To this end, we first convert the input image into feature-based layered depth images using predicted depth values, followed by unprojecting them to a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion into the 3D scene flow. 
Finally, to resolve the problem of hole emergence as points move forward, we propose to bidirectionally displace the point cloud as per the scene flow and synthesize novel views by separately projecting them into target image planes and blending the results. Extensive experiments demonstrate the effectiveness of our method. A user study is also conducted to validate the compelling rendering results of our method. + +# 1. Introduction + +Nowadays, since people can easily take images using smartphone cameras, the number of online photos has increased drastically. However, with the rise of online video-sharing platforms such as YouTube and TikTok, people are no longer content with static images as they have grown accustomed to watching videos. It would be great if we could animate those still images and synthesize videos for a better experience. These living images, termed cinematographs, have already been created and gained rapid popularity online [1, 71]. Although cinematographs may engage people with the content for longer than a regular photo, they usually fail to deliver an immersive sense of 3D to audiences. This is because cinematographs are usually based on a static camera and fail to produce parallax effects. We are therefore motivated to explore ways of animating the photos and moving around the cameras at the same time. As shown in Fig. 1, this will bring many still images to life and provide a drastically vivid experience. + +In this paper, we are interested in making the first step towards 3D cinematography that allows both realistic animation of the scene and camera motions with compelling parallax effects from a single image. There are plenty of attempts to tackle either of the two problems. Single-image animation methods [12, 19, 35] manage to produce a real- + +istic animated video from a single image, but they usually operate in 2D space, and therefore they cannot create camera movement effects. Classic novel view synthesis methods [5, 6, 9, 14, 25] and recent implicit neural representations [37, 40, 58] entail densely captured views as input to render unseen camera perspectives. Single-shot novel view synthesis approaches [21, 39, 52, 66] exhibit the potential for generating novel camera trajectories of the scene from a single image. Nonetheless, these methods usually hypothesize that the observed scene is static without moving elements. Directly combining existing state-of-the-art solutions of single-image animation and novel view synthesis yields visual artifacts or inconsistent animation. + +To address the above challenges, we present a novel framework that solves the joint task of image animation and novel view synthesis. This framework can be trained to create 3D cinematographs from a single still image. Our key intuition is that handling this new task in 3D space would naturally enable both animation and moving cameras simultaneously. With this in mind, we first represent the scene as feature-based layered depth images (LDIs) [50] and unproject the feature LDIs into a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion to 3D scene flow using depth values predicted by DPT [45]. Next, we animate the point cloud according to the scene flow. To resolve the problem of hole emergence as points move forward, we are inspired by prior works [3, 19, 38] and propose a 3D symmetric animation technique to bidirectionally displace point clouds, which can effectively fill in those unknown regions. 
Finally, we synthesize novel views at time $t$ by rendering point clouds into target image planes and blending the results. In this manner, our proposed method can automatically create 3D cinematographs from a single image. Moreover, our framework is highly extensible, e.g., we can augment our motion estimator with user-defined masks and flow hints for accurate flow estimation and controllable animation. + +In summary, our main contributions are: + +- We propose a new task of creating 3D cinematographs from single images. To this end, we propose a novel framework that jointly learns to solve the task of image animation and novel view synthesis in 3D space. +- We design a 3D symmetric animation technique to address the hole problem as points move forward. +- Our framework is flexible and customized. We can achieve controllable animation by augmenting our motion estimator with user-defined masks and flow hints. + +# 2. Related Work + +Single-image animation. Different kinds of methods have been explored to animate still images. Some works [8, 22] + +focus on animating certain objects via physical simulation but may not be easily applied to more general cases of inthe-wild photos. Given driving videos as guidance, there are plenty of methods that attempt to perform motion transfer on static objects with either a priori knowledge of moving objects [7, 11, 33, 46, 55] or in an unsupervised manner [53, 54, 56]. They entail reference videos to drive the motion of static objects, and thus do not suit our task. Recent advances in generative models have attracted much attention and motivated the community to develop realistic image and video synthesis methods. Many works [31, 32, 34, 51, 69] are based on generative adversarial networks (GANs) and operate transformations in latent space to generate plausible appearance changes and movements. Nonetheless, it is non-trial to allow for explicit control over those latent codes and to animate input imagery in a disentangled manner. As diffusion models [17, 59] improve by leaps and bounds, several diffusion-based works [16, 18, 57] attempt to generate realistic videos from text or images. However, these methods are time-consuming and expensive in terms of computation. Here we focus on methods that utilize learned motion priors to convert a still image into an animated video texture [12, 13, 19, 29, 35]. In particular, Holynski et al. [19] first synthesize the optical flow of the input image via a motion estimation network, then obtain future frames using the estimated flow field. This method renders plausible animation of fluid elements in the input image but suffers from producing camera motions with parallax. + +Novel view synthesis from a single image. Novel view synthesis allows for rendering unseen camera perspectives from 2D images and their corresponding camera poses. Recent impressive synthesis results may credit to implicit neural representations [37, 40, 58]. Nevertheless, these methods usually assume dense views as input, which is not always available in most cases. Moreover, they focus on the task of interpolation given multiple views rather than extrapolation. As such, we instead turn to methods aiming at handling single input. Among them, a number of works [15, 26, 28, 62, 63, 70, 72] infer the 3D structure of scenes by learning to predict a scene representation from a single image. These methods are usually trained end-to-end but suffer from generalizing to in-the-wild photos. 
Most relevant to our work are those approaches [39, 52, 66] that apply depth estimation [45, 65, 67, 68] followed by inpainting occluded regions. For example, 3D Photo [52] estimates monocular depth maps and uses the representation of layered depth images (LDIs) [43, 50], in which context-aware color and depth inpainting are performed. To enable fine-grained detail modeling, SLIDE [21] decomposes the scene into foreground and background via a soft-layering scheme. However, unlike our approach, these methods usually assume the scene is static by default, which largely lessens the sense of reality, especially when some elements such as + +![](images/6c4949f4da65374cd84740c97a37b56fee1a8822d40c3c37efaec7f81ec19bb8.jpg) +Figure 2. An overview of our method. Given a single still image as input, we first predict a dense depth map. To represent the scene in 3D space, we separate the input image into several layers according to depth discontinuities and apply context-aware inpainting, yielding layered depth images (LDIs) $\mathcal{L}$ . We then use a 2D feature extractor to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs $\mathcal{F}$ . Subsequently, we lift feature LDIs into 3D space using corresponding depth values to obtain a feature point cloud $\mathcal{P}$ . To animate the scene, we estimate a 2D motion field from the input image and apply Euler integration to generate forward and backward displacement fields $F_{0\rightarrow t}$ and $F_{0\rightarrow t - N}$ . We then augment displacement fields with estimated depth values to obtain 3D scene flow fields. Next, we bidirectionally displace the feature point cloud $\mathcal{P}$ as per the scene flow and separately project them into target image planes to obtain $\mathbf{F}_f$ and $\mathbf{F}_b$ . Finally, we blend them together and pass the result through our image decoder to synthesize a novel view at time $t$ . + +a creek or smoke are also captured in the input image. + +Space-time view synthesis. Space-time view synthesis is the task of rendering novel camera perspectives for dynamic scenes in terms of space and time [30]. Most of the prior works [2, 4, 27] rely on synchronized multi-view videos as input, which prevents their wide applicability. To mitigate this requirement, many neural rendering approaches [30, 41, 44] manage to show promising space-time view synthesis results from monocular videos. They usually train each new scene independently, and thus cannot directly handle in-the-wild inputs. Most related to our work, 3D Moments [64] introduces a novel 3D photography effect where cinematic camera motion and frame interpolation are simultaneously performed. However, this method demands near-duplicate photos as input and is unable to control the animation results. Instead, we show that our method can animate still images while enabling camera motion with 3D parallax. Moreover, we can also extend our system so that users are allowed to interactively control how the photos are animated by providing user-defined masks and flow hints. + +# 3. Method + +# 3.1. Overview + +Given a single still image, our goal is to synthesize plausible animation of the scene and simultaneously enable camera motion. The output of our method is a realistic cinematograph with compelling parallax effects. Fig. 2 schematically illustrates our pipeline. Our method starts by estimating a motion field and a depth map from the input image. 
We then separate the RGBD input into several layers + +as per depth discontinuities and inpaint occluded regions, followed by extracting 2D feature maps for each layer, resulting in feature LDIs [50]. To enable scene animation, we lift the 2D motion to 3D scene flow and unproject feature LDIs into a feature point cloud using their corresponding depth values. Thereafter, we bidirectionally animate the point cloud with scene flow using our 3D symmetric animation technique. We end up rendering them into two animated feature maps and composite the results to synthesize novel views at time $t$ . + +# 3.2. Motion Estimation + +To animate a still image, we wish to estimate the corresponding motion field for the observed scene. Generally, the motion we witness in the real world is extremely complicated as it is time-varying and many events such as occlusion and collision could occur. Intuitively, we could directly adopt prior optical flow estimation methods [10, 20, 60, 61] to accomplish this. However, it is not trivial since they usually take a pair of images as input to compute optical flow. Endo et al. [12] instead propose to learn and predict the motion in a recurrent manner, but this kind of approach is prone to large distortions in the long term. To simplify this, we follow Holynski et al. [19] and assume that a time-invariant and constant-velocity motion field, termed Eulerian flow field, can well approximate the bulk of real-world motions, e.g., water, smoke, and clouds. Formally, we denote $M$ as the Eulerian flow field of the scene, which suggests that + +$$ +F _ {t \rightarrow t + 1} (\cdot) = M (\cdot), \tag {1} +$$ + +where $F_{t\rightarrow t + 1}(\cdot)$ represents the optical flow map from frame $t$ to frame $t + 1$ . This defines how each pixel in the current frame will move in the future. Specifically, we can obtain the next frame via Euler integration: + +$$ +\mathbf {x} _ {t + 1} = \mathbf {x} _ {t} + M (\mathbf {x} _ {t}), \tag {2} +$$ + +where $\mathbf{x}_t$ represents the coordinates of a pixel $\mathbf{x}_t$ at time $t$ . Since the optical flow between consecutive frames is identical, we can easily deduce the displacement field by recursively applying: + +$$ +F _ {0 \rightarrow t} (\mathbf {x} _ {0}) = F _ {0 \rightarrow t - 1} (\mathbf {x} _ {0}) + M (\mathbf {x} _ {0} + F _ {0 \rightarrow t - 1} (\mathbf {x} _ {0})), \tag {3} +$$ + +where $F_{0\rightarrow t}(\cdot)$ denotes the displacement field from time 0 to time $t$ , which describes the course of each pixel in the input image across future frames. To estimate the Eulerian flow field, we adopt an image-to-image translation network as our motion estimator, which is able to map an RGB image to the optical flow. + +# 3.3. 3D Scene Representation + +One common disadvantage of previous single-image animation methods [12, 19, 29] is that they usually operate in 2D space via a deep image warping technique, which prevents them from creating parallax effects. Instead, to enable camera motion, we propose to lift our workspace into 3D and thus resort to 3D scene representation. + +We start by estimating the underlying geometry of the scene using the state-of-the-art monocular depth estimator DPT [45], which can predict reasonable dense depth maps for in-the-wild photos. Following Wang et al. [64], we then convert the RGBD input into an LDI representation [50] by separating it into several layers as per depth discontinuities and inpainting occluded regions. 
Specifically, we first divide the depth range of the source depth map into multiple intervals using agglomerative clustering [36], followed by creating layered depth images $\mathcal{L} = \{\mathbf{C}_l,\mathbf{D}_l\}_{l = 1}^L$ . Next, we inpaint occluded regions of each color and depth layer by applying the pretrained inpainting model from 3D Photo [52]. To improve rendering quality and reduce artifacts, we also introduce a 2D feature extraction network to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs $\mathcal{F} = \{\mathbf{F}_l,\mathbf{D}_l\}_{l = 1}^L$ . Finally, in order to enable animation in 3D space, we unproject feature LDIs into 3D via their corresponding inpainted depth layers, yielding a feature point cloud $\mathcal{P} = \{(\mathbf{X}_i,\mathbf{f}_i)\}$ , where $\mathbf{X}_i$ and $\mathbf{f}_i$ are 3D coordinates and the feature vector for each 3D point respectively. + +# 3.4. Point Cloud Animation and Rendering + +We now have the estimated displacement fields $F_{0\rightarrow t}$ and the feature point cloud $\mathcal{P}$ . Our next step is to animate this + +![](images/4420e0d6b8fcb14bf3c3b500c14fac36e9318d4891d4d320681be1555ce24c18.jpg) +Figure 3. 3D symmetric animation. To address the hole issue, we borrow textural information from the point cloud that moves in the opposite direction and integrate both of the animated point clouds to feasibly fill in the missing regions (the red and blue regions). + +point cloud over time. To bridge the gap between 2D displacement fields and 3D scene representation, we first augment the displacement fields with estimated depth values to lift them into 3D scene flow. In other words, we now have a function of time $t$ and the coordinates of a 3D point that returns a corresponding 3D translation vector that can shift this 3D point accordingly. Thus, for time $t$ , we then move each 3D point by computing its destination as its original position plus a corresponding 3D translation vector, i.e., $\mathcal{P}(t) = \{(\mathbf{X}_i(t),\mathbf{f}_i)\}$ . Intuitively, this process indeed animates the point cloud from one time to another. However, we empirically find that as points move forward, increasingly large holes emerge. This frequently happens when points leave their original locations without any points filling in those unknown regions. + +3D symmetric animation. To resolve this, inspired by prior works [3, 19, 38], we propose a 3D symmetric animation technique that leverages bidirectionally displaced point clouds to complement each other. With 3D symmetric animation, we can borrow textural information from point clouds that move in the opposite direction and integrate both of the animated point clouds to feasibly fill in missing regions. Specifically, we directly replace the original Eulerian flow field $M$ with $-M$ and recursively apply Eq. (3) to generate a reversed displacement field. Similarly, we then lift this 2D displacement field to obtain inverse scene flow, which is employed to produce point clouds with backward movements. As illustrated in Fig. 3, for time $t$ , to fill in holes, we respectively apply $F_{0\rightarrow t}$ and $F_{0\rightarrow t - N}$ to draw associated scene flow fields and use them to move the point cloud, resulting in $\mathcal{P}_f(t) = \{(\mathbf{X}_i^f (t),\mathbf{f}_i)\}$ and $\mathcal{P}_b(t) = \{(\mathbf{X}_i^b (t),\mathbf{f}_i)\}$ , where $N$ is the number of frames. + +Neural rendering. We now have two bidirectionally animated feature point clouds. 
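To make the displacement fields behind these two point clouds concrete, the sketch below Euler-integrates a constant flow field $M$ as in Eq. (3) and reuses the same routine with $-M$ for the backward field of the 3D symmetric animation. The tensor layout, the bilinear sampling via `grid_sample`, and the omission of the subsequent lift to 3D scene flow are simplifications for illustration, not the authors' implementation.

```python
import torch
import torch.nn.functional as F

def euler_integrate(M: torch.Tensor, n_steps: int) -> torch.Tensor:
    """M: (1, 2, H, W) per-frame flow in pixels (channel 0 = x, channel 1 = y).
    Returns the accumulated displacement field F_{0->n_steps}."""
    _, _, H, W = M.shape
    yy, xx = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    grid = torch.stack([xx, yy], dim=0).float()[None]   # (1, 2, H, W) pixel coordinates
    disp = torch.zeros_like(M)                          # F_{0->0} = 0
    for _ in range(n_steps):
        # Sample M at the displaced positions x_0 + F_{0->t-1}(x_0), as in Eq. (3).
        pos = grid + disp
        norm = torch.stack(
            [2 * pos[:, 0] / (W - 1) - 1,               # normalize x to [-1, 1]
             2 * pos[:, 1] / (H - 1) - 1],              # normalize y to [-1, 1]
            dim=-1,
        )
        disp = disp + F.grid_sample(M, norm, align_corners=True)
    return disp

# Forward field from M and backward field from -M (3D symmetric animation):
# flow_fwd = euler_integrate(M, t)        # F_{0->t}
# flow_bwd = euler_integrate(-M, N - t)   # F_{0->t-N}
```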
Our final step is to render them into animated feature maps and composite the results for synthesizing novel views at time $t$ . In particu + +lar, given camera poses and intrinsics, we use a differentiable point-based renderer [66] to splat feature point clouds $\mathcal{P}_f(t) = \{(\mathbf{X}_i^f (t),\mathbf{f}_i)\}$ and $\mathcal{P}_b(t) = \{(\mathbf{X}_i^b (t),\mathbf{f}_i)\}$ separately into the target image plane. This process yields 2D feature maps $\mathbf{F}_f$ and $\mathbf{F}_b$ along with depth maps $\mathbf{D}_f$ , $\mathbf{D}_b$ and alpha maps $\alpha_{f},\alpha_{b}$ . Next, we wish to fuse $\mathbf{F}_f$ and $\mathbf{F}_b$ into one feature map $\mathbf{F}_t$ . Inspired by prior work [64], our intuition is three-fold: 1) to enable endless and seamless looping, we should assign the weight of the two feature maps based on time so as to guarantee that the first and last frame of the synthesized video are identical; 2) the weight map should favor pixel locations with smaller depth values, in the sense that it is impossible to see objects behind those objects closer to the eye; 3) to avoid missing regions as much as possible, we should greatly increase the contribution of those pixel locations that can fill in holes. With this in mind, we formulate the weight map as follows: + +$$ +\mathbf {W} _ {t} = \frac {\left(1 - \frac {t}{N}\right) \cdot \boldsymbol {\alpha} _ {f} \cdot e ^ {- \mathbf {D} _ {f}}}{\left(1 - \frac {t}{N}\right) \cdot \boldsymbol {\alpha} _ {f} \cdot e ^ {- \mathbf {D} _ {f}} + \frac {t}{N} \cdot \boldsymbol {\alpha} _ {b} \cdot e ^ {- \mathbf {D} _ {b}}}, \tag {4} +$$ + +where $N$ is the number of frames. Therefore, we can integrate $\mathbf{F}_f$ and $\mathbf{F}_b$ via: + +$$ +\mathbf {F} _ {t} = \mathbf {W} _ {t} \cdot \mathbf {F} _ {f} + (1 - \mathbf {W} _ {t}) \cdot \mathbf {F} _ {b}. \tag {5} +$$ + +We also obtain the merged depth map $\mathbf{D}_t$ : + +$$ +\mathbf {D} _ {t} = \mathbf {W} _ {t} \cdot \mathbf {D} _ {f} + (1 - \mathbf {W} _ {t}) \cdot \mathbf {D} _ {b}. \tag {6} +$$ + +Finally, we employ an image decoder network to map the 2D feature map $\mathbf{F}_t$ and depth map $\mathbf{D}_t$ to a novel view at time $t$ . Repeating this method, we are able to synthesize a realistic cinematograph with compelling parallax effects. + +# 3.5. Training + +This section describes our training scheme. In general, we train our image-to-image translation network, 2D feature extraction network, and image decoder network in a two-stage manner. + +Training dataset. We use the training set from Holynski et al. [19] as our training dataset. This dataset comprises short video clips of fluid motion that are extracted from longer stock-footage videos. We use the first frames of each video clip and the corresponding ground truth motion fields estimated by a pretrained optical flow network [60] as motion estimation pairs to train our motion estimation network. To develop animation ability, we randomly sample training data from fluid motion video clips. For novel view synthesis training, we require multi-view supervision of the same scene, which is not available in the training set. Instead, we use 3D Photo [52] to generate pseudo ground truth novel views for training. + +Two-stage training. Our model is trained in a two-stage manner. Specifically, we first train our motion estimation + +network using motion estimation pairs. 
To train the motion estimation network, we minimize GAN loss, GAN feature matching loss [49], and endpoint error as follows: + +$$ +\mathcal {L} _ {\text {M o t i o n}} = \mathcal {L} _ {\text {G A N}} + 1 0 \mathcal {L} _ {\text {F M}} + \mathcal {L} _ {\text {E P E}}. \tag {7} +$$ + +In the second stage, we freeze the motion estimation network and train the feature extraction network and image decoder network. Our model simultaneously learns to render novel views and animate scenes. For novel view synthesis, we set $t = 0$ and use pseudo ground truth novel views to supervise our model. We randomly sample target viewpoints of scenes and require the model to synthesize them. For animation, we train our model on training triplets (start frame, middle frame, end frame) sampled from fluid motion video clips. In particular, we render the middle frame from both directions using $F_{0\rightarrow t}$ and $F_{0\rightarrow t - N}$ without changing the camera poses and intrinsics. Besides GAN loss and GAN feature matching loss [49], we also enforce VGG perceptual loss [23, 73] and $l_{1}$ loss between synthesized and ground truth images. The overall loss is as follows: + +$$ +\mathcal {L} _ {\text {A n i m a t i o n}} = \mathcal {L} _ {G A N} + 1 0 \mathcal {L} _ {F M} + \mathcal {L} _ {l _ {1}} + \mathcal {L} _ {V G G}. \tag {8} +$$ + +# 4. Experiments + +# 4.1. Implementation Details + +Our motion estimator is a U-Net [48] based generator with 16 convolutional layers, and we replace Batch Normalization with SPADE [42]. For the feature extraction network and image decoder network, we follow the network architectures from Wang et al. [64]. We adopt the multi-scale discriminator used in SPADE [42] during training. + +Our model is trained using the Adam optimizer [24]. We conduct all experiments on a single NVIDIA GeForce RTX 3090 GPU. We train the motion estimation network for around $120k$ iterations with a batch size of 16. We set the generator learning rate to $5 \times 10^{-4}$ and the discriminator learning rate to $2 \times 10^{-3}$ . For the animation training stage, we train the feature extraction network and image decoder network for around $250k$ iterations with a learning rate starting at $1 \times 10^{-4}$ and then decaying exponentially. + +# 4.2. Baselines + +In principle, to evaluate our method, we are required to compare it against current state-of-the-art models. However, to our knowledge, we are the first to tackle the novel task of synthesizing a realistic cinematograph with compelling parallax effects from a single image. As a result, we cannot directly compare to previous works. Instead, we consider forming the following baselines to verify the superiority of our method: + +2D animation $\rightarrow$ novel view synthesis. One might consider 2D image animation $\rightarrow$ single-shot novel view synthesis: first employing a 2D image animation method, then + +Table 1. Quantitative comparisons against all baselines on the validation set from Holynski et al. [19]. The better approach favors higher PSNR and SSIM but lower LPIPS. The best performance is in bold. + +
| Method | PSNR↑ | SSIM↑ | LPIPS↓ |
| :--- | :---: | :---: | :---: |
| 2D Anim. [19] → NVS [52] | 21.12 | 0.633 | 0.286 |
| NVS [52] → 2D Anim. [19] | 21.97 | 0.697 | 0.276 |
| NVS [52] → 2D Anim. [19] + MA | 22.47 | 0.718 | 0.261 |
| Naive PC Anim. | 19.46 | 0.647 | 0.243 |
| Naive PC Anim. + 3DSA | 20.49 | 0.660 | 0.237 |
| **Ours** | **23.33** | **0.776** | **0.197** |
+ +a single-shot novel view synthesis method. Specifically, we first adopt a state-of-the-art image animation method [19] to produce an animated looping video. We then apply DPT [45] to estimate geometry and utilize 3D Photo [52] to generate novel views for each frame. + +Novel view synthesis $\rightarrow$ 2D animation. It also appears to be feasible that we first render novel views of scenes by 3D Photo [52] and then use the image animation method [19] to animate each viewpoint. Note that motion estimation should be performed for each frame as viewpoints have changed. However, we empirically find that this usually results in varying motion fields across the video. To mitigate this, we further propose using the moving average technique to smooth estimated motions for each frame. This results in novel view synthesis $\rightarrow$ 2D animation + MA. + +Naive point cloud animation. Intuitively, we may also consider directly unprojecting pixels into 3D space and subsequently moving and rendering the RGB point cloud. Specifically, given a single input image, we first predict the depth map using DPT [45] and estimate 2D optical flow. We then lift the pixels and optical flow into 3D space to form RGB point clouds and scene flow. Finally, we animate RGB point clouds over time according to the scene flow and project these point clouds into target viewpoints. This baseline also faces a similar issue: as time goes by, large holes gradually appear. One might also employ our 3D symmetric animation technique to further enhance this baseline, i.e., naive point cloud animation + 3DSA. + +# 4.3. Results + +Evaluation dataset. Since Holynski et al. [19] only provide a single image for each scene in the test set, we use the validation set from Holynski et al. [19] to evaluate our method and baselines. The validation set consists of 31 unique scenes with 162 samples of ground truth video clips captured by static cameras. + +Experimental setup. For evaluation, we render novel views of the ground truth videos in 4 different trajectories, resulting in 240 ground truth frames for each sample. This process does not involve inpainting, thus ground truth frames may contain holes. Only considering valid pixels when calculating metrics, we compare the predicted images + +Table 2. User study. Pairwise comparison results indicate that users prefer our method as more realistic and immersive. + +
| Comparison | Human preference |
| :--- | :---: |
| 2D Anim. [19] → NVS [52] / Ours | 12.5% / 87.5% |
| NVS [52] → 2D Anim. [19] / Ours | 3.9% / 96.1% |
| NVS [52] → 2D Anim. [19] + MA / Ours | 6.1% / 93.9% |
| Naive PC Anim. / Ours | 7.6% / 92.4% |
| Naive PC Anim. + 3DSA / Ours | 8.6% / 91.4% |
| 3D Photo [52] / Ours | 10.5% / 89.5% |
| Holynski et al. [19] / Ours | 29.9% / 70.1% |
+ +Table 3. Ablation study on each component of our method. + +
| | PSNR↑ | SSIM↑ | LPIPS↓ |
| :--- | :---: | :---: | :---: |
| w/o features | 21.50 | 0.674 | 0.228 |
| w/o inpainting | 22.86 | 0.763 | 0.216 |
| w/o 3D symmetric animation | 22.99 | 0.768 | 0.199 |
| Full model | 23.33 | 0.776 | 0.197 |
+ +with the ground truth frames at the same time and viewpoint. For a fair comparison, all methods utilize the depth maps estimated by DPT [45]. Since we focus on comparing rendering quality, all methods use ground truth optical flows, except that NVS $[52] \rightarrow 2\mathrm{D}$ Anim. [19] and NVS $[52] \rightarrow 2\mathrm{D}$ Anim. [19] + MA have to estimate optical flows for each frame apart from the first frame. We adopt PSNR, SSIM, and LPIPS [73] as our evaluation metrics. + +Quantitative comparisons. As shown in Table 1, our method outperforms all baselines across all metrics by a large margin. This result implies that our method achieves better perceptual quality and produces more realistic renderings, which demonstrates the superiority and effectiveness of our method. + +Qualitative comparisons. We showcase the visual comparisons in Fig. 4. One can observe that our method presents photorealistic results while other comparative baselines produce more or less visual artifacts. 2D Anim. [19] $\rightarrow$ NVS [52] intends to generate stripped flickering artifacts. This is because 2D Anim. [19] $\rightarrow$ NVS [52] predicts the depth map for each animated frame, leading to frequent changes in the 3D structure of the scene and inconsistent inpainting. NVS [52] $\rightarrow$ 2D Anim. [19] and NVS [52] $\rightarrow$ 2D Anim. [19] + MA show jelly-like effects as optical flow should be estimated for each novel view. This results in varying motion fields across the video and thus inconsistent animation. Although Naive PC Anim. and Naive PC Anim. + 3DSA also lift the workspace into 3D, they are often prone to produce noticeable holes inevitably. One reason for this is that they do not perform inpainting. Note that some artifacts are difficult to observe when only scanning static figures. + +Controllable animation. Our method is able to create 3D cinematographs from a single image automatically. Further, we show that our framework is also highly extensible. For example, we can involve masks and flow hints as extra in + +![](images/2b9a3b5024c703ad6a3950082d8e7607af5f5941a49d58059b4a31d7dd23adbb.jpg) + +![](images/4334070062cd3b6f3f61c4716f983ef356853d2f0c87bbdebda3acd11bb58f24.jpg) + +![](images/fa5cce382bbf43a37a88b618101d5ca4327f0c17d97b77d52c1b05d1eb0d96a0.jpg) + +![](images/a43c4c8a6d6d40a35c63778061a2597ce60ed6e21c39b83bae9c84489a4a2453.jpg) + +![](images/3b3d6ff5aef130dad9ac707fe69750ca678b5d366cde49ca651a5492e0252ded.jpg) + +![](images/8629ab41f0c9305323db490922edf6a4e47ba58c4307ccdbbf4ed438de1a7caf.jpg) + +![](images/651ff5c4abb3c8701b53eb6ed325a498989ab107ab1f61f94d7dcf2814b29bd4.jpg) +Figure 4. Qualitative comparisons against all baselines on the validation set from Holynski et al. [19]. Our method produces compelling results while other comparative alternatives suffer from visual artifacts. (a) 2D animation $[19] \rightarrow$ novel view synthesis [52], (b) novel view synthesis $[52] \rightarrow 2\mathrm{D}$ animation [19], (c) novel view synthesis $[52] \rightarrow 2\mathrm{D}$ animation $[19] +$ moving average, (d) naive point cloud animation, (e) naive point cloud animation $+3\mathrm{D}$ symmetric animation, (f) our method, and (g) pseudo ground truth. 
+ +![](images/331c677dbb65d9c8fcb4bd343646ea093a83117d483dd86880bdff02a28080be.jpg) + +![](images/9bdd6efad93b53201e3059bdb622697f8cdffc9b39353571ef3a60943de609c8.jpg) + +![](images/40ee3763d929b45715967b510caf5198dd53ef3dd243ee301bd4af3a6b2dfe69.jpg) + +![](images/58cf468364f685b31d3b6dc459e6806ee5bf50a14d606d81ff407aa0e4aa475b.jpg) + +![](images/101fdfc66c8e7d9d70c1b1878b53351507d8b82b79bc9d25898fcac07cb054bb.jpg) + +![](images/7da3049f12d0abc772d8572f44294c7bb2c968998b0b4142dbcafbfd9e767d1a.jpg) + +![](images/e3a2e79cca12412e125ac511395c5ac0c931e84fa031a1b4a2c001f842b53cc3.jpg) + +![](images/c7dae67b88b3dd693c188e1d35a4f93844c49e9bfc52632fe5e3999eef2772e4.jpg) + +![](images/907677b32cddcac887b489a70cb9d39cdbac34a0f1d53f82b35a8ac9442b405a.jpg) + +![](images/384c5b775982f07cf8871ad81a2107da7dc170eacb047dd6a682bca81edff17e.jpg) + +![](images/34793c795d25aff587522c040118594855e2b0d7894388abee62f1ff292d4184.jpg) + +![](images/802d9c0882d76105fca960886c7495e67b212f93ee128b99e8cf9b908e1cfb0b.jpg) + +![](images/2ae7891102eef16b2f71dff01c593fe55bd91fd679a83adce1cb01993d75e9e2.jpg) + +![](images/28ace664d6be5ce608c5751704199d8f3a9466423168636eaef72a15f9386770.jpg) + +![](images/6171cac29139afec89e9c37141a91e034a08b298fda2f7c0eaed46e54430c695.jpg) +Input +Figure 5. Controllable animation. By changing the masks and motion hints, our method can interactively control the animation. + +![](images/6512544ff9e2a4c89e35e5e0410b46730857ebefa3586d32891f5e604b655193.jpg) +Masks & motion hints + +![](images/438519e393db1e5206e2ed44c6a00dce4244e7b410324bda96632ea60ed34f4f.jpg) +Motion fields + +puts to augment our motion estimator. This brings two advantages: (1) more accurate flow estimation; (2) interactive and controllable animation. As shown in Fig. 5, we can control the animation of the scene by providing various masks and motion hints to obtain different motion fields. + +Generalizing on in-the-wild photos. To further demonstrate the generalization of our method, we also test our method on in-the-wild photos. We first create hemagraphs with camera motions on the test set from Holynski et al. [19], where, for each scene, only a single image is provided. We then select some online images at random to test our method. To accurately estimate motion fields, we provide masks and flow hints as extra inputs to our motion estimator. As shown in Fig. 6, our method produces reasonable results for in-the-wild inputs while other comparative + +alternatives yield visual artifacts or inconsistent animation. + +# 4.4. User Study + +We further conduct a user study to investigate how our method performs in the view of humans when compared with all baselines, 3D Photo [52], and Holynski et al. [19]. Specifically, we collect 50 photos from the test set of Holynski et al. [19] and the Internet. We use different approaches to generate videos with identical settings. During the study, we show each participant an input image and two animated videos generated by our method and a randomly selected approach in random order. 108 volunteers are invited to choose the method with better perceptual quality and realism, or none if it is hard to judge. We report the results in Table 2, which points out that our method surpasses alternative methods by a large margin in terms of the sense of reality and immersion. + +# 4.5. Ablation Study + +To validate the effect of each component, we conduct an ablation study on the validation set from Holynski et al. [19] and show the results in Table 3. 
One can observe: i) 3D symmetric animation technique matters because it allows us to leverage bidirectionally displaced point clouds to complement each other and feasibly fill in missing regions; ii) + +![](images/f5a81ab7b712b7b960749e52d83c52d407103fbd4cba6a5547a0585890164b7f.jpg) + +![](images/f15f25732e6450fed411f18299fa2ac6ecbc699da2e7d96cfb0faf5db0e8912f.jpg) + +![](images/a537a67f5067021e134728ef88014fc757aca63dbd184b9ce803199d7a572b23.jpg) + +![](images/f483a064c98f128c705ec1bc2c49ec7771c09ad5d6d3ff2748f7dfdf6c5faac3.jpg) + +![](images/1e9333e1c031bf1016858079d90cecb56c3910c4816f1f5dfbadc6eae3e00671.jpg) + +![](images/35fe3eb972a1809130e0d10285fae79b16cfae42c84a92aa0e99afc7e9e6d151.jpg) + +![](images/686417ad0b34fe4e82290b22a0e1390705cdca8f1343846e18101e735e69e82b.jpg) + +![](images/dcf5994d108ca3af8318dd9cc05c36f7662a96438da8eda2332971c021c5c1a5.jpg) + +![](images/14082932986a7cd6976251e28ce1a025d3f3ceb34249bedd0533c9af6210f144.jpg) + +![](images/775feb0956c44f1ecb7d5ede298bf1672b0ee9545e39db49209de96c71c87d05.jpg) + +![](images/4c808cd46b96d136f16ed5f2968847e9994cd67834dccc178c612fc00d07b773.jpg) + +![](images/dcfb77c70003a8e89315db6e2dcfb87597df19aba88455302bbe198b0bec233b.jpg) + +![](images/6a531b7a7972d6c3ea66dcb96e2e9eae2e9490a239bd9485594f1196377c4c03.jpg) + +![](images/bcb1951dc15289fde8623f0ec65bfd45bfa6221e67742d9aa58aea42471f36da.jpg) + +![](images/c2570be568c72ca139a833bfa69595dcb84907a38a2e3c8bbcb572f90d24ec02.jpg) + +![](images/d4b52e205f17b8e3e3f0f48959728534632f8588920125c9a895982d9acf98b7.jpg) +Input + +![](images/62b672b63224ab57a02712918e31b9ad7cbf7e898bbf05728e8068d8b69d1f60.jpg) +2D Anim. $\rightarrow$ NVS + +![](images/76439cae103eb89c3da25766dd92b3002d79d9ee8b1183b89e558399ea883dcb.jpg) +$\mathrm{NVS}\rightarrow 2\mathrm{D}$ Anim. $^+$ MA + +![](images/a0b8de3a8fac3435b38d3675b9502a511377a4f0588daf62921d5ec67ea165a9.jpg) +Naive PC Anim. + 3DSA +Figure 6. Visual comparisons on the test set from Holynski et al. [19] and in-the-wild photos. Our method consistently produces more realistic rendering with fewer visual artifacts as opposed to other baselines. + +![](images/bf29a9be76599a0ec19a90418f4e817bc7da3c7e836f8d67fd5f6c4c015ca424.jpg) +Ours + +introducing inpainting when constructing 3D geometry can improve the performance as this allows our model to produce plausible structures around depth discontinuities and fill in holes; iii) switching from directly using RGB colors to features in 3D scene representation significantly improves the rendering quality and reduces artifacts. + +# 5. Conclusion + +In this paper, we introduce a novel task of creating 3D cinematographs from single images. To this end, we present a simple yet effective method that makes a connection between image animation and novel view synthesis. We show that our method produces plausible animation of the scene while allowing camera movements. Our framework is flexible and customized. For accurate motion estimation and controllable animation, we can further include masks and flow hints as extra input for the motion estimator. Therefore, users can control how the scene is animated. Furthermore, our method generalizes well to in-the-wild photos, even like paintings or synthetic images generated by diffusion models. We conduct extensive experiments to ver + +ify the effectiveness and superiority of our method. A user study also demonstrates that our method generates realistic 3D cinematographs. 
We hope that our work can bring 3D cinematography into the sight of a broader community and motivate further research. + +Limitations and future work. Our method may not work well when the depth prediction module estimates erroneous geometry from the input image, e.g., thin structures. In addition, inappropriate motion fields will sometimes lead to undesirable results, e.g., some regions are mistakenly identified as frozen. As we take the first step towards 3D cinematography, in this paper, we focus on handling common moving elements, i.e., fluids. In other words, our method may not apply to more complex motions, e.g., cyclic motion. We leave this for our future work. + +Acknowledgements. This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s). This work is also supported by Adobe Gift and the Ministry of Education, Singapore, under its Academic Research Fund Tier 2 (MOE-T2EP20220-0007) and Tier 1 (RG14/22). + +# References + +[1] Jiamin Bai, Aseem Agarwala, Maneesh Agrawala, and Ravi Ramamoorthi. Automatic cinemagraph portraits. In Computer Graphics Forum, volume 32, pages 17-25. Wiley Online Library, 2013. 1 +[2] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5366-5375, 2020. 3 +[3] Wenbo Bao, Wei-Sheng Lai, Chao Ma, Xiaoyun Zhang, Zhiyong Gao, and Ming-Hsuan Yang. Depth-aware video frame interpolation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3703-3712, 2019. 2, 4 +[4] Mojtaba Bemana, Karol Myszkowski, Hans-Peter Seidel, and Tobias Ritschel. X-Fields: Implicit neural view-, light- and time-image interpolation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020. 3 +[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 2 +[6] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2 +[7] Caroline Chan, Shiry Ginosar, Tinghui Zhou, and Alexei A Efros. Everybody dance now. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5933-5942, 2019. 2 +[8] Yung-Yu Chuang, Dan B Goldman, Ke Colin Zheng, Brian Curless, David H Salesin, and Richard Szeliski. Animating pictures with stochastic motion textures. ACM Transactions on Graphics (TOG), 24(3):853-860, 2005. 2 +[9] Paul E Debevec, Camillo J Taylor, and Jitendra Malik. Modeling and rendering architecture from photographs: A hybrid geometry-and image-based approach. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 11-20, 1996. 2 +[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. FlowNet: Learning optical flow with convolutional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 2758-2766, 2015. 3 +[11] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. 
HeadGAN: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14398-14407, 2021. 2 +[12] Yuki Endo, Yoshihiro Kanamori, and Shigeru Kuriyama. Animating Landscape: Self-supervised learning of decoupled motion and appearance for single-image video synthesis. ACM Transactions on Graphics (Proceedings of ACM + +SIGGRAPH Asia 2019), 38(6):175:1-175:19, 2019. 1, 2, 3, 4 +[13] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2 +[14] Steven J Gortler, Radek Grzeszcuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 43-54, 1996. 2 +[15] Yuxuan Han, Ruicheng Wang, and Jiaolong Yang. Single-view synthesis in the wild with learned adaptive multiplane images. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022. 2 +[16] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen Video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2 +[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[18] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 2 +[19] Aleksander Holynski, Brian L. Curless, Steven M. Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5810-5819, 2021. 1, 2, 3, 4, 5, 6, 7, 8 +[20] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. FlowNet 2.0: Evolution of optical flow estimation with deep networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2462-2470, 2017. 3 +[21] Varun Jampani, Huiwen Chang, Kyle Sargent, Abhishek Kar, Richard Tucker, Michael Krainin, Dominik Kaeser, William T Freeman, David Salesin, Brian Curless, and Ce Liu. SLIDE: Single image 3d photography with soft layering and depth-aware inpainting. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2 +[22] Wei-Cih Jhou and Wen-Huang Cheng. Animating still landscape photographs through cloud motion creation. IEEE Transactions on Multimedia, 18(1):4-13, 2015. 2 +[23] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 694–711. Springer, 2016. 5 +[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5 +[25] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 2 +[26] Jiaxin Li, Zijian Feng, Qi She, Henghui Ding, Changhu Wang, and Gim Hee Lee. MINE: Towards continuous depth spi with nerf for novel view synthesis. In Proceedings of + +the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12578-12588, 2021. 
2 +[27] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5521-5531, 2022. 3 +[28] Xingyi Li, Chaoyi Hong, Yiran Wang, Zhiguo Cao, Ke Xian, and Guosheng Lin. Symmnerf: Learning to explore symmetry prior for single-view view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), pages 1726-1742, 2022. 2 +[29] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Flow-grounded spatial-temporal video prediction from still images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 600-615, 2018. 2, 4 +[30] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 3 +[31] Chieh Hubert Lin, Yen-Chi Cheng, Hsin-Ying Lee, Sergey Tulyakov, and Ming-Hsuan Yang. InfinityGAN: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2 +[32] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14458-14467, 2021. 2 +[33] Wen Liu, Zhixin Piao, Jie Min, Wenhan Luo, Lin Ma, and Shenghua Gao. Liquid Warping GAN: A unified framework for human motion imitation, appearance transfer and novel view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5904-5913, 2019. 2 +[34] Elizaveta Logacheva, Roman Suvorov, Oleg Khomenko, Anton Mashikhin, and Victor Lempitsky. DeepLandscape: Adversarial modeling of landscape videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 256-272. Springer, 2020. 2 +[35] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3667-3676, 2022. 1, 2 +[36] Oded Maimon and Lior Rokach. Data mining and knowledge discovery handbook. 2005. 4 +[37] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 2 +[38] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of the IEEE/CVF + +Conference on Computer Vision and Pattern Recognition (CVPR), pages 5437-5446, 2020. 2, 4 +[39] Simon Niklaus, Long Mai, Jimei Yang, and Feng Liu. 3dken burns effect from a single image. ACM Transactions on Graphics (ToG), 38(6):1-15, 2019. 2 +[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2 +[41] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3 +[42] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2337-2346, 2019. 5 +[43] Juewen Peng, Jianming Zhang, Xianrui Luo, Hao Lu, Ke Xian, and Zhiguo Cao. Mpib: An mpi-based bokeh rendering framework for realistic partial occlusion effects. In Proceedings of the European Conference on Computer Vision (ECCV), pages 590-607. Springer, 2022. 2 +[44] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 3 +[45] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12179-12188, 2021. 2, 4, 6 +[46] Yurui Ren, Xiaoming Yu, Junming Chen, Thomas H Li, and Ge Li. Deep image spatial transformation for person image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7690-7699, 2020. 2 +[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 1 +[48] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI), pages 234–241. Springer, 2015. 5 +[49] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Advances in Neural Information Processing Systems (NeurIPS), 2016. 5 +[50] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 231–242, 1998. 2, 3, 4 + +[51] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4570-4580, 2019. 2 +[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4, 5, 6, 7 +[53] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. Animating arbitrary objects via deep motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2377-2386, 2019. 2 +[54] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2 +[55] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3408-3416, 2018. 
2 +[56] Aliaksandr Siarohin, Oliver J Woodford, Jian Ren, Mengei Chai, and Sergey Tulyakov. Motion representations for articulated animation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13653-13662, 2021. 2 +[57] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-A-Video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.2 +[58] Vincent Sitzmann, Michael Zollhoefer, and Gordon Wetzstein. Scene Representation Networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2 +[59] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning (ICML), pages 2256-2265. PMLR, 2015. 2 +[60] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. PWC-Net: Cnns for optical flow using pyramid, warping, and cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934–8943, 2018. 3, 5 +[61] Zachary Teed and Jia Deng. RAFT: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419, 2020. 3 +[62] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 551-560, 2020. 2 + +[63] Shubham Tulsiani, Richard Tucker, and Noah Snavely. Layer-structured 3d scene inference via view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), pages 302–317, 2018. 2 +[64] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 4, 5 +[65] Yiran Wang, Zhiyu Pan, Xingyi Li, Zhiguo Cao, Ke Xian, and Jianming Zhang. Less is more: Consistent video depth estimation with masked frames modeling. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), pages 6347-6358, 2022. 2 +[66] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. SynSin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7467-7477, 2020. 2, 5 +[67] Ke Xian, Chunhua Shen, Zhiguo Cao, Hao Lu, Yang Xiao, Ruibo Li, and Zhenbo Luo. Monocular relative depth perception with web stereo data supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 311-320, 2018. 2 +[68] Ke Xian, Jianming Zhang, Oliver Wang, Long Mai, Zhe Lin, and Zhiguo Cao. Structure-guided ranking loss for single image depth prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 611-620, 2020. 2 +[69] Wei Xiong, Wenhan Luo, Lin Ma, Wei Liu, and Jiebo Luo. Learning to generate time-lapse videos using multi-stage dynamic generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2364-2373, 2018. 2 +[70] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Humphrey Shi, and Zhangyang Wang. SinNeRF: Training neural radiance fields on complex scenes from a single image. 
In Proceedings of the European Conference on Computer Vision (ECCV), pages 736-753. Springer, 2022. 2 +[71] Hang Yan, Yebin Liu, and Yasutaka Furukawa. Turning an urban scene video into a cinematograph. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 394-402, 2017. 1 +[72] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4578-4587, 2021. 2 +[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 586-595, 2018. 5, 6 \ No newline at end of file diff --git a/2023/3D Cinemagraphy From a Single Image/images.zip b/2023/3D Cinemagraphy From a Single Image/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..cb46c885f218a0a87ac23d7274708ab06d542400 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7fe37368dec3ae760245ffabd54c837dd2bec49baa04bf82be20d57a43bb877 +size 831519 diff --git a/2023/3D Cinemagraphy From a Single Image/layout.json b/2023/3D Cinemagraphy From a Single Image/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ec93b6d736c688b4aa01bfff53eda64985869d91 --- /dev/null +++ b/2023/3D Cinemagraphy From a Single Image/layout.json @@ -0,0 +1,10810 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 175, + 103, + 419, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 103, + 419, + 121 + ], + "spans": [ + { + "bbox": [ + 175, + 103, + 419, + 121 + ], + "type": "text", + "content": "3D Cinemagraphy from a Single Image" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "spans": [ + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": "Xingyi Li" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": " Zhiguo Cao" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": " Huiqiang Sun" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": " Jianming Zhang" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": " Ke Xian" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": " Guosheng Lin" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": "Key Laboratory of Image Processing and 
Intelligent Control, Ministry of Education School of Artificial Intelligence and Automation, Huazhong University of Science and Technology \n" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": "Adobe Research " + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 58, + 141, + 534, + 228 + ], + "type": "text", + "content": "S-Lab, Nanyang Technological University \n{xingyi.li, zgcao, shq1031}@hust.edu.cn, jianmzha@adobe.com, {ke.xian, gslin}@ntu.edu.sg \nhttps://xingyi-li.github.io/3d-cinemagraphy" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 49, + 249, + 178, + 335 + ], + "blocks": [ + { + "bbox": [ + 49, + 249, + 178, + 335 + ], + "lines": [ + { + "bbox": [ + 49, + 249, + 178, + 335 + ], + "spans": [ + { + "bbox": [ + 49, + 249, + 178, + 335 + ], + "type": "image", + "image_path": "77e45a8a8ae99f7e5122bf60ba50833109c19c4ad4f2de0de9320aca1eab8f4b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 343, + 546, + 389 + ], + "lines": [ + { + "bbox": [ + 46, + 343, + 546, + 389 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 546, + 389 + ], + "type": "text", + "content": "Figure 1. Given a single still image, our method can synthesize videos with plausible animation of the scene while allowing camera movements. Here, we showcase four 3D cinematographs with various camera trajectories. Besides real-world photos (the left two examples), our method can also generalize to paintings (the third one) and synthetic images generated by Stable Diffusion [47] (the rightmost one). To see the effect of 3D cinematography, readers are encouraged to view with Adobe Acrobat or KDE Okular." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 179, + 250, + 309, + 335 + ], + "blocks": [ + { + "bbox": [ + 179, + 250, + 309, + 335 + ], + "lines": [ + { + "bbox": [ + 179, + 250, + 309, + 335 + ], + "spans": [ + { + "bbox": [ + 179, + 250, + 309, + 335 + ], + "type": "image", + "image_path": "afaecbba82dd6bd57e0f587e50178780a6b79bc215a6dbcec1887dd14bf9fa3b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 250, + 454, + 335 + ], + "blocks": [ + { + "bbox": [ + 310, + 250, + 454, + 335 + ], + "lines": [ + { + "bbox": [ + 310, + 250, + 454, + 335 + ], + "spans": [ + { + "bbox": [ + 310, + 250, + 454, + 335 + ], + "type": "image", + "image_path": "812c489801e67547cea0e4abdc1db516b991152c6dc20e97d1f533d9d77e7022.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 455, + 250, + 544, + 335 + ], + "blocks": [ + { + "bbox": [ + 455, + 250, + 544, + 335 + ], + "lines": [ + { + "bbox": [ + 455, + 250, + 544, + 335 + ], + "spans": [ + { + "bbox": [ + 455, + 250, + 544, + 335 + ], + "type": "image", + "image_path": "9e76c7973b4c0d6ac5f0c7109de2752b00aa644105db1a7c8f97243477dba918.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 406, + 192, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 406, + 192, + 418 + ], + "spans": [ + { + "bbox": [ + 143, + 406, + 192, + 418 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 431, + 289, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 431, + 289, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 431, + 289, + 684 + ], + "type": "text", + "content": "We present 3D Cinemagography, a new technique that marries 2D image animation with 3D photography. Given a single still image as input, our goal is to generate a video that contains both visual content animation and camera motion. We empirically find that naively combining existing 2D image animation and 3D photography methods leads to obvious artifacts or inconsistent animation. Our key insight is that representing and animating the scene in 3D space offers a natural solution to this task. To this end, we first convert the input image into feature-based layered depth images using predicted depth values, followed by unprojecting them to a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion into the 3D scene flow. Finally, to resolve the problem of hole emergence as points move forward, we propose to bidirectionally displace the point cloud as per the scene flow and synthesize novel views by separately projecting them into target image planes and blending the results. Extensive experiments demonstrate the effectiveness of our method. A user study is also conducted to validate the compelling rendering results of our method." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 406, + 386, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 406, + 386, + 418 + ], + "spans": [ + { + "bbox": [ + 307, + 406, + 386, + 418 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 426, + 545, + 641 + ], + "type": "text", + "content": "Nowadays, since people can easily take images using smartphone cameras, the number of online photos has increased drastically. However, with the rise of online video-sharing platforms such as YouTube and TikTok, people are no longer content with static images as they have grown accustomed to watching videos. It would be great if we could animate those still images and synthesize videos for a better experience. These living images, termed cinematographs, have already been created and gained rapid popularity online [1, 71]. Although cinematographs may engage people with the content for longer than a regular photo, they usually fail to deliver an immersive sense of 3D to audiences. This is because cinematographs are usually based on a static camera and fail to produce parallax effects. We are therefore motivated to explore ways of animating the photos and moving around the cameras at the same time. As shown in Fig. 1, this will bring many still images to life and provide a drastically vivid experience." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "In this paper, we are interested in making the first step towards 3D cinematography that allows both realistic animation of the scene and camera motions with compelling parallax effects from a single image. There are plenty of attempts to tackle either of the two problems. Single-image animation methods [12, 19, 35] manage to produce a real-" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "4595" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 228 + ], + "type": "text", + "content": "istic animated video from a single image, but they usually operate in 2D space, and therefore they cannot create camera movement effects. Classic novel view synthesis methods [5, 6, 9, 14, 25] and recent implicit neural representations [37, 40, 58] entail densely captured views as input to render unseen camera perspectives. Single-shot novel view synthesis approaches [21, 39, 52, 66] exhibit the potential for generating novel camera trajectories of the scene from a single image. Nonetheless, these methods usually hypothesize that the observed scene is static without moving elements. Directly combining existing state-of-the-art solutions of single-image animation and novel view synthesis yields visual artifacts or inconsistent animation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 228, + 289, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 228, + 289, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 289, + 514 + ], + "type": "text", + "content": "To address the above challenges, we present a novel framework that solves the joint task of image animation and novel view synthesis. This framework can be trained to create 3D cinematographs from a single still image. Our key intuition is that handling this new task in 3D space would naturally enable both animation and moving cameras simultaneously. With this in mind, we first represent the scene as feature-based layered depth images (LDIs) [50] and unproject the feature LDIs into a feature point cloud. To animate the scene, we perform motion estimation and lift the 2D motion to 3D scene flow using depth values predicted by DPT [45]. Next, we animate the point cloud according to the scene flow. To resolve the problem of hole emergence as points move forward, we are inspired by prior works [3, 19, 38] and propose a 3D symmetric animation technique to bidirectionally displace point clouds, which can effectively fill in those unknown regions. Finally, we synthesize novel views at time " + }, + { + "bbox": [ + 46, + 228, + 289, + 514 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 228, + 289, + 514 + ], + "type": "text", + "content": " by rendering point clouds into target image planes and blending the results. In this manner, our proposed method can automatically create 3D cinematographs from a single image. Moreover, our framework is highly extensible, e.g., we can augment our motion estimator with user-defined masks and flow hints for accurate flow estimation and controllable animation." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 515, + 223, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 515, + 223, + 526 + ], + "spans": [ + { + "bbox": [ + 59, + 515, + 223, + 526 + ], + "type": "text", + "content": "In summary, our main contributions are:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 534, + 287, + 658 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 59, + 534, + 287, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 534, + 287, + 583 + ], + "spans": [ + { + "bbox": [ + 59, + 534, + 287, + 583 + ], + "type": "text", + "content": "- We propose a new task of creating 3D cinematographs from single images. To this end, we propose a novel framework that jointly learns to solve the task of image animation and novel view synthesis in 3D space." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 590, + 287, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 590, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 59, + 590, + 287, + 615 + ], + "type": "text", + "content": "- We design a 3D symmetric animation technique to address the hole problem as points move forward." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 622, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 622, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 59, + 622, + 287, + 658 + ], + "type": "text", + "content": "- Our framework is flexible and customized. We can achieve controllable animation by augmenting our motion estimator with user-defined masks and flow hints." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 669, + 134, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 134, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 134, + 681 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 715 + ], + "type": "text", + "content": "Single-image animation. Different kinds of methods have been explored to animate still images. Some works [8, 22]" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 547, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 408 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 408 + ], + "type": "text", + "content": "focus on animating certain objects via physical simulation but may not be easily applied to more general cases of inthe-wild photos. Given driving videos as guidance, there are plenty of methods that attempt to perform motion transfer on static objects with either a priori knowledge of moving objects [7, 11, 33, 46, 55] or in an unsupervised manner [53, 54, 56]. They entail reference videos to drive the motion of static objects, and thus do not suit our task. Recent advances in generative models have attracted much attention and motivated the community to develop realistic image and video synthesis methods. Many works [31, 32, 34, 51, 69] are based on generative adversarial networks (GANs) and operate transformations in latent space to generate plausible appearance changes and movements. 
Nonetheless, it is non-trial to allow for explicit control over those latent codes and to animate input imagery in a disentangled manner. As diffusion models [17, 59] improve by leaps and bounds, several diffusion-based works [16, 18, 57] attempt to generate realistic videos from text or images. However, these methods are time-consuming and expensive in terms of computation. Here we focus on methods that utilize learned motion priors to convert a still image into an animated video texture [12, 13, 19, 29, 35]. In particular, Holynski et al. [19] first synthesize the optical flow of the input image via a motion estimation network, then obtain future frames using the estimated flow field. This method renders plausible animation of fluid elements in the input image but suffers from producing camera motions with parallax." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "type": "text", + "content": "Novel view synthesis from a single image. Novel view synthesis allows for rendering unseen camera perspectives from 2D images and their corresponding camera poses. Recent impressive synthesis results may credit to implicit neural representations [37, 40, 58]. Nevertheless, these methods usually assume dense views as input, which is not always available in most cases. Moreover, they focus on the task of interpolation given multiple views rather than extrapolation. As such, we instead turn to methods aiming at handling single input. Among them, a number of works [15, 26, 28, 62, 63, 70, 72] infer the 3D structure of scenes by learning to predict a scene representation from a single image. These methods are usually trained end-to-end but suffer from generalizing to in-the-wild photos. Most relevant to our work are those approaches [39, 52, 66] that apply depth estimation [45, 65, 67, 68] followed by inpainting occluded regions. For example, 3D Photo [52] estimates monocular depth maps and uses the representation of layered depth images (LDIs) [43, 50], in which context-aware color and depth inpainting are performed. To enable fine-grained detail modeling, SLIDE [21] decomposes the scene into foreground and background via a soft-layering scheme. 
However, unlike our approach, these methods usually assume the scene is static by default, which largely lessens the sense of reality, especially when some elements such as" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4596" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 545, + 223 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 545, + 223 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 545, + 223 + ], + "type": "image", + "image_path": "6c4949f4da65374cd84740c97a37b56fee1a8822d40c3c37efaec7f81ec19bb8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "lines": [ + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": "Figure 2. An overview of our method. Given a single still image as input, we first predict a dense depth map. To represent the scene in 3D space, we separate the input image into several layers according to depth discontinuities and apply context-aware inpainting, yielding layered depth images (LDIs) " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": ". We then use a 2D feature extractor to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": ". Subsequently, we lift feature LDIs into 3D space using corresponding depth values to obtain a feature point cloud " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": ". To animate the scene, we estimate a 2D motion field from the input image and apply Euler integration to generate forward and backward displacement fields " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t - N}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": ". We then augment displacement fields with estimated depth values to obtain 3D scene flow fields. 
Next, we bidirectionally displace the feature point cloud " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": " as per the scene flow and separately project them into target image planes to obtain " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_f" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_b" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": ". Finally, we blend them together and pass the result through our image decoder to synthesize a novel view at time " + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 234, + 547, + 323 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 343, + 265, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 343, + 265, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 343, + 265, + 355 + ], + "type": "text", + "content": "a creek or smoke are also captured in the input image." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 355, + 289, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 355, + 289, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 289, + 582 + ], + "type": "text", + "content": "Space-time view synthesis. Space-time view synthesis is the task of rendering novel camera perspectives for dynamic scenes in terms of space and time [30]. Most of the prior works [2, 4, 27] rely on synchronized multi-view videos as input, which prevents their wide applicability. To mitigate this requirement, many neural rendering approaches [30, 41, 44] manage to show promising space-time view synthesis results from monocular videos. They usually train each new scene independently, and thus cannot directly handle in-the-wild inputs. Most related to our work, 3D Moments [64] introduces a novel 3D photography effect where cinematic camera motion and frame interpolation are simultaneously performed. However, this method demands near-duplicate photos as input and is unable to control the animation results. Instead, we show that our method can animate still images while enabling camera motion with 3D parallax. Moreover, we can also extend our system so that users are allowed to interactively control how the photos are animated by providing user-defined masks and flow hints." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 591, + 104, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 591, + 104, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 591, + 104, + 604 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 611, + 115, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 115, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 115, + 623 + ], + "type": "text", + "content": "3.1. 
Overview" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 715 + ], + "type": "text", + "content": "Given a single still image, our goal is to synthesize plausible animation of the scene and simultaneously enable camera motion. The output of our method is a realistic cinematograph with compelling parallax effects. Fig. 2 schematically illustrates our pipeline. Our method starts by estimating a motion field and a depth map from the input image. We then separate the RGBD input into several layers" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 343, + 547, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 343, + 547, + 462 + ], + "spans": [ + { + "bbox": [ + 304, + 343, + 547, + 462 + ], + "type": "text", + "content": "as per depth discontinuities and inpaint occluded regions, followed by extracting 2D feature maps for each layer, resulting in feature LDIs [50]. To enable scene animation, we lift the 2D motion to 3D scene flow and unproject feature LDIs into a feature point cloud using their corresponding depth values. Thereafter, we bidirectionally animate the point cloud with scene flow using our 3D symmetric animation technique. We end up rendering them into two animated feature maps and composite the results to synthesize novel views at time " + }, + { + "bbox": [ + 304, + 343, + 547, + 462 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 343, + 547, + 462 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 475, + 417, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 475, + 417, + 488 + ], + "spans": [ + { + "bbox": [ + 306, + 475, + 417, + 488 + ], + "type": "text", + "content": "3.2. Motion Estimation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 496, + 547, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 547, + 689 + ], + "type": "text", + "content": "To animate a still image, we wish to estimate the corresponding motion field for the observed scene. Generally, the motion we witness in the real world is extremely complicated as it is time-varying and many events such as occlusion and collision could occur. Intuitively, we could directly adopt prior optical flow estimation methods [10, 20, 60, 61] to accomplish this. However, it is not trivial since they usually take a pair of images as input to compute optical flow. Endo et al. [12] instead propose to learn and predict the motion in a recurrent manner, but this kind of approach is prone to large distortions in the long term. To simplify this, we follow Holynski et al. [19] and assume that a time-invariant and constant-velocity motion field, termed Eulerian flow field, can well approximate the bulk of real-world motions, e.g., water, smoke, and clouds. 
Formally, we denote " + }, + { + "bbox": [ + 304, + 496, + 547, + 689 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 496, + 547, + 689 + ], + "type": "text", + "content": " as the Eulerian flow field of the scene, which suggests that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 384, + 700, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 700, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 384, + 700, + 545, + 714 + ], + "type": "interline_equation", + "content": "F _ {t \\rightarrow t + 1} (\\cdot) = M (\\cdot), \\tag {1}", + "image_path": "613a7798432150280678977af35d7ea0ab74e78d62142229cfffc2314b9c41bc.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 758 + ], + "type": "text", + "content": "4597" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "F_{t\\rightarrow t + 1}(\\cdot)" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " represents the optical flow map from frame " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " to frame " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "t + 1" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": ". This defines how each pixel in the current frame will move in the future. Specifically, we can obtain the next frame via Euler integration:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 122, + 132, + 287, + 144 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 132, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 122, + 132, + 287, + 144 + ], + "type": "interline_equation", + "content": "\\mathbf {x} _ {t + 1} = \\mathbf {x} _ {t} + M (\\mathbf {x} _ {t}), \\tag {2}", + "image_path": "180894f32f54d208cb51a282252ad1414b592e7a2dab6fb4d75e39367eb3ec86.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "spans": [ + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "text", + "content": " represents the coordinates of a pixel " + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_t" + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 155, + 287, + 202 + ], + "type": "text", + "content": ". 
Since the optical flow between consecutive frames is identical, we can easily deduce the displacement field by recursively applying:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 213, + 287, + 227 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 213, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 54, + 213, + 287, + 227 + ], + "type": "interline_equation", + "content": "F _ {0 \\rightarrow t} (\\mathbf {x} _ {0}) = F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0}) + M (\\mathbf {x} _ {0} + F _ {0 \\rightarrow t - 1} (\\mathbf {x} _ {0})), \\tag {3}", + "image_path": "0588c5393580ec0d897782980525908baf93d7456368c4fe8d374417df201a1b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t}(\\cdot)" + }, + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "text", + "content": " denotes the displacement field from time 0 to time " + }, + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 236, + 287, + 308 + ], + "type": "text", + "content": ", which describes the course of each pixel in the input image across future frames. To estimate the Eulerian flow field, we adopt an image-to-image translation network as our motion estimator, which is able to map an RGB image to the optical flow." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 318, + 186, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 318, + 186, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 318, + 186, + 331 + ], + "type": "text", + "content": "3.3. 3D Scene Representation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 337, + 287, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 337, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 287, + 409 + ], + "type": "text", + "content": "One common disadvantage of previous single-image animation methods [12, 19, 29] is that they usually operate in 2D space via a deep image warping technique, which prevents them from creating parallax effects. Instead, to enable camera motion, we propose to lift our workspace into 3D and thus resort to 3D scene representation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": "We start by estimating the underlying geometry of the scene using the state-of-the-art monocular depth estimator DPT [45], which can predict reasonable dense depth maps for in-the-wild photos. Following Wang et al. [64], we then convert the RGBD input into an LDI representation [50] by separating it into several layers as per depth discontinuities and inpainting occluded regions. 
Specifically, we first divide the depth range of the source depth map into multiple intervals using agglomerative clustering [36], followed by creating layered depth images " + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = \\{\\mathbf{C}_l,\\mathbf{D}_l\\}_{l = 1}^L" + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": ". Next, we inpaint occluded regions of each color and depth layer by applying the pretrained inpainting model from 3D Photo [52]. To improve rendering quality and reduce artifacts, we also introduce a 2D feature extraction network to encode 2D feature maps for each inpainted LDI color layer, resulting in feature LDIs " + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{F} = \\{\\mathbf{F}_l,\\mathbf{D}_l\\}_{l = 1}^L" + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": ". Finally, in order to enable animation in 3D space, we unproject feature LDIs into 3D via their corresponding inpainted depth layers, yielding a feature point cloud " + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = \\{(\\mathbf{X}_i,\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_i" + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i" + }, + { + "bbox": [ + 46, + 410, + 287, + 661 + ], + "type": "text", + "content": " are 3D coordinates and the feature vector for each 3D point respectively." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 670, + 251, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 251, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 251, + 683 + ], + "type": "text", + "content": "3.4. Point Cloud Animation and Rendering" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "We now have the estimated displacement fields " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": " and the feature point cloud " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": ". 
Our next step is to animate this" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 314, + 72, + 539, + 207 + ], + "blocks": [ + { + "bbox": [ + 314, + 72, + 539, + 207 + ], + "lines": [ + { + "bbox": [ + 314, + 72, + 539, + 207 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 539, + 207 + ], + "type": "image", + "image_path": "4420e0d6b8fcb14bf3c3b500c14fac36e9318d4891d4d320681be1555ce24c18.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 217, + 545, + 262 + ], + "lines": [ + { + "bbox": [ + 305, + 217, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 305, + 217, + 545, + 262 + ], + "type": "text", + "content": "Figure 3. 3D symmetric animation. To address the hole issue, we borrow textural information from the point cloud that moves in the opposite direction and integrate both of the animated point clouds to feasibly fill in the missing regions (the red and blue regions)." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "text", + "content": "point cloud over time. To bridge the gap between 2D displacement fields and 3D scene representation, we first augment the displacement fields with estimated depth values to lift them into 3D scene flow. In other words, we now have a function of time " + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "text", + "content": " and the coordinates of a 3D point that returns a corresponding 3D translation vector that can shift this 3D point accordingly. Thus, for time " + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "text", + "content": ", we then move each 3D point by computing its destination as its original position plus a corresponding 3D translation vector, i.e., " + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{P}(t) = \\{(\\mathbf{X}_i(t),\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 304, + 277, + 545, + 456 + ], + "type": "text", + "content": ". Intuitively, this process indeed animates the point cloud from one time to another. However, we empirically find that as points move forward, increasingly large holes emerge. This frequently happens when points leave their original locations without any points filling in those unknown regions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": "3D symmetric animation. To resolve this, inspired by prior works [3, 19, 38], we propose a 3D symmetric animation technique that leverages bidirectionally displaced point clouds to complement each other. With 3D symmetric animation, we can borrow textural information from point clouds that move in the opposite direction and integrate both of the animated point clouds to feasibly fill in missing regions. 
Specifically, we directly replace the original Eulerian flow field " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "-M" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " and recursively apply Eq. (3) to generate a reversed displacement field. Similarly, we then lift this 2D displacement field to obtain inverse scene flow, which is employed to produce point clouds with backward movements. As illustrated in Fig. 3, for time " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": ", to fill in holes, we respectively apply " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t}" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t - N}" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " to draw associated scene flow fields and use them to move the point cloud, resulting in " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 459, + 545, + 663 + ], + "type": "text", + "content": " is the number of frames." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Neural rendering. We now have two bidirectionally animated feature point clouds. Our final step is to render them into animated feature maps and composite the results for synthesizing novel views at time " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": ". 
In particu" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4598" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": "lar, given camera poses and intrinsics, we use a differentiable point-based renderer [66] to splat feature point clouds " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_f(t) = \\{(\\mathbf{X}_i^f (t),\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_b(t) = \\{(\\mathbf{X}_i^b (t),\\mathbf{f}_i)\\}" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " separately into the target image plane. This process yields 2D feature maps " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_f" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_b" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " along with depth maps " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_f" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_b" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " and alpha maps " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\alpha_{f},\\alpha_{b}" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": ". Next, we wish to fuse " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_f" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_b" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": " into one feature map " + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_t" + }, + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": ". 
Inspired by prior work [64], our intuition is three-fold: 1) to enable endless and seamless looping, we should assign the weight of the two feature maps based on time so as to guarantee that the first and last frame of the synthesized video are identical; 2) the weight map should favor pixel locations with smaller depth values, in the sense that it is impossible to see objects behind those objects closer to the eye; 3) to avoid missing regions as much as possible, we should greatly increase the contribution of those pixel locations that can fill in holes. With this in mind, we formulate the weight map as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 282, + 287, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 282, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 71, + 282, + 287, + 312 + ], + "type": "interline_equation", + "content": "\\mathbf {W} _ {t} = \\frac {\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}}}{\\left(1 - \\frac {t}{N}\\right) \\cdot \\boldsymbol {\\alpha} _ {f} \\cdot e ^ {- \\mathbf {D} _ {f}} + \\frac {t}{N} \\cdot \\boldsymbol {\\alpha} _ {b} \\cdot e ^ {- \\mathbf {D} _ {b}}}, \\tag {4}", + "image_path": "0ad9af91d9f9b734522d7e7a8adb7f63c0be6f5c88747ebc647651f1cd983cd2.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "spans": [ + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "text", + "content": " is the number of frames. Therefore, we can integrate " + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_f" + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_b" + }, + { + "bbox": [ + 47, + 318, + 287, + 343 + ], + "type": "text", + "content": " via:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 350, + 287, + 364 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 350, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 99, + 350, + 287, + 364 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {F} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {F} _ {b}. 
\\tag {5}", + "image_path": "da0a9e446400415f7830c6e9cd2f639412735454cbaefc69d464a5bfaac8b3d1.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 371, + 218, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 371, + 218, + 384 + ], + "spans": [ + { + "bbox": [ + 47, + 371, + 218, + 384 + ], + "type": "text", + "content": "We also obtain the merged depth map " + }, + { + "bbox": [ + 47, + 371, + 218, + 384 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t" + }, + { + "bbox": [ + 47, + 371, + 218, + 384 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 392, + 287, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 392, + 287, + 405 + ], + "spans": [ + { + "bbox": [ + 96, + 392, + 287, + 405 + ], + "type": "interline_equation", + "content": "\\mathbf {D} _ {t} = \\mathbf {W} _ {t} \\cdot \\mathbf {D} _ {f} + (1 - \\mathbf {W} _ {t}) \\cdot \\mathbf {D} _ {b}. \\tag {6}", + "image_path": "55511871b9a5c0f1292bdb77aa10d4b0fad9250b2fe91df68ca2936e785727f0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "text", + "content": "Finally, we employ an image decoder network to map the 2D feature map " + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_t" + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "text", + "content": " and depth map " + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_t" + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "text", + "content": " to a novel view at time " + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 413, + 287, + 460 + ], + "type": "text", + "content": ". Repeating this method, we are able to synthesize a realistic cinematograph with compelling parallax effects." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 468, + 111, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 111, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 111, + 480 + ], + "type": "text", + "content": "3.5. Training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 486, + 287, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 486, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 47, + 486, + 287, + 533 + ], + "type": "text", + "content": "This section describes our training scheme. In general, we train our image-to-image translation network, 2D feature extraction network, and image decoder network in a two-stage manner." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 287, + 689 + ], + "type": "text", + "content": "Training dataset. We use the training set from Holynski et al. [19] as our training dataset. This dataset comprises short video clips of fluid motion that are extracted from longer stock-footage videos. 
We use the first frames of each video clip and the corresponding ground truth motion fields estimated by a pretrained optical flow network [60] as motion estimation pairs to train our motion estimation network. To develop animation ability, we randomly sample training data from fluid motion video clips. For novel view synthesis training, we require multi-view supervision of the same scene, which is not available in the training set. Instead, we use 3D Photo [52] to generate pseudo ground truth novel views for training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Two-stage training. Our model is trained in a two-stage manner. Specifically, we first train our motion estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 72, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 109 + ], + "type": "text", + "content": "network using motion estimation pairs. To train the motion estimation network, we minimize GAN loss, GAN feature matching loss [49], and endpoint error as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 344, + 114, + 545, + 128 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 114, + 545, + 128 + ], + "spans": [ + { + "bbox": [ + 344, + 114, + 545, + 128 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {M o t i o n}} = \\mathcal {L} _ {\\text {G A N}} + 1 0 \\mathcal {L} _ {\\text {F M}} + \\mathcal {L} _ {\\text {E P E}}. \\tag {7}", + "image_path": "7df8ec22a32cc5e4e9891fda53987d782bc9a55d3dc069aede5890d307e9eb38.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "spans": [ + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "content": "In the second stage, we freeze the motion estimation network and train the feature extraction network and image decoder network. Our model simultaneously learns to render novel views and animate scenes. For novel view synthesis, we set " + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "content": " and use pseudo ground truth novel views to supervise our model. We randomly sample target viewpoints of scenes and require the model to synthesize them. For animation, we train our model on training triplets (start frame, middle frame, end frame) sampled from fluid motion video clips. In particular, we render the middle frame from both directions using " + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t}" + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "inline_equation", + "content": "F_{0\\rightarrow t - N}" + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "content": " without changing the camera poses and intrinsics. 
Besides GAN loss and GAN feature matching loss [49], we also enforce VGG perceptual loss [23, 73] and " + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 304, + 134, + 546, + 313 + ], + "type": "text", + "content": " loss between synthesized and ground truth images. The overall loss is as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 319, + 320, + 545, + 333 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 320, + 545, + 333 + ], + "spans": [ + { + "bbox": [ + 319, + 320, + 545, + 333 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {A n i m a t i o n}} = \\mathcal {L} _ {G A N} + 1 0 \\mathcal {L} _ {F M} + \\mathcal {L} _ {l _ {1}} + \\mathcal {L} _ {V G G}. \\tag {8}", + "image_path": "2d84213c8f7b545204b287d1476e2d18a877d552a68f6e97e14ea05bc94002e6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 340, + 388, + 353 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 340, + 388, + 353 + ], + "spans": [ + { + "bbox": [ + 306, + 340, + 388, + 353 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 359, + 440, + 372 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 359, + 440, + 372 + ], + "spans": [ + { + "bbox": [ + 306, + 359, + 440, + 372 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 377, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 377, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 377, + 545, + 449 + ], + "type": "text", + "content": "Our motion estimator is a U-Net [48] based generator with 16 convolutional layers, and we replace Batch Normalization with SPADE [42]. For the feature extraction network and image decoder network, we follow the network architectures from Wang et al. [64]. We adopt the multi-scale discriminator used in SPADE [42] during training." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": "Our model is trained using the Adam optimizer [24]. We conduct all experiments on a single NVIDIA GeForce RTX 3090 GPU. We train the motion estimation network for around " + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "inline_equation", + "content": "120k" + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": " iterations with a batch size of 16. We set the generator learning rate to " + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": " and the discriminator learning rate to " + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-3}" + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": ". 
For the animation training stage, we train the feature extraction network and image decoder network for around " + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "inline_equation", + "content": "250k" + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": " iterations with a learning rate starting at " + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 304, + 449, + 545, + 558 + ], + "type": "text", + "content": " and then decaying exponentially." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 563, + 372, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 372, + 574 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 372, + 574 + ], + "type": "text", + "content": "4.2. Baselines" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 545, + 676 + ], + "type": "text", + "content": "In principle, to evaluate our method, we are required to compare it against current state-of-the-art models. However, to our knowledge, we are the first to tackle the novel task of synthesizing a realistic cinematograph with compelling parallax effects from a single image. As a result, we cannot directly compare to previous works. Instead, we consider forming the following baselines to verify the superiority of our method:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "2D animation " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " novel view synthesis. One might consider 2D image animation " + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": " single-shot novel view synthesis: first employing a 2D image animation method, then" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4599" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 124, + 285, + 193 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 288, + 114 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 288, + 114 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 288, + 114 + ], + "type": "text", + "content": "Table 1. Quantitative comparisons against all baselines on the validation set from Holynski et al. [19]. The better approach favors higher PSNR and SSIM but lower LPIPS. The best performance is in bold." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 124, + 285, + 193 + ], + "lines": [ + { + "bbox": [ + 52, + 124, + 285, + 193 + ], + "spans": [ + { + "bbox": [ + 52, + 124, + 285, + 193 + ], + "type": "table", + "html": "
MethodPSNR↑SSIM↑LPIPS↓
2D Anim. [19] → NVS [52]21.120.6330.286
NVS [52] → 2D Anim. [19]21.970.6970.276
NVS [52] → 2D Anim. [19] + MA22.470.7180.261
Naive PC Anim.19.460.6470.243
Naive PC Anim. + 3DSA20.490.6600.237
Ours23.330.7760.197
", + "image_path": "c73aa6fbda032b3f48e03479b2b356fc2ed002a2f565478dc012b95cf53ed609.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 209, + 286, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 286, + 267 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 286, + 267 + ], + "type": "text", + "content": "a single-shot novel view synthesis method. Specifically, we first adopt a state-of-the-art image animation method [19] to produce an animated looping video. We then apply DPT [45] to estimate geometry and utilize 3D Photo [52] to generate novel views for each frame." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "text", + "content": "Novel view synthesis " + }, + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "text", + "content": " 2D animation. It also appears to be feasible that we first render novel views of scenes by 3D Photo [52] and then use the image animation method [19] to animate each viewpoint. Note that motion estimation should be performed for each frame as viewpoints have changed. However, we empirically find that this usually results in varying motion fields across the video. To mitigate this, we further propose using the moving average technique to smooth estimated motions for each frame. This results in novel view synthesis " + }, + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 268, + 286, + 387 + ], + "type": "text", + "content": " 2D animation + MA." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 388, + 286, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 286, + 543 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 286, + 543 + ], + "type": "text", + "content": "Naive point cloud animation. Intuitively, we may also consider directly unprojecting pixels into 3D space and subsequently moving and rendering the RGB point cloud. Specifically, given a single input image, we first predict the depth map using DPT [45] and estimate 2D optical flow. We then lift the pixels and optical flow into 3D space to form RGB point clouds and scene flow. Finally, we animate RGB point clouds over time according to the scene flow and project these point clouds into target viewpoints. This baseline also faces a similar issue: as time goes by, large holes gradually appear. One might also employ our 3D symmetric animation technique to further enhance this baseline, i.e., naive point cloud animation + 3DSA." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 551, + 105, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 105, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 105, + 562 + ], + "type": "text", + "content": "4.3. Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 569, + 286, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 569, + 286, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 286, + 640 + ], + "type": "text", + "content": "Evaluation dataset. Since Holynski et al. 
[19] only provide a single image for each scene in the test set, we use the validation set from Holynski et al. [19] to evaluate our method and baselines. The validation set consists of 31 unique scenes with 162 samples of ground truth video clips captured by static cameras." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "content": "Experimental setup. For evaluation, we render novel views of the ground truth videos in 4 different trajectories, resulting in 240 ground truth frames for each sample. This process does not involve inpainting, thus ground truth frames may contain holes. Only considering valid pixels when calculating metrics, we compare the predicted images" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 310, + 102, + 543, + 184 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 92 + ], + "type": "text", + "content": "Table 2. User study. Pairwise comparison results indicate that users prefer our method as more realistic and immersive." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 102, + 543, + 184 + ], + "lines": [ + { + "bbox": [ + 310, + 102, + 543, + 184 + ], + "spans": [ + { + "bbox": [ + 310, + 102, + 543, + 184 + ], + "type": "table", + "html": "
ComparisonHuman preference
2D Anim. [19] → NVS [52] / Ours12.5% / 87.5%
NVS [52] → 2D Anim. [19] / Ours3.9% / 96.1%
NVS [52] → 2D Anim. [19] + MA / Ours6.1% / 93.9%
Naive PC Anim. / Ours7.6% / 92.4%
Naive PC Anim. + 3DSA / Ours8.6% / 91.4%
3D Photo [52] / Ours10.5% / 89.5%
Holynski et al. [19] / Ours29.9% / 70.1%
", + "image_path": "419e802d8d544b0e190dd2d8a7434862507b11f2f806342b10b0f78cbb050539.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 310, + 211, + 543, + 269 + ], + "blocks": [ + { + "bbox": [ + 312, + 191, + 538, + 203 + ], + "lines": [ + { + "bbox": [ + 312, + 191, + 538, + 203 + ], + "spans": [ + { + "bbox": [ + 312, + 191, + 538, + 203 + ], + "type": "text", + "content": "Table 3. Ablation study on each component of our method." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 211, + 543, + 269 + ], + "lines": [ + { + "bbox": [ + 310, + 211, + 543, + 269 + ], + "spans": [ + { + "bbox": [ + 310, + 211, + 543, + 269 + ], + "type": "table", + "html": "
PSNR↑SSIM↑LPIPS↓
w/o features21.500.6740.228
w/o inpainting22.860.7630.216
w/o 3D symmetric animation22.990.7680.199
Full model23.330.7760.197
", + "image_path": "a327edaaefee6126932d4bc20cd18ef6720c1d7fbd532db6aa0b7225a7eb8400.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "text", + "content": "with the ground truth frames at the same time and viewpoint. For a fair comparison, all methods utilize the depth maps estimated by DPT [45]. Since we focus on comparing rendering quality, all methods use ground truth optical flows, except that NVS " + }, + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "inline_equation", + "content": "[52] \\rightarrow 2\\mathrm{D}" + }, + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "text", + "content": " Anim. [19] and NVS " + }, + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "inline_equation", + "content": "[52] \\rightarrow 2\\mathrm{D}" + }, + { + "bbox": [ + 304, + 280, + 545, + 376 + ], + "type": "text", + "content": " Anim. [19] + MA have to estimate optical flows for each frame apart from the first frame. We adopt PSNR, SSIM, and LPIPS [73] as our evaluation metrics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 377, + 545, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 377, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 304, + 377, + 545, + 448 + ], + "type": "text", + "content": "Quantitative comparisons. As shown in Table 1, our method outperforms all baselines across all metrics by a large margin. This result implies that our method achieves better perceptual quality and produces more realistic renderings, which demonstrates the superiority and effectiveness of our method." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "content": "Qualitative comparisons. We showcase the visual comparisons in Fig. 4. One can observe that our method presents photorealistic results while other comparative baselines produce more or less visual artifacts. 2D Anim. [19] " + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "content": " NVS [52] intends to generate stripped flickering artifacts. This is because 2D Anim. [19] " + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "content": " NVS [52] predicts the depth map for each animated frame, leading to frequent changes in the 3D structure of the scene and inconsistent inpainting. NVS [52] " + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "content": " 2D Anim. [19] and NVS [52] " + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 449, + 545, + 664 + ], + "type": "text", + "content": " 2D Anim. [19] + MA show jelly-like effects as optical flow should be estimated for each novel view. 
This results in varying motion fields across the video and thus inconsistent animation. Although Naive PC Anim. and Naive PC Anim. + 3DSA also lift the workspace into 3D, they are often prone to produce noticeable holes inevitably. One reason for this is that they do not perform inpainting. Note that some artifacts are difficult to observe when only scanning static figures." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Controllable animation. Our method is able to create 3D cinematographs from a single image automatically. Further, we show that our framework is also highly extensible. For example, we can involve masks and flow hints as extra in" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4600" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 64, + 72, + 213, + 109 + ], + "blocks": [ + { + "bbox": [ + 64, + 72, + 213, + 109 + ], + "lines": [ + { + "bbox": [ + 64, + 72, + 213, + 109 + ], + "spans": [ + { + "bbox": [ + 64, + 72, + 213, + 109 + ], + "type": "image", + "image_path": "2b9a3b5024c703ad6a3950082d8e7607af5f5941a49d58059b4a31d7dd23adbb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 64, + 110, + 213, + 147 + ], + "blocks": [ + { + "bbox": [ + 64, + 110, + 213, + 147 + ], + "lines": [ + { + "bbox": [ + 64, + 110, + 213, + 147 + ], + "spans": [ + { + "bbox": [ + 64, + 110, + 213, + 147 + ], + "type": "image", + "image_path": "4334070062cd3b6f3f61c4716f983ef356853d2f0c87bbdebda3acd11bb58f24.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 64, + 148, + 213, + 184 + ], + "blocks": [ + { + "bbox": [ + 64, + 148, + 213, + 184 + ], + "lines": [ + { + "bbox": [ + 64, + 148, + 213, + 184 + ], + "spans": [ + { + "bbox": [ + 64, + 148, + 213, + 184 + ], + "type": "image", + "image_path": "fa5cce382bbf43a37a88b618101d5ca4327f0c17d97b77d52c1b05d1eb0d96a0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 64, + 185, + 213, + 222 + ], + "blocks": [ + { + "bbox": [ + 64, + 185, + 213, + 222 + ], + "lines": [ + { + "bbox": [ + 64, + 185, + 213, + 222 + ], + "spans": [ + { + "bbox": [ + 64, + 185, + 213, + 222 + ], + "type": "image", + "image_path": "a43c4c8a6d6d40a35c63778061a2597ce60ed6e21c39b83bae9c84489a4a2453.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 64, + 223, + 213, + 259 + ], + "blocks": [ + { + "bbox": [ + 64, + 223, + 213, + 259 + ], + "lines": [ + { + "bbox": [ + 64, + 223, + 213, + 259 + ], + "spans": [ + { + "bbox": [ + 64, + 223, + 213, + 259 + ], + "type": "image", + "image_path": "3b3d6ff5aef130dad9ac707fe69750ca678b5d366cde49ca651a5492e0252ded.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + 
"type": "image", + "bbox": [ + 64, + 261, + 213, + 297 + ], + "blocks": [ + { + "bbox": [ + 64, + 261, + 213, + 297 + ], + "lines": [ + { + "bbox": [ + 64, + 261, + 213, + 297 + ], + "spans": [ + { + "bbox": [ + 64, + 261, + 213, + 297 + ], + "type": "image", + "image_path": "8629ab41f0c9305323db490922edf6a4e47ba58c4307ccdbbf4ed438de1a7caf.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 64, + 299, + 213, + 335 + ], + "blocks": [ + { + "bbox": [ + 64, + 299, + 213, + 335 + ], + "lines": [ + { + "bbox": [ + 64, + 299, + 213, + 335 + ], + "spans": [ + { + "bbox": [ + 64, + 299, + 213, + 335 + ], + "type": "image", + "image_path": "651ff5c4abb3c8701b53eb6ed325a498989ab107ab1f61f94d7dcf2814b29bd4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "lines": [ + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": "Figure 4. Qualitative comparisons against all baselines on the validation set from Holynski et al. [19]. Our method produces compelling results while other comparative alternatives suffer from visual artifacts. (a) 2D animation " + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "inline_equation", + "content": "[19] \\rightarrow" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": " novel view synthesis [52], (b) novel view synthesis " + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "inline_equation", + "content": "[52] \\rightarrow 2\\mathrm{D}" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": " animation [19], (c) novel view synthesis " + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "inline_equation", + "content": "[52] \\rightarrow 2\\mathrm{D}" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": " animation " + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "inline_equation", + "content": "[19] +" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": " moving average, (d) naive point cloud animation, (e) naive point cloud animation " + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "inline_equation", + "content": "+3\\mathrm{D}" + }, + { + "bbox": [ + 46, + 347, + 547, + 392 + ], + "type": "text", + "content": " symmetric animation, (f) our method, and (g) pseudo ground truth." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 216, + 72, + 378, + 109 + ], + "blocks": [ + { + "bbox": [ + 216, + 72, + 378, + 109 + ], + "lines": [ + { + "bbox": [ + 216, + 72, + 378, + 109 + ], + "spans": [ + { + "bbox": [ + 216, + 72, + 378, + 109 + ], + "type": "image", + "image_path": "331c677dbb65d9c8fcb4bd343646ea093a83117d483dd86880bdff02a28080be.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 216, + 110, + 378, + 147 + ], + "blocks": [ + { + "bbox": [ + 216, + 110, + 378, + 147 + ], + "lines": [ + { + "bbox": [ + 216, + 110, + 378, + 147 + ], + "spans": [ + { + "bbox": [ + 216, + 110, + 378, + 147 + ], + "type": "image", + "image_path": "9bdd6efad93b53201e3059bdb622697f8cdffc9b39353571ef3a60943de609c8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 216, + 148, + 378, + 184 + ], + "blocks": [ + { + "bbox": [ + 216, + 148, + 378, + 184 + ], + "lines": [ + { + "bbox": [ + 216, + 148, + 378, + 184 + ], + "spans": [ + { + "bbox": [ + 216, + 148, + 378, + 184 + ], + "type": "image", + "image_path": "40ee3763d929b45715967b510caf5198dd53ef3dd243ee301bd4af3a6b2dfe69.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 216, + 185, + 378, + 222 + ], + "blocks": [ + { + "bbox": [ + 216, + 185, + 378, + 222 + ], + "lines": [ + { + "bbox": [ + 216, + 185, + 378, + 222 + ], + "spans": [ + { + "bbox": [ + 216, + 185, + 378, + 222 + ], + "type": "image", + "image_path": "58cf468364f685b31d3b6dc459e6806ee5bf50a14d606d81ff407aa0e4aa475b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 216, + 223, + 378, + 259 + ], + "blocks": [ + { + "bbox": [ + 216, + 223, + 378, + 259 + ], + "lines": [ + { + "bbox": [ + 216, + 223, + 378, + 259 + ], + "spans": [ + { + "bbox": [ + 216, + 223, + 378, + 259 + ], + "type": "image", + "image_path": "101fdfc66c8e7d9d70c1b1878b53351507d8b82b79bc9d25898fcac07cb054bb.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 216, + 261, + 378, + 297 + ], + "blocks": [ + { + "bbox": [ + 216, + 261, + 378, + 297 + ], + "lines": [ + { + "bbox": [ + 216, + 261, + 378, + 297 + ], + "spans": [ + { + "bbox": [ + 216, + 261, + 378, + 297 + ], + "type": "image", + "image_path": "7da3049f12d0abc772d8572f44294c7bb2c968998b0b4142dbcafbfd9e767d1a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 216, + 299, + 378, + 335 + ], + "blocks": [ + { + "bbox": [ + 216, + 299, + 378, + 335 + ], + "lines": [ + { + "bbox": [ + 216, + 299, + 378, + 335 + ], + "spans": [ + { + "bbox": [ + 216, + 299, + 378, + 335 + ], + "type": "image", + "image_path": "e3a2e79cca12412e125ac511395c5ac0c931e84fa031a1b4a2c001f842b53cc3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 380, + 72, + 542, + 109 + ], + "blocks": [ + { + "bbox": [ + 380, + 72, + 542, + 109 + ], + "lines": [ + { + "bbox": [ + 380, + 72, + 542, + 109 + ], + "spans": [ + { + "bbox": [ + 380, + 72, + 542, + 109 + ], + "type": "image", + "image_path": 
"c7dae67b88b3dd693c188e1d35a4f93844c49e9bfc52632fe5e3999eef2772e4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 380, + 110, + 542, + 147 + ], + "blocks": [ + { + "bbox": [ + 380, + 110, + 542, + 147 + ], + "lines": [ + { + "bbox": [ + 380, + 110, + 542, + 147 + ], + "spans": [ + { + "bbox": [ + 380, + 110, + 542, + 147 + ], + "type": "image", + "image_path": "907677b32cddcac887b489a70cb9d39cdbac34a0f1d53f82b35a8ac9442b405a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 380, + 148, + 542, + 185 + ], + "blocks": [ + { + "bbox": [ + 380, + 148, + 542, + 185 + ], + "lines": [ + { + "bbox": [ + 380, + 148, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 380, + 148, + 542, + 185 + ], + "type": "image", + "image_path": "384c5b775982f07cf8871ad81a2107da7dc170eacb047dd6a682bca81edff17e.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 380, + 186, + 542, + 222 + ], + "blocks": [ + { + "bbox": [ + 380, + 186, + 542, + 222 + ], + "lines": [ + { + "bbox": [ + 380, + 186, + 542, + 222 + ], + "spans": [ + { + "bbox": [ + 380, + 186, + 542, + 222 + ], + "type": "image", + "image_path": "34793c795d25aff587522c040118594855e2b0d7894388abee62f1ff292d4184.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 380, + 223, + 542, + 259 + ], + "blocks": [ + { + "bbox": [ + 380, + 223, + 542, + 259 + ], + "lines": [ + { + "bbox": [ + 380, + 223, + 542, + 259 + ], + "spans": [ + { + "bbox": [ + 380, + 223, + 542, + 259 + ], + "type": "image", + "image_path": "802d9c0882d76105fca960886c7495e67b212f93ee128b99e8cf9b908e1cfb0b.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 380, + 261, + 542, + 297 + ], + "blocks": [ + { + "bbox": [ + 380, + 261, + 542, + 297 + ], + "lines": [ + { + "bbox": [ + 380, + 261, + 542, + 297 + ], + "spans": [ + { + "bbox": [ + 380, + 261, + 542, + 297 + ], + "type": "image", + "image_path": "2ae7891102eef16b2f71dff01c593fe55bd91fd679a83adce1cb01993d75e9e2.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 380, + 299, + 542, + 335 + ], + "blocks": [ + { + "bbox": [ + 380, + 299, + 542, + 335 + ], + "lines": [ + { + "bbox": [ + 380, + 299, + 542, + 335 + ], + "spans": [ + { + "bbox": [ + 380, + 299, + 542, + 335 + ], + "type": "image", + "image_path": "28ace664d6be5ce608c5751704199d8f3a9466423168636eaef72a15f9386770.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 52, + 407, + 167, + 472 + ], + "blocks": [ + { + "bbox": [ + 52, + 407, + 167, + 472 + ], + "lines": [ + { + "bbox": [ + 52, + 407, + 167, + 472 + ], + "spans": [ + { + "bbox": [ + 52, + 407, + 167, + 472 + ], + "type": "image", + "image_path": "6171cac29139afec89e9c37141a91e034a08b298fda2f7c0eaed46e54430c695.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 100, + 480, + 118, + 490 + ], + "lines": [ + { + "bbox": [ + 100, + 480, + 118, + 490 + ], + "spans": [ + { + "bbox": [ + 100, + 480, + 118, + 490 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + 
"index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 503, + 287, + 525 + ], + "lines": [ + { + "bbox": [ + 47, + 503, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 503, + 287, + 525 + ], + "type": "text", + "content": "Figure 5. Controllable animation. By changing the masks and motion hints, our method can interactively control the animation." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 167, + 407, + 226, + 472 + ], + "blocks": [ + { + "bbox": [ + 167, + 407, + 226, + 472 + ], + "lines": [ + { + "bbox": [ + 167, + 407, + 226, + 472 + ], + "spans": [ + { + "bbox": [ + 167, + 407, + 226, + 472 + ], + "type": "image", + "image_path": "6512544ff9e2a4c89e35e5e0410b46730857ebefa3586d32891f5e604b655193.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 475, + 217, + 493 + ], + "lines": [ + { + "bbox": [ + 177, + 475, + 217, + 493 + ], + "spans": [ + { + "bbox": [ + 177, + 475, + 217, + 493 + ], + "type": "text", + "content": "Masks & motion hints" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 227, + 407, + 283, + 472 + ], + "blocks": [ + { + "bbox": [ + 227, + 407, + 283, + 472 + ], + "lines": [ + { + "bbox": [ + 227, + 407, + 283, + 472 + ], + "spans": [ + { + "bbox": [ + 227, + 407, + 283, + 472 + ], + "type": "image", + "image_path": "438519e393db1e5206e2ed44c6a00dce4244e7b410324bda96632ea60ed34f4f.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 241, + 475, + 266, + 492 + ], + "lines": [ + { + "bbox": [ + 241, + 475, + 266, + 492 + ], + "spans": [ + { + "bbox": [ + 241, + 475, + 266, + 492 + ], + "type": "text", + "content": "Motion fields" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 533, + 287, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 592 + ], + "type": "text", + "content": "puts to augment our motion estimator. This brings two advantages: (1) more accurate flow estimation; (2) interactive and controllable animation. As shown in Fig. 5, we can control the animation of the scene by providing various masks and motion hints to obtain different motion fields." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 713 + ], + "type": "text", + "content": "Generalizing on in-the-wild photos. To further demonstrate the generalization of our method, we also test our method on in-the-wild photos. We first create hemagraphs with camera motions on the test set from Holynski et al. [19], where, for each scene, only a single image is provided. We then select some online images at random to test our method. To accurately estimate motion fields, we provide masks and flow hints as extra inputs to our motion estimator. As shown in Fig. 
6, our method produces reasonable results for in-the-wild inputs while other comparative" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 306, + 407, + 543, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 543, + 419 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 543, + 419 + ], + "type": "text", + "content": "alternatives yield visual artifacts or inconsistent animation." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 306, + 427, + 380, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 427, + 380, + 441 + ], + "spans": [ + { + "bbox": [ + 306, + 427, + 380, + 441 + ], + "type": "text", + "content": "4.4. User Study" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 304, + 446, + 545, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 446, + 545, + 614 + ], + "spans": [ + { + "bbox": [ + 304, + 446, + 545, + 614 + ], + "type": "text", + "content": "We further conduct a user study to investigate how our method performs in the view of humans when compared with all baselines, 3D Photo [52], and Holynski et al. [19]. Specifically, we collect 50 photos from the test set of Holynski et al. [19] and the Internet. We use different approaches to generate videos with identical settings. During the study, we show each participant an input image and two animated videos generated by our method and a randomly selected approach in random order. 108 volunteers are invited to choose the method with better perceptual quality and realism, or none if it is hard to judge. We report the results in Table 2, which points out that our method surpasses alternative methods by a large margin in terms of the sense of reality and immersion." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 306, + 623, + 400, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 400, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 400, + 635 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 304, + 642, + 546, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 546, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 546, + 715 + ], + "type": "text", + "content": "To validate the effect of each component, we conduct an ablation study on the validation set from Holynski et al. [19] and show the results in Table 3. 
One can observe: i) 3D symmetric animation technique matters because it allows us to leverage bidirectionally displaced point clouds to complement each other and feasibly fill in missing regions; ii)" + } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 87, + 60, + 321 + ], + "type": "aside_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 87, + 60, + 321 + ], + "spans": [ + { + "bbox": [ + 51, + 87, + 60, + 321 + ], + "type": "text", + "content": "(a) (b) (c) (d) (e) (f) (g)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "4601" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 73, + 187, + 152 + ], + "blocks": [ + { + "bbox": [ + 51, + 73, + 187, + 152 + ], + "lines": [ + { + "bbox": [ + 51, + 73, + 187, + 152 + ], + "spans": [ + { + "bbox": [ + 51, + 73, + 187, + 152 + ], + "type": "image", + "image_path": "f5a81ab7b712b7b960749e52d83c52d407103fbd4cba6a5547a0585890164b7f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 190, + 74, + 278, + 152 + ], + "blocks": [ + { + "bbox": [ + 190, + 74, + 278, + 152 + ], + "lines": [ + { + "bbox": [ + 190, + 74, + 278, + 152 + ], + "spans": [ + { + "bbox": [ + 190, + 74, + 278, + 152 + ], + "type": "image", + "image_path": "f15f25732e6450fed411f18299fa2ac6ecbc699da2e7d96cfb0faf5db0e8912f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 279, + 74, + 366, + 152 + ], + "blocks": [ + { + "bbox": [ + 279, + 74, + 366, + 152 + ], + "lines": [ + { + "bbox": [ + 279, + 74, + 366, + 152 + ], + "spans": [ + { + "bbox": [ + 279, + 74, + 366, + 152 + ], + "type": "image", + "image_path": "a537a67f5067021e134728ef88014fc757aca63dbd184b9ce803199d7a572b23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 367, + 74, + 454, + 152 + ], + "blocks": [ + { + "bbox": [ + 367, + 74, + 454, + 152 + ], + "lines": [ + { + "bbox": [ + 367, + 74, + 454, + 152 + ], + "spans": [ + { + "bbox": [ + 367, + 74, + 454, + 152 + ], + "type": "image", + "image_path": "f483a064c98f128c705ec1bc2c49ec7771c09ad5d6d3ff2748f7dfdf6c5faac3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 454, + 74, + 541, + 152 + ], + "blocks": [ + { + "bbox": [ + 454, + 74, + 541, + 152 + ], + "lines": [ + { + "bbox": [ + 454, + 74, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 454, + 74, + 541, + 152 + ], + "type": "image", + "image_path": "1e9333e1c031bf1016858079d90cecb56c3910c4816f1f5dfbadc6eae3e00671.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 51, + 153, + 186, + 231 + ], + "blocks": [ + { + "bbox": [ + 51, + 153, + 186, + 231 + ], + "lines": [ + { + "bbox": [ + 51, + 153, + 186, + 231 + ], + "spans": [ + { + "bbox": [ + 51, + 153, + 186, + 231 + ], + "type": "image", + "image_path": "35fe3eb972a1809130e0d10285fae79b16cfae42c84a92aa0e99afc7e9e6d151.jpg" + } + ] + } + ], + "index": 5, + 
"angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 190, + 153, + 278, + 231 + ], + "blocks": [ + { + "bbox": [ + 190, + 153, + 278, + 231 + ], + "lines": [ + { + "bbox": [ + 190, + 153, + 278, + 231 + ], + "spans": [ + { + "bbox": [ + 190, + 153, + 278, + 231 + ], + "type": "image", + "image_path": "686417ad0b34fe4e82290b22a0e1390705cdca8f1343846e18101e735e69e82b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 279, + 153, + 366, + 231 + ], + "blocks": [ + { + "bbox": [ + 279, + 153, + 366, + 231 + ], + "lines": [ + { + "bbox": [ + 279, + 153, + 366, + 231 + ], + "spans": [ + { + "bbox": [ + 279, + 153, + 366, + 231 + ], + "type": "image", + "image_path": "dcf5994d108ca3af8318dd9cc05c36f7662a96438da8eda2332971c021c5c1a5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 367, + 153, + 454, + 231 + ], + "blocks": [ + { + "bbox": [ + 367, + 153, + 454, + 231 + ], + "lines": [ + { + "bbox": [ + 367, + 153, + 454, + 231 + ], + "spans": [ + { + "bbox": [ + 367, + 153, + 454, + 231 + ], + "type": "image", + "image_path": "14082932986a7cd6976251e28ce1a025d3f3ceb34249bedd0533c9af6210f144.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 454, + 153, + 541, + 231 + ], + "blocks": [ + { + "bbox": [ + 454, + 153, + 541, + 231 + ], + "lines": [ + { + "bbox": [ + 454, + 153, + 541, + 231 + ], + "spans": [ + { + "bbox": [ + 454, + 153, + 541, + 231 + ], + "type": "image", + "image_path": "775feb0956c44f1ecb7d5ede298bf1672b0ee9545e39db49209de96c71c87d05.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 51, + 232, + 186, + 310 + ], + "blocks": [ + { + "bbox": [ + 51, + 232, + 186, + 310 + ], + "lines": [ + { + "bbox": [ + 51, + 232, + 186, + 310 + ], + "spans": [ + { + "bbox": [ + 51, + 232, + 186, + 310 + ], + "type": "image", + "image_path": "4c808cd46b96d136f16ed5f2968847e9994cd67834dccc178c612fc00d07b773.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 190, + 232, + 278, + 310 + ], + "blocks": [ + { + "bbox": [ + 190, + 232, + 278, + 310 + ], + "lines": [ + { + "bbox": [ + 190, + 232, + 278, + 310 + ], + "spans": [ + { + "bbox": [ + 190, + 232, + 278, + 310 + ], + "type": "image", + "image_path": "dcfb77c70003a8e89315db6e2dcfb87597df19aba88455302bbe198b0bec233b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 279, + 232, + 366, + 310 + ], + "blocks": [ + { + "bbox": [ + 279, + 232, + 366, + 310 + ], + "lines": [ + { + "bbox": [ + 279, + 232, + 366, + 310 + ], + "spans": [ + { + "bbox": [ + 279, + 232, + 366, + 310 + ], + "type": "image", + "image_path": "6a531b7a7972d6c3ea66dcb96e2e9eae2e9490a239bd9485594f1196377c4c03.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 367, + 232, + 454, + 310 + ], + "blocks": [ + { + "bbox": [ + 367, + 232, + 454, + 310 + ], + "lines": [ + { + "bbox": [ + 367, + 232, + 454, + 310 + ], + "spans": [ + { + "bbox": [ + 367, + 232, + 454, + 310 + ], + "type": "image", + "image_path": 
"bcb1951dc15289fde8623f0ec65bfd45bfa6221e67742d9aa58aea42471f36da.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 454, + 232, + 541, + 310 + ], + "blocks": [ + { + "bbox": [ + 454, + 232, + 541, + 310 + ], + "lines": [ + { + "bbox": [ + 454, + 232, + 541, + 310 + ], + "spans": [ + { + "bbox": [ + 454, + 232, + 541, + 310 + ], + "type": "image", + "image_path": "c2570be568c72ca139a833bfa69595dcb84907a38a2e3c8bbcb572f90d24ec02.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 51, + 312, + 186, + 388 + ], + "blocks": [ + { + "bbox": [ + 51, + 312, + 186, + 388 + ], + "lines": [ + { + "bbox": [ + 51, + 312, + 186, + 388 + ], + "spans": [ + { + "bbox": [ + 51, + 312, + 186, + 388 + ], + "type": "image", + "image_path": "d4b52e205f17b8e3e3f0f48959728534632f8588920125c9a895982d9acf98b7.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 396, + 128, + 405 + ], + "lines": [ + { + "bbox": [ + 110, + 396, + 128, + 405 + ], + "spans": [ + { + "bbox": [ + 110, + 396, + 128, + 405 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 190, + 312, + 278, + 388 + ], + "blocks": [ + { + "bbox": [ + 190, + 312, + 278, + 388 + ], + "lines": [ + { + "bbox": [ + 190, + 312, + 278, + 388 + ], + "spans": [ + { + "bbox": [ + 190, + 312, + 278, + 388 + ], + "type": "image", + "image_path": "62b672b63224ab57a02712918e31b9ad7cbf7e898bbf05728e8068d8b69d1f60.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 395, + 262, + 404 + ], + "lines": [ + { + "bbox": [ + 205, + 395, + 262, + 404 + ], + "spans": [ + { + "bbox": [ + 205, + 395, + 262, + 404 + ], + "type": "text", + "content": "2D Anim. " + }, + { + "bbox": [ + 205, + 395, + 262, + 404 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 205, + 395, + 262, + 404 + ], + "type": "text", + "content": " NVS" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 279, + 312, + 366, + 388 + ], + "blocks": [ + { + "bbox": [ + 279, + 312, + 366, + 388 + ], + "lines": [ + { + "bbox": [ + 279, + 312, + 366, + 388 + ], + "spans": [ + { + "bbox": [ + 279, + 312, + 366, + 388 + ], + "type": "image", + "image_path": "76439cae103eb89c3da25766dd92b3002d79d9ee8b1183b89e558399ea883dcb.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "lines": [ + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "spans": [ + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "type": "inline_equation", + "content": "\\mathrm{NVS}\\rightarrow 2\\mathrm{D}" + }, + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "type": "text", + "content": " Anim. 
" + }, + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 283, + 396, + 358, + 405 + ], + "type": "text", + "content": " MA" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 367, + 312, + 454, + 388 + ], + "blocks": [ + { + "bbox": [ + 367, + 312, + 454, + 388 + ], + "lines": [ + { + "bbox": [ + 367, + 312, + 454, + 388 + ], + "spans": [ + { + "bbox": [ + 367, + 312, + 454, + 388 + ], + "type": "image", + "image_path": "a0b8de3a8fac3435b38d3675b9502a511377a4f0588daf62921d5ec67ea165a9.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 395, + 447, + 403 + ], + "lines": [ + { + "bbox": [ + 373, + 395, + 447, + 403 + ], + "spans": [ + { + "bbox": [ + 373, + 395, + 447, + 403 + ], + "type": "text", + "content": "Naive PC Anim. + 3DSA" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 418, + 545, + 440 + ], + "lines": [ + { + "bbox": [ + 46, + 418, + 545, + 440 + ], + "spans": [ + { + "bbox": [ + 46, + 418, + 545, + 440 + ], + "type": "text", + "content": "Figure 6. Visual comparisons on the test set from Holynski et al. [19] and in-the-wild photos. Our method consistently produces more realistic rendering with fewer visual artifacts as opposed to other baselines." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 454, + 312, + 541, + 388 + ], + "blocks": [ + { + "bbox": [ + 454, + 312, + 541, + 388 + ], + "lines": [ + { + "bbox": [ + 454, + 312, + 541, + 388 + ], + "spans": [ + { + "bbox": [ + 454, + 312, + 541, + 388 + ], + "type": "image", + "image_path": "bf29a9be76599a0ec19a90418f4e817bc7da3c7e836f8d67fd5f6c4c015ca424.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 490, + 396, + 506, + 403 + ], + "lines": [ + { + "bbox": [ + 490, + 396, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 490, + 396, + 506, + 403 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 46, + 450, + 287, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 287, + 522 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 287, + 522 + ], + "type": "text", + "content": "introducing inpainting when constructing 3D geometry can improve the performance as this allows our model to produce plausible structures around depth discontinuities and fill in holes; iii) switching from directly using RGB colors to features in 3D scene representation significantly improves the rendering quality and reduces artifacts." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 47, + 536, + 119, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 119, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 119, + 548 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 713 + ], + "type": "text", + "content": "In this paper, we introduce a novel task of creating 3D cinematographs from single images. 
To this end, we present a simple yet effective method that makes a connection between image animation and novel view synthesis. We show that our method produces plausible animation of the scene while allowing camera movements. Our framework is flexible and customized. For accurate motion estimation and controllable animation, we can further include masks and flow hints as extra input for the motion estimator. Therefore, users can control how the scene is animated. Furthermore, our method generalizes well to in-the-wild photos, even like paintings or synthetic images generated by diffusion models. We conduct extensive experiments to ver" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 450, + 545, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 509 + ], + "type": "text", + "content": "ify the effectiveness and superiority of our method. A user study also demonstrates that our method generates realistic 3D cinematographs. We hope that our work can bring 3D cinematography into the sight of a broader community and motivate further research." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 510, + 546, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 546, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 546, + 628 + ], + "type": "text", + "content": "Limitations and future work. Our method may not work well when the depth prediction module estimates erroneous geometry from the input image, e.g., thin structures. In addition, inappropriate motion fields will sometimes lead to undesirable results, e.g., some regions are mistakenly identified as frozen. As we take the first step towards 3D cinematography, in this paper, we focus on handling common moving elements, i.e., fluids. In other words, our method may not apply to more complex motions, e.g., cyclic motion. We leave this for our future work." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgements. This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s). This work is also supported by Adobe Gift and the Ministry of Education, Singapore, under its Academic Research Fund Tier 2 (MOE-T2EP20220-0007) and Tier 1 (RG14/22)." 
+ } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4602" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Jiamin Bai, Aseem Agarwala, Maneesh Agrawala, and Ravi Ramamoorthi. Automatic cinemagraph portraits. In Computer Graphics Forum, volume 32, pages 17-25. Wiley Online Library, 2013. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 192 + ], + "type": "text", + "content": "[2] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5366-5375, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 194, + 288, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 288, + 248 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 288, + 248 + ], + "type": "text", + "content": "[3] Wenbo Bao, Wei-Sheng Lai, Chao Ma, Xiaoyun Zhang, Zhiyong Gao, and Ming-Hsuan Yang. Depth-aware video frame interpolation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3703-3712, 2019. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 250, + 287, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 287, + 294 + ], + "type": "text", + "content": "[4] Mojtaba Bemana, Karol Myszkowski, Hans-Peter Seidel, and Tobias Ritschel. X-Fields: Implicit neural view-, light- and time-image interpolation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "type": "text", + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 352, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 352, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 352, + 287, + 396 + ], + "type": "text", + "content": "[6] Jin-Xiang Chai, Xin Tong, Shing-Chow Chan, and Heung-Yeung Shum. Plenoptic sampling. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 307-318, 2000. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 287, + 441 + ], + "type": "text", + "content": "[7] Caroline Chan, Shiry Ginosar, Tinghui Zhou, and Alexei A Efros. Everybody dance now. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5933-5942, 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 443, + 287, + 487 + ], + "type": "text", + "content": "[8] Yung-Yu Chuang, Dan B Goldman, Ke Colin Zheng, Brian Curless, David H Salesin, and Richard Szeliski. Animating pictures with stochastic motion textures. ACM Transactions on Graphics (TOG), 24(3):853-860, 2005. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 488, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 488, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 488, + 287, + 544 + ], + "type": "text", + "content": "[9] Paul E Debevec, Camillo J Taylor, and Jitendra Malik. Modeling and rendering architecture from photographs: A hybrid geometry-and image-based approach. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 11-20, 1996. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 545, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 287, + 611 + ], + "type": "text", + "content": "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. FlowNet: Learning optical flow with convolutional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 2758-2766, 2015. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 613, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 613, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 47, + 613, + 287, + 667 + ], + "type": "text", + "content": "[11] Michail Christos Doukas, Stefanos Zafeiriou, and Viktoriia Sharmanska. HeadGAN: One-shot neural head synthesis and editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14398-14407, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 287, + 713 + ], + "type": "text", + "content": "[12] Yuki Endo, Yoshihiro Kanamori, and Shigeru Kuriyama. Animating Landscape: Self-supervised learning of decoupled motion and appearance for single-image video synthesis. 
ACM Transactions on Graphics (Proceedings of ACM" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 94 + ], + "type": "text", + "content": "SIGGRAPH Asia 2019), 38(6):175:1-175:19, 2019. 1, 2, 3, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 129 + ], + "type": "text", + "content": "[13] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 130, + 545, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 545, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 545, + 174 + ], + "type": "text", + "content": "[14] Steven J Gortler, Radek Grzeszcuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 43-54, 1996. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "type": "text", + "content": "[15] Yuxuan Han, Ruicheng Wang, and Jiaolong Yang. Single-view synthesis in the wild with learned adaptive multiplane images. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 274 + ], + "type": "text", + "content": "[16] Jonathan Ho, William Chan, Chitwan Saharia, Jay Whang, Ruiqi Gao, Alexey Gritsenko, Diederik P Kingma, Ben Poole, Mohammad Norouzi, David J Fleet, et al. Imagen Video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 276, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 545, + 308 + ], + "type": "text", + "content": "[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 310, + 545, + 342 + ], + "type": "text", + "content": "[18] Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. Video diffusion models. arXiv preprint arXiv:2204.03458, 2022. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 344, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 344, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 344, + 545, + 398 + ], + "type": "text", + "content": "[19] Aleksander Holynski, Brian L. Curless, Steven M. Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5810-5819, 2021. 1, 2, 3, 4, 5, 6, 7, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 399, + 545, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 455 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 455 + ], + "type": "text", + "content": "[20] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. FlowNet 2.0: Evolution of optical flow estimation with deep networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2462-2470, 2017. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 456, + 545, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 532 + ], + "type": "text", + "content": "[21] Varun Jampani, Huiwen Chang, Kyle Sargent, Abhishek Kar, Richard Tucker, Michael Krainin, Dominik Kaeser, William T Freeman, David Salesin, Brian Curless, and Ce Liu. SLIDE: Single image 3d photography with soft layering and depth-aware inpainting. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 533, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 545, + 566 + ], + "type": "text", + "content": "[22] Wei-Cih Jhou and Wen-Huang Cheng. Animating still landscape photographs through cloud motion creation. IEEE Transactions on Multimedia, 18(1):4-13, 2015. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 567, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 611 + ], + "type": "text", + "content": "[23] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Proceedings of the European Conference on Computer Vision (ECCV), pages 694–711. Springer, 2016. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "type": "text", + "content": "[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "text", + "content": "[25] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "text", + "content": "[26] Jiaxin Li, Zijian Feng, Qi She, Henghui Ding, Changhu Wang, and Gim Hee Lee. MINE: Towards continuous depth spi with nerf for novel view synthesis. In Proceedings of" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4603" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12578-12588, 2021. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 171 + ], + "type": "text", + "content": "[27] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5521-5531, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 172, + 287, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 172, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 49, + 172, + 287, + 225 + ], + "type": "text", + "content": "[28] Xingyi Li, Chaoyi Hong, Yiran Wang, Zhiguo Cao, Ke Xian, and Guosheng Lin. Symmnerf: Learning to explore symmetry prior for single-view view synthesis. In Proceedings of the Asian Conference on Computer Vision (ACCV), pages 1726-1742, 2022. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 228, + 287, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 228, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 49, + 228, + 287, + 281 + ], + "type": "text", + "content": "[29] Yijun Li, Chen Fang, Jimei Yang, Zhaowen Wang, Xin Lu, and Ming-Hsuan Yang. Flow-grounded spatial-temporal video prediction from still images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 600-615, 2018. 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 283, + 287, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 283, + 287, + 336 + ], + "spans": [ + { + "bbox": [ + 49, + 283, + 287, + 336 + ], + "type": "text", + "content": "[30] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6498-6508, 2021. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 338, + 287, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 338, + 287, + 381 + ], + "spans": [ + { + "bbox": [ + 49, + 338, + 287, + 381 + ], + "type": "text", + "content": "[31] Chieh Hubert Lin, Yen-Chi Cheng, Hsin-Ying Lee, Sergey Tulyakov, and Ming-Hsuan Yang. InfinityGAN: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 49, + 383, + 287, + 446 + ], + "type": "text", + "content": "[32] Andrew Liu, Richard Tucker, Varun Jampani, Ameesh Makadia, Noah Snavely, and Angjoo Kanazawa. Infinite nature: Perpetual view generation of natural scenes from a single image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 14458-14467, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 449, + 287, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 449, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 49, + 449, + 287, + 513 + ], + "type": "text", + "content": "[33] Wen Liu, Zhixin Piao, Jie Min, Wenhan Luo, Lin Ma, and Shenghua Gao. Liquid Warping GAN: A unified framework for human motion imitation, appearance transfer and novel view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5904-5913, 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 514, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 514, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 49, + 514, + 287, + 568 + ], + "type": "text", + "content": "[34] Elizaveta Logacheva, Roman Suvorov, Oleg Khomenko, Anton Mashikhin, and Victor Lempitsky. DeepLandscape: Adversarial modeling of landscape videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 256-272. Springer, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 570, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 570, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 49, + 570, + 287, + 613 + ], + "type": "text", + "content": "[35] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3667-3676, 2022. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 614, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 614, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 49, + 614, + 287, + 635 + ], + "type": "text", + "content": "[36] Oded Maimon and Lior Rokach. Data mining and knowledge discovery handbook. 2005. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 636, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 636, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 49, + 636, + 287, + 689 + ], + "type": "text", + "content": "[37] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), 2020. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 692, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 692, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 692, + 287, + 712 + ], + "type": "text", + "content": "[38] Simon Niklaus and Feng Liu. Softmax splatting for video frame interpolation. In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition (CVPR), pages 5437-5446, 2020. 2, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 96, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 127 + ], + "type": "text", + "content": "[39] Simon Niklaus, Long Mai, Jimei Yang, and Feng Liu. 3dken burns effect from a single image. ACM Transactions on Graphics (ToG), 38(6):1-15, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 129, + 545, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 129, + 545, + 193 + ], + "spans": [ + { + "bbox": [ + 308, + 129, + 545, + 193 + ], + "type": "text", + "content": "[40] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 165-174, 2019. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 545, + 249 + ], + "type": "text", + "content": "[41] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5865-5874, 2021. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 251, + 545, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 545, + 303 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 545, + 303 + ], + "type": "text", + "content": "[42] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2337-2346, 2019. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 305, + 545, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 359 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 359 + ], + "type": "text", + "content": "[43] Juewen Peng, Jianming Zhang, Xianrui Luo, Hao Lu, Ke Xian, and Zhiguo Cao. Mpib: An mpi-based bokeh rendering framework for realistic partial occlusion effects. In Proceedings of the European Conference on Computer Vision (ECCV), pages 590-607. Springer, 2022. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 361, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 361, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 361, + 545, + 415 + ], + "type": "text", + "content": "[44] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10318-10327, 2021. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 416, + 545, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 416, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 416, + 545, + 458 + ], + "type": "text", + "content": "[45] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 12179-12188, 2021. 2, 4, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 460, + 545, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 545, + 513 + ], + "type": "text", + "content": "[46] Yurui Ren, Xiaoming Yu, Junming Chen, Thomas H Li, and Ge Li. Deep image spatial transformation for person image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7690-7699, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 515, + 545, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 515, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 308, + 515, + 545, + 569 + ], + "type": "text", + "content": "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10684-10695, 2022. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 571, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 571, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 571, + 545, + 624 + ], + "type": "text", + "content": "[48] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI), pages 234–241. Springer, 2015. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "type": "text", + "content": "[49] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. In Advances in Neural Information Processing Systems (NeurIPS), 2016. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "text", + "content": "[50] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. 
In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 231–242, 1998. 2, 3, 4" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4604" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 126 + ], + "type": "text", + "content": "[51] Tamar Rott Shaham, Tali Dekel, and Tomer Michaeli. SinGAN: Learning a generative model from a single natural image. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4570-4580, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "text", + "content": "[52] Meng-Li Shih, Shih-Yang Su, Johannes Kopf, and Jia-Bin Huang. 3d photography using context-aware layered depth inpainting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 4, 5, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 186, + 288, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 240 + ], + "type": "text", + "content": "[53] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. Animating arbitrary objects via deep motion transfer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2377-2386, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 241, + 288, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 286 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 286 + ], + "type": "text", + "content": "[54] Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 287, + 288, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 287, + 288, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 287, + 288, + 341 + ], + "type": "text", + "content": "[55] Aliaksandr Siarohin, Enver Sangineto, Stéphane Lathuiliere, and Nicu Sebe. Deformable gans for pose-based human image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3408-3416, 2018. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 288, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 288, + 398 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 288, + 398 + ], + "type": "text", + "content": "[56] Aliaksandr Siarohin, Oliver J Woodford, Jian Ren, Mengei Chai, and Sergey Tulyakov. Motion representations for articulated animation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13653-13662, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 399, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 288, + 453 + ], + "type": "text", + "content": "[57] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-A-Video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022.2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 455, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 455, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 455, + 288, + 510 + ], + "type": "text", + "content": "[58] Vincent Sitzmann, Michael Zollhoefer, and Gordon Wetzstein. Scene Representation Networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "type": "text", + "content": "[59] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning (ICML), pages 2256-2265. PMLR, 2015. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 567, + 288, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 622 + ], + "type": "text", + "content": "[60] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. PWC-Net: Cnns for optical flow using pyramid, warping, and cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934–8943, 2018. 3, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "text", + "content": "[61] Zachary Teed and Jia Deng. RAFT: Recurrent all-pairs field transforms for optical flow. In Proceedings of the European Conference on Computer Vision (ECCV), pages 402-419, 2020. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 288, + 714 + ], + "type": "text", + "content": "[62] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 551-560, 2020. 
2" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 665 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 546, + 117 + ], + "type": "text", + "content": "[63] Shubham Tulsiani, Richard Tucker, and Noah Snavely. Layer-structured 3d scene inference via view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), pages 302–317, 2018. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 172 + ], + "type": "text", + "content": "[64] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 3, 4, 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 174, + 546, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 546, + 228 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 546, + 228 + ], + "type": "text", + "content": "[65] Yiran Wang, Zhiyu Pan, Xingyi Li, Zhiguo Cao, Ke Xian, and Jianming Zhang. Less is more: Consistent video depth estimation with masked frames modeling. In Proceedings of the 30th ACM International Conference on Multimedia (ACM MM), pages 6347-6358, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 229, + 546, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 229, + 546, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 229, + 546, + 284 + ], + "type": "text", + "content": "[66] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. SynSin: End-to-end view synthesis from a single image. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7467-7477, 2020. 2, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 285, + 547, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 547, + 341 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 547, + 341 + ], + "type": "text", + "content": "[67] Ke Xian, Chunhua Shen, Zhiguo Cao, Hao Lu, Yang Xiao, Ruibo Li, and Zhenbo Luo. Monocular relative depth perception with web stereo data supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 311-320, 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 341, + 546, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 341, + 546, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 341, + 546, + 396 + ], + "type": "text", + "content": "[68] Ke Xian, Jianming Zhang, Oliver Wang, Long Mai, Zhe Lin, and Zhiguo Cao. Structure-guided ranking loss for single image depth prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 611-620, 2020. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 396, + 547, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 396, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 547, + 453 + ], + "type": "text", + "content": "[69] Wei Xiong, Wenhan Luo, Lin Ma, Wei Liu, and Jiebo Luo. Learning to generate time-lapse videos using multi-stage dynamic generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2364-2373, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 453, + 546, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 453, + 546, + 508 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 546, + 508 + ], + "type": "text", + "content": "[70] Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Humphrey Shi, and Zhangyang Wang. SinNeRF: Training neural radiance fields on complex scenes from a single image. In Proceedings of the European Conference on Computer Vision (ECCV), pages 736-753. Springer, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 508, + 547, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 508, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 508, + 547, + 552 + ], + "type": "text", + "content": "[71] Hang Yan, Yebin Liu, and Yasutaka Furukawa. Turning an urban scene video into a cinematograph. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 394-402, 2017. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 553, + 546, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 553, + 546, + 608 + ], + "spans": [ + { + "bbox": [ + 307, + 553, + 546, + 608 + ], + "type": "text", + "content": "[72] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4578-4587, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 609, + 547, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 609, + 547, + 665 + ], + "spans": [ + { + "bbox": [ + 307, + 609, + 547, + 665 + ], + "type": "text", + "content": "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 586-595, 2018. 
5, 6" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4605" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_content_list.json b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7ab3c58c70d3fa5e934399a306b4b0423edd9cf5 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_content_list.json @@ -0,0 +1,1355 @@ +[ + { + "type": "text", + "text": "3D Concept Learning and Reasoning from Multi-View Images", + "text_level": 1, + "bbox": [ + 169, + 130, + 799, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yining Hong $^{1}$ , Chunru Lin $^{2}$ , Yilun Du $^{3}$ , Zhenfang Chen $^{5}$ , Joshua B. Tenenbaum $^{3}$ , Chuang Gan $^{4,5}$ , $^{1}$ UCLA, $^{2}$ Shanghai Jiaotong University, $^{3}$ MIT CSAIL, $^{4}$ UMass Amherst, $^{5}$ MIT-IBM Watson AI Lab https://vis-www.cs.umass.edu/3d-clr/", + "bbox": [ + 258, + 179, + 709, + 268 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0139c5dde162a2aa92dcfb3c8dcbb7922e85a282412b0fa7132181e9fb7d8996.jpg", + "image_caption": [ + "Concept: Q: Are there any televisions? A: Yes", + "Counting: \nQ: How many chairs are close to the table in the room with plant on the cabinet? A: 6", + "Q: How many rooms have sofas? A: 1", + "Figure 1. An exemplar scene with multi-view images and question-answer pairs of our 3DMV-VQA dataset. 3DMV-VQA contains four question types: concept, counting, relation, comparison. Orange words denote semantic concepts; blue words denote the relations." + ], + "image_footnote": [], + "bbox": [ + 81, + 305, + 292, + 536 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9c2499a72b5232613521f83970886d73892accd35822abbe76380eef8f2aa6d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 302, + 306, + 444, + 419 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bcdc54a8ca47ecfc2e80d202e50dbd8f6ca69bcd75c2d68e299c3a97942513b0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 300, + 422, + 444, + 534 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/adb9c46914582c93ffb458cac411cec963fc04c004c0d353d24ebdb993d37a5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 450, + 306, + 593, + 419 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8b29d6f5ef01af8dd76de5ff59d2297cdec550b6c7ae842222df4b69bb5b62d8.jpg", + "image_caption": [ + "Relation: Q: Facing the computer from the curtain, is there a lamp on the right? A: Yes", + "Q: What's on the cabinet in the smaller room? 
A: Plant" + ], + "image_footnote": [], + "bbox": [ + 450, + 422, + 591, + 532 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/49645c25bde17b0db6d9b773fde5388ec829dbf8fc93453c2f9d34d1a84bd58e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 596, + 306, + 740, + 419 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/533b12da53016d7d003d444faa1199643658affa31d497d3c6002146c1f27325.jpg", + "image_caption": [ + "Comparison: \nQ: Are there fewer pictures in the larger room than the other room? A: No \nQ: Is the computer closer to a printer or a lamp? \nA: Printer" + ], + "image_footnote": [], + "bbox": [ + 596, + 422, + 738, + 532 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/874bbee417fccf7c5a5cbc21ef58df9624603c16927e8facc7e608b9c78004c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 745, + 308, + 887, + 419 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8c5023c7c3bb44a5cbe06b847a2eb0751a7ff0bfc87e1dd62dc7e15795a12609.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 745, + 422, + 887, + 532 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 700, + 313, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humans are able to accurately reason in 3D by gathering multi-view observations of the surrounding world. Inspired by this insight, we introduce a new large-scale benchmark for 3D multi-view visual question answering (3DMV-VQA). This dataset is collected by an embodied agent actively moving and capturing RGB images in an environment using the Habitat simulator. In total, it consists of approximately 5k scenes, 600k images, paired with 50k questions. We evaluate various state-of-the-art models for visual reasoning on our benchmark and find that they all perform poorly. We suggest", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "that a principled approach for 3D reasoning from multi-view images should be to infer a compact 3D representation of the world from the multi-view images, which is further grounded on open-vocabulary semantic concepts, and then to execute reasoning on these 3D representations. As the first step towards this approach, we propose a novel 3D concept learning and reasoning (3D-CLR) framework that seamlessly combines these components via neural fields, 2D pre-trained vision-language models, and neural reasoning operators. Experimental results suggest that our framework outperforms baseline models by a large margin, but the challenge remains largely unsolved. We further perform an in-depth analysis of the challenges and highlight potential future directions.", + "bbox": [ + 496, + 703, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "9202", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 78, + 89, + 207, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Visual reasoning, the ability to composite rules on internal representations to reason and answer questions about visual scenes, has been a long-standing challenge in the field of artificial intelligence and computer vision. Several datasets [23, 33, 69] have been proposed to tackle this challenge. However, they mainly focus on visual reasoning on 2D single-view images. Since 2D single-view images only cover a limited region of the whole space, such reasoning inevitably has several weaknesses, including occlusion, and failing to answer 3D-related questions about the entire scene that we are interested in. As shown in Fig. 1, it's difficult, even for humans, to count the number of chairs in a scene due to the object occlusion, and it's even harder to infer 3D relations like \"closer\" from a single-view 2D image.", + "bbox": [ + 76, + 114, + 472, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the other hand, there's strong psychological evidence that human beings conduct visual reasoning in the underlying 3D representations [55]. Recently, there have been several works focusing on 3D visual question answering [2,16,62,64]. They mainly use traditional 3D representations (e.g., point clouds) for visual reasoning. This is inconsistent with the way human beings perform 3D reasoning in real life. Instead of being given an entire 3D representation of the scene at once, humans will actively walk around and explore the whole environment, ingesting image observations from different views and converting them into a holistic 3D representation that assists them in understanding and reasoning about the environment. Such abilities are crucial for many embodied AI applications, such as building assistive robots.", + "bbox": [ + 76, + 327, + 472, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose the novel task of 3D visual reasoning from multi-view images taken by active exploration of an embodied agent. Specifically, we generate a large-scale benchmark, 3DMV-VQA (3D multi-view visual question answering), that contains approximately 5k scenes and 50k question-answering pairs about these scenes. For each scene, we provide a collection of multi-view image observations. We generate this dataset by placing an embodied agent in the Habitat-Matterport environment [47], which actively explores the environment and takes pictures from different views. We also obtain scene graph annotations from the Habitat-Matterport 3D semantics dataset (HM3DSem) [61], including ground-truth locations, segmentations, semantic information of the objects, as well as relationships among the objects in the environments, for model diagnosis. To evaluate the models' 3D reasoning abilities on the entire environment, we design several 3D-related question types, including concept, counting, relation and comparison.", + "bbox": [ + 76, + 537, + 472, + 809 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given this new task, the key challenges we would like to investigate include: 1) how to efficiently obtain the compact visual representation to encode crucial properties (e.g., semantics and relations) by integrating all incomplete observations of the environment in the process of active exploration for 3D visual reasoning? 
2) How to ground the semantic con", + "bbox": [ + 76, + 810, + 472, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cepts on these 3D representations that could be leveraged for downstream tasks, such as visual reasoning? 3) How to infer the relations among the objects, and perform step-by-step reasoning?", + "bbox": [ + 496, + 90, + 893, + 151 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As the first step to tackling these challenges, we propose a novel model, 3D-CLR (3D Concept Learning and Reasoning). First, to efficiently obtain a compact 3D representation from multi-view images, we use a neural-field model based on compact voxel grids [57] which is both fast to train and effective at storing scene properties in its voxel grids. As for concept learning, we observe that previous works on 3D scene understanding [1,3] lack the diversity and scale with regard to semantic concepts due to the limited amount of paired 3D-and-language data. Although large-scale vision-language models (VLMs) have achieved impressive performances for zero-shot semantic grounding on 2D images, leveraging these pretrained models for effective open-vocabulary 3D grounding of semantic concepts remains a challenge. To address these challenges, we propose to encode the features of a pre-trained 2D vision-language model (VLM) into the compact 3D representation defined across voxel locations. Specifically, we use the CLIP-LSeg [37] model to obtain features on multi-view images, and propose an alignment loss to map the features in our 3D voxel grid to 2D pixels. By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we can ground the semantic concepts in the 3D compact representation. Finally, to answer the questions, we introduce a set of neural reasoning operators, including FILTER, COUNT, RELATION operators and so on, which take the 3D representations of different objects as input and output the predictions.", + "bbox": [ + 496, + 152, + 893, + 559 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct experiments on our proposed 3DMV-VQA benchmark. Experimental results show that our proposed 3D-CLR outperforms all baseline models a lot. However, failure cases and model diagnosis show that challenges still exist concerning the grounding of small objects and the separation of close object instances. We provide an in-depth analysis of the challenges and discuss potential future directions.", + "bbox": [ + 496, + 560, + 893, + 665 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To sum up, we have the following contributions in this paper.", + "bbox": [ + 500, + 666, + 893, + 681 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose the novel task of 3D concept learning and reasoning from multi-view images.", + "- By having robots actively explore the embodied environments, we collect a large-scale benchmark on 3D multiview visual question answering (3DMV-VQA).", + "- We devise a model that incorporates a neural radiance field, 2D pretrained vision and language model, and neural reasoning operators to ground the concepts and perform 3D reasoning on the multi-view images. We illustrate that our model outperforms all baseline models.", + "- We perform an in-depth analysis of the challenges of this new task and highlight potential future directions." 
+ ], + "bbox": [ + 500, + 700, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "9203", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 89, + 218, + 106 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Visual Reasoning There have been numerous tasks focusing on learning visual concepts from natural language, including visually-grounded question answering [18, 19], text-image retrieval [59] and so on. Visual reasoning has drawn much attention recently as it requires human-like understanding of the visual scene. A wide variety of benchmarks have been created over the recent years [7, 8, 23, 27, 33, 69]. However, they mainly focus on visual reasoning from 2D single-view images, while there's strong psychological evidence that human beings perform visual reasoning on the underlying 3D representations. In this paper, we propose the novel task of visual reasoning from multi-view images, and collect a large-scale benchmark for this task. In recent years, numerous visual reasoning models have also been proposed, ranging from attention-based methods [5, 30], graph-based methods [28], to models based on large pretrained vision-language model [9, 38]. These methods model the reasoning process implicitly with neural networks. Neural-symbolic methods [6, 40, 65] explicitly perform symbolic reasoning on the objects representations and language representations. They use perception models to extract 2D masks as a first step, and then execute operators and ground concepts on these pre-segmented masks, but are limited to a set of predefined concepts on simple scenes. [26] proposes to use the feature vectors from occupancy networks [42] to do visual reasoning in the 3D space. However, they also use a synthetic dataset, and learn a limited set of semantic concepts from scratch. We propose to learn 3D neural field features from 2D multi-view real-world images, and incorporate a 2D VLM for open-vocabulary reasoning.", + "bbox": [ + 76, + 114, + 472, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Reasoning Understanding and reasoning about 3D scenes has been a long-standing challenge. Recent works focus on leveraging language to explore 3D scenes, such as object captioning [3,4] and object localization from language [1, 17, 29]. Our work is mostly related to 3D Visual Question Answering [2, 16, 62, 64] as we both focus on answering questions and reasoning about 3D scenes. However, these works use point clouds as 3D representations, which diverts from the way human beings perform 3D reasoning. Instead of being given an entire 3D representation all at once, human beings would actively move and explore the environment, integrating multi-view information to get a compact 3D representation. Therefore, we propose 3D reasoning from multi-view images. In addition, since 3D assets paired with natural language descriptions are hard to get in real-life scenarios, previous works struggle to ground open-vocabulary concepts. 
In our work, we leverage 2D VLMs for zero-shot open-vocabulary concept grounding in the 3D space.", + "bbox": [ + 76, + 568, + 472, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Embodied Reasoning Our work is also closely related to Embodied Question Answering (EQA) [11, 67] and Interactive Question Answering (IQA) [22, 35], which also involve an embodied agent exploring the environment and answering", + "bbox": [ + 76, + 840, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the question. However, the reasoning mainly focuses on the outcome or the history of the navigation on 2D images and does not require a holistic 3D understanding of the environment. There are also works [12, 20, 51, 54, 56, 68] targeting instruction following in embodied environments, in which an agent is asked to perform a series of tasks based on language instructions. Different from their settings, for our benchmark an embodied agent actively explores the environment and takes multi-view images for 3D-related reasoning.", + "bbox": [ + 496, + 90, + 893, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural Fields Our approach utilizes neural fields to parameterize an underlying 3D compact representations of scenes for reasoning. Neural field models (e.g., [43]) have gained much popularity since they can reconstruct a volumetric 3D scene representation from a set of images. Recent works [21, 24, 57, 66] have pushed it further by using classic voxel-grids to explicitly store the scene properties (e.g., density, color and feature) for rendering, which allows for real-time rendering and is utilized by this paper. Neural fields have also been used to represent dynamic scenes [14, 44], appearance [43, 45, 49, 53, 63], physics [34], robotics [32, 52], acoustics [39] and more general multi-modal signals [13]. There are also some works that integrate semantics or language in neural fields [31, 60]. However, they mainly focus on using language for manipulation, editing or generation. [26] leverages neural descriptor field [52] for 3D concept grounding. However, they require ground-truth occupancy values to train the neural field, which can not be applied to real-world scenes. In this paper, we propose to leverage voxel-based neural radiance field [57] to get the compact representations for 3D visual reasoning.", + "bbox": [ + 496, + 228, + 895, + 546 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Dataset Generation", + "text_level": 1, + "bbox": [ + 500, + 561, + 687, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Multi-View Images", + "text_level": 1, + "bbox": [ + 500, + 587, + 683, + 603 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our dataset includes 5k 3D scenes from the Habitat-Matterport 3D Dataset (HM3D) dataset [47], and approximately 600k images rendered from the 3D scenes. The images are rendered via Habitat [50, 58].", + "bbox": [ + 496, + 611, + 893, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scene Generation We build our benchmark on top of the HM3DSem dataset [61], which is a large-scale dataset of 3D real-world indoor scenes with densely annotated semantics. It consists of 142,646 object instance annotations across 216 3D spaces and 3,100 rooms within those spaces. 
HM3D dataset uses texture information to annotate pixel-accurate object boundaries, which provides large-scale object annotations and ensures the scale, quality, and diversity of 3D visual reasoning questions of our benchmark.", + "bbox": [ + 496, + 672, + 895, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To construct a benchmark that covers questions of different difficulty levels, it's crucial that we include 3D scenes of different scales in our benchmark. We start with single rooms in HM3D scenes, which has an appropriate amount of semantic concepts and relationships to base some simple questions on. To get the scale of single rooms, we calculate bounding", + "bbox": [ + 496, + 809, + 895, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "9204", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "boxes of rooms according to floor instance segmentations. We then proceed to generate bounding boxes for scenes with multiple adjacent rooms. For more complex holistic scene understanding, we also include whole-house scenes, which may contain tens of rooms. Overall, the 3DMV-VQA benchmark contains three levels of scenes (2000 single-room scenes, 2000 multi-room scenes and 100 whole-house scenes).", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Rendering After we get the bounding box of each scene, we load the scene into the Habitat simulator. We also put a robot agent with an RGB sensor at a random initial point in the bounding box. The data is collected via exploration of the robot agent. Specifically, at each step of the data collection process, we sample a navigable point and make the agent move to the point along the shortest path. When the agent has arrived at a point, we rotate the agent $30^{\\circ}$ along z-axis for 12 times so that the agent can observe the $360^{\\circ}$ view of the scene at the position. It can also look up and down, with a random mild angle from $[-10^{\\circ}, 10^{\\circ}]$ along the x-axis. A picture is taken each time the agent rotates to a new orientation. In total 12 pictures are taken from each point. While traveling between points, the robot agent further takes pictures. We also exploit a policy such that when the camera is too far from or too close to an object and thus the agent cannot see anything, we discard the bad-view images.", + "bbox": [ + 75, + 196, + 473, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Questions and Answers", + "text_level": 1, + "bbox": [ + 76, + 460, + 294, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We pair each scene with machine-generated questions from pre-defined templates. All questions are open-ended and can be answered with a single word (samples in Fig. 1). Concepts and Relationships To generate questions and answers, we utilize the semantic annotations of HM3DSem [61] to get the semantic concepts and their bounding boxes, as well as the bounding boxes of the rooms. We merge semantic concepts with similar meanings (e.g., L-shaped sofa to sofa, desk chair / computer chair e.g. to chair). We also define 11 relationships: inside, above, below, on the top of, close, far, large, small, between, on the left, and on the right. 
Before generating questions, we first generate a scene graph for each scene containing all concepts and relationships.", + "bbox": [ + 75, + 483, + 470, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Question Types We define four types of questions: concept, counting, relation and comparison.", + "bbox": [ + 76, + 680, + 470, + 709 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Concept. Conceptual questions query if there's an object of a certain semantic concept in the scene, or whether there's a room containing the objects of the semantic concept.", + "- Counting. Counting-related questions ask about how many instances of a semantic concept are in the scene, or how many rooms contain objects of the semantic concept.", + "- Relation. Relational questions ask about the 11 relationships and their compositions. Based on the number of relations in a question, we have one-hop to three-hop questions for the relation type.", + "- Comparison. The comparison question type focuses on the comparison of two objects, two semantic concepts or two" + ], + "bbox": [ + 76, + 709, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "rooms. It can be combined with the relational concepts to compare two objects (e.g., larger, closer to, more left etc). It also compares the number of instances of two semantic concepts, or the number of objects of certain concepts in different rooms.", + "bbox": [ + 506, + 90, + 893, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Bias Control. Similar to previous visual reasoning benchmarks [26, 33], we use machine-generated questions since the generation process is fully controllable so that we can avoid dataset bias. Questions are generated from pre-defined templates, and transformed into natural language questions with associated semantic concepts and relationships from the scene. We manually define 41 templates for question generation. We use depth-first search to generate questions. We perform bias control based on three perspectives: template counts, answer counts, and concept counts. For selecting templates, we sort the templates each time we generate a question to ensure a balanced question distribution. We force a flat answer distribution for each template by rejection sampling. Specifically, once we generate a question and an answer, if the number of the questions having the same answer and template is significantly larger than other answers, we discard it and continue searching. Once we find an answer that fits in the ideal answer distribution, we stop the depth-first searching for this question. We also force a flat concept distribution for each template using the same method. In addition to controlling the number of concepts mentioned in the templates, we also control the number of relation tuples consisting of the same concept sets.", + "bbox": [ + 496, + 176, + 893, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Method", + "text_level": 1, + "bbox": [ + 500, + 537, + 589, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 2 illustrates an overview of our framework. Specifically, our framework consists of three steps. First, we learn a 3D compact representation from multi-view images using neural field. And then we propose to leverage pre-trained 2D vision-and-language model to ground concepts on 3D space. 
This is achieved by 1) generating 2D pixel features using CLIP-LSeg; 2) aligning the features of 3D voxel grid and 2D pixel features from CLIP-LSeg [37]; 3) dot-product attention between the 3D features and CLIP language features [37]. Finally, to perform visual reasoning, we propose neural reasoning operators, which execute the question step by step on the 3D compact representation and outputs a final answer. For example, we use FILTER operators to ground semantic concepts on the 3D representation, GETINSTANCE to get all instances of a semantic class, and COUNT_RELATION to count how many pairs of the two semantic classes have the queried relation.", + "bbox": [ + 496, + 564, + 893, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Learning 3D Compact Scene Representations", + "text_level": 1, + "bbox": [ + 500, + 830, + 883, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Neural radiance fields [43] are capable of learning a 3D representation that can reconstruct a volumetric 3D scene representation from a set of images. Voxel-based meth", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "9205", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/19407b31f659eff8444b6c2a799e47318398d9458986c4f843c53129e65b011a.jpg", + "image_caption": [ + "Figure 2. An overview of our 3D-CLR framework. First, we learn a 3D compact scene representation from multi-view images using neural fields (I). Second, we use CLIP-LSeg model to get per-pixel 2D features (II). We utilize a 3D-2D alignment loss to assign features to the 3D compact representation (III). By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we could get the concept grounding in 3D (IV). Finally, the reasoning process is performed via a set of neural reasoning operators, such as FILTER, GET instances and COUNT_RELATION (V). Relation operators are learned via relation networks." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 890, + 385 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ods [21, 24, 57, 66] speed up the learning process by explicitly storing the scene properties (e.g., density, color and feature) in its voxel grids. We leverage Direct Voxel Grid Optimization (DVGO) [57] as our backbone for 3D compact representation for its fast speed. DVGO stores the learned density and color properties in its grid cells. The rendering of multi-view images is by interpolating through the voxel grids to get the density and color for each sampled point along each sampled ray, and integrating the colors based on the rendering alpha weights calculated from densities according to quadrature rule [41]. The model is trained by minimizing the L2 loss between the rendered multi-view images and the ground-truth multi-view images. By extracting the density voxel grid, we can get the 3D compact representation (e.g., By visualizing points with density greater than 0.5, we can get the 3D representation as shown in Fig. 2 I.)", + "bbox": [ + 75, + 469, + 472, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. 3D Semantic Concept Grounding", + "text_level": 1, + "bbox": [ + 76, + 724, + 369, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Once we extract the 3D compact representation of the scene, we need to ground the semantic concepts for reasoning from language. 
Recent work from [26] has proposed to ground concepts from paired 3D assets and question-answers. Though promising results have been achieved on synthetic data, it is not feasible for open-vocabulary 3D reasoning in real-world data, since it is hard to collect largescale 3D vision-and-language paired data. To address this challenge, our idea is to leverage pre-trained 2D vision and language model [46, 48] for 3D concept grounding in real-", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "world scenes. But how can we map 2D concepts into 3D neural field representations? Note that 3D compact representations can be learned from 2D multi-view images and that each 2D pixel actually corresponds to several 3D points along the ray. Therefore, it's possible to get 3D features from 2D per-pixel features. Inspired by this, we first add a feature voxel grid representation to DVGO, in addition to density and color, to represent 3D features. We then apply CLIP-LSeg [37] to learn per-pixel 2D features, which can be attended to by CLIP concept embeddings. We use an alignment loss to align 3D features with 2D features so that we can perform concept grounding on the 3D representations.", + "bbox": [ + 496, + 469, + 893, + 651 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2D Feature Extraction. To get per-pixel features that can be attended by concept embeddings, we use the features from language-driven semantic segmentation (CLIP-LSeg) [37], which learns 2D per-pixel features from a pre-trained vision-language model (i.e., [46]). Specifically, it uses the text encoder from CLIP, trains an image encoder to produce an embedding vector for each pixel, and calculates the scores of word-pixel correlation by dot-product. By outputting the semantic class with the maximum score of each pixel, CLIP-LSeg is able to perform zero-shot 2D semantic segmentation.", + "bbox": [ + 496, + 654, + 893, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D-2D Alignment. In addition to density and color, we also store a 512-dim feature in each grid cell in the compact representation. To align the 3D per-point features with 2D per-pixel features, we calculate an L1 loss between each pixel and each 3D point sampled on the ray of the pixel. The overall L1 loss along a ray is the weighted sum of all", + "bbox": [ + 496, + 809, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "9206", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the pixel-point alignment losses, with weights same as the rendering weights: $\\mathcal{L}_{\\mathrm{feature}} = \\sum_{i=1}^{K} w_i (\\| \\pmb{f}_i - F(\\pmb{r}) \\|)$ , where $\\pmb{r}$ is a ray corresponding to a 2D pixel, $F(\\pmb{r})$ is the 2D feature from CLIP-LSeg, $K$ is the total number of sampled points along the ray and $\\pmb{f}_i$ is the feature of point $i$ by interpolating through the feature voxel grid, $w_i$ is the rendering weight.", + "bbox": [ + 75, + 90, + 468, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Concept Grounding through Attention. Since our feature voxel grid representation is learnt from CLIP-LSeg, by calculating the dot-product attention $< f, v >$ between perpoint 3D feature $f$ and the CLIP concept embeddings $v$ , we can get zero-shot view-independent concept grounding and semantic segmentations in the 3D representation, as is presented in Fig. 
2 IV.", + "bbox": [ + 75, + 181, + 472, + 287 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Neural Reasoning Operators", + "text_level": 1, + "bbox": [ + 76, + 297, + 334, + 314 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Finally, we use the grounded semantic concepts for 3D reasoning from language. We first transform questions into a sequence of operators that can be executed on the 3D representation for reasoning. We adopt a LSTM-based semantic parser [65] for that. As [26, 40], we further devise a set of operators which can be executed on the 3D representation. Please refer to Appendix for a full list of operators.", + "bbox": [ + 75, + 321, + 470, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Filter Operators. We filter all the grid cells with a certain semantic concept.", + "bbox": [ + 76, + 428, + 468, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Get Instance Operators. We implement this by utilizing DBSCAN [15], an unsupervised algorithm which assigns clusters to a set of points. Specifically, given a set of points in the 3D space, it can group together the points that are closely packed together for instance segmentation.", + "bbox": [ + 75, + 458, + 468, + 534 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Relation Operators. We cannot directly execute the relation on the 3D representation as we have not grounded relations. Thus, we represent each relation using a distinct neural module (which is practical as the vocabulary of relations is limited [36]). We first concatenate the voxel grid representations of all the referred objects and feed them into the relation network. The relation network consists of three 3D convolutional layers and then three 3D deconvolutional layers. A score is output by the relation network indicating whether the objects have the relationship or not. Since vanilla 3D CNNs are very slow, we use Sparse Convolution [10] instead. Based on the relations asked in the questions, different relation modules are chosen.", + "bbox": [ + 75, + 534, + 470, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 744, + 209, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 770, + 267, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metric. We report the visual question answering accuracy on the proposed 3DMV-VQA dataset w.r.t the four types of questions. The train/val/test split is 7:1:2.", + "bbox": [ + 75, + 794, + 468, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details For 3D compact representations, we adopt the same architectures as DVGO, except skipping the coarse reconstruction phase and directly training the fine reconstruction phase. After that, we freeze the density voxel", + "bbox": [ + 75, + 840, + 470, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "grid and color voxel grid, for the optimization of the feature voxel grid only. The feature grid has a world size of 100 and feature dim of 512. We train the compact representations for 100,000 iterations and the 3D features for another 20,000 iterations. For LSeg, we use the official demo model, which has the ViT-L/16 image encoder and CLIP's ViT-B/32 text encoder. We follow the official script for inference and use multi-scale inference. 
For DBSCAN, we use an epsilon value of 1.5, minimum samples of 2, and we use L1 as the clustering method. For the relation networks, each relation is encoded into a three-layer sparse 3D convolution network with hidden size 64. The output is then fed into a one-layer linear network to produce a score, which is normalized by sigmoid function. We use cross-entropy loss to train the relation networks, and we use the one-hop relational questions with \"yes/no\" answers to train the relation networks.", + "bbox": [ + 496, + 90, + 893, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Baselines", + "text_level": 1, + "bbox": [ + 500, + 342, + 607, + 357 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our baselines range from vanilla neural networks, attention-based methods, fine-tuned from large-scale VLM, and graph-based methods, to neural-symbolic methods.", + "bbox": [ + 498, + 364, + 893, + 411 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- LSTM. The question is transferred to word embeddings which are input into a word-level LSTM [25]. The last LSTM hidden state is fed into a multi-layer perceptron (MLP) that outputs a distribution over answers. This method is able to model question-conditional bias since it uses no image information.", + "- CNN+LSTM. The question is encoded by the final hidden states from LSTM. We use a resnet-50 to extract frame-level features of images and average them over the time dimension. The features are fed to an MLP to predict the final answer. This is a simple baseline that examines how vanilla neural networks perform on 3DMV-VQA.", + "- 3D-Feature+LSTM. We use the 3D features we get from 3D-2D alignment and downsample the voxel grids using 3D-CNN as input, concatenated with language features from LSTM and fed to an MLP.", + "- MAC [30]. MAC utilizes a Memory, Attention and Composition cell to perform iterative reasoning process. Like CNN+LSTM, we use the average pooling over multi-view images as the feature map.", + "- MAC(V). We treat the multi-view images along a trajectory as a video. We modify the MAC model by applying a temporal attention unit across the video frames to generate a latent encoding for the video.", + "- NS-VQA [65]. This is a 2D version of our 3D-CLR model. We use CLIP-LSeg to ground 2D semantic concepts from multi-view images, and the relation network also takes the 2D features as input. We execute the operators on each image and max pool from the answers to get our final predictions." + ], + "bbox": [ + 500, + 420, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "9207", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/23ab7991e1cfd752f1d4a8a42861878aba7055929c68ab960b80aebbac7c7b4f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsConceptCountingRelationComparisonOverall
Q-type (rand.)49.410.721.649.226.4
LSTM53.415.324.055.229.8
CNN+LSTM57.822.135.259.737.8
MAC62.419.747.862.346.7
MAC(V)60.024.651.665.950.0
NS-VQA59.821.533.461.638.0
ALPRO65.812.742.268.243.3
LGCN56.219.535.566.739.1
3D-Feature+LSTM61.222.449.961.348.2
3D-CLR (Ours)66.141.357.672.357.7
", + "bbox": [ + 233, + 88, + 736, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1. Question-answering accuracy of 3D visual reasoning baselines on different question types.", + "bbox": [ + 189, + 276, + 774, + 291 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ALPRO [38]. ALPRO is a video-and-language pre-training framework. A transformer model is pretrained on large webly-source video-text pairs and can be used for downstream tasks like Video Question answering.", + "- LGCN [28]. LGCN represents the contents in the video as a location-aware graph by incorporating the location information of an object into the graph construction." + ], + "bbox": [ + 76, + 301, + 470, + 415 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Experimental Results", + "text_level": 1, + "bbox": [ + 76, + 422, + 279, + 439 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Result Analysis. We summarize the performances for each question type of baseline models in Table 1. All models are trained on the training set until convergence, tuned on the validation set, and evaluated on the test set. We provide detailed analysis below.", + "bbox": [ + 76, + 446, + 468, + 522 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, for the examination of language-bias of the dataset, we find that the performance of LSTM is only slightly higher than random and frequency, and all other baselines outperform LSTM a lot. This suggests that there's little language bias in our dataset. Second, we observe that encoding temporal information in MAC (i.e., MAC(V)) is better than average-pooling of the features, especially in counting and relation. This suggests that average-pooling of the features may cause the model to lose information from multi-view images, while attention on multi-view images helps boost the 3D reasoning performances. Third, we also find that fine-tuning on large-scale pretrained model (i.e., ALPRO) has relatively high accuracies in concept-related questions, but for counting it's only slightly higher than the random baseline, suggesting that pretraining on large-scale video-language dataset may improve the model's perception ability, but does not provide the model with the ability to tackle with more difficult reasoning types such as counting. Next, we find that LGCN has poor performances on the relational questions, indicating that building a location-aware graph over 2D objects still doesn't equip the model with 3D location reasoning abilities. Last but not least, we find that 3D-based baselines are better than their 2D counterparts. 3D-Feature+LSTM performs well on the 3D-related questions, such as counting and relation, than most of the image-based", + "bbox": [ + 76, + 523, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "basielines. Compared with 3D-CLR, NS-VQA can perform well in the conceptual questions. However, it underperforms 3D-CLR a lot in counting and relation, suggesting that these two types of questions require the holistic 3D understanding of the entire 3D scenes. Our 3D-CLR outperforms other baselines by a large margin, but is still far from satisfying. From the accuracy of the conceptual question, we can see that it can only ground approximately $66\\%$ of the semantic concepts. This indicates that our 3DMV-VQA dataset is indeed very challenging.", + "bbox": [ + 496, + 301, + 893, + 453 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative Examples. 
In Fig. 3, we show four qualitative examples. From the examples, we show that our 3D-CLR can infer an accurate 3D representation from multi-view images, as well as ground semantic concepts on the 3D representations to get the semantic segmentations of the entire scene. Our 3D-CLR can also learn 3D relationships such as \"close\", \"largest\", \"on top of\" and so on. However, 3D-CLR also fails on some questions. For the third scene in the qualitative examples, it fails to ground the concepts \"mouse\" and \"printer\". Also, it cannot accurately count the instances sometimes. We give detailed discussions below.", + "bbox": [ + 496, + 455, + 893, + 621 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Discussions", + "text_level": 1, + "bbox": [ + 500, + 633, + 624, + 647 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We perform an in-depth analysis to understand the challenge of this dataset. We leverage the modular design of our 3D-CLR, replacing individual components of the framework with ground-truth annotations for model diagnosis. The result is shown in Fig 4. 3D-CLR w/ Semantic denotes our model with ground-truth semantic concepts from HM3DSem annotations. 3D-CLR w/ Instance denotes that we have ground-truth instance segmentations of semantic concepts. From Fig. 3 and Fig. 4, we summarize several key challenges of our benchmark:", + "bbox": [ + 496, + 657, + 893, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Very close object instances From Fig. 4, we can see that even with ground-truth semantic labeling of the 3D points, 3D-CLR still has unsatisfying results on counting questions. This suggests that the instance segmentations provided by DBSCAN are not accurate enough. From the top two qualitative examples in Fig. 3, we can also see that if two chairs", + "bbox": [ + 496, + 810, + 893, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "9208", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e5f9988ee998345087af1d898b960d9fdfd34ec0911c04da788d53c90dc149f8.jpg", + "image_caption": [ + "Figure 3. Qualitative examples of our 3D-CLR. We can see that 3D-CLR can ground most of the concepts and answer most questions correctly. However, it still fails sometimes, mainly because it cannot separate close object instances and ground small objects." + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 885, + 454 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5ce1c4d9a9daa672b4fac5b8b44d25fd911caa8af39a62561b7b2db9edf488ea.jpg", + "image_caption": [ + "Figure 4. Model diagnosis of our 3D-CLR." + ], + "image_footnote": [], + "bbox": [ + 83, + 503, + 460, + 614 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "contact each other, DBSCAN will not tell them apart and thus have poor performance on counting. One crucial future direction is to improve unsupervised instance segmentations on very close object instances.", + "bbox": [ + 75, + 641, + 468, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Grounding small objects Fig. 4 suggests that 3D-CLR fails to ground a large portion of the semantic concepts, which hinders the performance. From the last example in Fig. 3, we can see that 3D-CLR fails to ground small objects like \"computer mouse\". 
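The instance-separation failure discussed above, where DBSCAN merges object instances that touch, is easy to reproduce in isolation. The sketch below is illustrative only (the point blobs are synthetic, not HM3D data); it uses scikit-learn's DBSCAN with the eps = 1.5, min_samples = 2, L1-metric settings reported in the implementation details, and shows that two well-separated blobs yield two instances while two touching blobs collapse into one.

```python
import numpy as np
from sklearn.cluster import DBSCAN

# Two "chairs" as synthetic 3D point blobs; in the second scene they touch.
rng = np.random.default_rng(0)
chair = rng.uniform(-0.4, 0.4, size=(300, 3))
separated = np.vstack([chair, chair + [3.0, 0.0, 0.0]])  # ~3 m apart
touching = np.vstack([chair, chair + [0.7, 0.0, 0.0]])   # surfaces in contact

# eps / min_samples / L1 metric follow the settings reported in Sec. 5.1.
clusterer = DBSCAN(eps=1.5, min_samples=2, metric="manhattan")

for name, pts in [("separated", separated), ("touching", touching)]:
    labels = clusterer.fit_predict(pts)
    n_instances = len(set(labels) - {-1})  # ignore noise points labelled -1
    print(f"{name}: {n_instances} instance(s)")  # separated -> 2, touching -> 1
```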
Further examination indicates there are two possible reasons: 1) CLIP-LSeg fails to assign the right features to objects with limited pixels; 2) The resolution of feature voxel grid is not high enough and therefore small objects cannot be represented in the compact representation. An interesting future direction would be learning exploration policies that enable the agents to get closer to uncertain objects that cannot be grounded.", + "bbox": [ + 75, + 702, + 472, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ambiguity on 3D relations Even with ground-truth seman", + "bbox": [ + 76, + 885, + 472, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tic and instance segmentations, the performance of the relation network still needs to be improved. We find that most of the failure cases are correlated to the \"inside\" relation. From the segmentations in Fig. 3, we can see that 3D-CLR is unable to ground the objects in the cabinets. A potential solution can be joint depth and segmentation predictions.", + "bbox": [ + 496, + 503, + 893, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 614, + 617, + 631 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduce the novel task of 3D reasoning from multi-view images. By placing embodied robot that actively explores indoor environments, we collect a large-scale benchmark named 3DMV-VQA. We also propose a new 3D-CLR model that incorporates neural field, 2D VLM, as well as reasoning operators for this task and illustrate its effectiveness. Finally, we perform an in-depth analysis to understand the challenges of this dataset and also point out potential future directions. We hope that 3DMV-VQA can be used to push the frontiers of 3D reasoning.", + "bbox": [ + 496, + 642, + 893, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. This work was supported by the MIT-IBM Watson AI Lab, DARPA MCS, DSO grant DSOCO21072, and gift funding from MERL, Cisco, Sony, and Amazon. We would also like to thank the computation support from AiMOS, a server cluster for the IBM Research AI Hardware Center.", + "bbox": [ + 496, + 809, + 893, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "9209", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Panos Achlioptas, Ahmed Abdelreehm, Fei Xia, Mohamed Elhoseiny, and Leonidas J. Guibas. Referit3d: Neural listeners for fine-grained 3d object identification in real-world scenes. In ECCV, 2020. 2, 3", + "[2] Daich Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19107-19117, 2022. 2, 3", + "[3] Dave Zhenyu Chen, Angel X. Chang, and Matthias Nießner. Scanrefer: 3d object localization in rgb-d scans using natural language. In ECCV, 2020. 2, 3", + "[4] Dave Zhenyu Chen, Ali Gholami, Matthias Nießner, and Angel X. Chang. Scan2cap: Context-aware dense captioning in rgb-d scans. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3192-3202, 2021. 3", + "[5] Z Chen, L Ma, W Luo, and KKY Wong. 
Weakly-supervised spatio-temporally grounding natural sentence in video. In ACL, 2019. 3", + "[6] Zhenfang Chen, Jiayuan Mao, Jiajun Wu, Kwan-Yee Kenneth Wong, Joshua B Tenenbaum, and Chuang Gan. Grounding physical concepts of objects and events through dynamic visual reasoning. *ICLR*, 2021. 3", + "[7] Zhenfang Chen, Peng Wang, Lin Ma, Kwan-Yee K Wong, and Qi Wu. Cops-ref: A new dataset and task on compositional referring expression comprehension. In CVPR, 2020. 3", + "[8] Zhenfang Chen, Kexin Yi, Yunzhu Li, Mingyu Ding, Antonio Torralba, Joshua B Tenenbaum, and Chuang Gan. Comphy: Compositional physical reasoning of objects and events from videos. In ICLR, 2022. 3", + "[9] Zhenfang Chen, Qinhong Zhou, Yikang Shen, Yining Hong, Hao Zhang, and Chuang Gan. See, think, confirm: Interactive prompting between vision and language models for knowledge-based visual reasoning. arXiv preprint arXiv:2301.05226, 2023. 3", + "[10] Spconv Contributors. Spconv: Spatially sparse convolution library. https://github.com/traveller59/spconv, 2022.6", + "[11] Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied question answering. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 2135-213509, 2018. 3", + "[12] Mingyu Ding, Yan Xu, Zhenfang Chen, David Daniel Cox, Ping Luo, Joshua B Tenenbaum, and Chuang Gan. Embodied concept learner: Self-supervised learning of concepts and mapping through instruction following. In CoRL. 3", + "[13] Yilun Du, M. Katherine Collins, B. Joshua Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields. In Advances in Neural Information Processing Systems, 2021. 3", + "[14] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B. Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021. 3" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in large spatial databases with noise. In KDD, 1996. 6", + "[16] Yasaman Etesam, Leon Kochiev, and Angel X Chang. 3dvqa: Visual question answering for 3d environments. In 2022 19th Conference on Robots and Vision (CRV), pages 233-240. IEEE, 2022. 2, 3", + "[17] Mingtao Feng, Zhen Li, Qi Li, Liang Zhang, Xiangdong Zhang, Guangming Zhu, Hui Zhang, Yaonan Wang, and Ajmal S. Mian. Free-form description guided 3d visual graph network for object grounding in point cloud. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3702-3711, 2021. 3", + "[18] Chuang Gan, Yandong Li, Haoxiang Li, Chen Sun, and Boqing Gong. Vqs: Linking segmentations to questions and answers for supervised attention in vqa and question-focused semantic segmentation. In ICCV, pages 1811-1820, 2017. 3", + "[19] Siddha Ganju, Olga Russakovsky, and Abhinav Kumar Gupta. What's in a question: Using visual questions as a form of supervision. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6422-6431, 2017. 3", + "[20] Xiaofeng Gao, Qiaozi Gao, Ran Gong, Kaixiang Lin, Govind Thattai, and Gaurav S Sukhatme. Dialfred: Dialogue-enabled agents for embodied instruction following. arXiv, 2022. 3", + "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien P. C. Valentin. 
Fastnerf: High-fidelity neural rendering at 200fps. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 14326-14335, 2021. 3, 5", + "[22] Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4089-4098, 2018. 3", + "[23] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6325-6334, 2017. 2, 3", + "[24] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul E. Debevec. Baking neural radiance fields for real-time view synthesis. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 3, 5", + "[25] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9:1735-1780, 1997. 6", + "[26] Yining Hong, Yilun Du, Chunru Lin, Joshua B Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. arXiv preprint arXiv:2207.06403, 2022. 3, 4, 5, 6", + "[27] Yining Hong, Li Yi, Joshua B. Tenenbaum, Antonio Torralba, and Chuang Gan.Ptr: A benchmark for part-based conceptual, relational, and physical reasoning. In NeurIPS, 2021. 3", + "[28] Deng Huang, Peihao Chen, Runhao Zeng, Qing Du, Mingkui Tan, and Chuang Gan. Location-aware graph convolutional networks for video question answering. In AAAI, 2020. 3, 7", + "[29] Pin-Hao Huang, Han-Hung Lee, Hwann-Tzong Chen, and Tyng-Luh Liu. Text-guided graph neural networks for referring 3d instance segmentation. In AAAI, 2021. 3" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9210", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] D. A. Hudson and Christopher D. Manning. Compositional attention networks for machine reasoning. *ICLR*, 2018. 3, 6", + "[31] Ajay Jain, Ben Mildenhall, Jonathan T. Barron, P. Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 857-866, 2022. 3", + "[32] Zhenyu Jiang, Yifeng Zhu, Maxwell Svetlik, Kuan Fang, and Yuke Zhu. Synergies between affordance and geometry: 6-dof grasp detection via implicit representations. ArXiv, abs/2104.01542, 2021. 3", + "[33] J. Johnson, Bharath Hariharan, L. V. D. Maaten, Li Fei-Fei, C. L. Zitnick, and Ross B. Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1988-1997, 2017. 2, 3, 4", + "[34] Stefan Kollmannsberger, Davide D'Angella, Moritz Jokeit, and Leon Alexander Herrmann. Physics-informed neural networks. Deep Learning in Computational Mechanics, 2021. 3", + "[35] Natalia Konstantinova and Constantin Orasan. Interactive question answering. In EMNLP. IGI Global, 2013. 3", + "[36] Barbara Landau and Ray Jackendoff. “what” and “where” in spatial language and spatial cognition. Behavioral and Brain Sciences, 16:217-238, 1993. 6", + "[37] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation. 
*ICLR*, 2022, 2, 4, 5", + "[38] Dongxu Li, Junnan Li, Hongdong Li, Juan Carlos Niebles, and Steven C. H. Hoi. Align and prompt: Video-and-language pre-training with entity prompts. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4943-4953, 2022. 3, 7", + "[39] Andrew Luo, Yilun Du, Michael J Tarr, Joshua B Tenenbaum, Antonio Torralba, and Chuang Gan. Learning neural acoustic fields. arXiv preprint arXiv:2204.00628, 2022. 3", + "[40] Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neuro-symbolic concept learner: Interpreting scenes words and sentences from natural supervision. ArXiv, abs/1904.12584, 2019. 3, 6", + "[41] Nelson L. Max. Optical models for direct volume rendering. IEEE Trans. Vis. Comput. Graph., 1:99-108, 1995. 5", + "[42] Lars M. Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4455-4465, 2019. 3", + "[43] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. ECCV, 2020. 3, 4", + "[44] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE International Conference on Computer Vision, pages 5379-5389, 2019. 3" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. CVPR, 2020. 3", + "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 5", + "[47] Santhosh K. Ramakrishnan, Aaron Gokaslan, Erik Wijmans, Oleksandr Maksymets, Alexander Clegg, John Turner, Eric Undersander, Wojciech Galuba, Andrew Westbury, Angel X. Chang, Manolis Savva, Yili Zhao, and Dhruv Batra. Habitatmatterport 3d dataset (hm3d): 1000 large-scale 3d environments for embodied ai. ArXiv, abs/2109.08238, 2021. 2, 3", + "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. ArXiv, abs/2102.12092, 2021.5", + "[49] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. ICCV, pages 2304-2314, 2019. 3", + "[50] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 3", + "[51] Mohit Shridhar, Jesse Thomason, Daniel Gordon, Yonatan Bisk, Winson Han, Roozbeh Mottaghi, Luke Zettlemoyer, and Dieter Fox. Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In CVPR, 2020. 
3", + "[52] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. arXiv preprint arXiv:2112.05124, 2021. 3", + "[53] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Proc. NeurIPS 2019, 2019. 3", + "[54] Chan Hee Song, Jihyung Kil, Tai-Yu Pan, Brian M Sadler, Wei-Lun Chao, and Yu Su. One step at a time: Long-horizon vision-and-language navigation with milestones. arXiv preprint arXiv:2202.07028, 2022.3", + "[55] Elizabeth S Spelke, Karen Breinlinger, Kristen Jacobson, and Ann Phillips. Gestalt Relations and Object Perception: A Developmental Study. Perception, 22(12):1483-1501, 1993. 2", + "[56] Alessandro Suglia, Qiaozi Gao, Jesse Thomason, Govind Thattai, and Gaurav Sukhatme. Embodied bert: A transformer model for embodied, language-guided visual task completion. arXiv preprint arXiv:2108.04927, 2021. 3", + "[57] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "9211", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "reconstruction. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3, 5", + "[58] Andrew Szot, Alex Clegg, Eric Undersander, Erik Wijmans, Yili Zhao, John Turner, Noah Maestre, Mustafa Mukadam, Devendra Chaplot, Oleksandr Maksymets, Aaron Gokaslan, Vladimir Vondrus, Sameer Dharur, Franziska Meier, Wojciech Galuba, Angel Chang, Zsolt Kira, Vladlen Koltun, Jitendra Malik, Manolis Savva, and Dhruv Batra. Habitat 2.0: Training home assistants to rearrange their habitat. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 3", + "[59] Ivan Vendrov, Ryan Kiros, Sanja Fidler, and Raquel Urtasun. Order-embeddings of images and language. CoRR, abs/1511.06361, 2016. 3", + "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. ArXiv, abs/2112.05139, 2021. 3", + "[61] Karmesh Yadav, Ram Ramrakhya, Santhosh Kumar Ramakrishnan, Theo Gervet, John Turner, Aaron Gokaslan, Noah Maestre, Angel Xuan Chang, Dhruv Batra, Manolis Savva, et al. Habitat-matterport 3d semantics dataset. arXiv preprint arXiv:2210.05633, 2022. 2, 3, 4", + "[62] Xu Yan, Zhihao Yuan, Yuhao Du, Yinghong Liao, Yao Guo, Zhen Li, and Shuguang Cui. Clevr3d: Compositional language and elementary visual reasoning for question answering in 3d real-world scenes. arXiv preprint arXiv:2112.11691, 2021. 2, 3", + "[63] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Proc. NeurIPS, 2020. 3", + "[64] Shuquan Ye, Dongdong Chen, Songfang Han, and Jing Liao. 3d question answering. ArXiv, abs/2112.08359, 2021. 2, 3", + "[65] Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Joshua B. Tenenbaum. Neural-symbolic vqa: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. 3, 6", + "[66] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 
Plenoptrees for real-time rendering of neural radiance fields. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5732-5741, 2021. 3, 5", + "[67] Licheng Yu, Xinlei Chen, Georgia Gkioxari, Mohit Bansal, Tamara L Berg, and Dhruv Batra. Multi-target embodied question answering. In CVPR, pages 6309-6318, 2019. 3", + "[68] Kaizhi Zheng, Xiaotong Chen, Odest Chadwicke Jenkins, and Xin Eric Wang. Vlmbench: A compositional benchmark for vision-and-language manipulation. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, 2022. 3", + "[69] Yuke Zhu, O. Groth, Michael S. Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4995-5004, 2016. 2, 3" + ], + "bbox": [ + 78, + 90, + 470, + 854 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "9212", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_model.json b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6fdab27747a7d0b63f4f6f1af778a551bec5f9b4 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_model.json @@ -0,0 +1,2268 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.131, + 0.8, + 0.154 + ], + "angle": 0, + "content": "3D Concept Learning and Reasoning from Multi-View Images" + }, + { + "type": "text", + "bbox": [ + 0.259, + 0.18, + 0.71, + 0.27 + ], + "angle": 0, + "content": "Yining Hong\\(^{1}\\), Chunru Lin\\(^{2}\\), Yilun Du\\(^{3}\\), Zhenfang Chen\\(^{5}\\), Joshua B. Tenenbaum\\(^{3}\\), Chuang Gan\\(^{4,5}\\), \\(^{1}\\)UCLA, \\(^{2}\\)Shanghai Jiaotong University, \\(^{3}\\)MIT CSAIL, \\(^{4}\\)UMass Amherst, \\(^{5}\\)MIT-IBM Watson AI Lab https://vis-www.cs.umass.edu/3d-clr/" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.306, + 0.294, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.543, + 0.276, + 0.586 + ], + "angle": 0, + "content": "Concept: Q: Are there any televisions? 
A: Yes" + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.308, + 0.445, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.302, + 0.424, + 0.446, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.308, + 0.594, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.424, + 0.593, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.308, + 0.741, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.598, + 0.424, + 0.74, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.309, + 0.888, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.746, + 0.424, + 0.888, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.541, + 0.48, + 0.602 + ], + "angle": 0, + "content": "Counting: \nQ: How many chairs are close to the table in the room with plant on the cabinet? A: 6" + }, + { + "type": "image_caption", + "bbox": [ + 0.279, + 0.617, + 0.456, + 0.645 + ], + "angle": 0, + "content": "Q: How many rooms have sofas? A: 1" + }, + { + "type": "image_caption", + "bbox": [ + 0.483, + 0.542, + 0.68, + 0.602 + ], + "angle": 0, + "content": "Relation: Q: Facing the computer from the curtain, is there a lamp on the right? A: Yes" + }, + { + "type": "image_caption", + "bbox": [ + 0.483, + 0.618, + 0.667, + 0.645 + ], + "angle": 0, + "content": "Q: What's on the cabinet in the smaller room? A: Plant" + }, + { + "type": "image_caption", + "bbox": [ + 0.684, + 0.542, + 0.884, + 0.645 + ], + "angle": 0, + "content": "Comparison: \nQ: Are there fewer pictures in the larger room than the other room? A: No \nQ: Is the computer closer to a printer or a lamp? \nA: Printer" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.648, + 0.894, + 0.677 + ], + "angle": 0, + "content": "Figure 1. An exemplar scene with multi-view images and question-answer pairs of our 3DMV-VQA dataset. 3DMV-VQA contains four question types: concept, counting, relation, comparison. Orange words denote semantic concepts; blue words denote the relations." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.702, + 0.314, + 0.718 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Humans are able to accurately reason in 3D by gathering multi-view observations of the surrounding world. Inspired by this insight, we introduce a new large-scale benchmark for 3D multi-view visual question answering (3DMV-VQA). This dataset is collected by an embodied agent actively moving and capturing RGB images in an environment using the Habitat simulator. In total, it consists of approximately 5k scenes, 600k images, paired with 50k questions. We evaluate various state-of-the-art models for visual reasoning on our benchmark and find that they all perform poorly. We suggest" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.895, + 0.901 + ], + "angle": 0, + "content": "that a principled approach for 3D reasoning from multi-view images should be to infer a compact 3D representation of the world from the multi-view images, which is further grounded on open-vocabulary semantic concepts, and then to execute reasoning on these 3D representations. 
As the first step towards this approach, we propose a novel 3D concept learning and reasoning (3D-CLR) framework that seamlessly combines these components via neural fields, 2D pre-trained vision-language models, and neural reasoning operators. Experimental results suggest that our framework outperforms baseline models by a large margin, but the challenge remains largely unsolved. We further perform an in-depth analysis of the challenges and highlight potential future directions." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9202" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.208, + 0.106 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.116, + 0.473, + 0.326 + ], + "angle": 0, + "content": "Visual reasoning, the ability to composite rules on internal representations to reason and answer questions about visual scenes, has been a long-standing challenge in the field of artificial intelligence and computer vision. Several datasets [23, 33, 69] have been proposed to tackle this challenge. However, they mainly focus on visual reasoning on 2D single-view images. Since 2D single-view images only cover a limited region of the whole space, such reasoning inevitably has several weaknesses, including occlusion, and failing to answer 3D-related questions about the entire scene that we are interested in. As shown in Fig. 1, it's difficult, even for humans, to count the number of chairs in a scene due to the object occlusion, and it's even harder to infer 3D relations like \"closer\" from a single-view 2D image." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.328, + 0.473, + 0.538 + ], + "angle": 0, + "content": "On the other hand, there's strong psychological evidence that human beings conduct visual reasoning in the underlying 3D representations [55]. Recently, there have been several works focusing on 3D visual question answering [2,16,62,64]. They mainly use traditional 3D representations (e.g., point clouds) for visual reasoning. This is inconsistent with the way human beings perform 3D reasoning in real life. Instead of being given an entire 3D representation of the scene at once, humans will actively walk around and explore the whole environment, ingesting image observations from different views and converting them into a holistic 3D representation that assists them in understanding and reasoning about the environment. Such abilities are crucial for many embodied AI applications, such as building assistive robots." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.539, + 0.473, + 0.81 + ], + "angle": 0, + "content": "To this end, we propose the novel task of 3D visual reasoning from multi-view images taken by active exploration of an embodied agent. Specifically, we generate a large-scale benchmark, 3DMV-VQA (3D multi-view visual question answering), that contains approximately 5k scenes and 50k question-answering pairs about these scenes. For each scene, we provide a collection of multi-view image observations. We generate this dataset by placing an embodied agent in the Habitat-Matterport environment [47], which actively explores the environment and takes pictures from different views. 
We also obtain scene graph annotations from the Habitat-Matterport 3D semantics dataset (HM3DSem) [61], including ground-truth locations, segmentations, semantic information of the objects, as well as relationships among the objects in the environments, for model diagnosis. To evaluate the models' 3D reasoning abilities on the entire environment, we design several 3D-related question types, including concept, counting, relation and comparison." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.811, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Given this new task, the key challenges we would like to investigate include: 1) how to efficiently obtain the compact visual representation to encode crucial properties (e.g., semantics and relations) by integrating all incomplete observations of the environment in the process of active exploration for 3D visual reasoning? 2) How to ground the semantic con" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.894, + 0.152 + ], + "angle": 0, + "content": "cepts on these 3D representations that could be leveraged for downstream tasks, such as visual reasoning? 3) How to infer the relations among the objects, and perform step-by-step reasoning?" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.153, + 0.895, + 0.56 + ], + "angle": 0, + "content": "As the first step to tackling these challenges, we propose a novel model, 3D-CLR (3D Concept Learning and Reasoning). First, to efficiently obtain a compact 3D representation from multi-view images, we use a neural-field model based on compact voxel grids [57] which is both fast to train and effective at storing scene properties in its voxel grids. As for concept learning, we observe that previous works on 3D scene understanding [1,3] lack the diversity and scale with regard to semantic concepts due to the limited amount of paired 3D-and-language data. Although large-scale vision-language models (VLMs) have achieved impressive performances for zero-shot semantic grounding on 2D images, leveraging these pretrained models for effective open-vocabulary 3D grounding of semantic concepts remains a challenge. To address these challenges, we propose to encode the features of a pre-trained 2D vision-language model (VLM) into the compact 3D representation defined across voxel locations. Specifically, we use the CLIP-LSeg [37] model to obtain features on multi-view images, and propose an alignment loss to map the features in our 3D voxel grid to 2D pixels. By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we can ground the semantic concepts in the 3D compact representation. Finally, to answer the questions, we introduce a set of neural reasoning operators, including FILTER, COUNT, RELATION operators and so on, which take the 3D representations of different objects as input and output the predictions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.561, + 0.895, + 0.666 + ], + "angle": 0, + "content": "We conduct experiments on our proposed 3DMV-VQA benchmark. Experimental results show that our proposed 3D-CLR outperforms all baseline models a lot. However, failure cases and model diagnosis show that challenges still exist concerning the grounding of small objects and the separation of close object instances. We provide an in-depth analysis of the challenges and discuss potential future directions." 
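To make the dot-product concept grounding step concrete, here is a minimal sketch (not the authors' released code; tensor names and shapes are illustrative): per-point features distilled from CLIP-LSeg are compared against CLIP text embeddings of the concept vocabulary, and each point is assigned the concept with the highest attention score. In the real pipeline the text embeddings would come from CLIP's text encoder; random tensors stand in for them here.

```python
import torch
import torch.nn.functional as F

def ground_concepts(point_feats: torch.Tensor, text_embeds: torch.Tensor):
    """Zero-shot concept grounding by dot-product attention.

    point_feats : (N, D) per-point features from the feature voxel grid (D = 512).
    text_embeds : (C, D) CLIP text embeddings, one row per concept name.
    Returns per-point hard labels (N,) and the attention scores (N, C).
    """
    point_feats = F.normalize(point_feats, dim=-1)   # cosine-style similarity
    text_embeds = F.normalize(text_embeds, dim=-1)
    scores = point_feats @ text_embeds.t()           # (N, C) dot-product attention
    return scores.argmax(dim=-1), scores

# Toy usage with random stand-ins for the real CLIP-LSeg / CLIP features.
concepts = ["chair", "table", "television"]
pts = torch.randn(10_000, 512)            # would come from the 3D feature grid
txt = torch.randn(len(concepts), 512)     # would come from CLIP's text encoder
labels, _ = ground_concepts(pts, txt)
print({c: int((labels == i).sum()) for i, c in enumerate(concepts)})
```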
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.667, + 0.894, + 0.682 + ], + "angle": 0, + "content": "To sum up, we have the following contributions in this paper." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.702, + 0.892, + 0.732 + ], + "angle": 0, + "content": "- We propose the novel task of 3D concept learning and reasoning from multi-view images." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.738, + 0.894, + 0.783 + ], + "angle": 0, + "content": "- By having robots actively explore the embodied environments, we collect a large-scale benchmark on 3D multiview visual question answering (3DMV-VQA)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.789, + 0.894, + 0.864 + ], + "angle": 0, + "content": "- We devise a model that incorporates a neural radiance field, 2D pretrained vision and language model, and neural reasoning operators to ground the concepts and perform 3D reasoning on the multi-view images. We illustrate that our model outperforms all baseline models." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "- We perform an in-depth analysis of the challenges of this new task and highlight potential future directions." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.702, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9203" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.22, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.116, + 0.473, + 0.568 + ], + "angle": 0, + "content": "Visual Reasoning There have been numerous tasks focusing on learning visual concepts from natural language, including visually-grounded question answering [18, 19], text-image retrieval [59] and so on. Visual reasoning has drawn much attention recently as it requires human-like understanding of the visual scene. A wide variety of benchmarks have been created over the recent years [7, 8, 23, 27, 33, 69]. However, they mainly focus on visual reasoning from 2D single-view images, while there's strong psychological evidence that human beings perform visual reasoning on the underlying 3D representations. In this paper, we propose the novel task of visual reasoning from multi-view images, and collect a large-scale benchmark for this task. In recent years, numerous visual reasoning models have also been proposed, ranging from attention-based methods [5, 30], graph-based methods [28], to models based on large pretrained vision-language model [9, 38]. These methods model the reasoning process implicitly with neural networks. Neural-symbolic methods [6, 40, 65] explicitly perform symbolic reasoning on the objects representations and language representations. They use perception models to extract 2D masks as a first step, and then execute operators and ground concepts on these pre-segmented masks, but are limited to a set of predefined concepts on simple scenes. [26] proposes to use the feature vectors from occupancy networks [42] to do visual reasoning in the 3D space. However, they also use a synthetic dataset, and learn a limited set of semantic concepts from scratch. We propose to learn 3D neural field features from 2D multi-view real-world images, and incorporate a 2D VLM for open-vocabulary reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.569, + 0.473, + 0.841 + ], + "angle": 0, + "content": "3D Reasoning Understanding and reasoning about 3D scenes has been a long-standing challenge. Recent works focus on leveraging language to explore 3D scenes, such as object captioning [3,4] and object localization from language [1, 17, 29]. Our work is mostly related to 3D Visual Question Answering [2, 16, 62, 64] as we both focus on answering questions and reasoning about 3D scenes. However, these works use point clouds as 3D representations, which diverts from the way human beings perform 3D reasoning. Instead of being given an entire 3D representation all at once, human beings would actively move and explore the environment, integrating multi-view information to get a compact 3D representation. Therefore, we propose 3D reasoning from multi-view images. In addition, since 3D assets paired with natural language descriptions are hard to get in real-life scenarios, previous works struggle to ground open-vocabulary concepts. In our work, we leverage 2D VLMs for zero-shot open-vocabulary concept grounding in the 3D space." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.841, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Embodied Reasoning Our work is also closely related to Embodied Question Answering (EQA) [11, 67] and Interactive Question Answering (IQA) [22, 35], which also involve an embodied agent exploring the environment and answering" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.228 + ], + "angle": 0, + "content": "the question. However, the reasoning mainly focuses on the outcome or the history of the navigation on 2D images and does not require a holistic 3D understanding of the environment. There are also works [12, 20, 51, 54, 56, 68] targeting instruction following in embodied environments, in which an agent is asked to perform a series of tasks based on language instructions. Different from their settings, for our benchmark an embodied agent actively explores the environment and takes multi-view images for 3D-related reasoning." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.229, + 0.897, + 0.547 + ], + "angle": 0, + "content": "Neural Fields Our approach utilizes neural fields to parameterize an underlying 3D compact representations of scenes for reasoning. Neural field models (e.g., [43]) have gained much popularity since they can reconstruct a volumetric 3D scene representation from a set of images. Recent works [21, 24, 57, 66] have pushed it further by using classic voxel-grids to explicitly store the scene properties (e.g., density, color and feature) for rendering, which allows for real-time rendering and is utilized by this paper. Neural fields have also been used to represent dynamic scenes [14, 44], appearance [43, 45, 49, 53, 63], physics [34], robotics [32, 52], acoustics [39] and more general multi-modal signals [13]. There are also some works that integrate semantics or language in neural fields [31, 60]. However, they mainly focus on using language for manipulation, editing or generation. [26] leverages neural descriptor field [52] for 3D concept grounding. However, they require ground-truth occupancy values to train the neural field, which can not be applied to real-world scenes. In this paper, we propose to leverage voxel-based neural radiance field [57] to get the compact representations for 3D visual reasoning." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.562, + 0.688, + 0.578 + ], + "angle": 0, + "content": "3. 
Dataset Generation" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.588, + 0.684, + 0.604 + ], + "angle": 0, + "content": "3.1. Multi-View Images" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.612, + 0.895, + 0.673 + ], + "angle": 0, + "content": "Our dataset includes 5k 3D scenes from the Habitat-Matterport 3D Dataset (HM3D) dataset [47], and approximately 600k images rendered from the 3D scenes. The images are rendered via Habitat [50, 58]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.896, + 0.809 + ], + "angle": 0, + "content": "Scene Generation We build our benchmark on top of the HM3DSem dataset [61], which is a large-scale dataset of 3D real-world indoor scenes with densely annotated semantics. It consists of 142,646 object instance annotations across 216 3D spaces and 3,100 rooms within those spaces. HM3D dataset uses texture information to annotate pixel-accurate object boundaries, which provides large-scale object annotations and ensures the scale, quality, and diversity of 3D visual reasoning questions of our benchmark." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.897, + 0.903 + ], + "angle": 0, + "content": "To construct a benchmark that covers questions of different difficulty levels, it's crucial that we include 3D scenes of different scales in our benchmark. We start with single rooms in HM3D scenes, which has an appropriate amount of semantic concepts and relationships to base some simple questions on. To get the scale of single rooms, we calculate bounding" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "9204" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.197 + ], + "angle": 0, + "content": "boxes of rooms according to floor instance segmentations. We then proceed to generate bounding boxes for scenes with multiple adjacent rooms. For more complex holistic scene understanding, we also include whole-house scenes, which may contain tens of rooms. Overall, the 3DMV-VQA benchmark contains three levels of scenes (2000 single-room scenes, 2000 multi-room scenes and 100 whole-house scenes)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.198, + 0.474, + 0.455 + ], + "angle": 0, + "content": "Image Rendering After we get the bounding box of each scene, we load the scene into the Habitat simulator. We also put a robot agent with an RGB sensor at a random initial point in the bounding box. The data is collected via exploration of the robot agent. Specifically, at each step of the data collection process, we sample a navigable point and make the agent move to the point along the shortest path. When the agent has arrived at a point, we rotate the agent \\(30^{\\circ}\\) along z-axis for 12 times so that the agent can observe the \\(360^{\\circ}\\) view of the scene at the position. It can also look up and down, with a random mild angle from \\([-10^{\\circ}, 10^{\\circ}]\\) along the x-axis. A picture is taken each time the agent rotates to a new orientation. In total 12 pictures are taken from each point. While traveling between points, the robot agent further takes pictures. We also exploit a policy such that when the camera is too far from or too close to an object and thus the agent cannot see anything, we discard the bad-view images." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.461, + 0.295, + 0.476 + ], + "angle": 0, + "content": "3.2. 
Questions and Answers" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.484, + 0.472, + 0.681 + ], + "angle": 0, + "content": "We pair each scene with machine-generated questions from pre-defined templates. All questions are open-ended and can be answered with a single word (samples in Fig. 1). Concepts and Relationships To generate questions and answers, we utilize the semantic annotations of HM3DSem [61] to get the semantic concepts and their bounding boxes, as well as the bounding boxes of the rooms. We merge semantic concepts with similar meanings (e.g., L-shaped sofa to sofa, desk chair / computer chair e.g. to chair). We also define 11 relationships: inside, above, below, on the top of, close, far, large, small, between, on the left, and on the right. Before generating questions, we first generate a scene graph for each scene containing all concepts and relationships." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.681, + 0.471, + 0.71 + ], + "angle": 0, + "content": "Question Types We define four types of questions: concept, counting, relation and comparison." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.71, + 0.472, + 0.755 + ], + "angle": 0, + "content": "- Concept. Conceptual questions query if there's an object of a certain semantic concept in the scene, or whether there's a room containing the objects of the semantic concept." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.758, + 0.471, + 0.803 + ], + "angle": 0, + "content": "- Counting. Counting-related questions ask about how many instances of a semantic concept are in the scene, or how many rooms contain objects of the semantic concept." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.807, + 0.472, + 0.868 + ], + "angle": 0, + "content": "- Relation. Relational questions ask about the 11 relationships and their compositions. Based on the number of relations in a question, we have one-hop to three-hop questions for the relation type." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.871, + 0.471, + 0.902 + ], + "angle": 0, + "content": "- Comparison. The comparison question type focuses on the comparison of two objects, two semantic concepts or two" + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.71, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.092, + 0.895, + 0.168 + ], + "angle": 0, + "content": "rooms. It can be combined with the relational concepts to compare two objects (e.g., larger, closer to, more left etc). It also compares the number of instances of two semantic concepts, or the number of objects of certain concepts in different rooms." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.178, + 0.895, + 0.526 + ], + "angle": 0, + "content": "Bias Control. Similar to previous visual reasoning benchmarks [26, 33], we use machine-generated questions since the generation process is fully controllable so that we can avoid dataset bias. Questions are generated from pre-defined templates, and transformed into natural language questions with associated semantic concepts and relationships from the scene. We manually define 41 templates for question generation. We use depth-first search to generate questions. We perform bias control based on three perspectives: template counts, answer counts, and concept counts. For selecting templates, we sort the templates each time we generate a question to ensure a balanced question distribution. We force a flat answer distribution for each template by rejection sampling. 
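As a rough illustration of this rejection-sampling step (elaborated immediately after this sketch), the code below keeps the per-template answer distribution approximately flat. `sample_question` is a hypothetical stand-in for the template-based generator, and the tolerance value is made up.

```python
import random
from collections import Counter

def balanced_sample(sample_question, n_questions, tolerance=1):
    """Reject (template, answer) buckets that grow ahead of their siblings.

    sample_question: hypothetical callable returning (template_id, question, answer).
    """
    kept, counts = [], Counter()
    while len(kept) < n_questions:
        template, question, answer = sample_question()
        siblings = [v for (t, _), v in counts.items() if t == template]
        smallest = min(siblings) if siblings else 0
        if counts[(template, answer)] - smallest >= tolerance:
            continue  # this answer is over-represented for the template: discard
        counts[(template, answer)] += 1
        kept.append((template, question, answer))
    return kept

# Toy generator: a heavily biased yes/no source that the sampler flattens out.
toy = lambda: ("concept", "Are there any televisions?",
               random.choices(["yes", "no"], weights=[0.8, 0.2])[0])
print(Counter(ans for _, _, ans in balanced_sample(toy, 200)))
```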
Specifically, once we generate a question and an answer, if the number of the questions having the same answer and template is significantly larger than other answers, we discard it and continue searching. Once we find an answer that fits in the ideal answer distribution, we stop the depth-first searching for this question. We also force a flat concept distribution for each template using the same method. In addition to controlling the number of concepts mentioned in the templates, we also control the number of relation tuples consisting of the same concept sets." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.539, + 0.591, + 0.555 + ], + "angle": 0, + "content": "4. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.565, + 0.895, + 0.822 + ], + "angle": 0, + "content": "Fig. 2 illustrates an overview of our framework. Specifically, our framework consists of three steps. First, we learn a 3D compact representation from multi-view images using neural field. And then we propose to leverage pre-trained 2D vision-and-language model to ground concepts on 3D space. This is achieved by 1) generating 2D pixel features using CLIP-LSeg; 2) aligning the features of 3D voxel grid and 2D pixel features from CLIP-LSeg [37]; 3) dot-product attention between the 3D features and CLIP language features [37]. Finally, to perform visual reasoning, we propose neural reasoning operators, which execute the question step by step on the 3D compact representation and outputs a final answer. For example, we use FILTER operators to ground semantic concepts on the 3D representation, GETINSTANCE to get all instances of a semantic class, and COUNT_RELATION to count how many pairs of the two semantic classes have the queried relation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.885, + 0.849 + ], + "angle": 0, + "content": "4.1. Learning 3D Compact Scene Representations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Neural radiance fields [43] are capable of learning a 3D representation that can reconstruct a volumetric 3D scene representation from a set of images. Voxel-based meth" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9205" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.891, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.391, + 0.893, + 0.461 + ], + "angle": 0, + "content": "Figure 2. An overview of our 3D-CLR framework. First, we learn a 3D compact scene representation from multi-view images using neural fields (I). Second, we use CLIP-LSeg model to get per-pixel 2D features (II). We utilize a 3D-2D alignment loss to assign features to the 3D compact representation (III). By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we could get the concept grounding in 3D (IV). Finally, the reasoning process is performed via a set of neural reasoning operators, such as FILTER, GET instances and COUNT_RELATION (V). Relation operators are learned via relation networks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.47, + 0.473, + 0.712 + ], + "angle": 0, + "content": "ods [21, 24, 57, 66] speed up the learning process by explicitly storing the scene properties (e.g., density, color and feature) in its voxel grids. 
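As a concrete, purely illustrative picture of what storing scene properties in voxel grids and interpolating through them means in a DVGO-style representation, the sketch below trilinearly queries a dense grid at continuous sample points with PyTorch's grid_sample; the grid resolution and scene bounds are made-up values, not the settings used in the paper.

```python
import torch
import torch.nn.functional as F

def query_voxel_grid(grid, points, scene_min, scene_max):
    """Trilinearly interpolate a dense voxel grid at continuous 3D points.

    grid   : (C, Dz, Dy, Dx) voxel grid holding e.g. density (C=1) or features.
    points : (N, 3) world-space xyz sample points along camera rays.
    Returns (N, C) interpolated values.
    """
    # Map world coordinates into grid_sample's normalised [-1, 1] range.
    norm = (points - scene_min) / (scene_max - scene_min) * 2.0 - 1.0
    # grid_sample expects (B, C, D, H, W) input and (B, d, h, w, 3) xyz coords.
    out = F.grid_sample(grid[None],                    # (1, C, Dz, Dy, Dx)
                        norm[None, :, None, None, :],  # (1, N, 1, 1, 3)
                        mode="bilinear", align_corners=True)
    return out[0, :, :, 0, 0].t()                      # -> (N, C)

# Toy usage: a single-channel "density" grid queried inside a 10 m scene box.
density = torch.rand(1, 64, 64, 64)
lo, hi = torch.zeros(3), torch.ones(3) * 10.0
pts = torch.rand(2048, 3) * 10.0
print(query_voxel_grid(density, pts, lo, hi).shape)  # torch.Size([2048, 1])
```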
We leverage Direct Voxel Grid Optimization (DVGO) [57] as our backbone for 3D compact representation for its fast speed. DVGO stores the learned density and color properties in its grid cells. The rendering of multi-view images is by interpolating through the voxel grids to get the density and color for each sampled point along each sampled ray, and integrating the colors based on the rendering alpha weights calculated from densities according to quadrature rule [41]. The model is trained by minimizing the L2 loss between the rendered multi-view images and the ground-truth multi-view images. By extracting the density voxel grid, we can get the 3D compact representation (e.g., By visualizing points with density greater than 0.5, we can get the 3D representation as shown in Fig. 2 I.)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.725, + 0.37, + 0.743 + ], + "angle": 0, + "content": "4.2. 3D Semantic Concept Grounding" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Once we extract the 3D compact representation of the scene, we need to ground the semantic concepts for reasoning from language. Recent work from [26] has proposed to ground concepts from paired 3D assets and question-answers. Though promising results have been achieved on synthetic data, it is not feasible for open-vocabulary 3D reasoning in real-world data, since it is hard to collect largescale 3D vision-and-language paired data. To address this challenge, our idea is to leverage pre-trained 2D vision and language model [46, 48] for 3D concept grounding in real-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.895, + 0.652 + ], + "angle": 0, + "content": "world scenes. But how can we map 2D concepts into 3D neural field representations? Note that 3D compact representations can be learned from 2D multi-view images and that each 2D pixel actually corresponds to several 3D points along the ray. Therefore, it's possible to get 3D features from 2D per-pixel features. Inspired by this, we first add a feature voxel grid representation to DVGO, in addition to density and color, to represent 3D features. We then apply CLIP-LSeg [37] to learn per-pixel 2D features, which can be attended to by CLIP concept embeddings. We use an alignment loss to align 3D features with 2D features so that we can perform concept grounding on the 3D representations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.655, + 0.895, + 0.807 + ], + "angle": 0, + "content": "2D Feature Extraction. To get per-pixel features that can be attended by concept embeddings, we use the features from language-driven semantic segmentation (CLIP-LSeg) [37], which learns 2D per-pixel features from a pre-trained vision-language model (i.e., [46]). Specifically, it uses the text encoder from CLIP, trains an image encoder to produce an embedding vector for each pixel, and calculates the scores of word-pixel correlation by dot-product. By outputting the semantic class with the maximum score of each pixel, CLIP-LSeg is able to perform zero-shot 2D semantic segmentation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.902 + ], + "angle": 0, + "content": "3D-2D Alignment. In addition to density and color, we also store a 512-dim feature in each grid cell in the compact representation. To align the 3D per-point features with 2D per-pixel features, we calculate an L1 loss between each pixel and each 3D point sampled on the ray of the pixel. 
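The per-ray alignment can be written down in a few lines. This is a minimal sketch under the stated setup (per-point L1 distances weighted by the volume-rendering weights of the sampled points), with tensor names chosen for illustration rather than taken from any released implementation.

```python
import torch

def feature_alignment_loss(point_feats, render_weights, pixel_feat):
    """Weighted L1 loss tying 3D point features on one ray to its 2D pixel feature.

    point_feats    : (K, D) features interpolated from the feature voxel grid at
                     the K points sampled along the ray.
    render_weights : (K,) volume-rendering alpha-compositing weights w_i.
    pixel_feat     : (D,)  CLIP-LSeg feature F(r) of the pixel the ray hits.
    """
    per_point_l1 = (point_feats - pixel_feat).abs().sum(dim=-1)  # ||f_i - F(r)||_1
    return (render_weights * per_point_l1).sum()

# Toy usage with random stand-ins for one ray.
K, D = 128, 512
feats = torch.randn(K, D, requires_grad=True)
loss = feature_alignment_loss(feats, torch.rand(K), torch.randn(D))
loss.backward()  # gradients flow back into the feature voxel grid samples
print(float(loss))
```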
The overall L1 loss along a ray is the weighted sum of all" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9206" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.182 + ], + "angle": 0, + "content": "the pixel-point alignment losses, with weights same as the rendering weights: \\(\\mathcal{L}_{\\mathrm{feature}} = \\sum_{i=1}^{K} w_i (\\| \\pmb{f}_i - F(\\pmb{r}) \\|)\\), where \\(\\pmb{r}\\) is a ray corresponding to a 2D pixel, \\(F(\\pmb{r})\\) is the 2D feature from CLIP-LSeg, \\(K\\) is the total number of sampled points along the ray and \\(\\pmb{f}_i\\) is the feature of point \\(i\\) by interpolating through the feature voxel grid, \\(w_i\\) is the rendering weight." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.183, + 0.473, + 0.289 + ], + "angle": 0, + "content": "Concept Grounding through Attention. Since our feature voxel grid representation is learnt from CLIP-LSeg, by calculating the dot-product attention \\(< f, v >\\) between perpoint 3D feature \\(f\\) and the CLIP concept embeddings \\(v\\), we can get zero-shot view-independent concept grounding and semantic segmentations in the 3D representation, as is presented in Fig. 2 IV." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.299, + 0.336, + 0.315 + ], + "angle": 0, + "content": "4.3. Neural Reasoning Operators" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.323, + 0.471, + 0.428 + ], + "angle": 0, + "content": "Finally, we use the grounded semantic concepts for 3D reasoning from language. We first transform questions into a sequence of operators that can be executed on the 3D representation for reasoning. We adopt a LSTM-based semantic parser [65] for that. As [26, 40], we further devise a set of operators which can be executed on the 3D representation. Please refer to Appendix for a full list of operators." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.429, + 0.47, + 0.459 + ], + "angle": 0, + "content": "Filter Operators. We filter all the grid cells with a certain semantic concept." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.535 + ], + "angle": 0, + "content": "Get Instance Operators. We implement this by utilizing DBSCAN [15], an unsupervised algorithm which assigns clusters to a set of points. Specifically, given a set of points in the 3D space, it can group together the points that are closely packed together for instance segmentation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.472, + 0.732 + ], + "angle": 0, + "content": "Relation Operators. We cannot directly execute the relation on the 3D representation as we have not grounded relations. Thus, we represent each relation using a distinct neural module (which is practical as the vocabulary of relations is limited [36]). We first concatenate the voxel grid representations of all the referred objects and feed them into the relation network. The relation network consists of three 3D convolutional layers and then three 3D deconvolutional layers. A score is output by the relation network indicating whether the objects have the relationship or not. Since vanilla 3D CNNs are very slow, we use Sparse Convolution [10] instead. Based on the relations asked in the questions, different relation modules are chosen." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.746, + 0.21, + 0.763 + ], + "angle": 0, + "content": "5. 
Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.771, + 0.268, + 0.788 + ], + "angle": 0, + "content": "5.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.47, + 0.84 + ], + "angle": 0, + "content": "Evaluation Metric. We report the visual question answering accuracy on the proposed 3DMV-VQA dataset w.r.t the four types of questions. The train/val/test split is 7:1:2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Implementation Details For 3D compact representations, we adopt the same architectures as DVGO, except skipping the coarse reconstruction phase and directly training the fine reconstruction phase. After that, we freeze the density voxel" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.333 + ], + "angle": 0, + "content": "grid and color voxel grid, for the optimization of the feature voxel grid only. The feature grid has a world size of 100 and feature dim of 512. We train the compact representations for 100,000 iterations and the 3D features for another 20,000 iterations. For LSeg, we use the official demo model, which has the ViT-L/16 image encoder and CLIP's ViT-B/32 text encoder. We follow the official script for inference and use multi-scale inference. For DBSCAN, we use an epsilon value of 1.5, minimum samples of 2, and we use L1 as the clustering method. For the relation networks, each relation is encoded into a three-layer sparse 3D convolution network with hidden size 64. The output is then fed into a one-layer linear network to produce a score, which is normalized by sigmoid function. We use cross-entropy loss to train the relation networks, and we use the one-hop relational questions with \"yes/no\" answers to train the relation networks." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.343, + 0.608, + 0.358 + ], + "angle": 0, + "content": "5.2. Baselines" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.366, + 0.894, + 0.412 + ], + "angle": 0, + "content": "Our baselines range from vanilla neural networks, attention-based methods, fine-tuned from large-scale VLM, and graph-based methods, to neural-symbolic methods." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.421, + 0.894, + 0.513 + ], + "angle": 0, + "content": "- LSTM. The question is transferred to word embeddings which are input into a word-level LSTM [25]. The last LSTM hidden state is fed into a multi-layer perceptron (MLP) that outputs a distribution over answers. This method is able to model question-conditional bias since it uses no image information." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.517, + 0.894, + 0.609 + ], + "angle": 0, + "content": "- CNN+LSTM. The question is encoded by the final hidden states from LSTM. We use a resnet-50 to extract frame-level features of images and average them over the time dimension. The features are fed to an MLP to predict the final answer. This is a simple baseline that examines how vanilla neural networks perform on 3DMV-VQA." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.614, + 0.894, + 0.674 + ], + "angle": 0, + "content": "- 3D-Feature+LSTM. We use the 3D features we get from 3D-2D alignment and downsample the voxel grids using 3D-CNN as input, concatenated with language features from LSTM and fed to an MLP." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.679, + 0.894, + 0.74 + ], + "angle": 0, + "content": "- MAC [30]. 
MAC utilizes a Memory, Attention and Composition cell to perform iterative reasoning process. Like CNN+LSTM, we use the average pooling over multi-view images as the feature map." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.745, + 0.894, + 0.805 + ], + "angle": 0, + "content": "- MAC(V). We treat the multi-view images along a trajectory as a video. We modify the MAC model by applying a temporal attention unit across the video frames to generate a latent encoding for the video." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.81, + 0.894, + 0.901 + ], + "angle": 0, + "content": "- NS-VQA [65]. This is a 2D version of our 3D-CLR model. We use CLIP-LSeg to ground 2D semantic concepts from multi-view images, and the relation network also takes the 2D features as input. We execute the operators on each image and max pool from the answers to get our final predictions." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.421, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9207" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.234, + 0.089, + 0.737, + 0.27 + ], + "angle": 0, + "content": "
| Methods | Concept | Counting | Relation | Comparison | Overall |
| --- | --- | --- | --- | --- | --- |
| Q-type (rand.) | 49.4 | 10.7 | 21.6 | 49.2 | 26.4 |
| LSTM | 53.4 | 15.3 | 24.0 | 55.2 | 29.8 |
| CNN+LSTM | 57.8 | 22.1 | 35.2 | 59.7 | 37.8 |
| MAC | 62.4 | 19.7 | 47.8 | 62.3 | 46.7 |
| MAC(V) | 60.0 | 24.6 | 51.6 | 65.9 | 50.0 |
| NS-VQA | 59.8 | 21.5 | 33.4 | 61.6 | 38.0 |
| ALPRO | 65.8 | 12.7 | 42.2 | 68.2 | 43.3 |
| LGCN | 56.2 | 19.5 | 35.5 | 66.7 | 39.1 |
| 3D-Feature+LSTM | 61.2 | 22.4 | 49.9 | 61.3 | 48.2 |
| 3D-CLR (Ours) | 66.1 | 41.3 | 57.6 | 72.3 | 57.7 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.19, + 0.277, + 0.776, + 0.292 + ], + "angle": 0, + "content": "Table 1. Question-answering accuracy of 3D visual reasoning baselines on different question types." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.303, + 0.471, + 0.365 + ], + "angle": 0, + "content": "- ALPRO [38]. ALPRO is a video-and-language pre-training framework. A transformer model is pretrained on large webly-source video-text pairs and can be used for downstream tasks like Video Question answering." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.369, + 0.47, + 0.416 + ], + "angle": 0, + "content": "- LGCN [28]. LGCN represents the contents in the video as a location-aware graph by incorporating the location information of an object into the graph construction." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.303, + 0.471, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.424, + 0.281, + 0.44 + ], + "angle": 0, + "content": "5.3. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.448, + 0.47, + 0.523 + ], + "angle": 0, + "content": "Result Analysis. We summarize the performances for each question type of baseline models in Table 1. All models are trained on the training set until convergence, tuned on the validation set, and evaluated on the test set. We provide detailed analysis below." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.524, + 0.471, + 0.902 + ], + "angle": 0, + "content": "First, for the examination of language-bias of the dataset, we find that the performance of LSTM is only slightly higher than random and frequency, and all other baselines outperform LSTM a lot. This suggests that there's little language bias in our dataset. Second, we observe that encoding temporal information in MAC (i.e., MAC(V)) is better than average-pooling of the features, especially in counting and relation. This suggests that average-pooling of the features may cause the model to lose information from multi-view images, while attention on multi-view images helps boost the 3D reasoning performances. Third, we also find that fine-tuning on large-scale pretrained model (i.e., ALPRO) has relatively high accuracies in concept-related questions, but for counting it's only slightly higher than the random baseline, suggesting that pretraining on large-scale video-language dataset may improve the model's perception ability, but does not provide the model with the ability to tackle with more difficult reasoning types such as counting. Next, we find that LGCN has poor performances on the relational questions, indicating that building a location-aware graph over 2D objects still doesn't equip the model with 3D location reasoning abilities. Last but not least, we find that 3D-based baselines are better than their 2D counterparts. 3D-Feature+LSTM performs well on the 3D-related questions, such as counting and relation, than most of the image-based" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.303, + 0.895, + 0.454 + ], + "angle": 0, + "content": "basielines. Compared with 3D-CLR, NS-VQA can perform well in the conceptual questions. However, it underperforms 3D-CLR a lot in counting and relation, suggesting that these two types of questions require the holistic 3D understanding of the entire 3D scenes. Our 3D-CLR outperforms other baselines by a large margin, but is still far from satisfying. 
From the accuracy of the conceptual question, we can see that it can only ground approximately \\(66\\%\\) of the semantic concepts. This indicates that our 3DMV-VQA dataset is indeed very challenging." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.456, + 0.895, + 0.622 + ], + "angle": 0, + "content": "Qualitative Examples. In Fig. 3, we show four qualitative examples. From the examples, we show that our 3D-CLR can infer an accurate 3D representation from multi-view images, as well as ground semantic concepts on the 3D representations to get the semantic segmentations of the entire scene. Our 3D-CLR can also learn 3D relationships such as \"close\", \"largest\", \"on top of\" and so on. However, 3D-CLR also fails on some questions. For the third scene in the qualitative examples, it fails to ground the concepts \"mouse\" and \"printer\". Also, it cannot accurately count the instances sometimes. We give detailed discussions below." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.634, + 0.625, + 0.648 + ], + "angle": 0, + "content": "5.4. Discussions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.895, + 0.809 + ], + "angle": 0, + "content": "We perform an in-depth analysis to understand the challenge of this dataset. We leverage the modular design of our 3D-CLR, replacing individual components of the framework with ground-truth annotations for model diagnosis. The result is shown in Fig 4. 3D-CLR w/ Semantic denotes our model with ground-truth semantic concepts from HM3DSem annotations. 3D-CLR w/ Instance denotes that we have ground-truth instance segmentations of semantic concepts. From Fig. 3 and Fig. 4, we summarize several key challenges of our benchmark:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Very close object instances From Fig. 4, we can see that even with ground-truth semantic labeling of the 3D points, 3D-CLR still has unsatisfying results on counting questions. This suggests that the instance segmentations provided by DBSCAN are not accurate enough. From the top two qualitative examples in Fig. 3, we can also see that if two chairs" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9208" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.091, + 0.887, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.461, + 0.893, + 0.491 + ], + "angle": 0, + "content": "Figure 3. Qualitative examples of our 3D-CLR. We can see that 3D-CLR can ground most of the concepts and answer most questions correctly. However, it still fails sometimes, mainly because it cannot separate close object instances and ground small objects." + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.504, + 0.462, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.145, + 0.619, + 0.402, + 0.631 + ], + "angle": 0, + "content": "Figure 4. Model diagnosis of our 3D-CLR." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.47, + 0.702 + ], + "angle": 0, + "content": "contact each other, DBSCAN will not tell them apart and thus have poor performance on counting. One crucial future direction is to improve unsupervised instance segmentations on very close object instances." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.703, + 0.473, + 0.884 + ], + "angle": 0, + "content": "Grounding small objects Fig. 
4 suggests that 3D-CLR fails to ground a large portion of the semantic concepts, which hinders the performance. From the last example in Fig. 3, we can see that 3D-CLR fails to ground small objects like \"computer mouse\". Further examination indicates there are two possible reasons: 1) CLIP-LSeg fails to assign the right features to objects with limited pixels; 2) The resolution of feature voxel grid is not high enough and therefore small objects cannot be represented in the compact representation. An interesting future direction would be learning exploration policies that enable the agents to get closer to uncertain objects that cannot be grounded." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Ambiguity on 3D relations Even with ground-truth seman" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.505, + 0.895, + 0.598 + ], + "angle": 0, + "content": "tic and instance segmentations, the performance of the relation network still needs to be improved. We find that most of the failure cases are correlated to the \"inside\" relation. From the segmentations in Fig. 3, we can see that 3D-CLR is unable to ground the objects in the cabinets. A potential solution can be joint depth and segmentation predictions." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.616, + 0.619, + 0.632 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.643, + 0.895, + 0.795 + ], + "angle": 0, + "content": "In this paper, we introduce the novel task of 3D reasoning from multi-view images. By placing embodied robot that actively explores indoor environments, we collect a large-scale benchmark named 3DMV-VQA. We also propose a new 3D-CLR model that incorporates neural field, 2D VLM, as well as reasoning operators for this task and illustrate its effectiveness. Finally, we perform an in-depth analysis to understand the challenges of this dataset and also point out potential future directions. We hope that 3DMV-VQA can be used to push the frontiers of 3D reasoning." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.895, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements. This work was supported by the MIT-IBM Watson AI Lab, DARPA MCS, DSO grant DSOCO21072, and gift funding from MERL, Cisco, Sony, and Amazon. We would also like to thank the computation support from AiMOS, a server cluster for the IBM Research AI Hardware Center." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9209" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Panos Achlioptas, Ahmed Abdelreehm, Fei Xia, Mohamed Elhoseiny, and Leonidas J. Guibas. Referit3d: Neural listeners for fine-grained 3d object identification in real-world scenes. In ECCV, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.472, + 0.241 + ], + "angle": 0, + "content": "[2] Daich Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19107-19117, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.472, + 0.285 + ], + "angle": 0, + "content": "[3] Dave Zhenyu Chen, Angel X. Chang, and Matthias Nießner. 
Scanrefer: 3d object localization in rgb-d scans using natural language. In ECCV, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.288, + 0.472, + 0.343 + ], + "angle": 0, + "content": "[4] Dave Zhenyu Chen, Ali Gholami, Matthias Nießner, and Angel X. Chang. Scan2cap: Context-aware dense captioning in rgb-d scans. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3192-3202, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.472, + 0.385 + ], + "angle": 0, + "content": "[5] Z Chen, L Ma, W Luo, and KKY Wong. Weakly-supervised spatio-temporally grounding natural sentence in video. In ACL, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.388, + 0.472, + 0.443 + ], + "angle": 0, + "content": "[6] Zhenfang Chen, Jiayuan Mao, Jiajun Wu, Kwan-Yee Kenneth Wong, Joshua B Tenenbaum, and Chuang Gan. Grounding physical concepts of objects and events through dynamic visual reasoning. *ICLR*, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.446, + 0.472, + 0.487 + ], + "angle": 0, + "content": "[7] Zhenfang Chen, Peng Wang, Lin Ma, Kwan-Yee K Wong, and Qi Wu. Cops-ref: A new dataset and task on compositional referring expression comprehension. In CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.489, + 0.472, + 0.543 + ], + "angle": 0, + "content": "[8] Zhenfang Chen, Kexin Yi, Yunzhu Li, Mingyu Ding, Antonio Torralba, Joshua B Tenenbaum, and Chuang Gan. Comphy: Compositional physical reasoning of objects and events from videos. In ICLR, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.546, + 0.472, + 0.613 + ], + "angle": 0, + "content": "[9] Zhenfang Chen, Qinhong Zhou, Yikang Shen, Yining Hong, Hao Zhang, and Chuang Gan. See, think, confirm: Interactive prompting between vision and language models for knowledge-based visual reasoning. arXiv preprint arXiv:2301.05226, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.617, + 0.472, + 0.657 + ], + "angle": 0, + "content": "[10] Spconv Contributors. Spconv: Spatially sparse convolution library. https://github.com/traveller59/spconv, 2022.6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.472, + 0.728 + ], + "angle": 0, + "content": "[11] Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied question answering. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 2135-213509, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.472, + 0.786 + ], + "angle": 0, + "content": "[12] Mingyu Ding, Yan Xu, Zhenfang Chen, David Daniel Cox, Ping Luo, Joshua B Tenenbaum, and Chuang Gan. Embodied concept learner: Self-supervised learning of concepts and mapping through instruction following. In CoRL. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.789, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[13] Yilun Du, M. Katherine Collins, B. Joshua Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields. In Advances in Neural Information Processing Systems, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[14] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B. Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021. 
3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in large spatial databases with noise. In KDD, 1996. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.188 + ], + "angle": 0, + "content": "[16] Yasaman Etesam, Leon Kochiev, and Angel X Chang. 3dvqa: Visual question answering for 3d environments. In 2022 19th Conference on Robots and Vision (CRV), pages 233-240. IEEE, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.19, + 0.894, + 0.272 + ], + "angle": 0, + "content": "[17] Mingtao Feng, Zhen Li, Qi Li, Liang Zhang, Xiangdong Zhang, Guangming Zhu, Hui Zhang, Yaonan Wang, and Ajmal S. Mian. Free-form description guided 3d visual graph network for object grounding in point cloud. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3702-3711, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.274, + 0.894, + 0.328 + ], + "angle": 0, + "content": "[18] Chuang Gan, Yandong Li, Haoxiang Li, Chen Sun, and Boqing Gong. Vqs: Linking segmentations to questions and answers for supervised attention in vqa and question-focused semantic segmentation. In ICCV, pages 1811-1820, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.33, + 0.894, + 0.385 + ], + "angle": 0, + "content": "[19] Siddha Ganju, Olga Russakovsky, and Abhinav Kumar Gupta. What's in a question: Using visual questions as a form of supervision. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6422-6431, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.385, + 0.892, + 0.426 + ], + "angle": 0, + "content": "[20] Xiaofeng Gao, Qiaozi Gao, Ran Gong, Kaixiang Lin, Govind Thattai, and Gaurav S Sukhatme. Dialfred: Dialogue-enabled agents for embodied instruction following. arXiv, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.894, + 0.495 + ], + "angle": 0, + "content": "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien P. C. Valentin. Fastnerf: High-fidelity neural rendering at 200fps. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 14326-14335, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.497, + 0.894, + 0.566 + ], + "angle": 0, + "content": "[22] Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4089-4098, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.894, + 0.635 + ], + "angle": 0, + "content": "[23] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6325-6334, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.637, + 0.894, + 0.703 + ], + "angle": 0, + "content": "[24] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul E. Debevec. Baking neural radiance fields for real-time view synthesis. 
2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.706, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[25] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9:1735-1780, 1997. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.735, + 0.894, + 0.774 + ], + "angle": 0, + "content": "[26] Yining Hong, Yilun Du, Chunru Lin, Joshua B Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. arXiv preprint arXiv:2207.06403, 2022. 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.776, + 0.894, + 0.816 + ], + "angle": 0, + "content": "[27] Yining Hong, Li Yi, Joshua B. Tenenbaum, Antonio Torralba, and Chuang Gan.Ptr: A benchmark for part-based conceptual, relational, and physical reasoning. In NeurIPS, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.818, + 0.892, + 0.859 + ], + "angle": 0, + "content": "[28] Deng Huang, Peihao Chen, Runhao Zeng, Qing Du, Mingkui Tan, and Chuang Gan. Location-aware graph convolutional networks for video question answering. In AAAI, 2020. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[29] Pin-Hao Huang, Han-Hung Lee, Hwann-Tzong Chen, and Tyng-Luh Liu. Text-guided graph neural networks for referring 3d instance segmentation. In AAAI, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "9210" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.12 + ], + "angle": 0, + "content": "[30] D. A. Hudson and Christopher D. Manning. Compositional attention networks for machine reasoning. *ICLR*, 2018. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.123, + 0.472, + 0.191 + ], + "angle": 0, + "content": "[31] Ajay Jain, Ben Mildenhall, Jonathan T. Barron, P. Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 857-866, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.194, + 0.472, + 0.248 + ], + "angle": 0, + "content": "[32] Zhenyu Jiang, Yifeng Zhu, Maxwell Svetlik, Kuan Fang, and Yuke Zhu. Synergies between affordance and geometry: 6-dof grasp detection via implicit representations. ArXiv, abs/2104.01542, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.252, + 0.472, + 0.322 + ], + "angle": 0, + "content": "[33] J. Johnson, Bharath Hariharan, L. V. D. Maaten, Li Fei-Fei, C. L. Zitnick, and Ross B. Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1988-1997, 2017. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.324, + 0.472, + 0.378 + ], + "angle": 0, + "content": "[34] Stefan Kollmannsberger, Davide D'Angella, Moritz Jokeit, and Leon Alexander Herrmann. Physics-informed neural networks. Deep Learning in Computational Mechanics, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.381, + 0.47, + 0.409 + ], + "angle": 0, + "content": "[35] Natalia Konstantinova and Constantin Orasan. Interactive question answering. In EMNLP. IGI Global, 2013. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.412, + 0.47, + 0.453 + ], + "angle": 0, + "content": "[36] Barbara Landau and Ray Jackendoff. “what” and “where” in spatial language and spatial cognition. Behavioral and Brain Sciences, 16:217-238, 1993. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.455, + 0.47, + 0.496 + ], + "angle": 0, + "content": "[37] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation. *ICLR*, 2022, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.499, + 0.47, + 0.568 + ], + "angle": 0, + "content": "[38] Dongxu Li, Junnan Li, Hongdong Li, Juan Carlos Niebles, and Steven C. H. Hoi. Align and prompt: Video-and-language pre-training with entity prompts. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4943-4953, 2022. 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.571, + 0.47, + 0.612 + ], + "angle": 0, + "content": "[39] Andrew Luo, Yilun Du, Michael J Tarr, Joshua B Tenenbaum, Antonio Torralba, and Chuang Gan. Learning neural acoustic fields. arXiv preprint arXiv:2204.00628, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.615, + 0.472, + 0.669 + ], + "angle": 0, + "content": "[40] Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neuro-symbolic concept learner: Interpreting scenes words and sentences from natural supervision. ArXiv, abs/1904.12584, 2019. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.673, + 0.47, + 0.7 + ], + "angle": 0, + "content": "[41] Nelson L. Max. Optical models for direct volume rendering. IEEE Trans. Vis. Comput. Graph., 1:99-108, 1995. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[42] Lars M. Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4455-4465, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.47, + 0.829 + ], + "angle": 0, + "content": "[43] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. ECCV, 2020. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.899 + ], + "angle": 0, + "content": "[44] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE International Conference on Computer Vision, pages 5379-5389, 2019. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.15, + 0.894, + 0.219 + ], + "angle": 0, + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. 
In ICML, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.894, + 0.315 + ], + "angle": 0, + "content": "[47] Santhosh K. Ramakrishnan, Aaron Gokaslan, Erik Wijmans, Oleksandr Maksymets, Alexander Clegg, John Turner, Eric Undersander, Wojciech Galuba, Andrew Westbury, Angel X. Chang, Manolis Savva, Yili Zhao, and Dhruv Batra. Habitatmatterport 3d dataset (hm3d): 1000 large-scale 3d environments for embodied ai. ArXiv, abs/2109.08238, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.319, + 0.894, + 0.373 + ], + "angle": 0, + "content": "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. ArXiv, abs/2102.12092, 2021.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.376, + 0.894, + 0.431 + ], + "angle": 0, + "content": "[49] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. ICCV, pages 2304-2314, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.433, + 0.894, + 0.515 + ], + "angle": 0, + "content": "[50] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.518, + 0.894, + 0.572 + ], + "angle": 0, + "content": "[51] Mohit Shridhar, Jesse Thomason, Daniel Gordon, Yonatan Bisk, Winson Han, Roozbeh Mottaghi, Luke Zettlemoyer, and Dieter Fox. Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.575, + 0.894, + 0.643 + ], + "angle": 0, + "content": "[52] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. arXiv preprint arXiv:2112.05124, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.646, + 0.894, + 0.699 + ], + "angle": 0, + "content": "[53] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Proc. NeurIPS 2019, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.703, + 0.894, + 0.757 + ], + "angle": 0, + "content": "[54] Chan Hee Song, Jihyung Kil, Tai-Yu Pan, Brian M Sadler, Wei-Lun Chao, and Yu Su. One step at a time: Long-horizon vision-and-language navigation with milestones. arXiv preprint arXiv:2202.07028, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.76, + 0.894, + 0.813 + ], + "angle": 0, + "content": "[55] Elizabeth S Spelke, Karen Breinlinger, Kristen Jacobson, and Ann Phillips. Gestalt Relations and Object Perception: A Developmental Study. Perception, 22(12):1483-1501, 1993. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.894, + 0.871 + ], + "angle": 0, + "content": "[56] Alessandro Suglia, Qiaozi Gao, Jesse Thomason, Govind Thattai, and Gaurav Sukhatme. Embodied bert: A transformer model for embodied, language-guided visual task completion. arXiv preprint arXiv:2108.04927, 2021. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[57] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.956 + ], + "angle": 0, + "content": "9211" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.472, + 0.133 + ], + "angle": 0, + "content": "reconstruction. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.246 + ], + "angle": 0, + "content": "[58] Andrew Szot, Alex Clegg, Eric Undersander, Erik Wijmans, Yili Zhao, John Turner, Noah Maestre, Mustafa Mukadam, Devendra Chaplot, Oleksandr Maksymets, Aaron Gokaslan, Vladimir Vondrus, Sameer Dharur, Franziska Meier, Wojciech Galuba, Angel Chang, Zsolt Kira, Vladlen Koltun, Jitendra Malik, Manolis Savva, and Dhruv Batra. Habitat 2.0: Training home assistants to rearrange their habitat. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.248, + 0.472, + 0.288 + ], + "angle": 0, + "content": "[59] Ivan Vendrov, Ryan Kiros, Sanja Fidler, and Raquel Urtasun. Order-embeddings of images and language. CoRR, abs/1511.06361, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.291, + 0.472, + 0.331 + ], + "angle": 0, + "content": "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. ArXiv, abs/2112.05139, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.472, + 0.401 + ], + "angle": 0, + "content": "[61] Karmesh Yadav, Ram Ramrakhya, Santhosh Kumar Ramakrishnan, Theo Gervet, John Turner, Aaron Gokaslan, Noah Maestre, Angel Xuan Chang, Dhruv Batra, Manolis Savva, et al. Habitat-matterport 3d semantics dataset. arXiv preprint arXiv:2210.05633, 2022. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.404, + 0.472, + 0.471 + ], + "angle": 0, + "content": "[62] Xu Yan, Zhihao Yuan, Yuhao Du, Yinghong Liao, Yao Guo, Zhen Li, and Shuguang Cui. Clevr3d: Compositional language and elementary visual reasoning for question answering in 3d real-world scenes. arXiv preprint arXiv:2112.11691, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.474, + 0.472, + 0.528 + ], + "angle": 0, + "content": "[63] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Proc. NeurIPS, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.531, + 0.472, + 0.558 + ], + "angle": 0, + "content": "[64] Shuquan Ye, Dongdong Chen, Songfang Han, and Jing Liao. 3d question answering. ArXiv, abs/2112.08359, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.56, + 0.472, + 0.614 + ], + "angle": 0, + "content": "[65] Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Joshua B. Tenenbaum. Neural-symbolic vqa: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. 
3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.616, + 0.472, + 0.683 + ], + "angle": 0, + "content": "[66] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoptrees for real-time rendering of neural radiance fields. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5732-5741, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.687, + 0.472, + 0.728 + ], + "angle": 0, + "content": "[67] Licheng Yu, Xinlei Chen, Georgia Gkioxari, Mohit Bansal, Tamara L Berg, and Dhruv Batra. Multi-target embodied question answering. In CVPR, pages 6309-6318, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.73, + 0.472, + 0.797 + ], + "angle": 0, + "content": "[68] Kaizhi Zheng, Xiaotong Chen, Odest Chadwicke Jenkins, and Xin Eric Wang. Vlmbench: A compositional benchmark for vision-and-language manipulation. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.8, + 0.472, + 0.855 + ], + "angle": 0, + "content": "[69] Yuke Zhu, O. Groth, Michael S. Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4995-5004, 2016. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "9212" + } + ] +] \ No newline at end of file diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_origin.pdf b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..02e33d886e0af0a9146da54fa51483a3b8f1b5c2 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/6720ecfb-203e-4307-9b9b-8d1051d4343b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:718cb48c1916546760f33f826a20f20aedfde740e6b72467a39b5867fc1c8adc +size 4375068 diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/full.md b/2023/3D Concept Learning and Reasoning From Multi-View Images/full.md new file mode 100644 index 0000000000000000000000000000000000000000..080ec425d37570082e7376fc5ada583579d01d98 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/full.md @@ -0,0 +1,280 @@ +# 3D Concept Learning and Reasoning from Multi-View Images + +Yining Hong $^{1}$ , Chunru Lin $^{2}$ , Yilun Du $^{3}$ , Zhenfang Chen $^{5}$ , Joshua B. Tenenbaum $^{3}$ , Chuang Gan $^{4,5}$ , $^{1}$ UCLA, $^{2}$ Shanghai Jiaotong University, $^{3}$ MIT CSAIL, $^{4}$ UMass Amherst, $^{5}$ MIT-IBM Watson AI Lab https://vis-www.cs.umass.edu/3d-clr/ + +![](images/0139c5dde162a2aa92dcfb3c8dcbb7922e85a282412b0fa7132181e9fb7d8996.jpg) +Concept: Q: Are there any televisions? A: Yes +Counting: +Q: How many chairs are close to the table in the room with plant on the cabinet? A: 6 +Q: How many rooms have sofas? A: 1 +Figure 1. An exemplar scene with multi-view images and question-answer pairs of our 3DMV-VQA dataset. 3DMV-VQA contains four question types: concept, counting, relation, comparison. Orange words denote semantic concepts; blue words denote the relations. 
+ +![](images/9c2499a72b5232613521f83970886d73892accd35822abbe76380eef8f2aa6d4.jpg) + +![](images/bcdc54a8ca47ecfc2e80d202e50dbd8f6ca69bcd75c2d68e299c3a97942513b0.jpg) + +![](images/adb9c46914582c93ffb458cac411cec963fc04c004c0d353d24ebdb993d37a5a.jpg) + +![](images/8b29d6f5ef01af8dd76de5ff59d2297cdec550b6c7ae842222df4b69bb5b62d8.jpg) +Relation: Q: Facing the computer from the curtain, is there a lamp on the right? A: Yes +Q: What's on the cabinet in the smaller room? A: Plant + +![](images/49645c25bde17b0db6d9b773fde5388ec829dbf8fc93453c2f9d34d1a84bd58e.jpg) + +![](images/533b12da53016d7d003d444faa1199643658affa31d497d3c6002146c1f27325.jpg) +Comparison: +Q: Are there fewer pictures in the larger room than the other room? A: No +Q: Is the computer closer to a printer or a lamp? +A: Printer + +![](images/874bbee417fccf7c5a5cbc21ef58df9624603c16927e8facc7e608b9c78004c5.jpg) + +![](images/8c5023c7c3bb44a5cbe06b847a2eb0751a7ff0bfc87e1dd62dc7e15795a12609.jpg) + +# Abstract + +Humans are able to accurately reason in 3D by gathering multi-view observations of the surrounding world. Inspired by this insight, we introduce a new large-scale benchmark for 3D multi-view visual question answering (3DMV-VQA). This dataset is collected by an embodied agent actively moving and capturing RGB images in an environment using the Habitat simulator. In total, it consists of approximately 5k scenes, 600k images, paired with 50k questions. We evaluate various state-of-the-art models for visual reasoning on our benchmark and find that they all perform poorly. We suggest + +that a principled approach for 3D reasoning from multi-view images should be to infer a compact 3D representation of the world from the multi-view images, which is further grounded on open-vocabulary semantic concepts, and then to execute reasoning on these 3D representations. As the first step towards this approach, we propose a novel 3D concept learning and reasoning (3D-CLR) framework that seamlessly combines these components via neural fields, 2D pre-trained vision-language models, and neural reasoning operators. Experimental results suggest that our framework outperforms baseline models by a large margin, but the challenge remains largely unsolved. We further perform an in-depth analysis of the challenges and highlight potential future directions. + +# 1. Introduction + +Visual reasoning, the ability to composite rules on internal representations to reason and answer questions about visual scenes, has been a long-standing challenge in the field of artificial intelligence and computer vision. Several datasets [23, 33, 69] have been proposed to tackle this challenge. However, they mainly focus on visual reasoning on 2D single-view images. Since 2D single-view images only cover a limited region of the whole space, such reasoning inevitably has several weaknesses, including occlusion, and failing to answer 3D-related questions about the entire scene that we are interested in. As shown in Fig. 1, it's difficult, even for humans, to count the number of chairs in a scene due to the object occlusion, and it's even harder to infer 3D relations like "closer" from a single-view 2D image. + +On the other hand, there's strong psychological evidence that human beings conduct visual reasoning in the underlying 3D representations [55]. Recently, there have been several works focusing on 3D visual question answering [2,16,62,64]. They mainly use traditional 3D representations (e.g., point clouds) for visual reasoning. 
This is inconsistent with the way human beings perform 3D reasoning in real life. Instead of being given an entire 3D representation of the scene at once, humans will actively walk around and explore the whole environment, ingesting image observations from different views and converting them into a holistic 3D representation that assists them in understanding and reasoning about the environment. Such abilities are crucial for many embodied AI applications, such as building assistive robots.

To this end, we propose the novel task of 3D visual reasoning from multi-view images taken by active exploration of an embodied agent. Specifically, we generate a large-scale benchmark, 3DMV-VQA (3D multi-view visual question answering), that contains approximately 5k scenes and 50k question-answering pairs about these scenes. For each scene, we provide a collection of multi-view image observations. We generate this dataset by placing an embodied agent in the Habitat-Matterport environment [47], which actively explores the environment and takes pictures from different views. We also obtain scene graph annotations from the Habitat-Matterport 3D semantics dataset (HM3DSem) [61], including ground-truth locations, segmentations, semantic information of the objects, as well as relationships among the objects in the environments, for model diagnosis. To evaluate the models' 3D reasoning abilities on the entire environment, we design several 3D-related question types, including concept, counting, relation and comparison.

Given this new task, the key challenges we would like to investigate include: 1) How to efficiently obtain a compact visual representation that encodes crucial properties (e.g., semantics and relations) by integrating all incomplete observations of the environment in the process of active exploration for 3D visual reasoning? 2) How to ground semantic concepts on these 3D representations so that they can be leveraged for downstream tasks such as visual reasoning? 3) How to infer the relations among the objects and perform step-by-step reasoning?

As the first step to tackling these challenges, we propose a novel model, 3D-CLR (3D Concept Learning and Reasoning). First, to efficiently obtain a compact 3D representation from multi-view images, we use a neural-field model based on compact voxel grids [57], which is both fast to train and effective at storing scene properties. As for concept learning, we observe that previous works on 3D scene understanding [1,3] lack diversity and scale with regard to semantic concepts due to the limited amount of paired 3D-and-language data. Although large-scale vision-language models (VLMs) have achieved impressive performance for zero-shot semantic grounding on 2D images, leveraging these pretrained models for effective open-vocabulary 3D grounding of semantic concepts remains a challenge. To address these challenges, we propose to encode the features of a pre-trained 2D vision-language model (VLM) into the compact 3D representation defined across voxel locations. Specifically, we use the CLIP-LSeg [37] model to obtain features on multi-view images, and propose an alignment loss to map the features in our 3D voxel grid to 2D pixels. By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we can ground the semantic concepts in the 3D compact representation. 
Finally, to answer the questions, we introduce a set of neural reasoning operators, including FILTER, COUNT, RELATION operators and so on, which take the 3D representations of different objects as input and output the predictions. + +We conduct experiments on our proposed 3DMV-VQA benchmark. Experimental results show that our proposed 3D-CLR outperforms all baseline models a lot. However, failure cases and model diagnosis show that challenges still exist concerning the grounding of small objects and the separation of close object instances. We provide an in-depth analysis of the challenges and discuss potential future directions. + +To sum up, we have the following contributions in this paper. + +- We propose the novel task of 3D concept learning and reasoning from multi-view images. +- By having robots actively explore the embodied environments, we collect a large-scale benchmark on 3D multiview visual question answering (3DMV-VQA). +- We devise a model that incorporates a neural radiance field, 2D pretrained vision and language model, and neural reasoning operators to ground the concepts and perform 3D reasoning on the multi-view images. We illustrate that our model outperforms all baseline models. +- We perform an in-depth analysis of the challenges of this new task and highlight potential future directions. + +# 2. Related Work + +Visual Reasoning There have been numerous tasks focusing on learning visual concepts from natural language, including visually-grounded question answering [18, 19], text-image retrieval [59] and so on. Visual reasoning has drawn much attention recently as it requires human-like understanding of the visual scene. A wide variety of benchmarks have been created over the recent years [7, 8, 23, 27, 33, 69]. However, they mainly focus on visual reasoning from 2D single-view images, while there's strong psychological evidence that human beings perform visual reasoning on the underlying 3D representations. In this paper, we propose the novel task of visual reasoning from multi-view images, and collect a large-scale benchmark for this task. In recent years, numerous visual reasoning models have also been proposed, ranging from attention-based methods [5, 30], graph-based methods [28], to models based on large pretrained vision-language model [9, 38]. These methods model the reasoning process implicitly with neural networks. Neural-symbolic methods [6, 40, 65] explicitly perform symbolic reasoning on the objects representations and language representations. They use perception models to extract 2D masks as a first step, and then execute operators and ground concepts on these pre-segmented masks, but are limited to a set of predefined concepts on simple scenes. [26] proposes to use the feature vectors from occupancy networks [42] to do visual reasoning in the 3D space. However, they also use a synthetic dataset, and learn a limited set of semantic concepts from scratch. We propose to learn 3D neural field features from 2D multi-view real-world images, and incorporate a 2D VLM for open-vocabulary reasoning. + +3D Reasoning Understanding and reasoning about 3D scenes has been a long-standing challenge. Recent works focus on leveraging language to explore 3D scenes, such as object captioning [3,4] and object localization from language [1, 17, 29]. Our work is mostly related to 3D Visual Question Answering [2, 16, 62, 64] as we both focus on answering questions and reasoning about 3D scenes. 
However, these works use point clouds as 3D representations, which diverts from the way human beings perform 3D reasoning. Instead of being given an entire 3D representation all at once, human beings would actively move and explore the environment, integrating multi-view information to get a compact 3D representation. Therefore, we propose 3D reasoning from multi-view images. In addition, since 3D assets paired with natural language descriptions are hard to get in real-life scenarios, previous works struggle to ground open-vocabulary concepts. In our work, we leverage 2D VLMs for zero-shot open-vocabulary concept grounding in the 3D space. + +Embodied Reasoning Our work is also closely related to Embodied Question Answering (EQA) [11, 67] and Interactive Question Answering (IQA) [22, 35], which also involve an embodied agent exploring the environment and answering + +the question. However, the reasoning mainly focuses on the outcome or the history of the navigation on 2D images and does not require a holistic 3D understanding of the environment. There are also works [12, 20, 51, 54, 56, 68] targeting instruction following in embodied environments, in which an agent is asked to perform a series of tasks based on language instructions. Different from their settings, for our benchmark an embodied agent actively explores the environment and takes multi-view images for 3D-related reasoning. + +Neural Fields Our approach utilizes neural fields to parameterize an underlying 3D compact representations of scenes for reasoning. Neural field models (e.g., [43]) have gained much popularity since they can reconstruct a volumetric 3D scene representation from a set of images. Recent works [21, 24, 57, 66] have pushed it further by using classic voxel-grids to explicitly store the scene properties (e.g., density, color and feature) for rendering, which allows for real-time rendering and is utilized by this paper. Neural fields have also been used to represent dynamic scenes [14, 44], appearance [43, 45, 49, 53, 63], physics [34], robotics [32, 52], acoustics [39] and more general multi-modal signals [13]. There are also some works that integrate semantics or language in neural fields [31, 60]. However, they mainly focus on using language for manipulation, editing or generation. [26] leverages neural descriptor field [52] for 3D concept grounding. However, they require ground-truth occupancy values to train the neural field, which can not be applied to real-world scenes. In this paper, we propose to leverage voxel-based neural radiance field [57] to get the compact representations for 3D visual reasoning. + +# 3. Dataset Generation + +# 3.1. Multi-View Images + +Our dataset includes 5k 3D scenes from the Habitat-Matterport 3D Dataset (HM3D) dataset [47], and approximately 600k images rendered from the 3D scenes. The images are rendered via Habitat [50, 58]. + +Scene Generation We build our benchmark on top of the HM3DSem dataset [61], which is a large-scale dataset of 3D real-world indoor scenes with densely annotated semantics. It consists of 142,646 object instance annotations across 216 3D spaces and 3,100 rooms within those spaces. HM3D dataset uses texture information to annotate pixel-accurate object boundaries, which provides large-scale object annotations and ensures the scale, quality, and diversity of 3D visual reasoning questions of our benchmark. 

To construct a benchmark that covers questions of different difficulty levels, it's crucial that we include 3D scenes of different scales in our benchmark. We start with single rooms in HM3D scenes, which have an appropriate number of semantic concepts and relationships to base simple questions on. To get the scale of single rooms, we calculate bounding boxes of rooms according to floor instance segmentations. We then proceed to generate bounding boxes for scenes with multiple adjacent rooms. For more complex holistic scene understanding, we also include whole-house scenes, which may contain tens of rooms. Overall, the 3DMV-VQA benchmark contains three levels of scenes (2000 single-room scenes, 2000 multi-room scenes and 100 whole-house scenes).

Image Rendering After we get the bounding box of each scene, we load the scene into the Habitat simulator. We also put a robot agent with an RGB sensor at a random initial point in the bounding box. The data is collected via exploration of the robot agent. Specifically, at each step of the data collection process, we sample a navigable point and make the agent move to the point along the shortest path. When the agent has arrived at a point, we rotate the agent $30^{\circ}$ about the z-axis 12 times so that it can observe the $360^{\circ}$ view of the scene at that position. It can also look up and down by a small random angle in $[-10^{\circ}, 10^{\circ}]$ about the x-axis. A picture is taken each time the agent rotates to a new orientation, so in total 12 pictures are taken at each point. While traveling between points, the robot agent takes further pictures. We also adopt a filtering policy: when the camera is too far from or too close to an object, so that the agent cannot see anything meaningful, we discard these bad-view images.

# 3.2. Questions and Answers

We pair each scene with machine-generated questions from pre-defined templates. All questions are open-ended and can be answered with a single word (samples in Fig. 1).

Concepts and Relationships To generate questions and answers, we utilize the semantic annotations of HM3DSem [61] to get the semantic concepts and their bounding boxes, as well as the bounding boxes of the rooms. We merge semantic concepts with similar meanings (e.g., L-shaped sofa to sofa, and desk chair or computer chair to chair). We also define 11 relationships: inside, above, below, on the top of, close, far, large, small, between, on the left, and on the right. Before generating questions, we first generate a scene graph for each scene containing all concepts and relationships.

Question Types We define four types of questions: concept, counting, relation and comparison.

- Concept. Conceptual questions query whether there is an object of a certain semantic concept in the scene, or whether there is a room containing objects of the semantic concept.
- Counting. Counting-related questions ask how many instances of a semantic concept are in the scene, or how many rooms contain objects of the semantic concept.
- Relation. Relational questions ask about the 11 relationships and their compositions. Based on the number of relations in a question, we have one-hop to three-hop questions for the relation type.
- Comparison. The comparison question type focuses on the comparison of two objects, two semantic concepts or two rooms. It can be combined with the relational concepts to compare two objects (e.g., larger, closer to, more to the left, etc.).

Bias Control. Similar to previous visual reasoning benchmarks [26, 33], we use machine-generated questions since the generation process is fully controllable, so we can avoid dataset bias. Questions are generated from pre-defined templates and transformed into natural language questions with associated semantic concepts and relationships from the scene. We manually define 41 templates for question generation and use depth-first search to generate questions. We perform bias control from three perspectives: template counts, answer counts, and concept counts. For selecting templates, we sort the templates each time we generate a question to ensure a balanced question distribution. We force a flat answer distribution for each template by rejection sampling. Specifically, once we generate a question and an answer, if the number of questions with the same template and answer is significantly larger than for other answers, we discard it and continue searching. Once we find an answer that fits the desired answer distribution, we stop the depth-first search for this question. We also force a flat concept distribution for each template using the same method. In addition to controlling the number of concepts mentioned in the templates, we also control the number of relation tuples consisting of the same concept sets.

# 4. Method

Fig. 2 illustrates an overview of our framework. Specifically, our framework consists of three steps. First, we learn a 3D compact representation from multi-view images using neural fields. Then, we leverage a pre-trained 2D vision-and-language model to ground concepts in 3D space. This is achieved by 1) generating 2D pixel features using CLIP-LSeg; 2) aligning the features of the 3D voxel grid with the 2D pixel features from CLIP-LSeg [37]; 3) computing dot-product attention between the 3D features and CLIP language features [37]. Finally, to perform visual reasoning, we propose neural reasoning operators, which execute the question step by step on the 3D compact representation and output a final answer. For example, we use FILTER operators to ground semantic concepts on the 3D representation, GET_INSTANCE to get all instances of a semantic class, and COUNT_RELATION to count how many pairs of instances of two semantic classes have the queried relation.

# 4.1. Learning 3D Compact Scene Representations

Neural radiance fields [43] can reconstruct a volumetric 3D scene representation from a set of images.

![](images/19407b31f659eff8444b6c2a799e47318398d9458986c4f843c53129e65b011a.jpg)
Figure 2. An overview of our 3D-CLR framework. First, we learn a 3D compact scene representation from multi-view images using neural fields (I). Second, we use the CLIP-LSeg model to obtain per-pixel 2D features (II). We utilize a 3D-2D alignment loss to assign features to the 3D compact representation (III). By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we obtain concept grounding in 3D (IV). Finally, the reasoning process is performed via a set of neural reasoning operators, such as FILTER, GET_INSTANCE and COUNT_RELATION (V). Relation operators are learned via relation networks.
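Before detailing each component, the following toy example illustrates stage V of Fig. 2, i.e., how a parsed program of reasoning operators could be executed on a grounded 3D representation. The data layout and operator signatures are illustrative assumptions; in 3D-CLR the instance lists come from GET_INSTANCE (DBSCAN) and relations are scored by learned relation networks, whereas here a simple geometric stand-in is used.

```python
# Toy, self-contained illustration of executing a program such as
#   COUNT_RELATION(close, FILTER(chair), FILTER(table))
import numpy as np

# Hypothetical grounding result: concept -> list of instances, each an (N, 3)
# array of voxel coordinates (in the full pipeline, produced by FILTER + GET_INSTANCE).
scene = {
    "chair": [np.array([[0.0, 0.0, 0.0], [0.2, 0.0, 0.0]]),
              np.array([[3.0, 0.0, 0.0]])],
    "table": [np.array([[0.5, 0.0, 0.0], [0.7, 0.2, 0.0]])],
}

def FILTER(concept):                      # all instances grounded to a concept
    return scene.get(concept, [])

def close(a, b, thresh=1.0):              # stand-in for a learned relation module
    return np.linalg.norm(a.mean(axis=0) - b.mean(axis=0)) < thresh

def COUNT_RELATION(relation, objs_a, objs_b):
    return sum(relation(a, b) for a in objs_a for b in objs_b)

print(COUNT_RELATION(close, FILTER("chair"), FILTER("table")))   # -> 1
```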

Voxel-based methods [21, 24, 57, 66] speed up the learning process by explicitly storing scene properties (e.g., density, color and features) in their voxel grids. We leverage Direct Voxel Grid Optimization (DVGO) [57] as the backbone of our 3D compact representation for its fast training speed. DVGO stores the learned density and color properties in its grid cells. Multi-view images are rendered by interpolating through the voxel grids to obtain the density and color of each sampled point along each sampled ray, and integrating the colors with alpha-compositing weights computed from the densities according to the quadrature rule [41]. The model is trained by minimizing the L2 loss between the rendered multi-view images and the ground-truth multi-view images. By extracting the density voxel grid, we obtain the 3D compact representation (e.g., by visualizing points with density greater than 0.5, we obtain the 3D representation shown in Fig. 2 I).

# 4.2. 3D Semantic Concept Grounding

Once we extract the 3D compact representation of the scene, we need to ground the semantic concepts for reasoning from language. Recent work [26] has proposed to ground concepts from paired 3D assets and question-answers. Though promising results have been achieved on synthetic data, this is not feasible for open-vocabulary 3D reasoning on real-world data, since it is hard to collect large-scale paired 3D vision-and-language data. To address this challenge, our idea is to leverage pre-trained 2D vision-and-language models [46, 48] for 3D concept grounding in real-world scenes. But how can we map 2D concepts into 3D neural field representations? Note that 3D compact representations can be learned from 2D multi-view images and that each 2D pixel corresponds to several 3D points along its ray. Therefore, it is possible to obtain 3D features from 2D per-pixel features. Inspired by this, we first add a feature voxel grid to DVGO, in addition to density and color, to represent 3D features. We then apply CLIP-LSeg [37] to learn per-pixel 2D features, which can be attended to by CLIP concept embeddings. We use an alignment loss to align the 3D features with the 2D features so that we can perform concept grounding on the 3D representations.

2D Feature Extraction. To get per-pixel features that can be attended to by concept embeddings, we use the features from language-driven semantic segmentation (CLIP-LSeg) [37], which learns 2D per-pixel features from a pre-trained vision-language model (i.e., [46]). Specifically, it uses the text encoder from CLIP, trains an image encoder to produce an embedding vector for each pixel, and calculates word-pixel correlation scores by dot-product. By outputting the semantic class with the maximum score for each pixel, CLIP-LSeg is able to perform zero-shot 2D semantic segmentation.

3D-2D Alignment. In addition to density and color, we also store a 512-dim feature in each grid cell of the compact representation. To align the 3D per-point features with the 2D per-pixel features, we calculate an L1 loss between each pixel and each 3D point sampled on the ray of the pixel. The overall loss along a ray is the weighted sum of the pixel-point alignment losses, with the same weights as the rendering weights: $\mathcal{L}_{\mathrm{feature}} = \sum_{i=1}^{K} w_i \| \pmb{f}_i - F(\pmb{r}) \|_1$, where $\pmb{r}$ is a ray corresponding to a 2D pixel, $F(\pmb{r})$ is the 2D feature from CLIP-LSeg, $K$ is the total number of sampled points along the ray, $\pmb{f}_i$ is the feature of point $i$ obtained by interpolating the feature voxel grid, and $w_i$ is the rendering weight.
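Below is a minimal PyTorch-style sketch of this per-ray alignment loss, assuming the rendering weights are computed from the (frozen) density grid by standard alpha compositing. The tensor shapes, activation choice, and helper names are assumptions for illustration, not the paper's released code.

```python
import torch

def alignment_loss(point_feats, densities, step_size, pixel_feat):
    """Per-ray 3D-2D feature alignment loss (sketch).

    point_feats: (K, 512) features f_i interpolated from the feature voxel grid
    densities:   (K,)     densities interpolated from the frozen density grid
    step_size:   float    distance between consecutive samples along the ray
    pixel_feat:  (512,)   CLIP-LSeg feature F(r) of the pixel this ray goes through
    """
    # Standard alpha compositing: w_i = alpha_i * prod_{j<i} (1 - alpha_j)
    alpha = 1.0 - torch.exp(-torch.relu(densities) * step_size)                  # (K,)
    trans = torch.cumprod(torch.cat([alpha.new_ones(1), 1.0 - alpha]), dim=0)[:-1]
    weights = alpha * trans                                                      # (K,)

    # Weighted sum of per-point L1 distances to the 2D pixel feature.
    per_point = (point_feats - pixel_feat).abs().sum(dim=-1)                     # (K,)
    return (weights * per_point).sum()

# After training, concepts can be grounded by dot-product attention between the
# per-voxel features and CLIP text embeddings of the concept vocabulary, e.g.:
#   scores = voxel_feats @ text_embeds.T     # (num_voxels, num_concepts)
#   labels = scores.argmax(dim=-1)
```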

Concept Grounding through Attention. Since our feature voxel grid is learned from CLIP-LSeg, by calculating the dot-product attention $\langle f, v \rangle$ between the per-point 3D feature $f$ and the CLIP concept embedding $v$, we obtain zero-shot, view-independent concept grounding and semantic segmentations in the 3D representation, as presented in Fig. 2 IV.

# 4.3. Neural Reasoning Operators

Finally, we use the grounded semantic concepts for 3D reasoning from language. We first transform questions into a sequence of operators that can be executed on the 3D representation for reasoning, adopting an LSTM-based semantic parser [65] for this step. Following [26, 40], we further devise a set of operators that can be executed on the 3D representation. Please refer to the Appendix for a full list of operators.

Filter Operators. We filter all grid cells grounded to a certain semantic concept.

Get Instance Operators. We implement this by utilizing DBSCAN [15], an unsupervised algorithm that assigns cluster labels to a set of points. Given a set of points in 3D space, it groups together points that are closely packed, which we use for instance segmentation.

Relation Operators. We cannot directly execute relations on the 3D representation as we have not grounded relations. Thus, we represent each relation with a distinct neural module (which is practical since the vocabulary of relations is limited [36]). We first concatenate the voxel grid representations of all referred objects and feed them into the relation network, which consists of three 3D convolutional layers followed by three 3D deconvolutional layers. The relation network outputs a score indicating whether the objects have the queried relationship. Since vanilla 3D CNNs are very slow, we use Sparse Convolution [10] instead. Different relation modules are chosen based on the relations asked in the question.

# 5. Experiments

# 5.1. Experimental Setup

Evaluation Metric. We report visual question answering accuracy on the proposed 3DMV-VQA dataset w.r.t. the four question types. The train/val/test split is 7:1:2.

Implementation Details. For the 3D compact representations, we adopt the same architecture as DVGO, except that we skip the coarse reconstruction phase and directly train the fine reconstruction phase. After that, we freeze the density and color voxel grids and optimize only the feature voxel grid. The feature grid has a world size of 100 and a feature dimension of 512. We train the compact representations for 100,000 iterations and the 3D features for another 20,000 iterations. For LSeg, we use the official demo model, which has a ViT-L/16 image encoder and CLIP's ViT-B/32 text encoder. We follow the official script for inference and use multi-scale inference. For DBSCAN, we use an epsilon value of 1.5, a minimum of 2 samples, and the L1 (Manhattan) distance.
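As a concrete illustration of the GET_INSTANCE operator with these settings, the following is a small sketch using scikit-learn's DBSCAN. The input is assumed to be the (x, y, z) coordinates of the voxels selected by a FILTER operator, which is our reading of the pipeline rather than code from the paper.

```python
import numpy as np
from sklearn.cluster import DBSCAN

def get_instances(filtered_voxel_coords: np.ndarray) -> list[np.ndarray]:
    """Cluster the 3D coordinates of voxels grounded to one concept into instances.

    filtered_voxel_coords: (N, 3) array of voxel-center coordinates kept by FILTER.
    Returns a list of (M_k, 3) arrays, one per detected object instance.
    """
    # Settings stated in the paper: eps=1.5, min_samples=2, L1 (Manhattan) distance.
    labels = DBSCAN(eps=1.5, min_samples=2,
                    metric="manhattan").fit_predict(filtered_voxel_coords)
    # Label -1 marks noise points; every other label is one instance.
    return [filtered_voxel_coords[labels == k] for k in set(labels) if k != -1]
```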

For the relation networks, each relation is encoded by a three-layer sparse 3D convolution network with hidden size 64. The output is fed into a one-layer linear network to produce a score, which is normalized by a sigmoid function. We train the relation networks with a cross-entropy loss on the one-hop relational questions with "yes/no" answers.

# 5.2. Baselines

Our baselines range from vanilla neural networks and attention-based methods to models fine-tuned from large-scale VLMs, graph-based methods, and neural-symbolic methods.

- LSTM. The question is converted into word embeddings, which are fed into a word-level LSTM [25]. The last LSTM hidden state is fed into a multi-layer perceptron (MLP) that outputs a distribution over answers. This method can model question-conditional bias since it uses no image information.
- CNN+LSTM. The question is encoded by the final hidden state of an LSTM. We use a ResNet-50 to extract frame-level image features and average them over the time dimension. The features are fed to an MLP to predict the final answer. This is a simple baseline that examines how vanilla neural networks perform on 3DMV-VQA.
- 3D-Feature+LSTM. We take the 3D features obtained from our 3D-2D alignment, downsample the feature voxel grid with a 3D CNN, concatenate the result with the language features from an LSTM, and feed it to an MLP (a sketch of this baseline follows the list below).
- MAC [30]. MAC utilizes a Memory, Attention and Composition cell to perform an iterative reasoning process. Like CNN+LSTM, we use average pooling over the multi-view image features as the feature map.
- MAC(V). We treat the multi-view images along a trajectory as a video. We modify the MAC model by applying a temporal attention unit across the video frames to generate a latent encoding for the video.
- NS-VQA [65]. This is a 2D version of our 3D-CLR model. We use CLIP-LSeg to ground 2D semantic concepts from multi-view images, and the relation network also takes the 2D features as input. We execute the operators on each image and max-pool over the per-image answers to obtain the final prediction.
- ALPRO [38]. ALPRO is a video-and-language pre-training framework. A transformer model is pretrained on large-scale web-sourced video-text pairs and can be used for downstream tasks such as video question answering.
- LGCN [28]. LGCN represents the content of the video as a location-aware graph by incorporating object location information into the graph construction.
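As referenced in the 3D-Feature+LSTM entry above, here is a minimal PyTorch sketch of that baseline. The hidden sizes, number of layers, and pooling choices are assumptions for illustration, since the paper does not spell them out.

```python
import torch
import torch.nn as nn

class ThreeDFeatureLSTM(nn.Module):
    """Sketch of the 3D-Feature+LSTM baseline (dimensions are assumed)."""
    def __init__(self, vocab_size, num_answers, feat_dim=512, hidden=256):
        super().__init__()
        # Downsample the (feat_dim, D, H, W) feature voxel grid.
        self.voxel_encoder = nn.Sequential(
            nn.Conv3d(feat_dim, 64, kernel_size=3, stride=2, padding=1), nn.ReLU(),
            nn.Conv3d(64, 64, kernel_size=3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool3d(1),                 # -> (B, 64, 1, 1, 1)
        )
        self.embed = nn.Embedding(vocab_size, 128)
        self.lstm = nn.LSTM(128, hidden, batch_first=True)
        self.mlp = nn.Sequential(
            nn.Linear(64 + hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, num_answers),
        )

    def forward(self, voxel_feats, question_tokens):
        v = self.voxel_encoder(voxel_feats).flatten(1)         # (B, 64)
        _, (h, _) = self.lstm(self.embed(question_tokens))     # h: (1, B, hidden)
        q = h[-1]                                              # (B, hidden)
        return self.mlp(torch.cat([v, q], dim=-1))             # answer logits
```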

| Methods | Concept | Counting | Relation | Comparison | Overall |
| --- | --- | --- | --- | --- | --- |
| Q-type (rand.) | 49.4 | 10.7 | 21.6 | 49.2 | 26.4 |
| LSTM | 53.4 | 15.3 | 24.0 | 55.2 | 29.8 |
| CNN+LSTM | 57.8 | 22.1 | 35.2 | 59.7 | 37.8 |
| MAC | 62.4 | 19.7 | 47.8 | 62.3 | 46.7 |
| MAC(V) | 60.0 | 24.6 | 51.6 | 65.9 | 50.0 |
| NS-VQA | 59.8 | 21.5 | 33.4 | 61.6 | 38.0 |
| ALPRO | 65.8 | 12.7 | 42.2 | 68.2 | 43.3 |
| LGCN | 56.2 | 19.5 | 35.5 | 66.7 | 39.1 |
| 3D-Feature+LSTM | 61.2 | 22.4 | 49.9 | 61.3 | 48.2 |
| 3D-CLR (Ours) | 66.1 | 41.3 | 57.6 | 72.3 | 57.7 |

Table 1. Question-answering accuracy of 3D visual reasoning baselines on different question types.

# 5.3. Experimental Results

Result Analysis. We summarize the performance of the baseline models on each question type in Table 1. All models are trained on the training set until convergence, tuned on the validation set, and evaluated on the test set. We provide detailed analysis below.

First, to examine the language bias of the dataset, we find that the performance of LSTM is only slightly higher than the random and frequency baselines, and all other baselines outperform LSTM by a large margin. This suggests that there is little language bias in our dataset. Second, we observe that encoding temporal information in MAC (i.e., MAC(V)) is better than average-pooling the features, especially on counting and relation questions. This suggests that average-pooling may cause the model to lose information from the multi-view images, while attention over the multi-view images helps boost 3D reasoning performance. Third, we find that fine-tuning a large-scale pretrained model (i.e., ALPRO) yields relatively high accuracy on concept-related questions, but its counting accuracy is only slightly higher than the random baseline, suggesting that pretraining on a large-scale video-language dataset may improve the model's perception ability but does not equip it to tackle more difficult reasoning types such as counting. Next, we find that LGCN performs poorly on relational questions, indicating that building a location-aware graph over 2D objects still does not equip the model with 3D location reasoning abilities. Last but not least, we find that 3D-based baselines are better than their 2D counterparts. 3D-Feature+LSTM performs better on 3D-related questions, such as counting and relation, than most of the image-based baselines. NS-VQA performs comparably to 3D-CLR on conceptual questions, but underperforms it by a large margin on counting and relation, suggesting that these two question types require a holistic understanding of the entire 3D scene. Our 3D-CLR outperforms the other baselines by a large margin, but is still far from satisfactory. From the accuracy on conceptual questions, we can see that it grounds only approximately $66\%$ of the semantic concepts. This indicates that our 3DMV-VQA dataset is indeed very challenging.

Qualitative Examples. In Fig. 3, we show four qualitative examples. These examples show that 3D-CLR can infer an accurate 3D representation from multi-view images and ground semantic concepts on the 3D representation to obtain semantic segmentations of the entire scene. 3D-CLR can also learn 3D relationships such as "close", "largest", "on top of" and so on. However, 3D-CLR also fails on some questions. For the third scene in the qualitative examples, it fails to ground the concepts "mouse" and "printer". It also sometimes cannot accurately count the instances. We give detailed discussions below.

# 5.4. Discussions

We perform an in-depth analysis to understand the challenges of this dataset.
We leverage the modular design of 3D-CLR, replacing individual components of the framework with ground-truth annotations for model diagnosis. The results are shown in Fig. 4. 3D-CLR w/ Semantic denotes our model with ground-truth semantic concepts from HM3DSem annotations; 3D-CLR w/ Instance denotes our model with ground-truth instance segmentations of the semantic concepts. From Fig. 3 and Fig. 4, we summarize several key challenges of our benchmark:

Very close object instances From Fig. 4, we can see that even with ground-truth semantic labeling of the 3D points, 3D-CLR still produces unsatisfactory results on counting questions. This suggests that the instance segmentations provided by DBSCAN are not accurate enough. From the top two qualitative examples in Fig. 3, we can also see that if two chairs are in contact with each other, DBSCAN cannot tell them apart and thus performs poorly on counting. One crucial future direction is to improve unsupervised instance segmentation for very close object instances.

![](images/e5f9988ee998345087af1d898b960d9fdfd34ec0911c04da788d53c90dc149f8.jpg)
Figure 3. Qualitative examples of our 3D-CLR. We can see that 3D-CLR can ground most of the concepts and answer most questions correctly. However, it still fails sometimes, mainly because it cannot separate close object instances and ground small objects.

![](images/5ce1c4d9aa672b4fac5b8b44d25fd911caa8af39a62561b7b2db9edf488ea.jpg)
Figure 4. Model diagnosis of our 3D-CLR.

Grounding small objects Fig. 4 suggests that 3D-CLR fails to ground a large portion of the semantic concepts, which hinders performance. From the last example in Fig. 3, we can see that 3D-CLR fails to ground small objects such as "computer mouse". Further examination indicates two possible reasons: 1) CLIP-LSeg fails to assign the right features to objects covering few pixels; 2) the resolution of the feature voxel grid is not high enough, so small objects cannot be represented in the compact representation. An interesting future direction would be learning exploration policies that enable the agent to get closer to uncertain objects that cannot be grounded.

Ambiguity on 3D relations Even with ground-truth semantic and instance segmentations, the performance of the relation network still needs to be improved. We find that most of the failure cases are correlated with the "inside" relation. From the segmentations in Fig. 3, we can see that 3D-CLR is unable to ground the objects inside the cabinets. A potential solution could be joint depth and segmentation prediction.

# 6. Conclusion

In this paper, we introduce the novel task of 3D reasoning from multi-view images. By placing an embodied robot that actively explores indoor environments, we collect a large-scale benchmark named 3DMV-VQA. We also propose a new 3D-CLR model that incorporates neural fields, a 2D VLM, and neural reasoning operators for this task, and illustrate its effectiveness. Finally, we perform an in-depth analysis to understand the challenges of this dataset and point out potential future directions. We hope that 3DMV-VQA can be used to push the frontiers of 3D reasoning.

Acknowledgements. This work was supported by the MIT-IBM Watson AI Lab, DARPA MCS, DSO grant DSOCO21072, and gift funding from MERL, Cisco, Sony, and Amazon. We would also like to acknowledge the computation support from AiMOS, a server cluster for the IBM Research AI Hardware Center.
+ +# References + +[1] Panos Achlioptas, Ahmed Abdelreehm, Fei Xia, Mohamed Elhoseiny, and Leonidas J. Guibas. Referit3d: Neural listeners for fine-grained 3d object identification in real-world scenes. In ECCV, 2020. 2, 3 +[2] Daich Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19107-19117, 2022. 2, 3 +[3] Dave Zhenyu Chen, Angel X. Chang, and Matthias Nießner. Scanrefer: 3d object localization in rgb-d scans using natural language. In ECCV, 2020. 2, 3 +[4] Dave Zhenyu Chen, Ali Gholami, Matthias Nießner, and Angel X. Chang. Scan2cap: Context-aware dense captioning in rgb-d scans. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3192-3202, 2021. 3 +[5] Z Chen, L Ma, W Luo, and KKY Wong. Weakly-supervised spatio-temporally grounding natural sentence in video. In ACL, 2019. 3 +[6] Zhenfang Chen, Jiayuan Mao, Jiajun Wu, Kwan-Yee Kenneth Wong, Joshua B Tenenbaum, and Chuang Gan. Grounding physical concepts of objects and events through dynamic visual reasoning. *ICLR*, 2021. 3 +[7] Zhenfang Chen, Peng Wang, Lin Ma, Kwan-Yee K Wong, and Qi Wu. Cops-ref: A new dataset and task on compositional referring expression comprehension. In CVPR, 2020. 3 +[8] Zhenfang Chen, Kexin Yi, Yunzhu Li, Mingyu Ding, Antonio Torralba, Joshua B Tenenbaum, and Chuang Gan. Comphy: Compositional physical reasoning of objects and events from videos. In ICLR, 2022. 3 +[9] Zhenfang Chen, Qinhong Zhou, Yikang Shen, Yining Hong, Hao Zhang, and Chuang Gan. See, think, confirm: Interactive prompting between vision and language models for knowledge-based visual reasoning. arXiv preprint arXiv:2301.05226, 2023. 3 +[10] Spconv Contributors. Spconv: Spatially sparse convolution library. https://github.com/traveller59/spconv, 2022.6 +[11] Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied question answering. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 2135-213509, 2018. 3 +[12] Mingyu Ding, Yan Xu, Zhenfang Chen, David Daniel Cox, Ping Luo, Joshua B Tenenbaum, and Chuang Gan. Embodied concept learner: Self-supervised learning of concepts and mapping through instruction following. In CoRL. 3 +[13] Yilun Du, M. Katherine Collins, B. Joshua Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields. In Advances in Neural Information Processing Systems, 2021. 3 +[14] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B. Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021. 3 + +[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in large spatial databases with noise. In KDD, 1996. 6 +[16] Yasaman Etesam, Leon Kochiev, and Angel X Chang. 3dvqa: Visual question answering for 3d environments. In 2022 19th Conference on Robots and Vision (CRV), pages 233-240. IEEE, 2022. 2, 3 +[17] Mingtao Feng, Zhen Li, Qi Li, Liang Zhang, Xiangdong Zhang, Guangming Zhu, Hui Zhang, Yaonan Wang, and Ajmal S. Mian. Free-form description guided 3d visual graph network for object grounding in point cloud. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3702-3711, 2021. 3 +[18] Chuang Gan, Yandong Li, Haoxiang Li, Chen Sun, and Boqing Gong. 
Vqs: Linking segmentations to questions and answers for supervised attention in vqa and question-focused semantic segmentation. In ICCV, pages 1811-1820, 2017. 3 +[19] Siddha Ganju, Olga Russakovsky, and Abhinav Kumar Gupta. What's in a question: Using visual questions as a form of supervision. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6422-6431, 2017. 3 +[20] Xiaofeng Gao, Qiaozi Gao, Ran Gong, Kaixiang Lin, Govind Thattai, and Gaurav S Sukhatme. Dialfred: Dialogue-enabled agents for embodied instruction following. arXiv, 2022. 3 +[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien P. C. Valentin. Fastnerf: High-fidelity neural rendering at 200fps. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 14326-14335, 2021. 3, 5 +[22] Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4089-4098, 2018. 3 +[23] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6325-6334, 2017. 2, 3 +[24] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul E. Debevec. Baking neural radiance fields for real-time view synthesis. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 3, 5 +[25] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9:1735-1780, 1997. 6 +[26] Yining Hong, Yilun Du, Chunru Lin, Joshua B Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. arXiv preprint arXiv:2207.06403, 2022. 3, 4, 5, 6 +[27] Yining Hong, Li Yi, Joshua B. Tenenbaum, Antonio Torralba, and Chuang Gan.Ptr: A benchmark for part-based conceptual, relational, and physical reasoning. In NeurIPS, 2021. 3 +[28] Deng Huang, Peihao Chen, Runhao Zeng, Qing Du, Mingkui Tan, and Chuang Gan. Location-aware graph convolutional networks for video question answering. In AAAI, 2020. 3, 7 +[29] Pin-Hao Huang, Han-Hung Lee, Hwann-Tzong Chen, and Tyng-Luh Liu. Text-guided graph neural networks for referring 3d instance segmentation. In AAAI, 2021. 3 + +[30] D. A. Hudson and Christopher D. Manning. Compositional attention networks for machine reasoning. *ICLR*, 2018. 3, 6 +[31] Ajay Jain, Ben Mildenhall, Jonathan T. Barron, P. Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 857-866, 2022. 3 +[32] Zhenyu Jiang, Yifeng Zhu, Maxwell Svetlik, Kuan Fang, and Yuke Zhu. Synergies between affordance and geometry: 6-dof grasp detection via implicit representations. ArXiv, abs/2104.01542, 2021. 3 +[33] J. Johnson, Bharath Hariharan, L. V. D. Maaten, Li Fei-Fei, C. L. Zitnick, and Ross B. Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1988-1997, 2017. 2, 3, 4 +[34] Stefan Kollmannsberger, Davide D'Angella, Moritz Jokeit, and Leon Alexander Herrmann. Physics-informed neural networks. Deep Learning in Computational Mechanics, 2021. 3 +[35] Natalia Konstantinova and Constantin Orasan. Interactive question answering. In EMNLP. 
IGI Global, 2013. 3 +[36] Barbara Landau and Ray Jackendoff. “what” and “where” in spatial language and spatial cognition. Behavioral and Brain Sciences, 16:217-238, 1993. 6 +[37] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation. *ICLR*, 2022, 2, 4, 5 +[38] Dongxu Li, Junnan Li, Hongdong Li, Juan Carlos Niebles, and Steven C. H. Hoi. Align and prompt: Video-and-language pre-training with entity prompts. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4943-4953, 2022. 3, 7 +[39] Andrew Luo, Yilun Du, Michael J Tarr, Joshua B Tenenbaum, Antonio Torralba, and Chuang Gan. Learning neural acoustic fields. arXiv preprint arXiv:2204.00628, 2022. 3 +[40] Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neuro-symbolic concept learner: Interpreting scenes words and sentences from natural supervision. ArXiv, abs/1904.12584, 2019. 3, 6 +[41] Nelson L. Max. Optical models for direct volume rendering. IEEE Trans. Vis. Comput. Graph., 1:99-108, 1995. 5 +[42] Lars M. Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4455-4465, 2019. 3 +[43] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. ECCV, 2020. 3, 4 +[44] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE International Conference on Computer Vision, pages 5379-5389, 2019. 3 + +[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. CVPR, 2020. 3 +[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 5 +[47] Santhosh K. Ramakrishnan, Aaron Gokaslan, Erik Wijmans, Oleksandr Maksymets, Alexander Clegg, John Turner, Eric Undersander, Wojciech Galuba, Andrew Westbury, Angel X. Chang, Manolis Savva, Yili Zhao, and Dhruv Batra. Habitatmatterport 3d dataset (hm3d): 1000 large-scale 3d environments for embodied ai. ArXiv, abs/2109.08238, 2021. 2, 3 +[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. ArXiv, abs/2102.12092, 2021.5 +[49] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. ICCV, pages 2304-2314, 2019. 3 +[50] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 3 +[51] Mohit Shridhar, Jesse Thomason, Daniel Gordon, Yonatan Bisk, Winson Han, Roozbeh Mottaghi, Luke Zettlemoyer, and Dieter Fox. 
Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In CVPR, 2020. 3 +[52] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. arXiv preprint arXiv:2112.05124, 2021. 3 +[53] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Proc. NeurIPS 2019, 2019. 3 +[54] Chan Hee Song, Jihyung Kil, Tai-Yu Pan, Brian M Sadler, Wei-Lun Chao, and Yu Su. One step at a time: Long-horizon vision-and-language navigation with milestones. arXiv preprint arXiv:2202.07028, 2022.3 +[55] Elizabeth S Spelke, Karen Breinlinger, Kristen Jacobson, and Ann Phillips. Gestalt Relations and Object Perception: A Developmental Study. Perception, 22(12):1483-1501, 1993. 2 +[56] Alessandro Suglia, Qiaozi Gao, Jesse Thomason, Govind Thattai, and Gaurav Sukhatme. Embodied bert: A transformer model for embodied, language-guided visual task completion. arXiv preprint arXiv:2108.04927, 2021. 3 +[57] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields + +reconstruction. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3, 5 +[58] Andrew Szot, Alex Clegg, Eric Undersander, Erik Wijmans, Yili Zhao, John Turner, Noah Maestre, Mustafa Mukadam, Devendra Chaplot, Oleksandr Maksymets, Aaron Gokaslan, Vladimir Vondrus, Sameer Dharur, Franziska Meier, Wojciech Galuba, Angel Chang, Zsolt Kira, Vladlen Koltun, Jitendra Malik, Manolis Savva, and Dhruv Batra. Habitat 2.0: Training home assistants to rearrange their habitat. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 3 +[59] Ivan Vendrov, Ryan Kiros, Sanja Fidler, and Raquel Urtasun. Order-embeddings of images and language. CoRR, abs/1511.06361, 2016. 3 +[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. ArXiv, abs/2112.05139, 2021. 3 +[61] Karmesh Yadav, Ram Ramrakhya, Santhosh Kumar Ramakrishnan, Theo Gervet, John Turner, Aaron Gokaslan, Noah Maestre, Angel Xuan Chang, Dhruv Batra, Manolis Savva, et al. Habitat-matterport 3d semantics dataset. arXiv preprint arXiv:2210.05633, 2022. 2, 3, 4 +[62] Xu Yan, Zhihao Yuan, Yuhao Du, Yinghong Liao, Yao Guo, Zhen Li, and Shuguang Cui. Clevr3d: Compositional language and elementary visual reasoning for question answering in 3d real-world scenes. arXiv preprint arXiv:2112.11691, 2021. 2, 3 +[63] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Proc. NeurIPS, 2020. 3 +[64] Shuquan Ye, Dongdong Chen, Songfang Han, and Jing Liao. 3d question answering. ArXiv, abs/2112.08359, 2021. 2, 3 +[65] Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Joshua B. Tenenbaum. Neural-symbolic vqa: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. 3, 6 +[66] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoptrees for real-time rendering of neural radiance fields. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5732-5741, 2021. 3, 5 +[67] Licheng Yu, Xinlei Chen, Georgia Gkioxari, Mohit Bansal, Tamara L Berg, and Dhruv Batra. 
Multi-target embodied question answering. In CVPR, pages 6309-6318, 2019. 3 +[68] Kaizhi Zheng, Xiaotong Chen, Odest Chadwicke Jenkins, and Xin Eric Wang. Vlmbench: A compositional benchmark for vision-and-language manipulation. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, 2022. 3 +[69] Yuke Zhu, O. Groth, Michael S. Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4995-5004, 2016. 2, 3 \ No newline at end of file diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/images.zip b/2023/3D Concept Learning and Reasoning From Multi-View Images/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7818ca29906699584defafbefa8e0d477e86e0a7 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:481d99324bf0ab35b13619238a29be4835a010f9bce781b4d007d4c896ba6fec +size 482300 diff --git a/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json b/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6ac723d772168065761d78cf6bf84369ddb29f70 --- /dev/null +++ b/2023/3D Concept Learning and Reasoning From Multi-View Images/layout.json @@ -0,0 +1,7345 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "spans": [ + { + "bbox": [ + 104, + 103, + 489, + 121 + ], + "type": "text", + "content": "3D Concept Learning and Reasoning from Multi-View Images" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "spans": [ + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "Yining Hong" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Chunru Lin" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Yilun Du" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Zhenfang Chen" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Joshua B. 
Tenenbaum" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", Chuang Gan" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{4,5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "UCLA, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "Shanghai Jiaotong University, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "MIT CSAIL, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "UMass Amherst, " + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 158, + 142, + 434, + 213 + ], + "type": "text", + "content": "MIT-IBM Watson AI Lab https://vis-www.cs.umass.edu/3d-clr/" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 242, + 179, + 425 + ], + "blocks": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "lines": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "spans": [ + { + "bbox": [ + 50, + 242, + 179, + 425 + ], + "type": "image", + "image_path": "0139c5dde162a2aa92dcfb3c8dcbb7922e85a282412b0fa7132181e9fb7d8996.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "lines": [ + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "spans": [ + { + "bbox": [ + 50, + 430, + 168, + 464 + ], + "type": "text", + "content": "Concept: Q: Are there any televisions? A: Yes" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "lines": [ + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "spans": [ + { + "bbox": [ + 170, + 428, + 293, + 476 + ], + "type": "text", + "content": "Counting: \nQ: How many chairs are close to the table in the room with plant on the cabinet? A: 6" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "lines": [ + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "spans": [ + { + "bbox": [ + 170, + 488, + 279, + 510 + ], + "type": "text", + "content": "Q: How many rooms have sofas? A: 1" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "lines": [ + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "spans": [ + { + "bbox": [ + 46, + 513, + 547, + 536 + ], + "type": "text", + "content": "Figure 1. An exemplar scene with multi-view images and question-answer pairs of our 3DMV-VQA dataset. 3DMV-VQA contains four question types: concept, counting, relation, comparison. Orange words denote semantic concepts; blue words denote the relations." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 185, + 243, + 272, + 332 + ], + "blocks": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "lines": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "spans": [ + { + "bbox": [ + 185, + 243, + 272, + 332 + ], + "type": "image", + "image_path": "9c2499a72b5232613521f83970886d73892accd35822abbe76380eef8f2aa6d4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 184, + 335, + 272, + 423 + ], + "blocks": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "lines": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "spans": [ + { + "bbox": [ + 184, + 335, + 272, + 423 + ], + "type": "image", + "image_path": "bcdc54a8ca47ecfc2e80d202e50dbd8f6ca69bcd75c2d68e299c3a97942513b0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 276, + 243, + 363, + 332 + ], + "blocks": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "lines": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "spans": [ + { + "bbox": [ + 276, + 243, + 363, + 332 + ], + "type": "image", + "image_path": "adb9c46914582c93ffb458cac411cec963fc04c004c0d353d24ebdb993d37a5a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 276, + 335, + 362, + 422 + ], + "blocks": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "lines": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "spans": [ + { + "bbox": [ + 276, + 335, + 362, + 422 + ], + "type": "image", + "image_path": "8b29d6f5ef01af8dd76de5ff59d2297cdec550b6c7ae842222df4b69bb5b62d8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + "lines": [ + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + "spans": [ + { + "bbox": [ + 295, + 429, + 416, + 476 + ], + "type": "text", + "content": "Relation: Q: Facing the computer from the curtain, is there a lamp on the right? A: Yes" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "lines": [ + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "spans": [ + { + "bbox": [ + 295, + 489, + 408, + 510 + ], + "type": "text", + "content": "Q: What's on the cabinet in the smaller room? 
A: Plant" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 365, + 243, + 453, + 332 + ], + "blocks": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "lines": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "spans": [ + { + "bbox": [ + 365, + 243, + 453, + 332 + ], + "type": "image", + "image_path": "49645c25bde17b0db6d9b773fde5388ec829dbf8fc93453c2f9d34d1a84bd58e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 365, + 335, + 452, + 422 + ], + "blocks": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "lines": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "spans": [ + { + "bbox": [ + 365, + 335, + 452, + 422 + ], + "type": "image", + "image_path": "533b12da53016d7d003d444faa1199643658affa31d497d3c6002146c1f27325.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "lines": [ + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "spans": [ + { + "bbox": [ + 418, + 429, + 541, + 510 + ], + "type": "text", + "content": "Comparison: \nQ: Are there fewer pictures in the larger room than the other room? A: No \nQ: Is the computer closer to a printer or a lamp? \nA: Printer" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 456, + 244, + 543, + 332 + ], + "blocks": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "lines": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "spans": [ + { + "bbox": [ + 456, + 244, + 543, + 332 + ], + "type": "image", + "image_path": "874bbee417fccf7c5a5cbc21ef58df9624603c16927e8facc7e608b9c78004c5.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 456, + 335, + 543, + 422 + ], + "blocks": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "lines": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "spans": [ + { + "bbox": [ + 456, + 335, + 543, + 422 + ], + "type": "image", + "image_path": "8c5023c7c3bb44a5cbe06b847a2eb0751a7ff0bfc87e1dd62dc7e15795a12609.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "spans": [ + { + "bbox": [ + 143, + 555, + 192, + 568 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "Humans are able to accurately reason in 3D by gathering multi-view observations of the surrounding world. Inspired by this insight, we introduce a new large-scale benchmark for 3D multi-view visual question answering (3DMV-VQA). This dataset is collected by an embodied agent actively moving and capturing RGB images in an environment using the Habitat simulator. In total, it consists of approximately 5k scenes, 600k images, paired with 50k questions. We evaluate various state-of-the-art models for visual reasoning on our benchmark and find that they all perform poorly. 
We suggest" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "content": "that a principled approach for 3D reasoning from multi-view images should be to infer a compact 3D representation of the world from the multi-view images, which is further grounded on open-vocabulary semantic concepts, and then to execute reasoning on these 3D representations. As the first step towards this approach, we propose a novel 3D concept learning and reasoning (3D-CLR) framework that seamlessly combines these components via neural fields, 2D pre-trained vision-language models, and neural reasoning operators. Experimental results suggest that our framework outperforms baseline models by a large margin, but the challenge remains largely unsolved. We further perform an in-depth analysis of the challenges and highlight potential future directions." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9202" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 127, + 83 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 289, + 258 + ], + "type": "text", + "content": "Visual reasoning, the ability to composite rules on internal representations to reason and answer questions about visual scenes, has been a long-standing challenge in the field of artificial intelligence and computer vision. Several datasets [23, 33, 69] have been proposed to tackle this challenge. However, they mainly focus on visual reasoning on 2D single-view images. Since 2D single-view images only cover a limited region of the whole space, such reasoning inevitably has several weaknesses, including occlusion, and failing to answer 3D-related questions about the entire scene that we are interested in. As shown in Fig. 
1, it's difficult, even for humans, to count the number of chairs in a scene due to the object occlusion, and it's even harder to infer 3D relations like \"closer\" from a single-view 2D image." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 259, + 289, + 426 + ], + "type": "text", + "content": "On the other hand, there's strong psychological evidence that human beings conduct visual reasoning in the underlying 3D representations [55]. Recently, there have been several works focusing on 3D visual question answering [2,16,62,64]. They mainly use traditional 3D representations (e.g., point clouds) for visual reasoning. This is inconsistent with the way human beings perform 3D reasoning in real life. Instead of being given an entire 3D representation of the scene at once, humans will actively walk around and explore the whole environment, ingesting image observations from different views and converting them into a holistic 3D representation that assists them in understanding and reasoning about the environment. Such abilities are crucial for many embodied AI applications, such as building assistive robots." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 289, + 641 + ], + "type": "text", + "content": "To this end, we propose the novel task of 3D visual reasoning from multi-view images taken by active exploration of an embodied agent. Specifically, we generate a large-scale benchmark, 3DMV-VQA (3D multi-view visual question answering), that contains approximately 5k scenes and 50k question-answering pairs about these scenes. For each scene, we provide a collection of multi-view image observations. We generate this dataset by placing an embodied agent in the Habitat-Matterport environment [47], which actively explores the environment and takes pictures from different views. We also obtain scene graph annotations from the Habitat-Matterport 3D semantics dataset (HM3DSem) [61], including ground-truth locations, segmentations, semantic information of the objects, as well as relationships among the objects in the environments, for model diagnosis. To evaluate the models' 3D reasoning abilities on the entire environment, we design several 3D-related question types, including concept, counting, relation and comparison." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 642, + 289, + 714 + ], + "type": "text", + "content": "Given this new task, the key challenges we would like to investigate include: 1) how to efficiently obtain the compact visual representation to encode crucial properties (e.g., semantics and relations) by integrating all incomplete observations of the environment in the process of active exploration for 3D visual reasoning? 
2) How to ground the semantic con" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 120 + ], + "type": "text", + "content": "cepts on these 3D representations that could be leveraged for downstream tasks, such as visual reasoning? 3) How to infer the relations among the objects, and perform step-by-step reasoning?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "spans": [ + { + "bbox": [ + 304, + 121, + 547, + 443 + ], + "type": "text", + "content": "As the first step to tackling these challenges, we propose a novel model, 3D-CLR (3D Concept Learning and Reasoning). First, to efficiently obtain a compact 3D representation from multi-view images, we use a neural-field model based on compact voxel grids [57] which is both fast to train and effective at storing scene properties in its voxel grids. As for concept learning, we observe that previous works on 3D scene understanding [1,3] lack the diversity and scale with regard to semantic concepts due to the limited amount of paired 3D-and-language data. Although large-scale vision-language models (VLMs) have achieved impressive performances for zero-shot semantic grounding on 2D images, leveraging these pretrained models for effective open-vocabulary 3D grounding of semantic concepts remains a challenge. To address these challenges, we propose to encode the features of a pre-trained 2D vision-language model (VLM) into the compact 3D representation defined across voxel locations. Specifically, we use the CLIP-LSeg [37] model to obtain features on multi-view images, and propose an alignment loss to map the features in our 3D voxel grid to 2D pixels. By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we can ground the semantic concepts in the 3D compact representation. Finally, to answer the questions, we introduce a set of neural reasoning operators, including FILTER, COUNT, RELATION operators and so on, which take the 3D representations of different objects as input and output the predictions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 547, + 527 + ], + "type": "text", + "content": "We conduct experiments on our proposed 3DMV-VQA benchmark. Experimental results show that our proposed 3D-CLR outperforms all baseline models a lot. However, failure cases and model diagnosis show that challenges still exist concerning the grounding of small objects and the separation of close object instances. We provide an in-depth analysis of the challenges and discuss potential future directions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 306, + 528, + 547, + 540 + ], + "type": "text", + "content": "To sum up, we have the following contributions in this paper." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 555, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "spans": [ + { + "bbox": [ + 306, + 555, + 545, + 579 + ], + "type": "text", + "content": "- We propose the novel task of 3D concept learning and reasoning from multi-view images." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "spans": [ + { + "bbox": [ + 306, + 584, + 547, + 620 + ], + "type": "text", + "content": "- By having robots actively explore the embodied environments, we collect a large-scale benchmark on 3D multiview visual question answering (3DMV-VQA)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 624, + 547, + 684 + ], + "type": "text", + "content": "- We devise a model that incorporates a neural radiance field, 2D pretrained vision and language model, and neural reasoning operators to ground the concepts and perform 3D reasoning on the multi-view images. We illustrate that our model outperforms all baseline models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "- We perform an in-depth analysis of the challenges of this new task and highlight potential future directions." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9203" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 134, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 289, + 449 + ], + "type": "text", + "content": "Visual Reasoning There have been numerous tasks focusing on learning visual concepts from natural language, including visually-grounded question answering [18, 19], text-image retrieval [59] and so on. Visual reasoning has drawn much attention recently as it requires human-like understanding of the visual scene. A wide variety of benchmarks have been created over the recent years [7, 8, 23, 27, 33, 69]. However, they mainly focus on visual reasoning from 2D single-view images, while there's strong psychological evidence that human beings perform visual reasoning on the underlying 3D representations. In this paper, we propose the novel task of visual reasoning from multi-view images, and collect a large-scale benchmark for this task. 
In recent years, numerous visual reasoning models have also been proposed, ranging from attention-based methods [5, 30], graph-based methods [28], to models based on large pretrained vision-language model [9, 38]. These methods model the reasoning process implicitly with neural networks. Neural-symbolic methods [6, 40, 65] explicitly perform symbolic reasoning on the objects representations and language representations. They use perception models to extract 2D masks as a first step, and then execute operators and ground concepts on these pre-segmented masks, but are limited to a set of predefined concepts on simple scenes. [26] proposes to use the feature vectors from occupancy networks [42] to do visual reasoning in the 3D space. However, they also use a synthetic dataset, and learn a limited set of semantic concepts from scratch. We propose to learn 3D neural field features from 2D multi-view real-world images, and incorporate a 2D VLM for open-vocabulary reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 450, + 289, + 666 + ], + "type": "text", + "content": "3D Reasoning Understanding and reasoning about 3D scenes has been a long-standing challenge. Recent works focus on leveraging language to explore 3D scenes, such as object captioning [3,4] and object localization from language [1, 17, 29]. Our work is mostly related to 3D Visual Question Answering [2, 16, 62, 64] as we both focus on answering questions and reasoning about 3D scenes. However, these works use point clouds as 3D representations, which diverts from the way human beings perform 3D reasoning. Instead of being given an entire 3D representation all at once, human beings would actively move and explore the environment, integrating multi-view information to get a compact 3D representation. Therefore, we propose 3D reasoning from multi-view images. In addition, since 3D assets paired with natural language descriptions are hard to get in real-life scenarios, previous works struggle to ground open-vocabulary concepts. In our work, we leverage 2D VLMs for zero-shot open-vocabulary concept grounding in the 3D space." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 288, + 715 + ], + "type": "text", + "content": "Embodied Reasoning Our work is also closely related to Embodied Question Answering (EQA) [11, 67] and Interactive Question Answering (IQA) [22, 35], which also involve an embodied agent exploring the environment and answering" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "content": "the question. However, the reasoning mainly focuses on the outcome or the history of the navigation on 2D images and does not require a holistic 3D understanding of the environment. There are also works [12, 20, 51, 54, 56, 68] targeting instruction following in embodied environments, in which an agent is asked to perform a series of tasks based on language instructions. 
Different from their settings, for our benchmark an embodied agent actively explores the environment and takes multi-view images for 3D-related reasoning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 548, + 433 + ], + "type": "text", + "content": "Neural Fields Our approach utilizes neural fields to parameterize an underlying 3D compact representations of scenes for reasoning. Neural field models (e.g., [43]) have gained much popularity since they can reconstruct a volumetric 3D scene representation from a set of images. Recent works [21, 24, 57, 66] have pushed it further by using classic voxel-grids to explicitly store the scene properties (e.g., density, color and feature) for rendering, which allows for real-time rendering and is utilized by this paper. Neural fields have also been used to represent dynamic scenes [14, 44], appearance [43, 45, 49, 53, 63], physics [34], robotics [32, 52], acoustics [39] and more general multi-modal signals [13]. There are also some works that integrate semantics or language in neural fields [31, 60]. However, they mainly focus on using language for manipulation, editing or generation. [26] leverages neural descriptor field [52] for 3D concept grounding. However, they require ground-truth occupancy values to train the neural field, which can not be applied to real-world scenes. In this paper, we propose to leverage voxel-based neural radiance field [57] to get the compact representations for 3D visual reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "spans": [ + { + "bbox": [ + 306, + 445, + 421, + 457 + ], + "type": "text", + "content": "3. Dataset Generation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "spans": [ + { + "bbox": [ + 306, + 465, + 418, + 478 + ], + "type": "text", + "content": "3.1. Multi-View Images" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 547, + 533 + ], + "type": "text", + "content": "Our dataset includes 5k 3D scenes from the Habitat-Matterport 3D Dataset (HM3D) dataset [47], and approximately 600k images rendered from the 3D scenes. The images are rendered via Habitat [50, 58]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 548, + 640 + ], + "type": "text", + "content": "Scene Generation We build our benchmark on top of the HM3DSem dataset [61], which is a large-scale dataset of 3D real-world indoor scenes with densely annotated semantics. It consists of 142,646 object instance annotations across 216 3D spaces and 3,100 rooms within those spaces. HM3D dataset uses texture information to annotate pixel-accurate object boundaries, which provides large-scale object annotations and ensures the scale, quality, and diversity of 3D visual reasoning questions of our benchmark." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 548, + 715 + ], + "type": "text", + "content": "To construct a benchmark that covers questions of different difficulty levels, it's crucial that we include 3D scenes of different scales in our benchmark. We start with single rooms in HM3D scenes, which has an appropriate amount of semantic concepts and relationships to base some simple questions on. To get the scale of single rooms, we calculate bounding" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "9204" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "boxes of rooms according to floor instance segmentations. We then proceed to generate bounding boxes for scenes with multiple adjacent rooms. For more complex holistic scene understanding, we also include whole-house scenes, which may contain tens of rooms. Overall, the 3DMV-VQA benchmark contains three levels of scenes (2000 single-room scenes, 2000 multi-room scenes and 100 whole-house scenes)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "spans": [ + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": "Image Rendering After we get the bounding box of each scene, we load the scene into the Habitat simulator. We also put a robot agent with an RGB sensor at a random initial point in the bounding box. The data is collected via exploration of the robot agent. Specifically, at each step of the data collection process, we sample a navigable point and make the agent move to the point along the shortest path. When the agent has arrived at a point, we rotate the agent " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "30^{\\circ}" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " along z-axis for 12 times so that the agent can observe the " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " view of the scene at the position. It can also look up and down, with a random mild angle from " + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "inline_equation", + "content": "[-10^{\\circ}, 10^{\\circ}]" + }, + { + "bbox": [ + 46, + 156, + 290, + 360 + ], + "type": "text", + "content": " along the x-axis. A picture is taken each time the agent rotates to a new orientation. In total 12 pictures are taken from each point. While traveling between points, the robot agent further takes pictures. We also exploit a policy such that when the camera is too far from or too close to an object and thus the agent cannot see anything, we discard the bad-view images." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 180, + 376 + ], + "type": "text", + "content": "3.2. Questions and Answers" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "spans": [ + { + "bbox": [ + 46, + 383, + 288, + 539 + ], + "type": "text", + "content": "We pair each scene with machine-generated questions from pre-defined templates. All questions are open-ended and can be answered with a single word (samples in Fig. 1). Concepts and Relationships To generate questions and answers, we utilize the semantic annotations of HM3DSem [61] to get the semantic concepts and their bounding boxes, as well as the bounding boxes of the rooms. We merge semantic concepts with similar meanings (e.g., L-shaped sofa to sofa, desk chair / computer chair e.g. to chair). We also define 11 relationships: inside, above, below, on the top of, close, far, large, small, between, on the left, and on the right. Before generating questions, we first generate a scene graph for each scene containing all concepts and relationships." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 288, + 562 + ], + "type": "text", + "content": "Question Types We define four types of questions: concept, counting, relation and comparison." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 562, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 288, + 597 + ], + "type": "text", + "content": "- Concept. Conceptual questions query if there's an object of a certain semantic concept in the scene, or whether there's a room containing the objects of the semantic concept." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 600, + 288, + 635 + ], + "type": "text", + "content": "- Counting. Counting-related questions ask about how many instances of a semantic concept are in the scene, or how many rooms contain objects of the semantic concept." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "spans": [ + { + "bbox": [ + 47, + 639, + 288, + 687 + ], + "type": "text", + "content": "- Relation. Relational questions ask about the 11 relationships and their compositions. Based on the number of relations in a question, we have one-hop to three-hop questions for the relation type." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "content": "- Comparison. 
The comparison question type focuses on the comparison of two objects, two semantic concepts or two" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 547, + 133 + ], + "type": "text", + "content": "rooms. It can be combined with the relational concepts to compare two objects (e.g., larger, closer to, more left etc). It also compares the number of instances of two semantic concepts, or the number of objects of certain concepts in different rooms." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 140, + 547, + 416 + ], + "type": "text", + "content": "Bias Control. Similar to previous visual reasoning benchmarks [26, 33], we use machine-generated questions since the generation process is fully controllable so that we can avoid dataset bias. Questions are generated from pre-defined templates, and transformed into natural language questions with associated semantic concepts and relationships from the scene. We manually define 41 templates for question generation. We use depth-first search to generate questions. We perform bias control based on three perspectives: template counts, answer counts, and concept counts. For selecting templates, we sort the templates each time we generate a question to ensure a balanced question distribution. We force a flat answer distribution for each template by rejection sampling. Specifically, once we generate a question and an answer, if the number of the questions having the same answer and template is significantly larger than other answers, we discard it and continue searching. Once we find an answer that fits in the ideal answer distribution, we stop the depth-first searching for this question. We also force a flat concept distribution for each template using the same method. In addition to controlling the number of concepts mentioned in the templates, we also control the number of relation tuples consisting of the same concept sets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "spans": [ + { + "bbox": [ + 306, + 426, + 361, + 439 + ], + "type": "text", + "content": "4. Method" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 447, + 547, + 651 + ], + "type": "text", + "content": "Fig. 2 illustrates an overview of our framework. Specifically, our framework consists of three steps. First, we learn a 3D compact representation from multi-view images using neural field. And then we propose to leverage pre-trained 2D vision-and-language model to ground concepts on 3D space. This is achieved by 1) generating 2D pixel features using CLIP-LSeg; 2) aligning the features of 3D voxel grid and 2D pixel features from CLIP-LSeg [37]; 3) dot-product attention between the 3D features and CLIP language features [37]. Finally, to perform visual reasoning, we propose neural reasoning operators, which execute the question step by step on the 3D compact representation and outputs a final answer. 
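As a purely illustrative sketch of this execution model (not the released implementation), the snippet below runs a small operator program over a grounded 3D representation. The helper names FILTER / GET_INSTANCE / COUNT mirror the operators discussed in the text and in Sec. 4.3; the score threshold and the dictionary-of-scores input format are assumptions of this sketch.

```python
# Hypothetical sketch of executing a parsed operator program over the grounded
# 3D representation.  `scene_points` is an (N, 3) array of occupied voxel
# centers and `concept_scores[c]` an (N,) array of attention scores for
# concept c; both are assumptions of this sketch.
import numpy as np
from sklearn.cluster import DBSCAN

def FILTER(points, concept_scores, concept, thresh=0.5):
    """Keep the 3D points whose attention score for `concept` exceeds a threshold (assumed 0.5)."""
    return points[concept_scores[concept] > thresh]

def GET_INSTANCE(points, eps=1.5, min_samples=2):
    """Group the filtered points into object instances with DBSCAN (Sec. 4.3 / 5.1 settings)."""
    labels = DBSCAN(eps=eps, min_samples=min_samples, metric="l1").fit_predict(points)
    return [points[labels == k] for k in set(labels) if k != -1]

def COUNT(instances):
    """Return the number of recovered instances."""
    return len(instances)

# Example program for "How many chairs are there?":
#   COUNT(GET_INSTANCE(FILTER(scene_points, concept_scores, "chair")))
```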
For example, we use FILTER operators to ground semantic concepts on the 3D representation, GETINSTANCE to get all instances of a semantic class, and COUNT_RELATION to count how many pairs of the two semantic classes have the queried relation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 541, + 672 + ], + "type": "text", + "content": "4.1. Learning 3D Compact Scene Representations" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Neural radiance fields [43] are capable of learning a 3D representation that can reconstruct a volumetric 3D scene representation from a set of images. Voxel-based meth" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9205" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 545, + 305 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 545, + 305 + ], + "type": "image", + "image_path": "19407b31f659eff8444b6c2a799e47318398d9458986c4f843c53129e65b011a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "lines": [ + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 309, + 546, + 365 + ], + "type": "text", + "content": "Figure 2. An overview of our 3D-CLR framework. First, we learn a 3D compact scene representation from multi-view images using neural fields (I). Second, we use CLIP-LSeg model to get per-pixel 2D features (II). We utilize a 3D-2D alignment loss to assign features to the 3D compact representation (III). By calculating the dot-product attention between the 3D per-point features and CLIP language embeddings, we could get the concept grounding in 3D (IV). Finally, the reasoning process is performed via a set of neural reasoning operators, such as FILTER, GET instances and COUNT_RELATION (V). Relation operators are learned via relation networks." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 372, + 289, + 563 + ], + "type": "text", + "content": "ods [21, 24, 57, 66] speed up the learning process by explicitly storing the scene properties (e.g., density, color and feature) in its voxel grids. We leverage Direct Voxel Grid Optimization (DVGO) [57] as our backbone for 3D compact representation for its fast speed. DVGO stores the learned density and color properties in its grid cells. 
The rendering of multi-view images is by interpolating through the voxel grids to get the density and color for each sampled point along each sampled ray, and integrating the colors based on the rendering alpha weights calculated from densities according to quadrature rule [41]. The model is trained by minimizing the L2 loss between the rendered multi-view images and the ground-truth multi-view images. By extracting the density voxel grid, we can get the 3D compact representation (e.g., By visualizing points with density greater than 0.5, we can get the 3D representation as shown in Fig. 2 I.)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 226, + 588 + ], + "type": "text", + "content": "4.2. 3D Semantic Concept Grounding" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "Once we extract the 3D compact representation of the scene, we need to ground the semantic concepts for reasoning from language. Recent work from [26] has proposed to ground concepts from paired 3D assets and question-answers. Though promising results have been achieved on synthetic data, it is not feasible for open-vocabulary 3D reasoning in real-world data, since it is hard to collect largescale 3D vision-and-language paired data. To address this challenge, our idea is to leverage pre-trained 2D vision and language model [46, 48] for 3D concept grounding in real-" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 547, + 516 + ], + "type": "text", + "content": "world scenes. But how can we map 2D concepts into 3D neural field representations? Note that 3D compact representations can be learned from 2D multi-view images and that each 2D pixel actually corresponds to several 3D points along the ray. Therefore, it's possible to get 3D features from 2D per-pixel features. Inspired by this, we first add a feature voxel grid representation to DVGO, in addition to density and color, to represent 3D features. We then apply CLIP-LSeg [37] to learn per-pixel 2D features, which can be attended to by CLIP concept embeddings. We use an alignment loss to align 3D features with 2D features so that we can perform concept grounding on the 3D representations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 518, + 547, + 639 + ], + "type": "text", + "content": "2D Feature Extraction. To get per-pixel features that can be attended by concept embeddings, we use the features from language-driven semantic segmentation (CLIP-LSeg) [37], which learns 2D per-pixel features from a pre-trained vision-language model (i.e., [46]). Specifically, it uses the text encoder from CLIP, trains an image encoder to produce an embedding vector for each pixel, and calculates the scores of word-pixel correlation by dot-product. 
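The following sketch spells out this word-pixel scoring and the per-pixel arg-max that yields the zero-shot segmentation described next. The random tensors stand in for the LSeg per-pixel image embeddings and the CLIP text embeddings of the concept vocabulary; the 512-d, L2-normalized layout is an assumption of this sketch rather than code from [37].

```python
# Illustrative sketch of the word-pixel scoring described above (not the
# CLIP-LSeg code).  Random tensors stand in for the per-pixel image embeddings
# produced by the LSeg image encoder and for the CLIP text embeddings; both
# are assumed to be L2-normalized 512-d vectors.
import torch
import torch.nn.functional as F

H, W, D = 240, 320, 512                       # feature-map size and embedding dim (assumed)
concepts = ["chair", "table", "sofa", "wall"]

pixel_feat = F.normalize(torch.randn(H, W, D), dim=-1)           # stand-in for LSeg output
text_feat = F.normalize(torch.randn(len(concepts), D), dim=-1)   # stand-in for CLIP text encoder

# Word-pixel correlation: dot product between every pixel embedding and every
# concept embedding -> an (H, W, num_concepts) score map.
scores = torch.einsum("hwd,cd->hwc", pixel_feat, text_feat)

# Zero-shot segmentation: each pixel takes the concept with the maximum score.
seg = scores.argmax(dim=-1)                   # (H, W) map of concept indices
```

The same dot product, applied to the per-voxel features obtained after the 3D-2D alignment described below, is what grounds concepts directly in the 3D compact representation (Fig. 2 IV).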
By outputting the semantic class with the maximum score of each pixel, CLIP-LSeg is able to perform zero-shot 2D semantic segmentation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": "3D-2D Alignment. In addition to density and color, we also store a 512-dim feature in each grid cell in the compact representation. To align the 3D per-point features with 2D per-pixel features, we calculate an L1 loss between each pixel and each 3D point sampled on the ray of the pixel. The overall L1 loss along a ray is the weighted sum of all" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9206" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": "the pixel-point alignment losses, with weights same as the rendering weights: " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{feature}} = \\sum_{i=1}^{K} w_i (\\| \\pmb{f}_i - F(\\pmb{r}) \\|)" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\pmb{r}" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is a ray corresponding to a 2D pixel, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "F(\\pmb{r})" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the 2D feature from CLIP-LSeg, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the total number of sampled points along the ray and " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "\\pmb{f}_i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the feature of point " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " by interpolating through the feature voxel grid, " + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "inline_equation", + "content": "w_i" + }, + { + "bbox": [ + 46, + 72, + 287, + 144 + ], + "type": "text", + "content": " is the rendering weight." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": "Concept Grounding through Attention. 
Since our feature voxel grid representation is learnt from CLIP-LSeg, by calculating the dot-product attention " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "< f, v >" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": " between perpoint 3D feature " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": " and the CLIP concept embeddings " + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 144, + 289, + 228 + ], + "type": "text", + "content": ", we can get zero-shot view-independent concept grounding and semantic segmentations in the 3D representation, as is presented in Fig. 2 IV." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 205, + 249 + ], + "type": "text", + "content": "4.3. Neural Reasoning Operators" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 288, + 338 + ], + "type": "text", + "content": "Finally, we use the grounded semantic concepts for 3D reasoning from language. We first transform questions into a sequence of operators that can be executed on the 3D representation for reasoning. We adopt a LSTM-based semantic parser [65] for that. As [26, 40], we further devise a set of operators which can be executed on the 3D representation. Please refer to Appendix for a full list of operators." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 287, + 363 + ], + "type": "text", + "content": "Filter Operators. We filter all the grid cells with a certain semantic concept." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 423 + ], + "type": "text", + "content": "Get Instance Operators. We implement this by utilizing DBSCAN [15], an unsupervised algorithm which assigns clusters to a set of points. Specifically, given a set of points in the 3D space, it can group together the points that are closely packed together for instance segmentation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 288, + 579 + ], + "type": "text", + "content": "Relation Operators. We cannot directly execute the relation on the 3D representation as we have not grounded relations. Thus, we represent each relation using a distinct neural module (which is practical as the vocabulary of relations is limited [36]). We first concatenate the voxel grid representations of all the referred objects and feed them into the relation network. The relation network consists of three 3D convolutional layers and then three 3D deconvolutional layers. 
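A simplified sketch of one such relation module is given below. It follows the configuration reported in Sec. 5.1 (three 3D convolution layers with hidden size 64, a one-layer linear head, and a sigmoid); dense Conv3d layers replace the sparse convolutions used in practice, the deconvolutional layers are omitted, and the input channel count and global pooling are assumptions of this sketch.

```python
# A simplified, dense-convolution sketch of one relation module (the paper
# uses sparse 3D convolutions for speed).  Input: the concatenated voxel-grid
# representations of the referred objects; output: a score for whether the
# queried relation holds.  Channel count and pooling are assumptions.
import torch
import torch.nn as nn

class RelationModule(nn.Module):
    def __init__(self, in_channels=2, hidden=64):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv3d(in_channels, hidden, kernel_size=3, stride=2, padding=1), nn.ReLU(),
            nn.Conv3d(hidden, hidden, kernel_size=3, stride=2, padding=1), nn.ReLU(),
            nn.Conv3d(hidden, hidden, kernel_size=3, stride=2, padding=1), nn.ReLU(),
        )
        self.readout = nn.Linear(hidden, 1)    # one-layer linear head (Sec. 5.1)

    def forward(self, voxels):                 # voxels: (B, in_channels, X, Y, Z)
        h = self.backbone(voxels)
        h = h.mean(dim=(2, 3, 4))              # global average pool to (B, hidden)
        return torch.sigmoid(self.readout(h))  # relation score in [0, 1]

# One module is instantiated per relation ("close", "above", ...); the module
# matching the relation mentioned in the question is selected at execution time.
```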
A score is output by the relation network indicating whether the objects have the relationship or not. Since vanilla 3D CNNs are very slow, we use Sparse Convolution [10] instead. Based on the relations asked in the questions, different relation modules are chosen." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 128, + 604 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 164, + 624 + ], + "type": "text", + "content": "5.1. Experimental Setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "content": "Evaluation Metric. We report the visual question answering accuracy on the proposed 3DMV-VQA dataset w.r.t the four types of questions. The train/val/test split is 7:1:2." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 713 + ], + "type": "text", + "content": "Implementation Details For 3D compact representations, we adopt the same architectures as DVGO, except skipping the coarse reconstruction phase and directly training the fine reconstruction phase. After that, we freeze the density voxel" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 263 + ], + "type": "text", + "content": "grid and color voxel grid, for the optimization of the feature voxel grid only. The feature grid has a world size of 100 and feature dim of 512. We train the compact representations for 100,000 iterations and the 3D features for another 20,000 iterations. For LSeg, we use the official demo model, which has the ViT-L/16 image encoder and CLIP's ViT-B/32 text encoder. We follow the official script for inference and use multi-scale inference. For DBSCAN, we use an epsilon value of 1.5, minimum samples of 2, and we use L1 as the clustering method. For the relation networks, each relation is encoded into a three-layer sparse 3D convolution network with hidden size 64. The output is then fed into a one-layer linear network to produce a score, which is normalized by sigmoid function. We use cross-entropy loss to train the relation networks, and we use the one-hop relational questions with \"yes/no\" answers to train the relation networks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "spans": [ + { + "bbox": [ + 306, + 271, + 372, + 283 + ], + "type": "text", + "content": "5.2. 
Baselines" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "spans": [ + { + "bbox": [ + 305, + 289, + 547, + 326 + ], + "type": "text", + "content": "Our baselines range from vanilla neural networks, attention-based methods, fine-tuned from large-scale VLM, and graph-based methods, to neural-symbolic methods." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 333, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 333, + 547, + 406 + ], + "type": "text", + "content": "- LSTM. The question is transferred to word embeddings which are input into a word-level LSTM [25]. The last LSTM hidden state is fed into a multi-layer perceptron (MLP) that outputs a distribution over answers. This method is able to model question-conditional bias since it uses no image information." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "spans": [ + { + "bbox": [ + 306, + 409, + 547, + 482 + ], + "type": "text", + "content": "- CNN+LSTM. The question is encoded by the final hidden states from LSTM. We use a resnet-50 to extract frame-level features of images and average them over the time dimension. The features are fed to an MLP to predict the final answer. This is a simple baseline that examines how vanilla neural networks perform on 3DMV-VQA." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 486, + 547, + 533 + ], + "type": "text", + "content": "- 3D-Feature+LSTM. We use the 3D features we get from 3D-2D alignment and downsample the voxel grids using 3D-CNN as input, concatenated with language features from LSTM and fed to an MLP." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 537, + 547, + 586 + ], + "type": "text", + "content": "- MAC [30]. MAC utilizes a Memory, Attention and Composition cell to perform iterative reasoning process. Like CNN+LSTM, we use the average pooling over multi-view images as the feature map." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "spans": [ + { + "bbox": [ + 306, + 590, + 547, + 637 + ], + "type": "text", + "content": "- MAC(V). We treat the multi-view images along a trajectory as a video. We modify the MAC model by applying a temporal attention unit across the video frames to generate a latent encoding for the video." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 641, + 547, + 713 + ], + "type": "text", + "content": "- NS-VQA [65]. This is a 2D version of our 3D-CLR model. 
We use CLIP-LSeg to ground 2D semantic concepts from multi-view images, and the relation network also takes the 2D features as input. We execute the operators on each image and max pool from the answers to get our final predictions." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9207" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 70, + 451, + 213 + ], + "blocks": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "lines": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "spans": [ + { + "bbox": [ + 143, + 70, + 451, + 213 + ], + "type": "table", + "html": "
MethodsConceptCountingRelationComparisonOverall
Q-type (rand.)49.410.721.649.226.4
LSTM53.415.324.055.229.8
CNN+LSTM57.822.135.259.737.8
MAC62.419.747.862.346.7
MAC(V)60.024.651.665.950.0
NS-VQA59.821.533.461.638.0
ALPRO65.812.742.268.243.3
LGCN56.219.535.566.739.1
3D-Feature+LSTM61.222.449.961.348.2
3D-CLR (Ours)66.141.357.672.357.7
", + "image_path": "23ab7991e1cfd752f1d4a8a42861878aba7055929c68ab960b80aebbac7c7b4f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 219, + 474, + 231 + ], + "lines": [ + { + "bbox": [ + 116, + 219, + 474, + 231 + ], + "spans": [ + { + "bbox": [ + 116, + 219, + 474, + 231 + ], + "type": "text", + "content": "Table 1. Question-answering accuracy of 3D visual reasoning baselines on different question types." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 239, + 288, + 329 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 47, + 239, + 288, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 288, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 288, + 289 + ], + "type": "text", + "content": "- ALPRO [38]. ALPRO is a video-and-language pre-training framework. A transformer model is pretrained on large webly-source video-text pairs and can be used for downstream tasks like Video Question answering." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 292, + 287, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 292, + 287, + 329 + ], + "spans": [ + { + "bbox": [ + 47, + 292, + 287, + 329 + ], + "type": "text", + "content": "- LGCN [28]. LGCN represents the contents in the video as a location-aware graph by incorporating the location information of an object into the graph construction." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 335, + 171, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 171, + 348 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 171, + 348 + ], + "type": "text", + "content": "5.3. Experimental Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 354, + 287, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 287, + 414 + ], + "type": "text", + "content": "Result Analysis. We summarize the performances for each question type of baseline models in Table 1. All models are trained on the training set until convergence, tuned on the validation set, and evaluated on the test set. We provide detailed analysis below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 415, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 415, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 415, + 288, + 714 + ], + "type": "text", + "content": "First, for the examination of language-bias of the dataset, we find that the performance of LSTM is only slightly higher than random and frequency, and all other baselines outperform LSTM a lot. This suggests that there's little language bias in our dataset. Second, we observe that encoding temporal information in MAC (i.e., MAC(V)) is better than average-pooling of the features, especially in counting and relation. This suggests that average-pooling of the features may cause the model to lose information from multi-view images, while attention on multi-view images helps boost the 3D reasoning performances. 
Third, we also find that fine-tuning on large-scale pretrained model (i.e., ALPRO) has relatively high accuracies in concept-related questions, but for counting it's only slightly higher than the random baseline, suggesting that pretraining on large-scale video-language dataset may improve the model's perception ability, but does not provide the model with the ability to tackle with more difficult reasoning types such as counting. Next, we find that LGCN has poor performances on the relational questions, indicating that building a location-aware graph over 2D objects still doesn't equip the model with 3D location reasoning abilities. Last but not least, we find that 3D-based baselines are better than their 2D counterparts. 3D-Feature+LSTM performs well on the 3D-related questions, such as counting and relation, than most of the image-based" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 239, + 547, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 239, + 547, + 359 + ], + "spans": [ + { + "bbox": [ + 304, + 239, + 547, + 359 + ], + "type": "text", + "content": "basielines. Compared with 3D-CLR, NS-VQA can perform well in the conceptual questions. However, it underperforms 3D-CLR a lot in counting and relation, suggesting that these two types of questions require the holistic 3D understanding of the entire 3D scenes. Our 3D-CLR outperforms other baselines by a large margin, but is still far from satisfying. From the accuracy of the conceptual question, we can see that it can only ground approximately " + }, + { + "bbox": [ + 304, + 239, + 547, + 359 + ], + "type": "inline_equation", + "content": "66\\%" + }, + { + "bbox": [ + 304, + 239, + 547, + 359 + ], + "type": "text", + "content": " of the semantic concepts. This indicates that our 3DMV-VQA dataset is indeed very challenging." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 361, + 547, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 547, + 492 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 547, + 492 + ], + "type": "text", + "content": "Qualitative Examples. In Fig. 3, we show four qualitative examples. From the examples, we show that our 3D-CLR can infer an accurate 3D representation from multi-view images, as well as ground semantic concepts on the 3D representations to get the semantic segmentations of the entire scene. Our 3D-CLR can also learn 3D relationships such as \"close\", \"largest\", \"on top of\" and so on. However, 3D-CLR also fails on some questions. For the third scene in the qualitative examples, it fails to ground the concepts \"mouse\" and \"printer\". Also, it cannot accurately count the instances sometimes. We give detailed discussions below." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 502, + 382, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 502, + 382, + 513 + ], + "spans": [ + { + "bbox": [ + 306, + 502, + 382, + 513 + ], + "type": "text", + "content": "5.4. Discussions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 521, + 547, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 547, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 547, + 640 + ], + "type": "text", + "content": "We perform an in-depth analysis to understand the challenge of this dataset. We leverage the modular design of our 3D-CLR, replacing individual components of the framework with ground-truth annotations for model diagnosis. 
The result is shown in Fig 4. 3D-CLR w/ Semantic denotes our model with ground-truth semantic concepts from HM3DSem annotations. 3D-CLR w/ Instance denotes that we have ground-truth instance segmentations of semantic concepts. From Fig. 3 and Fig. 4, we summarize several key challenges of our benchmark:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "content": "Very close object instances From Fig. 4, we can see that even with ground-truth semantic labeling of the 3D points, 3D-CLR still has unsatisfying results on counting questions. This suggests that the instance segmentations provided by DBSCAN are not accurate enough. From the top two qualitative examples in Fig. 3, we can also see that if two chairs" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9208" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 542, + 360 + ], + "blocks": [ + { + "bbox": [ + 51, + 72, + 542, + 360 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 542, + 360 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 542, + 360 + ], + "type": "image", + "image_path": "e5f9988ee998345087af1d898b960d9fdfd34ec0911c04da788d53c90dc149f8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "lines": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 365, + 546, + 388 + ], + "type": "text", + "content": "Figure 3. Qualitative examples of our 3D-CLR. We can see that 3D-CLR can ground most of the concepts and answer most questions correctly. However, it still fails sometimes, mainly because it cannot separate close object instances and ground small objects." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 51, + 399, + 282, + 487 + ], + "blocks": [ + { + "bbox": [ + 51, + 399, + 282, + 487 + ], + "lines": [ + { + "bbox": [ + 51, + 399, + 282, + 487 + ], + "spans": [ + { + "bbox": [ + 51, + 399, + 282, + 487 + ], + "type": "image", + "image_path": "5ce1c4d9a9daa672b4fac5b8b44d25fd911caa8af39a62561b7b2db9edf488ea.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 88, + 490, + 246, + 499 + ], + "lines": [ + { + "bbox": [ + 88, + 490, + 246, + 499 + ], + "spans": [ + { + "bbox": [ + 88, + 490, + 246, + 499 + ], + "type": "text", + "content": "Figure 4. Model diagnosis of our 3D-CLR." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 508, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 287, + 555 + ], + "type": "text", + "content": "contact each other, DBSCAN will not tell them apart and thus have poor performance on counting. One crucial future direction is to improve unsupervised instance segmentations on very close object instances." 
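The synthetic example below reproduces this failure mode with the DBSCAN settings reported in Sec. 5.1 (epsilon 1.5, minimum samples 2, L1 metric): two chair-sized point sets that nearly touch are merged into a single cluster, so the counting operator returns one instance instead of two. The point clouds are fabricated purely for illustration.

```python
# Synthetic illustration of the counting failure discussed above: two chairs
# whose grounded 3D points nearly touch collapse into one DBSCAN cluster.
# The eps / min_samples / L1-metric settings follow Sec. 5.1; the point
# clouds themselves are made up for this example.
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
chair_a = rng.uniform([0.0, 0.0, 0.0], [2.0, 2.0, 2.0], size=(200, 3))
chair_b = rng.uniform([2.2, 0.0, 0.0], [4.2, 2.0, 2.0], size=(200, 3))  # almost touching chair_a
points = np.vstack([chair_a, chair_b])

labels = DBSCAN(eps=1.5, min_samples=2, metric="l1").fit_predict(points)
num_instances = len(set(labels) - {-1})
print(num_instances)   # 1 for this configuration: the adjacent chairs merge into a single instance
```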
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 556, + 289, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 289, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 289, + 700 + ], + "type": "text", + "content": "Grounding small objects Fig. 4 suggests that 3D-CLR fails to ground a large portion of the semantic concepts, which hinders the performance. From the last example in Fig. 3, we can see that 3D-CLR fails to ground small objects like \"computer mouse\". Further examination indicates there are two possible reasons: 1) CLIP-LSeg fails to assign the right features to objects with limited pixels; 2) The resolution of feature voxel grid is not high enough and therefore small objects cannot be represented in the compact representation. An interesting future direction would be learning exploration policies that enable the agents to get closer to uncertain objects that cannot be grounded." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "text", + "content": "Ambiguity on 3D relations Even with ground-truth seman" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 399, + 547, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 547, + 473 + ], + "type": "text", + "content": "tic and instance segmentations, the performance of the relation network still needs to be improved. We find that most of the failure cases are correlated to the \"inside\" relation. From the segmentations in Fig. 3, we can see that 3D-CLR is unable to ground the objects in the cabinets. A potential solution can be joint depth and segmentation predictions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 487, + 378, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 487, + 378, + 500 + ], + "spans": [ + { + "bbox": [ + 306, + 487, + 378, + 500 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 509, + 547, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 509, + 547, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 509, + 547, + 629 + ], + "type": "text", + "content": "In this paper, we introduce the novel task of 3D reasoning from multi-view images. By placing embodied robot that actively explores indoor environments, we collect a large-scale benchmark named 3DMV-VQA. We also propose a new 3D-CLR model that incorporates neural field, 2D VLM, as well as reasoning operators for this task and illustrate its effectiveness. Finally, we perform an in-depth analysis to understand the challenges of this dataset and also point out potential future directions. We hope that 3DMV-VQA can be used to push the frontiers of 3D reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 712 + ], + "type": "text", + "content": "Acknowledgements. This work was supported by the MIT-IBM Watson AI Lab, DARPA MCS, DSO grant DSOCO21072, and gift funding from MERL, Cisco, Sony, and Amazon. 
We would also like to thank the computation support from AiMOS, a server cluster for the IBM Research AI Hardware Center." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9209" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Panos Achlioptas, Ahmed Abdelreehm, Fei Xia, Mohamed Elhoseiny, and Leonidas J. Guibas. Referit3d: Neural listeners for fine-grained 3d object identification in real-world scenes. In ECCV, 2020. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 190 + ], + "type": "text", + "content": "[2] Daich Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoki Kawanabe. Scanqa: 3d question answering for spatial scene understanding. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19107-19117, 2022. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 194, + 288, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 288, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 288, + 225 + ], + "type": "text", + "content": "[3] Dave Zhenyu Chen, Angel X. Chang, and Matthias Nießner. Scanrefer: 3d object localization in rgb-d scans using natural language. In ECCV, 2020. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 228, + 288, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 288, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 288, + 271 + ], + "type": "text", + "content": "[4] Dave Zhenyu Chen, Ali Gholami, Matthias Nießner, and Angel X. Chang. Scan2cap: Context-aware dense captioning in rgb-d scans. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3192-3202, 2021. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 273, + 288, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 288, + 304 + ], + "type": "text", + "content": "[5] Z Chen, L Ma, W Luo, and KKY Wong. Weakly-supervised spatio-temporally grounding natural sentence in video. In ACL, 2019. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 307, + 288, + 350 + ], + "type": "text", + "content": "[6] Zhenfang Chen, Jiayuan Mao, Jiajun Wu, Kwan-Yee Kenneth Wong, Joshua B Tenenbaum, and Chuang Gan. Grounding physical concepts of objects and events through dynamic visual reasoning. *ICLR*, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 353, + 288, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 353, + 288, + 385 + ], + "spans": [ + { + "bbox": [ + 53, + 353, + 288, + 385 + ], + "type": "text", + "content": "[7] Zhenfang Chen, Peng Wang, Lin Ma, Kwan-Yee K Wong, and Qi Wu. Cops-ref: A new dataset and task on compositional referring expression comprehension. In CVPR, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 387, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 387, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 387, + 288, + 430 + ], + "type": "text", + "content": "[8] Zhenfang Chen, Kexin Yi, Yunzhu Li, Mingyu Ding, Antonio Torralba, Joshua B Tenenbaum, and Chuang Gan. Comphy: Compositional physical reasoning of objects and events from videos. In ICLR, 2022. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 432, + 288, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 432, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 432, + 288, + 485 + ], + "type": "text", + "content": "[9] Zhenfang Chen, Qinhong Zhou, Yikang Shen, Yining Hong, Hao Zhang, and Chuang Gan. See, think, confirm: Interactive prompting between vision and language models for knowledge-based visual reasoning. arXiv preprint arXiv:2301.05226, 2023. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 488, + 288, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 488, + 288, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 488, + 288, + 520 + ], + "type": "text", + "content": "[10] Spconv Contributors. Spconv: Spatially sparse convolution library. https://github.com/traveller59/spconv, 2022.6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 522, + 288, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 288, + 576 + ], + "type": "text", + "content": "[11] Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied question answering. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 2135-213509, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 578, + 288, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 288, + 622 + ], + "type": "text", + "content": "[12] Mingyu Ding, Yan Xu, Zhenfang Chen, David Daniel Cox, Ping Luo, Joshua B Tenenbaum, and Chuang Gan. Embodied concept learner: Self-supervised learning of concepts and mapping through instruction following. In CoRL. 
3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 288, + 667 + ], + "type": "text", + "content": "[13] Yilun Du, M. Katherine Collins, B. Joshua Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields. In Advances in Neural Information Processing Systems, 2021. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "text", + "content": "[14] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B. Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021. 3" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[15] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in large spatial databases with noise. In KDD, 1996. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 547, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 148 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 148 + ], + "type": "text", + "content": "[16] Yasaman Etesam, Leon Kochiev, and Angel X Chang. 3dvqa: Visual question answering for 3d environments. In 2022 19th Conference on Robots and Vision (CRV), pages 233-240. IEEE, 2022. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 150, + 547, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 547, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 547, + 215 + ], + "type": "text", + "content": "[17] Mingtao Feng, Zhen Li, Qi Li, Liang Zhang, Xiangdong Zhang, Guangming Zhu, Hui Zhang, Yaonan Wang, and Ajmal S. Mian. Free-form description guided 3d visual graph network for object grounding in point cloud. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3702-3711, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 217, + 547, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 547, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 547, + 259 + ], + "type": "text", + "content": "[18] Chuang Gan, Yandong Li, Haoxiang Li, Chen Sun, and Boqing Gong. Vqs: Linking segmentations to questions and answers for supervised attention in vqa and question-focused semantic segmentation. In ICCV, pages 1811-1820, 2017. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 261, + 547, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 261, + 547, + 304 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 547, + 304 + ], + "type": "text", + "content": "[19] Siddha Ganju, Olga Russakovsky, and Abhinav Kumar Gupta. What's in a question: Using visual questions as a form of supervision. 
2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6422-6431, 2017. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 304, + 545, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 304, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 307, + 304, + 545, + 337 + ], + "type": "text", + "content": "[20] Xiaofeng Gao, Qiaozi Gao, Ran Gong, Kaixiang Lin, Govind Thattai, and Gaurav S Sukhatme. Dialfred: Dialogue-enabled agents for embodied instruction following. arXiv, 2022. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 338, + 547, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 547, + 392 + ], + "type": "text", + "content": "[21] Stephan J. Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien P. C. Valentin. Fastnerf: High-fidelity neural rendering at 200fps. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 14326-14335, 2021. 3, 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 393, + 547, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 393, + 547, + 448 + ], + "spans": [ + { + "bbox": [ + 307, + 393, + 547, + 448 + ], + "type": "text", + "content": "[22] Daniel Gordon, Aniruddha Kembhavi, Mohammad Rastegari, Joseph Redmon, Dieter Fox, and Ali Farhadi. Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4089-4098, 2018. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 449, + 547, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 547, + 502 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 547, + 502 + ], + "type": "text", + "content": "[23] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6325-6334, 2017. 2, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 504, + 547, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 504, + 547, + 556 + ], + "spans": [ + { + "bbox": [ + 307, + 504, + 547, + 556 + ], + "type": "text", + "content": "[24] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul E. Debevec. Baking neural radiance fields for real-time view synthesis. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5855-5864, 2021. 3, 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 559, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 559, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 307, + 559, + 545, + 580 + ], + "type": "text", + "content": "[25] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9:1735-1780, 1997. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 582, + 547, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 547, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 547, + 613 + ], + "type": "text", + "content": "[26] Yining Hong, Yilun Du, Chunru Lin, Joshua B Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. arXiv preprint arXiv:2207.06403, 2022. 
3, 4, 5, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 614, + 547, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 614, + 547, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 614, + 547, + 646 + ], + "type": "text", + "content": "[27] Yining Hong, Li Yi, Joshua B. Tenenbaum, Antonio Torralba, and Chuang Gan.Ptr: A benchmark for part-based conceptual, relational, and physical reasoning. In NeurIPS, 2021. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 647, + 545, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 680 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 680 + ], + "type": "text", + "content": "[28] Deng Huang, Peihao Chen, Runhao Zeng, Qing Du, Mingkui Tan, and Chuang Gan. Location-aware graph convolutional networks for video question answering. In AAAI, 2020. 3, 7" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "text", + "content": "[29] Pin-Hao Huang, Han-Hung Lee, Hwann-Tzong Chen, and Tyng-Luh Liu. Text-guided graph neural networks for referring 3d instance segmentation. In AAAI, 2021. 3" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9210" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 95 + ], + "type": "text", + "content": "[30] D. A. Hudson and Christopher D. Manning. Compositional attention networks for machine reasoning. *ICLR*, 2018. 3, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 97, + 288, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 97, + 288, + 151 + ], + "spans": [ + { + "bbox": [ + 48, + 97, + 288, + 151 + ], + "type": "text", + "content": "[31] Ajay Jain, Ben Mildenhall, Jonathan T. Barron, P. Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 857-866, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 153, + 288, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 153, + 288, + 196 + ], + "spans": [ + { + "bbox": [ + 48, + 153, + 288, + 196 + ], + "type": "text", + "content": "[32] Zhenyu Jiang, Yifeng Zhu, Maxwell Svetlik, Kuan Fang, and Yuke Zhu. Synergies between affordance and geometry: 6-dof grasp detection via implicit representations. ArXiv, abs/2104.01542, 2021. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 199, + 288, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 199, + 288, + 255 + ], + "spans": [ + { + "bbox": [ + 48, + 199, + 288, + 255 + ], + "type": "text", + "content": "[33] J. 
Johnson, Bharath Hariharan, L. V. D. Maaten, Li Fei-Fei, C. L. Zitnick, and Ross B. Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1988-1997, 2017. 2, 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 256, + 288, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 256, + 288, + 299 + ], + "spans": [ + { + "bbox": [ + 48, + 256, + 288, + 299 + ], + "type": "text", + "content": "[34] Stefan Kollmannsberger, Davide D'Angella, Moritz Jokeit, and Leon Alexander Herrmann. Physics-informed neural networks. Deep Learning in Computational Mechanics, 2021. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 301, + 287, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 301, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 48, + 301, + 287, + 323 + ], + "type": "text", + "content": "[35] Natalia Konstantinova and Constantin Orasan. Interactive question answering. In EMNLP. IGI Global, 2013. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 326, + 287, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 326, + 287, + 358 + ], + "spans": [ + { + "bbox": [ + 48, + 326, + 287, + 358 + ], + "type": "text", + "content": "[36] Barbara Landau and Ray Jackendoff. “what” and “where” in spatial language and spatial cognition. Behavioral and Brain Sciences, 16:217-238, 1993. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 360, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 360, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 360, + 287, + 392 + ], + "type": "text", + "content": "[37] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and René Ranftl. Language-driven semantic segmentation. *ICLR*, 2022, 2, 4, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 395, + 287, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 395, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 48, + 395, + 287, + 449 + ], + "type": "text", + "content": "[38] Dongxu Li, Junnan Li, Hongdong Li, Juan Carlos Niebles, and Steven C. H. Hoi. Align and prompt: Video-and-language pre-training with entity prompts. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4943-4953, 2022. 3, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 452, + 287, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 452, + 287, + 484 + ], + "spans": [ + { + "bbox": [ + 48, + 452, + 287, + 484 + ], + "type": "text", + "content": "[39] Andrew Luo, Yilun Du, Michael J Tarr, Joshua B Tenenbaum, Antonio Torralba, and Chuang Gan. Learning neural acoustic fields. arXiv preprint arXiv:2204.00628, 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 487, + 288, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 288, + 529 + ], + "type": "text", + "content": "[40] Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B. Tenenbaum, and Jiajun Wu. The neuro-symbolic concept learner: Interpreting scenes words and sentences from natural supervision. ArXiv, abs/1904.12584, 2019. 
3, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 533, + 287, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 533, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 48, + 533, + 287, + 554 + ], + "type": "text", + "content": "[41] Nelson L. Max. Optical models for direct volume rendering. IEEE Trans. Vis. Comput. Graph., 1:99-108, 1995. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 287, + 610 + ], + "type": "text", + "content": "[42] Lars M. Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4455-4465, 2019. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 656 + ], + "type": "text", + "content": "[43] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proc. ECCV, 2020. 3, 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[44] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Occupancy flow: 4d reconstruction by learning particle dynamics. In Proceedings of the IEEE International Conference on Computer Vision, pages 5379-5389, 2019. 3" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[45] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. CVPR, 2020. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 547, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 173 + ], + "type": "text", + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 175, + 547, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 547, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 547, + 249 + ], + "type": "text", + "content": "[47] Santhosh K. Ramakrishnan, Aaron Gokaslan, Erik Wijmans, Oleksandr Maksymets, Alexander Clegg, John Turner, Eric Undersander, Wojciech Galuba, Andrew Westbury, Angel X. 
Chang, Manolis Savva, Yili Zhao, and Dhruv Batra. Habitatmatterport 3d dataset (hm3d): 1000 large-scale 3d environments for embodied ai. ArXiv, abs/2109.08238, 2021. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 252, + 547, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 252, + 547, + 295 + ], + "spans": [ + { + "bbox": [ + 308, + 252, + 547, + 295 + ], + "type": "text", + "content": "[48] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. ArXiv, abs/2102.12092, 2021.5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 297, + 547, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 297, + 547, + 341 + ], + "spans": [ + { + "bbox": [ + 308, + 297, + 547, + 341 + ], + "type": "text", + "content": "[49] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proc. ICCV, pages 2304-2314, 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 342, + 547, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 342, + 547, + 407 + ], + "spans": [ + { + "bbox": [ + 308, + 342, + 547, + 407 + ], + "type": "text", + "content": "[50] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 410, + 547, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 410, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 410, + 547, + 453 + ], + "type": "text", + "content": "[51] Mohit Shridhar, Jesse Thomason, Daniel Gordon, Yonatan Bisk, Winson Han, Roozbeh Mottaghi, Luke Zettlemoyer, and Dieter Fox. Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In CVPR, 2020. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 455, + 547, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 455, + 547, + 509 + ], + "spans": [ + { + "bbox": [ + 308, + 455, + 547, + 509 + ], + "type": "text", + "content": "[52] Anthony Simeonov, Yilun Du, Andrea Tagliasacchi, Joshua B Tenenbaum, Alberto Rodriguez, Pulkit Agrawal, and Vincent Sitzmann. Neural descriptor fields: Se (3)-equivariant object representations for manipulation. arXiv preprint arXiv:2112.05124, 2021. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 511, + 547, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 511, + 547, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 511, + 547, + 553 + ], + "type": "text", + "content": "[53] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Proc. NeurIPS 2019, 2019. 
3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 556, + 547, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 556, + 547, + 599 + ], + "spans": [ + { + "bbox": [ + 308, + 556, + 547, + 599 + ], + "type": "text", + "content": "[54] Chan Hee Song, Jihyung Kil, Tai-Yu Pan, Brian M Sadler, Wei-Lun Chao, and Yu Su. One step at a time: Long-horizon vision-and-language navigation with milestones. arXiv preprint arXiv:2202.07028, 2022.3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 601, + 547, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 601, + 547, + 643 + ], + "spans": [ + { + "bbox": [ + 308, + 601, + 547, + 643 + ], + "type": "text", + "content": "[55] Elizabeth S Spelke, Karen Breinlinger, Kristen Jacobson, and Ann Phillips. Gestalt Relations and Object Perception: A Developmental Study. Perception, 22(12):1483-1501, 1993. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 647, + 547, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 547, + 689 + ], + "type": "text", + "content": "[56] Alessandro Suglia, Qiaozi Gao, Jesse Thomason, Govind Thattai, and Gaurav Sukhatme. Embodied bert: A transformer model for embodied, language-guided visual task completion. arXiv preprint arXiv:2108.04927, 2021. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "text", + "content": "[57] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "9211" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 677 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "type": "text", + "content": "reconstruction. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5449-5459, 2022. 2, 3, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 194 + ], + "type": "text", + "content": "[58] Andrew Szot, Alex Clegg, Eric Undersander, Erik Wijmans, Yili Zhao, John Turner, Noah Maestre, Mustafa Mukadam, Devendra Chaplot, Oleksandr Maksymets, Aaron Gokaslan, Vladimir Vondrus, Sameer Dharur, Franziska Meier, Wojciech Galuba, Angel Chang, Zsolt Kira, Vladlen Koltun, Jitendra Malik, Manolis Savva, and Dhruv Batra. Habitat 2.0: Training home assistants to rearrange their habitat. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 
3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 196, + 288, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 196, + 288, + 228 + ], + "spans": [ + { + "bbox": [ + 48, + 196, + 288, + 228 + ], + "type": "text", + "content": "[59] Ivan Vendrov, Ryan Kiros, Sanja Fidler, and Raquel Urtasun. Order-embeddings of images and language. CoRR, abs/1511.06361, 2016. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 230, + 288, + 262 + ], + "type": "text", + "content": "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. ArXiv, abs/2112.05139, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 288, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 288, + 317 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 288, + 317 + ], + "type": "text", + "content": "[61] Karmesh Yadav, Ram Ramrakhya, Santhosh Kumar Ramakrishnan, Theo Gervet, John Turner, Aaron Gokaslan, Noah Maestre, Angel Xuan Chang, Dhruv Batra, Manolis Savva, et al. Habitat-matterport 3d semantics dataset. arXiv preprint arXiv:2210.05633, 2022. 2, 3, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 288, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 288, + 373 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 288, + 373 + ], + "type": "text", + "content": "[62] Xu Yan, Zhihao Yuan, Yuhao Du, Yinghong Liao, Yao Guo, Zhen Li, and Shuguang Cui. Clevr3d: Compositional language and elementary visual reasoning for question answering in 3d real-world scenes. arXiv preprint arXiv:2112.11691, 2021. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 375, + 288, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 375, + 288, + 418 + ], + "spans": [ + { + "bbox": [ + 48, + 375, + 288, + 418 + ], + "type": "text", + "content": "[63] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Basri Ronen, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. Proc. NeurIPS, 2020. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 420, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 420, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 420, + 288, + 441 + ], + "type": "text", + "content": "[64] Shuquan Ye, Dongdong Chen, Songfang Han, and Jing Liao. 3d question answering. ArXiv, abs/2112.08359, 2021. 2, 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 443, + 288, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 443, + 288, + 486 + ], + "spans": [ + { + "bbox": [ + 48, + 443, + 288, + 486 + ], + "type": "text", + "content": "[65] Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Joshua B. Tenenbaum. Neural-symbolic vqa: Disentangling reasoning from vision and language understanding. In NeurIPS, 2018. 
3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 487, + 288, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 288, + 540 + ], + "type": "text", + "content": "[66] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoptrees for real-time rendering of neural radiance fields. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5732-5741, 2021. 3, 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 576 + ], + "type": "text", + "content": "[67] Licheng Yu, Xinlei Chen, Georgia Gkioxari, Mohit Bansal, Tamara L Berg, and Dhruv Batra. Multi-target embodied question answering. In CVPR, pages 6309-6318, 2019. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 578, + 288, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 288, + 631 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 288, + 631 + ], + "type": "text", + "content": "[68] Kaizhi Zheng, Xiaotong Chen, Odest Chadwicke Jenkins, and Xin Eric Wang. Vlmbench: A compositional benchmark for vision-and-language manipulation. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 633, + 288, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 633, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 633, + 288, + 677 + ], + "type": "text", + "content": "[69] Yuke Zhu, O. Groth, Michael S. Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4995-5004, 2016. 
2, 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "9212" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_content_list.json b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..543197f8c9039e55107de5f1f56c3abb29909515 --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_content_list.json @@ -0,0 +1,1761 @@ +[ + { + "type": "text", + "text": "3D GAN Inversion with Facial Symmetry Prior", + "text_level": 1, + "bbox": [ + 245, + 130, + 723, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fei Yin $^{1}$ , Yong Zhang $^{2\\dagger}$ , Xuan Wang $^{3}$ , Tengfei Wang $^{4}$ , Xiaoyu Li $^{2}$ , Yuan Gong $^{1}$ , Yanbo Fan $^{2}$ , Xiaodong Cun $^{2}$ , Ying Shan $^{2}$ , Cengiz Öztireli $^{5}$ , Yujiu Yang $^{1\\dagger}$ , Shenzhen International Graduate School, Tsinghua University \n $^{2}$ Tencent AI Lab $^{3}$ Ant Group $^{4}$ HKUST $^{5}$ University of Cambridge", + "bbox": [ + 171, + 179, + 797, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, a surge of high-quality 3D-aware GANs have been proposed, which leverage the generative power of neural rendering. It is natural to associate 3D GANs with GAN inversion methods to project a real image into the generator's latent space, allowing free-view consistent synthesis and editing, referred as 3D GAN inversion. Although with the facial prior preserved in pre-trained 3D GANs, reconstructing a 3D portrait with only one monocular image is still an ill-posed problem. The straightforward application of 2D GAN inversion methods focuses on texture similarity only while ignoring the correctness of 3D geometry shapes. It may raise geometry collapse effects, especially when reconstructing a side face under an extreme pose. Besides, the synthetic results in novel views are prone to be blurry. In this work, we propose a novel method to promote 3D GAN inversion by introducing facial symmetry prior. We design a pipeline and constraints to make full use of the pseudo auxiliary view obtained via image flipping, which helps obtain a view-consistent and well-structured geometry shape during the inversion process. To enhance texture fidelity in unobserved viewpoints, pseudo labels from depth-guided 3D warping can provide extra supervision. We design constraints to filter out conflict areas for optimization in asymmetric situations. Comprehensive quantitative and qualitative evaluations on image reconstruction and editing demonstrate the superiority of our method.", + "bbox": [ + 76, + 319, + 473, + 713 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 78, + 744, + 209, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent 3D-aware generative adversarial networks (3D GANs) have seen immense progress. By incorporating a neural rendering engine into the generator network architecture, 3D GANs can synthesize view-consistent images. To increase the generation resolution, existing methods [5,12,25,30,31,36-38,41] boost the 3D inductive bias", + "bbox": [ + 75, + 770, + 468, + 863 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9260a43193a7b1a051371d2fff12dabdcb84fd7ca87930b2bc075b9a6bd50a9b.jpg", + "image_caption": [ + "Figure 1. Visual examples of our inversion method. Direct applying 2D GAN inversion methods (PTI [28]) to the 3D GAN suffers from inaccurate geometry in novel views. Our method excels in synthesizing consistent geometry and high-fidelity texture in different views, even reconstructing a face under an extreme pose." + ], + "image_footnote": [], + "bbox": [ + 501, + 284, + 890, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "with an additional 2D CNN-based upsampler or an efficient 3D representation modeling method. With tremendous effort, 3D GANs can produce photorealistic images while enforcing strong 3D consistency across different views.", + "bbox": [ + 496, + 688, + 892, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We are interested in the task of reconstructing a human face with 3D geometry and texture given only one monocular image. It is an ill-posed problem and close to the harsh condition of real scenarios. With the power of 3D GANs, it seems achievable via projecting a target image onto the manifold of a pre-trained generator. The process is referred as 3D GAN inversion. A straightforward path is to follow the 2D GAN inversion method [28], i.e., optimizing the latent code and the network parameters of the generator to overfit the specific portrait.", + "bbox": [ + 496, + 750, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Work done during an internship at Tencent AI Lab.", + "bbox": [ + 96, + 875, + 370, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding Author.", + "bbox": [ + 96, + 887, + 225, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "342", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, since the ground truth 3D geometry is absent given one monocular image, the inversion result is far from satisfactory. The process of fitting a 3D GAN to one image would sacrifice geometric correctness in order to make the synthetic texture as close as possible to the input, even destroying the original semantic-rich latent space. As the optimization process goes, the face geometry tends to degenerate into a flattened shape, due to the absence of geometry supervision, e.g., images from other views. Besides, there exist quality issues in texture synthesis under novel views. 
The rendered images of unseen views tend to be blurry and inconsistent with the original image, especially when reconstructing a side face under an extreme pose. Because there is no texture supervision for unseen views given only one monocular image. The failure cases of directly applying [28] are illustrated in Fig. 1.", + "bbox": [ + 76, + 90, + 472, + 332 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, to alleviate the issue caused by missing geometry and texture supervision under multiple views, we propose a novel 3D GAN inversion approach by taking full advantage of facial symmetry prior to construct pseudo supervision of different views. Intuitively, we note that human faces are almost symmetric. Assuming the given portrait is symmetric, we can obtain an additional perspective of the portrait by simply mirroring the image. The images of two distinct views can provide geometric relations between the 3D points and their 2D projections based on epipolar geometry. Motivated by this, we seek to leverage facial symmetry as the geometric prior constraining the inversion. The symmetry prior is also employed in a traditional 3D reconstruction work [35]. We leverage the mirrored image as extra supervision of another view when performing the inversion, which prevents the geometry collapse. A rough geometry can be obtained by the inversion with the original and mirror images.", + "bbox": [ + 76, + 332, + 472, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To further enhance texture quality and geometry in novel views, we employ depth-guided 3D warping to generate the pseudo images of the views surrounding the input and symmetric camera pose. The depth is inferred from the rough 3D volume. The original image along with the pseudo images are used to fine-tune the generator's parameters for the joint promotion of texture and geometry. To prevent the optimized geometry from deviating too much from the rough geometry, we design a geometry regularization term as a constraint. However, human faces are never fully symmetric in practice, neither in shape nor appearance. Therefore, we design several constraints to extract meaningful information adaptively from the mirror image without compromising the original reconstruction quality.", + "bbox": [ + 76, + 604, + 472, + 815 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are as follows:", + "bbox": [ + 96, + 816, + 354, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We propose a novel 3D GAN inversion method by incorporating facial symmetry prior. It enables a high-quality reconstruction while preserving the multi-view consistency in geometry and texture.", + "bbox": [ + 94, + 839, + 468, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We conduct comprehensive experiments to demonstrate the effectiveness of our method and compare it with many state-of-the-art inversion methods. We also apply our method to various downstream applications.", + "bbox": [ + 517, + 90, + 890, + 152 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 183, + 640, + 199 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 3D-Aware GANs", + "text_level": 1, + "bbox": [ + 500, + 212, + 665, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, neural scene representations have incorporated 3D prior into image synthesis with explicit camera control. 
Inspired by the success of Neural Radiance Fields (NeRF) [22], [6,24] employ implicit volumetric neural rendering structure for consistent novel view synthesis, required only unconstrained monocular images training. To overcome the computational cost and lift the generation resolution, the following methods adopt a two-stage rendering process [5, 12, 21, 25, 30, 31, 37, 38, 41, 42]. Since 2D upsamplers may introduce view-inconsistent artifacts, NeRF path regularization [12] and dual discriminators [5] are proposed. Different 3D modeling representations are further designed for scalable and fast rendering. EG3D [5] introduces tri-plane representation, and GRAM-HD [36] proposes to render radiance manifolds first for efficient sampling. Boosting with the powerful high-fidelity unconditioned 3D GANs, we can achieve real image 3D reconstruction and editing. Specifically, we select the state-of-the-art EG3D [5] as our backbone.", + "bbox": [ + 496, + 241, + 893, + 527 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. GAN Inversion", + "text_level": 1, + "bbox": [ + 500, + 550, + 653, + 566 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To edit a real image [29, 39], GAN inversion is applied first to discover a corresponding latent code from which the generator can synthesize the real image. Existing 2D GAN inversion approaches can be categorized into optimization-based, learning-based, and hybrid methods. [1, 16] directly minimize the reconstruction distance via optimizing the latent codes. Learning-based methods [2, 3, 32, 34] exploit a general encoder network to map the input image into latent space in real-time. Hybrid methods would apply the latent code predicted from the encoder as initialization in the later optimization process. Beyond the original inversion latent space, PTI [28] further optimizes the parameters of the generator to enhance the visual fidelity.", + "bbox": [ + 496, + 578, + 893, + 776 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As for the 3D GAN inversion task, most methods directly transfer the 2D methods, e.g., PTI [28] and e4e [32], which may suffer from the poor results in novel views. Pix2NeRF [4] introduced a joint distillation strategy for training a 3D inversion encoder. A concurrent work [18] proposes to perform camera pose optimization simultaneously to ensure view consistency. However, none of the above methods take geometry shape into consideration.", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "343", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d75a1f0e84d6476ff667bcfbebf14831b191b283b1dc5e67fdd592cbabc96bd2.jpg", + "image_caption": [ + "Figure 2. The proposed framework. A) Our method first performs inversion with the help of the symmetry view to achieve the latent code $w^{+}$ with a roughly correct geometry. B) The original image and the mirror one, along with adjacent warping pseudos, are used for joint optimization to enhance the geometry and texture of rendered images in novel views. C) Depth-guided 3D warping are used to generate pseudo images in novel views to provide extra supervision. Unfaithful regions are filtered out with the authentic mask." 
+ ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 321, + 275 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/962dca2b8aea70d9409e2bf2c19d3023a6a0d158b6e429885d6d85077916bff9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 88, + 630, + 275 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/617c2fdcc313ba70621dcd2ad8b8b0e39aaf1a3d8a6e574efb070d9c5e7b9ead.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 88, + 887, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Few-shot NeRF", + "text_level": 1, + "bbox": [ + 76, + 356, + 230, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Few-shot NeRF aims at reconstructing general 3D scenarios where only a few observed views are available, which shares a similar setting with 3D GAN inversion. MVS-NeRF [7] leverages plane-swept cost volumes in multi-view stereo for geometry-aware scene reasoning to improve performance. DietNeRF [13] enforces semantic consistency between rendered images from unseen view and seen images via a CLIP encoder [27]. RegNeRF [23] regularizes the texture of patches rendered from unobserved viewpoints without relying on additional training modules. Since it is hard to find a common prior for general scenes, these methods investigate how to ensure the geometry consistency of different views, which gives us inspiration.", + "bbox": [ + 75, + 380, + 473, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Definition of 3D GAN Inversion", + "text_level": 1, + "bbox": [ + 76, + 589, + 366, + 606 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Similar to 2D GAN inversion, 3D GAN inversion aims to project an input image $I$ onto the manifold of a pretrained unconditional 3D GAN model $G_{\\mathrm{3D}}(\\cdot ;\\theta)$ parameterized by weight $\\theta$ . After inversion, $G_{\\mathrm{3D}}$ can reconstruct the image faithfully given the corresponding camera pose, synthesize content-consistent images in novel views, and facilitate downstream tasks like face editing. One formulation of the 3D GAN inversion problem is defined as follows:", + "bbox": [ + 75, + 614, + 468, + 737 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nw ^ {*} = \\underset {w} {\\arg \\max } = \\mathcal {L} \\left(G _ {3 D} (w, \\pi ; \\theta), I\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 750, + 468, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $w$ is the latent representation in $\\mathcal{W}^+$ space and $\\pi$ is the corresponding camera matrix of input image. The loss function $\\mathcal{L}(\\cdot, \\cdot)$ is usually defined as pixel-wise reconstruction loss or perceptual loss. In our settings, camera matrix $\\pi$ is known, which is extracted by a pre-trained detector [9]. This formulation cares about the $\\mathcal{W}^+$ space. However, the inversion in the $\\mathcal{W}^+$ space is always not enough to capture local facial details, resulting in inaccurate reconstruction.", + "bbox": [ + 75, + 779, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Following the recent optimization-based 2D GAN inversion method [28], we perform the inversion in the extended latent space for more accurate reconstruction, i.e., the combination of the $\\mathcal{W}^{+}$ space and the parameter space. 
The formulation is defined as:", + "bbox": [ + 496, + 357, + 890, + 433 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nw ^ {*}, \\theta^ {*} = \\underset {w, \\theta} {\\arg \\max } = \\mathcal {L} \\left(G _ {3 D} (w, \\pi ; \\theta), I\\right). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 439, + 890, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Note that $w$ and $\\theta$ are optimized alternatively, i.e., $w$ is optimized using Eq. (1) first and then $\\theta$ is optimized with the fixed $w^{*}$ .", + "bbox": [ + 496, + 470, + 890, + 515 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. The Proposed Approach", + "text_level": 1, + "bbox": [ + 498, + 527, + 728, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to reconstruct a human face through a pretrained 3D GAN given a single monocular image. The reconstruction is supposed to preserve authentic appearance texture and geometry shape in novel views. Due to the limited information about geometry and texture from a single image, overfitting a single view tends to be trapped in geometry collapse, get the blurry texture and miss details in unseen views, especially when reconstructing a side face under an extreme pose. To overcome the issue of lacking information about other views, we introduce facial symmetry prior to promote inversion. We propose a two-stage inversion pipeline, i.e., inversion for rough geometry and joint optimization of geometry and texture. In the first stage, we obtain a rough geometry by optimizing the latent code $w$ using the original and mirror images in Sec. 4.1. In the second stage, we refine the geometry and texture by optimizing the parameter $\\theta$ with the depth-guided 3D warping and a set of designed constraints in Sec 4.2. An overview of our method is shown in Fig. 2.", + "bbox": [ + 496, + 553, + 892, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Inversion with Symmetry for Rough Geometry", + "text_level": 1, + "bbox": [ + 498, + 847, + 890, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The purpose of this stage is to learn a rough geometry as a pivot for further tuning. To compensate for the missing", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "344", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0f5db3830d58f25681435aa4f8ed08732d28a412480e407a1a989c5cef01d562.jpg", + "image_caption": [ + "Figure 3. Visualization of warped pseudos. The red bounding box contains the range of employed pseudos, depending on the yaw angle of the input image. A frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudos." + ], + "image_footnote": [], + "bbox": [ + 78, + 89, + 890, + 186 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/42c87874b858d5fe2f82d16dea9111ba8e70fe1b34c608585d3396830611401c.jpg", + "image_caption": [ + "Source Image", + "Figure 4. Visualization of authentic mask and warped pseudo." 
+ ], + "image_footnote": [], + "bbox": [ + 78, + 239, + 176, + 315 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/be838299f22ea3da41917e8e21bad9d732202582cb08a69961d057302938841a.jpg", + "image_caption": [ + "Warped Image" + ], + "image_footnote": [], + "bbox": [ + 176, + 239, + 272, + 315 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/32e55225f4c19c8ebfad56bc936e8466d3b4936d4665531f0c1b819be3ca68f4.jpg", + "image_caption": [ + "Authentic Mask" + ], + "image_footnote": [], + "bbox": [ + 274, + 239, + 370, + 315 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c00774424ba472ae986435bbc55c6eddacfd8e638237193878207153cf037ce7.jpg", + "image_caption": [ + "Pseudo" + ], + "image_footnote": [], + "bbox": [ + 372, + 239, + 467, + 315 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "information of unseen views, we resort to facial symmetry prior, i.e., the left face is almost the same as the right one. We simply flip the input image $I_{s}$ horizontally to get the mirror image $I_{m}$ whose corresponding camera pose $\\pi_{m}$ can be calculated by multiplying a fixed matrix by the camera extrinsic parameters of $\\pi_{s}$ . The intrinsic parameters are unchanged. The mirror image serves as the pseudo-projected image under a novel view.", + "bbox": [ + 75, + 372, + 467, + 492 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since human faces are not always perfectly symmetric, the mirror image is just an approximation under the novel view. There exists inconsistent content between the original image and the mirror one if they have an overlapping face region, i.e., different colors in the position, referred as conflict content. The inversion should depend more on the original image and take partial useful information from the mirror one. Furthermore, we observe that a frontal face can provide more effective information than a side face. A nearly frontal face provides plenty of facial information, and we should trust less on its mirror image to avoid conflict in the overlapping region. While a side face provides information for only half one face, it has only a small overlapping conflict region with its mirror image. Hence, we should trust more on the mirror image. We exploit an adaptive weighting strategy for the importance of the mirror image according to its yaw angle $\\alpha_{\\mathrm{yaw}}$ . We use a Gaussian function with respect to $\\alpha_{\\mathrm{yaw}}$ to approximate the importance of different views. The weight $\\lambda_{m}$ of the mirror image is defined as:", + "bbox": [ + 75, + 493, + 468, + 781 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {E} (x) = \\frac {1}{\\sigma \\sqrt {2 \\pi}} e ^ {- \\frac {(x - \\mu) ^ {2}}{2 \\sigma^ {2}}}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 801, + 468, + 834 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {m} = \\left\\{ \\begin{array}{l l} 1 - \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right), & \\text {i f} \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right) \\leq k; \\\\ 0, & \\text {i f} \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right) > k; \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 835, + 468, + 876 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\sigma, \\mu$ and $k$ are hyper-parameters. 
As a nearly frontal", + "bbox": [ + 76, + 885, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "mirror face can compensate for very limited extra information for the original image, its weight $\\lambda_{m}$ is clamped to 0.", + "bbox": [ + 498, + 243, + 890, + 273 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To optimize the latent code in $\\mathcal{W}^+$ space, the Perceptual loss [40] is used to minimize the distance between the generated results and the original and mirror images. Following [17, 28], a noise regularization term $\\mathcal{L}_n(n)$ is employed to prevent the noise vector from containing vital information. The objective in this stage is defined as follows:", + "bbox": [ + 496, + 273, + 890, + 364 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {i n v}} = \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right) + \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 369, + 890, + 393 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {m} \\mathcal {L} _ {\\text {L P I P S}} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {m}; \\theta\\right), I _ {m}\\right) + \\lambda_ {n} \\mathcal {L} _ {n} (n),\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 390, + 841, + 405 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $n$ is the noise vector and $\\lambda_{n}$ is a trade-off parameter. The generator is kept frozen at this stage. Visual illustrations in Fig. 8 show that the geometry can be greatly improved with the facial symmetry prior.", + "bbox": [ + 496, + 410, + 890, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Joint Optimization of Geometry and Texture", + "text_level": 1, + "bbox": [ + 498, + 478, + 879, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Though we obtain the rough geometry via the optimization of $w$ in the first stage, there is a distinct gap between the texture of the rendered face and that of the original one, even under the same camera pose. The rendered face shares a similar face geometry with the original one, but it becomes a different identity. In this stage, we optimize the generator's parameters $\\theta$ to bridge the texture gap for identity preservation and refine the rough geometry as well. We design a geometry regularization constraint to avoid the model degrading to generate flattened geometry. Moreover, we construct a set of pseudo images in different views to provide supervision via depth-guided 3D warping.", + "bbox": [ + 496, + 501, + 890, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometry Regularization. We observe that optimizing the generator without any constraint on the geometry will cause the deviation of the geometry from the rough one, resulting in a flattened geometry similar to the case of inversion with a single image. To avoid the geometry drift during overfitting the texture, we regularize the optimized density obtained from the 3D volume of 3D GAN to be similar to that from the rough volume obtained in the first stage. 
Specifically, with the fixed $w$ , we generate depth maps $D$ from 3D GAN under different sampled views and calculate $\\mathcal{L}_2$ distance between them with the corresponding depth maps $D_0$ generated from the un-tuned generator in the first stage:", + "bbox": [ + 496, + 685, + 890, + 866 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {d e p t h}} = \\sum_ {i \\in \\mathbb {S}} \\| D ^ {i} - D _ {0} ^ {i} \\| _ {2}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 871, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "345", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbb{S}$ is the sampled camera pose set.", + "bbox": [ + 76, + 90, + 344, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Depth-guided 3D Warping for Pseudo Supervision. Optimizing the generator with only two images is still not enough to capture the facial details, resulting in blurry effects around facial components such as eyes (see Fig. 11). Hence, we propose to construct pseudo images of different views for extra supervision using the rough geometry and the original and mirror images. Specifically, given the original image (source view) and the rough geometry, we can synthesize an image under a novel view (target view) by warping with 3D guidance. A coordinate pixel $p_t$ of the synthesized image in the target view can be obtained by projecting back onto the source view with the relative camera pose $\\pi_{t\\rightarrow s}$ and the camera intrinsic parameters $K$ :", + "bbox": [ + 75, + 108, + 468, + 305 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\np _ {t \\rightarrow s} = K \\pi_ {t \\rightarrow s} D _ {t} \\left(p _ {t}\\right) K ^ {- 1} p _ {t}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 168, + 313, + 468, + 329 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $D_{t}(\\cdot)$ is the depth map of the target view. Since the projected coordinate $p_{t\\rightarrow s}$ are continuous values, we can extract the color values from the original image with a differentiable bilinear sampling mechanism, i.e., $I_{s\\rightarrow t} = I_s(p_{t\\rightarrow s})$ . The low-resolution depth map will be upsampled to match the dimension of the image.", + "bbox": [ + 76, + 335, + 468, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Authentic Mask. Without distinguishing the foreground pixels from the background, the background pixels in the original image may be projected onto the foreground plane, leading to erroneous results. To overcome this issue, we form a mask to indicate the visibility of pixels to filter invisible areas using the rendered depth values. Specifically, we can get the projected depth value $D_{s}(p_{t\\rightarrow s})$ via sampling from the depth map in the source view. Here we employ the euclidean distance between $D_{s}(p_{t\\rightarrow s})$ and the depth map $D_{t}(p_{t})$ in the target view to calculate the mask. A large distance indicates the pixel $p_t$ is invisible. To ensure the projected pixels are located on the front visible surface, we only preserve the area where the distance is under a threshold $\\tau$ :", + "bbox": [ + 75, + 428, + 468, + 637 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nM \\left(p _ {t}\\right) = \\left\\| D _ {t} \\left(p _ {t}\\right) - D _ {s} \\left(p _ {t \\rightarrow s}\\right)\\right\\| < \\tau . 
\\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 647, + 468, + 664 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Furthermore, due to the poor depth estimation of the background, only the facial part would be warped. We warp the facial mask of the source view to the target view and multiply it with the visibility mask $M(p_{t})$ to get the authentic mask $M_{t}$ . An example is shown in Fig. 4. After multiplying the mask $M_{t}$ with the warped image $I_{s\\rightarrow t}$ , the resulting image can be used for supervision.", + "bbox": [ + 75, + 671, + 468, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Adjacent View Warping. Fig. 3 illustrates the warping results of two examples. When the yaw angle between the source and target views increases, the warping results have more distortions and become less authentic. Therefore, it is intuitive to abandon the pseudo images of the target views that deviate a lot from the source view. Empirically, a frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudo images. The", + "bbox": [ + 75, + 779, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "variance of sampling yaw angles for constructing pseudo images is set to a fixed ratio of $\\lambda_{m}$ that depends on the viewpoint mentioned in Sec. 4.1. The LPIPS loss [14] is used to compute the multi-view pixel-wise distance as follows:", + "bbox": [ + 498, + 90, + 890, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {a d j}} = \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(M _ {t} \\cdot G _ {\\mathrm {3 D}} (w, \\pi_ {t}; \\theta), M _ {t} \\cdot I _ {s \\rightarrow t}\\right). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 540, + 164, + 890, + 181 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although the pseudo images of several unseen adjacent views around the source view have been constructed, it brings marginal improvements on remote views. Especially for a side face, the pseudo images of the remote views are blurry and have incomplete texture (see Fig. 3). Therefore, we also construct pseudo images of the adjacent views around the view of the mirror image.", + "bbox": [ + 496, + 194, + 890, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since the conflict region between the original and mirror images has a side effect on the generator optimization process, resulting in blurry effects on rendered images, even reconstructing the source view (see Fig. 9), we propose to take partial meaningful information from the symmetric views without harming the original inversion quality. We compute the similarities only for facial components, rather than the whole face region. Besides, instead of using a pixelwise loss, we exploit the contextual loss [20] to improve the texture quality. 
The loss for symmetric views is defined as:", + "bbox": [ + 496, + 300, + 890, + 450 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {s y m}} = \\sum_ {\\mathrm {c} \\in \\mathbb {F}} \\mathcal {L} _ {\\mathrm {C X}} \\left(\\operatorname {R O I} ^ {c} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {t}; \\theta\\right)\\right), \\operatorname {R O I} ^ {c} \\left(I _ {m \\rightarrow t}\\right)\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 463, + 890, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $I_{m\\rightarrow t}$ is the pseudo image of the viewpoint $\\pi_t$ warped from the mirror image $I_{m}$ . $\\mathrm{ROI}^c (\\cdot)$ refers to the region of interest component $c$ from the collection $\\mathbb{F} = \\{\\text{eyes, nose, mouth}\\}$ .", + "bbox": [ + 496, + 508, + 890, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reconstruction loss between the original image and its corresponding rendered image is still in use to ensure the quality of the initial perspective, which is defined as:", + "bbox": [ + 496, + 569, + 890, + 614 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {o r i}} = \\mathcal {L} _ {2} \\left(G _ {\\mathrm {3 D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right) + \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(G _ {\\mathrm {3 D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 501, + 627, + 890, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The overall objective of optimizing the generator's parameters is defined as:", + "bbox": [ + 496, + 657, + 890, + 686 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {o p t}} = \\mathcal {L} _ {\\text {o r i}} + \\lambda_ {\\text {a d j}} \\mathcal {L} _ {\\text {a d j}} + \\lambda_ {\\text {s y m}} \\mathcal {L} _ {\\text {s y m}} + \\lambda_ {\\text {d e p t h}} \\mathcal {L} _ {\\text {d e p t h}}. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 700, + 890, + 718 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The trade-off hyper-parameters are set as follows: $\\lambda_{\\mathrm{adj}} = 0.1$ , $\\lambda_{\\mathrm{sym}} = 0.05$ , and $\\lambda_{\\mathrm{depth}} = 1$ .", + "bbox": [ + 496, + 729, + 890, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 776, + 633, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 500, + 801, + 705, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We conduct the experiments on human faces datasets. For all experiments, we select EG3D [5] as our 3D GAN prior, which is pre-trained on FFHQ dataset [15]. We verified quantitative metrics on CelebA-HQ test dataset [19]. 
We further evaluated on MEAD [33], a", + "bbox": [ + 496, + 824, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "346", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1470c26a073523eeed04b623eec00b3a4506c2f62ea9d69dad6437cf3de65479.jpg", + "image_caption": [ + "SG2" + ], + "image_footnote": [], + "bbox": [ + 78, + 87, + 166, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6f39e9d126fa143ebda910254e2cd452fc4782e5fb5112c75a624b98dfc3f054.jpg", + "image_caption": [ + "SG2 $W^{+}$" + ], + "image_footnote": [], + "bbox": [ + 166, + 88, + 253, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8dae65f2f2a02f5c53ea8ac3cb13de8615d53ec51bbdd1fecc7008652fa909a6.jpg", + "image_caption": [ + "PTI" + ], + "image_footnote": [], + "bbox": [ + 253, + 88, + 338, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/33e8f1cb68534e6bd42f66de2805b7e39eb7c0c210af7c41ec2c04d775f800dd.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 339, + 88, + 424, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8212cc13a848a7c393a0b90e1ac2e011acd090eac9c9eb3f7d04f9c59d2a0e00.jpg", + "image_caption": [ + "Source Image" + ], + "image_footnote": [], + "bbox": [ + 426, + 123, + 542, + 213 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/635c354de2acf40041996d77ca926dcf9f864cce12a4dfd408101568c6f69d9b.jpg", + "image_caption": [ + "Source Image" + ], + "image_footnote": [], + "bbox": [ + 426, + 247, + 542, + 338 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a109649a33cb0f0a26d936e4cc64438d53480ab09ccd92b9a91660b7acce29d3.jpg", + "image_caption": [ + "SG2" + ], + "image_footnote": [], + "bbox": [ + 542, + 88, + 629, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2e65c836a8e794365c38d4e39c1ff2acc56f59cc49fece8bcd124bcbb4a5c1ca.jpg", + "image_caption": [ + "SG2 $W^{+}$" + ], + "image_footnote": [], + "bbox": [ + 629, + 89, + 715, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f29b17df448d553e0cb66f8bdd7006215fcd2889a15267c75bc1c83c65980209.jpg", + "image_caption": [ + "PTI" + ], + "image_footnote": [], + "bbox": [ + 717, + 89, + 803, + 358 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b40919e94b2a054a78398d3044dd4babad9cd9fbde75bff6fed0dc54feafafb6.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 803, + 89, + 890, + 358 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f39ad32e77ec31c76de8c5434bd4e2ffa93755129aa0f84a7efcfdc3337892c1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>MSE ↓</td><td>LPIPS ↓</td><td>MS-SSIM ↓</td><td>ID ↑</td><td>Pose ↓</td><td>Depth ↓</td></tr>
<tr><td>SG2 [16]</td><td>0.0881</td><td>0.3231</td><td>0.3557</td><td>0.8209</td><td>0.043</td><td>0.0505</td></tr>
<tr><td>SG2 W+ [1]</td><td>0.0439</td><td>0.2261</td><td>0.2483</td><td>0.8735</td><td>0.040</td><td>0.0500</td></tr>
<tr><td>PTI [28]</td><td>0.0084</td><td>0.0920</td><td>0.0980</td><td>0.9432</td><td>0.037</td><td>0.0510</td></tr>
<tr><td>SPI (Ours)</td><td>0.0082</td><td>0.0865</td><td>0.0991</td><td>0.9470</td><td>0.036</td><td>0.0476</td></tr></table>
", + "bbox": [ + 78, + 420, + 465, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative comparison on CelebA-HQ [19].", + "bbox": [ + 109, + 494, + 434, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "multi-view high-quality video dataset. The first frame from each viewpoint video of 10 identities is extracted for testing.", + "bbox": [ + 75, + 526, + 468, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We evaluate image reconstruction quality and similarity with the following metrics: mean squared error (MSE), perceptual similarity loss (LPIPS) [40], structural similarity (MS-SSIM), and identity similarity (ID) by employing a pre-trained face recognition network [8].", + "bbox": [ + 75, + 559, + 468, + 635 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We mainly compare our methods with optimization-based 2D GAN inversion methods. SG2 [16] directly inverts real images into $\\mathcal{W}$ space with an optimization scheme. [1] extends the inversion into $\\mathcal{W}^+$ space, denoted by SG2 $\\mathcal{W}^+$ . PTI [28] would further tune generator parameters in a second stage. For a fair comparison, both PTI and ours first optimize the latent for 500 steps and then fine-tune the generator for 1,000 steps, while SG2 and SG2 $\\mathcal{W}^+$ optimize the latent for 1,500 steps.", + "bbox": [ + 75, + 638, + 468, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Reconstruction and Novel View Synthesis", + "text_level": 1, + "bbox": [ + 76, + 785, + 429, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative Evaluation. Fig. 5 presents a qualitative comparison of texture and geometry quality of different views. As for the original view, our method is able to inverse challenging details such as earrings, make-up, and wrinkles, which demonstrates that we do not sacrifice the original reconstruction performance. When the camera rotates to", + "bbox": [ + 75, + 809, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9290fe419202b1600b795c1a2479733fc03d03e00ef18faa70abd8d60f7cfc82.jpg", + "image_caption": [ + "Figure 5. Qualitative comparisons with state-of-the-art methods on novel view synthesis. The reconstruction quality of the original view is presented in the first row. The texture and geometry in novel views are shown in the rest rows.", + "Figure 6. Comparison of identity preservation in novel views. The x-axis represents the yaw angle of the input image. '0' indicates the frontal face." + ], + "image_footnote": [], + "bbox": [ + 498, + 419, + 893, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "novel views, images generated from 2D inversion methods present a twisted appearance, due to the nearly flattened geometry shape. Since SG2 does not deviate too far from the initial GAN space, it can generate a portrait with a structured geometry, but fails to preserve the identity. Our method is capable of maintaining authentic and consistent geometry in novel views along with a sharp appearance, even when rotated to an extreme pose.", + "bbox": [ + 496, + 671, + 890, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative Evaluation. The reconstruction metrics of the original view are shown in Table 1. As can be seen, the results align with our qualitative evaluation as we achieved comparable scores to the current 2D state-of-the-art inversion methods [28]. 
The MSE, LPIPS, and ID similarities of ours are further improved, which can be attributed to the employment of $\\mathcal{W}^+$ latent space. Following EG3D, we", + "bbox": [ + 496, + 795, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "347", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/549688891ff4cf7f4ccba5d7fa7eb4ab784a254da5e92e34c81f613878d4c6be.jpg", + "image_caption": [ + "Figure 7. Qualitative comparisons with PTI [28] on MEAD [33]." + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 467, + 224 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/864a823edc119faabd82356225added9e0c199703a73b28abb93a46873430445.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>View</td><td>MSE ↓</td><td>LPIPS ↓</td><td>MS-SSIM ↓</td><td>ID ↑</td></tr>
<tr><td>PTI</td><td rowspan="2">F</td><td>0.03204</td><td>0.2971</td><td>0.2070</td><td>0.8445</td></tr>
<tr><td>Ours</td><td>0.03296</td><td>0.3088</td><td>0.2135</td><td>0.8388</td></tr>
<tr><td>PTI</td><td rowspan="2">L30</td><td>0.04355</td><td>0.2992</td><td>0.2274</td><td>0.8446</td></tr>
<tr><td>Ours</td><td>0.03399</td><td>0.2796</td><td>0.2025</td><td>0.8469</td></tr>
<tr><td>PTI</td><td rowspan="2">L60</td><td>0.08255</td><td>0.3902</td><td>0.3143</td><td>0.7568</td></tr>
<tr><td>Ours</td><td>0.04069</td><td>0.3113</td><td>0.2379</td><td>0.8272</td></tr>
<tr><td>PTI</td><td rowspan="2">R30</td><td>0.04574</td><td>0.3110</td><td>0.2393</td><td>0.8383</td></tr>
<tr><td>Ours</td><td>0.03203</td><td>0.2807</td><td>0.2057</td><td>0.8529</td></tr>
<tr><td>PTI</td><td rowspan="2">R60</td><td>0.07865</td><td>0.3829</td><td>0.3106</td><td>0.7995</td></tr>
<tr><td>Ours</td><td>0.04541</td><td>0.3160</td><td>0.2400</td><td>0.8335</td></tr></table>
", + "bbox": [ + 78, + 253, + 475, + 445 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Quantitative comparison on MEAD [33]. View denotes the yaw angle of the input image. F is frontal, L is left side, and R is right side. 30 and 60 are the rotation degrees. Each time we use one view as the inversion input and use all 5 views as ground truth for evaluation. The average performance of 4 unseen views and 1 seen view is reported.", + "bbox": [ + 75, + 455, + 468, + 541 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "evaluate shape quality by calculating $\\mathcal{L}_2$ for pseudo-ground-truth depth-maps (Depth) generated from DECA [10], and poses (Pose) estimated from synthesized images.", + "bbox": [ + 75, + 564, + 468, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also use identity similarity to evaluate the identity preservation of the synthesized novel views. Given a portrait, we synthesize a novel view image under the symmetric camera pose of the portrait. The similarity between the synthesized image and the flipped image portrait is calculated. The results are shown in Fig. 6. It can be observed that when the yaw angle of a portrait is small, all methods can perform well with a high similarity score. But when the yaw angle is large, only our method can maintain a high score, while other methods encounter a sharp performance drop due to the inaccurate geometry. As we employ the symmetry prior and the adjacent pseudo supervision, the rendered faces can better preserve the texture and geometry. These results demonstrate that we can achieve an identity-consistent 3D inversion.", + "bbox": [ + 75, + 611, + 468, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation on MEAD. To get a comprehensive understanding of the performance of our method, we evaluate on MEAD, a multi-view dataset. The quantitative comparison between the reconstruction portraits and the ground truth in", + "bbox": [ + 75, + 839, + 468, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/01f64dbb372a527572505301802d38858746ef8b6cc0e5f875f43db08a93c32b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 87, + 890, + 189 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/81a45e6fe76e4e1ef8dde0a83a39460da3d2eed14819a8f3290a238cff396eee.jpg", + "image_caption": [ + "Figure 8. Ablation study of facial symmetry prior.", + "Figure 9. Ablation study of authentic mask. Vanilla denotes simply using the full mirror image for supervision. While Ours filters out conflict areas with the designed constraints." + ], + "image_footnote": [], + "bbox": [ + 501, + 214, + 890, + 369 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "different views is shown in Tab. 2. PTI [28] and our method achieve comparable performance when given a frontal portrait. When the view of the input face has an offset from the canonical one, our method surpasses PTI distinctly. Our metrics remain stable as the yaw angle becomes larger while the performance of PTI degrades significantly. The qualitative results are shown in Fig. 7. The geometry shape of PTI suffers from the flattening phenomenon. In contrast, our method can generate a consistent geometry and texture in novel views.", + "bbox": [ + 496, + 438, + 892, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. 
Evaluation of Symmetry Prior", + "text_level": 1, + "bbox": [ + 500, + 603, + 767, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To understand the importance of the symmetry prior, we perform an ablation study by conducting the inversion with or without using the prior. The visual results are shown in Fig. 8. Both approaches can obtain good geometries in the original view. However, in the first row, the geometry of the woman with a thin face turns to be obese as the camera gradually rotates, which aligns with its rendered image. The second row shows that the geometry and the rendered image maintain a better view consistency. We even find that, with the auxiliary view, some expression details can be strengthened, such as the slightly opened mouth.", + "bbox": [ + 496, + 628, + 890, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The symmetry prior cannot be directly employed in the optimization stage because there exist asymmetric areas in a human face. Optimizing the conflict areas will lead to poor results. As shown in Fig. 9, the slanted hair and the single earring in the source image mismatch those in the mirror one. In the first row, when simply using both two images to optimize the generator, the reconstruction quality suffers", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "348", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7c547f6157cd26838a1220a87b88dc74faa9bf4c29c6f4faa564171b3ded4081.jpg", + "image_caption": [ + "Figure 10. Editing results incorporated with [26] and [11]." + ], + "image_footnote": [], + "bbox": [ + 76, + 87, + 468, + 333 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "from degradation. Novel views synthesized by the vanilla version will encounter incorrect texture and blurry results in the conflict areas. Our method can handle such asymmetric cases without the quality worsening by filtering out conflict areas with the designed constraints. Hair, teeth, and other details are consistent in different views, which validates the effectiveness of the proposed constraints.", + "bbox": [ + 75, + 373, + 468, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. View-consistent Face Editing", + "text_level": 1, + "bbox": [ + 76, + 489, + 334, + 506 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Editing a facial image should preserve the original identity while performing a meaningful and visually plausible modification. We extend our methods to downstream editing tasks to validate that the 3D GAN inversion process does not degrade the editability of the original generator. We follow StyleCLIP [26] to achieve text-guided semantic editing and StyleGAN-NADA [11] for stylization, shown in Fig. 10. The editing operation not only influences the original view but also changes the novel view's appearance consistently. It demonstrates that our inversion solution retains the properties in the original space of the generator and can be associated with other editing methods flexibly.", + "bbox": [ + 75, + 513, + 468, + 695 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.5. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 705, + 228, + 720 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Adjacent Warping. Recall that we employ depth-guided warping to create pseudo supervision to improve the texture quality of novel views. In Fig. 
11, we can find that this operation can enhance facial component details such as eyelashes and teeth, improving the overall visual quality.", + "bbox": [ + 75, + 729, + 468, + 808 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Depth Regularization. Since supervision signals all come from RGB images, there is no explicit geometry supervision to ensure shape correctness. The shape is prone to drift to overfit the single image. Unnatural distortions will appear in novel views with the drifted shape. In the third column of Fig. 11, the jaw and nose are elongated with no con", + "bbox": [ + 75, + 810, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/335f4b7c612ab4dd30d80990b3c3e7550938932cfdca8dfed32fdddceb463838.jpg", + "image_caption": [ + "Figure 11. Ablation study of different designed modules." + ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 890, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "straints. With depth regularization, geometry will be calibrated within reasonable limits.", + "bbox": [ + 498, + 375, + 890, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Two-stage Optimization. The joint optimization stage via utilizing a large parameter space can further improve texture, allowing to reconstruct the out-of-domain details, e.g., auspicious mole, as shown in the last column of Fig. 11.", + "bbox": [ + 498, + 412, + 890, + 474 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 505, + 617, + 520 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose a novel 3D GAN inversion method with facial symmetry prior. As demonstrated in massive experiments, our method can support 3D reconstruction at extreme angles with robust geometry. With the designed constraints on texture and geometry, the reconstructed portraits are high-fidelity and possess consistent identity across different views. Besides, the proposed method enables various downstream applications without compromising faithfulness and photorealism.", + "bbox": [ + 496, + 536, + 890, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitation and Future Works. Since the effect of illumination is ignored in our assumption, the illumination is modeled implicitly. During the fitting process of the given image with symmetry prior, light sources sometimes become perfectly symmetrical and distorted. We will attempt to settle the problem via modeling illumination explicitly with albedo and normal in future work.", + "bbox": [ + 496, + 680, + 890, + 785 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This work was partly supported by the National Natural Science Foundation of China (Grant No. U1903213) and the Shenzhen Science and Technology Program (JCYJ20220818101014030, ZDSYS20200811142605016). This work was partly supported by a UKRI Future Leaders Fellowship [grant number G104084].", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "349", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, 2019. 2, 6", + "[2] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit H Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. arXiv preprint arXiv:2111.15666, 2021. 2", + "[3] Qingyan Bai, Yinghao Xu, Jiapeng Zhu, Weihao Xia, Yu-jiu Yang, and Yujun Shen. High-fidelity gan inversion with padding space. arXiv preprint arXiv:2203.11105, 2022. 2", + "[4] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional p-gan for single image to neural radiance fields translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3981-3990, 2022. 2", + "[5] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, 2022. 1, 2, 5", + "[6] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2", + "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3", + "[8] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 6", + "[9] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPR Workshops, 2019. 3", + "[10] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3d face model from inthe-wild images. ACM Transactions on Graphics (ToG), 40(4):1-13, 2021. 7", + "[11] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 8", + "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. In ICLR, 2022. 1, 2", + "[13] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5885-5894, 2021. 3", + "[14] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 5" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5", + "[16] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, 2020. 2, 6", + "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. 
Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 4", + "[18] Jaehoon Ko, Kyusun Cho, Daewon Choi, Kwangrok Ryoo, and Seungryong Kim. 3d gan inversion with pose optimization. arXiv preprint arXiv:2210.07301, 2022. 2", + "[19] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 5, 6", + "[20] Roey Mechrez, Itamar Talmi, and Lihi Zelnik-Manor. The contextual loss for image transformation with non-aligned data. In Proceedings of the European conference on computer vision (ECCV), pages 768-783, 2018. 5", + "[21] Youssef A Mejjati, Isa Milefchik, Aaron Gokaslan, Oliver Wang, Kwang In Kim, and James Tompkin. Gaussian: Controllable image synthesis with 3d gaussians from unposed silhouettes. arXiv preprint arXiv:2106.13215, 2021. 2", + "[22] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2", + "[23] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 3", + "[24] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2", + "[25] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 1, 2", + "[26] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 8", + "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learn-" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "350", + "bbox": [ + 486, + 945, + 511, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ing transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3", + "[28] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 1, 2, 3, 4, 6, 7", + "[29] Yujun Shen, Jinjin Gu, Xiaou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In CVPR, 2020. 2", + "[30] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. arXiv preprint arXiv:2205.15517, 2022. 1, 2", + "[31] Feitong Tan, Sean Fanello, Abhimitra Meka, Sergio Orts-Escolano, Danhang Tang, Rohit Pandey, Jonathan Taylor, Ping Tan, and Yinda Zhang. 
Volux-gan: A generative model for 3d face synthesis with hdri relighting. arXiv preprint arXiv:2201.04873, 2022. 1, 2", + "[32] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. TOG, 2021. 2", + "[33] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 5, 7", + "[34] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. arXiv preprint arXiv:2109.06590, 2021. 2", + "[35] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1-10, 2020. 2", + "[36] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. arXiv preprint arXiv:2206.07255, 2022. 1, 2", + "[37] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18430-18439, 2022. 1, 2", + "[38] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 1, 2", + "[39] Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pretrained stylegan. In European conference on computer vision, 2022. 2", + "[40] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 4, 6" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 1, 2", + "[42] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 
2" + ], + "bbox": [ + 501, + 92, + 890, + 229 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "351", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_model.json b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_model.json new file mode 100644 index 0000000000000000000000000000000000000000..17215d75c83fb8546dc2df56c5d91991d7741ebb --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_model.json @@ -0,0 +1,2354 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.246, + 0.131, + 0.725, + 0.154 + ], + "angle": 0, + "content": "3D GAN Inversion with Facial Symmetry Prior" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.18, + 0.798, + 0.254 + ], + "angle": 0, + "content": "Fei Yin\\(^{1}\\), Yong Zhang\\(^{2\\dagger}\\), Xuan Wang\\(^{3}\\), Tengfei Wang\\(^{4}\\), Xiaoyu Li\\(^{2}\\), Yuan Gong\\(^{1}\\), Yanbo Fan\\(^{2}\\), Xiaodong Cun\\(^{2}\\), Ying Shan\\(^{2}\\), Cengiz Öztireli\\(^{5}\\), Yujiu Yang\\(^{1\\dagger}\\), Shenzhen International Graduate School, Tsinghua University \n\\(^{2}\\)Tencent AI Lab \\(^{3}\\)Ant Group \\(^{4}\\)HKUST \\(^{5}\\)University of Cambridge" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.304 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.32, + 0.474, + 0.714 + ], + "angle": 0, + "content": "Recently, a surge of high-quality 3D-aware GANs have been proposed, which leverage the generative power of neural rendering. It is natural to associate 3D GANs with GAN inversion methods to project a real image into the generator's latent space, allowing free-view consistent synthesis and editing, referred as 3D GAN inversion. Although with the facial prior preserved in pre-trained 3D GANs, reconstructing a 3D portrait with only one monocular image is still an ill-posed problem. The straightforward application of 2D GAN inversion methods focuses on texture similarity only while ignoring the correctness of 3D geometry shapes. It may raise geometry collapse effects, especially when reconstructing a side face under an extreme pose. Besides, the synthetic results in novel views are prone to be blurry. In this work, we propose a novel method to promote 3D GAN inversion by introducing facial symmetry prior. We design a pipeline and constraints to make full use of the pseudo auxiliary view obtained via image flipping, which helps obtain a view-consistent and well-structured geometry shape during the inversion process. To enhance texture fidelity in unobserved viewpoints, pseudo labels from depth-guided 3D warping can provide extra supervision. We design constraints to filter out conflict areas for optimization in asymmetric situations. Comprehensive quantitative and qualitative evaluations on image reconstruction and editing demonstrate the superiority of our method." 
+ }, + { + "type": "title", + "bbox": [ + 0.079, + 0.745, + 0.21, + 0.761 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.771, + 0.47, + 0.864 + ], + "angle": 0, + "content": "Recent 3D-aware generative adversarial networks (3D GANs) have seen immense progress. By incorporating a neural rendering engine into the generator network architecture, 3D GANs can synthesize view-consistent images. To increase the generation resolution, existing methods [5,12,25,30,31,36-38,41] boost the 3D inductive bias" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.285, + 0.891, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.61, + 0.894, + 0.682 + ], + "angle": 0, + "content": "Figure 1. Visual examples of our inversion method. Direct applying 2D GAN inversion methods (PTI [28]) to the 3D GAN suffers from inaccurate geometry in novel views. Our method excels in synthesizing consistent geometry and high-fidelity texture in different views, even reconstructing a face under an extreme pose." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.893, + 0.749 + ], + "angle": 0, + "content": "with an additional 2D CNN-based upsampler or an efficient 3D representation modeling method. With tremendous effort, 3D GANs can produce photorealistic images while enforcing strong 3D consistency across different views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.902 + ], + "angle": 0, + "content": "We are interested in the task of reconstructing a human face with 3D geometry and texture given only one monocular image. It is an ill-posed problem and close to the harsh condition of real scenarios. With the power of 3D GANs, it seems achievable via projecting a target image onto the manifold of a pre-trained generator. The process is referred as 3D GAN inversion. A straightforward path is to follow the 2D GAN inversion method [28], i.e., optimizing the latent code and the network parameters of the generator to overfit the specific portrait." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.875, + 0.371, + 0.888 + ], + "angle": 0, + "content": "Work done during an internship at Tencent AI Lab." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.888, + 0.227, + 0.9 + ], + "angle": 0, + "content": "† Corresponding Author." + }, + { + "type": "list", + "bbox": [ + 0.098, + 0.875, + 0.371, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "342" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.333 + ], + "angle": 0, + "content": "However, since the ground truth 3D geometry is absent given one monocular image, the inversion result is far from satisfactory. The process of fitting a 3D GAN to one image would sacrifice geometric correctness in order to make the synthetic texture as close as possible to the input, even destroying the original semantic-rich latent space. As the optimization process goes, the face geometry tends to degenerate into a flattened shape, due to the absence of geometry supervision, e.g., images from other views. Besides, there exist quality issues in texture synthesis under novel views. The rendered images of unseen views tend to be blurry and inconsistent with the original image, especially when reconstructing a side face under an extreme pose. 
Because there is no texture supervision for unseen views given only one monocular image. The failure cases of directly applying [28] are illustrated in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.333, + 0.473, + 0.605 + ], + "angle": 0, + "content": "In this work, to alleviate the issue caused by missing geometry and texture supervision under multiple views, we propose a novel 3D GAN inversion approach by taking full advantage of facial symmetry prior to construct pseudo supervision of different views. Intuitively, we note that human faces are almost symmetric. Assuming the given portrait is symmetric, we can obtain an additional perspective of the portrait by simply mirroring the image. The images of two distinct views can provide geometric relations between the 3D points and their 2D projections based on epipolar geometry. Motivated by this, we seek to leverage facial symmetry as the geometric prior constraining the inversion. The symmetry prior is also employed in a traditional 3D reconstruction work [35]. We leverage the mirrored image as extra supervision of another view when performing the inversion, which prevents the geometry collapse. A rough geometry can be obtained by the inversion with the original and mirror images." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.606, + 0.473, + 0.816 + ], + "angle": 0, + "content": "To further enhance texture quality and geometry in novel views, we employ depth-guided 3D warping to generate the pseudo images of the views surrounding the input and symmetric camera pose. The depth is inferred from the rough 3D volume. The original image along with the pseudo images are used to fine-tune the generator's parameters for the joint promotion of texture and geometry. To prevent the optimized geometry from deviating too much from the rough geometry, we design a geometry regularization term as a constraint. However, human faces are never fully symmetric in practice, neither in shape nor appearance. Therefore, we design several constraints to extract meaningful information adaptively from the mirror image without compromising the original reconstruction quality." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.817, + 0.355, + 0.831 + ], + "angle": 0, + "content": "Our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.84, + 0.47, + 0.903 + ], + "angle": 0, + "content": "- We propose a novel 3D GAN inversion method by incorporating facial symmetry prior. It enables a high-quality reconstruction while preserving the multi-view consistency in geometry and texture." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.892, + 0.153 + ], + "angle": 0, + "content": "- We conduct comprehensive experiments to demonstrate the effectiveness of our method and compare it with many state-of-the-art inversion methods. We also apply our method to various downstream applications." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.184, + 0.642, + 0.2 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.213, + 0.666, + 0.228 + ], + "angle": 0, + "content": "2.1. 3D-Aware GANs" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.242, + 0.895, + 0.529 + ], + "angle": 0, + "content": "Recently, neural scene representations have incorporated 3D prior into image synthesis with explicit camera control. 
Inspired by the success of Neural Radiance Fields (NeRF) [22], [6,24] employ implicit volumetric neural rendering structure for consistent novel view synthesis, required only unconstrained monocular images training. To overcome the computational cost and lift the generation resolution, the following methods adopt a two-stage rendering process [5, 12, 21, 25, 30, 31, 37, 38, 41, 42]. Since 2D upsamplers may introduce view-inconsistent artifacts, NeRF path regularization [12] and dual discriminators [5] are proposed. Different 3D modeling representations are further designed for scalable and fast rendering. EG3D [5] introduces tri-plane representation, and GRAM-HD [36] proposes to render radiance manifolds first for efficient sampling. Boosting with the powerful high-fidelity unconditioned 3D GANs, we can achieve real image 3D reconstruction and editing. Specifically, we select the state-of-the-art EG3D [5] as our backbone." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.551, + 0.655, + 0.567 + ], + "angle": 0, + "content": "2.2. GAN Inversion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.895, + 0.777 + ], + "angle": 0, + "content": "To edit a real image [29, 39], GAN inversion is applied first to discover a corresponding latent code from which the generator can synthesize the real image. Existing 2D GAN inversion approaches can be categorized into optimization-based, learning-based, and hybrid methods. [1, 16] directly minimize the reconstruction distance via optimizing the latent codes. Learning-based methods [2, 3, 32, 34] exploit a general encoder network to map the input image into latent space in real-time. Hybrid methods would apply the latent code predicted from the encoder as initialization in the later optimization process. Beyond the original inversion latent space, PTI [28] further optimizes the parameters of the generator to enhance the visual fidelity." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "As for the 3D GAN inversion task, most methods directly transfer the 2D methods, e.g., PTI [28] and e4e [32], which may suffer from the poor results in novel views. Pix2NeRF [4] introduced a joint distillation strategy for training a 3D inversion encoder. A concurrent work [18] proposes to perform camera pose optimization simultaneously to ensure view consistency. However, none of the above methods take geometry shape into consideration." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "343" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.322, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.089, + 0.631, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.089, + 0.888, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.288, + 0.895, + 0.346 + ], + "angle": 0, + "content": "Figure 2. The proposed framework. A) Our method first performs inversion with the help of the symmetry view to achieve the latent code \\( w^{+} \\) with a roughly correct geometry. B) The original image and the mirror one, along with adjacent warping pseudos, are used for joint optimization to enhance the geometry and texture of rendered images in novel views. C) Depth-guided 3D warping are used to generate pseudo images in novel views to provide extra supervision. 
Unfaithful regions are filtered out with the authentic mask." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.357, + 0.232, + 0.373 + ], + "angle": 0, + "content": "2.3. Few-shot NeRF" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.381, + 0.474, + 0.579 + ], + "angle": 0, + "content": "Few-shot NeRF aims at reconstructing general 3D scenarios where only a few observed views are available, which shares a similar setting with 3D GAN inversion. MVS-NeRF [7] leverages plane-swept cost volumes in multi-view stereo for geometry-aware scene reasoning to improve performance. DietNeRF [13] enforces semantic consistency between rendered images from unseen view and seen images via a CLIP encoder [27]. RegNeRF [23] regularizes the texture of patches rendered from unobserved viewpoints without relying on additional training modules. Since it is hard to find a common prior for general scenes, these methods investigate how to ensure the geometry consistency of different views, which gives us inspiration." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.59, + 0.367, + 0.607 + ], + "angle": 0, + "content": "3. Definition of 3D GAN Inversion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.615, + 0.47, + 0.738 + ], + "angle": 0, + "content": "Similar to 2D GAN inversion, 3D GAN inversion aims to project an input image \\(I\\) onto the manifold of a pretrained unconditional 3D GAN model \\(G_{\\mathrm{3D}}(\\cdot ;\\theta)\\) parameterized by weight \\(\\theta\\). After inversion, \\(G_{\\mathrm{3D}}\\) can reconstruct the image faithfully given the corresponding camera pose, synthesize content-consistent images in novel views, and facilitate downstream tasks like face editing. One formulation of the 3D GAN inversion problem is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.751, + 0.47, + 0.775 + ], + "angle": 0, + "content": "\\[\nw ^ {*} = \\underset {w} {\\arg \\max } = \\mathcal {L} \\left(G _ {3 D} (w, \\pi ; \\theta), I\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.902 + ], + "angle": 0, + "content": "where \\( w \\) is the latent representation in \\( \\mathcal{W}^+ \\) space and \\( \\pi \\) is the corresponding camera matrix of input image. The loss function \\( \\mathcal{L}(\\cdot, \\cdot) \\) is usually defined as pixel-wise reconstruction loss or perceptual loss. In our settings, camera matrix \\( \\pi \\) is known, which is extracted by a pre-trained detector [9]. This formulation cares about the \\( \\mathcal{W}^+ \\) space. However, the inversion in the \\( \\mathcal{W}^+ \\) space is always not enough to capture local facial details, resulting in inaccurate reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.358, + 0.892, + 0.434 + ], + "angle": 0, + "content": "Following the recent optimization-based 2D GAN inversion method [28], we perform the inversion in the extended latent space for more accurate reconstruction, i.e., the combination of the \\(\\mathcal{W}^{+}\\) space and the parameter space. The formulation is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.44, + 0.892, + 0.466 + ], + "angle": 0, + "content": "\\[\nw ^ {*}, \\theta^ {*} = \\underset {w, \\theta} {\\arg \\max } = \\mathcal {L} \\left(G _ {3 D} (w, \\pi ; \\theta), I\\right). 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.471, + 0.892, + 0.516 + ], + "angle": 0, + "content": "Note that \\( w \\) and \\( \\theta \\) are optimized alternatively, i.e., \\( w \\) is optimized using Eq. (1) first and then \\( \\theta \\) is optimized with the fixed \\( w^{*} \\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.528, + 0.729, + 0.546 + ], + "angle": 0, + "content": "4. The Proposed Approach" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.893, + 0.842 + ], + "angle": 0, + "content": "Our goal is to reconstruct a human face through a pretrained 3D GAN given a single monocular image. The reconstruction is supposed to preserve authentic appearance texture and geometry shape in novel views. Due to the limited information about geometry and texture from a single image, overfitting a single view tends to be trapped in geometry collapse, get the blurry texture and miss details in unseen views, especially when reconstructing a side face under an extreme pose. To overcome the issue of lacking information about other views, we introduce facial symmetry prior to promote inversion. We propose a two-stage inversion pipeline, i.e., inversion for rough geometry and joint optimization of geometry and texture. In the first stage, we obtain a rough geometry by optimizing the latent code \\( w \\) using the original and mirror images in Sec. 4.1. In the second stage, we refine the geometry and texture by optimizing the parameter \\( \\theta \\) with the depth-guided 3D warping and a set of designed constraints in Sec 4.2. An overview of our method is shown in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.848, + 0.892, + 0.865 + ], + "angle": 0, + "content": "4.1. Inversion with Symmetry for Rough Geometry" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "The purpose of this stage is to learn a rough geometry as a pivot for further tuning. To compensate for the missing" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "344" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.09, + 0.891, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.202, + 0.893, + 0.23 + ], + "angle": 0, + "content": "Figure 3. Visualization of warped pseudos. The red bounding box contains the range of employed pseudos, depending on the yaw angle of the input image. A frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudos." 
+ }, + { + "type": "image", + "bbox": [ + 0.08, + 0.241, + 0.177, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.316, + 0.145, + 0.335 + ], + "angle": 0, + "content": "Source Image" + }, + { + "type": "image", + "bbox": [ + 0.178, + 0.241, + 0.274, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.316, + 0.245, + 0.335 + ], + "angle": 0, + "content": "Warped Image" + }, + { + "type": "image", + "bbox": [ + 0.275, + 0.241, + 0.371, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.302, + 0.316, + 0.351, + 0.334 + ], + "angle": 0, + "content": "Authentic Mask" + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.241, + 0.468, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.316, + 0.439, + 0.325 + ], + "angle": 0, + "content": "Pseudo" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.346, + 0.457, + 0.361 + ], + "angle": 0, + "content": "Figure 4. Visualization of authentic mask and warped pseudo." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.373, + 0.468, + 0.493 + ], + "angle": 0, + "content": "information of unseen views, we resort to facial symmetry prior, i.e., the left face is almost the same as the right one. We simply flip the input image \\( I_{s} \\) horizontally to get the mirror image \\( I_{m} \\) whose corresponding camera pose \\( \\pi_{m} \\) can be calculated by multiplying a fixed matrix by the camera extrinsic parameters of \\( \\pi_{s} \\). The intrinsic parameters are unchanged. The mirror image serves as the pseudo-projected image under a novel view." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.494, + 0.47, + 0.782 + ], + "angle": 0, + "content": "Since human faces are not always perfectly symmetric, the mirror image is just an approximation under the novel view. There exists inconsistent content between the original image and the mirror one if they have an overlapping face region, i.e., different colors in the position, referred as conflict content. The inversion should depend more on the original image and take partial useful information from the mirror one. Furthermore, we observe that a frontal face can provide more effective information than a side face. A nearly frontal face provides plenty of facial information, and we should trust less on its mirror image to avoid conflict in the overlapping region. While a side face provides information for only half one face, it has only a small overlapping conflict region with its mirror image. Hence, we should trust more on the mirror image. We exploit an adaptive weighting strategy for the importance of the mirror image according to its yaw angle \\(\\alpha_{\\mathrm{yaw}}\\). We use a Gaussian function with respect to \\(\\alpha_{\\mathrm{yaw}}\\) to approximate the importance of different views. 
The weight \\(\\lambda_{m}\\) of the mirror image is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.802, + 0.469, + 0.835 + ], + "angle": 0, + "content": "\\[\n\\mathcal {E} (x) = \\frac {1}{\\sigma \\sqrt {2 \\pi}} e ^ {- \\frac {(x - \\mu) ^ {2}}{2 \\sigma^ {2}}}, \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.837, + 0.469, + 0.877 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {m} = \\left\\{ \\begin{array}{l l} 1 - \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right), & \\text {i f} \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right) \\leq k; \\\\ 0, & \\text {i f} \\mathcal {E} \\left(\\alpha_ {\\text {y a w}}\\right) > k; \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.886, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(\\sigma, \\mu\\) and \\(k\\) are hyper-parameters. As a nearly frontal" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.244, + 0.892, + 0.274 + ], + "angle": 0, + "content": "mirror face can compensate for very limited extra information for the original image, its weight \\(\\lambda_{m}\\) is clamped to 0." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.275, + 0.892, + 0.365 + ], + "angle": 0, + "content": "To optimize the latent code in \\(\\mathcal{W}^+\\) space, the Perceptual loss [40] is used to minimize the distance between the generated results and the original and mirror images. Following [17, 28], a noise regularization term \\(\\mathcal{L}_n(n)\\) is employed to prevent the noise vector from containing vital information. The objective in this stage is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.528, + 0.37, + 0.891, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {i n v}} = \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right) + \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.556, + 0.391, + 0.843, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\lambda_ {m} \\mathcal {L} _ {\\text {L P I P S}} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {m}; \\theta\\right), I _ {m}\\right) + \\lambda_ {n} \\mathcal {L} _ {n} (n),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.411, + 0.892, + 0.472 + ], + "angle": 0, + "content": "where \\( n \\) is the noise vector and \\( \\lambda_{n} \\) is a trade-off parameter. The generator is kept frozen at this stage. Visual illustrations in Fig. 8 show that the geometry can be greatly improved with the facial symmetry prior." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.479, + 0.88, + 0.495 + ], + "angle": 0, + "content": "4.2. Joint Optimization of Geometry and Texture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.502, + 0.892, + 0.683 + ], + "angle": 0, + "content": "Though we obtain the rough geometry via the optimization of \\( w \\) in the first stage, there is a distinct gap between the texture of the rendered face and that of the original one, even under the same camera pose. The rendered face shares a similar face geometry with the original one, but it becomes a different identity. In this stage, we optimize the generator's parameters \\( \\theta \\) to bridge the texture gap for identity preservation and refine the rough geometry as well. We design a geometry regularization constraint to avoid the model degrading to generate flattened geometry. Moreover, we construct a set of pseudo images in different views to provide supervision via depth-guided 3D warping." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.867 + ], + "angle": 0, + "content": "Geometry Regularization. We observe that optimizing the generator without any constraint on the geometry will cause the deviation of the geometry from the rough one, resulting in a flattened geometry similar to the case of inversion with a single image. To avoid the geometry drift during overfitting the texture, we regularize the optimized density obtained from the 3D volume of 3D GAN to be similar to that from the rough volume obtained in the first stage. Specifically, with the fixed \\( w \\), we generate depth maps \\( D \\) from 3D GAN under different sampled views and calculate \\( \\mathcal{L}_2 \\) distance between them with the corresponding depth maps \\( D_0 \\) generated from the un-tuned generator in the first stage:" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.872, + 0.891, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {d e p t h}} = \\sum_ {i \\in \\mathbb {S}} \\| D ^ {i} - D _ {0} ^ {i} \\| _ {2}, \\tag {6}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "345" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.345, + 0.106 + ], + "angle": 0, + "content": "where \\(\\mathbb{S}\\) is the sampled camera pose set." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.109, + 0.47, + 0.306 + ], + "angle": 0, + "content": "Depth-guided 3D Warping for Pseudo Supervision. Optimizing the generator with only two images is still not enough to capture the facial details, resulting in blurry effects around facial components such as eyes (see Fig. 11). Hence, we propose to construct pseudo images of different views for extra supervision using the rough geometry and the original and mirror images. Specifically, given the original image (source view) and the rough geometry, we can synthesize an image under a novel view (target view) by warping with 3D guidance. A coordinate pixel \\( p_t \\) of the synthesized image in the target view can be obtained by projecting back onto the source view with the relative camera pose \\( \\pi_{t\\rightarrow s} \\) and the camera intrinsic parameters \\( K \\):" + }, + { + "type": "equation", + "bbox": [ + 0.169, + 0.314, + 0.469, + 0.33 + ], + "angle": 0, + "content": "\\[\np _ {t \\rightarrow s} = K \\pi_ {t \\rightarrow s} D _ {t} \\left(p _ {t}\\right) K ^ {- 1} p _ {t}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.337, + 0.469, + 0.428 + ], + "angle": 0, + "content": "where \\( D_{t}(\\cdot) \\) is the depth map of the target view. Since the projected coordinate \\( p_{t\\rightarrow s} \\) are continuous values, we can extract the color values from the original image with a differentiable bilinear sampling mechanism, i.e., \\( I_{s\\rightarrow t} = I_s(p_{t\\rightarrow s}) \\). The low-resolution depth map will be upsampled to match the dimension of the image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.47, + 0.638 + ], + "angle": 0, + "content": "Authentic Mask. Without distinguishing the foreground pixels from the background, the background pixels in the original image may be projected onto the foreground plane, leading to erroneous results. To overcome this issue, we form a mask to indicate the visibility of pixels to filter invisible areas using the rendered depth values. 
Specifically, we can get the projected depth value \\( D_{s}(p_{t\\rightarrow s}) \\) via sampling from the depth map in the source view. Here we employ the euclidean distance between \\( D_{s}(p_{t\\rightarrow s}) \\) and the depth map \\( D_{t}(p_{t}) \\) in the target view to calculate the mask. A large distance indicates the pixel \\( p_t \\) is invisible. To ensure the projected pixels are located on the front visible surface, we only preserve the area where the distance is under a threshold \\( \\tau \\):" + }, + { + "type": "equation", + "bbox": [ + 0.147, + 0.648, + 0.469, + 0.665 + ], + "angle": 0, + "content": "\\[\nM \\left(p _ {t}\\right) = \\left\\| D _ {t} \\left(p _ {t}\\right) - D _ {s} \\left(p _ {t \\rightarrow s}\\right)\\right\\| < \\tau . \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.672, + 0.469, + 0.778 + ], + "angle": 0, + "content": "Furthermore, due to the poor depth estimation of the background, only the facial part would be warped. We warp the facial mask of the source view to the target view and multiply it with the visibility mask \\( M(p_{t}) \\) to get the authentic mask \\( M_{t} \\). An example is shown in Fig. 4. After multiplying the mask \\( M_{t} \\) with the warped image \\( I_{s\\rightarrow t} \\), the resulting image can be used for supervision." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Adjacent View Warping. Fig. 3 illustrates the warping results of two examples. When the yaw angle between the source and target views increases, the warping results have more distortions and become less authentic. Therefore, it is intuitive to abandon the pseudo images of the target views that deviate a lot from the source view. Empirically, a frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudo images. The" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.152 + ], + "angle": 0, + "content": "variance of sampling yaw angles for constructing pseudo images is set to a fixed ratio of \\(\\lambda_{m}\\) that depends on the viewpoint mentioned in Sec. 4.1. The LPIPS loss [14] is used to compute the multi-view pixel-wise distance as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.542, + 0.165, + 0.892, + 0.182 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {a d j}} = \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(M _ {t} \\cdot G _ {\\mathrm {3 D}} (w, \\pi_ {t}; \\theta), M _ {t} \\cdot I _ {s \\rightarrow t}\\right). \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.195, + 0.892, + 0.3 + ], + "angle": 0, + "content": "Although the pseudo images of several unseen adjacent views around the source view have been constructed, it brings marginal improvements on remote views. Especially for a side face, the pseudo images of the remote views are blurry and have incomplete texture (see Fig. 3). Therefore, we also construct pseudo images of the adjacent views around the view of the mirror image." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.301, + 0.892, + 0.452 + ], + "angle": 0, + "content": "Since the conflict region between the original and mirror images has a side effect on the generator optimization process, resulting in blurry effects on rendered images, even reconstructing the source view (see Fig. 9), we propose to take partial meaningful information from the symmetric views without harming the original inversion quality. 
We compute the similarities only for facial components, rather than the whole face region. Besides, instead of using a pixelwise loss, we exploit the contextual loss [20] to improve the texture quality. The loss for symmetric views is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.464, + 0.891, + 0.508 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {s y m}} = \\sum_ {\\mathrm {c} \\in \\mathbb {F}} \\mathcal {L} _ {\\mathrm {C X}} \\left(\\operatorname {R O I} ^ {c} \\left(G _ {3 \\mathrm {D}} \\left(w, \\pi_ {t}; \\theta\\right)\\right), \\operatorname {R O I} ^ {c} \\left(I _ {m \\rightarrow t}\\right)\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.892, + 0.569 + ], + "angle": 0, + "content": "where \\(I_{m\\rightarrow t}\\) is the pseudo image of the viewpoint \\(\\pi_t\\) warped from the mirror image \\(I_{m}\\). \\(\\mathrm{ROI}^c (\\cdot)\\) refers to the region of interest component \\(c\\) from the collection \\(\\mathbb{F} = \\{\\text{eyes, nose, mouth}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.57, + 0.892, + 0.616 + ], + "angle": 0, + "content": "The reconstruction loss between the original image and its corresponding rendered image is still in use to ensure the quality of the initial perspective, which is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.502, + 0.628, + 0.891, + 0.658 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {o r i}} = \\mathcal {L} _ {2} \\left(G _ {\\mathrm {3 D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right) + \\mathcal {L} _ {\\mathrm {L P I P S}} \\left(G _ {\\mathrm {3 D}} \\left(w, \\pi_ {s}; \\theta\\right), I _ {s}\\right). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.891, + 0.688 + ], + "angle": 0, + "content": "The overall objective of optimizing the generator's parameters is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.702, + 0.891, + 0.719 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {o p t}} = \\mathcal {L} _ {\\text {o r i}} + \\lambda_ {\\text {a d j}} \\mathcal {L} _ {\\text {a d j}} + \\lambda_ {\\text {s y m}} \\mathcal {L} _ {\\text {s y m}} + \\lambda_ {\\text {d e p t h}} \\mathcal {L} _ {\\text {d e p t h}}. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.731, + 0.892, + 0.763 + ], + "angle": 0, + "content": "The trade-off hyper-parameters are set as follows: \\(\\lambda_{\\mathrm{adj}} = 0.1\\), \\(\\lambda_{\\mathrm{sym}} = 0.05\\), and \\(\\lambda_{\\mathrm{depth}} = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.777, + 0.634, + 0.794 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.802, + 0.707, + 0.818 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Datasets. We conduct the experiments on human faces datasets. For all experiments, we select EG3D [5] as our 3D GAN prior, which is pre-trained on FFHQ dataset [15]. We verified quantitative metrics on CelebA-HQ test dataset [19]. 
We further evaluated on MEAD [33], a" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "346" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.088, + 0.167, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.359, + 0.137, + 0.368 + ], + "angle": 0, + "content": "SG2" + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.089, + 0.254, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.187, + 0.359, + 0.236, + 0.368 + ], + "angle": 0, + "content": "SG2 \\(W^{+}\\)" + }, + { + "type": "image", + "bbox": [ + 0.254, + 0.089, + 0.339, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.284, + 0.359, + 0.305, + 0.368 + ], + "angle": 0, + "content": "PTI" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.089, + 0.426, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.368, + 0.36, + 0.396, + 0.369 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.124, + 0.543, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.215, + 0.519, + 0.224 + ], + "angle": 0, + "content": "Source Image" + }, + { + "type": "image", + "bbox": [ + 0.427, + 0.248, + 0.543, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.339, + 0.518, + 0.348 + ], + "angle": 0, + "content": "Source Image" + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.089, + 0.63, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.574, + 0.359, + 0.599, + 0.369 + ], + "angle": 0, + "content": "SG2" + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.09, + 0.716, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.652, + 0.359, + 0.699, + 0.368 + ], + "angle": 0, + "content": "SG2 \\(W^{+}\\)" + }, + { + "type": "image", + "bbox": [ + 0.718, + 0.09, + 0.804, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.75, + 0.359, + 0.771, + 0.368 + ], + "angle": 0, + "content": "PTI" + }, + { + "type": "image", + "bbox": [ + 0.804, + 0.09, + 0.891, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.83, + 0.359, + 0.857, + 0.368 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.381, + 0.893, + 0.41 + ], + "angle": 0, + "content": "Figure 5. Qualitative comparisons with state-of-the-art methods on novel view synthesis. The reconstruction quality of the original view is presented in the first row. The texture and geometry in novel views are shown in the rest rows." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.421, + 0.466, + 0.486 + ], + "angle": 0, + "content": "
<table><thead><tr><th>Method</th><th>MSE ↓</th><th>LPIPS ↓</th><th>MS-SSIM ↓</th><th>ID ↑</th><th>Pose ↓</th><th>Depth ↓</th></tr></thead><tbody>
<tr><td>SG2 [16]</td><td>0.0881</td><td>0.3231</td><td>0.3557</td><td>0.8209</td><td>0.043</td><td>0.0505</td></tr>
<tr><td>SG2 W+ [1]</td><td>0.0439</td><td>0.2261</td><td>0.2483</td><td>0.8735</td><td>0.040</td><td>0.0500</td></tr>
<tr><td>PTI [28]</td><td>0.0084</td><td>0.0920</td><td>0.0980</td><td>0.9432</td><td>0.037</td><td>0.0510</td></tr>
<tr><td>SPI (Ours)</td><td>0.0082</td><td>0.0865</td><td>0.0991</td><td>0.9470</td><td>0.036</td><td>0.0476</td></tr>
</tbody></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.11, + 0.495, + 0.435, + 0.51 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison on CelebA-HQ [19]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.527, + 0.469, + 0.557 + ], + "angle": 0, + "content": "multi-view high-quality video dataset. The first frame from each viewpoint video of 10 identities is extracted for testing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.56, + 0.469, + 0.636 + ], + "angle": 0, + "content": "Metrics. We evaluate image reconstruction quality and similarity with the following metrics: mean squared error (MSE), perceptual similarity loss (LPIPS) [40], structural similarity (MS-SSIM), and identity similarity (ID) by employing a pre-trained face recognition network [8]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.47, + 0.775 + ], + "angle": 0, + "content": "Baselines. We mainly compare our methods with optimization-based 2D GAN inversion methods. SG2 [16] directly inverts real images into \\(\\mathcal{W}\\) space with an optimization scheme. [1] extends the inversion into \\(\\mathcal{W}^+\\) space, denoted by SG2 \\(\\mathcal{W}^+\\). PTI [28] would further tune generator parameters in a second stage. For a fair comparison, both PTI and ours first optimize the latent for 500 steps and then fine-tune the generator for 1,000 steps, while SG2 and SG2 \\(\\mathcal{W}^+\\) optimize the latent for 1,500 steps." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.786, + 0.431, + 0.803 + ], + "angle": 0, + "content": "5.2. Reconstruction and Novel View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Qualitative Evaluation. Fig. 5 presents a qualitative comparison of texture and geometry quality of different views. As for the original view, our method is able to inverse challenging details such as earrings, make-up, and wrinkles, which demonstrates that we do not sacrifice the original reconstruction performance. When the camera rotates to" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.42, + 0.895, + 0.606 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.614, + 0.893, + 0.656 + ], + "angle": 0, + "content": "Figure 6. Comparison of identity preservation in novel views. The x-axis represents the yaw angle of the input image. '0' indicates the frontal face." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.672, + 0.892, + 0.793 + ], + "angle": 0, + "content": "novel views, images generated from 2D inversion methods present a twisted appearance, due to the nearly flattened geometry shape. Since SG2 does not deviate too far from the initial GAN space, it can generate a portrait with a structured geometry, but fails to preserve the identity. Our method is capable of maintaining authentic and consistent geometry in novel views along with a sharp appearance, even when rotated to an extreme pose." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Quantitative Evaluation. The reconstruction metrics of the original view are shown in Table 1. As can be seen, the results align with our qualitative evaluation as we achieved comparable scores to the current 2D state-of-the-art inversion methods [28]. The MSE, LPIPS, and ID similarities of ours are further improved, which can be attributed to the employment of \\(\\mathcal{W}^+\\) latent space. 
Following EG3D, we" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.956 + ], + "angle": 0, + "content": "347" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.079, + 0.089, + 0.468, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.08, + 0.237, + 0.466, + 0.251 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparisons with PTI [28] on MEAD [33]." + }, + { + "type": "table", + "bbox": [ + 0.079, + 0.255, + 0.476, + 0.446 + ], + "angle": 0, + "content": "
<table><thead><tr><th>Method</th><th>View</th><th>MSE ↓</th><th>LPIPS ↓</th><th>MS-SSIM ↓</th><th>ID ↑</th></tr></thead><tbody>
<tr><td>PTI</td><td rowspan="2">F</td><td>0.03204</td><td>0.2971</td><td>0.2070</td><td>0.8445</td></tr>
<tr><td>Ours</td><td>0.03296</td><td>0.3088</td><td>0.2135</td><td>0.8388</td></tr>
<tr><td>PTI</td><td rowspan="2">L30</td><td>0.04355</td><td>0.2992</td><td>0.2274</td><td>0.8446</td></tr>
<tr><td>Ours</td><td>0.03399</td><td>0.2796</td><td>0.2025</td><td>0.8469</td></tr>
<tr><td>PTI</td><td rowspan="2">L60</td><td>0.08255</td><td>0.3902</td><td>0.3143</td><td>0.7568</td></tr>
<tr><td>Ours</td><td>0.04069</td><td>0.3113</td><td>0.2379</td><td>0.8272</td></tr>
<tr><td>PTI</td><td rowspan="2">R30</td><td>0.04574</td><td>0.3110</td><td>0.2393</td><td>0.8383</td></tr>
<tr><td>Ours</td><td>0.03203</td><td>0.2807</td><td>0.2057</td><td>0.8529</td></tr>
<tr><td>PTI</td><td rowspan="2">R60</td><td>0.07865</td><td>0.3829</td><td>0.3106</td><td>0.7995</td></tr>
<tr><td>Ours</td><td>0.04541</td><td>0.3160</td><td>0.2400</td><td>0.8335</td></tr>
</tbody></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.457, + 0.47, + 0.542 + ], + "angle": 0, + "content": "Table 2. Quantitative comparison on MEAD [33]. View denotes the yaw angle of the input image. F is frontal, L is left side, and R is right side. 30 and 60 are the rotation degrees. Each time we use one view as the inversion input and use all 5 views as ground truth for evaluation. The average performance of 4 unseen views and 1 seen view is reported." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.565, + 0.469, + 0.611 + ], + "angle": 0, + "content": "evaluate shape quality by calculating \\(\\mathcal{L}_2\\) for pseudo-ground-truth depth-maps (Depth) generated from DECA [10], and poses (Pose) estimated from synthesized images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.469, + 0.837 + ], + "angle": 0, + "content": "We also use identity similarity to evaluate the identity preservation of the synthesized novel views. Given a portrait, we synthesize a novel view image under the symmetric camera pose of the portrait. The similarity between the synthesized image and the flipped image portrait is calculated. The results are shown in Fig. 6. It can be observed that when the yaw angle of a portrait is small, all methods can perform well with a high similarity score. But when the yaw angle is large, only our method can maintain a high score, while other methods encounter a sharp performance drop due to the inaccurate geometry. As we employ the symmetry prior and the adjacent pseudo supervision, the rendered faces can better preserve the texture and geometry. These results demonstrate that we can achieve an identity-consistent 3D inversion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Evaluation on MEAD. To get a comprehensive understanding of the performance of our method, we evaluate on MEAD, a multi-view dataset. The quantitative comparison between the reconstruction portraits and the ground truth in" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.088, + 0.892, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.546, + 0.2, + 0.845, + 0.215 + ], + "angle": 0, + "content": "Figure 8. Ablation study of facial symmetry prior." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.215, + 0.892, + 0.371 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.381, + 0.892, + 0.424 + ], + "angle": 0, + "content": "Figure 9. Ablation study of authentic mask. Vanilla denotes simply using the full mirror image for supervision. While Ours filters out conflict areas with the designed constraints." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.439, + 0.893, + 0.59 + ], + "angle": 0, + "content": "different views is shown in Tab. 2. PTI [28] and our method achieve comparable performance when given a frontal portrait. When the view of the input face has an offset from the canonical one, our method surpasses PTI distinctly. Our metrics remain stable as the yaw angle becomes larger while the performance of PTI degrades significantly. The qualitative results are shown in Fig. 7. The geometry shape of PTI suffers from the flattening phenomenon. In contrast, our method can generate a consistent geometry and texture in novel views." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.604, + 0.769, + 0.62 + ], + "angle": 0, + "content": "5.3. 
Evaluation of Symmetry Prior" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.794 + ], + "angle": 0, + "content": "To understand the importance of the symmetry prior, we perform an ablation study by conducting the inversion with or without using the prior. The visual results are shown in Fig. 8. Both approaches can obtain good geometries in the original view. However, in the first row, the geometry of the woman with a thin face turns to be obese as the camera gradually rotates, which aligns with its rendered image. The second row shows that the geometry and the rendered image maintain a better view consistency. We even find that, with the auxiliary view, some expression details can be strengthened, such as the slightly opened mouth." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The symmetry prior cannot be directly employed in the optimization stage because there exist asymmetric areas in a human face. Optimizing the conflict areas will lead to poor results. As shown in Fig. 9, the slanted hair and the single earring in the source image mismatch those in the mirror one. In the first row, when simply using both two images to optimize the generator, the reconstruction quality suffers" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.956 + ], + "angle": 0, + "content": "348" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.088, + 0.47, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.345, + 0.446, + 0.358 + ], + "angle": 0, + "content": "Figure 10. Editing results incorporated with [26] and [11]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.374, + 0.47, + 0.48 + ], + "angle": 0, + "content": "from degradation. Novel views synthesized by the vanilla version will encounter incorrect texture and blurry results in the conflict areas. Our method can handle such asymmetric cases without the quality worsening by filtering out conflict areas with the designed constraints. Hair, teeth, and other details are consistent in different views, which validates the effectiveness of the proposed constraints." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.49, + 0.336, + 0.507 + ], + "angle": 0, + "content": "5.4. View-consistent Face Editing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.514, + 0.47, + 0.696 + ], + "angle": 0, + "content": "Editing a facial image should preserve the original identity while performing a meaningful and visually plausible modification. We extend our methods to downstream editing tasks to validate that the 3D GAN inversion process does not degrade the editability of the original generator. We follow StyleCLIP [26] to achieve text-guided semantic editing and StyleGAN-NADA [11] for stylization, shown in Fig. 10. The editing operation not only influences the original view but also changes the novel view's appearance consistently. It demonstrates that our inversion solution retains the properties in the original space of the generator and can be associated with other editing methods flexibly." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.706, + 0.23, + 0.722 + ], + "angle": 0, + "content": "5.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.731, + 0.469, + 0.809 + ], + "angle": 0, + "content": "Adjacent Warping. Recall that we employ depth-guided warping to create pseudo supervision to improve the texture quality of novel views. In Fig. 
11, we can find that this operation can enhance facial component details such as eyelashes and teeth, improving the overall visual quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Depth Regularization. Since supervision signals all come from RGB images, there is no explicit geometry supervision to ensure shape correctness. The shape is prone to drift to overfit the single image. Unnatural distortions will appear in novel views with the drifted shape. In the third column of Fig. 11, the jaw and nose are elongated with no con" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.891, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.334, + 0.866, + 0.35 + ], + "angle": 0, + "content": "Figure 11. Ablation study of different designed modules." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.375, + 0.891, + 0.404 + ], + "angle": 0, + "content": "straints. With depth regularization, geometry will be calibrated within reasonable limits." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.414, + 0.892, + 0.475 + ], + "angle": 0, + "content": "Two-stage Optimization. The joint optimization stage via utilizing a large parameter space can further improve texture, allowing to reconstruct the out-of-domain details, e.g., auspicious mole, as shown in the last column of Fig. 11." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.506, + 0.619, + 0.521 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.892, + 0.673 + ], + "angle": 0, + "content": "We propose a novel 3D GAN inversion method with facial symmetry prior. As demonstrated in massive experiments, our method can support 3D reconstruction at extreme angles with robust geometry. With the designed constraints on texture and geometry, the reconstructed portraits are high-fidelity and possess consistent identity across different views. Besides, the proposed method enables various downstream applications without compromising faithfulness and photorealism." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.681, + 0.892, + 0.786 + ], + "angle": 0, + "content": "Limitation and Future Works. Since the effect of illumination is ignored in our assumption, the illumination is modeled implicitly. During the fitting process of the given image with symmetry prior, light sources sometimes become perfectly symmetrical and distorted. We will attempt to settle the problem via modeling illumination explicitly with albedo and normal in future work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgement. This work was partly supported by the National Natural Science Foundation of China (Grant No. U1903213) and the Shenzhen Science and Technology Program (JCYJ20220818101014030, ZDSYS20200811142605016). This work was partly supported by a UKRI Future Leaders Fellowship [grant number G104084]." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "349" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.472, + 0.226 + ], + "angle": 0, + "content": "[2] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit H Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. arXiv preprint arXiv:2111.15666, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.227, + 0.471, + 0.269 + ], + "angle": 0, + "content": "[3] Qingyan Bai, Yinghao Xu, Jiapeng Zhu, Weihao Xia, Yu-jiu Yang, and Yujun Shen. High-fidelity gan inversion with padding space. arXiv preprint arXiv:2203.11105, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.27, + 0.471, + 0.339 + ], + "angle": 0, + "content": "[4] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional p-gan for single image to neural radiance fields translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3981-3990, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.34, + 0.471, + 0.408 + ], + "angle": 0, + "content": "[5] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, 2022. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.471, + 0.479 + ], + "angle": 0, + "content": "[6] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.48, + 0.471, + 0.549 + ], + "angle": 0, + "content": "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.55, + 0.471, + 0.592 + ], + "angle": 0, + "content": "[8] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.592, + 0.471, + 0.647 + ], + "angle": 0, + "content": "[9] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPR Workshops, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.649, + 0.47, + 0.704 + ], + "angle": 0, + "content": "[10] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3d face model from inthe-wild images. ACM Transactions on Graphics (ToG), 40(4):1-13, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.76 + ], + "angle": 0, + "content": "[11] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.47, + 0.802 + ], + "angle": 0, + "content": "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. In ICLR, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.471, + 0.859 + ], + "angle": 0, + "content": "[13] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5885-5894, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[14] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "[15] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.192 + ], + "angle": 0, + "content": "[16] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.895, + 0.262 + ], + "angle": 0, + "content": "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.305 + ], + "angle": 0, + "content": "[18] Jaehoon Ko, Kyusun Cho, Daewon Choi, Kwangrok Ryoo, and Seungryong Kim. 3d gan inversion with pose optimization. arXiv preprint arXiv:2210.07301, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.306, + 0.892, + 0.348 + ], + "angle": 0, + "content": "[19] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.892, + 0.406 + ], + "angle": 0, + "content": "[20] Roey Mechrez, Itamar Talmi, and Lihi Zelnik-Manor. The contextual loss for image transformation with non-aligned data. In Proceedings of the European conference on computer vision (ECCV), pages 768-783, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.407, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[21] Youssef A Mejjati, Isa Milefchik, Aaron Gokaslan, Oliver Wang, Kwang In Kim, and James Tompkin. Gaussian: Controllable image synthesis with 3d gaussians from unposed silhouettes. arXiv preprint arXiv:2106.13215, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.545 + ], + "angle": 0, + "content": "[22] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.892, + 0.631 + ], + "angle": 0, + "content": "[23] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.892, + 0.702 + ], + "angle": 0, + "content": "[24] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.703, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[25] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.892, + 0.856 + ], + "angle": 0, + "content": "[26] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.858, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learn-" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.895, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "350" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "ing transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.137, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[28] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 1, 2, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.194, + 0.469, + 0.235 + ], + "angle": 0, + "content": "[29] Yujun Shen, Jinjin Gu, Xiaou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.239, + 0.469, + 0.293 + ], + "angle": 0, + "content": "[30] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. arXiv preprint arXiv:2205.15517, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.297, + 0.469, + 0.365 + ], + "angle": 0, + "content": "[31] Feitong Tan, Sean Fanello, Abhimitra Meka, Sergio Orts-Escolano, Danhang Tang, Rohit Pandey, Jonathan Taylor, Ping Tan, and Yinda Zhang. Volux-gan: A generative model for 3d face synthesis with hdri relighting. arXiv preprint arXiv:2201.04873, 2022. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.368, + 0.469, + 0.41 + ], + "angle": 0, + "content": "[32] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. TOG, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.413, + 0.469, + 0.467 + ], + "angle": 0, + "content": "[33] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.47, + 0.469, + 0.512 + ], + "angle": 0, + "content": "[34] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. arXiv preprint arXiv:2109.06590, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.514, + 0.47, + 0.583 + ], + "angle": 0, + "content": "[35] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1-10, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.586, + 0.469, + 0.641 + ], + "angle": 0, + "content": "[36] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. arXiv preprint arXiv:2206.07255, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.644, + 0.469, + 0.713 + ], + "angle": 0, + "content": "[37] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18430-18439, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.469, + 0.784 + ], + "angle": 0, + "content": "[38] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.787, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[39] Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pretrained stylegan. In European conference on computer vision, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[40] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 4, 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.174 + ], + "angle": 0, + "content": "[41] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.178, + 0.892, + 0.231 + ], + "angle": 0, + "content": "[42] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.51, + 0.957 + ], + "angle": 0, + "content": "351" + } + ] +] \ No newline at end of file diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_origin.pdf b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..17a041252c2737f96d3596df45b5bba313b2e45e --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/02a489c6-c89c-4dc3-afcb-600bfa013373_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5015115cacda66cd1a115f68013580ad6b85d8f97da5bd953fe1ba57306859e2 +size 2397676 diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/full.md b/2023/3D GAN Inversion With Facial Symmetry Prior/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d640c68f20b137147a47829a14fbcd767c500f7f --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/full.md @@ -0,0 +1,348 @@ +# 3D GAN Inversion with Facial Symmetry Prior + +Fei Yin $^{1}$ , Yong Zhang $^{2\dagger}$ , Xuan Wang $^{3}$ , Tengfei Wang $^{4}$ , Xiaoyu Li $^{2}$ , Yuan Gong $^{1}$ , Yanbo Fan $^{2}$ , Xiaodong Cun $^{2}$ , Ying Shan $^{2}$ , Cengiz Öztireli $^{5}$ , Yujiu Yang $^{1\dagger}$ , Shenzhen International Graduate School, Tsinghua University + $^{2}$ Tencent AI Lab $^{3}$ Ant Group $^{4}$ HKUST $^{5}$ University of Cambridge + +# Abstract + +Recently, a surge of high-quality 3D-aware GANs have been proposed, which leverage the generative power of neural rendering. It is natural to associate 3D GANs with GAN inversion methods to project a real image into the generator's latent space, allowing free-view consistent synthesis and editing, referred as 3D GAN inversion. Although with the facial prior preserved in pre-trained 3D GANs, reconstructing a 3D portrait with only one monocular image is still an ill-posed problem. The straightforward application of 2D GAN inversion methods focuses on texture similarity only while ignoring the correctness of 3D geometry shapes. It may raise geometry collapse effects, especially when reconstructing a side face under an extreme pose. Besides, the synthetic results in novel views are prone to be blurry. In this work, we propose a novel method to promote 3D GAN inversion by introducing facial symmetry prior. We design a pipeline and constraints to make full use of the pseudo auxiliary view obtained via image flipping, which helps obtain a view-consistent and well-structured geometry shape during the inversion process. To enhance texture fidelity in unobserved viewpoints, pseudo labels from depth-guided 3D warping can provide extra supervision. We design constraints to filter out conflict areas for optimization in asymmetric situations. Comprehensive quantitative and qualitative evaluations on image reconstruction and editing demonstrate the superiority of our method. + +# 1. Introduction + +Recent 3D-aware generative adversarial networks (3D GANs) have seen immense progress. 
By incorporating a neural rendering engine into the generator network architecture, 3D GANs can synthesize view-consistent images. To increase the generation resolution, existing methods [5,12,25,30,31,36-38,41] boost the 3D inductive bias

![](images/9260a43193a7b1a051371d2fff12dabdcb84fd7ca87930b2bc075b9a6bd50a9b.jpg)
Figure 1. Visual examples of our inversion method. Directly applying a 2D GAN inversion method (PTI [28]) to the 3D GAN suffers from inaccurate geometry in novel views. Our method excels in synthesizing consistent geometry and high-fidelity texture in different views, even reconstructing a face under an extreme pose.

with an additional 2D CNN-based upsampler or an efficient 3D representation modeling method. With tremendous effort, 3D GANs can produce photorealistic images while enforcing strong 3D consistency across different views.

We are interested in the task of reconstructing a human face with 3D geometry and texture given only one monocular image. This is an ill-posed problem, and it is close to the harsh conditions of real-world scenarios. With the power of 3D GANs, it seems achievable by projecting a target image onto the manifold of a pre-trained generator, a process referred to as 3D GAN inversion. A straightforward path is to follow the 2D GAN inversion method [28], i.e., optimizing the latent code and the network parameters of the generator to overfit the specific portrait.

However, since the ground-truth 3D geometry is absent given one monocular image, the inversion result is far from satisfactory. The process of fitting a 3D GAN to one image sacrifices geometric correctness in order to make the synthetic texture as close as possible to the input, and may even destroy the original semantic-rich latent space. As the optimization proceeds, the face geometry tends to degenerate into a flattened shape due to the absence of geometry supervision, e.g., images from other views. Besides, there exist quality issues in texture synthesis under novel views: the rendered images of unseen views tend to be blurry and inconsistent with the original image, especially when reconstructing a side face under an extreme pose, because there is no texture supervision for unseen views given only one monocular image. The failure cases of directly applying [28] are illustrated in Fig. 1.

In this work, to alleviate the issue caused by missing geometry and texture supervision under multiple views, we propose a novel 3D GAN inversion approach that takes full advantage of the facial symmetry prior to construct pseudo supervision of different views. Intuitively, we note that human faces are almost symmetric. Assuming the given portrait is symmetric, we can obtain an additional perspective of the portrait by simply mirroring the image. The images of two distinct views can provide geometric relations between the 3D points and their 2D projections based on epipolar geometry. Motivated by this, we seek to leverage facial symmetry as the geometric prior constraining the inversion. The symmetry prior is also employed in a traditional 3D reconstruction work [35]. We leverage the mirrored image as extra supervision of another view when performing the inversion, which prevents geometry collapse. A rough geometry can be obtained by the inversion with the original and mirror images.

To further enhance texture quality and geometry in novel views, we employ depth-guided 3D warping to generate the pseudo images of the views surrounding the input and symmetric camera pose.
The depth is inferred from the rough 3D volume. The original image, along with the pseudo images, is used to fine-tune the generator's parameters for the joint promotion of texture and geometry. To prevent the optimized geometry from deviating too much from the rough geometry, we design a geometry regularization term as a constraint. However, human faces are never fully symmetric in practice, neither in shape nor in appearance. Therefore, we design several constraints to extract meaningful information adaptively from the mirror image without compromising the original reconstruction quality.

Our main contributions are as follows:

- We propose a novel 3D GAN inversion method by incorporating the facial symmetry prior. It enables a high-quality reconstruction while preserving multi-view consistency in geometry and texture.

- We conduct comprehensive experiments to demonstrate the effectiveness of our method and compare it with many state-of-the-art inversion methods. We also apply our method to various downstream applications.

# 2. Related Work

# 2.1. 3D-Aware GANs

Recently, neural scene representations have incorporated 3D priors into image synthesis with explicit camera control. Inspired by the success of Neural Radiance Fields (NeRF) [22], [6,24] employ an implicit volumetric neural rendering structure for consistent novel view synthesis, requiring only unconstrained monocular images for training. To overcome the computational cost and raise the generation resolution, the following methods adopt a two-stage rendering process [5, 12, 21, 25, 30, 31, 37, 38, 41, 42]. Since 2D upsamplers may introduce view-inconsistent artifacts, NeRF path regularization [12] and dual discriminators [5] are proposed. Different 3D modeling representations are further designed for scalable and fast rendering: EG3D [5] introduces a tri-plane representation, and GRAM-HD [36] proposes to render radiance manifolds first for efficient sampling. Building on these powerful high-fidelity unconditional 3D GANs, we can achieve real-image 3D reconstruction and editing. Specifically, we select the state-of-the-art EG3D [5] as our backbone.

# 2.2. GAN Inversion

To edit a real image [29, 39], GAN inversion is applied first to discover a corresponding latent code from which the generator can synthesize the real image. Existing 2D GAN inversion approaches can be categorized into optimization-based, learning-based, and hybrid methods. [1, 16] directly minimize the reconstruction distance by optimizing the latent codes. Learning-based methods [2, 3, 32, 34] exploit a general encoder network to map the input image into the latent space in real time. Hybrid methods apply the latent code predicted by the encoder as initialization for a later optimization process. Beyond the original inversion latent space, PTI [28] further optimizes the parameters of the generator to enhance visual fidelity.

As for the 3D GAN inversion task, most methods directly transfer the 2D methods, e.g., PTI [28] and e4e [32], which may suffer from poor results in novel views. Pix2NeRF [4] introduces a joint distillation strategy for training a 3D inversion encoder. A concurrent work [18] proposes to perform camera pose optimization simultaneously to ensure view consistency. However, none of the above methods take the geometry shape into consideration.

![](images/d75a1f0e84d6476ff667bcfbebf14831b191b283b1dc5e67fdd592cbabc96bd2.jpg)
Figure 2. The proposed framework.
A) Our method first performs inversion with the help of the symmetry view to obtain the latent code $w^{+}$ with a roughly correct geometry. B) The original image and the mirror one, along with adjacent warping pseudos, are used for joint optimization to enhance the geometry and texture of rendered images in novel views. C) Depth-guided 3D warping is used to generate pseudo images in novel views to provide extra supervision. Unfaithful regions are filtered out with the authentic mask.

![](images/962dca2b8aea70d9409e2bf2c19d3023a6a0d158b6e429885d6d85077916bff9.jpg)

![](images/617c2fdcc313ba70621dcd2ad8b8b0e39aaf1a3d8a6e574efb070d9c5e7b9ead.jpg)

# 2.3. Few-shot NeRF

Few-shot NeRF aims at reconstructing general 3D scenes where only a few observed views are available, which shares a similar setting with 3D GAN inversion. MVS-NeRF [7] leverages plane-swept cost volumes in multi-view stereo for geometry-aware scene reasoning to improve performance. DietNeRF [13] enforces semantic consistency between images rendered from unseen views and the seen images via a CLIP encoder [27]. RegNeRF [23] regularizes the texture of patches rendered from unobserved viewpoints without relying on additional training modules. Since it is hard to find a common prior for general scenes, these methods investigate how to ensure the geometry consistency of different views, which inspires our work.

# 3. Definition of 3D GAN Inversion

Similar to 2D GAN inversion, 3D GAN inversion aims to project an input image $I$ onto the manifold of a pretrained unconditional 3D GAN model $G_{\mathrm{3D}}(\cdot ;\theta)$ parameterized by weight $\theta$ . After inversion, $G_{\mathrm{3D}}$ can reconstruct the image faithfully given the corresponding camera pose, synthesize content-consistent images in novel views, and facilitate downstream tasks like face editing. One formulation of the 3D GAN inversion problem is defined as follows:

$$
w^{*} = \underset{w}{\arg\min}\ \mathcal{L}\left(G_{3\mathrm{D}}(w, \pi; \theta), I\right), \tag{1}
$$

where $w$ is the latent representation in $\mathcal{W}^+$ space and $\pi$ is the corresponding camera matrix of the input image. The loss function $\mathcal{L}(\cdot, \cdot)$ is usually defined as a pixel-wise reconstruction loss or a perceptual loss. In our setting, the camera matrix $\pi$ is known, as it is extracted by a pre-trained detector [9]. This formulation only optimizes over the $\mathcal{W}^+$ space. However, inversion in the $\mathcal{W}^+$ space alone is usually not enough to capture local facial details, resulting in inaccurate reconstruction.

Following the recent optimization-based 2D GAN inversion method [28], we perform the inversion in an extended latent space for more accurate reconstruction, i.e., the combination of the $\mathcal{W}^{+}$ space and the parameter space. The formulation is defined as:

$$
w^{*}, \theta^{*} = \underset{w, \theta}{\arg\min}\ \mathcal{L}\left(G_{3\mathrm{D}}(w, \pi; \theta), I\right). \tag{2}
$$

Note that $w$ and $\theta$ are optimized alternately, i.e., $w$ is optimized using Eq. (1) first, and then $\theta$ is optimized with the fixed $w^{*}$.
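To make this two-stage formulation concrete, the following PyTorch-style sketch illustrates the alternating optimization of Eqs. (1) and (2). The generator interface `G(w, cam)`, the perceptual-distance module `lpips_fn`, and the initialization `w_avg` are illustrative placeholders rather than our actual implementation; the default step counts echo the 500 latent-optimization steps and 1,000 generator fine-tuning steps used in our comparisons (Sec. 5.1), while the learning rates are only indicative.

```python
import torch

def invert_3d_gan(G, I_s, cam_s, lpips_fn, w_avg,
                  n_w_steps=500, n_theta_steps=1000, lr_w=1e-2, lr_theta=3e-4):
    """Minimal sketch of the two-stage inversion in Eqs. (1)-(2).

    G        : pre-trained 3D generator; G(w, cam) renders an image (assumed interface).
    I_s      : input image tensor of shape (1, 3, H, W).
    cam_s    : known camera matrix of the input image.
    lpips_fn : perceptual distance, e.g. an LPIPS module.
    w_avg    : average W+ latent used as initialization.
    """
    # Stage 1 (Eq. 1): optimize the latent code w with the generator frozen.
    w = w_avg.clone().requires_grad_(True)
    opt_w = torch.optim.Adam([w], lr=lr_w)
    for _ in range(n_w_steps):
        loss = lpips_fn(G(w, cam_s), I_s).mean()
        opt_w.zero_grad()
        loss.backward()
        opt_w.step()

    # Stage 2 (Eq. 2): freeze w* and fine-tune the generator parameters theta.
    w = w.detach()
    opt_theta = torch.optim.Adam(G.parameters(), lr=lr_theta)
    for _ in range(n_theta_steps):
        loss = lpips_fn(G(w, cam_s), I_s).mean()
        opt_theta.zero_grad()
        loss.backward()
        opt_theta.step()
    return w, G
```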
# 4. The Proposed Approach

Our goal is to reconstruct a human face through a pretrained 3D GAN given a single monocular image. The reconstruction should preserve authentic appearance texture and geometry in novel views. Due to the limited information about geometry and texture in a single image, overfitting a single view tends to be trapped in geometry collapse, produces blurry texture, and misses details in unseen views, especially when reconstructing a side face under an extreme pose. To overcome the issue of lacking information about other views, we introduce the facial symmetry prior to promote the inversion. We propose a two-stage inversion pipeline, i.e., inversion for rough geometry and joint optimization of geometry and texture. In the first stage, we obtain a rough geometry by optimizing the latent code $w$ using the original and mirror images in Sec. 4.1. In the second stage, we refine the geometry and texture by optimizing the parameters $\theta$ with the depth-guided 3D warping and a set of designed constraints in Sec. 4.2. An overview of our method is shown in Fig. 2.

# 4.1. Inversion with Symmetry for Rough Geometry

![](images/0f5db3830d58f25681435a12480e407a989c5cef01d562.jpg)
Figure 3. Visualization of warped pseudos. The red bounding box contains the range of employed pseudos, depending on the yaw angle of the input image. A frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudos.

![](images/42c87874b858d5fe2f82d16dea9111ba8e70fe1b34c608585d3396830611401c.jpg)
Source Image

![](images/be838299f22ea3da41917e8e21bad9d732202582cb08a69961d057302938841a.jpg)
Warped Image

![](images/32e55225f4c19c8ebfad56bc936e8466d3b4936d4665531f0c1b819be3ca68f4.jpg)
Authentic Mask

![](images/c00774424ba472ae986435bbc55c6eddacfd8e638237193878207153cf037ce7.jpg)
Pseudo

Figure 4. Visualization of the authentic mask and warped pseudo.

The purpose of this stage is to learn a rough geometry as a pivot for further tuning. To compensate for the missing information of unseen views, we resort to the facial symmetry prior, i.e., the left half of a face is almost the same as the right half. We simply flip the input image $I_{s}$ horizontally to get the mirror image $I_{m}$, whose corresponding camera pose $\pi_{m}$ can be calculated by multiplying a fixed matrix with the camera extrinsic parameters of $\pi_{s}$. The intrinsic parameters are unchanged. The mirror image serves as the pseudo-projected image under a novel view.

Since human faces are not always perfectly symmetric, the mirror image is just an approximation under the novel view. There exists inconsistent content between the original image and the mirror one if they have an overlapping face region, i.e., different colors at the same position, referred to as conflict content. The inversion should depend more on the original image and selectively take useful information from the mirror one. Furthermore, we observe that a frontal face can provide more effective information than a side face. A nearly frontal face provides plenty of facial information, and we should rely less on its mirror image to avoid conflicts in the overlapping region. In contrast, a side face provides information for only half of the face, but it has only a small overlapping conflict region with its mirror image; hence, we should rely more on the mirror image. We exploit an adaptive weighting strategy for the importance of the mirror image according to its yaw angle $\alpha_{\mathrm{yaw}}$. We use a Gaussian function with respect to $\alpha_{\mathrm{yaw}}$ to approximate the importance of different views. The weight $\lambda_{m}$ of the mirror image is defined as:

$$
\mathcal{E}(x) = \frac{1}{\sigma \sqrt{2\pi}} e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}, \tag{3}
$$

$$
\lambda_{m} = \begin{cases} 1 - \mathcal{E}(\alpha_{\text{yaw}}), & \text{if } \mathcal{E}(\alpha_{\text{yaw}}) \leq k; \\ 0, & \text{if } \mathcal{E}(\alpha_{\text{yaw}}) > k, \end{cases} \tag{4}
$$

where $\sigma$, $\mu$, and $k$ are hyper-parameters. Since a nearly frontal mirror face can provide only very limited extra information beyond the original image, its weight $\lambda_{m}$ is clamped to 0 in that case.
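As a concrete reading of Eqs. (3)–(4), the sketch below computes the mirror weight; the default values of $\sigma$, $\mu$, and $k$ are illustrative placeholders, since their actual settings are not listed in this section.

```python
import math

def mirror_weight(alpha_yaw: float, sigma: float = 10.0, mu: float = 0.0, k: float = 0.02) -> float:
    """Adaptive weight of the mirror image (Eqs. 3-4).

    alpha_yaw    : yaw angle of the input face (e.g., in degrees).
    sigma, mu, k : hyper-parameters; the defaults here are placeholders, not the paper's values.
    """
    # Eq. 3: Gaussian importance of the input view as a function of its yaw angle.
    e = math.exp(-((alpha_yaw - mu) ** 2) / (2.0 * sigma ** 2)) / (sigma * math.sqrt(2.0 * math.pi))
    # Eq. 4: a nearly frontal input (large e) makes the mirror view redundant, so its weight
    # is clamped to 0; otherwise the mirror image is trusted more as the face turns sideways.
    return 1.0 - e if e <= k else 0.0
```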
Since human faces are not always perfectly symmetric, the mirror image is only an approximation of the novel view. There exists inconsistent content between the original image and the mirror one wherever they overlap on the face region, i.e., different colors at the same position, referred to as conflict content. The inversion should depend more on the original image and take only part of the useful information from the mirror one. Furthermore, we observe that a frontal face provides more effective information than a side face. A nearly frontal face provides plenty of facial information, so we should rely less on its mirror image to avoid conflicts in the overlapping region. A side face, in contrast, provides information for only half of the face, but it has only a small overlapping conflict region with its mirror image, so we should rely more on the mirror image. We therefore exploit an adaptive weighting strategy for the importance of the mirror image according to its yaw angle $\alpha_{\mathrm{yaw}}$, using a Gaussian function of $\alpha_{\mathrm{yaw}}$ to approximate the importance of different views. The weight $\lambda_{m}$ of the mirror image is defined as:

$$
\mathcal{E}(x) = \frac{1}{\sigma \sqrt{2\pi}} e^{-\frac{(x - \mu)^{2}}{2\sigma^{2}}}, \tag{3}
$$

$$
\lambda_{m} = \begin{cases} 1 - \mathcal{E}(\alpha_{\mathrm{yaw}}), & \text{if } \mathcal{E}(\alpha_{\mathrm{yaw}}) \leq k; \\ 0, & \text{if } \mathcal{E}(\alpha_{\mathrm{yaw}}) > k, \end{cases} \tag{4}
$$

where $\sigma$, $\mu$ and $k$ are hyper-parameters. As the mirror of a nearly frontal face provides very limited extra information beyond the original image, its weight $\lambda_{m}$ is clamped to 0.

To optimize the latent code in the $\mathcal{W}^+$ space, the perceptual loss [40] is used to minimize the distance between the generated results and the original and mirror images. Following [17, 28], a noise regularization term $\mathcal{L}_n(n)$ is employed to prevent the noise vector from containing vital information. The objective of this stage is defined as follows:

$$
\mathcal{L}_{\mathrm{inv}} = \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right) + \lambda_{m} \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{m}; \theta), I_{m}\right) + \lambda_{n} \mathcal{L}_{n}(n), \tag{5}
$$

where $n$ is the noise vector and $\lambda_{n}$ is a trade-off parameter. The generator is kept frozen at this stage. The visual illustrations in Fig. 8 show that the geometry can be greatly improved with the facial symmetry prior.
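A minimal sketch of the stage-one objective (Eqs. (3)-(5)) is given below. The yaw is assumed to be in radians, the values of `sigma`, `mu`, `k`, and `lam_n` are placeholders rather than the paper's settings, and `G3d`, `lpips_loss`, and `noise_reg` are assumed callables.

```python
import math

def mirror_weight(yaw, sigma=0.3, mu=0.0, k=1.0):
    """Adaptive weight of the mirror image (Eqs. 3-4); yaw in radians."""
    e = math.exp(-(yaw - mu) ** 2 / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))
    return 1.0 - e if e <= k else 0.0   # clamped to 0 for near-frontal faces

def stage1_loss(G3d, w, pi_s, pi_m, I_s, I_m, yaw, noise,
                lpips_loss, noise_reg, lam_n=1e-5):
    """Stage-one objective (Eq. 5): LPIPS to the original view plus a
    yaw-weighted LPIPS to the mirror view and a noise regularizer."""
    lam_m = mirror_weight(yaw)
    loss = lpips_loss(G3d(w, pi_s), I_s)
    loss = loss + lam_m * lpips_loss(G3d(w, pi_m), I_m)
    loss = loss + lam_n * noise_reg(noise)
    return loss
```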
# 4.2. Joint Optimization of Geometry and Texture

Though we obtain a rough geometry via the optimization of $w$ in the first stage, there is a distinct gap between the texture of the rendered face and that of the original one, even under the same camera pose. The rendered face shares a similar geometry with the original one, but it appears as a different identity. In this stage, we optimize the generator's parameters $\theta$ to bridge the texture gap for identity preservation and to refine the rough geometry as well. We design a geometry regularization constraint to prevent the model from degenerating into a flattened geometry. Moreover, we construct a set of pseudo images in different views to provide supervision via depth-guided 3D warping.

Geometry Regularization. We observe that optimizing the generator without any constraint on the geometry causes the geometry to deviate from the rough one, resulting in a flattened geometry similar to the case of inversion with a single image. To avoid geometry drift while overfitting the texture, we regularize the optimized density obtained from the 3D volume of the 3D GAN to stay similar to that of the rough volume obtained in the first stage. Specifically, with the fixed $w$, we generate depth maps $D$ from the 3D GAN under different sampled views and calculate the $\mathcal{L}_2$ distance to the corresponding depth maps $D_0$ generated by the un-tuned generator from the first stage:

$$
\mathcal{L}_{\mathrm{depth}} = \sum_{i \in \mathbb{S}} \left\| D^{i} - D_{0}^{i} \right\|_{2}, \tag{6}
$$

where $\mathbb{S}$ is the sampled camera pose set.

Depth-guided 3D Warping for Pseudo Supervision. Optimizing the generator with only two images is still not enough to capture the facial details, resulting in blurry effects around facial components such as eyes (see Fig. 11). Hence, we propose to construct pseudo images of different views for extra supervision using the rough geometry and the original and mirror images. Specifically, given the original image (source view) and the rough geometry, we can synthesize an image under a novel view (target view) by warping with 3D guidance. A pixel coordinate $p_t$ of the synthesized image in the target view is obtained by projecting it back onto the source view with the relative camera pose $\pi_{t\rightarrow s}$ and the camera intrinsic parameters $K$:

$$
p_{t \rightarrow s} = K \pi_{t \rightarrow s} D_{t}(p_{t}) K^{-1} p_{t}, \tag{7}
$$

where $D_{t}(\cdot)$ is the depth map of the target view. Since the projected coordinates $p_{t\rightarrow s}$ are continuous values, we extract the color values from the original image with a differentiable bilinear sampling mechanism, i.e., $I_{s\rightarrow t} = I_s(p_{t\rightarrow s})$. The low-resolution depth map is upsampled to match the dimension of the image.

Authentic Mask. Without distinguishing the foreground pixels from the background, the background pixels in the original image may be projected onto the foreground plane, leading to erroneous results. To overcome this issue, we form a mask that indicates pixel visibility and filters out invisible areas using the rendered depth values. Specifically, we obtain the projected depth value $D_{s}(p_{t\rightarrow s})$ by sampling from the depth map of the source view. We then use the Euclidean distance between $D_{s}(p_{t\rightarrow s})$ and the target-view depth $D_{t}(p_{t})$ to compute the mask. A large distance indicates that the pixel $p_t$ is invisible. To ensure the projected pixels are located on the front visible surface, we only preserve the area where the distance is below a threshold $\tau$:

$$
M(p_{t}) = \left\| D_{t}(p_{t}) - D_{s}(p_{t \rightarrow s}) \right\| < \tau. \tag{8}
$$

Furthermore, due to the poor depth estimation of the background, only the facial region is warped. We warp the facial mask of the source view to the target view and multiply it with the visibility mask $M(p_{t})$ to get the authentic mask $M_{t}$. An example is shown in Fig. 4. After multiplying the mask $M_{t}$ with the warped image $I_{s\rightarrow t}$, the resulting image can be used for supervision.

Adjacent View Warping. Fig. 3 illustrates the warping results of two examples. When the yaw angle between the source and target views increases, the warping results show more distortions and become less authentic. Therefore, it is intuitive to abandon the pseudo images of target views that deviate a lot from the source view. Empirically, a frontal face can be warped over a wider range of yaw angles than a side face while still producing authentic pseudo images. The variance of the yaw angles sampled for constructing pseudo images is set to a fixed ratio of $\lambda_{m}$, which depends on the viewpoint as described in Sec. 4.1. The LPIPS loss [14] is used to compute the multi-view pixel-wise distance as follows:

$$
\mathcal{L}_{\mathrm{adj}} = \mathcal{L}_{\mathrm{LPIPS}}\left(M_{t} \cdot G_{\mathrm{3D}}(w, \pi_{t}; \theta),\ M_{t} \cdot I_{s \rightarrow t}\right). \tag{9}
$$

Although pseudo images of several unseen views adjacent to the source view have been constructed, they bring only marginal improvements for remote views. Especially for a side face, the pseudo images of remote views are blurry and have incomplete texture (see Fig. 3). Therefore, we also construct pseudo images of the views adjacent to the view of the mirror image.
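A PyTorch-style sketch of the warping and masking operations of Eqs. (7)-(8) is shown below, purely for illustration. The tensor shapes, the threshold value `tau`, and the use of `grid_sample` for the bilinear sampling are assumptions of this sketch; it is a re-implementation of the idea, not the authors' code.

```python
import torch
import torch.nn.functional as F

def warp_source_to_target(I_s, D_t, D_s, K, T_t2s, tau=0.05):
    """Inverse-warp the source image to the target view (Eq. 7) and build the
    visibility part of the authentic mask (Eq. 8).
    I_s: (1,3,H,W) source image; D_t, D_s: (1,1,H,W) rendered depth maps
    (already upsampled to image resolution); K: (3,3) intrinsics;
    T_t2s: (4,4) relative target-to-source camera pose."""
    _, _, H, W = I_s.shape
    device = I_s.device

    # Homogeneous pixel grid of the target view.
    ys, xs = torch.meshgrid(torch.arange(H, device=device),
                            torch.arange(W, device=device), indexing="ij")
    pix = torch.stack([xs, ys, torch.ones_like(xs)], dim=0).float().reshape(3, -1)

    # Back-project with the target depth and move the points to the source frame.
    cam_t = torch.linalg.inv(K) @ pix * D_t.reshape(1, -1)
    cam_t_h = torch.cat([cam_t, torch.ones(1, H * W, device=device)], dim=0)
    cam_s = (T_t2s @ cam_t_h)[:3]

    # Project into the source image plane (Eq. 7).
    proj = K @ cam_s
    z_s = proj[2].clamp(min=1e-6)
    u, v = proj[0] / z_s, proj[1] / z_s

    # Differentiable bilinear sampling of source colors at p_{t->s}.
    grid = torch.stack([2 * u / (W - 1) - 1, 2 * v / (H - 1) - 1], dim=-1)
    grid = grid.reshape(1, H, W, 2)
    I_s2t = F.grid_sample(I_s, grid, align_corners=True)

    # Visibility mask (Eq. 8): keep pixels whose target depth agrees with the
    # source depth sampled at the projected location, within threshold tau.
    D_s_proj = F.grid_sample(D_s, grid, align_corners=True)
    mask = ((D_t - D_s_proj).abs() < tau).float()
    return I_s2t, mask
```

In the full pipeline, this visibility mask would additionally be multiplied with the warped facial mask to form $M_t$, and $M_t \cdot I_{s\rightarrow t}$ then supervises $\mathcal{L}_{\mathrm{adj}}$ in Eq. (9).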
Since the conflict region between the original and mirror images has a side effect on the generator optimization process, resulting in blurry effects on the rendered images even when reconstructing the source view (see Fig. 9), we propose to take only part of the meaningful information from the symmetric views without harming the original inversion quality. We compute the similarities only for facial components, rather than the whole face region. Besides, instead of using a pixel-wise loss, we exploit the contextual loss [20] to improve the texture quality. The loss for symmetric views is defined as:

$$
\mathcal{L}_{\mathrm{sym}} = \sum_{c \in \mathbb{F}} \mathcal{L}_{\mathrm{CX}}\left(\mathrm{ROI}^{c}\left(G_{\mathrm{3D}}(w, \pi_{t}; \theta)\right), \mathrm{ROI}^{c}\left(I_{m \rightarrow t}\right)\right), \tag{10}
$$

where $I_{m\rightarrow t}$ is the pseudo image of the viewpoint $\pi_t$ warped from the mirror image $I_{m}$, and $\mathrm{ROI}^c(\cdot)$ extracts the region of interest of component $c$ from the collection $\mathbb{F} = \{\text{eyes, nose, mouth}\}$.

The reconstruction loss between the original image and its corresponding rendered image is still used to ensure the quality of the original view, and is defined as:

$$
\mathcal{L}_{\mathrm{ori}} = \mathcal{L}_{2}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right). \tag{11}
$$

The overall objective for optimizing the generator's parameters is defined as:

$$
\mathcal{L}_{\mathrm{opt}} = \mathcal{L}_{\mathrm{ori}} + \lambda_{\mathrm{adj}} \mathcal{L}_{\mathrm{adj}} + \lambda_{\mathrm{sym}} \mathcal{L}_{\mathrm{sym}} + \lambda_{\mathrm{depth}} \mathcal{L}_{\mathrm{depth}}. \tag{12}
$$

The trade-off hyper-parameters are set as follows: $\lambda_{\mathrm{adj}} = 0.1$, $\lambda_{\mathrm{sym}} = 0.05$, and $\lambda_{\mathrm{depth}} = 1$.

# 5. Experiments

# 5.1. Experimental Settings

Datasets. We conduct the experiments on human face datasets. For all experiments, we select EG3D [5] as our 3D GAN prior, which is pre-trained on the FFHQ dataset [15]. We evaluated quantitative metrics on the CelebA-HQ test set [19].
We further evaluated on MEAD [33], a + +![](images/1470c26a073523eeed04b623eec00b3a4506c2f62ea9d69dad6437cf3de65479.jpg) +SG2 + +![](images/6f39e9d126fa143ebda910254e2cd452fc4782e5fb5112c75a624b98dfc3f054.jpg) +SG2 $W^{+}$ + +![](images/8dae65f2f2a02f5c53ea8ac3cb13de8615d53ec51bbdd1fecc7008652fa909a6.jpg) +PTI + +![](images/33e8f1cb68534e6bd42f66de2805b7e39eb7c0c210af7c41ec2c04d775f800dd.jpg) +Ours + +![](images/8212cc13a848a7c393a0b90e1ac2e011acd090eac9c9eb3f7d04f9c59d2a0e00.jpg) +Source Image + +![](images/635c354de2acf40041996d77ca926dcf9f864cce12a4dfd408101568c6f69d9b.jpg) +Source Image + +![](images/a109649a33cb0f0a26d936e4cc64438d53480ab09ccd92b9a91660b7acce29d3.jpg) +SG2 + +![](images/2e65c836a8e794365c38d4e39c1ff2acc56f59cc49fece8bcd124bcbb4a5c1ca.jpg) +SG2 $W^{+}$ + +![](images/f29b17df448d553e0cb66f8bdd7006215fcd2889a15267c75bc1c83c65980209.jpg) +PTI + +![](images/b40919e94b2a054a78398d3044dd4babad9cd9fbde75bff6fed0dc54feafafb6.jpg) +Ours + +
| Method | MSE ↓ | LPIPS ↓ | MS-SSIM ↓ | ID ↑ | Pose ↓ | Depth ↓ |
| :-- | :-- | :-- | :-- | :-- | :-- | :-- |
| SG2 [16] | 0.0881 | 0.3231 | 0.3557 | 0.8209 | 0.043 | 0.0505 |
| SG2 $\mathcal{W}^+$ [1] | 0.0439 | 0.2261 | 0.2483 | 0.8735 | 0.040 | 0.0500 |
| PTI [28] | 0.0084 | 0.0920 | 0.0980 | 0.9432 | 0.037 | 0.0510 |
| SPI (Ours) | 0.0082 | 0.0865 | 0.0991 | 0.9470 | 0.036 | 0.0476 |
Table 1. Quantitative comparison on CelebA-HQ [19].

multi-view high-quality video dataset. The first frame from each viewpoint video of 10 identities is extracted for testing.

Metrics. We evaluate image reconstruction quality and similarity with the following metrics: mean squared error (MSE), perceptual similarity loss (LPIPS) [40], structural similarity (MS-SSIM), and identity similarity (ID) computed by a pre-trained face recognition network [8].

Baselines. We mainly compare our method with optimization-based 2D GAN inversion methods. SG2 [16] directly inverts real images into the $\mathcal{W}$ space with an optimization scheme. [1] extends the inversion into the $\mathcal{W}^+$ space, denoted by SG2 $\mathcal{W}^+$. PTI [28] further tunes the generator parameters in a second stage. For a fair comparison, both PTI and ours first optimize the latent for 500 steps and then fine-tune the generator for 1,000 steps, while SG2 and SG2 $\mathcal{W}^+$ optimize the latent for 1,500 steps.

# 5.2. Reconstruction and Novel View Synthesis

Qualitative Evaluation. Fig. 5 presents a qualitative comparison of texture and geometry quality in different views. For the original view, our method is able to invert challenging details such as earrings, make-up, and wrinkles, which demonstrates that we do not sacrifice the original reconstruction performance. When the camera rotates to novel views, images generated by 2D inversion methods present a twisted appearance due to the nearly flattened geometry. Since SG2 does not deviate too far from the initial GAN space, it can generate a portrait with a structured geometry, but fails to preserve the identity. Our method is capable of maintaining authentic and consistent geometry in novel views along with a sharp appearance, even when rotated to an extreme pose.

![](images/9290fe419202b1600b795c1a2479733fc03d03e00ef18faa70abd8d60f7cfc82.jpg)
Figure 5. Qualitative comparisons with state-of-the-art methods on novel view synthesis. The reconstruction quality of the original view is presented in the first row. The texture and geometry in novel views are shown in the remaining rows.
Figure 6. Comparison of identity preservation in novel views. The x-axis represents the yaw angle of the input image. '0' indicates the frontal face.

Quantitative Evaluation. The reconstruction metrics of the original view are shown in Table 1. As can be seen, the results align with our qualitative evaluation, as we achieve scores comparable to the current 2D state-of-the-art inversion methods [28]. Our MSE, LPIPS, and ID scores are further improved, which can be attributed to the use of the $\mathcal{W}^+$ latent space.

![](images/549688891ff4cf7f4ccba5d7fa7eb4ab784a254da5e92e34c81f613878d4c6be.jpg)
Figure 7. Qualitative comparisons with PTI [28] on MEAD [33].
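For reference, the identity (ID) similarity reported in Tables 1 and 2 can be summarized by the sketch below. `face_encoder` stands in for the pre-trained face recognition network [8], and the cosine-similarity formulation is an assumption of this sketch rather than a detail stated above.

```python
import torch
import torch.nn.functional as F

def identity_similarity(face_encoder, image_a, image_b):
    """ID metric sketch: cosine similarity between face-recognition
    embeddings of two aligned portraits of shape (N, 3, H, W)."""
    emb_a = F.normalize(face_encoder(image_a), dim=-1)
    emb_b = F.normalize(face_encoder(image_b), dim=-1)
    return (emb_a * emb_b).sum(dim=-1)   # per-image similarity in [-1, 1]
```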
| Method | View | MSE ↓ | LPIPS ↓ | MS-SSIM ↓ | ID ↑ |
| :-- | :-- | :-- | :-- | :-- | :-- |
| PTI | F | 0.03204 | 0.2971 | 0.2070 | 0.8445 |
| Ours | F | 0.03296 | 0.3088 | 0.2135 | 0.8388 |
| PTI | L30 | 0.04355 | 0.2992 | 0.2274 | 0.8446 |
| Ours | L30 | 0.03399 | 0.2796 | 0.2025 | 0.8469 |
| PTI | L60 | 0.08255 | 0.3902 | 0.3143 | 0.7568 |
| Ours | L60 | 0.04069 | 0.3113 | 0.2379 | 0.8272 |
| PTI | R30 | 0.04574 | 0.3110 | 0.2393 | 0.8383 |
| Ours | R30 | 0.03203 | 0.2807 | 0.2057 | 0.8529 |
| PTI | R60 | 0.07865 | 0.3829 | 0.3106 | 0.7995 |
| Ours | R60 | 0.04541 | 0.3160 | 0.2400 | 0.8335 |
Table 2. Quantitative comparison on MEAD [33]. View denotes the yaw angle of the input image. F is frontal, L is left side, and R is right side; 30 and 60 are the rotation degrees. Each time we use one view as the inversion input and use all 5 views as ground truth for evaluation. The average performance over the 4 unseen views and 1 seen view is reported.

Following EG3D, we evaluate shape quality by calculating the $\mathcal{L}_2$ distance to pseudo-ground-truth depth maps (Depth) generated from DECA [10] and to poses (Pose) estimated from the synthesized images.

We also use identity similarity to evaluate the identity preservation of the synthesized novel views. Given a portrait, we synthesize a novel-view image under the symmetric camera pose of the portrait. The similarity between the synthesized image and the flipped portrait is then calculated. The results are shown in Fig. 6. It can be observed that when the yaw angle of a portrait is small, all methods perform well with a high similarity score. But when the yaw angle is large, only our method maintains a high score, while the other methods suffer a sharp performance drop due to inaccurate geometry. As we employ the symmetry prior and the adjacent pseudo supervision, the rendered faces better preserve the texture and geometry. These results demonstrate that we achieve an identity-consistent 3D inversion.

Evaluation on MEAD. To get a comprehensive understanding of the performance of our method, we evaluate on MEAD, a multi-view dataset. The quantitative comparison between the reconstructed portraits and the ground truth in different views is shown in Tab. 2. PTI [28] and our method achieve comparable performance when given a frontal portrait. When the view of the input face has an offset from the canonical one, our method clearly surpasses PTI. Our metrics remain stable as the yaw angle becomes larger, while the performance of PTI degrades significantly. The qualitative results are shown in Fig. 7. The geometry of PTI suffers from the flattening phenomenon. In contrast, our method generates consistent geometry and texture in novel views.

![](images/01f64dbb372a527572505301802d38858746ef8b6cc0e5f875f43db08a93c32b.jpg)

![](images/81a45e6fe76e4e1ef8dde0a83a39460da3d2eed14819a8f3290a238cff396eee.jpg)
Figure 8. Ablation study of the facial symmetry prior.
Figure 9. Ablation study of the authentic mask. Vanilla denotes simply using the full mirror image for supervision, while ours filters out conflict areas with the designed constraints.

# 5.3. Evaluation of Symmetry Prior

To understand the importance of the symmetry prior, we perform an ablation study by conducting the inversion with and without the prior. The visual results are shown in Fig. 8. Both approaches obtain good geometries in the original view. However, in the first row, the geometry of the woman with a thin face becomes noticeably inflated as the camera gradually rotates, which is also reflected in the rendered image. The second row shows that the geometry and the rendered image maintain better view consistency. We even find that, with the auxiliary view, some expression details can be strengthened, such as the slightly opened mouth.

The symmetry prior cannot be directly employed in the optimization stage because there exist asymmetric areas in a human face. Optimizing the conflict areas will lead to poor results. As shown in Fig. 9, the slanted hair and the single earring in the source image mismatch those in the mirror one.
In the first row, when simply using both images to optimize the generator, the reconstruction quality suffers from degradation. Novel views synthesized by the vanilla version exhibit incorrect texture and blurry results in the conflict areas. Our method handles such asymmetric cases without quality degradation by filtering out the conflict areas with the designed constraints. Hair, teeth, and other details remain consistent in different views, which validates the effectiveness of the proposed constraints.

![](images/7c547f6157cd26838a1220a87b88dc74faa9bf4c29c6f4faa564171b3ded4081.jpg)
Figure 10. Editing results incorporated with [26] and [11].

# 5.4. View-consistent Face Editing

Editing a facial image should preserve the original identity while performing a meaningful and visually plausible modification. We extend our method to downstream editing tasks to validate that the 3D GAN inversion process does not degrade the editability of the original generator. We follow StyleCLIP [26] to achieve text-guided semantic editing and StyleGAN-NADA [11] for stylization, as shown in Fig. 10. The editing operation not only influences the original view but also changes the novel views' appearance consistently. This demonstrates that our inversion solution retains the properties of the generator's original space and can be flexibly combined with other editing methods.

# 5.5. Ablation Study

Adjacent Warping. Recall that we employ depth-guided warping to create pseudo supervision to improve the texture quality of novel views. Fig. 11 shows that this operation enhances facial component details such as eyelashes and teeth, improving the overall visual quality.

Depth Regularization. Since the supervision signals all come from RGB images, there is no explicit geometry supervision to ensure shape correctness. The shape is prone to drifting in order to overfit the single image, and unnatural distortions then appear in novel views with the drifted shape. In the third column of Fig. 11, the jaw and nose are elongated when no constraint is applied. With depth regularization, the geometry is calibrated within reasonable limits.

![](images/335f4b7c612ab4dd30d80990b3c3e7550938932cfdca8dfed32fdddceb463838.jpg)
Figure 11. Ablation study of the different designed modules.

Two-stage Optimization. The joint optimization stage, which exploits a larger parameter space, can further improve the texture, allowing the reconstruction of out-of-domain details, e.g., the auspicious mole, as shown in the last column of Fig. 11.

# 6. Conclusion

We propose a novel 3D GAN inversion method with a facial symmetry prior. As demonstrated by extensive experiments, our method supports 3D reconstruction at extreme angles with robust geometry. With the designed constraints on texture and geometry, the reconstructed portraits are high-fidelity and possess a consistent identity across different views. Besides, the proposed method enables various downstream applications without compromising faithfulness and photorealism.

Limitations and Future Work. Since the effect of illumination is ignored in our assumptions, illumination is only modeled implicitly. During the fitting process of the given image with the symmetry prior, the light sources sometimes become perfectly symmetric and distorted. We will attempt to address this problem by modeling illumination explicitly with albedo and normals in future work.

Acknowledgement. This work was partly supported by the National Natural Science Foundation of China (Grant No.
U1903213) and the Shenzhen Science and Technology Program (JCYJ20220818101014030, ZDSYS20200811142605016). This work was partly supported by a UKRI Future Leaders Fellowship [grant number G104084]. + +# References + +[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, 2019. 2, 6 +[2] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit H Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. arXiv preprint arXiv:2111.15666, 2021. 2 +[3] Qingyan Bai, Yinghao Xu, Jiapeng Zhu, Weihao Xia, Yu-jiu Yang, and Yujun Shen. High-fidelity gan inversion with padding space. arXiv preprint arXiv:2203.11105, 2022. 2 +[4] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional p-gan for single image to neural radiance fields translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3981-3990, 2022. 2 +[5] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, 2022. 1, 2, 5 +[6] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2 +[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3 +[8] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 6 +[9] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPR Workshops, 2019. 3 +[10] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3d face model from inthe-wild images. ACM Transactions on Graphics (ToG), 40(4):1-13, 2021. 7 +[11] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 8 +[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. In ICLR, 2022. 1, 2 +[13] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5885-5894, 2021. 3 +[14] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 5 + +[15] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5 +[16] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. 
In CVPR, 2020. 2, 6 +[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 4 +[18] Jaehoon Ko, Kyusun Cho, Daewon Choi, Kwangrok Ryoo, and Seungryong Kim. 3d gan inversion with pose optimization. arXiv preprint arXiv:2210.07301, 2022. 2 +[19] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 5, 6 +[20] Roey Mechrez, Itamar Talmi, and Lihi Zelnik-Manor. The contextual loss for image transformation with non-aligned data. In Proceedings of the European conference on computer vision (ECCV), pages 768-783, 2018. 5 +[21] Youssef A Mejjati, Isa Milefchik, Aaron Gokaslan, Oliver Wang, Kwang In Kim, and James Tompkin. Gaussian: Controllable image synthesis with 3d gaussians from unposed silhouettes. arXiv preprint arXiv:2106.13215, 2021. 2 +[22] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2 +[23] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 3 +[24] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2 +[25] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 1, 2 +[26] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 8 +[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learn- + +ing transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3 +[28] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 1, 2, 3, 4, 6, 7 +[29] Yujun Shen, Jinjin Gu, Xiaou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In CVPR, 2020. 2 +[30] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. arXiv preprint arXiv:2205.15517, 2022. 1, 2 +[31] Feitong Tan, Sean Fanello, Abhimitra Meka, Sergio Orts-Escolano, Danhang Tang, Rohit Pandey, Jonathan Taylor, Ping Tan, and Yinda Zhang. Volux-gan: A generative model for 3d face synthesis with hdri relighting. arXiv preprint arXiv:2201.04873, 2022. 1, 2 +[32] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. 
Designing an encoder for stylegan image manipulation. TOG, 2021. 2 +[33] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 5, 7 +[34] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. arXiv preprint arXiv:2109.06590, 2021. 2 +[35] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1-10, 2020. 2 +[36] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. arXiv preprint arXiv:2206.07255, 2022. 1, 2 +[37] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18430-18439, 2022. 1, 2 +[38] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 1, 2 +[39] Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pretrained stylegan. In European conference on computer vision, 2022. 2 +[40] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 4, 6 + +[41] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 1, 2 +[42] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 
2 \ No newline at end of file diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/images.zip b/2023/3D GAN Inversion With Facial Symmetry Prior/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..c02a8c471f03029fe9eeaea94c247d872799cb66 --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ce051e9317d7fd75fadfc4431a5a1f127b43b3d0d15543d9966eff203685d6 +size 708491 diff --git a/2023/3D GAN Inversion With Facial Symmetry Prior/layout.json b/2023/3D GAN Inversion With Facial Symmetry Prior/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8e74ef409fd53277665a637e348c28504ce911a9 --- /dev/null +++ b/2023/3D GAN Inversion With Facial Symmetry Prior/layout.json @@ -0,0 +1,9102 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 150, + 103, + 443, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 103, + 443, + 121 + ], + "spans": [ + { + "bbox": [ + 150, + 103, + 443, + 121 + ], + "type": "text", + "content": "3D GAN Inversion with Facial Symmetry Prior" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": "Fei Yin" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Yong Zhang" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Xuan Wang" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Tengfei Wang" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Xiaoyu Li" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Yuan Gong" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Yanbo Fan" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Xiaodong Cun" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Ying Shan" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Cengiz Öztireli" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Yujiu Yang" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": 
"inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": ", Shenzhen International Graduate School, Tsinghua University \n" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": "Tencent AI Lab " + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": "Ant Group " + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": "HKUST " + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 105, + 142, + 488, + 201 + ], + "type": "text", + "content": "University of Cambridge" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 253, + 290, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 253, + 290, + 565 + ], + "spans": [ + { + "bbox": [ + 47, + 253, + 290, + 565 + ], + "type": "text", + "content": "Recently, a surge of high-quality 3D-aware GANs have been proposed, which leverage the generative power of neural rendering. It is natural to associate 3D GANs with GAN inversion methods to project a real image into the generator's latent space, allowing free-view consistent synthesis and editing, referred as 3D GAN inversion. Although with the facial prior preserved in pre-trained 3D GANs, reconstructing a 3D portrait with only one monocular image is still an ill-posed problem. The straightforward application of 2D GAN inversion methods focuses on texture similarity only while ignoring the correctness of 3D geometry shapes. It may raise geometry collapse effects, especially when reconstructing a side face under an extreme pose. Besides, the synthetic results in novel views are prone to be blurry. In this work, we propose a novel method to promote 3D GAN inversion by introducing facial symmetry prior. We design a pipeline and constraints to make full use of the pseudo auxiliary view obtained via image flipping, which helps obtain a view-consistent and well-structured geometry shape during the inversion process. To enhance texture fidelity in unobserved viewpoints, pseudo labels from depth-guided 3D warping can provide extra supervision. We design constraints to filter out conflict areas for optimization in asymmetric situations. Comprehensive quantitative and qualitative evaluations on image reconstruction and editing demonstrate the superiority of our method." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 590, + 128, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 128, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 128, + 602 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 610, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 610, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 610, + 287, + 684 + ], + "type": "text", + "content": "Recent 3D-aware generative adversarial networks (3D GANs) have seen immense progress. By incorporating a neural rendering engine into the generator network architecture, 3D GANs can synthesize view-consistent images. To increase the generation resolution, existing methods [5,12,25,30,31,36-38,41] boost the 3D inductive bias" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 225, + 545, + 475 + ], + "blocks": [ + { + "bbox": [ + 307, + 225, + 545, + 475 + ], + "lines": [ + { + "bbox": [ + 307, + 225, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 225, + 545, + 475 + ], + "type": "image", + "image_path": "9260a43193a7b1a051371d2fff12dabdcb84fd7ca87930b2bc075b9a6bd50a9b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 483, + 547, + 540 + ], + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 540 + ], + "type": "text", + "content": "Figure 1. Visual examples of our inversion method. Direct applying 2D GAN inversion methods (PTI [28]) to the 3D GAN suffers from inaccurate geometry in novel views. Our method excels in synthesizing consistent geometry and high-fidelity texture in different views, even reconstructing a face under an extreme pose." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 545, + 546, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 546, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 546, + 593 + ], + "type": "text", + "content": "with an additional 2D CNN-based upsampler or an efficient 3D representation modeling method. With tremendous effort, 3D GANs can produce photorealistic images while enforcing strong 3D consistency across different views." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 547, + 714 + ], + "type": "text", + "content": "We are interested in the task of reconstructing a human face with 3D geometry and texture given only one monocular image. It is an ill-posed problem and close to the harsh condition of real scenarios. With the power of 3D GANs, it seems achievable via projecting a target image onto the manifold of a pre-trained generator. The process is referred as 3D GAN inversion. A straightforward path is to follow the 2D GAN inversion method [28], i.e., optimizing the latent code and the network parameters of the generator to overfit the specific portrait." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 693, + 227, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 693, + 227, + 703 + ], + "spans": [ + { + "bbox": [ + 59, + 693, + 227, + 703 + ], + "type": "text", + "content": "Work done during an internship at Tencent AI Lab." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 138, + 712 + ], + "type": "text", + "content": "† Corresponding Author." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "342" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 263 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 263 + ], + "type": "text", + "content": "However, since the ground truth 3D geometry is absent given one monocular image, the inversion result is far from satisfactory. The process of fitting a 3D GAN to one image would sacrifice geometric correctness in order to make the synthetic texture as close as possible to the input, even destroying the original semantic-rich latent space. As the optimization process goes, the face geometry tends to degenerate into a flattened shape, due to the absence of geometry supervision, e.g., images from other views. Besides, there exist quality issues in texture synthesis under novel views. The rendered images of unseen views tend to be blurry and inconsistent with the original image, especially when reconstructing a side face under an extreme pose. Because there is no texture supervision for unseen views given only one monocular image. The failure cases of directly applying [28] are illustrated in Fig. 1." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 263, + 289, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 289, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 289, + 479 + ], + "type": "text", + "content": "In this work, to alleviate the issue caused by missing geometry and texture supervision under multiple views, we propose a novel 3D GAN inversion approach by taking full advantage of facial symmetry prior to construct pseudo supervision of different views. Intuitively, we note that human faces are almost symmetric. 
Assuming the given portrait is symmetric, we can obtain an additional perspective of the portrait by simply mirroring the image. The images of two distinct views can provide geometric relations between the 3D points and their 2D projections based on epipolar geometry. Motivated by this, we seek to leverage facial symmetry as the geometric prior constraining the inversion. The symmetry prior is also employed in a traditional 3D reconstruction work [35]. We leverage the mirrored image as extra supervision of another view when performing the inversion, which prevents the geometry collapse. A rough geometry can be obtained by the inversion with the original and mirror images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 479, + 289, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 289, + 646 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 289, + 646 + ], + "type": "text", + "content": "To further enhance texture quality and geometry in novel views, we employ depth-guided 3D warping to generate the pseudo images of the views surrounding the input and symmetric camera pose. The depth is inferred from the rough 3D volume. The original image along with the pseudo images are used to fine-tune the generator's parameters for the joint promotion of texture and geometry. To prevent the optimized geometry from deviating too much from the rough geometry, we design a geometry regularization term as a constraint. However, human faces are never fully symmetric in practice, neither in shape nor appearance. Therefore, we design several constraints to extract meaningful information adaptively from the mirror image without compromising the original reconstruction quality." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 647, + 217, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 647, + 217, + 658 + ], + "spans": [ + { + "bbox": [ + 59, + 647, + 217, + 658 + ], + "type": "text", + "content": "Our main contributions are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 665, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 665, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 58, + 665, + 287, + 715 + ], + "type": "text", + "content": "- We propose a novel 3D GAN inversion method by incorporating facial symmetry prior. It enables a high-quality reconstruction while preserving the multi-view consistency in geometry and texture." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 72, + 545, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 545, + 121 + ], + "type": "text", + "content": "- We conduct comprehensive experiments to demonstrate the effectiveness of our method and compare it with many state-of-the-art inversion methods. We also apply our method to various downstream applications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 145, + 392, + 158 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 145, + 392, + 158 + ], + "spans": [ + { + "bbox": [ + 306, + 145, + 392, + 158 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 168, + 407, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 168, + 407, + 180 + ], + "spans": [ + { + "bbox": [ + 306, + 168, + 407, + 180 + ], + "type": "text", + "content": "2.1. 
3D-Aware GANs" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 191, + 547, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 191, + 547, + 418 + ], + "spans": [ + { + "bbox": [ + 304, + 191, + 547, + 418 + ], + "type": "text", + "content": "Recently, neural scene representations have incorporated 3D prior into image synthesis with explicit camera control. Inspired by the success of Neural Radiance Fields (NeRF) [22], [6,24] employ implicit volumetric neural rendering structure for consistent novel view synthesis, required only unconstrained monocular images training. To overcome the computational cost and lift the generation resolution, the following methods adopt a two-stage rendering process [5, 12, 21, 25, 30, 31, 37, 38, 41, 42]. Since 2D upsamplers may introduce view-inconsistent artifacts, NeRF path regularization [12] and dual discriminators [5] are proposed. Different 3D modeling representations are further designed for scalable and fast rendering. EG3D [5] introduces tri-plane representation, and GRAM-HD [36] proposes to render radiance manifolds first for efficient sampling. Boosting with the powerful high-fidelity unconditioned 3D GANs, we can achieve real image 3D reconstruction and editing. Specifically, we select the state-of-the-art EG3D [5] as our backbone." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 436, + 400, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 436, + 400, + 449 + ], + "spans": [ + { + "bbox": [ + 306, + 436, + 400, + 449 + ], + "type": "text", + "content": "2.2. GAN Inversion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 458, + 547, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 547, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 547, + 615 + ], + "type": "text", + "content": "To edit a real image [29, 39], GAN inversion is applied first to discover a corresponding latent code from which the generator can synthesize the real image. Existing 2D GAN inversion approaches can be categorized into optimization-based, learning-based, and hybrid methods. [1, 16] directly minimize the reconstruction distance via optimizing the latent codes. Learning-based methods [2, 3, 32, 34] exploit a general encoder network to map the input image into latent space in real-time. Hybrid methods would apply the latent code predicted from the encoder as initialization in the later optimization process. Beyond the original inversion latent space, PTI [28] further optimizes the parameters of the generator to enhance the visual fidelity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "As for the 3D GAN inversion task, most methods directly transfer the 2D methods, e.g., PTI [28] and e4e [32], which may suffer from the poor results in novel views. Pix2NeRF [4] introduced a joint distillation strategy for training a 3D inversion encoder. A concurrent work [18] proposes to perform camera pose optimization simultaneously to ensure view consistency. However, none of the above methods take geometry shape into consideration." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "343" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 197, + 218 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 197, + 218 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 197, + 218 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 197, + 218 + ], + "type": "image", + "image_path": "d75a1f0e84d6476ff667bcfbebf14831b191b283b1dc5e67fdd592cbabc96bd2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 228, + 547, + 274 + ], + "lines": [ + { + "bbox": [ + 46, + 228, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 547, + 274 + ], + "type": "text", + "content": "Figure 2. The proposed framework. A) Our method first performs inversion with the help of the symmetry view to achieve the latent code " + }, + { + "bbox": [ + 46, + 228, + 547, + 274 + ], + "type": "inline_equation", + "content": "w^{+}" + }, + { + "bbox": [ + 46, + 228, + 547, + 274 + ], + "type": "text", + "content": " with a roughly correct geometry. B) The original image and the mirror one, along with adjacent warping pseudos, are used for joint optimization to enhance the geometry and texture of rendered images in novel views. C) Depth-guided 3D warping are used to generate pseudo images in novel views to provide extra supervision. Unfaithful regions are filtered out with the authentic mask." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 198, + 70, + 386, + 218 + ], + "blocks": [ + { + "bbox": [ + 198, + 70, + 386, + 218 + ], + "lines": [ + { + "bbox": [ + 198, + 70, + 386, + 218 + ], + "spans": [ + { + "bbox": [ + 198, + 70, + 386, + 218 + ], + "type": "image", + "image_path": "962dca2b8aea70d9409e2bf2c19d3023a6a0d158b6e429885d6d85077916bff9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 387, + 70, + 543, + 217 + ], + "blocks": [ + { + "bbox": [ + 387, + 70, + 543, + 217 + ], + "lines": [ + { + "bbox": [ + 387, + 70, + 543, + 217 + ], + "spans": [ + { + "bbox": [ + 387, + 70, + 543, + 217 + ], + "type": "image", + "image_path": "617c2fdcc313ba70621dcd2ad8b8b0e39aaf1a3d8a6e574efb070d9c5e7b9ead.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 282, + 141, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 282, + 141, + 295 + ], + "spans": [ + { + "bbox": [ + 47, + 282, + 141, + 295 + ], + "type": "text", + "content": "2.3. Few-shot NeRF" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 301, + 290, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 301, + 290, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 301, + 290, + 458 + ], + "type": "text", + "content": "Few-shot NeRF aims at reconstructing general 3D scenarios where only a few observed views are available, which shares a similar setting with 3D GAN inversion. 
MVS-NeRF [7] leverages plane-swept cost volumes in multi-view stereo for geometry-aware scene reasoning to improve performance. DietNeRF [13] enforces semantic consistency between rendered images from unseen view and seen images via a CLIP encoder [27]. RegNeRF [23] regularizes the texture of patches rendered from unobserved viewpoints without relying on additional training modules. Since it is hard to find a common prior for general scenes, these methods investigate how to ensure the geometry consistency of different views, which gives us inspiration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 467, + 224, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 224, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 224, + 480 + ], + "type": "text", + "content": "3. Definition of 3D GAN Inversion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "spans": [ + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "content": "Similar to 2D GAN inversion, 3D GAN inversion aims to project an input image " + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "content": " onto the manifold of a pretrained unconditional 3D GAN model " + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{3D}}(\\cdot ;\\theta)" + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "content": " parameterized by weight " + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "content": ". After inversion, " + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{3D}}" + }, + { + "bbox": [ + 46, + 487, + 287, + 584 + ], + "type": "text", + "content": " can reconstruct the image faithfully given the corresponding camera pose, synthesize content-consistent images in novel views, and facilitate downstream tasks like face editing. 
One formulation of the 3D GAN inversion problem is defined as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 88, + 594, + 287, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 594, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 88, + 594, + 287, + 613 + ], + "type": "interline_equation", + "content": "w ^ {*} = \\underset {w} {\\arg \\max } = \\mathcal {L} \\left(G _ {3 D} (w, \\pi ; \\theta), I\\right), \\tag {1}", + "image_path": "68410aea0bdc9840de51a0d8bc452d95aaf1ccfb259b9150a8053f44bb608a7a.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " is the latent representation in " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " space and " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " is the corresponding camera matrix of input image. The loss function " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " is usually defined as pixel-wise reconstruction loss or perceptual loss. In our settings, camera matrix " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " is known, which is extracted by a pre-trained detector [9]. This formulation cares about the " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " space. However, the inversion in the " + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 617, + 288, + 714 + ], + "type": "text", + "content": " space is always not enough to capture local facial details, resulting in inaccurate reconstruction." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 283, + 545, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 283, + 545, + 343 + ], + "spans": [ + { + "bbox": [ + 304, + 283, + 545, + 343 + ], + "type": "text", + "content": "Following the recent optimization-based 2D GAN inversion method [28], we perform the inversion in the extended latent space for more accurate reconstruction, i.e., the combination of the " + }, + { + "bbox": [ + 304, + 283, + 545, + 343 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^{+}" + }, + { + "bbox": [ + 304, + 283, + 545, + 343 + ], + "type": "text", + "content": " space and the parameter space. 
The formulation is defined as:

$$ w^{*}, \theta^{*} = \underset{w,\,\theta}{\arg\min}\ \mathcal{L}\left(G_{\mathrm{3D}}(w, \pi; \theta), I\right). \tag{2} $$

Note that $w$ and $\theta$ are optimized alternately, i.e., $w$ is first optimized using Eq. (1), and then $\theta$ is optimized with $w^{*}$ fixed.
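To make the two-step schedule concrete, the sketch below mirrors Eqs. (1)-(2) in PyTorch-style pseudocode. It is a minimal illustration, not the authors' released code: the generator `G3D`, the image `I`, the pose `pi`, and the loss `recon_loss` are stand-ins that would be replaced with a real EG3D generator and an LPIPS/L2 objective; the step counts follow the settings reported in Sec. 5.1.

```python
import torch

def invert_3d_gan(G3D, I, pi, recon_loss, w_init,
                  steps_w=500, steps_theta=1000, lr_w=1e-2, lr_theta=3e-4):
    """Two-step inversion sketch: optimize w with theta frozen (Eq. 1),
    then fine-tune the generator weights theta with w* fixed (Eq. 2)."""
    # Step 1: latent optimization in W+ with the generator frozen.
    w = w_init.clone().requires_grad_(True)
    opt_w = torch.optim.Adam([w], lr=lr_w)
    for p in G3D.parameters():
        p.requires_grad_(False)
    for _ in range(steps_w):
        opt_w.zero_grad()
        loss = recon_loss(G3D(w, pi), I)
        loss.backward()
        opt_w.step()

    # Step 2: generator fine-tuning with the pivot latent w* fixed.
    w_star = w.detach()
    for p in G3D.parameters():
        p.requires_grad_(True)
    opt_theta = torch.optim.Adam(G3D.parameters(), lr=lr_theta)
    for _ in range(steps_theta):
        opt_theta.zero_grad()
        loss = recon_loss(G3D(w_star, pi), I)
        loss.backward()
        opt_theta.step()
    return w_star, G3D
```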
4. The Proposed Approach

Our goal is to reconstruct a human face through a pretrained 3D GAN given a single monocular image. The reconstruction should preserve authentic appearance texture and geometric shape in novel views. Due to the limited geometry and texture information in a single image, overfitting to a single view tends to be trapped in geometry collapse, blurry texture, and missing details in unseen views, especially when reconstructing a side face under an extreme pose. To overcome the lack of information about other views, we introduce a facial symmetry prior to promote the inversion. We propose a two-stage inversion pipeline, i.e., inversion for rough geometry and joint optimization of geometry and texture. In the first stage, we obtain a rough geometry by optimizing the latent code $w$ using the original and mirror images (Sec. 4.1). In the second stage, we refine the geometry and texture by optimizing the parameters $\theta$ with depth-guided 3D warping and a set of designed constraints (Sec. 4.2). An overview of our method is shown in Fig. 2.

Figure 3. Visualization of warped pseudos. The red bounding box contains the range of employed pseudos, depending on the yaw angle of the input image. A frontal face can be warped by a wider range of yaw angles than a side face to get authentic pseudos.

Figure 4. Visualization of the authentic mask and warped pseudo. (Panels: Source Image, Warped Image, Authentic Mask, Pseudo.)

4.1. Inversion with Symmetry for Rough Geometry
The purpose of this stage is to learn a rough geometry as a pivot for further tuning. To compensate for the missing information of unseen views, we resort to the facial symmetry prior, i.e., the left half of a face is almost the same as the right half.
We simply flip the input image $I_{s}$ horizontally to obtain the mirror image $I_{m}$, whose camera pose $\pi_{m}$ can be calculated by multiplying a fixed matrix with the camera extrinsic parameters of $\pi_{s}$; the intrinsic parameters are unchanged. The mirror image serves as a pseudo projection of the face under a novel view.
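As a concrete illustration of how such a mirror view could be constructed, the snippet below flips an image horizontally and mirrors a camera-to-world extrinsic matrix about the x = 0 plane. It is a sketch under the assumption of a 4x4 extrinsic and a world frame whose x-axis is the horizontal face axis; the exact fixed matrix used by the authors is not specified here.

```python
import numpy as np

# Reflection about the x = 0 plane (an assumption for this sketch,
# not necessarily the paper's exact fixed matrix).
FLIP_X = np.diag([-1.0, 1.0, 1.0, 1.0])

def mirror_view(image, cam2world):
    """Return the horizontally flipped image and the mirrored extrinsics."""
    image_m = image[:, ::-1].copy()              # flip the width axis of (H, W, C)
    cam2world_m = FLIP_X @ cam2world @ FLIP_X    # conjugate the pose by the reflection
    return image_m, cam2world_m
```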
Since human faces are not perfectly symmetric, the mirror image is only an approximation of that novel view. Where the original and mirror images overlap on the face, their contents can disagree, i.e., different colors at the same position, which we refer to as conflict content. The inversion should therefore rely mainly on the original image and take only partially useful information from the mirror one. Furthermore, we observe that a frontal face provides more effective information than a side face: a nearly frontal face already shows most of the face, so we should trust its mirror image less to avoid conflicts in the large overlapping region, whereas a side face shows only half of the face and has only a small overlapping conflict region with its mirror image, so we should trust the mirror image more. We exploit an adaptive weighting strategy for the importance of the mirror image according to the yaw angle $\alpha_{\mathrm{yaw}}$, using a Gaussian function of $\alpha_{\mathrm{yaw}}$ to approximate the importance of different views. The weight $\lambda_{m}$ of the mirror image is defined as:

$$ \mathcal{E}(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}, \tag{3} $$

$$ \lambda_{m} = \begin{cases} 1 - \mathcal{E}(\alpha_{\mathrm{yaw}}), & \text{if } \mathcal{E}(\alpha_{\mathrm{yaw}}) \leq k, \\ 0, & \text{if } \mathcal{E}(\alpha_{\mathrm{yaw}}) > k, \end{cases} \tag{4} $$

where $\sigma$, $\mu$, and $k$ are hyper-parameters. Since the mirror image of a nearly frontal face provides very limited extra information about the original image, its weight $\lambda_{m}$ is clamped to 0.

To optimize the latent code in the $\mathcal{W}^+$ space, the perceptual loss [40] is used to minimize the distance between the generated results and the original and mirror images.
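A direct reading of Eqs. (3)-(4) as a small helper is given below; it is only a sketch, and the hyper-parameter values mu, sigma, and k are placeholders rather than the paper's settings.

```python
import math

def mirror_weight(alpha_yaw, mu=0.0, sigma=0.3, k=0.6):
    """Adaptive weight for the mirror image, Eqs. (3)-(4).
    alpha_yaw is the yaw angle of the input face (radians in this sketch)."""
    e = math.exp(-((alpha_yaw - mu) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))
    # Nearly frontal faces (large Gaussian response) get zero mirror weight;
    # side faces (small response) rely more on the mirror image.
    return 0.0 if e > k else 1.0 - e
```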
Following [17, 28], a noise regularization term $\mathcal{L}_n(n)$ is employed to prevent the noise vector from containing vital information. The objective of this stage is defined as:

$$ \mathcal{L}_{\mathrm{inv}} = \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right) + \lambda_{m}\, \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{m}; \theta), I_{m}\right) + \lambda_{n}\, \mathcal{L}_{n}(n), \tag{5} $$

where $n$ is the noise vector and $\lambda_{n}$ is a trade-off parameter. The generator is kept frozen at this stage. The visual results in Fig. 8 show that the geometry is greatly improved by the facial symmetry prior.
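The first-stage objective can be assembled as below. This is a hedged sketch: `lpips_fn` stands for any perceptual distance (for example the `lpips` package, an assumption here), `noise_reg` for the noise regularizer of [17, 28], and the weights are passed in by the caller.

```python
def stage1_loss(G3D, w, noise, I_s, pi_s, I_m, pi_m,
                lpips_fn, noise_reg, lambda_m, lambda_n):
    """Symmetry-aware first-stage objective, Eq. (5).
    The mirror term is weighted by lambda_m from Eqs. (3)-(4)."""
    loss = lpips_fn(G3D(w, pi_s), I_s)                     # reconstruct the original view
    loss = loss + lambda_m * lpips_fn(G3D(w, pi_m), I_m)   # weighted mirror view
    loss = loss + lambda_n * noise_reg(noise)              # keep the noise uninformative
    return loss
```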
4.2. Joint Optimization of Geometry and Texture

Although we obtain a rough geometry via the optimization of $w$ in the first stage, there is a distinct gap between the texture of the rendered face and that of the original one, even under the same camera pose: the rendered face shares a similar geometry with the original one but becomes a different identity. In this stage, we optimize the generator's parameters $\theta$ to bridge the texture gap for identity preservation and to refine the rough geometry. We design a geometry regularization constraint to prevent the model from degrading to a flattened geometry. Moreover, we construct a set of pseudo images in different views to provide supervision via depth-guided 3D warping.

Geometry Regularization. We observe that optimizing the generator without any constraint on the geometry causes the geometry to deviate from the rough one, resulting in a flattened geometry similar to the case of inversion from a single image. To avoid geometry drift while overfitting the texture, we regularize the density obtained from the 3D volume of the GAN to stay similar to the rough volume obtained in the first stage. Specifically, with $w$ fixed, we generate depth maps $D$ from the 3D GAN under different sampled views and compute the $\mathcal{L}_2$ distance to the corresponding depth maps $D_0$ generated by the un-tuned generator of the first stage:

$$ \mathcal{L}_{\mathrm{depth}} = \sum_{i \in \mathbb{S}} \left\| D^{i} - D_{0}^{i} \right\|_{2}, \tag{6} $$

where $\mathbb{S}$ is the sampled camera pose set.
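A minimal sketch of the depth regularizer in Eq. (6), assuming the generator can return a rendered depth map for a given latent and pose; `render_depth` is a placeholder for whatever depth output the 3D GAN exposes.

```python
import torch

def depth_regularizer(G3D_tuned, G3D_frozen, w_star, sampled_poses, render_depth):
    """Eq. (6): keep depth maps of the tuned generator close to the rough
    (first-stage) geometry over a set of sampled camera poses."""
    loss = 0.0
    for pi in sampled_poses:
        d_tuned = render_depth(G3D_tuned, w_star, pi)
        with torch.no_grad():                      # frozen reference geometry
            d_rough = render_depth(G3D_frozen, w_star, pi)
        loss = loss + torch.norm(d_tuned - d_rough, p=2)
    return loss
```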
Depth-guided 3D Warping for Pseudo Supervision. Optimizing the generator with only two images is still not enough to capture facial details, resulting in blurry effects around facial components such as the eyes (see Fig. 11). Hence, we construct pseudo images of different views for extra supervision, using the rough geometry together with the original and mirror images. Specifically, given the original image (source view) and the rough geometry, we can synthesize an image under a novel view (target view) by warping with 3D guidance. A pixel coordinate $p_t$ of the synthesized image in the target view is projected back onto the source view with the relative camera pose $\pi_{t\rightarrow s}$ and the camera intrinsics $K$:

$$ p_{t \rightarrow s} = K\, \pi_{t \rightarrow s}\, D_{t}(p_{t})\, K^{-1} p_{t}, \tag{7} $$

where $D_{t}(\cdot)$ is the depth map of the target view. Since the projected coordinates $p_{t\rightarrow s}$ are continuous values, we extract the color values from the original image with a differentiable bilinear sampling mechanism, i.e., $I_{s\rightarrow t} = I_s(p_{t\rightarrow s})$. The low-resolution depth map is upsampled to match the resolution of the image.
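The sketch below implements this backward-warping step with PyTorch's `grid_sample`, which performs the differentiable bilinear sampling. It assumes a pinhole intrinsic matrix K, a 4x4 target-to-source relative pose, and depth maps already upsampled to the image resolution; it illustrates Eq. (7), not the authors' implementation.

```python
import torch
import torch.nn.functional as F

def warp_source_to_target(I_s, D_t, K, T_t2s):
    """Backward-warp the source image to the target view (Eq. 7).
    I_s: (1, 3, H, W) source image; D_t: (1, 1, H, W) target-view depth;
    K: (3, 3) intrinsics; T_t2s: (4, 4) target-to-source pose."""
    _, _, H, W = I_s.shape
    device = I_s.device
    # Pixel grid of the target view in homogeneous coordinates.
    ys, xs = torch.meshgrid(torch.arange(H, device=device, dtype=torch.float32),
                            torch.arange(W, device=device, dtype=torch.float32),
                            indexing="ij")
    pix = torch.stack([xs, ys, torch.ones_like(xs)], dim=0).reshape(3, -1)   # (3, HW)
    # Back-project to target camera space, move to source camera, re-project.
    cam_t = torch.linalg.inv(K) @ pix * D_t.reshape(1, -1)                   # (3, HW)
    cam_t_h = torch.cat([cam_t, torch.ones(1, H * W, device=device)], dim=0)
    cam_s = (T_t2s @ cam_t_h)[:3]
    pix_s = K @ cam_s
    pix_s = pix_s[:2] / pix_s[2:].clamp(min=1e-6)
    # Normalize to [-1, 1] and sample the source image bilinearly.
    grid_x = 2.0 * pix_s[0] / (W - 1) - 1.0
    grid_y = 2.0 * pix_s[1] / (H - 1) - 1.0
    grid = torch.stack([grid_x, grid_y], dim=-1).reshape(1, H, W, 2)
    return F.grid_sample(I_s, grid, mode="bilinear", align_corners=True)
```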
Authentic Mask. Without distinguishing foreground pixels from the background, background pixels in the original image may be projected onto the foreground, leading to erroneous results. To overcome this issue, we form a mask that indicates the visibility of pixels and filters out invisible areas using the rendered depth values. Specifically, we obtain the projected depth value $D_{s}(p_{t\rightarrow s})$ by sampling the depth map of the source view. We then use the Euclidean distance between $D_{s}(p_{t\rightarrow s})$ and the target-view depth $D_{t}(p_{t})$ to compute the mask: a large distance indicates that the pixel $p_t$ is invisible. To ensure the projected pixels lie on the front visible surface, we only preserve the area where the distance is below a threshold $\tau$:

$$ M(p_{t}) = \left\| D_{t}(p_{t}) - D_{s}(p_{t \rightarrow s}) \right\| < \tau. \tag{8} $$

Furthermore, because the depth estimation of the background is poor, only the facial part is warped: we warp the facial mask of the source view to the target view and multiply it with the visibility mask $M(p_{t})$ to obtain the authentic mask $M_{t}$. An example is shown in Fig. 4.
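Continuing the warping sketch above, the visibility test of Eq. (8) compares the target depth with the source depth sampled at the projected coordinates and combines it with the warped facial mask. Again an illustrative sketch; the threshold value is a placeholder, not the paper's setting.

```python
def authentic_mask(D_t, D_s_sampled, face_mask_warped, tau=0.05):
    """Eq. (8) combined with the warped facial mask.
    D_t:              (1, 1, H, W) target-view depth
    D_s_sampled:      (1, 1, H, W) source depth sampled at p_{t->s}
    face_mask_warped: (1, 1, H, W) source facial mask warped to the target view"""
    visible = (D_t - D_s_sampled).abs() < tau      # front-surface visibility test
    return visible.float() * face_mask_warped
```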
After multiplying the mask $M_{t}$ with the warped image $I_{s\rightarrow t}$, the resulting image can be used for supervision.

Adjacent View Warping. Fig. 3 illustrates the warping results of two examples. As the yaw angle between the source and target views increases, the warping results show more distortions and become less authentic. It is therefore intuitive to discard the pseudo images of target views that deviate too much from the source view. Empirically, a frontal face can be warped over a wider range of yaw angles than a side face while still yielding authentic pseudo images. The variance of the yaw angles sampled for constructing pseudo images is set to a fixed ratio of the view-dependent weight $\lambda_{m}$ from Sec. 4.1. The LPIPS loss [14] is used to compute the multi-view distance as follows:

$$ \mathcal{L}_{\mathrm{adj}} = \mathcal{L}_{\mathrm{LPIPS}}\left(M_{t} \cdot G_{\mathrm{3D}}(w, \pi_{t}; \theta),\ M_{t} \cdot I_{s \rightarrow t}\right). \tag{9} $$

Although pseudo images of several unseen views adjacent to the source view have been constructed, they bring only marginal improvements for remote views. Especially for a side face, the pseudo images of remote views are blurry and have incomplete texture (see Fig. 3). Therefore, we also construct pseudo images of the views adjacent to the view of the mirror image.
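A sketch of the adjacent-view supervision of Eq. (9), reusing the warping and masking helpers sketched above; the pose-sampling details and `lpips_fn` are placeholders.

```python
def adjacent_view_loss(G3D, w_star, sampled_target_views, lpips_fn):
    """Masked LPIPS between renders and warped pseudo images (Eq. 9).
    Each element of sampled_target_views is a tuple (pi_t, I_pseudo, M_t)
    prepared with the warping and masking sketches above."""
    loss = 0.0
    for pi_t, I_pseudo, M_t in sampled_target_views:
        render = G3D(w_star, pi_t)
        loss = loss + lpips_fn(M_t * render, M_t * I_pseudo)
    return loss
```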
Since the conflict region between the original and mirror images has a side effect on the generator optimization, causing blurry results on rendered images even when reconstructing the source view (see Fig. 9), we propose to take only the meaningful information from the symmetric views without harming the original inversion quality. We compute the similarities only for facial components rather than the whole face region. In addition, instead of a pixel-wise loss, we adopt the contextual loss [20] to improve the texture quality. The loss for symmetric views is defined as:

$$ \mathcal{L}_{\mathrm{sym}} = \sum_{c \in \mathbb{F}} \mathcal{L}_{\mathrm{CX}}\left(\operatorname{ROI}^{c}\left(G_{\mathrm{3D}}(w, \pi_{t}; \theta)\right),\ \operatorname{ROI}^{c}\left(I_{m \rightarrow t}\right)\right), \tag{10} $$

where $I_{m\rightarrow t}$ is the pseudo image of viewpoint $\pi_t$ warped from the mirror image $I_{m}$, and $\operatorname{ROI}^{c}(\cdot)$ extracts the region of interest of facial component $c$ from the collection $\mathbb{F} = \{\text{eyes, nose, mouth}\}$.
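For completeness, the sketch below shows how Eq. (10) might be assembled from ROI crops and a contextual-style loss. The contextual loss here is a simplified re-implementation of the idea in [20] operating on feature maps from some feature extractor `feat`; the ROI boxes would come from a facial landmark detector, which is not shown.

```python
import torch

def contextual_loss(fx, fy, h=0.5, eps=1e-5):
    """Simplified contextual loss over two feature maps of shape (N, C, H, W)."""
    N, C = fx.shape[:2]
    fx = fx.reshape(N, C, -1)
    fy = fy.reshape(N, C, -1)
    fx = fx - fy.mean(dim=2, keepdim=True)          # center on target statistics
    fy = fy - fy.mean(dim=2, keepdim=True)
    fx = fx / (fx.norm(dim=1, keepdim=True) + eps)  # unit-length feature vectors
    fy = fy / (fy.norm(dim=1, keepdim=True) + eps)
    dist = 1.0 - torch.bmm(fx.transpose(1, 2), fy)              # cosine distances
    dist_rel = dist / (dist.min(dim=2, keepdim=True).values + eps)
    w = torch.softmax((1.0 - dist_rel) / h, dim=2)              # contextual similarity
    cx = w.max(dim=2).values.mean(dim=1)                        # best match per location
    return -torch.log(cx + eps).mean()

def symmetric_view_loss(render, I_m2t, roi_boxes, feat):
    """Eq. (10): contextual loss over facial-component crops only."""
    loss = 0.0
    for (y0, y1, x0, x1) in roi_boxes:              # e.g. eyes, nose, mouth boxes
        loss = loss + contextual_loss(feat(render[..., y0:y1, x0:x1]),
                                      feat(I_m2t[..., y0:y1, x0:x1]))
    return loss
```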
The reconstruction loss between the original image and its rendered counterpart is still used to preserve the quality of the original perspective:

$$ \mathcal{L}_{\mathrm{ori}} = \mathcal{L}_{2}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(G_{\mathrm{3D}}(w, \pi_{s}; \theta), I_{s}\right). \tag{11} $$

The overall objective for optimizing the generator's parameters is:

$$ \mathcal{L}_{\mathrm{opt}} = \mathcal{L}_{\mathrm{ori}} + \lambda_{\mathrm{adj}} \mathcal{L}_{\mathrm{adj}} + \lambda_{\mathrm{sym}} \mathcal{L}_{\mathrm{sym}} + \lambda_{\mathrm{depth}} \mathcal{L}_{\mathrm{depth}}. \tag{12} $$

The trade-off hyper-parameters are set as follows: $\lambda_{\mathrm{adj}} = 0.1$, $\lambda_{\mathrm{sym}} = 0.05$, and $\lambda_{\mathrm{depth}} = 1$.
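Putting the pieces of Sec. 4.2 together, the second-stage objective of Eq. (12) can be sketched as below, reusing the loss helpers defined earlier (all of them illustrative stand-ins rather than the released implementation); the loss weights are the ones stated above, and the `batch` keys are hypothetical names for the pre-computed inputs.

```python
def stage2_loss(G3D, w_star, I_s, pi_s, batch, lpips_fn, l2_fn,
                lambda_adj=0.1, lambda_sym=0.05, lambda_depth=1.0):
    """Overall generator-tuning objective, Eq. (12).
    `batch` bundles the pre-computed pseudo views, ROI boxes, and the
    frozen-generator references used by the helper losses above."""
    render_s = G3D(w_star, pi_s)
    l_ori = l2_fn(render_s, I_s) + lpips_fn(render_s, I_s)           # Eq. (11)
    l_adj = adjacent_view_loss(G3D, w_star, batch["adjacent_views"], lpips_fn)
    l_sym = symmetric_view_loss(G3D(w_star, batch["pi_t"]), batch["I_m2t"],
                                batch["roi_boxes"], batch["feat"])
    l_depth = depth_regularizer(G3D, batch["G3D_frozen"], w_star,
                                batch["sampled_poses"], batch["render_depth"])
    return l_ori + lambda_adj * l_adj + lambda_sym * l_sym + lambda_depth * l_depth
```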
5. Experiments

5.1. Experimental Settings

Datasets. We conduct our experiments on human face datasets. For all experiments, we use EG3D [5] as the 3D GAN prior, pre-trained on the FFHQ dataset [15]. We report quantitative metrics on the CelebA-HQ test set [19]. We further evaluate on MEAD [33], a multi-view, high-quality video dataset; the first frame of each viewpoint video of 10 identities is extracted for testing.
"index": 4 + }, + { + "type": "image", + "bbox": [ + 208, + 70, + 260, + 284 + ], + "blocks": [ + { + "bbox": [ + 208, + 70, + 260, + 284 + ], + "lines": [ + { + "bbox": [ + 208, + 70, + 260, + 284 + ], + "spans": [ + { + "bbox": [ + 208, + 70, + 260, + 284 + ], + "type": "image", + "image_path": "33e8f1cb68534e6bd42f66de2805b7e39eb7c0c210af7c41ec2c04d775f800dd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 285, + 242, + 292 + ], + "lines": [ + { + "bbox": [ + 225, + 285, + 242, + 292 + ], + "spans": [ + { + "bbox": [ + 225, + 285, + 242, + 292 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 261, + 98, + 332, + 169 + ], + "blocks": [ + { + "bbox": [ + 261, + 98, + 332, + 169 + ], + "lines": [ + { + "bbox": [ + 261, + 98, + 332, + 169 + ], + "spans": [ + { + "bbox": [ + 261, + 98, + 332, + 169 + ], + "type": "image", + "image_path": "8212cc13a848a7c393a0b90e1ac2e011acd090eac9c9eb3f7d04f9c59d2a0e00.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 170, + 317, + 177 + ], + "lines": [ + { + "bbox": [ + 276, + 170, + 317, + 177 + ], + "spans": [ + { + "bbox": [ + 276, + 170, + 317, + 177 + ], + "type": "text", + "content": "Source Image" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 261, + 196, + 332, + 268 + ], + "blocks": [ + { + "bbox": [ + 261, + 196, + 332, + 268 + ], + "lines": [ + { + "bbox": [ + 261, + 196, + 332, + 268 + ], + "spans": [ + { + "bbox": [ + 261, + 196, + 332, + 268 + ], + "type": "image", + "image_path": "635c354de2acf40041996d77ca926dcf9f864cce12a4dfd408101568c6f69d9b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 268, + 317, + 275 + ], + "lines": [ + { + "bbox": [ + 276, + 268, + 317, + 275 + ], + "spans": [ + { + "bbox": [ + 276, + 268, + 317, + 275 + ], + "type": "text", + "content": "Source Image" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 332, + 70, + 385, + 284 + ], + "blocks": [ + { + "bbox": [ + 332, + 70, + 385, + 284 + ], + "lines": [ + { + "bbox": [ + 332, + 70, + 385, + 284 + ], + "spans": [ + { + "bbox": [ + 332, + 70, + 385, + 284 + ], + "type": "image", + "image_path": "a109649a33cb0f0a26d936e4cc64438d53480ab09ccd92b9a91660b7acce29d3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 284, + 366, + 292 + ], + "lines": [ + { + "bbox": [ + 351, + 284, + 366, + 292 + ], + "spans": [ + { + "bbox": [ + 351, + 284, + 366, + 292 + ], + "type": "text", + "content": "SG2" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 385, + 71, + 438, + 284 + ], + "blocks": [ + { + "bbox": [ + 385, + 71, + 438, + 284 + ], + "lines": [ + { + "bbox": [ + 385, + 71, + 438, + 284 + ], + "spans": [ + { + "bbox": [ + 385, + 71, + 438, + 284 + ], + "type": "image", + "image_path": "2e65c836a8e794365c38d4e39c1ff2acc56f59cc49fece8bcd124bcbb4a5c1ca.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 399, + 284, + 427, + 291 + ], + "lines": [ + { + "bbox": [ + 399, + 284, + 427, + 291 + ], + "spans": [ + { + "bbox": [ + 399, + 284, + 427, 
+ 291 + ], + "type": "text", + "content": "SG2 " + }, + { + "bbox": [ + 399, + 284, + 427, + 291 + ], + "type": "inline_equation", + "content": "W^{+}" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 439, + 71, + 492, + 284 + ], + "blocks": [ + { + "bbox": [ + 439, + 71, + 492, + 284 + ], + "lines": [ + { + "bbox": [ + 439, + 71, + 492, + 284 + ], + "spans": [ + { + "bbox": [ + 439, + 71, + 492, + 284 + ], + "type": "image", + "image_path": "f29b17df448d553e0cb66f8bdd7006215fcd2889a15267c75bc1c83c65980209.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 284, + 471, + 291 + ], + "lines": [ + { + "bbox": [ + 459, + 284, + 471, + 291 + ], + "spans": [ + { + "bbox": [ + 459, + 284, + 471, + 291 + ], + "type": "text", + "content": "PTI" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 492, + 71, + 545, + 284 + ], + "blocks": [ + { + "bbox": [ + 492, + 71, + 545, + 284 + ], + "lines": [ + { + "bbox": [ + 492, + 71, + 545, + 284 + ], + "spans": [ + { + "bbox": [ + 492, + 71, + 545, + 284 + ], + "type": "image", + "image_path": "b40919e94b2a054a78398d3044dd4babad9cd9fbde75bff6fed0dc54feafafb6.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 507, + 284, + 524, + 291 + ], + "lines": [ + { + "bbox": [ + 507, + 284, + 524, + 291 + ], + "spans": [ + { + "bbox": [ + 507, + 284, + 524, + 291 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 48, + 333, + 285, + 384 + ], + "blocks": [ + { + "bbox": [ + 48, + 333, + 285, + 384 + ], + "lines": [ + { + "bbox": [ + 48, + 333, + 285, + 384 + ], + "spans": [ + { + "bbox": [ + 48, + 333, + 285, + 384 + ], + "type": "table", + "html": "
| Method | MSE ↓ | LPIPS ↓ | MS-SSIM ↓ | ID ↑ | Pose ↓ | Depth ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| SG2 [16] | 0.0881 | 0.3231 | 0.3557 | 0.8209 | 0.043 | 0.0505 |
| SG2 $\mathcal{W}^+$ [1] | 0.0439 | 0.2261 | 0.2483 | 0.8735 | 0.040 | 0.0500 |
| PTI [28] | 0.0084 | 0.0920 | 0.0980 | 0.9432 | 0.037 | 0.0510 |
| SPI (Ours) | 0.0082 | 0.0865 | 0.0991 | 0.9470 | 0.036 | 0.0476 |
", + "image_path": "f39ad32e77ec31c76de8c5434bd4e2ffa93755129aa0f84a7efcfdc3337892c1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 392, + 266, + 403 + ], + "lines": [ + { + "bbox": [ + 67, + 392, + 266, + 403 + ], + "spans": [ + { + "bbox": [ + 67, + 392, + 266, + 403 + ], + "type": "text", + "content": "Table 1. Quantitative comparison on CelebA-HQ [19]." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 417, + 287, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 417, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 417, + 287, + 441 + ], + "type": "text", + "content": "multi-view high-quality video dataset. The first frame from each viewpoint video of 10 identities is extracted for testing." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 443, + 287, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 443, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 46, + 443, + 287, + 503 + ], + "type": "text", + "content": "Metrics. We evaluate image reconstruction quality and similarity with the following metrics: mean squared error (MSE), perceptual similarity loss (LPIPS) [40], structural similarity (MS-SSIM), and identity similarity (ID) by employing a pre-trained face recognition network [8]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "content": "Baselines. We mainly compare our methods with optimization-based 2D GAN inversion methods. SG2 [16] directly inverts real images into " + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "content": " space with an optimization scheme. [1] extends the inversion into " + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "content": " space, denoted by SG2 " + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "content": ". PTI [28] would further tune generator parameters in a second stage. For a fair comparison, both PTI and ours first optimize the latent for 500 steps and then fine-tune the generator for 1,000 steps, while SG2 and SG2 " + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathcal{W}^+" + }, + { + "bbox": [ + 46, + 506, + 287, + 613 + ], + "type": "text", + "content": " optimize the latent for 1,500 steps." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 47, + 622, + 263, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 263, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 263, + 635 + ], + "type": "text", + "content": "5.2. Reconstruction and Novel View Synthesis" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 641, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 713 + ], + "type": "text", + "content": "Qualitative Evaluation. Fig. 
Qualitative Evaluation. Fig. 5 presents a qualitative comparison of texture and geometry quality across different views. For the original view, our method is able to invert challenging details such as earrings, make-up, and wrinkles, which shows that we do not sacrifice reconstruction performance in the original view. When the camera rotates to novel views, images generated by 2D inversion methods present a twisted appearance due to their nearly flattened geometry. Since SG2 does not deviate far from the initial GAN space, it can generate a portrait with a structured geometry, but it fails to preserve the identity. Our method maintains authentic and consistent geometry in novel views along with a sharp appearance, even when rotated to an extreme pose.

Figure 6. Comparison of identity preservation in novel views. The x-axis represents the yaw angle of the input image; '0' indicates a frontal face.

Quantitative Evaluation. The reconstruction metrics of the original view are reported in Table 1. The results align with our qualitative evaluation: we achieve scores comparable to the current state-of-the-art 2D inversion method [28]. Our MSE, LPIPS, and ID scores are further improved, which can be attributed to the use of the $\mathcal{W}^+$ latent space.
Following EG3D, we evaluate shape quality by calculating the $\mathcal{L}_2$ distance to pseudo-ground-truth depth maps (Depth) generated from DECA [10], and to poses (Pose) estimated from the synthesized images.

Figure 7. Qualitative comparisons with PTI [28] on MEAD [33].
<table>
<tr><td>Method</td><td>View</td><td>MSE ↓</td><td>LPIPS ↓</td><td>MS-SSIM ↓</td><td>ID ↑</td></tr>
<tr><td>PTI</td><td>F</td><td>0.03204</td><td>0.2971</td><td>0.2070</td><td>0.8445</td></tr>
<tr><td>Ours</td><td></td><td>0.03296</td><td>0.3088</td><td>0.2135</td><td>0.8388</td></tr>
<tr><td>PTI</td><td>L30</td><td>0.04355</td><td>0.2992</td><td>0.2274</td><td>0.8446</td></tr>
<tr><td>Ours</td><td></td><td>0.03399</td><td>0.2796</td><td>0.2025</td><td>0.8469</td></tr>
<tr><td>PTI</td><td>L60</td><td>0.08255</td><td>0.3902</td><td>0.3143</td><td>0.7568</td></tr>
<tr><td>Ours</td><td></td><td>0.04069</td><td>0.3113</td><td>0.2379</td><td>0.8272</td></tr>
<tr><td>PTI</td><td>R30</td><td>0.04574</td><td>0.3110</td><td>0.2393</td><td>0.8383</td></tr>
<tr><td>Ours</td><td></td><td>0.03203</td><td>0.2807</td><td>0.2057</td><td>0.8529</td></tr>
<tr><td>PTI</td><td>R60</td><td>0.07865</td><td>0.3829</td><td>0.3106</td><td>0.7995</td></tr>
<tr><td>Ours</td><td></td><td>0.04541</td><td>0.3160</td><td>0.2400</td><td>0.8335</td></tr>
</table>
", + "image_path": "864a823edc119faabd82356225added9e0c199703a73b28abb93a46873430445.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 361, + 287, + 429 + ], + "lines": [ + { + "bbox": [ + 46, + 361, + 287, + 429 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 287, + 429 + ], + "type": "text", + "content": "Table 2. Quantitative comparison on MEAD [33]. View denotes the yaw angle of the input image. F is frontal, L is left side, and R is right side. 30 and 60 are the rotation degrees. Each time we use one view as the inversion input and use all 5 views as ground truth for evaluation. The average performance of 4 unseen views and 1 seen view is reported." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 447, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 447, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 447, + 287, + 483 + ], + "type": "text", + "content": "evaluate shape quality by calculating " + }, + { + "bbox": [ + 46, + 447, + 287, + 483 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_2" + }, + { + "bbox": [ + 46, + 447, + 287, + 483 + ], + "type": "text", + "content": " for pseudo-ground-truth depth-maps (Depth) generated from DECA [10], and poses (Pose) estimated from synthesized images." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 484, + 287, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 287, + 662 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 287, + 662 + ], + "type": "text", + "content": "We also use identity similarity to evaluate the identity preservation of the synthesized novel views. Given a portrait, we synthesize a novel view image under the symmetric camera pose of the portrait. The similarity between the synthesized image and the flipped image portrait is calculated. The results are shown in Fig. 6. It can be observed that when the yaw angle of a portrait is small, all methods can perform well with a high similarity score. But when the yaw angle is large, only our method can maintain a high score, while other methods encounter a sharp performance drop due to the inaccurate geometry. As we employ the symmetry prior and the adjacent pseudo supervision, the rendered faces can better preserve the texture and geometry. These results demonstrate that we can achieve an identity-consistent 3D inversion." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "content": "Evaluation on MEAD. To get a comprehensive understanding of the performance of our method, we evaluate on MEAD, a multi-view dataset. 
The quantitative comparison between the reconstruction portraits and the ground truth in" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 69, + 545, + 150 + ], + "blocks": [ + { + "bbox": [ + 306, + 69, + 545, + 150 + ], + "lines": [ + { + "bbox": [ + 306, + 69, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 306, + 69, + 545, + 150 + ], + "type": "image", + "image_path": "01f64dbb372a527572505301802d38858746ef8b6cc0e5f875f43db08a93c32b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 170, + 545, + 293 + ], + "blocks": [ + { + "bbox": [ + 334, + 158, + 517, + 170 + ], + "lines": [ + { + "bbox": [ + 334, + 158, + 517, + 170 + ], + "spans": [ + { + "bbox": [ + 334, + 158, + 517, + 170 + ], + "type": "text", + "content": "Figure 8. Ablation study of facial symmetry prior." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 170, + 545, + 293 + ], + "lines": [ + { + "bbox": [ + 307, + 170, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 170, + 545, + 293 + ], + "type": "image", + "image_path": "81a45e6fe76e4e1ef8dde0a83a39460da3d2eed14819a8f3290a238cff396eee.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 301, + 545, + 335 + ], + "lines": [ + { + "bbox": [ + 304, + 301, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 304, + 301, + 545, + 335 + ], + "type": "text", + "content": "Figure 9. Ablation study of authentic mask. Vanilla denotes simply using the full mirror image for supervision. While Ours filters out conflict areas with the designed constraints." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 347, + 546, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 347, + 546, + 467 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 546, + 467 + ], + "type": "text", + "content": "different views is shown in Tab. 2. PTI [28] and our method achieve comparable performance when given a frontal portrait. When the view of the input face has an offset from the canonical one, our method surpasses PTI distinctly. Our metrics remain stable as the yaw angle becomes larger while the performance of PTI degrades significantly. The qualitative results are shown in Fig. 7. The geometry shape of PTI suffers from the flattening phenomenon. In contrast, our method can generate a consistent geometry and texture in novel views." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 478, + 470, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 478, + 470, + 491 + ], + "spans": [ + { + "bbox": [ + 306, + 478, + 470, + 491 + ], + "type": "text", + "content": "5.3. Evaluation of Symmetry Prior" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "type": "text", + "content": "To understand the importance of the symmetry prior, we perform an ablation study by conducting the inversion with or without using the prior. The visual results are shown in Fig. 8. Both approaches can obtain good geometries in the original view. 
However, in the first row, the geometry of the woman with a thin face turns to be obese as the camera gradually rotates, which aligns with its rendered image. The second row shows that the geometry and the rendered image maintain a better view consistency. We even find that, with the auxiliary view, some expression details can be strengthened, such as the slightly opened mouth." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "The symmetry prior cannot be directly employed in the optimization stage because there exist asymmetric areas in a human face. Optimizing the conflict areas will lead to poor results. As shown in Fig. 9, the slanted hair and the single earring in the source image mismatch those in the mirror one. In the first row, when simply using both two images to optimize the generator, the reconstruction quality suffers" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "348" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 69, + 287, + 264 + ], + "blocks": [ + { + "bbox": [ + 47, + 69, + 287, + 264 + ], + "lines": [ + { + "bbox": [ + 47, + 69, + 287, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 69, + 287, + 264 + ], + "type": "image", + "image_path": "7c547f6157cd26838a1220a87b88dc74faa9bf4c29c6f4faa564171b3ded4081.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 60, + 273, + 272, + 283 + ], + "lines": [ + { + "bbox": [ + 60, + 273, + 272, + 283 + ], + "spans": [ + { + "bbox": [ + 60, + 273, + 272, + 283 + ], + "type": "text", + "content": "Figure 10. Editing results incorporated with [26] and [11]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 296, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 296, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 296, + 287, + 380 + ], + "type": "text", + "content": "from degradation. Novel views synthesized by the vanilla version will encounter incorrect texture and blurry results in the conflict areas. Our method can handle such asymmetric cases without the quality worsening by filtering out conflict areas with the designed constraints. Hair, teeth, and other details are consistent in different views, which validates the effectiveness of the proposed constraints." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 388, + 205, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 205, + 401 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 205, + 401 + ], + "type": "text", + "content": "5.4. 
View-consistent Face Editing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 407, + 287, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 407, + 287, + 551 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 287, + 551 + ], + "type": "text", + "content": "Editing a facial image should preserve the original identity while performing a meaningful and visually plausible modification. We extend our methods to downstream editing tasks to validate that the 3D GAN inversion process does not degrade the editability of the original generator. We follow StyleCLIP [26] to achieve text-guided semantic editing and StyleGAN-NADA [11] for stylization, shown in Fig. 10. The editing operation not only influences the original view but also changes the novel view's appearance consistently. It demonstrates that our inversion solution retains the properties in the original space of the generator and can be associated with other editing methods flexibly." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 559, + 140, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 140, + 571 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 140, + 571 + ], + "type": "text", + "content": "5.5. Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 578, + 287, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 578, + 287, + 640 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 287, + 640 + ], + "type": "text", + "content": "Adjacent Warping. Recall that we employ depth-guided warping to create pseudo supervision to improve the texture quality of novel views. In Fig. 11, we can find that this operation can enhance facial component details such as eyelashes and teeth, improving the overall visual quality." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 287, + 714 + ], + "type": "text", + "content": "Depth Regularization. Since supervision signals all come from RGB images, there is no explicit geometry supervision to ensure shape correctness. The shape is prone to drift to overfit the single image. Unnatural distortions will appear in novel views with the drifted shape. In the third column of Fig. 11, the jaw and nose are elongated with no con" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 545, + 258 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 545, + 258 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 545, + 258 + ], + "type": "image", + "image_path": "335f4b7c612ab4dd30d80990b3c3e7550938932cfdca8dfed32fdddceb463838.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 264, + 529, + 277 + ], + "lines": [ + { + "bbox": [ + 321, + 264, + 529, + 277 + ], + "spans": [ + { + "bbox": [ + 321, + 264, + 529, + 277 + ], + "type": "text", + "content": "Figure 11. Ablation study of different designed modules." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 297, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 297, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 305, + 297, + 545, + 319 + ], + "type": "text", + "content": "straints. 
With depth regularization, geometry will be calibrated within reasonable limits." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 327, + 545, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 327, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 305, + 327, + 545, + 376 + ], + "type": "text", + "content": "Two-stage Optimization. The joint optimization stage via utilizing a large parameter space can further improve texture, allowing to reconstruct the out-of-domain details, e.g., auspicious mole, as shown in the last column of Fig. 11." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 400, + 378, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 400, + 378, + 412 + ], + "spans": [ + { + "bbox": [ + 306, + 400, + 378, + 412 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 425, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 545, + 533 + ], + "type": "text", + "content": "We propose a novel 3D GAN inversion method with facial symmetry prior. As demonstrated in massive experiments, our method can support 3D reconstruction at extreme angles with robust geometry. With the designed constraints on texture and geometry, the reconstructed portraits are high-fidelity and possess consistent identity across different views. Besides, the proposed method enables various downstream applications without compromising faithfulness and photorealism." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 539, + 545, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 539, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 304, + 539, + 545, + 622 + ], + "type": "text", + "content": "Limitation and Future Works. Since the effect of illumination is ignored in our assumption, the illumination is modeled implicitly. During the fitting process of the given image with symmetry prior, light sources sometimes become perfectly symmetrical and distorted. We will attempt to settle the problem via modeling illumination explicitly with albedo and normal in future work." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgement. This work was partly supported by the National Natural Science Foundation of China (Grant No. U1903213) and the Shenzhen Science and Technology Program (JCYJ20220818101014030, ZDSYS20200811142605016). This work was partly supported by a UKRI Future Leaders Fellowship [grant number G104084]." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "349" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, 2019. 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 288, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 288, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 288, + 178 + ], + "type": "text", + "content": "[2] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit H Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. arXiv preprint arXiv:2111.15666, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 179, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 179, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 179, + 288, + 213 + ], + "type": "text", + "content": "[3] Qingyan Bai, Yinghao Xu, Jiapeng Zhu, Weihao Xia, Yu-jiu Yang, and Yujun Shen. High-fidelity gan inversion with padding space. arXiv preprint arXiv:2203.11105, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 213, + 288, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 288, + 268 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 288, + 268 + ], + "type": "text", + "content": "[4] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional p-gan for single image to neural radiance fields translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3981-3990, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 269, + 288, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 269, + 288, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 269, + 288, + 323 + ], + "type": "text", + "content": "[5] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, 2022. 
1, 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 324, + 288, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 288, + 379 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 288, + 379 + ], + "type": "text", + "content": "[6] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 434 + ], + "type": "text", + "content": "[7] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 435, + 288, + 468 + ], + "type": "text", + "content": "[8] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 468, + 288, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 468, + 288, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 468, + 288, + 512 + ], + "type": "text", + "content": "[9] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPR Workshops, 2019. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 557 + ], + "type": "text", + "content": "[10] Yao Feng, Haiwen Feng, Michael J Black, and Timo Bolkart. Learning an animatable detailed 3d face model from inthe-wild images. ACM Transactions on Graphics (ToG), 40(4):1-13, 2021. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 601 + ], + "type": "text", + "content": "[11] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. ACM Transactions on Graphics (TOG), 41(4):1-13, 2022. 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 601, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 635 + ], + "type": "text", + "content": "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. In ICLR, 2022. 
1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 635, + 288, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 680 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 680 + ], + "type": "text", + "content": "[13] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5885-5894, 2021. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 681, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 712 + ], + "type": "text", + "content": "[14] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016. 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "text", + "content": "[15] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4401-4410, 2019. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 118, + 545, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 152 + ], + "type": "text", + "content": "[16] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, 2020. 2, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 547, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 207 + ], + "type": "text", + "content": "[17] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8110-8119, 2020. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 208, + 545, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 241 + ], + "type": "text", + "content": "[18] Jaehoon Ko, Kyusun Cho, Daewon Choi, Kwangrok Ryoo, and Seungryong Kim. 3d gan inversion with pose optimization. arXiv preprint arXiv:2210.07301, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 242, + 545, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 242, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 307, + 242, + 545, + 275 + ], + "type": "text", + "content": "[19] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 
5, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 276, + 545, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 545, + 321 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 545, + 321 + ], + "type": "text", + "content": "[20] Roey Mechrez, Itamar Talmi, and Lihi Zelnik-Manor. The contextual loss for image transformation with non-aligned data. In Proceedings of the European conference on computer vision (ECCV), pages 768-783, 2018. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 322, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 322, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 322, + 545, + 374 + ], + "type": "text", + "content": "[21] Youssef A Mejjati, Isa Milefchik, Aaron Gokaslan, Oliver Wang, Kwang In Kim, and James Tompkin. Gaussian: Controllable image synthesis with 3d gaussians from unposed silhouettes. arXiv preprint arXiv:2106.13215, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 431 + ], + "type": "text", + "content": "[22] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 433, + 545, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 545, + 499 + ], + "type": "text", + "content": "[23] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5480-5490, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 500, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 545, + 555 + ], + "type": "text", + "content": "[24] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 556, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 556, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 307, + 556, + 545, + 622 + ], + "type": "text", + "content": "[25] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "type": "text", + "content": "[26] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. 
Styleclip: Text-driven manipulation of stylegan imagery. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2085–2094, 2021. 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 679, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 679, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 679, + 545, + 713 + ], + "type": "text", + "content": "[27] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learn-" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "text", + "content": "350" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "ing transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 108, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 108, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 108, + 287, + 150 + ], + "type": "text", + "content": "[28] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 1, 2, 3, 4, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "spans": [ + { + "bbox": [ + 48, + 153, + 287, + 186 + ], + "type": "text", + "content": "[29] Yujun Shen, Jinjin Gu, Xiaou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In CVPR, 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 189, + 287, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 189, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 48, + 189, + 287, + 232 + ], + "type": "text", + "content": "[30] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. arXiv preprint arXiv:2205.15517, 2022. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 235, + 287, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 235, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 48, + 235, + 287, + 289 + ], + "type": "text", + "content": "[31] Feitong Tan, Sean Fanello, Abhimitra Meka, Sergio Orts-Escolano, Danhang Tang, Rohit Pandey, Jonathan Taylor, Ping Tan, and Yinda Zhang. Volux-gan: A generative model for 3d face synthesis with hdri relighting. arXiv preprint arXiv:2201.04873, 2022. 
1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 291, + 287, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 291, + 287, + 324 + ], + "spans": [ + { + "bbox": [ + 48, + 291, + 287, + 324 + ], + "type": "text", + "content": "[32] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. TOG, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 287, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 369 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 369 + ], + "type": "text", + "content": "[33] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. Mead: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020. 5, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 372, + 287, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 372, + 287, + 405 + ], + "spans": [ + { + "bbox": [ + 48, + 372, + 287, + 405 + ], + "type": "text", + "content": "[34] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. arXiv preprint arXiv:2109.06590, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 407, + 287, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 407, + 287, + 461 + ], + "spans": [ + { + "bbox": [ + 48, + 407, + 287, + 461 + ], + "type": "text", + "content": "[35] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1-10, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 464, + 287, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 464, + 287, + 507 + ], + "spans": [ + { + "bbox": [ + 48, + 464, + 287, + 507 + ], + "type": "text", + "content": "[36] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. arXiv preprint arXiv:2206.07255, 2022. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 510, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 287, + 564 + ], + "type": "text", + "content": "[37] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18430-18439, 2022. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 620 + ], + "type": "text", + "content": "[38] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 
1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 623, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 623, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 623, + 287, + 678 + ], + "type": "text", + "content": "[39] Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pretrained stylegan. In European conference on computer vision, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[40] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 4, 6" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 182 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 137 + ], + "type": "text", + "content": "[41] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "text", + "content": "[42] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 
2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "351" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_content_list.json b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2099771042c85da7c636e82cb28d5c6e4dc2d066 --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_content_list.json @@ -0,0 +1,1813 @@ +[ + { + "type": "text", + "text": "3D Highlighter: Localizing Regions on 3D Shapes via Text Descriptions", + "text_level": 1, + "bbox": [ + 125, + 130, + 843, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dale Decatur", + "bbox": [ + 183, + 181, + 292, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago", + "bbox": [ + 148, + 198, + 328, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ddecatur@uchicago.edu", + "bbox": [ + 143, + 219, + 333, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Itai Lang", + "bbox": [ + 433, + 181, + 509, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago", + "bbox": [ + 383, + 198, + 558, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "itailang@uchicago.edu", + "bbox": [ + 377, + 219, + 565, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rana Hanocka", + "bbox": [ + 658, + 181, + 777, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago", + "bbox": [ + 629, + 198, + 805, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ranahanocka@uchicago.edu", + "bbox": [ + 611, + 219, + 825, + 233 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3ce3a39f1eb0d64ad65575fb6db76724d3770833ec5661ef4d5b62770f653931.jpg", + "image_caption": [ + "Hat" + ], + "image_footnote": [], + "bbox": [ + 78, + 252, + 215, + 400 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/60c4879aeb4ae28ea5aa5ff682daf21552ad4ac22b576e77b4c48bb58976f923.jpg", + "image_caption": [ + "Necklace" + ], + "image_footnote": [], + "bbox": [ + 228, + 260, + 418, + 400 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/fe90a136ea0f9260c9f288ed71d72975217f89971a6a4916f93a99516821e195.jpg", + "image_caption": [ + "Headlights", + "Figure 1. 3D Highlighter localizes semantic regions on a shape using text as input. Our technique reasons about where to place seemingly unrelated concepts in semantically meaningful locations on the 3D shape, such as a 'necklace' on a horse or 'shoes' on an alien." 
+ ], + "image_footnote": [], + "bbox": [ + 398, + 260, + 584, + 400 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5fb07b1e0540704ea8d02f727270040e868b72855b1128eb69cfd2362028f4f0.jpg", + "image_caption": [ + "Shoes" + ], + "image_footnote": [], + "bbox": [ + 589, + 260, + 787, + 400 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4ae578474bf040ed0bec23be64939a47d3ec22c8b57557e5f3c545daefabbbef.jpg", + "image_caption": [ + "Eyeglasses" + ], + "image_footnote": [], + "bbox": [ + 797, + 252, + 888, + 400 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 460, + 313, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present 3D Highlighter, a technique for localizing semantic regions on a mesh using text as input. A key feature of our system is the ability to interpret \"out-of-domain\" localizations. Our system demonstrates the ability to reason about where to place non-obviously related concepts on an input 3D shape, such as adding clothing to a bare 3D animal model. Our method contextualizes the text description using a neural field and colors the corresponding region of the shape using a probability-weighted blend. Our neural optimization is guided by a pre-trained CLIP encoder, which bypasses the need for any 3D datasets or 3D annotations. Thus, 3D Highlighter is highly flexible, general, and capable of producing localizations on a myriad of input shapes. Our code is publicly available at https://github.com/threedle/3DHighlighter.", + "bbox": [ + 75, + 492, + 473, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 738, + 209, + 753 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Semantic localization of regions on 3D meshes is an important problem in computer graphics and vision with broad applications. One such application is the incorporation of semantic information into the 3D modeling process. A particularly challenging aspect of this task emerges when 3D geometric signals are insufficient for performing segmentation, e.g. where to add a shirt to a bare 3D human model.", + "bbox": [ + 75, + 763, + 468, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose 3D Highlighter, a method for automatically localizing fine-grained semantic regions on a shape based", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "on only a text description. Our system contextualizes the text prompt and highlights the corresponding shape region using the network-predicted probabilities. Using only text, users are able to semantically identify regions on a shape. Our system takes meshes as input, making it compatible with 3D modeling workflows and tools.", + "bbox": [ + 496, + 462, + 892, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This highlighting task requires both object-level and part-level understanding. 3D Highlighter demonstrates the ability to reason about where to place seemingly unrelated concepts on the 3D shape, such as a hat on a candle (Fig. 1). Our system localizes attributes that are geometrically absent from a shape, which we refer to as hallucinated highlighting. 
Understanding a part's global shape context is challenging even when relying on salient geometric features [17,27], let alone without them.", + "bbox": [ + 496, + 551, + 892, + 688 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We optimize the weights of a neural network to produce probabilities that are used to color a given 3D shape in accordance with the specified text. We leverage a pre-trained vision-language model (CLIP [31]) to guide the neural optimization towards the text-specified region. This neural optimization formulation is flexible, bypassing the need for any 3D datasets, 3D annotations, or 3D pre-training. Our system is not bound to a specific set of classes, and, as shown in Fig. 2, is not limited to object parts defined by salient geometric features.", + "bbox": [ + 496, + 688, + 892, + 838 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We encode the part selection as a neural field [44] over the mesh surface. Our network learns to map each point on the surface to a probability of belonging to the text-specified region. We translate the inferred probabilities to a visual at-", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20930", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/017b26d3608a89b48df7256a49f16654424c0c1d823dddfbe5754975975d1db4.jpg", + "image_caption": [ + "Figure 2. Hallucinated part highlighting. Our system is able to reason about where to highlight a geometrically-absent region on shapes. The resulting localizations demonstrate global understanding and localized part-awareness." + ], + "image_footnote": [], + "bbox": [ + 81, + 85, + 893, + 545 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tribute on the mesh surface, which can be rendered and visually understood. The network-predicted probabilities act as a soft-selection operator which blends the highlighter color onto the mesh. The network weights are updated by encouraging the CLIP [31] embedding of the 2D renders of the highlighted mesh to adhere to the specified text. As a result, the network implicitly learns to segment the object to adhere to the text prompt.", + "bbox": [ + 75, + 602, + 472, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We make several design choices that are key to the success of 3D Highlighter. Our network does not directly color the mesh. Rather, we predict a probability of being inside the text-specified highlight, which is used to blend colors on the mesh. The network is initialized such that points have roughly a $50\\%$ probability of being highlighted, resulting in a mesh with albedo halfway between the highlight and background color. During optimization, the relative blend weight of the highlight color directly corresponds to the highlight probability. 
This blending enables the network to naturally and smoothly increase or decrease the segmenta", + "bbox": [ + 75, + 734, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tion probability in accordance with the text specification of the target region.", + "bbox": [ + 496, + 602, + 890, + 632 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we present a method for localizing semantic regions on 3D shapes. The localization is specified by a textual description, which is intuitive, flexible, and not limited to a specific training dataset. We demonstrate applications of our method to shape editing and stylization. Furthermore, our field formulation enables the 3D Highlighter to work with different mesh resolutions and triangulations. A key feature of our system is the ability to interpret out-of-domain localizations. For example, 3D Highlighter is able to figure out where to place a 'hat' on a candle as seen in Fig. 1, demonstrating the ability to reason about where to place seemingly unrelated concepts on the 3D shape.", + "bbox": [ + 496, + 632, + 892, + 815 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 825, + 640, + 842 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Geometry-driven segmentation. Traditional works in geometry processing use low-level geometric features (such as surface area, curvature, or geodesic distance) in or-", + "bbox": [ + 496, + 854, + 890, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20931", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9766ef4a11261cc1f15997de6dd888eb8713529b811146c3bb3ee2bfeb3e8788.jpg", + "image_caption": [ + "Headphones" + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 200, + 170 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/81cd7b51d2cfd8d78a71821d36fd79da22a5b7a19117eff2d1f513b1af286837.jpg", + "image_caption": [ + "Shoes" + ], + "image_footnote": [], + "bbox": [ + 212, + 88, + 331, + 170 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/294bd74ac95b194ac50b261400ea011e725354d47acbb368eeae197473ee4fbe.jpg", + "image_caption": [ + "Hat" + ], + "image_footnote": [], + "bbox": [ + 339, + 88, + 455, + 170 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/959b668781fd20b838201489cefa175f07a439dd1efd9bafbc25d1795cc64b51.jpg", + "image_caption": [ + "Shoes" + ], + "image_footnote": [], + "bbox": [ + 81, + 191, + 196, + 257 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7c59cf4c4e5b68a607234db82cf9d6bcbab0a29c916e3e3e1d450e3ec7c0381d.jpg", + "image_caption": [ + "Necklace" + ], + "image_footnote": [], + "bbox": [ + 197, + 191, + 330, + 258 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/77c4c76920741c24fa86ab07bfca602d48b239ea9ec27eea54eea9565ea4204c.jpg", + "image_caption": [ + "Glasses" + ], + "image_footnote": [], + "bbox": [ + 334, + 191, + 467, + 258 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2019c1bb121776727762325c858011eb3ff3e6de4ad7452ff2614f5f21bf286c.jpg", + "image_caption": [ + "Belt" + ], + "image_footnote": [], + "bbox": [ + 83, + 280, + 197, + 345 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/770a8424474d00430078d9ff43613e1f1d548b70b88925618a0696fb033d8dc8.jpg", + "image_caption": [ + "Hat" + ], + "image_footnote": [], + "bbox": [ + 210, + 280, + 330, + 
348 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/57decd69583ee7eab9d4754d219932c4bfbc74046be0677bb3003a98f148289a.jpg", + "image_caption": [ + "Necklace" + ], + "image_footnote": [], + "bbox": [ + 341, + 280, + 457, + 348 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/78d80f0d2b6d1a2c6fcc7194230b72a5c46477d56b3699b6d6aa54ef4b3657cc.jpg", + "image_caption": [ + "Necklace", + "Figure 3. Our method is able to highlight different parts on the same object. For target selections that correspond to distinct regions, 3D Highlighter produces selections that are semantically meaningful and spatially separated without signal from underlying geometry." + ], + "image_footnote": [], + "bbox": [ + 101, + 369, + 176, + 460 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ad0b618d045f1d549053c56f8bda5ecb063249ae42b25d58f0f874c73ea53607.jpg", + "image_caption": [ + "Roof" + ], + "image_footnote": [], + "bbox": [ + 238, + 371, + 303, + 459 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/7c5d9feaaeb944ad53557f28a02665cc718c380495e4710928de625be0f015b7.jpg", + "image_caption": [ + "Arms" + ], + "image_footnote": [], + "bbox": [ + 362, + 369, + 429, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "der to infer high-level semantic attributes for segmenting shapes [35]. In particular, decomposing shapes into smaller parts or segments often corresponds with physical 3D semantic parts [13, 35]. One approach is to partition shapes based on convexity, or an approximation of convexity [1, 23]. The medial axis carries topological information, which may also be used as a guideline for segmentation [6,8,35,47].", + "bbox": [ + 75, + 574, + 467, + 695 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The underlying assumption in these works is that processing the local geometry can be used to understand the semantics for segmentation. By contrast, a key aspect of our work is the ability to perform hallucinated highlights: segmentations that can not necessarily be inferred by geometry alone. See example highlights in Fig. 2 (e.g., localizing a heart on a goat).", + "bbox": [ + 75, + 696, + 467, + 801 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data-driven segmentation. In the deep learning era, the 3D part segmentation task has been widely tackled by neural network models [11, 15, 20, 26, 36, 45]. Training such a model is typically done in a fully-supervised manner on a large dataset of shapes annotated with a given set of part classes. For example, MeshCNN [11] was trained on a", + "bbox": [ + 75, + 810, + 467, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "human-body segmentation dataset [24] for learning semantic part segmentation. To alleviate the need for 3D annotations, unsupervised learning schemes utilize large collections of unlabelled data [5,7,14,37,49]. For example, Hong et al. [14] inferred part-segmentation through question answering on rendered images from PartNet [46].", + "bbox": [ + 496, + 90, + 890, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast to existing deep learning approaches for shape segmentation, we do not rely on any 3D dataset, nor are we bounded to a specific shape category or set of parts. Instead, we specify the desired localization using text and a pre-trained CLIP model which encompasses rich semantic object understanding. 
Thus, our 3D Highlighter is capable of localizing various semantic regions on a wide variety of 3D shapes.", + "bbox": [ + 496, + 181, + 892, + 301 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Text-guidance. Recent works have leveraged pre-trained vision-language embedding spaces, such as CLIP [31], for analysis, synthesis, and editing. Some techniques leverage pre-trained image encoders for achieving semantic segmentation in images and neural radiance fields [2, 19, 21]. Such techniques are capable of segmenting entire objects within a scene based on text, e.g., a chair inside a room. However, they may struggle to segment parts within an object; e.g., failing to distinguish a window (part) from a house (object) [21].", + "bbox": [ + 496, + 306, + 890, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work is inspired by the emergent analysis in text-driven synthesis techniques for 3D data [10, 16, 18, 25, 30, 42]. Specifically, Text2Mesh [25] devised a framework for text-driven stylization of 3D meshes, observing that the resulting textures consider part-aware semantics. Yet, since Text2Mesh directly synthesizes stylizations, there is no obvious way to extract any underlying semantic analysis. To address this, we opt to use a highlighter color only as a means for visualizing the network-predicted segmentations.", + "bbox": [ + 496, + 457, + 890, + 606 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 619, + 589, + 635 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "An illustration of our method is shown in Fig. 5. The inputs to our system are a mesh $M$ , represented by vertices $V \\in \\mathbb{R}^{n \\times 3}$ and faces $F \\in \\{1, \\dots, n\\}^{m \\times 3}$ , and a text description $T$ . Our neural network, referred to as neural highlighter, is optimized to map vertex positions $v \\in V$ to a", + "bbox": [ + 496, + 645, + 890, + 722 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ea684b3ec72b0b19c97fc2fdb468f84d805f3b34d80640e79d6e262bad979e7b.jpg", + "image_caption": [ + "Figure 4. Localized editing. We incorporate textures and displacements to a region highlighted with 3D Highlighter. Used styles: Brick (left), Colorful Crochet (middle), Cactus (right)." + ], + "image_footnote": [], + "bbox": [ + 501, + 739, + 888, + 853 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20932", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/37f87f8d97108d41f0f6ba47ae01309f80e0de863dac84e8359d80d6fcc580bd.jpg", + "image_caption": [ + "Figure 5. Overview of 3D Highlighter. The Neural Highlighter maps each point on the input mesh to a probability. The mesh is colored using a probability-weighted blend and then rendered from multiple views. The neural highlighter weights are guided by the similarity between the CLIP embeddings of the 2D augmented images and the input text." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 893, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "probability $p$ of belonging to the text-specified region. Each vertex on the mesh is colored according to a probability-weighted blend between the highlighter color and a gray background color. The resulting highlighted mesh $M'$ is rendered from multiple views, and we apply 2D augmentations to obtain a set of images. 
We supervise the network optimization by comparing the CLIP-embedded images to the CLIP embedding of the desired text.", + "bbox": [ + 75, + 277, + 472, + 401 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Neural Highlighter", + "text_level": 1, + "bbox": [ + 76, + 411, + 261, + 428 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our neural highlighter is a neural field [44] mapping coordinates $\\mathbf{x} \\in \\mathbb{R}^3$ to $p \\in [0,1]$ , where $p$ is the probability that $\\mathbf{x}$ belongs to the text-specified region. The neural highlighter is represented as a multi-layer perceptron (MLP) $\\mathcal{F}_{\\theta}$ that takes an input vertex $v$ in the form of a 3D coordinate $\\mathbf{x}_v = (x,y,z)$ and predicts a highlight probability $p_v$ , $\\mathcal{F}_{\\theta}(\\mathbf{x}_v) = p_v$ . This formulation allows us to query the neural field to obtain meaningful highlight probabilities for any 3D point on (or near) the mesh surface. Thus, once optimized, the network weights conveniently transfer the localization to different meshes of the same object without requiring further optimization (Fig. 9).", + "bbox": [ + 75, + 435, + 468, + 618 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Representing our neural highlighter as an MLP produces contiguous localizations and reduces artifacts. MLPs have been shown to exhibit a spectral bias towards smooth solutions [32], especially on low-dimensional inputs such as 3D coordinates [38]. The bias towards low-frequency outputs encourages our 3D Highlighter to predict contiguous localizations with sharp boundaries and discourages noisy highlights (Fig. 7). For this reason, our approach does not utilize positional encoding. See supplemental material for", + "bbox": [ + 75, + 618, + 470, + 755 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/00d170565c319d089b1f02247d1441eae50c822102195bd45107ab7ca0b587ad.jpg", + "image_caption": [ + "Figure 6. Viewpoint robustness. Our system produces consistent results even when using different primary viewpoints. Results for three different primary viewpoints for the target text 'necklace'." + ], + "image_footnote": [], + "bbox": [ + 78, + 776, + 467, + 853 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "additional details.", + "bbox": [ + 500, + 277, + 620, + 292 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Mesh Color Blending", + "text_level": 1, + "bbox": [ + 500, + 301, + 700, + 318 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We leverage the per-point highlight probability to color the mesh in a continuous, differentiable manner, generating semantically meaningful renders for CLIP supervision. We use a probability-weighted blend, where each vertex color $C_v$ is a linear combination of the highlight color $H$ and gray color $G$ weighted by the network-predicted highlight probability $C_v = p_v \\cdot H + (1 - p_v) \\cdot G$ .", + "bbox": [ + 498, + 325, + 890, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At the start of the optimization process, all vertex probabilities are initialized near 0.5 and thus the entire mesh is half-lighted. As the optimization progresses, vertices smoothly transition towards gray or highlighter color (based on the network predictions) such that vertices predicted to be highlighted adhere to the text-specified region. This formulation translates each step of the optimization to a colored mesh that is semantically meaningful to CLIP. 
Our method provides continuous gradients, in contrast to coloring vertices according to the argmax of the highlight probability. Our blending scheme results in a smoother optimization landscape and reduces highlight artifacts (Fig. 7).", + "bbox": [ + 496, + 431, + 892, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This formulation is also important for downstream applications that wish to use the localizations, e.g. editing and stylization. Predicting per-point highlight probabilities provides an explicit representation of the highlight region on the mesh surface. An alternative approach, optimizing the surface color directly, would only provide a visual result without explicit information about which vertices belong to the localization.", + "bbox": [ + 496, + 612, + 893, + 733 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Unsupervised Guidance", + "text_level": 1, + "bbox": [ + 500, + 741, + 720, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We guide our neural optimization using the joint vision-language embedding space of CLIP [31]. We formulate the desired highlight by describing the association between the input mesh [object] and target localization [region]. Specifically, we design our target text $T$ to be: \"a gray [object] with highlighted [region].\" We render the highlighted geometry from multiple views using differentiable rendering [4]. At each optimization step, we randomly sample $n$ views from a Gaussian distribution centered around a primary view. This", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "20933", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ensures that the underlying object is recognizable in the majority of views shown to CLIP.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In a preliminary viewpoint prediction stage, we render $360^{\\circ}$ views of the mesh and measure the CLIP similarity to the target text prompt. We select the primary view to be the render with the highest CLIP similarity. We found that there exist many possible viewpoints which produce desirable highlighter results (see Fig. 6). More details about how the primary view is selected can be found in the supplemental material.", + "bbox": [ + 75, + 121, + 467, + 241 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For each view $\\psi$ , we render a 2D image $I_{\\psi}$ and apply a random perspective 2D augmentation $\\phi$ , as done in previous works [9, 25]. We then encode each of the augmented images into the CLIP embedding space (in $\\mathbb{R}^{768}$ ) using CLIP's image encoder, denoted as $E_I$ . Our final aggregate image representation $\\mathsf{e}_I$ is the average CLIP encoding over all views:", + "bbox": [ + 75, + 244, + 467, + 347 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {e} _ {I} = \\frac {1}{n} \\sum_ {\\psi} E _ {I} \\left(\\phi \\left(I _ {\\psi}\\right)\\right) \\in \\mathbb {R} ^ {7 6 8}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 349, + 468, + 385 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similarly, we encode the target selection text $T$ with CLIP's text encoder $E_{T}$ to get the encoded target representation $\\mathsf{e}_T = E_T(T)\\in \\mathbb{R}^{768}$ . 
Our loss $\\mathcal{L}$ for optimizing the neural highlighter parameters $\\theta$ is formulated as the negative cosine similarity between the aggregate image embedding and the text embedding:", + "bbox": [ + 75, + 393, + 468, + 484 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\underset {\\theta} {\\operatorname {a r g m i n}} \\mathcal {L} (\\theta) = - \\frac {\\mathrm {e} _ {I} \\cdot \\mathrm {e} _ {T}}{\\left| \\mathrm {e} _ {I} \\right| \\cdot \\left| \\mathrm {e} _ {T} \\right|}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 497, + 468, + 527 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When the loss is minimized, the CLIP embedding of the rendered highlighted mesh becomes similar to the target text embedding. Thus, the localized region will reflect the target text region.", + "bbox": [ + 75, + 539, + 468, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 616, + 207, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section we examine various capabilities of 3D Highlighter. First, we demonstrate the fidelity of our highlighter localization in Sec. 4.1, including qualitative and quantitative evaluations. As far as we can ascertain, our method is the first technique to perform text-driven localization on 3D shapes without pre-training on 3D data. Thus, we adapt an existing language-guided segmentation technique for 2D images to serve as a baseline [21]. Moreover, we demonstrate the robustness of 3D Highlighter in Sec. 4.2. Then we explore several applications of our method in Sec. 4.3, such as selective editing, localized manipulation, and segmentation. Finally, in Sec. 4.4 we evaluate the influence of key components of 3D Highlighter and discuss its limitations in Sec. 4.5.", + "bbox": [ + 75, + 642, + 468, + 852 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We apply our method to a large variety of meshes from different sources: COSEG [41], Turbo Squid [40], Thingi10K [48], Toys4k [34], ModelNet [43], and", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ShapeNet [3]. 3D Highlighter does not impose any restrictions on the mesh quality; many of the meshes used contain artifacts, such as elements that are non-manifold, unoriented, and contain boundaries or self-intersections. Our PyTorch [29] implementation optimization takes around 5 minutes to run on an Nvidia A40 GPU. In our experiments, we used CLIP ViT-L/14 at $224 \\times 224$ resolution.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Generality and Fidelity of 3D Highlighter", + "text_level": 1, + "bbox": [ + 498, + 207, + 852, + 223 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Highlight generality. 3D Highlighter is not restricted to any particular category for either the input mesh or the text-specified localization, since it does not rely on a 3D dataset or 3D pre-training. In Fig. 2, we see our method achieves accurate localization for a diverse collection of meshes from various domains such as humanoids, animals, and manufactured objects. 3D Highlighter is capable of localizing a wide variety of diverse attributes even when the context of these target attributes is entirely unrelated to the input mesh. 
Moreover, 3D Highlighter demonstrates that it can perform hallucinated highlighting, where it selects regions on meshes with no underlying geometric signal (such as a bow tie on a camel or a hat on a pig).", + "bbox": [ + 496, + 229, + 890, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Highlight specificity. In Fig. 3, we observe that semantic differences are reflected in the network-predicted highlight. 3D Highlighter is able to successfully localize different text-specified regions on the same mesh. Our framework demonstrates the nuanced understanding required to disambiguate different target regions, such as headphones and hat on the rabbit. Finally, the ability to identify many different regions on a single mesh allows users intuitive, comprehensive, and fine-grained control over part localization.", + "bbox": [ + 496, + 431, + 890, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Quantitative evaluation. 3D Highlighter is the first system to select semantic regions on 3D shapes using text guidance, without any 3D datasets. Since there are no quantitative benchmarks to evaluate the quality of our highlights, we do so with a perceptual user study.", + "bbox": [ + 496, + 571, + 890, + 647 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Moreover, since there are no existing approaches for text-based segmentation in 3D, we create two baselines by", + "bbox": [ + 498, + 648, + 890, + 678 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/bc15b6e2fd97a1bca99a84cbc1cc4d816c22f4721d8c569ef7f947c5c65b0fdf.jpg", + "image_caption": [ + "full 0.332", + "Figure 7. Ablation experiments. We present ablation results for target text 'shoes' using our system (full), direct optimization (direct), without probability-weighted blending (no blend), and without 2D augmentations (no augs). Resulting CLIP scores shown below each image." + ], + "image_footnote": [], + "bbox": [ + 501, + 702, + 596, + 768 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/52a2f7096b5ab00c340d7d071fed787c78435618dc985fa5fa6b478c27ed7455.jpg", + "image_caption": [ + "direct 0.319" + ], + "image_footnote": [], + "bbox": [ + 601, + 702, + 691, + 768 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6b07bb4a37b67c5f9e25a7fb0f25dd1d8c1db0da75ff7c2ab87ecddb8d4b90de.jpg", + "image_caption": [ + "no blend 0.297" + ], + "image_footnote": [], + "bbox": [ + 696, + 702, + 789, + 768 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8670507fec27605e15a778c258920e876d7976893e58d1abc1a1f06678d85e08.jpg", + "image_caption": [ + "no augs 0.287" + ], + "image_footnote": [], + "bbox": [ + 794, + 702, + 885, + 768 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "20934", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/b3be0d0e666ca5f462af074cce672d8c2cae5aff8a5262c1ee93e5c9b349656c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | Control | LSeg | Text2LIVE | Ours
Average Score ↑ | 1.00 | 1.26 | 2.23 | 4.38
", + "bbox": [ + 89, + 88, + 459, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Perceptual study. We extend two image-based approaches LSeg [21] (segmentation) and Text2LIVE [2] (localized editing) to the highlighting task and report mean user rating.", + "bbox": [ + 75, + 143, + 468, + 186 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "extending two different 2D image-based approaches. The first baseline extends LSeg [21] which directly predicts a segmentation in 2D, while the second baseline extends Text2LIVE [2] which infers an edit mask for 2D image manipulation. To evaluate these baselines, we render a bare mesh from a view where the target localization region is clearly visible. We extract the 2D segmentation produced by the image baselines and use it to color the rendered image. Then we ask users to rate the highlight quality of both baselines and our 3D Highlighter result rendered from the same view in our perceptual study.", + "bbox": [ + 75, + 224, + 468, + 391 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our perceptual study reports quantitative results on the quality of highlights from both 3D Highlighter and baselines. Users were asked to rate each result from 1-5 on how effectively the highlight represents \"an [object] with a region corresponding to a [region] highlighted.\" Visual examples from our study are shown in the supplemental material (Fig. 21). In total, 33 users evaluated each method on 5 mesh and region combinations.", + "bbox": [ + 75, + 393, + 468, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our 3D Highlighter achieved the highest ratings compared to the baselines (Tab. 1). LSeg is built for text-driven semantic segmentation and excels at segmenting entire objects within a scene. However, LSeg struggles to identify parts within a single object, leading to subpar performance on our highlighting task. Text2LIVE was not explicitly built for segmentation, however it does rely on inferring a continuously-valued edit mask (i.e. a soft-segmentation) when performing localized image editing. The edit mask is designed to produce high-quality image manipulations; however, it is not directly suitable for identifying the sharp segmentation boundaries required for our highlighting task. Qualitative comparisons and an additional quantitative comparison using a modified CLIP R-Precision metric are discussed in the supplemental material.", + "bbox": [ + 75, + 513, + 468, + 741 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Robustness of 3D Highlighter", + "text_level": 1, + "bbox": [ + 76, + 751, + 339, + 767 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Localization transfer. An important benefit of formulating 3D Highlighter as a neural field optimization is the ability to trivially transfer localization results between different meshings. This ability is useful for many tasks in geometry processing which require an object to be re-triangulated, simplified, subdivided, or otherwise remeshed. Localization transfer is possible since our neural highlighter is represented as a field over the shape and is independent of any", + "bbox": [ + 75, + 780, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/966054bc4a85eb1b71c9edcf7fe929a75c3553c5be242fe48ac20cedf257a5fb.jpg", + "image_caption": [ + "Figure 8. Controlled stylization. 
Given three different stylizations of the same object, we use 3D Highlighter to select different regions and combine them together (Ours). Attempting to achieve this composition with a holistic approach leads to an undesirable result (Text2Mesh [25])." + ], + "image_footnote": [], + "bbox": [ + 501, + 85, + 893, + 236 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "specific meshing. Although the neural highlighter is trained on mesh vertices, the resulting network encodes a smooth field and produces meaningful outputs for any 3D point on (or near) the mesh surface.", + "bbox": [ + 496, + 332, + 890, + 392 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Fig. 9, we show an optimization of the 3D Highlighter on a single mesh triangulation (original) for the prompt 'shoes'. We then apply the already-optimized neural highlighter to remeshed (middle) and subdivided (right) versions of the original mesh, showing the transferability of the selected region to different triangulations. This result demonstrates how 3D Highlighter is independent of the input mesh and that, once we have a localization for one mesh, we can trivially transfer it to any other meshing of the same object.", + "bbox": [ + 496, + 393, + 892, + 530 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Viewpoint robustness. Our method is robust to the primary view choice. This property is important for our localization task, as we may not know a priori which view is ideal. In Fig. 6, we perform our optimization using three different primary viewpoints: $0^{\\circ}$ , $90^{\\circ}$ , and $-90^{\\circ}$ (viewpoints shown in blue). We then present predicted localizations, showing that for all three views, 3D Highlighter is able to accurately identify the target localization region, regardless of whether that region is visible from the primary view.", + "bbox": [ + 496, + 532, + 893, + 669 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "From the $-90^{\\circ}$ primary view, the target region (the neck) is not visible. However, is still visible with a low probability for views sampled from the Gaussian distribution", + "bbox": [ + 496, + 669, + 890, + 714 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f018918fbaf8736ed616c12a107abff622ccc97a3598f00583aa31289e4ad0d6.jpg", + "image_caption": [ + "Figure 9. Localization transfer. We optimize our neural highlighter on one mesh (original) for the prompt 'shoes'. Once optimized, the network weights transfer the localization to different meshings of the same object (remeshed and subdivided)." + ], + "image_footnote": [], + "bbox": [ + 501, + 736, + 888, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20935", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "around the primary view. This means that over the course of optimization, regions other than the neck are mostly seen while the target region is rarely visible. Nonetheless, our method manages to highlight the desired region, which implies its robustness to how frequently the target region for localization is seen. Furthermore, it shows that oversampling views where the target region is not visible does not negatively influence the optimization.", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. 
Applications of 3D Highlighter", + "text_level": 1, + "bbox": [ + 76, + 220, + 349, + 237 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Selective editing. In Fig. 4, we show that it is possible to use 3D Highlighter to selectively edit a 3D object within a semantic region. This is applicable to techniques which incorporate global texture or material properties over the entire shape, such as in Text2Mesh [25] or MatCap [39]. Starting with different bare input meshes, we edit the entire shape using a global stylization technique [25]. Then, we use 3D Highlighter to select a text-specified region and incorporate the modifications only in the selected area. Thus 3D Highlighter provides direct control over where to stylize shapes, enabling users to obtain localized stylizations based on semantic cues.", + "bbox": [ + 75, + 243, + 468, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Controlled stylization via composition. Achieving compositionality with language models is a challenging task [33]. For example, starting with a human mesh and using Text2Mesh [25] to stylize 'Iron Man with the head of Steve Jobs and Yeti legs', leads to muddled and undesirable results (Fig. 8, rightmost). Our method enables compositionality between different shape modifications by chaining simple concepts together (Fig. 8). Specifically, we decompose the desired modification into three separate attainable targets ('Iron Man', 'Steve Jobs', and 'Yeti'), which we stylize individually with Text2Mesh. We then utilize our 3D Highlighter to localize the text-specified regions. We achieve the desired composition by combining the highlighted regions together, obtaining clear boundaries between stylizations.", + "bbox": [ + 75, + 428, + 468, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Semantic segmentation. In Fig. 10, we show that our technique is not restricted to hallucinated highlighting and is capable of localizing semantically-specified geometric regions. These text-driven localizations identify unique geometric parts without utilizing any 3D datasets or part labels.", + "bbox": [ + 75, + 657, + 468, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Components of 3D Highlighter", + "text_level": 1, + "bbox": [ + 76, + 741, + 349, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study. Several components are key for facilitating 3D Highlighter. We provide ablation results in Fig. 7 to demonstrate the effect of our design choices. First, using a direct optimization of the vertex color (direct) instead of optimizing a neural field results in splotchy highlight artifacts. Since the neural field has a spectral bias towards smooth solutions [32], omitting it leads to an undesired noisy output. 
Second, removing the probability weighted blending (no blend) and instead coloring vertices using only", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5f07d2dbe7886e50bd891038211e2c20cd2aa37502ddaa9905c6454b084a0a90.jpg", + "image_caption": [ + "Arm" + ], + "image_footnote": [], + "bbox": [ + 526, + 90, + 617, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6b22d823e0eed84c95e938a67070eeed0862e956e2f688fba10eb3562f8f2beb.jpg", + "image_caption": [ + "Slide" + ], + "image_footnote": [], + "bbox": [ + 632, + 90, + 700, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/68ef7f752136e4660dafb1582c8a971b0bd35ab5640a501d31930cc7a2fb506d.jpg", + "image_caption": [ + "Propeller", + "Figure 10. Semantic Segmentation. 3D Highlighter produces semantic segmentations for unique geometric parts without any 3D dataset or annotations." + ], + "image_footnote": [], + "bbox": [ + 707, + 152, + 867, + 212 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "two distinct values also produces a noisy highlight pattern. Without a continuous color blend, the gradients become ill-conditioned and unstable, leading to highlight artifacts and irregular localization boundaries. Lastly, similar to previous works [9, 25], we observe that without 2D perspective augmentations (no augs), 3D Highlighter outputs degenerate solutions. The ablation study emphasizes the importance of our key design choices in 3D Highlighter for its ability to highlight a coherent and localized region on the input shape.", + "bbox": [ + 496, + 297, + 890, + 434 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Prompt formulation and CLIP understanding. Our prompt formulation combined with our coloring scheme results in the correct association between objects and their properties, a known challenge when using CLIP [33]. In Fig. 12, we analyze the CLIP score for two different prompts: 'gray chair with highlighted back' (left) and 'blue chair with red back' (right). For each prompt, we measure the CLIP similarity to renders of both the correct assignment and flipped assignment.", + "bbox": [ + 496, + 436, + 890, + 573 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We observe that our prompt formulation ('gray chair with highlighted back') results in a higher average CLIP score for the correct assignment. In contrast, when specifying colors in the prompt ('blue chair with red back') and styling the mesh accordingly, we see higher CLIP scores for the flipped association. Using the same gray and yellow renders (left), we also compare to a prompt specifying colors ('gray chair with yellow back') and find that the higher", + "bbox": [ + 496, + 574, + 890, + 695 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/97dddc40b0091821cfab65ba2c7e2cfb43f71a6956850708ff94d0d1f24f3e7e.jpg", + "image_caption": [ + "Figure 11. Network initialization. We optimize 3D Highlighter for the text prompt 'belt' using different initialization methods: using a default initialization where all output probabilities are near 0.5 (middle) or altering the final layer so that all outputs are 0 (left) or 1 (right). Initializing with 0 or 1 leads to an undesirable result." 
+ ], + "image_footnote": [], + "bbox": [ + 540, + 715, + 851, + 825 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20936", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/46a442d6d6c2c3000f719cefdc0f5284a2910bc7d071a06bbdd8e125c11897ff.jpg", + "image_caption": [ + "Figure 12. CLIP understanding. We examine CLIP similarity scores for several prompt formulations targeting the 'back' of the chair while using the correct color assignment and where the coloring is flipped. For the prompt 'gray chair with highlighted back' (left) we observe that the CLIP score is higher for the correct assignment. For the prompt 'blue chair with red back' (right) the CLIP score is higher for the flipped (incorrect) assignment." + ], + "image_footnote": [], + "bbox": [ + 114, + 70, + 429, + 209 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "CLIP score corresponds to the flipped selection (data not shown).", + "bbox": [ + 75, + 335, + 468, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We also measure the CLIP scores for our standard prompt formulation: 'gray chair with highlighted back', replacing the yellow color in the rendering with other colors, such as red and blue, and find that the correct selection has a higher CLIP score (data not shown). To conclude, our prompt formulation (i.e., the use of the term 'highlighted') coincides with CLIP's understanding and 3D Highlighter is robust to the highlight color.", + "bbox": [ + 75, + 366, + 470, + 487 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Network initialization. Initializing the network such that the object is partially highlighted (i.e., with highlight probability equal to 0.5) is important for obtaining desirable results. In Fig. 11, we show the optimization of our method for the target text prompt 'belt' using three different initializations. Our method (middle) initializes all output probabilities near 0.5 by random weight initialization of the network. We compare to initializing the output probabilities to 0 (left) or 1 (right), in which we set the weights of the last layer to 0, and the bias to 0 or 1, respectively.", + "bbox": [ + 75, + 489, + 468, + 641 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For the initialization to both 0.5 and 1, a highlight color is uniformly present on the styled mesh, whereas with 0, the mesh is gray with no highlight. Consequently, we hypothesize that the presence of highlight color at initialization is important for CLIP's supervision.", + "bbox": [ + 75, + 642, + 470, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Limitations", + "text_level": 1, + "bbox": [ + 76, + 726, + 204, + 742 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3D Highlighter is robust to variations of the object specification in the target prompt. However, there should still be a logical connection between the 3D shape and its description. Fig. 13 shows results for a camel mesh and the target highlight 'shinguards'. For each optimization, we use a slightly different target prompt by varying the object specification. The prompts are of the form \"[object] with highlighted shinguards\", where [object] is replaced with camel, pig, animal, or chair.", + "bbox": [ + 75, + 750, + 468, + 885 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Fig. 
13, we observe that with object specifications", + "bbox": [ + 96, + 885, + 470, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "that resemble the geometry of camel, such as pig and animal, 3D Highlighter accurately localizes the desired region. However, for a description that is incompatible with the object's geometry (i.e., referring to a camel as a chair), our method does not produce meaningful results. This result sheds light on 3D Highlighter's robustness to text descriptions: 3D Highlighter is able to reason about a mesh even when its description is not perfectly accurate, provided that it is sufficiently similar to the true description (i.e., referring to a camel mesh as a pig).", + "bbox": [ + 496, + 90, + 893, + 243 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 270, + 627, + 286 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present a technique for highlighting semantic regions on meshes using text as input, without any 3D datasets or 3D pre-training. 3D Highlighter can reason about where to place a non-obviously related part on a 3D object (i.e. a hat on a candle). The ability to combine unconnected parts and objects together is reminiscent of ideas from image analogies [12, 22]. In this work, we show that we can identify part-concepts that are geometrically absent from a shape, giving rise to our hallucinated highlighting capability.", + "bbox": [ + 496, + 296, + 890, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "During neural optimization, our neural network infers a probability which we use to blend the highlight color onto the mesh. The network-predicted probabilities are general, and provide a soft-segmentation which we show can be used for a variety of different applications (Figs. 4 and 8). In the future, we are interested in extending our framework to obtain part correspondence between shapes that differ topologically but are semantically related.", + "bbox": [ + 496, + 431, + 892, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 565, + 679, + 583 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Adobe Research. Finally, we would like to thank Richard Liu, Avery Zhou, and the members of 3DL for their thorough and insightful feedback on our work.", + "bbox": [ + 496, + 590, + 893, + 681 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d466cfa6af2a35e8ea84c273f2ed61469a2e59581258127dfb4738c887104207.jpg", + "image_caption": [ + "Figure 13. Prompt generality. Our system is robust to certain variations in object specifications. We achieve desirable results for the text input 'camel with highlighted shinguards' (left), as well as for other variations ('pig' and 'animal'). If the object specification, such as 'chair', is incompatible with the input geometry, 3D Highlighter no longer produces meaningful results." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 700, + 890, + 810 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20937", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shmuel Asafi, Avi Goren, and Daniel Cohen-Or. Weak convex decomposition by lines-of-sight. Computer graphics forum, 32(5):23-31, 2013. 3", + "[2] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022. 3, 6, 12, 13", + "[3] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 5", + "[4] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019. 4", + "[5] Zhiqin Chen, Kangxue Yin, Matthew Fisher, Siddhartha Chaudhuri, and Hao Zhang. Bae-net: Branched autoencoder for shape co-segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8490-8499, 2019. 3", + "[6] Nicu D Cornea, Deborah Silver, and Patrick Min. **Curve-skeleton properties, applications, and algorithms. IEEE Transactions on visualization and computer graphics*, 13(3):530, 2007. 3", + "[7] Boyang Deng, Kyle Genova, Soroosh Yazdani, Sofien Bouaziz, Geoffrey Hinton, and Andrea Tagliasacchi. Cvxnet: Learnable convex decomposition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 31-44, 2020. 3", + "[8] Tamal K Dey and Wulue Zhao. Approximating the medial axis from the voronoi diagram with a convergence guarantee. Algorithmica, 38(1):179-200, 2004. 3", + "[9] Kevin Frans, Lisa B. Soros, and Olaf Witkowski. Clipdraw: Exploring text-to-drawing synthesis through language-image encoders. ArXiv, abs/2106.14843, 2021. 5, 7", + "[10] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. arXiv preprint arXiv:2207.09446, 2022. 3", + "[11] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: A network with an edge. ACM Transactions on Graphics (TOG), 38(4):90:1–90:12, 2019. 3", + "[12] Aaron Hertzmann, Charles E Jacobs, Nuria Oliver, Brian Curless, and David H Salesin. Image analogies. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 327-340, 2001. 8", + "[13] Donald D Hoffman and Whitman A Richards. Parts of recognition. Cognition, 18(1-3):65-96, 1984. 3", + "[14] Yining Hong, Yilun Du, Chunru Lin, Josh Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. In Annual Conference on Neural Information Processing Systems, 2022. 3" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Shi-Min Hu, Zheng-Ning Liu, Meng-Hao Guo, Junxiong Cai, Jiahui Huang, Tai-Jiang Mu, and Ralph R. Martin. Subdivision-based mesh convolution networks. ACM Trans. Graph., 41(3):25:1-25:16, 2022. 
3", + "[16] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3, 12", + "[17] Oliver Van Kaick, Noa Fish, Yanir Kleiman, Shmuel Asafi, and Daniel Cohen-Or. Shape segmentation by approximate convexity analysis. ACM Transactions on Graphics (TOG), 34(1):1–11, 2014. 1", + "[18] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Popa Tiberiu. Clip-mesh: Generating textured meshes from text using pretrained image-text models. SIGGRAPH Asia 2022 Conference Papers, December 2022. 3", + "[19] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nef for editing via feature field distillation. arXiv, 2022. 3", + "[20] Alon Lahav and Ayellet Tal. Meshwalker: Deep mesh understanding by random walks. ACM Transactions on Graphics (TOG), 39(6):1-13, 2020. 3", + "[21] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In International Conference on Learning Representations, 2022. 3, 5, 6, 12, 13", + "[22] Jing Liao, Yuan Yao, Lu Yuan, Gang Hua, and Sing Bing Kang. Visual attribute transfer through deep image analogy. arXiv preprint arXiv:1705.01088, 2017. 8", + "[23] Jyh-Ming Lien and Nancy M Amato. Approximate convex decomposition of polyhedra. In Proceedings of the 2007 ACM symposium on Solid and physical modeling, pages 121-131, 2007. 3", + "[24] Haggai Maron, Meirav Galun, Noam Aigerman, Miri Trope, Nadav Dym, Ersin Yumer, Vladimir G Kim, and Yaron Lipman. Convolutional neural networks on surfaces via seamless toric covers. ACM Trans. Graph., 36(4):71-1, 2017. 3", + "[25] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3, 5, 6, 7, 11", + "[26] Francesco Milano, Antonio Loquercio, Antoni Rosinol, Davide Scaramuzzi, and Luca Carlone. Primal-dual mesh convolutional neural networks. Advances in Neural Information Processing Systems, 33:952-963, 2020. 3", + "[27] Kaichun Mo, Shilin Zhu, Angel X. Chang, Li Yi, Subarna Tripathi, Leonidas J. Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding, 2018. 1", + "[28] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 12" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20938", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[29] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 5", + "[30] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 3", + "[31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. 
arXiv preprint arXiv:2103.00020, 2021. 1, 2, 3, 4", + "[32] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 5301-5310. PMLR, 09-15 Jun 2019. 4, 7", + "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.7", + "[34] James Matthew Rehg. Toys4k 3d object dataset, 2022. https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k.5", + "[35] Ariel Shamir. A survey on mesh segmentation techniques. Computer graphics forum, 27(6):1539-1556, 2008. 3", + "[36] Nicholas Sharp, Souhaib Attaiki, Keenan Crane, and Maks Ovsjanikov. Diffusionnet: Discretization agnostic learning on surfaces. ACM Transactions on Graphics (TOG), 41(3):1-16, 2022. 3", + "[37] Weiwei Sun, Andrea Tagliasacchi, Boyang Deng, Sara Sabour, Soroosh Yazdani, Geoffrey E Hinton, and Kwang Moo Yi. Canonical capsules: Self-supervised capsules in canonical pose. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 24993-25005. Curran Associates, Inc., 2021. 3", + "[38] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 4, 12", + "[39] Hideki Todo, Ken Anjyo, and Shun'Ichi Yokoyama. Litsphere extension for artistic rendering. Vis. Comput., 29(6-8):473-480, jun 2013. 7", + "[40] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/.5", + "[41] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553–562, 2011. 5", + "[42] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3", + "[43] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 5", + "[44] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Computer Graphics Forum, 2022. 1, 4, 12", + "[45] Li Yi, Hao Su, Xingwen Guo, and Leonidas J Guibas. Syncspeccnn: Synchronized spectral cnn for 3d shape segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2282-2290, 2017. 3", + "[46] Fenggen Yu, Kun Liu, Yan Zhang, Chenyang Zhu, and Kai Xu. Partnet: A recursive part decomposition network for fine-grained and hierarchical shape segmentation. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9491-9500, 2019. 3", + "[47] Qian Zheng, Zhuming Hao, Hui Huang, Kai Xu, Hao Zhang, Daniel Cohen-Or, and Baoquan Chen. Skeleton-intrinsic symmetrization of shapes. Computer Graphics Forum, 34(2):275-286, 2015. 3", + "[48] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 5", + "[49] Chenyang Zhu, Kai Xu, Siddhartha Chaudhuri, Li Yi, Leonidas J Guibas, and Hao Zhang. Adacoseg: Adaptive shape co-segmentation with group consistency loss. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8543-8552, 2020. 3" + ], + "bbox": [ + 503, + 92, + 890, + 556 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20939", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_model.json b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..afeabd286d6a327766e2b690117fc42b4f805207 --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_model.json @@ -0,0 +1,2574 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.126, + 0.131, + 0.844, + 0.154 + ], + "angle": 0, + "content": "3D Highlighter: Localizing Regions on 3D Shapes via Text Descriptions" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.182, + 0.294, + 0.198 + ], + "angle": 0, + "content": "Dale Decatur" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.199, + 0.329, + 0.217 + ], + "angle": 0, + "content": "University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.22, + 0.334, + 0.234 + ], + "angle": 0, + "content": "ddecatur@uchicago.edu" + }, + { + "type": "text", + "bbox": [ + 0.434, + 0.182, + 0.51, + 0.199 + ], + "angle": 0, + "content": "Itai Lang" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.199, + 0.56, + 0.218 + ], + "angle": 0, + "content": "University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.379, + 0.22, + 0.566, + 0.234 + ], + "angle": 0, + "content": "itailang@uchicago.edu" + }, + { + "type": "text", + "bbox": [ + 0.66, + 0.182, + 0.778, + 0.198 + ], + "angle": 0, + "content": "Rana Hanocka" + }, + { + "type": "text", + "bbox": [ + 0.63, + 0.199, + 0.807, + 0.218 + ], + "angle": 0, + "content": "University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.612, + 0.22, + 0.826, + 0.234 + ], + "angle": 0, + "content": "ranahanocka@uchicago.edu" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.253, + 0.216, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.403, + 0.163, + 0.415 + ], + "angle": 0, + "content": "Hat" + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.261, + 0.419, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.403, + 0.338, + 0.416 + ], + "angle": 0, + "content": "Necklace" + }, + { + "type": "image", + "bbox": [ + 0.4, + 0.261, + 0.586, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.452, + 0.403, + 0.525, + 0.417 + ], + "angle": 0, + "content": "Headlights" + }, + { + "type": "image", + "bbox": [ + 0.59, + 0.261, + 0.788, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.403, + 0.712, + 0.416 + ], + "angle": 0, + "content": "Shoes" + }, + { + "type": "image", + "bbox": [ + 0.799, + 0.253, + 0.89, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.807, + 0.403, + 0.882, + 0.417 + ], + "angle": 0, + "content": "Eyeglasses" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.42, + 0.893, + 0.449 + ], + "angle": 0, + "content": "Figure 1. 3D Highlighter localizes semantic regions on a shape using text as input. Our technique reasons about where to place seemingly unrelated concepts in semantically meaningful locations on the 3D shape, such as a 'necklace' on a horse or 'shoes' on an alien." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.461, + 0.314, + 0.477 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.493, + 0.474, + 0.721 + ], + "angle": 0, + "content": "We present 3D Highlighter, a technique for localizing semantic regions on a mesh using text as input. A key feature of our system is the ability to interpret \"out-of-domain\" localizations. Our system demonstrates the ability to reason about where to place non-obviously related concepts on an input 3D shape, such as adding clothing to a bare 3D animal model. 
Our method contextualizes the text description using a neural field and colors the corresponding region of the shape using a probability-weighted blend. Our neural optimization is guided by a pre-trained CLIP encoder, which bypasses the need for any 3D datasets or 3D annotations. Thus, 3D Highlighter is highly flexible, general, and capable of producing localizations on a myriad of input shapes. Our code is publicly available at https://github.com/threedle/3DHighlighter." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.739, + 0.21, + 0.755 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.764, + 0.47, + 0.87 + ], + "angle": 0, + "content": "Semantic localization of regions on 3D meshes is an important problem in computer graphics and vision with broad applications. One such application is the incorporation of semantic information into the 3D modeling process. A particularly challenging aspect of this task emerges when 3D geometric signals are insufficient for performing segmentation, e.g. where to add a shirt to a bare 3D human model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "We propose 3D Highlighter, a method for automatically localizing fine-grained semantic regions on a shape based" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.893, + 0.553 + ], + "angle": 0, + "content": "on only a text description. Our system contextualizes the text prompt and highlights the corresponding shape region using the network-predicted probabilities. Using only text, users are able to semantically identify regions on a shape. Our system takes meshes as input, making it compatible with 3D modeling workflows and tools." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.553, + 0.893, + 0.689 + ], + "angle": 0, + "content": "This highlighting task requires both object-level and part-level understanding. 3D Highlighter demonstrates the ability to reason about where to place seemingly unrelated concepts on the 3D shape, such as a hat on a candle (Fig. 1). Our system localizes attributes that are geometrically absent from a shape, which we refer to as hallucinated highlighting. Understanding a part's global shape context is challenging even when relying on salient geometric features [17,27], let alone without them." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.893, + 0.839 + ], + "angle": 0, + "content": "We optimize the weights of a neural network to produce probabilities that are used to color a given 3D shape in accordance with the specified text. We leverage a pre-trained vision-language model (CLIP [31]) to guide the neural optimization towards the text-specified region. This neural optimization formulation is flexible, bypassing the need for any 3D datasets, 3D annotations, or 3D pre-training. Our system is not bound to a specific set of classes, and, as shown in Fig. 2, is not limited to object parts defined by salient geometric features." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "We encode the part selection as a neural field [44] over the mesh surface. Our network learns to map each point on the surface to a probability of belonging to the text-specified region. 
We translate the inferred probabilities to a visual at-" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20930" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.083, + 0.086, + 0.895, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.548, + 0.895, + 0.577 + ], + "angle": 0, + "content": "Figure 2. Hallucinated part highlighting. Our system is able to reason about where to highlight a geometrically-absent region on shapes. The resulting localizations demonstrate global understanding and localized part-awareness." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.603, + 0.473, + 0.725 + ], + "angle": 0, + "content": "tribute on the mesh surface, which can be rendered and visually understood. The network-predicted probabilities act as a soft-selection operator which blends the highlighter color onto the mesh. The network weights are updated by encouraging the CLIP [31] embedding of the 2D renders of the highlighted mesh to adhere to the specified text. As a result, the network implicitly learns to segment the object to adhere to the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We make several design choices that are key to the success of 3D Highlighter. Our network does not directly color the mesh. Rather, we predict a probability of being inside the text-specified highlight, which is used to blend colors on the mesh. The network is initialized such that points have roughly a \\(50\\%\\) probability of being highlighted, resulting in a mesh with albedo halfway between the highlight and background color. During optimization, the relative blend weight of the highlight color directly corresponds to the highlight probability. This blending enables the network to naturally and smoothly increase or decrease the segmenta" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.603, + 0.892, + 0.633 + ], + "angle": 0, + "content": "tion probability in accordance with the text specification of the target region." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.633, + 0.893, + 0.816 + ], + "angle": 0, + "content": "In summary, we present a method for localizing semantic regions on 3D shapes. The localization is specified by a textual description, which is intuitive, flexible, and not limited to a specific training dataset. We demonstrate applications of our method to shape editing and stylization. Furthermore, our field formulation enables the 3D Highlighter to work with different mesh resolutions and triangulations. A key feature of our system is the ability to interpret out-of-domain localizations. For example, 3D Highlighter is able to figure out where to place a 'hat' on a candle as seen in Fig. 1, demonstrating the ability to reason about where to place seemingly unrelated concepts on the 3D shape." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.827, + 0.642, + 0.843 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Geometry-driven segmentation. 
Traditional works in geometry processing use low-level geometric features (such as surface area, curvature, or geodesic distance) in or-" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.958 + ], + "angle": 0, + "content": "20931" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.083, + 0.089, + 0.201, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.173, + 0.189, + 0.188 + ], + "angle": 0, + "content": "Headphones" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.089, + 0.332, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.261, + 0.173, + 0.302, + 0.186 + ], + "angle": 0, + "content": "Shoes" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.089, + 0.457, + 0.171 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.386, + 0.173, + 0.413, + 0.185 + ], + "angle": 0, + "content": "Hat" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.192, + 0.197, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.121, + 0.26, + 0.162, + 0.272 + ], + "angle": 0, + "content": "Shoes" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.192, + 0.331, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.26, + 0.296, + 0.272 + ], + "angle": 0, + "content": "Necklace" + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.192, + 0.468, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.372, + 0.26, + 0.424, + 0.272 + ], + "angle": 0, + "content": "Glasses" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.281, + 0.199, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.349, + 0.156, + 0.361 + ], + "angle": 0, + "content": "Belt" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.281, + 0.331, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.255, + 0.349, + 0.282, + 0.361 + ], + "angle": 0, + "content": "Hat" + }, + { + "type": "image", + "bbox": [ + 0.343, + 0.281, + 0.458, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.366, + 0.349, + 0.431, + 0.361 + ], + "angle": 0, + "content": "Necklace" + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.37, + 0.177, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.462, + 0.169, + 0.473 + ], + "angle": 0, + "content": "Necklace" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.372, + 0.305, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.256, + 0.462, + 0.292, + 0.473 + ], + "angle": 0, + "content": "Roof" + }, + { + "type": "image", + "bbox": [ + 0.363, + 0.371, + 0.43, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.462, + 0.415, + 0.473 + ], + "angle": 0, + "content": "Arms" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.476, + 0.47, + 0.546 + ], + "angle": 0, + "content": "Figure 3. Our method is able to highlight different parts on the same object. For target selections that correspond to distinct regions, 3D Highlighter produces selections that are semantically meaningful and spatially separated without signal from underlying geometry." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.575, + 0.468, + 0.696 + ], + "angle": 0, + "content": "der to infer high-level semantic attributes for segmenting shapes [35]. In particular, decomposing shapes into smaller parts or segments often corresponds with physical 3D semantic parts [13, 35]. One approach is to partition shapes based on convexity, or an approximation of convexity [1, 23]. The medial axis carries topological information, which may also be used as a guideline for segmentation [6,8,35,47]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.698, + 0.468, + 0.803 + ], + "angle": 0, + "content": "The underlying assumption in these works is that processing the local geometry can be used to understand the semantics for segmentation. By contrast, a key aspect of our work is the ability to perform hallucinated highlights: segmentations that can not necessarily be inferred by geometry alone. See example highlights in Fig. 2 (e.g., localizing a heart on a goat)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Data-driven segmentation. In the deep learning era, the 3D part segmentation task has been widely tackled by neural network models [11, 15, 20, 26, 36, 45]. Training such a model is typically done in a fully-supervised manner on a large dataset of shapes annotated with a given set of part classes. For example, MeshCNN [11] was trained on a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.182 + ], + "angle": 0, + "content": "human-body segmentation dataset [24] for learning semantic part segmentation. To alleviate the need for 3D annotations, unsupervised learning schemes utilize large collections of unlabelled data [5,7,14,37,49]. For example, Hong et al. [14] inferred part-segmentation through question answering on rendered images from PartNet [46]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.183, + 0.893, + 0.303 + ], + "angle": 0, + "content": "In contrast to existing deep learning approaches for shape segmentation, we do not rely on any 3D dataset, nor are we bounded to a specific shape category or set of parts. Instead, we specify the desired localization using text and a pre-trained CLIP model which encompasses rich semantic object understanding. Thus, our 3D Highlighter is capable of localizing various semantic regions on a wide variety of 3D shapes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.307, + 0.892, + 0.457 + ], + "angle": 0, + "content": "Text-guidance. Recent works have leveraged pre-trained vision-language embedding spaces, such as CLIP [31], for analysis, synthesis, and editing. Some techniques leverage pre-trained image encoders for achieving semantic segmentation in images and neural radiance fields [2, 19, 21]. Such techniques are capable of segmenting entire objects within a scene based on text, e.g., a chair inside a room. However, they may struggle to segment parts within an object; e.g., failing to distinguish a window (part) from a house (object) [21]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.458, + 0.892, + 0.607 + ], + "angle": 0, + "content": "Our work is inspired by the emergent analysis in text-driven synthesis techniques for 3D data [10, 16, 18, 25, 30, 42]. Specifically, Text2Mesh [25] devised a framework for text-driven stylization of 3D meshes, observing that the resulting textures consider part-aware semantics. 
Yet, since Text2Mesh directly synthesizes stylizations, there is no obvious way to extract any underlying semantic analysis. To address this, we opt to use a highlighter color only as a means for visualizing the network-predicted segmentations." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.621, + 0.59, + 0.636 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.892, + 0.723 + ], + "angle": 0, + "content": "An illustration of our method is shown in Fig. 5. The inputs to our system are a mesh \\(M\\), represented by vertices \\(V \\in \\mathbb{R}^{n \\times 3}\\) and faces \\(F \\in \\{1, \\dots, n\\}^{m \\times 3}\\), and a text description \\(T\\). Our neural network, referred to as neural highlighter, is optimized to map vertex positions \\(v \\in V\\) to a" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.741, + 0.89, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.855, + 0.892, + 0.897 + ], + "angle": 0, + "content": "Figure 4. Localized editing. We incorporate textures and displacements to a region highlighted with 3D Highlighter. Used styles: Brick (left), Colorful Crochet (middle), Cactus (right)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20932" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.086, + 0.895, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.21, + 0.895, + 0.255 + ], + "angle": 0, + "content": "Figure 5. Overview of 3D Highlighter. The Neural Highlighter maps each point on the input mesh to a probability. The mesh is colored using a probability-weighted blend and then rendered from multiple views. The neural highlighter weights are guided by the similarity between the CLIP embeddings of the 2D augmented images and the input text." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.279, + 0.473, + 0.402 + ], + "angle": 0, + "content": "probability \\( p \\) of belonging to the text-specified region. Each vertex on the mesh is colored according to a probability-weighted blend between the highlighter color and a gray background color. The resulting highlighted mesh \\( M' \\) is rendered from multiple views, and we apply 2D augmentations to obtain a set of images. We supervise the network optimization by comparing the CLIP-embedded images to the CLIP embedding of the desired text." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.412, + 0.262, + 0.429 + ], + "angle": 0, + "content": "3.1. Neural Highlighter" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.436, + 0.47, + 0.619 + ], + "angle": 0, + "content": "Our neural highlighter is a neural field [44] mapping coordinates \\(\\mathbf{x} \\in \\mathbb{R}^3\\) to \\(p \\in [0,1]\\), where \\(p\\) is the probability that \\(\\mathbf{x}\\) belongs to the text-specified region. The neural highlighter is represented as a multi-layer perceptron (MLP) \\(\\mathcal{F}_{\\theta}\\) that takes an input vertex \\(v\\) in the form of a 3D coordinate \\(\\mathbf{x}_v = (x,y,z)\\) and predicts a highlight probability \\(p_v\\), \\(\\mathcal{F}_{\\theta}(\\mathbf{x}_v) = p_v\\). This formulation allows us to query the neural field to obtain meaningful highlight probabilities for any 3D point on (or near) the mesh surface. 
Thus, once optimized, the network weights conveniently transfer the localization to different meshes of the same object without requiring further optimization (Fig. 9)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.619, + 0.471, + 0.756 + ], + "angle": 0, + "content": "Representing our neural highlighter as an MLP produces contiguous localizations and reduces artifacts. MLPs have been shown to exhibit a spectral bias towards smooth solutions [32], especially on low-dimensional inputs such as 3D coordinates [38]. The bias towards low-frequency outputs encourages our 3D Highlighter to predict contiguous localizations with sharp boundaries and discourages noisy highlights (Fig. 7). For this reason, our approach does not utilize positional encoding. See supplemental material for" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.777, + 0.468, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.856, + 0.473, + 0.899 + ], + "angle": 0, + "content": "Figure 6. Viewpoint robustness. Our system produces consistent results even when using different primary viewpoints. Results for three different primary viewpoints for the target text 'necklace'." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.279, + 0.622, + 0.293 + ], + "angle": 0, + "content": "additional details." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.302, + 0.702, + 0.319 + ], + "angle": 0, + "content": "3.2. Mesh Color Blending" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.326, + 0.892, + 0.432 + ], + "angle": 0, + "content": "We leverage the per-point highlight probability to color the mesh in a continuous, differentiable manner, generating semantically meaningful renders for CLIP supervision. We use a probability-weighted blend, where each vertex color \\( C_v \\) is a linear combination of the highlight color \\( H \\) and gray color \\( G \\) weighted by the network-predicted highlight probability \\( C_v = p_v \\cdot H + (1 - p_v) \\cdot G \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.893, + 0.613 + ], + "angle": 0, + "content": "At the start of the optimization process, all vertex probabilities are initialized near 0.5 and thus the entire mesh is half-lighted. As the optimization progresses, vertices smoothly transition towards gray or highlighter color (based on the network predictions) such that vertices predicted to be highlighted adhere to the text-specified region. This formulation translates each step of the optimization to a colored mesh that is semantically meaningful to CLIP. Our method provides continuous gradients, in contrast to coloring vertices according to the argmax of the highlight probability. Our blending scheme results in a smoother optimization landscape and reduces highlight artifacts (Fig. 7)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.613, + 0.895, + 0.734 + ], + "angle": 0, + "content": "This formulation is also important for downstream applications that wish to use the localizations, e.g. editing and stylization. Predicting per-point highlight probabilities provides an explicit representation of the highlight region on the mesh surface. An alternative approach, optimizing the surface color directly, would only provide a visual result without explicit information about which vertices belong to the localization." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.742, + 0.722, + 0.759 + ], + "angle": 0, + "content": "3.3. 
Unsupervised Guidance" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We guide our neural optimization using the joint vision-language embedding space of CLIP [31]. We formulate the desired highlight by describing the association between the input mesh [object] and target localization [region]. Specifically, we design our target text \\( T \\) to be: \"a gray [object] with highlighted [region].\" We render the highlighted geometry from multiple views using differentiable rendering [4]. At each optimization step, we randomly sample \\( n \\) views from a Gaussian distribution centered around a primary view. This" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20933" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "ensures that the underlying object is recognizable in the majority of views shown to CLIP." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.468, + 0.242 + ], + "angle": 0, + "content": "In a preliminary viewpoint prediction stage, we render \\(360^{\\circ}\\) views of the mesh and measure the CLIP similarity to the target text prompt. We select the primary view to be the render with the highest CLIP similarity. We found that there exist many possible viewpoints which produce desirable highlighter results (see Fig. 6). More details about how the primary view is selected can be found in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.245, + 0.468, + 0.348 + ], + "angle": 0, + "content": "For each view \\(\\psi\\), we render a 2D image \\(I_{\\psi}\\) and apply a random perspective 2D augmentation \\(\\phi\\), as done in previous works [9, 25]. We then encode each of the augmented images into the CLIP embedding space (in \\(\\mathbb{R}^{768}\\)) using CLIP's image encoder, denoted as \\(E_I\\). Our final aggregate image representation \\(\\mathsf{e}_I\\) is the average CLIP encoding over all views:" + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.35, + 0.469, + 0.386 + ], + "angle": 0, + "content": "\\[\n\\mathsf {e} _ {I} = \\frac {1}{n} \\sum_ {\\psi} E _ {I} \\left(\\phi \\left(I _ {\\psi}\\right)\\right) \\in \\mathbb {R} ^ {7 6 8}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.395, + 0.469, + 0.485 + ], + "angle": 0, + "content": "Similarly, we encode the target selection text \\(T\\) with CLIP's text encoder \\(E_{T}\\) to get the encoded target representation \\(\\mathsf{e}_T = E_T(T)\\in \\mathbb{R}^{768}\\). Our loss \\(\\mathcal{L}\\) for optimizing the neural highlighter parameters \\(\\theta\\) is formulated as the negative cosine similarity between the aggregate image embedding and the text embedding:" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.498, + 0.469, + 0.528 + ], + "angle": 0, + "content": "\\[\n\\underset {\\theta} {\\operatorname {a r g m i n}} \\mathcal {L} (\\theta) = - \\frac {\\mathrm {e} _ {I} \\cdot \\mathrm {e} _ {T}}{\\left| \\mathrm {e} _ {I} \\right| \\cdot \\left| \\mathrm {e} _ {T} \\right|}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.54, + 0.469, + 0.6 + ], + "angle": 0, + "content": "When the loss is minimized, the CLIP embedding of the rendered highlighted mesh becomes similar to the target text embedding. Thus, the localized region will reflect the target text region." 
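
The optimization loop of Secs. 3.1–3.3 can be sketched in a few lines of PyTorch. The snippet below is a minimal illustration rather than the authors' implementation: the MLP width/depth and highlight RGB are arbitrary choices, and `render_view`, `augment`, `encode_image`, and the precomputed text embedding `e_T` are hypothetical placeholders standing in for the differentiable renderer, the 2D perspective augmentation, and the CLIP encoders described above.

```python
import torch
import torch.nn as nn


class NeuralHighlighter(nn.Module):
    """Neural field sketch: maps a 3D coordinate to the probability of lying in
    the text-specified highlight. Width/depth are illustrative; no positional
    encoding is applied, matching the smoothness bias discussed in Sec. 3.1."""

    def __init__(self, width: int = 256, depth: int = 6):
        super().__init__()
        layers, in_dim = [], 3
        for _ in range(depth):
            layers += [nn.Linear(in_dim, width), nn.ReLU()]
            in_dim = width
        layers += [nn.Linear(in_dim, 1), nn.Sigmoid()]
        self.mlp = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # x: (n, 3) vertex positions
        return self.mlp(x)                                # (n, 1) highlight probabilities


def optimization_step(net, optimizer, verts, views, render_view, augment, encode_image, e_T,
                      highlight_color=(0.8, 0.7, 0.0), gray=(0.5, 0.5, 0.5)):
    """One gradient step of Eqs. (1)-(2). `render_view`, `augment`, `encode_image`,
    and `e_T` are caller-supplied placeholders for the differentiable renderer,
    the 2D perspective augmentation, CLIP's image encoder, and the encoded text."""
    p = net(verts)                                             # (n, 1)
    H = torch.tensor(highlight_color, device=verts.device)
    G = torch.tensor(gray, device=verts.device)
    vertex_colors = p * H + (1.0 - p) * G                      # probability-weighted blend (Sec. 3.2)
    embeddings = [encode_image(augment(render_view(verts, vertex_colors, psi)))
                  for psi in views]                            # views sampled around the primary view
    e_I = torch.stack(embeddings).mean(dim=0)                  # Eq. (1): average image embedding
    loss = -torch.nn.functional.cosine_similarity(e_I, e_T, dim=-1)  # Eq. (2)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.detach()
```

Passing the renderer and encoders in as callables keeps the sketch agnostic to the specific rasterizer; in the paper, the views handed to this step are sampled from a Gaussian around a primary view, which would happen when constructing `views`.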
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.617, + 0.209, + 0.634 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.643, + 0.469, + 0.853 + ], + "angle": 0, + "content": "In this section we examine various capabilities of 3D Highlighter. First, we demonstrate the fidelity of our highlighter localization in Sec. 4.1, including qualitative and quantitative evaluations. As far as we can ascertain, our method is the first technique to perform text-driven localization on 3D shapes without pre-training on 3D data. Thus, we adapt an existing language-guided segmentation technique for 2D images to serve as a baseline [21]. Moreover, we demonstrate the robustness of 3D Highlighter in Sec. 4.2. Then we explore several applications of our method in Sec. 4.3, such as selective editing, localized manipulation, and segmentation. Finally, in Sec. 4.4 we evaluate the influence of key components of 3D Highlighter and discuss its limitations in Sec. 4.5." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "We apply our method to a large variety of meshes from different sources: COSEG [41], Turbo Squid [40], Thingi10K [48], Toys4k [34], ModelNet [43], and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.197 + ], + "angle": 0, + "content": "ShapeNet [3]. 3D Highlighter does not impose any restrictions on the mesh quality; many of the meshes used contain artifacts, such as elements that are non-manifold, unoriented, and contain boundaries or self-intersections. Our PyTorch [29] implementation optimization takes around 5 minutes to run on an Nvidia A40 GPU. In our experiments, we used CLIP ViT-L/14 at \\(224 \\times 224\\) resolution." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.208, + 0.854, + 0.224 + ], + "angle": 0, + "content": "4.1. Generality and Fidelity of 3D Highlighter" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.231, + 0.892, + 0.428 + ], + "angle": 0, + "content": "Highlight generality. 3D Highlighter is not restricted to any particular category for either the input mesh or the text-specified localization, since it does not rely on a 3D dataset or 3D pre-training. In Fig. 2, we see our method achieves accurate localization for a diverse collection of meshes from various domains such as humanoids, animals, and manufactured objects. 3D Highlighter is capable of localizing a wide variety of diverse attributes even when the context of these target attributes is entirely unrelated to the input mesh. Moreover, 3D Highlighter demonstrates that it can perform hallucinated highlighting, where it selects regions on meshes with no underlying geometric signal (such as a bow tie on a camel or a hat on a pig)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.568 + ], + "angle": 0, + "content": "Highlight specificity. In Fig. 3, we observe that semantic differences are reflected in the network-predicted highlight. 3D Highlighter is able to successfully localize different text-specified regions on the same mesh. Our framework demonstrates the nuanced understanding required to disambiguate different target regions, such as headphones and hat on the rabbit. Finally, the ability to identify many different regions on a single mesh allows users intuitive, comprehensive, and fine-grained control over part localization." 
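
The experiments above use CLIP ViT-L/14 at 224×224 resolution together with the prompt template from Sec. 3.3. As an unofficial illustration, the target text embedding e_T used in Eq. (2) could be obtained with the public openai `clip` package roughly as follows; 'horse' and 'necklace' are example values, not fixed choices of the method:

```python
import clip   # openai/CLIP package
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _preprocess = clip.load("ViT-L/14", device=device)   # 768-dim embedding space

# Prompt template from Sec. 3.3; 'horse' and 'necklace' are example values.
prompt = "a gray {} with highlighted {}".format("horse", "necklace")

with torch.no_grad():
    tokens = clip.tokenize([prompt]).to(device)
    e_T = model.encode_text(tokens).squeeze(0)               # e_T in R^768
```

Normalizing `e_T` is optional here, since Eq. (2) divides by the norms of both embeddings.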
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.573, + 0.891, + 0.648 + ], + "angle": 0, + "content": "Quantitative evaluation. 3D Highlighter is the first system to select semantic regions on 3D shapes using text guidance, without any 3D datasets. Since there are no quantitative benchmarks to evaluate the quality of our highlights, we do so with a perceptual user study." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.649, + 0.892, + 0.679 + ], + "angle": 0, + "content": "Moreover, since there are no existing approaches for text-based segmentation in 3D, we create two baselines by" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.703, + 0.597, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.77, + 0.562, + 0.795 + ], + "angle": 0, + "content": "full 0.332" + }, + { + "type": "image", + "bbox": [ + 0.602, + 0.703, + 0.692, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.617, + 0.77, + 0.658, + 0.795 + ], + "angle": 0, + "content": "direct 0.319" + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.703, + 0.79, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.706, + 0.77, + 0.767, + 0.795 + ], + "angle": 0, + "content": "no blend 0.297" + }, + { + "type": "image", + "bbox": [ + 0.795, + 0.703, + 0.887, + 0.769 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.812, + 0.77, + 0.865, + 0.795 + ], + "angle": 0, + "content": "no augs 0.287" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.802, + 0.892, + 0.871 + ], + "angle": 0, + "content": "Figure 7. Ablation experiments. We present ablation results for target text 'shoes' using our system (full), direct optimization (direct), without probability-weighted blending (no blend), and without 2D augmentations (no augs). Resulting CLIP scores shown below each image." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20934" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.09, + 0.089, + 0.46, + 0.134 + ], + "angle": 0, + "content": "
Method | Control | LSeg | Text2LIVE | Ours
Average Score ↑ | 1.00 | 1.26 | 2.23 | 4.38
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.144, + 0.47, + 0.187 + ], + "angle": 0, + "content": "Table 1. Perceptual study. We extend two image-based approaches LSeg [21] (segmentation) and Text2LIVE [2] (localized editing) to the highlighting task and report mean user rating." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.226, + 0.469, + 0.392 + ], + "angle": 0, + "content": "extending two different 2D image-based approaches. The first baseline extends LSeg [21] which directly predicts a segmentation in 2D, while the second baseline extends Text2LIVE [2] which infers an edit mask for 2D image manipulation. To evaluate these baselines, we render a bare mesh from a view where the target localization region is clearly visible. We extract the 2D segmentation produced by the image baselines and use it to color the rendered image. Then we ask users to rate the highlight quality of both baselines and our 3D Highlighter result rendered from the same view in our perceptual study." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.394, + 0.469, + 0.514 + ], + "angle": 0, + "content": "Our perceptual study reports quantitative results on the quality of highlights from both 3D Highlighter and baselines. Users were asked to rate each result from 1-5 on how effectively the highlight represents \"an [object] with a region corresponding to a [region] highlighted.\" Visual examples from our study are shown in the supplemental material (Fig. 21). In total, 33 users evaluated each method on 5 mesh and region combinations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.515, + 0.47, + 0.742 + ], + "angle": 0, + "content": "Our 3D Highlighter achieved the highest ratings compared to the baselines (Tab. 1). LSeg is built for text-driven semantic segmentation and excels at segmenting entire objects within a scene. However, LSeg struggles to identify parts within a single object, leading to subpar performance on our highlighting task. Text2LIVE was not explicitly built for segmentation, however it does rely on inferring a continuously-valued edit mask (i.e. a soft-segmentation) when performing localized image editing. The edit mask is designed to produce high-quality image manipulations; however, it is not directly suitable for identifying the sharp segmentation boundaries required for our highlighting task. Qualitative comparisons and an additional quantitative comparison using a modified CLIP R-Precision metric are discussed in the supplemental material." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.752, + 0.34, + 0.768 + ], + "angle": 0, + "content": "4.2. Robustness of 3D Highlighter" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.781, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Localization transfer. An important benefit of formulating 3D Highlighter as a neural field optimization is the ability to trivially transfer localization results between different meshings. This ability is useful for many tasks in geometry processing which require an object to be re-triangulated, simplified, subdivided, or otherwise remeshed. Localization transfer is possible since our neural highlighter is represented as a field over the shape and is independent of any" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.086, + 0.895, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.238, + 0.892, + 0.306 + ], + "angle": 0, + "content": "Figure 8. Controlled stylization. 
Given three different stylizations of the same object, we use 3D Highlighter to select different regions and combine them together (Ours). Attempting to achieve this composition with a holistic approach leads to an undesirable result (Text2Mesh [25])." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.892, + 0.393 + ], + "angle": 0, + "content": "specific meshing. Although the neural highlighter is trained on mesh vertices, the resulting network encodes a smooth field and produces meaningful outputs for any 3D point on (or near) the mesh surface." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.394, + 0.893, + 0.531 + ], + "angle": 0, + "content": "In Fig. 9, we show an optimization of the 3D Highlighter on a single mesh triangulation (original) for the prompt 'shoes'. We then apply the already-optimized neural highlighter to remeshed (middle) and subdivided (right) versions of the original mesh, showing the transferability of the selected region to different triangulations. This result demonstrates how 3D Highlighter is independent of the input mesh and that, once we have a localization for one mesh, we can trivially transfer it to any other meshing of the same object." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.534, + 0.894, + 0.67 + ], + "angle": 0, + "content": "Viewpoint robustness. Our method is robust to the primary view choice. This property is important for our localization task, as we may not know a priori which view is ideal. In Fig. 6, we perform our optimization using three different primary viewpoints: \\(0^{\\circ}\\), \\(90^{\\circ}\\), and \\(-90^{\\circ}\\) (viewpoints shown in blue). We then present predicted localizations, showing that for all three views, 3D Highlighter is able to accurately identify the target localization region, regardless of whether that region is visible from the primary view." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.67, + 0.892, + 0.715 + ], + "angle": 0, + "content": "From the \\(-90^{\\circ}\\) primary view, the target region (the neck) is not visible. However, is still visible with a low probability for views sampled from the Gaussian distribution" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.737, + 0.89, + 0.841 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.842, + 0.892, + 0.897 + ], + "angle": 0, + "content": "Figure 9. Localization transfer. We optimize our neural highlighter on one mesh (original) for the prompt 'shoes'. Once optimized, the network weights transfer the localization to different meshings of the same object (remeshed and subdivided)." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20935" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.213 + ], + "angle": 0, + "content": "around the primary view. This means that over the course of optimization, regions other than the neck are mostly seen while the target region is rarely visible. Nonetheless, our method manages to highlight the desired region, which implies its robustness to how frequently the target region for localization is seen. Furthermore, it shows that oversampling views where the target region is not visible does not negatively influence the optimization." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.221, + 0.35, + 0.238 + ], + "angle": 0, + "content": "4.3. 
Applications of 3D Highlighter" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.244, + 0.47, + 0.424 + ], + "angle": 0, + "content": "Selective editing. In Fig. 4, we show that it is possible to use 3D Highlighter to selectively edit a 3D object within a semantic region. This is applicable to techniques which incorporate global texture or material properties over the entire shape, such as in Text2Mesh [25] or MatCap [39]. Starting with different bare input meshes, we edit the entire shape using a global stylization technique [25]. Then, we use 3D Highlighter to select a text-specified region and incorporate the modifications only in the selected area. Thus 3D Highlighter provides direct control over where to stylize shapes, enabling users to obtain localized stylizations based on semantic cues." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.47, + 0.655 + ], + "angle": 0, + "content": "Controlled stylization via composition. Achieving compositionality with language models is a challenging task [33]. For example, starting with a human mesh and using Text2Mesh [25] to stylize 'Iron Man with the head of Steve Jobs and Yeti legs', leads to muddled and undesirable results (Fig. 8, rightmost). Our method enables compositionality between different shape modifications by chaining simple concepts together (Fig. 8). Specifically, we decompose the desired modification into three separate attainable targets ('Iron Man', 'Steve Jobs', and 'Yeti'), which we stylize individually with Text2Mesh. We then utilize our 3D Highlighter to localize the text-specified regions. We achieve the desired composition by combining the highlighted regions together, obtaining clear boundaries between stylizations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.658, + 0.469, + 0.735 + ], + "angle": 0, + "content": "Semantic segmentation. In Fig. 10, we show that our technique is not restricted to hallucinated highlighting and is capable of localizing semantically-specified geometric regions. These text-driven localizations identify unique geometric parts without utilizing any 3D datasets or part labels." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.742, + 0.35, + 0.759 + ], + "angle": 0, + "content": "4.4. Components of 3D Highlighter" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Ablation study. Several components are key for facilitating 3D Highlighter. We provide ablation results in Fig. 7 to demonstrate the effect of our design choices. First, using a direct optimization of the vertex color (direct) instead of optimizing a neural field results in splotchy highlight artifacts. Since the neural field has a spectral bias towards smooth solutions [32], omitting it leads to an undesired noisy output. 
Second, removing the probability weighted blending (no blend) and instead coloring vertices using only" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.092, + 0.619, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.215, + 0.59, + 0.227 + ], + "angle": 0, + "content": "Arm" + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.092, + 0.701, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.214, + 0.686, + 0.227 + ], + "angle": 0, + "content": "Slide" + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.153, + 0.868, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.765, + 0.214, + 0.828, + 0.228 + ], + "angle": 0, + "content": "Propeller" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.229, + 0.892, + 0.269 + ], + "angle": 0, + "content": "Figure 10. Semantic Segmentation. 3D Highlighter produces semantic segmentations for unique geometric parts without any 3D dataset or annotations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.298, + 0.892, + 0.435 + ], + "angle": 0, + "content": "two distinct values also produces a noisy highlight pattern. Without a continuous color blend, the gradients become ill-conditioned and unstable, leading to highlight artifacts and irregular localization boundaries. Lastly, similar to previous works [9, 25], we observe that without 2D perspective augmentations (no augs), 3D Highlighter outputs degenerate solutions. The ablation study emphasizes the importance of our key design choices in 3D Highlighter for its ability to highlight a coherent and localized region on the input shape." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.438, + 0.892, + 0.574 + ], + "angle": 0, + "content": "Prompt formulation and CLIP understanding. Our prompt formulation combined with our coloring scheme results in the correct association between objects and their properties, a known challenge when using CLIP [33]. In Fig. 12, we analyze the CLIP score for two different prompts: 'gray chair with highlighted back' (left) and 'blue chair with red back' (right). For each prompt, we measure the CLIP similarity to renders of both the correct assignment and flipped assignment." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.575, + 0.892, + 0.696 + ], + "angle": 0, + "content": "We observe that our prompt formulation ('gray chair with highlighted back') results in a higher average CLIP score for the correct assignment. In contrast, when specifying colors in the prompt ('blue chair with red back') and styling the mesh accordingly, we see higher CLIP scores for the flipped association. Using the same gray and yellow renders (left), we also compare to a prompt specifying colors ('gray chair with yellow back') and find that the higher" + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.716, + 0.852, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.828, + 0.892, + 0.897 + ], + "angle": 0, + "content": "Figure 11. Network initialization. We optimize 3D Highlighter for the text prompt 'belt' using different initialization methods: using a default initialization where all output probabilities are near 0.5 (middle) or altering the final layer so that all outputs are 0 (left) or 1 (right). Initializing with 0 or 1 leads to an undesirable result." 
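
A small sketch of the initialization ablation in Fig. 11, reusing the `NeuralHighlighter` class from the earlier sketch: zeroing the final layer's weights and choosing its bias forces a constant initial highlight probability. The paper describes setting the bias to 0 or 1 directly; since the sketch's head is a sigmoid, the equivalent here is a bias equal to the logit of the target probability, so values close to 0 and 1 are used for illustration.

```python
import torch
import torch.nn as nn


def force_constant_highlight_init(net: nn.Module, p0: float) -> None:
    """Zero the final Linear layer's weights and set its bias so that every query
    point starts with highlight probability ~p0 (bias = logit(p0), because the
    sketch's network ends in a sigmoid)."""
    last_linear = [m for m in net.mlp if isinstance(m, nn.Linear)][-1]
    with torch.no_grad():
        last_linear.weight.zero_()
        last_linear.bias.fill_(torch.logit(torch.tensor(p0)).item())


net = NeuralHighlighter()                    # default random init: outputs cluster near 0.5
print(net(torch.rand(4, 3)).squeeze())
force_constant_highlight_init(net, 0.99)     # 'all highlighted' ablation (Fig. 11, right)
force_constant_highlight_init(net, 0.01)     # 'no highlight' ablation (Fig. 11, left)
```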
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20936" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.071, + 0.43, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.213, + 0.473, + 0.312 + ], + "angle": 0, + "content": "Figure 12. CLIP understanding. We examine CLIP similarity scores for several prompt formulations targeting the 'back' of the chair while using the correct color assignment and where the coloring is flipped. For the prompt 'gray chair with highlighted back' (left) we observe that the CLIP score is higher for the correct assignment. For the prompt 'blue chair with red back' (right) the CLIP score is higher for the flipped (incorrect) assignment." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.47, + 0.366 + ], + "angle": 0, + "content": "CLIP score corresponds to the flipped selection (data not shown)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.367, + 0.471, + 0.488 + ], + "angle": 0, + "content": "We also measure the CLIP scores for our standard prompt formulation: 'gray chair with highlighted back', replacing the yellow color in the rendering with other colors, such as red and blue, and find that the correct selection has a higher CLIP score (data not shown). To conclude, our prompt formulation (i.e., the use of the term 'highlighted') coincides with CLIP's understanding and 3D Highlighter is robust to the highlight color." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.491, + 0.47, + 0.642 + ], + "angle": 0, + "content": "Network initialization. Initializing the network such that the object is partially highlighted (i.e., with highlight probability equal to 0.5) is important for obtaining desirable results. In Fig. 11, we show the optimization of our method for the target text prompt 'belt' using three different initializations. Our method (middle) initializes all output probabilities near 0.5 by random weight initialization of the network. We compare to initializing the output probabilities to 0 (left) or 1 (right), in which we set the weights of the last layer to 0, and the bias to 0 or 1, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.643, + 0.471, + 0.718 + ], + "angle": 0, + "content": "For the initialization to both 0.5 and 1, a highlight color is uniformly present on the styled mesh, whereas with 0, the mesh is gray with no highlight. Consequently, we hypothesize that the presence of highlight color at initialization is important for CLIP's supervision." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.727, + 0.205, + 0.743 + ], + "angle": 0, + "content": "4.5. Limitations" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.886 + ], + "angle": 0, + "content": "3D Highlighter is robust to variations of the object specification in the target prompt. However, there should still be a logical connection between the 3D shape and its description. Fig. 13 shows results for a camel mesh and the target highlight 'shinguards'. For each optimization, we use a slightly different target prompt by varying the object specification. The prompts are of the form \"[object] with highlighted shinguards\", where [object] is replaced with camel, pig, animal, or chair." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In Fig. 
13, we observe that with object specifications" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.244 + ], + "angle": 0, + "content": "that resemble the geometry of camel, such as pig and animal, 3D Highlighter accurately localizes the desired region. However, for a description that is incompatible with the object's geometry (i.e., referring to a camel as a chair), our method does not produce meaningful results. This result sheds light on 3D Highlighter's robustness to text descriptions: 3D Highlighter is able to reason about a mesh even when its description is not perfectly accurate, provided that it is sufficiently similar to the true description (i.e., referring to a camel mesh as a pig)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.271, + 0.628, + 0.287 + ], + "angle": 0, + "content": "5. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.297, + 0.892, + 0.432 + ], + "angle": 0, + "content": "We present a technique for highlighting semantic regions on meshes using text as input, without any 3D datasets or 3D pre-training. 3D Highlighter can reason about where to place a non-obviously related part on a 3D object (i.e. a hat on a candle). The ability to combine unconnected parts and objects together is reminiscent of ideas from image analogies [12, 22]. In this work, we show that we can identify part-concepts that are geometrically absent from a shape, giving rise to our hallucinated highlighting capability." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.433, + 0.893, + 0.553 + ], + "angle": 0, + "content": "During neural optimization, our neural network infers a probability which we use to blend the highlight color onto the mesh. The network-predicted probabilities are general, and provide a soft-segmentation which we show can be used for a variety of different applications (Figs. 4 and 8). In the future, we are interested in extending our framework to obtain part correspondence between shapes that differ topologically but are semantically related." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.566, + 0.681, + 0.584 + ], + "angle": 0, + "content": "6. Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.591, + 0.894, + 0.682 + ], + "angle": 0, + "content": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Adobe Research. Finally, we would like to thank Richard Liu, Avery Zhou, and the members of 3DL for their thorough and insightful feedback on our work." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.701, + 0.892, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.814, + 0.895, + 0.898 + ], + "angle": 0, + "content": "Figure 13. Prompt generality. Our system is robust to certain variations in object specifications. We achieve desirable results for the text input 'camel with highlighted shinguards' (left), as well as for other variations ('pig' and 'animal'). If the object specification, such as 'chair', is incompatible with the input geometry, 3D Highlighter no longer produces meaningful results." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20937" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Shmuel Asafi, Avi Goren, and Daniel Cohen-Or. Weak convex decomposition by lines-of-sight. Computer graphics forum, 32(5):23-31, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.472, + 0.213 + ], + "angle": 0, + "content": "[2] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022. 3, 6, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.217, + 0.47, + 0.285 + ], + "angle": 0, + "content": "[3] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.287, + 0.47, + 0.356 + ], + "angle": 0, + "content": "[4] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.359, + 0.47, + 0.426 + ], + "angle": 0, + "content": "[5] Zhiqin Chen, Kangxue Yin, Matthew Fisher, Siddhartha Chaudhuri, and Hao Zhang. Bae-net: Branched autoencoder for shape co-segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8490-8499, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.429, + 0.47, + 0.484 + ], + "angle": 0, + "content": "[6] Nicu D Cornea, Deborah Silver, and Patrick Min. **Curve-skeleton properties, applications, and algorithms. IEEE Transactions on visualization and computer graphics*, 13(3):530, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.487, + 0.47, + 0.556 + ], + "angle": 0, + "content": "[7] Boyang Deng, Kyle Genova, Soroosh Yazdani, Sofien Bouaziz, Geoffrey Hinton, and Andrea Tagliasacchi. Cvxnet: Learnable convex decomposition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 31-44, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.558, + 0.469, + 0.599 + ], + "angle": 0, + "content": "[8] Tamal K Dey and Wulue Zhao. Approximating the medial axis from the voronoi diagram with a convergence guarantee. Algorithmica, 38(1):179-200, 2004. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.601, + 0.469, + 0.642 + ], + "angle": 0, + "content": "[9] Kevin Frans, Lisa B. Soros, and Olaf Witkowski. Clipdraw: Exploring text-to-drawing synthesis through language-image encoders. ArXiv, abs/2106.14843, 2021. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.644, + 0.469, + 0.697 + ], + "angle": 0, + "content": "[10] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. arXiv preprint arXiv:2207.09446, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.701, + 0.469, + 0.756 + ], + "angle": 0, + "content": "[11] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: A network with an edge. ACM Transactions on Graphics (TOG), 38(4):90:1–90:12, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.814 + ], + "angle": 0, + "content": "[12] Aaron Hertzmann, Charles E Jacobs, Nuria Oliver, Brian Curless, and David H Salesin. Image analogies. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 327-340, 2001. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[13] Donald D Hoffman and Whitman A Richards. Parts of recognition. Cognition, 18(1-3):65-96, 1984. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[14] Yining Hong, Yilun Du, Chunru Lin, Josh Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. In Annual Conference on Neural Information Processing Systems, 2022. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[15] Shi-Min Hu, Zheng-Ning Liu, Meng-Hao Guo, Junxiong Cai, Jiahui Huang, Tai-Jiang Mu, and Ralph R. Martin. Subdivision-based mesh convolution networks. ACM Trans. Graph., 41(3):25:1-25:16, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[16] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.222, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[17] Oliver Van Kaick, Noa Fish, Yanir Kleiman, Shmuel Asafi, and Daniel Cohen-Or. Shape segmentation by approximate convexity analysis. ACM Transactions on Graphics (TOG), 34(1):1–11, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.28, + 0.892, + 0.335 + ], + "angle": 0, + "content": "[18] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Popa Tiberiu. Clip-mesh: Generating textured meshes from text using pretrained image-text models. SIGGRAPH Asia 2022 Conference Papers, December 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.338, + 0.892, + 0.379 + ], + "angle": 0, + "content": "[19] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nef for editing via feature field distillation. arXiv, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.382, + 0.892, + 0.422 + ], + "angle": 0, + "content": "[20] Alon Lahav and Ayellet Tal. Meshwalker: Deep mesh understanding by random walks. ACM Transactions on Graphics (TOG), 39(6):1-13, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.426, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[21] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In International Conference on Learning Representations, 2022. 
3, 5, 6, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.525 + ], + "angle": 0, + "content": "[22] Jing Liao, Yuan Yao, Lu Yuan, Gang Hua, and Sing Bing Kang. Visual attribute transfer through deep image analogy. arXiv preprint arXiv:1705.01088, 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.528, + 0.892, + 0.582 + ], + "angle": 0, + "content": "[23] Jyh-Ming Lien and Nancy M Amato. Approximate convex decomposition of polyhedra. In Proceedings of the 2007 ACM symposium on Solid and physical modeling, pages 121-131, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.586, + 0.892, + 0.641 + ], + "angle": 0, + "content": "[24] Haggai Maron, Meirav Galun, Noam Aigerman, Miri Trope, Nadav Dym, Ersin Yumer, Vladimir G Kim, and Yaron Lipman. Convolutional neural networks on surfaces via seamless toric covers. ACM Trans. Graph., 36(4):71-1, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.644, + 0.892, + 0.712 + ], + "angle": 0, + "content": "[25] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3, 5, 6, 7, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.716, + 0.892, + 0.77 + ], + "angle": 0, + "content": "[26] Francesco Milano, Antonio Loquercio, Antoni Rosinol, Davide Scaramuzzi, and Luca Carlone. Primal-dual mesh convolutional neural networks. Advances in Neural Information Processing Systems, 33:952-963, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.773, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[27] Kaichun Mo, Shilin Zhu, Angel X. Chang, Li Yi, Subarna Tripathi, Leonidas J. Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[28] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 12" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20938" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[29] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.468, + 0.188 + ], + "angle": 0, + "content": "[30] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.19, + 0.468, + 0.259 + ], + "angle": 0, + "content": "[31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020, 2021. 
1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.261, + 0.468, + 0.369 + ], + "angle": 0, + "content": "[32] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 5301-5310. PMLR, 09-15 Jun 2019. 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.371, + 0.468, + 0.424 + ], + "angle": 0, + "content": "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.427, + 0.468, + 0.468 + ], + "angle": 0, + "content": "[34] James Matthew Rehg. Toys4k 3d object dataset, 2022. https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.469, + 0.468, + 0.496 + ], + "angle": 0, + "content": "[35] Ariel Shamir. A survey on mesh segmentation techniques. Computer graphics forum, 27(6):1539-1556, 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.498, + 0.468, + 0.551 + ], + "angle": 0, + "content": "[36] Nicholas Sharp, Souhaib Attaiki, Keenan Crane, and Maks Ovsjanikov. Diffusionnet: Discretization agnostic learning on surfaces. ACM Transactions on Graphics (TOG), 41(3):1-16, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.553, + 0.468, + 0.662 + ], + "angle": 0, + "content": "[37] Weiwei Sun, Andrea Tagliasacchi, Boyang Deng, Sara Sabour, Soroosh Yazdani, Geoffrey E Hinton, and Kwang Moo Yi. Canonical capsules: Self-supervised capsules in canonical pose. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 24993-25005. Curran Associates, Inc., 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.664, + 0.468, + 0.732 + ], + "angle": 0, + "content": "[38] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.734, + 0.468, + 0.774 + ], + "angle": 0, + "content": "[39] Hideki Todo, Ken Anjyo, and Shun'Ichi Yokoyama. Litsphere extension for artistic rendering. Vis. Comput., 29(6-8):473-480, jun 2013. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.776, + 0.468, + 0.803 + ], + "angle": 0, + "content": "[40] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.805, + 0.468, + 0.858 + ], + "angle": 0, + "content": "[41] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553–562, 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.86, + 0.468, + 0.9 + ], + "angle": 0, + "content": "[42] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. 
In Proceedings of the" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[43] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.193, + 0.892, + 0.26 + ], + "angle": 0, + "content": "[44] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Computer Graphics Forum, 2022. 1, 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.263, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[45] Li Yi, Hao Su, Xingwen Guo, and Leonidas J Guibas. Syncspeccnn: Synchronized spectral cnn for 3d shape segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2282-2290, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.32, + 0.892, + 0.387 + ], + "angle": 0, + "content": "[46] Fenggen Yu, Kun Liu, Yan Zhang, Chenyang Zhu, and Kai Xu. Partnet: A recursive part decomposition network for fine-grained and hierarchical shape segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9491-9500, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.39, + 0.892, + 0.444 + ], + "angle": 0, + "content": "[47] Qian Zheng, Zhuming Hao, Hui Huang, Kai Xu, Hao Zhang, Daniel Cohen-Or, and Baoquan Chen. Skeleton-intrinsic symmetrization of shapes. Computer Graphics Forum, 34(2):275-286, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.446, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[48] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.489, + 0.892, + 0.558 + ], + "angle": 0, + "content": "[49] Chenyang Zhu, Kai Xu, Siddhartha Chaudhuri, Li Yi, Leonidas J Guibas, and Hao Zhang. Adacoseg: Adaptive shape co-segmentation with group consistency loss. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8543-8552, 2020. 
3" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20939" + } + ] +] \ No newline at end of file diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_origin.pdf b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..81299c17dccb1050ec895be1149d392b00601f2f --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/40cb675d-902c-46da-982e-90a4332ad0f2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fe99f4c309ed8eedc3f06c41a5b474bf3c3ca7f476e76730d5692fab5345b3f +size 5936579 diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/full.md b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d88722985d2916a924dfa0dbb58d96e3b7a06c97 --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/full.md @@ -0,0 +1,352 @@ +# 3D Highlighter: Localizing Regions on 3D Shapes via Text Descriptions + +Dale Decatur + +University of Chicago + +ddecatur@uchicago.edu + +Itai Lang + +University of Chicago + +itailang@uchicago.edu + +Rana Hanocka + +University of Chicago + +ranahanocka@uchicago.edu + +![](images/3ce3a39f1eb0d64ad65575fb6db76724d3770833ec5661ef4d5b62770f653931.jpg) +Hat + +![](images/60c4879aeb4ae28ea5aa5ff682daf21552ad4ac22b576e77b4c48bb58976f923.jpg) +Necklace + +![](images/fe90a136ea0f9260c9f288ed71d72975217f89971a6a4916f93a99516821e195.jpg) +Headlights +Figure 1. 3D Highlighter localizes semantic regions on a shape using text as input. Our technique reasons about where to place seemingly unrelated concepts in semantically meaningful locations on the 3D shape, such as a 'necklace' on a horse or 'shoes' on an alien. + +![](images/5fb07b1e0540704ea8d02f727270040e868b72855b1128eb69cfd2362028f4f0.jpg) +Shoes + +![](images/4ae578474bf040ed0bec23be64939a47d3ec22c8b57557e5f3c545daefabbbef.jpg) +Eyeglasses + +# Abstract + +We present 3D Highlighter, a technique for localizing semantic regions on a mesh using text as input. A key feature of our system is the ability to interpret "out-of-domain" localizations. Our system demonstrates the ability to reason about where to place non-obviously related concepts on an input 3D shape, such as adding clothing to a bare 3D animal model. Our method contextualizes the text description using a neural field and colors the corresponding region of the shape using a probability-weighted blend. Our neural optimization is guided by a pre-trained CLIP encoder, which bypasses the need for any 3D datasets or 3D annotations. Thus, 3D Highlighter is highly flexible, general, and capable of producing localizations on a myriad of input shapes. Our code is publicly available at https://github.com/threedle/3DHighlighter. + +# 1. Introduction + +Semantic localization of regions on 3D meshes is an important problem in computer graphics and vision with broad applications. One such application is the incorporation of semantic information into the 3D modeling process. 
A particularly challenging aspect of this task emerges when 3D geometric signals are insufficient for performing segmentation, e.g. where to add a shirt to a bare 3D human model. + +We propose 3D Highlighter, a method for automatically localizing fine-grained semantic regions on a shape based + +on only a text description. Our system contextualizes the text prompt and highlights the corresponding shape region using the network-predicted probabilities. Using only text, users are able to semantically identify regions on a shape. Our system takes meshes as input, making it compatible with 3D modeling workflows and tools. + +This highlighting task requires both object-level and part-level understanding. 3D Highlighter demonstrates the ability to reason about where to place seemingly unrelated concepts on the 3D shape, such as a hat on a candle (Fig. 1). Our system localizes attributes that are geometrically absent from a shape, which we refer to as hallucinated highlighting. Understanding a part's global shape context is challenging even when relying on salient geometric features [17,27], let alone without them. + +We optimize the weights of a neural network to produce probabilities that are used to color a given 3D shape in accordance with the specified text. We leverage a pre-trained vision-language model (CLIP [31]) to guide the neural optimization towards the text-specified region. This neural optimization formulation is flexible, bypassing the need for any 3D datasets, 3D annotations, or 3D pre-training. Our system is not bound to a specific set of classes, and, as shown in Fig. 2, is not limited to object parts defined by salient geometric features. + +We encode the part selection as a neural field [44] over the mesh surface. Our network learns to map each point on the surface to a probability of belonging to the text-specified region. We translate the inferred probabilities to a visual at- + +![](images/017b26d3608a89b48df7256a49f16654424c0c1d823dddfbe5754975975d1db4.jpg) +Figure 2. Hallucinated part highlighting. Our system is able to reason about where to highlight a geometrically-absent region on shapes. The resulting localizations demonstrate global understanding and localized part-awareness. + +tribute on the mesh surface, which can be rendered and visually understood. The network-predicted probabilities act as a soft-selection operator which blends the highlighter color onto the mesh. The network weights are updated by encouraging the CLIP [31] embedding of the 2D renders of the highlighted mesh to adhere to the specified text. As a result, the network implicitly learns to segment the object to adhere to the text prompt. + +We make several design choices that are key to the success of 3D Highlighter. Our network does not directly color the mesh. Rather, we predict a probability of being inside the text-specified highlight, which is used to blend colors on the mesh. The network is initialized such that points have roughly a $50\%$ probability of being highlighted, resulting in a mesh with albedo halfway between the highlight and background color. During optimization, the relative blend weight of the highlight color directly corresponds to the highlight probability. This blending enables the network to naturally and smoothly increase or decrease the segmenta + +tion probability in accordance with the text specification of the target region. + +In summary, we present a method for localizing semantic regions on 3D shapes. 
The localization is specified by a textual description, which is intuitive, flexible, and not limited to a specific training dataset. We demonstrate applications of our method to shape editing and stylization. Furthermore, our field formulation enables the 3D Highlighter to work with different mesh resolutions and triangulations. A key feature of our system is the ability to interpret out-of-domain localizations. For example, 3D Highlighter is able to figure out where to place a 'hat' on a candle as seen in Fig. 1, demonstrating the ability to reason about where to place seemingly unrelated concepts on the 3D shape. + +# 2. Related Work + +Geometry-driven segmentation. Traditional works in geometry processing use low-level geometric features (such as surface area, curvature, or geodesic distance) in or- + +![](images/9766ef4a11261cc1f15997de6dd888eb8713529b811146c3bb3ee2bfeb3e8788.jpg) +Headphones + +![](images/81cd7b51d2cfd8d78a71821d36fd79da22a5b7a19117eff2d1f513b1af286837.jpg) +Shoes + +![](images/294bd74ac95b194ac50b261400ea011e725354d47acbb368eeae197473ee4fbe.jpg) +Hat + +![](images/959b668781fd20b838201489cefa175f07a439dd1efd9bafbc25d1795cc64b51.jpg) +Shoes + +![](images/7c59cf4c4e5b68a607234db82cf9d6bcbab0a29c916e3e3e1d450e3ec7c0381d.jpg) +Necklace + +![](images/77c4c76920741c24fa86ab07bfca602d48b239ea9ec27eea54eea9565ea4204c.jpg) +Glasses + +![](images/2019c1bb121776727762325c858011eb3ff3e6de4ad7452ff2614f5f21bf286c.jpg) +Belt + +![](images/770a8424474d00430078d9ff43613e1f1d548b70b88925618a0696fb033d8dc8.jpg) +Hat + +![](images/57decd69583ee7eab9d4754d219932c4bfbc74046be0677bb3003a98f148289a.jpg) +Necklace + +![](images/78d80f0d2b6d1a2c6fcc7194230b72a5c46477d56b3699b6d6aa54ef4b3657cc.jpg) +Necklace +Figure 3. Our method is able to highlight different parts on the same object. For target selections that correspond to distinct regions, 3D Highlighter produces selections that are semantically meaningful and spatially separated without signal from underlying geometry. + +![](images/ad0b618d045f1d549053c56f8bda5ecb063249ae42b25d58f0f874c73ea53607.jpg) +Roof + +![](images/7c5d9feaaeb944ad53557f28a02665cc718c380495e4710928de625be0f015b7.jpg) +Arms + +der to infer high-level semantic attributes for segmenting shapes [35]. In particular, decomposing shapes into smaller parts or segments often corresponds with physical 3D semantic parts [13, 35]. One approach is to partition shapes based on convexity, or an approximation of convexity [1, 23]. The medial axis carries topological information, which may also be used as a guideline for segmentation [6,8,35,47]. + +The underlying assumption in these works is that processing the local geometry can be used to understand the semantics for segmentation. By contrast, a key aspect of our work is the ability to perform hallucinated highlights: segmentations that can not necessarily be inferred by geometry alone. See example highlights in Fig. 2 (e.g., localizing a heart on a goat). + +Data-driven segmentation. In the deep learning era, the 3D part segmentation task has been widely tackled by neural network models [11, 15, 20, 26, 36, 45]. Training such a model is typically done in a fully-supervised manner on a large dataset of shapes annotated with a given set of part classes. For example, MeshCNN [11] was trained on a + +human-body segmentation dataset [24] for learning semantic part segmentation. To alleviate the need for 3D annotations, unsupervised learning schemes utilize large collections of unlabelled data [5,7,14,37,49]. 
For example, Hong et al. [14] inferred part-segmentation through question answering on rendered images from PartNet [46]. + +In contrast to existing deep learning approaches for shape segmentation, we do not rely on any 3D dataset, nor are we bounded to a specific shape category or set of parts. Instead, we specify the desired localization using text and a pre-trained CLIP model which encompasses rich semantic object understanding. Thus, our 3D Highlighter is capable of localizing various semantic regions on a wide variety of 3D shapes. + +Text-guidance. Recent works have leveraged pre-trained vision-language embedding spaces, such as CLIP [31], for analysis, synthesis, and editing. Some techniques leverage pre-trained image encoders for achieving semantic segmentation in images and neural radiance fields [2, 19, 21]. Such techniques are capable of segmenting entire objects within a scene based on text, e.g., a chair inside a room. However, they may struggle to segment parts within an object; e.g., failing to distinguish a window (part) from a house (object) [21]. + +Our work is inspired by the emergent analysis in text-driven synthesis techniques for 3D data [10, 16, 18, 25, 30, 42]. Specifically, Text2Mesh [25] devised a framework for text-driven stylization of 3D meshes, observing that the resulting textures consider part-aware semantics. Yet, since Text2Mesh directly synthesizes stylizations, there is no obvious way to extract any underlying semantic analysis. To address this, we opt to use a highlighter color only as a means for visualizing the network-predicted segmentations. + +# 3. Method + +An illustration of our method is shown in Fig. 5. The inputs to our system are a mesh $M$ , represented by vertices $V \in \mathbb{R}^{n \times 3}$ and faces $F \in \{1, \dots, n\}^{m \times 3}$ , and a text description $T$ . Our neural network, referred to as neural highlighter, is optimized to map vertex positions $v \in V$ to a + +![](images/ea684b3ec72b0b19c97fc2fdb468f84d805f3b34d80640e79d6e262bad979e7b.jpg) +Figure 4. Localized editing. We incorporate textures and displacements to a region highlighted with 3D Highlighter. Used styles: Brick (left), Colorful Crochet (middle), Cactus (right). + +![](images/37f87f8d97108d41f0f6ba47ae01309f80e0de863dac84e8359d80d6fcc580bd.jpg) +Figure 5. Overview of 3D Highlighter. The Neural Highlighter maps each point on the input mesh to a probability. The mesh is colored using a probability-weighted blend and then rendered from multiple views. The neural highlighter weights are guided by the similarity between the CLIP embeddings of the 2D augmented images and the input text. + +probability $p$ of belonging to the text-specified region. Each vertex on the mesh is colored according to a probability-weighted blend between the highlighter color and a gray background color. The resulting highlighted mesh $M'$ is rendered from multiple views, and we apply 2D augmentations to obtain a set of images. We supervise the network optimization by comparing the CLIP-embedded images to the CLIP embedding of the desired text. + +# 3.1. Neural Highlighter + +Our neural highlighter is a neural field [44] mapping coordinates $\mathbf{x} \in \mathbb{R}^3$ to $p \in [0,1]$ , where $p$ is the probability that $\mathbf{x}$ belongs to the text-specified region. 
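
For concreteness, the sketch below writes such a field as a plain coordinate MLP with a sigmoid output. It is a minimal PyTorch illustration only; the class name, layer width, and depth are placeholders and not the exact architecture used in this work (described next and in the supplemental material).

```python
import torch
import torch.nn as nn

class HighlightField(nn.Module):
    """Toy neural field: maps a 3D coordinate to a highlight probability in [0, 1]."""

    def __init__(self, width: int = 256, depth: int = 4):
        super().__init__()
        layers, in_dim = [], 3
        for _ in range(depth):
            layers += [nn.Linear(in_dim, width), nn.ReLU()]
            in_dim = width
        layers += [nn.Linear(in_dim, 1), nn.Sigmoid()]  # per-point probability
        self.mlp = nn.Sequential(*layers)

    def forward(self, xyz: torch.Tensor) -> torch.Tensor:
        # xyz: (N, 3) point coordinates; returns (N,) highlight probabilities
        return self.mlp(xyz).squeeze(-1)

# Example query at mesh vertices (vertices: (N, 3) float tensor):
# probs = HighlightField()(vertices)
```

Because such a field is queried by coordinate rather than by vertex index, the same localization can later be evaluated on remeshed or subdivided versions of the shape (cf. Fig. 9).
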
The neural highlighter is represented as a multi-layer perceptron (MLP) $\mathcal{F}_{\theta}$ that takes an input vertex $v$ in the form of a 3D coordinate $\mathbf{x}_v = (x,y,z)$ and predicts a highlight probability $p_v$ , $\mathcal{F}_{\theta}(\mathbf{x}_v) = p_v$ . This formulation allows us to query the neural field to obtain meaningful highlight probabilities for any 3D point on (or near) the mesh surface. Thus, once optimized, the network weights conveniently transfer the localization to different meshes of the same object without requiring further optimization (Fig. 9). + +Representing our neural highlighter as an MLP produces contiguous localizations and reduces artifacts. MLPs have been shown to exhibit a spectral bias towards smooth solutions [32], especially on low-dimensional inputs such as 3D coordinates [38]. The bias towards low-frequency outputs encourages our 3D Highlighter to predict contiguous localizations with sharp boundaries and discourages noisy highlights (Fig. 7). For this reason, our approach does not utilize positional encoding. See supplemental material for + +![](images/00d170565c319d089b1f02247d1441eae50c822102195bd45107ab7ca0b587ad.jpg) +Figure 6. Viewpoint robustness. Our system produces consistent results even when using different primary viewpoints. Results for three different primary viewpoints for the target text 'necklace'. + +additional details. + +# 3.2. Mesh Color Blending + +We leverage the per-point highlight probability to color the mesh in a continuous, differentiable manner, generating semantically meaningful renders for CLIP supervision. We use a probability-weighted blend, where each vertex color $C_v$ is a linear combination of the highlight color $H$ and gray color $G$ weighted by the network-predicted highlight probability $C_v = p_v \cdot H + (1 - p_v) \cdot G$ . + +At the start of the optimization process, all vertex probabilities are initialized near 0.5 and thus the entire mesh is half-lighted. As the optimization progresses, vertices smoothly transition towards gray or highlighter color (based on the network predictions) such that vertices predicted to be highlighted adhere to the text-specified region. This formulation translates each step of the optimization to a colored mesh that is semantically meaningful to CLIP. Our method provides continuous gradients, in contrast to coloring vertices according to the argmax of the highlight probability. Our blending scheme results in a smoother optimization landscape and reduces highlight artifacts (Fig. 7). + +This formulation is also important for downstream applications that wish to use the localizations, e.g. editing and stylization. Predicting per-point highlight probabilities provides an explicit representation of the highlight region on the mesh surface. An alternative approach, optimizing the surface color directly, would only provide a visual result without explicit information about which vertices belong to the localization. + +# 3.3. Unsupervised Guidance + +We guide our neural optimization using the joint vision-language embedding space of CLIP [31]. We formulate the desired highlight by describing the association between the input mesh [object] and target localization [region]. Specifically, we design our target text $T$ to be: "a gray [object] with highlighted [region]." We render the highlighted geometry from multiple views using differentiable rendering [4]. At each optimization step, we randomly sample $n$ views from a Gaussian distribution centered around a primary view. 
This + +ensures that the underlying object is recognizable in the majority of views shown to CLIP. + +In a preliminary viewpoint prediction stage, we render $360^{\circ}$ views of the mesh and measure the CLIP similarity to the target text prompt. We select the primary view to be the render with the highest CLIP similarity. We found that there exist many possible viewpoints which produce desirable highlighter results (see Fig. 6). More details about how the primary view is selected can be found in the supplemental material. + +For each view $\psi$ , we render a 2D image $I_{\psi}$ and apply a random perspective 2D augmentation $\phi$ , as done in previous works [9, 25]. We then encode each of the augmented images into the CLIP embedding space (in $\mathbb{R}^{768}$ ) using CLIP's image encoder, denoted as $E_I$ . Our final aggregate image representation $\mathsf{e}_I$ is the average CLIP encoding over all views: + +$$ +\mathsf {e} _ {I} = \frac {1}{n} \sum_ {\psi} E _ {I} \left(\phi \left(I _ {\psi}\right)\right) \in \mathbb {R} ^ {7 6 8}. \tag {1} +$$ + +Similarly, we encode the target selection text $T$ with CLIP's text encoder $E_{T}$ to get the encoded target representation $\mathsf{e}_T = E_T(T)\in \mathbb{R}^{768}$ . Our loss $\mathcal{L}$ for optimizing the neural highlighter parameters $\theta$ is formulated as the negative cosine similarity between the aggregate image embedding and the text embedding: + +$$ +\underset {\theta} {\operatorname {a r g m i n}} \mathcal {L} (\theta) = - \frac {\mathrm {e} _ {I} \cdot \mathrm {e} _ {T}}{\left| \mathrm {e} _ {I} \right| \cdot \left| \mathrm {e} _ {T} \right|}. \tag {2} +$$ + +When the loss is minimized, the CLIP embedding of the rendered highlighted mesh becomes similar to the target text embedding. Thus, the localized region will reflect the target text region. + +# 4. Experiments + +In this section we examine various capabilities of 3D Highlighter. First, we demonstrate the fidelity of our highlighter localization in Sec. 4.1, including qualitative and quantitative evaluations. As far as we can ascertain, our method is the first technique to perform text-driven localization on 3D shapes without pre-training on 3D data. Thus, we adapt an existing language-guided segmentation technique for 2D images to serve as a baseline [21]. Moreover, we demonstrate the robustness of 3D Highlighter in Sec. 4.2. Then we explore several applications of our method in Sec. 4.3, such as selective editing, localized manipulation, and segmentation. Finally, in Sec. 4.4 we evaluate the influence of key components of 3D Highlighter and discuss its limitations in Sec. 4.5. + +We apply our method to a large variety of meshes from different sources: COSEG [41], Turbo Squid [40], Thingi10K [48], Toys4k [34], ModelNet [43], and + +ShapeNet [3]. 3D Highlighter does not impose any restrictions on the mesh quality; many of the meshes used contain artifacts, such as elements that are non-manifold, unoriented, and contain boundaries or self-intersections. Our PyTorch [29] implementation optimization takes around 5 minutes to run on an Nvidia A40 GPU. In our experiments, we used CLIP ViT-L/14 at $224 \times 224$ resolution. + +# 4.1. Generality and Fidelity of 3D Highlighter + +Highlight generality. 3D Highlighter is not restricted to any particular category for either the input mesh or the text-specified localization, since it does not rely on a 3D dataset or 3D pre-training. In Fig. 
2, we see our method achieves accurate localization for a diverse collection of meshes from various domains such as humanoids, animals, and manufactured objects. 3D Highlighter is capable of localizing a wide variety of diverse attributes even when the context of these target attributes is entirely unrelated to the input mesh. Moreover, 3D Highlighter demonstrates that it can perform hallucinated highlighting, where it selects regions on meshes with no underlying geometric signal (such as a bow tie on a camel or a hat on a pig). + +Highlight specificity. In Fig. 3, we observe that semantic differences are reflected in the network-predicted highlight. 3D Highlighter is able to successfully localize different text-specified regions on the same mesh. Our framework demonstrates the nuanced understanding required to disambiguate different target regions, such as headphones and hat on the rabbit. Finally, the ability to identify many different regions on a single mesh allows users intuitive, comprehensive, and fine-grained control over part localization. + +Quantitative evaluation. 3D Highlighter is the first system to select semantic regions on 3D shapes using text guidance, without any 3D datasets. Since there are no quantitative benchmarks to evaluate the quality of our highlights, we do so with a perceptual user study. + +Moreover, since there are no existing approaches for text-based segmentation in 3D, we create two baselines by + +![](images/bc15b6e2fd97a1bca99a84cbc1cc4d816c22f4721d8c569ef7f947c5c65b0fdf.jpg) +full 0.332 +Figure 7. Ablation experiments. We present ablation results for target text 'shoes' using our system (full), direct optimization (direct), without probability-weighted blending (no blend), and without 2D augmentations (no augs). Resulting CLIP scores shown below each image. + +![](images/52a2f7096b5ab00c340d7d071fed787c78435618dc985fa5fa6b478c27ed7455.jpg) +direct 0.319 + +![](images/6b07bb4a37b67c5f9e25a7fb0f25dd1d8c1db0da75ff7c2ab87ecddb8d4b90de.jpg) +no blend 0.297 + +![](images/8670507fec27605e15a778c258920e876d7976893e58d1abc1a1f06678d85e08.jpg) +no augs 0.287 + +
| Method | Control | LSeg | Text2LIVE | Ours |
| --- | --- | --- | --- | --- |
| Average Score ↑ | 1.00 | 1.26 | 2.23 | 4.38 |
+ +Table 1. Perceptual study. We extend two image-based approaches LSeg [21] (segmentation) and Text2LIVE [2] (localized editing) to the highlighting task and report mean user rating. + +extending two different 2D image-based approaches. The first baseline extends LSeg [21] which directly predicts a segmentation in 2D, while the second baseline extends Text2LIVE [2] which infers an edit mask for 2D image manipulation. To evaluate these baselines, we render a bare mesh from a view where the target localization region is clearly visible. We extract the 2D segmentation produced by the image baselines and use it to color the rendered image. Then we ask users to rate the highlight quality of both baselines and our 3D Highlighter result rendered from the same view in our perceptual study. + +Our perceptual study reports quantitative results on the quality of highlights from both 3D Highlighter and baselines. Users were asked to rate each result from 1-5 on how effectively the highlight represents "an [object] with a region corresponding to a [region] highlighted." Visual examples from our study are shown in the supplemental material (Fig. 21). In total, 33 users evaluated each method on 5 mesh and region combinations. + +Our 3D Highlighter achieved the highest ratings compared to the baselines (Tab. 1). LSeg is built for text-driven semantic segmentation and excels at segmenting entire objects within a scene. However, LSeg struggles to identify parts within a single object, leading to subpar performance on our highlighting task. Text2LIVE was not explicitly built for segmentation, however it does rely on inferring a continuously-valued edit mask (i.e. a soft-segmentation) when performing localized image editing. The edit mask is designed to produce high-quality image manipulations; however, it is not directly suitable for identifying the sharp segmentation boundaries required for our highlighting task. Qualitative comparisons and an additional quantitative comparison using a modified CLIP R-Precision metric are discussed in the supplemental material. + +# 4.2. Robustness of 3D Highlighter + +Localization transfer. An important benefit of formulating 3D Highlighter as a neural field optimization is the ability to trivially transfer localization results between different meshings. This ability is useful for many tasks in geometry processing which require an object to be re-triangulated, simplified, subdivided, or otherwise remeshed. Localization transfer is possible since our neural highlighter is represented as a field over the shape and is independent of any + +![](images/966054bc4a85eb1b71c9edcf7fe929a75c3553c5be242fe48ac20cedf257a5fb.jpg) +Figure 8. Controlled stylization. Given three different stylizations of the same object, we use 3D Highlighter to select different regions and combine them together (Ours). Attempting to achieve this composition with a holistic approach leads to an undesirable result (Text2Mesh [25]). + +specific meshing. Although the neural highlighter is trained on mesh vertices, the resulting network encodes a smooth field and produces meaningful outputs for any 3D point on (or near) the mesh surface. + +In Fig. 9, we show an optimization of the 3D Highlighter on a single mesh triangulation (original) for the prompt 'shoes'. We then apply the already-optimized neural highlighter to remeshed (middle) and subdivided (right) versions of the original mesh, showing the transferability of the selected region to different triangulations. 
This result demonstrates how 3D Highlighter is independent of the input mesh and that, once we have a localization for one mesh, we can trivially transfer it to any other meshing of the same object. + +Viewpoint robustness. Our method is robust to the primary view choice. This property is important for our localization task, as we may not know a priori which view is ideal. In Fig. 6, we perform our optimization using three different primary viewpoints: $0^{\circ}$ , $90^{\circ}$ , and $-90^{\circ}$ (viewpoints shown in blue). We then present predicted localizations, showing that for all three views, 3D Highlighter is able to accurately identify the target localization region, regardless of whether that region is visible from the primary view. + +From the $-90^{\circ}$ primary view, the target region (the neck) is not visible. However, is still visible with a low probability for views sampled from the Gaussian distribution + +![](images/f018918fbaf8736ed616c12a107abff622ccc97a3598f00583aa31289e4ad0d6.jpg) +Figure 9. Localization transfer. We optimize our neural highlighter on one mesh (original) for the prompt 'shoes'. Once optimized, the network weights transfer the localization to different meshings of the same object (remeshed and subdivided). + +around the primary view. This means that over the course of optimization, regions other than the neck are mostly seen while the target region is rarely visible. Nonetheless, our method manages to highlight the desired region, which implies its robustness to how frequently the target region for localization is seen. Furthermore, it shows that oversampling views where the target region is not visible does not negatively influence the optimization. + +# 4.3. Applications of 3D Highlighter + +Selective editing. In Fig. 4, we show that it is possible to use 3D Highlighter to selectively edit a 3D object within a semantic region. This is applicable to techniques which incorporate global texture or material properties over the entire shape, such as in Text2Mesh [25] or MatCap [39]. Starting with different bare input meshes, we edit the entire shape using a global stylization technique [25]. Then, we use 3D Highlighter to select a text-specified region and incorporate the modifications only in the selected area. Thus 3D Highlighter provides direct control over where to stylize shapes, enabling users to obtain localized stylizations based on semantic cues. + +Controlled stylization via composition. Achieving compositionality with language models is a challenging task [33]. For example, starting with a human mesh and using Text2Mesh [25] to stylize 'Iron Man with the head of Steve Jobs and Yeti legs', leads to muddled and undesirable results (Fig. 8, rightmost). Our method enables compositionality between different shape modifications by chaining simple concepts together (Fig. 8). Specifically, we decompose the desired modification into three separate attainable targets ('Iron Man', 'Steve Jobs', and 'Yeti'), which we stylize individually with Text2Mesh. We then utilize our 3D Highlighter to localize the text-specified regions. We achieve the desired composition by combining the highlighted regions together, obtaining clear boundaries between stylizations. + +Semantic segmentation. In Fig. 10, we show that our technique is not restricted to hallucinated highlighting and is capable of localizing semantically-specified geometric regions. These text-driven localizations identify unique geometric parts without utilizing any 3D datasets or part labels. 
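
As a concrete illustration of how the predicted probabilities could drive the localized edits and compositions above, the following sketch gates a globally stylized copy of a mesh by the per-vertex highlight probabilities. It is our own example under assumed inputs (the function name, the threshold `tau`, and the color tensors are hypothetical), not the released implementation.

```python
import torch

def composite_localized_edit(base_colors: torch.Tensor,
                             stylized_colors: torch.Tensor,
                             probs: torch.Tensor,
                             hard: bool = True,
                             tau: float = 0.5) -> torch.Tensor:
    """Apply a global stylization only inside the text-specified region.

    base_colors, stylized_colors: (N, 3) per-vertex RGB of the bare and fully
    stylized mesh; probs: (N,) highlight probabilities from the neural highlighter.
    """
    # Threshold into a hard region mask, or keep the probabilities as a soft
    # blending weight in the spirit of the probability-weighted blend of Sec. 3.2.
    w = (probs > tau).float() if hard else probs
    return w[:, None] * stylized_colors + (1.0 - w[:, None]) * base_colors
```

A hard mask yields crisp boundaries between regions, as in the compositions of Fig. 8, while the soft variant preserves the smooth transitions of the blending scheme.
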
+ +# 4.4. Components of 3D Highlighter + +Ablation study. Several components are key for facilitating 3D Highlighter. We provide ablation results in Fig. 7 to demonstrate the effect of our design choices. First, using a direct optimization of the vertex color (direct) instead of optimizing a neural field results in splotchy highlight artifacts. Since the neural field has a spectral bias towards smooth solutions [32], omitting it leads to an undesired noisy output. Second, removing the probability weighted blending (no blend) and instead coloring vertices using only + +![](images/5f07d2dbe7886e50bd891038211e2c20cd2aa37502ddaa9905c6454b084a0a90.jpg) +Arm + +![](images/6b22d823e0eed84c95e938a67070eeed0862e956e2f688fba10eb3562f8f2beb.jpg) +Slide + +![](images/68ef7f752136e4660dafb1582c8a971b0bd35ab5640a501d31930cc7a2fb506d.jpg) +Propeller +Figure 10. Semantic Segmentation. 3D Highlighter produces semantic segmentations for unique geometric parts without any 3D dataset or annotations. + +two distinct values also produces a noisy highlight pattern. Without a continuous color blend, the gradients become ill-conditioned and unstable, leading to highlight artifacts and irregular localization boundaries. Lastly, similar to previous works [9, 25], we observe that without 2D perspective augmentations (no augs), 3D Highlighter outputs degenerate solutions. The ablation study emphasizes the importance of our key design choices in 3D Highlighter for its ability to highlight a coherent and localized region on the input shape. + +Prompt formulation and CLIP understanding. Our prompt formulation combined with our coloring scheme results in the correct association between objects and their properties, a known challenge when using CLIP [33]. In Fig. 12, we analyze the CLIP score for two different prompts: 'gray chair with highlighted back' (left) and 'blue chair with red back' (right). For each prompt, we measure the CLIP similarity to renders of both the correct assignment and flipped assignment. + +We observe that our prompt formulation ('gray chair with highlighted back') results in a higher average CLIP score for the correct assignment. In contrast, when specifying colors in the prompt ('blue chair with red back') and styling the mesh accordingly, we see higher CLIP scores for the flipped association. Using the same gray and yellow renders (left), we also compare to a prompt specifying colors ('gray chair with yellow back') and find that the higher + +![](images/97dddc40b0091821cfab65ba2c7e2cfb43f71a6956850708ff94d0d1f24f3e7e.jpg) +Figure 11. Network initialization. We optimize 3D Highlighter for the text prompt 'belt' using different initialization methods: using a default initialization where all output probabilities are near 0.5 (middle) or altering the final layer so that all outputs are 0 (left) or 1 (right). Initializing with 0 or 1 leads to an undesirable result. + +![](images/46a442d6d6c2c3000f719cefdc0f5284a2910bc7d071a06bbdd8e125c11897ff.jpg) +Figure 12. CLIP understanding. We examine CLIP similarity scores for several prompt formulations targeting the 'back' of the chair while using the correct color assignment and where the coloring is flipped. For the prompt 'gray chair with highlighted back' (left) we observe that the CLIP score is higher for the correct assignment. For the prompt 'blue chair with red back' (right) the CLIP score is higher for the flipped (incorrect) assignment. + +CLIP score corresponds to the flipped selection (data not shown). 
+ +We also measure the CLIP scores for our standard prompt formulation: 'gray chair with highlighted back', replacing the yellow color in the rendering with other colors, such as red and blue, and find that the correct selection has a higher CLIP score (data not shown). To conclude, our prompt formulation (i.e., the use of the term 'highlighted') coincides with CLIP's understanding and 3D Highlighter is robust to the highlight color. + +Network initialization. Initializing the network such that the object is partially highlighted (i.e., with highlight probability equal to 0.5) is important for obtaining desirable results. In Fig. 11, we show the optimization of our method for the target text prompt 'belt' using three different initializations. Our method (middle) initializes all output probabilities near 0.5 by random weight initialization of the network. We compare to initializing the output probabilities to 0 (left) or 1 (right), in which we set the weights of the last layer to 0, and the bias to 0 or 1, respectively. + +For the initialization to both 0.5 and 1, a highlight color is uniformly present on the styled mesh, whereas with 0, the mesh is gray with no highlight. Consequently, we hypothesize that the presence of highlight color at initialization is important for CLIP's supervision. + +# 4.5. Limitations + +3D Highlighter is robust to variations of the object specification in the target prompt. However, there should still be a logical connection between the 3D shape and its description. Fig. 13 shows results for a camel mesh and the target highlight 'shinguards'. For each optimization, we use a slightly different target prompt by varying the object specification. The prompts are of the form "[object] with highlighted shinguards", where [object] is replaced with camel, pig, animal, or chair. + +In Fig. 13, we observe that with object specifications + +that resemble the geometry of camel, such as pig and animal, 3D Highlighter accurately localizes the desired region. However, for a description that is incompatible with the object's geometry (i.e., referring to a camel as a chair), our method does not produce meaningful results. This result sheds light on 3D Highlighter's robustness to text descriptions: 3D Highlighter is able to reason about a mesh even when its description is not perfectly accurate, provided that it is sufficiently similar to the true description (i.e., referring to a camel mesh as a pig). + +# 5. Conclusions + +We present a technique for highlighting semantic regions on meshes using text as input, without any 3D datasets or 3D pre-training. 3D Highlighter can reason about where to place a non-obviously related part on a 3D object (i.e. a hat on a candle). The ability to combine unconnected parts and objects together is reminiscent of ideas from image analogies [12, 22]. In this work, we show that we can identify part-concepts that are geometrically absent from a shape, giving rise to our hallucinated highlighting capability. + +During neural optimization, our neural network infers a probability which we use to blend the highlight color onto the mesh. The network-predicted probabilities are general, and provide a soft-segmentation which we show can be used for a variety of different applications (Figs. 4 and 8). In the future, we are interested in extending our framework to obtain part correspondence between shapes that differ topologically but are semantically related. + +# 6. 
Acknowledgments + +We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Adobe Research. Finally, we would like to thank Richard Liu, Avery Zhou, and the members of 3DL for their thorough and insightful feedback on our work. + +![](images/d466cfa6af2a35e8ea84c273f2ed61469a2e59581258127dfb4738c887104207.jpg) +Figure 13. Prompt generality. Our system is robust to certain variations in object specifications. We achieve desirable results for the text input 'camel with highlighted shinguards' (left), as well as for other variations ('pig' and 'animal'). If the object specification, such as 'chair', is incompatible with the input geometry, 3D Highlighter no longer produces meaningful results. + +# References + +[1] Shmuel Asafi, Avi Goren, and Daniel Cohen-Or. Weak convex decomposition by lines-of-sight. Computer graphics forum, 32(5):23-31, 2013. 3 +[2] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022. 3, 6, 12, 13 +[3] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 5 +[4] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019. 4 +[5] Zhiqin Chen, Kangxue Yin, Matthew Fisher, Siddhartha Chaudhuri, and Hao Zhang. Bae-net: Branched autoencoder for shape co-segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8490-8499, 2019. 3 +[6] Nicu D Cornea, Deborah Silver, and Patrick Min. **Curve-skeleton properties, applications, and algorithms. IEEE Transactions on visualization and computer graphics*, 13(3):530, 2007. 3 +[7] Boyang Deng, Kyle Genova, Soroosh Yazdani, Sofien Bouaziz, Geoffrey Hinton, and Andrea Tagliasacchi. Cvxnet: Learnable convex decomposition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 31-44, 2020. 3 +[8] Tamal K Dey and Wulue Zhao. Approximating the medial axis from the voronoi diagram with a convergence guarantee. Algorithmica, 38(1):179-200, 2004. 3 +[9] Kevin Frans, Lisa B. Soros, and Olaf Witkowski. Clipdraw: Exploring text-to-drawing synthesis through language-image encoders. ArXiv, abs/2106.14843, 2021. 5, 7 +[10] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. arXiv preprint arXiv:2207.09446, 2022. 3 +[11] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: A network with an edge. ACM Transactions on Graphics (TOG), 38(4):90:1–90:12, 2019. 3 +[12] Aaron Hertzmann, Charles E Jacobs, Nuria Oliver, Brian Curless, and David H Salesin. Image analogies. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 327-340, 2001. 8 +[13] Donald D Hoffman and Whitman A Richards. Parts of recognition. Cognition, 18(1-3):65-96, 1984. 3 +[14] Yining Hong, Yilun Du, Chunru Lin, Josh Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. 
In Annual Conference on Neural Information Processing Systems, 2022. 3 + +[15] Shi-Min Hu, Zheng-Ning Liu, Meng-Hao Guo, Junxiong Cai, Jiahui Huang, Tai-Jiang Mu, and Ralph R. Martin. Subdivision-based mesh convolution networks. ACM Trans. Graph., 41(3):25:1-25:16, 2022. 3 +[16] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3, 12 +[17] Oliver Van Kaick, Noa Fish, Yanir Kleiman, Shmuel Asafi, and Daniel Cohen-Or. Shape segmentation by approximate convexity analysis. ACM Transactions on Graphics (TOG), 34(1):1–11, 2014. 1 +[18] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Popa Tiberiu. Clip-mesh: Generating textured meshes from text using pretrained image-text models. SIGGRAPH Asia 2022 Conference Papers, December 2022. 3 +[19] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nef for editing via feature field distillation. arXiv, 2022. 3 +[20] Alon Lahav and Ayellet Tal. Meshwalker: Deep mesh understanding by random walks. ACM Transactions on Graphics (TOG), 39(6):1-13, 2020. 3 +[21] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In International Conference on Learning Representations, 2022. 3, 5, 6, 12, 13 +[22] Jing Liao, Yuan Yao, Lu Yuan, Gang Hua, and Sing Bing Kang. Visual attribute transfer through deep image analogy. arXiv preprint arXiv:1705.01088, 2017. 8 +[23] Jyh-Ming Lien and Nancy M Amato. Approximate convex decomposition of polyhedra. In Proceedings of the 2007 ACM symposium on Solid and physical modeling, pages 121-131, 2007. 3 +[24] Haggai Maron, Meirav Galun, Noam Aigerman, Miri Trope, Nadav Dym, Ersin Yumer, Vladimir G Kim, and Yaron Lipman. Convolutional neural networks on surfaces via seamless toric covers. ACM Trans. Graph., 36(4):71-1, 2017. 3 +[25] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3, 5, 6, 7, 11 +[26] Francesco Milano, Antonio Loquercio, Antoni Rosinol, Davide Scaramuzzi, and Luca Carlone. Primal-dual mesh convolutional neural networks. Advances in Neural Information Processing Systems, 33:952-963, 2020. 3 +[27] Kaichun Mo, Shilin Zhu, Angel X. Chang, Li Yi, Subarna Tripathi, Leonidas J. Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding, 2018. 1 +[28] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 12 + +[29] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 5 +[30] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 3 +[31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020, 2021. 
1, 2, 3, 4 +[32] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 5301-5310. PMLR, 09-15 Jun 2019. 4, 7 +[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.7 +[34] James Matthew Rehg. Toys4k 3d object dataset, 2022. https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k.5 +[35] Ariel Shamir. A survey on mesh segmentation techniques. Computer graphics forum, 27(6):1539-1556, 2008. 3 +[36] Nicholas Sharp, Souhaib Attaiki, Keenan Crane, and Maks Ovsjanikov. Diffusionnet: Discretization agnostic learning on surfaces. ACM Transactions on Graphics (TOG), 41(3):1-16, 2022. 3 +[37] Weiwei Sun, Andrea Tagliasacchi, Boyang Deng, Sara Sabour, Soroosh Yazdani, Geoffrey E Hinton, and Kwang Moo Yi. Canonical capsules: Self-supervised capsules in canonical pose. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 24993-25005. Curran Associates, Inc., 2021. 3 +[38] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 4, 12 +[39] Hideki Todo, Ken Anjyo, and Shun'Ichi Yokoyama. Litsphere extension for artistic rendering. Vis. Comput., 29(6-8):473-480, jun 2013. 7 +[40] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/.5 +[41] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553–562, 2011. 5 +[42] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the + +IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3 +[43] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 5 +[44] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Computer Graphics Forum, 2022. 1, 4, 12 +[45] Li Yi, Hao Su, Xingwen Guo, and Leonidas J Guibas. Syncspeccnn: Synchronized spectral cnn for 3d shape segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2282-2290, 2017. 3 +[46] Fenggen Yu, Kun Liu, Yan Zhang, Chenyang Zhu, and Kai Xu. Partnet: A recursive part decomposition network for fine-grained and hierarchical shape segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9491-9500, 2019. 3 +[47] Qian Zheng, Zhuming Hao, Hui Huang, Kai Xu, Hao Zhang, Daniel Cohen-Or, and Baoquan Chen. Skeleton-intrinsic symmetrization of shapes. 
Computer Graphics Forum, 34(2):275-286, 2015. 3 +[48] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 5 +[49] Chenyang Zhu, Kai Xu, Siddhartha Chaudhuri, Li Yi, Leonidas J Guibas, and Hao Zhang. Adacoseg: Adaptive shape co-segmentation with group consistency loss. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8543-8552, 2020. 3 \ No newline at end of file diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/images.zip b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..295d6bef34fdb5b5a1742be4059f78413c166dae --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c5f237b47e3aa48f84dabe7017bb70233e978dfd15e92f4b2f4bf36e1aa6939 +size 379468 diff --git a/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/layout.json b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0adebf8be828fe73a9fef8a236f026e5ee1a35f1 --- /dev/null +++ b/2023/3D Highlighter_ Localizing Regions on 3D Shapes via Text Descriptions/layout.json @@ -0,0 +1,8907 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 77, + 103, + 516, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 103, + 516, + 121 + ], + "spans": [ + { + "bbox": [ + 77, + 103, + 516, + 121 + ], + "type": "text", + "content": "3D Highlighter: Localizing Regions on 3D Shapes via Text Descriptions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 144, + 179, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 144, + 179, + 156 + ], + "spans": [ + { + "bbox": [ + 112, + 144, + 179, + 156 + ], + "type": "text", + "content": "Dale Decatur" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 91, + 157, + 201, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 157, + 201, + 171 + ], + "spans": [ + { + "bbox": [ + 91, + 157, + 201, + 171 + ], + "type": "text", + "content": "University of Chicago" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 174, + 204, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 174, + 204, + 185 + ], + "spans": [ + { + "bbox": [ + 88, + 174, + 204, + 185 + ], + "type": "text", + "content": "ddecatur@uchicago.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 265, + 144, + 312, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 144, + 312, + 157 + ], + "spans": [ + { + "bbox": [ + 265, + 144, + 312, + 157 + ], + "type": "text", + "content": "Itai Lang" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 235, + 157, + 342, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 157, + 342, + 172 + ], + "spans": [ + { + "bbox": [ + 235, + 157, + 342, + 172 + ], + "type": "text", + "content": "University of Chicago" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 231, + 174, + 346, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 174, + 346, + 185 + ], + "spans": [ + { + "bbox": [ + 231, + 174, + 346, + 185 + ], + "type": "text", + "content": "itailang@uchicago.edu" + } + ] + } + ], + "index": 
8 + }, + { + "bbox": [ + 403, + 144, + 476, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 144, + 476, + 156 + ], + "spans": [ + { + "bbox": [ + 403, + 144, + 476, + 156 + ], + "type": "text", + "content": "Rana Hanocka" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 385, + 157, + 493, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 157, + 493, + 172 + ], + "spans": [ + { + "bbox": [ + 385, + 157, + 493, + 172 + ], + "type": "text", + "content": "University of Chicago" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 374, + 174, + 505, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 174, + 505, + 185 + ], + "spans": [ + { + "bbox": [ + 374, + 174, + 505, + 185 + ], + "type": "text", + "content": "ranahanocka@uchicago.edu" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 48, + 200, + 132, + 317 + ], + "blocks": [ + { + "bbox": [ + 48, + 200, + 132, + 317 + ], + "lines": [ + { + "bbox": [ + 48, + 200, + 132, + 317 + ], + "spans": [ + { + "bbox": [ + 48, + 200, + 132, + 317 + ], + "type": "image", + "image_path": "3ce3a39f1eb0d64ad65575fb6db76724d3770833ec5661ef4d5b62770f653931.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 319, + 99, + 328 + ], + "lines": [ + { + "bbox": [ + 83, + 319, + 99, + 328 + ], + "spans": [ + { + "bbox": [ + 83, + 319, + 99, + 328 + ], + "type": "text", + "content": "Hat" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 140, + 206, + 256, + 317 + ], + "blocks": [ + { + "bbox": [ + 140, + 206, + 256, + 317 + ], + "lines": [ + { + "bbox": [ + 140, + 206, + 256, + 317 + ], + "spans": [ + { + "bbox": [ + 140, + 206, + 256, + 317 + ], + "type": "image", + "image_path": "60c4879aeb4ae28ea5aa5ff682daf21552ad4ac22b576e77b4c48bb58976f923.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 319, + 206, + 329 + ], + "lines": [ + { + "bbox": [ + 167, + 319, + 206, + 329 + ], + "spans": [ + { + "bbox": [ + 167, + 319, + 206, + 329 + ], + "type": "text", + "content": "Necklace" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 244, + 206, + 358, + 317 + ], + "blocks": [ + { + "bbox": [ + 244, + 206, + 358, + 317 + ], + "lines": [ + { + "bbox": [ + 244, + 206, + 358, + 317 + ], + "spans": [ + { + "bbox": [ + 244, + 206, + 358, + 317 + ], + "type": "image", + "image_path": "fe90a136ea0f9260c9f288ed71d72975217f89971a6a4916f93a99516821e195.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 276, + 319, + 321, + 330 + ], + "lines": [ + { + "bbox": [ + 276, + 319, + 321, + 330 + ], + "spans": [ + { + "bbox": [ + 276, + 319, + 321, + 330 + ], + "type": "text", + "content": "Headlights" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 332, + 546, + 355 + ], + "lines": [ + { + "bbox": [ + 46, + 332, + 546, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 332, + 546, + 355 + ], + "type": "text", + "content": "Figure 1. 3D Highlighter localizes semantic regions on a shape using text as input. Our technique reasons about where to place seemingly unrelated concepts in semantically meaningful locations on the 3D shape, such as a 'necklace' on a horse or 'shoes' on an alien." 
+ } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 361, + 206, + 482, + 317 + ], + "blocks": [ + { + "bbox": [ + 361, + 206, + 482, + 317 + ], + "lines": [ + { + "bbox": [ + 361, + 206, + 482, + 317 + ], + "spans": [ + { + "bbox": [ + 361, + 206, + 482, + 317 + ], + "type": "image", + "image_path": "5fb07b1e0540704ea8d02f727270040e868b72855b1128eb69cfd2362028f4f0.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 319, + 435, + 329 + ], + "lines": [ + { + "bbox": [ + 410, + 319, + 435, + 329 + ], + "spans": [ + { + "bbox": [ + 410, + 319, + 435, + 329 + ], + "type": "text", + "content": "Shoes" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 488, + 200, + 544, + 317 + ], + "blocks": [ + { + "bbox": [ + 488, + 200, + 544, + 317 + ], + "lines": [ + { + "bbox": [ + 488, + 200, + 544, + 317 + ], + "spans": [ + { + "bbox": [ + 488, + 200, + 544, + 317 + ], + "type": "image", + "image_path": "4ae578474bf040ed0bec23be64939a47d3ec22c8b57557e5f3c545daefabbbef.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 493, + 319, + 539, + 330 + ], + "lines": [ + { + "bbox": [ + 493, + 319, + 539, + 330 + ], + "spans": [ + { + "bbox": [ + 493, + 319, + 539, + 330 + ], + "type": "text", + "content": "Eyeglasses" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 143, + 365, + 192, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 365, + 192, + 377 + ], + "spans": [ + { + "bbox": [ + 143, + 365, + 192, + 377 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 390, + 290, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 390, + 290, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 390, + 290, + 571 + ], + "type": "text", + "content": "We present 3D Highlighter, a technique for localizing semantic regions on a mesh using text as input. A key feature of our system is the ability to interpret \"out-of-domain\" localizations. Our system demonstrates the ability to reason about where to place non-obviously related concepts on an input 3D shape, such as adding clothing to a bare 3D animal model. Our method contextualizes the text description using a neural field and colors the corresponding region of the shape using a probability-weighted blend. Our neural optimization is guided by a pre-trained CLIP encoder, which bypasses the need for any 3D datasets or 3D annotations. Thus, 3D Highlighter is highly flexible, general, and capable of producing localizations on a myriad of input shapes. Our code is publicly available at https://github.com/threedle/3DHighlighter." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 47, + 585, + 128, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 128, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 128, + 597 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "type": "text", + "content": "Semantic localization of regions on 3D meshes is an important problem in computer graphics and vision with broad applications. One such application is the incorporation of semantic information into the 3D modeling process. A particularly challenging aspect of this task emerges when 3D geometric signals are insufficient for performing segmentation, e.g. where to add a shirt to a bare 3D human model." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "We propose 3D Highlighter, a method for automatically localizing fine-grained semantic regions on a shape based" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 366, + 546, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 546, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 546, + 437 + ], + "type": "text", + "content": "on only a text description. Our system contextualizes the text prompt and highlights the corresponding shape region using the network-predicted probabilities. Using only text, users are able to semantically identify regions on a shape. Our system takes meshes as input, making it compatible with 3D modeling workflows and tools." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 437, + 546, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 546, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 546, + 545 + ], + "type": "text", + "content": "This highlighting task requires both object-level and part-level understanding. 3D Highlighter demonstrates the ability to reason about where to place seemingly unrelated concepts on the 3D shape, such as a hat on a candle (Fig. 1). Our system localizes attributes that are geometrically absent from a shape, which we refer to as hallucinated highlighting. Understanding a part's global shape context is challenging even when relying on salient geometric features [17,27], let alone without them." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 545, + 546, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 546, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 546, + 664 + ], + "type": "text", + "content": "We optimize the weights of a neural network to produce probabilities that are used to color a given 3D shape in accordance with the specified text. We leverage a pre-trained vision-language model (CLIP [31]) to guide the neural optimization towards the text-specified region. This neural optimization formulation is flexible, bypassing the need for any 3D datasets, 3D annotations, or 3D pre-training. Our system is not bound to a specific set of classes, and, as shown in Fig. 2, is not limited to object parts defined by salient geometric features." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "We encode the part selection as a neural field [44] over the mesh surface. Our network learns to map each point on the surface to a probability of belonging to the text-specified region. We translate the inferred probabilities to a visual at-" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20930" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 68, + 547, + 432 + ], + "blocks": [ + { + "bbox": [ + 50, + 68, + 547, + 432 + ], + "lines": [ + { + "bbox": [ + 50, + 68, + 547, + 432 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 547, + 432 + ], + "type": "image", + "image_path": "017b26d3608a89b48df7256a49f16654424c0c1d823dddfbe5754975975d1db4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 434, + 547, + 456 + ], + "lines": [ + { + "bbox": [ + 46, + 434, + 547, + 456 + ], + "spans": [ + { + "bbox": [ + 46, + 434, + 547, + 456 + ], + "type": "text", + "content": "Figure 2. Hallucinated part highlighting. Our system is able to reason about where to highlight a geometrically-absent region on shapes. The resulting localizations demonstrate global understanding and localized part-awareness." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 477, + 289, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 477, + 289, + 574 + ], + "spans": [ + { + "bbox": [ + 46, + 477, + 289, + 574 + ], + "type": "text", + "content": "tribute on the mesh surface, which can be rendered and visually understood. The network-predicted probabilities act as a soft-selection operator which blends the highlighter color onto the mesh. The network weights are updated by encouraging the CLIP [31] embedding of the 2D renders of the highlighted mesh to adhere to the specified text. As a result, the network implicitly learns to segment the object to adhere to the text prompt." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": "We make several design choices that are key to the success of 3D Highlighter. Our network does not directly color the mesh. Rather, we predict a probability of being inside the text-specified highlight, which is used to blend colors on the mesh. The network is initialized such that points have roughly a " + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 46, + 582, + 289, + 715 + ], + "type": "text", + "content": " probability of being highlighted, resulting in a mesh with albedo halfway between the highlight and background color. During optimization, the relative blend weight of the highlight color directly corresponds to the highlight probability. This blending enables the network to naturally and smoothly increase or decrease the segmenta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 477, + 545, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 477, + 545, + 501 + ], + "spans": [ + { + "bbox": [ + 304, + 477, + 545, + 501 + ], + "type": "text", + "content": "tion probability in accordance with the text specification of the target region." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 501, + 546, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 501, + 546, + 646 + ], + "spans": [ + { + "bbox": [ + 304, + 501, + 546, + 646 + ], + "type": "text", + "content": "In summary, we present a method for localizing semantic regions on 3D shapes. The localization is specified by a textual description, which is intuitive, flexible, and not limited to a specific training dataset. We demonstrate applications of our method to shape editing and stylization. Furthermore, our field formulation enables the 3D Highlighter to work with different mesh resolutions and triangulations. A key feature of our system is the ability to interpret out-of-domain localizations. For example, 3D Highlighter is able to figure out where to place a 'hat' on a candle as seen in Fig. 1, demonstrating the ability to reason about where to place seemingly unrelated concepts on the 3D shape." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 654, + 392, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 654, + 392, + 667 + ], + "spans": [ + { + "bbox": [ + 306, + 654, + 392, + 667 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": "Geometry-driven segmentation. 
Traditional works in geometry processing use low-level geometric features (such as surface area, curvature, or geodesic distance) in or-" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "20931" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 123, + 135 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 123, + 135 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 123, + 135 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 123, + 135 + ], + "type": "image", + "image_path": "9766ef4a11261cc1f15997de6dd888eb8713529b811146c3bb3ee2bfeb3e8788.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 137, + 115, + 148 + ], + "lines": [ + { + "bbox": [ + 64, + 137, + 115, + 148 + ], + "spans": [ + { + "bbox": [ + 64, + 137, + 115, + 148 + ], + "type": "text", + "content": "Headphones" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 130, + 70, + 203, + 135 + ], + "blocks": [ + { + "bbox": [ + 130, + 70, + 203, + 135 + ], + "lines": [ + { + "bbox": [ + 130, + 70, + 203, + 135 + ], + "spans": [ + { + "bbox": [ + 130, + 70, + 203, + 135 + ], + "type": "image", + "image_path": "81cd7b51d2cfd8d78a71821d36fd79da22a5b7a19117eff2d1f513b1af286837.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 137, + 184, + 147 + ], + "lines": [ + { + "bbox": [ + 159, + 137, + 184, + 147 + ], + "spans": [ + { + "bbox": [ + 159, + 137, + 184, + 147 + ], + "type": "text", + "content": "Shoes" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 70, + 279, + 135 + ], + "blocks": [ + { + "bbox": [ + 208, + 70, + 279, + 135 + ], + "lines": [ + { + "bbox": [ + 208, + 70, + 279, + 135 + ], + "spans": [ + { + "bbox": [ + 208, + 70, + 279, + 135 + ], + "type": "image", + "image_path": "294bd74ac95b194ac50b261400ea011e725354d47acbb368eeae197473ee4fbe.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 137, + 252, + 146 + ], + "lines": [ + { + "bbox": [ + 236, + 137, + 252, + 146 + ], + "spans": [ + { + "bbox": [ + 236, + 137, + 252, + 146 + ], + "type": "text", + "content": "Hat" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 152, + 120, + 204 + ], + "blocks": [ + { + "bbox": [ + 50, + 152, + 120, + 204 + ], + "lines": [ + { + "bbox": [ + 50, + 152, + 120, + 204 + ], + "spans": [ + { + "bbox": [ + 50, + 152, + 120, + 204 + ], + "type": "image", + "image_path": "959b668781fd20b838201489cefa175f07a439dd1efd9bafbc25d1795cc64b51.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 74, + 205, + 99, + 215 + ], + "lines": [ + { + "bbox": [ + 74, + 205, + 99, + 215 + ], + "spans": [ + { + "bbox": [ + 74, + 205, + 99, + 215 + ], + "type": "text", + "content": "Shoes" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 121, + 152, + 202, + 205 + 
], + "blocks": [ + { + "bbox": [ + 121, + 152, + 202, + 205 + ], + "lines": [ + { + "bbox": [ + 121, + 152, + 202, + 205 + ], + "spans": [ + { + "bbox": [ + 121, + 152, + 202, + 205 + ], + "type": "image", + "image_path": "7c59cf4c4e5b68a607234db82cf9d6bcbab0a29c916e3e3e1d450e3ec7c0381d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 205, + 181, + 215 + ], + "lines": [ + { + "bbox": [ + 141, + 205, + 181, + 215 + ], + "spans": [ + { + "bbox": [ + 141, + 205, + 181, + 215 + ], + "type": "text", + "content": "Necklace" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 205, + 152, + 286, + 205 + ], + "blocks": [ + { + "bbox": [ + 205, + 152, + 286, + 205 + ], + "lines": [ + { + "bbox": [ + 205, + 152, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 205, + 152, + 286, + 205 + ], + "type": "image", + "image_path": "77c4c76920741c24fa86ab07bfca602d48b239ea9ec27eea54eea9565ea4204c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 227, + 205, + 259, + 215 + ], + "lines": [ + { + "bbox": [ + 227, + 205, + 259, + 215 + ], + "spans": [ + { + "bbox": [ + 227, + 205, + 259, + 215 + ], + "type": "text", + "content": "Glasses" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 51, + 222, + 121, + 274 + ], + "blocks": [ + { + "bbox": [ + 51, + 222, + 121, + 274 + ], + "lines": [ + { + "bbox": [ + 51, + 222, + 121, + 274 + ], + "spans": [ + { + "bbox": [ + 51, + 222, + 121, + 274 + ], + "type": "image", + "image_path": "2019c1bb121776727762325c858011eb3ff3e6de4ad7452ff2614f5f21bf286c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 276, + 95, + 285 + ], + "lines": [ + { + "bbox": [ + 77, + 276, + 95, + 285 + ], + "spans": [ + { + "bbox": [ + 77, + 276, + 95, + 285 + ], + "type": "text", + "content": "Belt" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 129, + 222, + 202, + 276 + ], + "blocks": [ + { + "bbox": [ + 129, + 222, + 202, + 276 + ], + "lines": [ + { + "bbox": [ + 129, + 222, + 202, + 276 + ], + "spans": [ + { + "bbox": [ + 129, + 222, + 202, + 276 + ], + "type": "image", + "image_path": "770a8424474d00430078d9ff43613e1f1d548b70b88925618a0696fb033d8dc8.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 276, + 172, + 285 + ], + "lines": [ + { + "bbox": [ + 156, + 276, + 172, + 285 + ], + "spans": [ + { + "bbox": [ + 156, + 276, + 172, + 285 + ], + "type": "text", + "content": "Hat" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 209, + 222, + 280, + 276 + ], + "blocks": [ + { + "bbox": [ + 209, + 222, + 280, + 276 + ], + "lines": [ + { + "bbox": [ + 209, + 222, + 280, + 276 + ], + "spans": [ + { + "bbox": [ + 209, + 222, + 280, + 276 + ], + "type": "image", + "image_path": "57decd69583ee7eab9d4754d219932c4bfbc74046be0677bb3003a98f148289a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 223, + 276, + 263, + 285 + ], + "lines": [ + { + "bbox": [ + 223, + 276, + 263, + 285 + ], + "spans": [ + { + "bbox": [ + 223, + 276, + 263, + 285 + ], + "type": "text", + "content": "Necklace" + } + ] + } + ], + 
"index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 62, + 293, + 108, + 365 + ], + "blocks": [ + { + "bbox": [ + 62, + 293, + 108, + 365 + ], + "lines": [ + { + "bbox": [ + 62, + 293, + 108, + 365 + ], + "spans": [ + { + "bbox": [ + 62, + 293, + 108, + 365 + ], + "type": "image", + "image_path": "78d80f0d2b6d1a2c6fcc7194230b72a5c46477d56b3699b6d6aa54ef4b3657cc.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 365, + 103, + 374 + ], + "lines": [ + { + "bbox": [ + 64, + 365, + 103, + 374 + ], + "spans": [ + { + "bbox": [ + 64, + 365, + 103, + 374 + ], + "type": "text", + "content": "Necklace" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 376, + 287, + 432 + ], + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 432 + ], + "type": "text", + "content": "Figure 3. Our method is able to highlight different parts on the same object. For target selections that correspond to distinct regions, 3D Highlighter produces selections that are semantically meaningful and spatially separated without signal from underlying geometry." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 146, + 294, + 186, + 364 + ], + "blocks": [ + { + "bbox": [ + 146, + 294, + 186, + 364 + ], + "lines": [ + { + "bbox": [ + 146, + 294, + 186, + 364 + ], + "spans": [ + { + "bbox": [ + 146, + 294, + 186, + 364 + ], + "type": "image", + "image_path": "ad0b618d045f1d549053c56f8bda5ecb063249ae42b25d58f0f874c73ea53607.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 156, + 365, + 178, + 374 + ], + "lines": [ + { + "bbox": [ + 156, + 365, + 178, + 374 + ], + "spans": [ + { + "bbox": [ + 156, + 365, + 178, + 374 + ], + "type": "text", + "content": "Roof" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 222, + 293, + 263, + 365 + ], + "blocks": [ + { + "bbox": [ + 222, + 293, + 263, + 365 + ], + "lines": [ + { + "bbox": [ + 222, + 293, + 263, + 365 + ], + "spans": [ + { + "bbox": [ + 222, + 293, + 263, + 365 + ], + "type": "image", + "image_path": "7c5d9feaaeb944ad53557f28a02665cc718c380495e4710928de625be0f015b7.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 365, + 253, + 374 + ], + "lines": [ + { + "bbox": [ + 230, + 365, + 253, + 374 + ], + "spans": [ + { + "bbox": [ + 230, + 365, + 253, + 374 + ], + "type": "text", + "content": "Arms" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 46, + 455, + 286, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 455, + 286, + 551 + ], + "spans": [ + { + "bbox": [ + 46, + 455, + 286, + 551 + ], + "type": "text", + "content": "der to infer high-level semantic attributes for segmenting shapes [35]. In particular, decomposing shapes into smaller parts or segments often corresponds with physical 3D semantic parts [13, 35]. One approach is to partition shapes based on convexity, or an approximation of convexity [1, 23]. The medial axis carries topological information, which may also be used as a guideline for segmentation [6,8,35,47]." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 46, + 552, + 286, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 286, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 286, + 635 + ], + "type": "text", + "content": "The underlying assumption in these works is that processing the local geometry can be used to understand the semantics for segmentation. By contrast, a key aspect of our work is the ability to perform hallucinated highlights: segmentations that can not necessarily be inferred by geometry alone. See example highlights in Fig. 2 (e.g., localizing a heart on a goat)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 286, + 713 + ], + "type": "text", + "content": "Data-driven segmentation. In the deep learning era, the 3D part segmentation task has been widely tackled by neural network models [11, 15, 20, 26, 36, 45]. Training such a model is typically done in a fully-supervised manner on a large dataset of shapes annotated with a given set of part classes. For example, MeshCNN [11] was trained on a" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 144 + ], + "type": "text", + "content": "human-body segmentation dataset [24] for learning semantic part segmentation. To alleviate the need for 3D annotations, unsupervised learning schemes utilize large collections of unlabelled data [5,7,14,37,49]. For example, Hong et al. [14] inferred part-segmentation through question answering on rendered images from PartNet [46]." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 144, + 546, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 144, + 546, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 144, + 546, + 239 + ], + "type": "text", + "content": "In contrast to existing deep learning approaches for shape segmentation, we do not rely on any 3D dataset, nor are we bounded to a specific shape category or set of parts. Instead, we specify the desired localization using text and a pre-trained CLIP model which encompasses rich semantic object understanding. Thus, our 3D Highlighter is capable of localizing various semantic regions on a wide variety of 3D shapes." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 304, + 243, + 545, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 243, + 545, + 361 + ], + "spans": [ + { + "bbox": [ + 304, + 243, + 545, + 361 + ], + "type": "text", + "content": "Text-guidance. Recent works have leveraged pre-trained vision-language embedding spaces, such as CLIP [31], for analysis, synthesis, and editing. Some techniques leverage pre-trained image encoders for achieving semantic segmentation in images and neural radiance fields [2, 19, 21]. Such techniques are capable of segmenting entire objects within a scene based on text, e.g., a chair inside a room. However, they may struggle to segment parts within an object; e.g., failing to distinguish a window (part) from a house (object) [21]." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 304, + 362, + 545, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 362, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 304, + 362, + 545, + 480 + ], + "type": "text", + "content": "Our work is inspired by the emergent analysis in text-driven synthesis techniques for 3D data [10, 16, 18, 25, 30, 42]. Specifically, Text2Mesh [25] devised a framework for text-driven stylization of 3D meshes, observing that the resulting textures consider part-aware semantics. Yet, since Text2Mesh directly synthesizes stylizations, there is no obvious way to extract any underlying semantic analysis. To address this, we opt to use a highlighter color only as a means for visualizing the network-predicted segmentations." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 306, + 491, + 361, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 361, + 503 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 361, + 503 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": "An illustration of our method is shown in Fig. 5. The inputs to our system are a mesh " + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": ", represented by vertices " + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "inline_equation", + "content": "V \\in \\mathbb{R}^{n \\times 3}" + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": " and faces " + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "inline_equation", + "content": "F \\in \\{1, \\dots, n\\}^{m \\times 3}" + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": ", and a text description " + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": ". Our neural network, referred to as neural highlighter, is optimized to map vertex positions " + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "inline_equation", + "content": "v \\in V" + }, + { + "bbox": [ + 304, + 511, + 545, + 572 + ], + "type": "text", + "content": " to a" + } + ] + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 307, + 586, + 544, + 676 + ], + "blocks": [ + { + "bbox": [ + 307, + 586, + 544, + 676 + ], + "lines": [ + { + "bbox": [ + 307, + 586, + 544, + 676 + ], + "spans": [ + { + "bbox": [ + 307, + 586, + 544, + 676 + ], + "type": "image", + "image_path": "ea684b3ec72b0b19c97fc2fdb468f84d805f3b34d80640e79d6e262bad979e7b.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 677, + 545, + 710 + ], + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 710 + ], + "type": "text", + "content": "Figure 4. Localized editing. We incorporate textures and displacements to a region highlighted with 3D Highlighter. Used styles: Brick (left), Colorful Crochet (middle), Cactus (right)." 
+ } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20932" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 547, + 159 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 547, + 159 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 547, + 159 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 547, + 159 + ], + "type": "image", + "image_path": "37f87f8d97108d41f0f6ba47ae01309f80e0de863dac84e8359d80d6fcc580bd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 166, + 547, + 201 + ], + "lines": [ + { + "bbox": [ + 46, + 166, + 547, + 201 + ], + "spans": [ + { + "bbox": [ + 46, + 166, + 547, + 201 + ], + "type": "text", + "content": "Figure 5. Overview of 3D Highlighter. The Neural Highlighter maps each point on the input mesh to a probability. The mesh is colored using a probability-weighted blend and then rendered from multiple views. The neural highlighter weights are guided by the similarity between the CLIP embeddings of the 2D augmented images and the input text." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "text", + "content": "probability " + }, + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "text", + "content": " of belonging to the text-specified region. Each vertex on the mesh is colored according to a probability-weighted blend between the highlighter color and a gray background color. The resulting highlighted mesh " + }, + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 46, + 220, + 289, + 318 + ], + "type": "text", + "content": " is rendered from multiple views, and we apply 2D augmentations to obtain a set of images. We supervise the network optimization by comparing the CLIP-embedded images to the CLIP embedding of the desired text." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 326, + 160, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 326, + 160, + 339 + ], + "spans": [ + { + "bbox": [ + 47, + 326, + 160, + 339 + ], + "type": "text", + "content": "3.1. 
Neural Highlighter" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "spans": [ + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": "Our neural highlighter is a neural field [44] mapping coordinates " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "p \\in [0,1]" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " is the probability that " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " belongs to the text-specified region. The neural highlighter is represented as a multi-layer perceptron (MLP) " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " that takes an input vertex " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " in the form of a 3D coordinate " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_v = (x,y,z)" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": " and predicts a highlight probability " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "p_v" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta}(\\mathbf{x}_v) = p_v" + }, + { + "bbox": [ + 46, + 345, + 287, + 490 + ], + "type": "text", + "content": ". This formulation allows us to query the neural field to obtain meaningful highlight probabilities for any 3D point on (or near) the mesh surface. Thus, once optimized, the network weights conveniently transfer the localization to different meshes of the same object without requiring further optimization (Fig. 9)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 490, + 288, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 490, + 288, + 598 + ], + "spans": [ + { + "bbox": [ + 46, + 490, + 288, + 598 + ], + "type": "text", + "content": "Representing our neural highlighter as an MLP produces contiguous localizations and reduces artifacts. MLPs have been shown to exhibit a spectral bias towards smooth solutions [32], especially on low-dimensional inputs such as 3D coordinates [38]. The bias towards low-frequency outputs encourages our 3D Highlighter to predict contiguous localizations with sharp boundaries and discourages noisy highlights (Fig. 7). For this reason, our approach does not utilize positional encoding. 
See supplemental material for" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 48, + 615, + 286, + 676 + ], + "blocks": [ + { + "bbox": [ + 48, + 615, + 286, + 676 + ], + "lines": [ + { + "bbox": [ + 48, + 615, + 286, + 676 + ], + "spans": [ + { + "bbox": [ + 48, + 615, + 286, + 676 + ], + "type": "image", + "image_path": "00d170565c319d089b1f02247d1441eae50c822102195bd45107ab7ca0b587ad.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 677, + 289, + 712 + ], + "lines": [ + { + "bbox": [ + 46, + 677, + 289, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 289, + 712 + ], + "type": "text", + "content": "Figure 6. Viewpoint robustness. Our system produces consistent results even when using different primary viewpoints. Results for three different primary viewpoints for the target text 'necklace'." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 220, + 380, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 220, + 380, + 232 + ], + "spans": [ + { + "bbox": [ + 306, + 220, + 380, + 232 + ], + "type": "text", + "content": "additional details." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 239, + 429, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 239, + 429, + 252 + ], + "spans": [ + { + "bbox": [ + 306, + 239, + 429, + 252 + ], + "type": "text", + "content": "3.2. Mesh Color Blending" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "content": "We leverage the per-point highlight probability to color the mesh in a continuous, differentiable manner, generating semantically meaningful renders for CLIP supervision. We use a probability-weighted blend, where each vertex color " + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "inline_equation", + "content": "C_v" + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "content": " is a linear combination of the highlight color " + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "content": " and gray color " + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "content": " weighted by the network-predicted highlight probability " + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "inline_equation", + "content": "C_v = p_v \\cdot H + (1 - p_v) \\cdot G" + }, + { + "bbox": [ + 305, + 258, + 545, + 342 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 342, + 546, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 546, + 485 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 546, + 485 + ], + "type": "text", + "content": "At the start of the optimization process, all vertex probabilities are initialized near 0.5 and thus the entire mesh is half-lighted. 
As the optimization progresses, vertices smoothly transition towards gray or highlighter color (based on the network predictions) such that vertices predicted to be highlighted adhere to the text-specified region. This formulation translates each step of the optimization to a colored mesh that is semantically meaningful to CLIP. Our method provides continuous gradients, in contrast to coloring vertices according to the argmax of the highlight probability. Our blending scheme results in a smoother optimization landscape and reduces highlight artifacts (Fig. 7)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 485, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 485, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 485, + 547, + 581 + ], + "type": "text", + "content": "This formulation is also important for downstream applications that wish to use the localizations, e.g. editing and stylization. Predicting per-point highlight probabilities provides an explicit representation of the highlight region on the mesh surface. An alternative approach, optimizing the surface color directly, would only provide a visual result without explicit information about which vertices belong to the localization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 587, + 441, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 587, + 441, + 601 + ], + "spans": [ + { + "bbox": [ + 306, + 587, + 441, + 601 + ], + "type": "text", + "content": "3.3. Unsupervised Guidance" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "We guide our neural optimization using the joint vision-language embedding space of CLIP [31]. We formulate the desired highlight by describing the association between the input mesh [object] and target localization [region]. Specifically, we design our target text " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " to be: \"a gray [object] with highlighted [region].\" We render the highlighted geometry from multiple views using differentiable rendering [4]. At each optimization step, we randomly sample " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " views from a Gaussian distribution centered around a primary view. This" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20933" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "ensures that the underlying object is recognizable in the majority of views shown to CLIP." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "content": "In a preliminary viewpoint prediction stage, we render " + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "content": " views of the mesh and measure the CLIP similarity to the target text prompt. We select the primary view to be the render with the highest CLIP similarity. We found that there exist many possible viewpoints which produce desirable highlighter results (see Fig. 6). More details about how the primary view is selected can be found in the supplemental material." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": "For each view " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": ", we render a 2D image " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "I_{\\psi}" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": " and apply a random perspective 2D augmentation " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": ", as done in previous works [9, 25]. We then encode each of the augmented images into the CLIP embedding space (in " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{768}" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": ") using CLIP's image encoder, denoted as " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "E_I" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": ". Our final aggregate image representation " + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "inline_equation", + "content": "\\mathsf{e}_I" + }, + { + "bbox": [ + 46, + 194, + 286, + 275 + ], + "type": "text", + "content": " is the average CLIP encoding over all views:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 101, + 277, + 287, + 305 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 277, + 287, + 305 + ], + "spans": [ + { + "bbox": [ + 101, + 277, + 287, + 305 + ], + "type": "interline_equation", + "content": "\\mathsf {e} _ {I} = \\frac {1}{n} \\sum_ {\\psi} E _ {I} \\left(\\phi \\left(I _ {\\psi}\\right)\\right) \\in \\mathbb {R} ^ {7 6 8}. 
\\tag {1}", + "image_path": "b53759eb1a149a7570a0b46326057d816c4d6291dd2d5ec86980788ba8adcd37.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": "Similarly, we encode the target selection text " + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": " with CLIP's text encoder " + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "inline_equation", + "content": "E_{T}" + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": " to get the encoded target representation " + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "inline_equation", + "content": "\\mathsf{e}_T = E_T(T)\\in \\mathbb{R}^{768}" + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": ". Our loss " + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": " for optimizing the neural highlighter parameters " + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": " is formulated as the negative cosine similarity between the aggregate image embedding and the text embedding:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 394, + 287, + 418 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 394, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 107, + 394, + 287, + 418 + ], + "type": "interline_equation", + "content": "\\underset {\\theta} {\\operatorname {a r g m i n}} \\mathcal {L} (\\theta) = - \\frac {\\mathrm {e} _ {I} \\cdot \\mathrm {e} _ {T}}{\\left| \\mathrm {e} _ {I} \\right| \\cdot \\left| \\mathrm {e} _ {T} \\right|}. \\tag {2}", + "image_path": "3310edc3c5b316c28770391c941174736bfb9b99516e231d4e6e97abc4b66882.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 427, + 287, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 427, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 427, + 287, + 475 + ], + "type": "text", + "content": "When the loss is minimized, the CLIP embedding of the rendered highlighted mesh becomes similar to the target text embedding. Thus, the localized region will reflect the target text region." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 488, + 127, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 488, + 127, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 488, + 127, + 502 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 509, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 509, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 509, + 287, + 675 + ], + "type": "text", + "content": "In this section we examine various capabilities of 3D Highlighter. First, we demonstrate the fidelity of our highlighter localization in Sec. 4.1, including qualitative and quantitative evaluations. 
As far as we can ascertain, our method is the first technique to perform text-driven localization on 3D shapes without pre-training on 3D data. Thus, we adapt an existing language-guided segmentation technique for 2D images to serve as a baseline [21]. Moreover, we demonstrate the robustness of 3D Highlighter in Sec. 4.2. Then we explore several applications of our method in Sec. 4.3, such as selective editing, localized manipulation, and segmentation. Finally, in Sec. 4.4 we evaluate the influence of key components of 3D Highlighter and discuss its limitations in Sec. 4.5." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "We apply our method to a large variety of meshes from different sources: COSEG [41], Turbo Squid [40], Thingi10K [48], Toys4k [34], ModelNet [43], and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "ShapeNet [3]. 3D Highlighter does not impose any restrictions on the mesh quality; many of the meshes used contain artifacts, such as elements that are non-manifold, unoriented, and contain boundaries or self-intersections. Our PyTorch [29] implementation optimization takes around 5 minutes to run on an Nvidia A40 GPU. In our experiments, we used CLIP ViT-L/14 at " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " resolution." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 164, + 522, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 164, + 522, + 177 + ], + "spans": [ + { + "bbox": [ + 305, + 164, + 522, + 177 + ], + "type": "text", + "content": "4.1. Generality and Fidelity of 3D Highlighter" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 182, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 545, + 338 + ], + "type": "text", + "content": "Highlight generality. 3D Highlighter is not restricted to any particular category for either the input mesh or the text-specified localization, since it does not rely on a 3D dataset or 3D pre-training. In Fig. 2, we see our method achieves accurate localization for a diverse collection of meshes from various domains such as humanoids, animals, and manufactured objects. 3D Highlighter is capable of localizing a wide variety of diverse attributes even when the context of these target attributes is entirely unrelated to the input mesh. Moreover, 3D Highlighter demonstrates that it can perform hallucinated highlighting, where it selects regions on meshes with no underlying geometric signal (such as a bow tie on a camel or a hat on a pig)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 342, + 545, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 449 + ], + "type": "text", + "content": "Highlight specificity. In Fig. 
3, we observe that semantic differences are reflected in the network-predicted highlight. 3D Highlighter is able to successfully localize different text-specified regions on the same mesh. Our framework demonstrates the nuanced understanding required to disambiguate different target regions, such as headphones and hat on the rabbit. Finally, the ability to identify many different regions on a single mesh allows users intuitive, comprehensive, and fine-grained control over part localization." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 453, + 545, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 453, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 453, + 545, + 513 + ], + "type": "text", + "content": "Quantitative evaluation. 3D Highlighter is the first system to select semantic regions on 3D shapes using text guidance, without any 3D datasets. Since there are no quantitative benchmarks to evaluate the quality of our highlights, we do so with a perceptual user study." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 514, + 545, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 514, + 545, + 537 + ], + "spans": [ + { + "bbox": [ + 305, + 514, + 545, + 537 + ], + "type": "text", + "content": "Moreover, since there are no existing approaches for text-based segmentation in 3D, we create two baselines by" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 556, + 365, + 609 + ], + "blocks": [ + { + "bbox": [ + 307, + 556, + 365, + 609 + ], + "lines": [ + { + "bbox": [ + 307, + 556, + 365, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 556, + 365, + 609 + ], + "type": "image", + "image_path": "bc15b6e2fd97a1bca99a84cbc1cc4d816c22f4721d8c569ef7f947c5c65b0fdf.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 609, + 343, + 629 + ], + "lines": [ + { + "bbox": [ + 318, + 609, + 343, + 629 + ], + "spans": [ + { + "bbox": [ + 318, + 609, + 343, + 629 + ], + "type": "text", + "content": "full 0.332" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 635, + 545, + 689 + ], + "lines": [ + { + "bbox": [ + 304, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 635, + 545, + 689 + ], + "type": "text", + "content": "Figure 7. Ablation experiments. We present ablation results for target text 'shoes' using our system (full), direct optimization (direct), without probability-weighted blending (no blend), and without 2D augmentations (no augs). Resulting CLIP scores shown below each image." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 368, + 556, + 423, + 609 + ], + "blocks": [ + { + "bbox": [ + 368, + 556, + 423, + 609 + ], + "lines": [ + { + "bbox": [ + 368, + 556, + 423, + 609 + ], + "spans": [ + { + "bbox": [ + 368, + 556, + 423, + 609 + ], + "type": "image", + "image_path": "52a2f7096b5ab00c340d7d071fed787c78435618dc985fa5fa6b478c27ed7455.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 377, + 609, + 402, + 629 + ], + "lines": [ + { + "bbox": [ + 377, + 609, + 402, + 629 + ], + "spans": [ + { + "bbox": [ + 377, + 609, + 402, + 629 + ], + "type": "text", + "content": "direct 0.319" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 426, + 556, + 483, + 609 + ], + "blocks": [ + { + "bbox": [ + 426, + 556, + 483, + 609 + ], + "lines": [ + { + "bbox": [ + 426, + 556, + 483, + 609 + ], + "spans": [ + { + "bbox": [ + 426, + 556, + 483, + 609 + ], + "type": "image", + "image_path": "6b07bb4a37b67c5f9e25a7fb0f25dd1d8c1db0da75ff7c2ab87ecddb8d4b90de.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 609, + 469, + 629 + ], + "lines": [ + { + "bbox": [ + 432, + 609, + 469, + 629 + ], + "spans": [ + { + "bbox": [ + 432, + 609, + 469, + 629 + ], + "type": "text", + "content": "no blend 0.297" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 486, + 556, + 542, + 609 + ], + "blocks": [ + { + "bbox": [ + 486, + 556, + 542, + 609 + ], + "lines": [ + { + "bbox": [ + 486, + 556, + 542, + 609 + ], + "spans": [ + { + "bbox": [ + 486, + 556, + 542, + 609 + ], + "type": "image", + "image_path": "8670507fec27605e15a778c258920e876d7976893e58d1abc1a1f06678d85e08.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 496, + 609, + 529, + 629 + ], + "lines": [ + { + "bbox": [ + 496, + 609, + 529, + 629 + ], + "spans": [ + { + "bbox": [ + 496, + 609, + 529, + 629 + ], + "type": "text", + "content": "no augs 0.287" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20934" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 70, + 281, + 106 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 281, + 106 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 281, + 106 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 281, + 106 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Control</td><td>LSeg</td><td>Text2LIVE</td><td>Ours</td></tr><tr><td>Average Score ↑</td><td>1.00</td><td>1.26</td><td>2.23</td><td>4.38</td></tr></table>
", + "image_path": "b3be0d0e666ca5f462af074cce672d8c2cae5aff8a5262c1ee93e5c9b349656c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 114, + 287, + 148 + ], + "lines": [ + { + "bbox": [ + 46, + 114, + 287, + 148 + ], + "spans": [ + { + "bbox": [ + 46, + 114, + 287, + 148 + ], + "type": "text", + "content": "Table 1. Perceptual study. We extend two image-based approaches LSeg [21] (segmentation) and Text2LIVE [2] (localized editing) to the highlighting task and report mean user rating." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 178, + 287, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 178, + 287, + 310 + ], + "spans": [ + { + "bbox": [ + 46, + 178, + 287, + 310 + ], + "type": "text", + "content": "extending two different 2D image-based approaches. The first baseline extends LSeg [21] which directly predicts a segmentation in 2D, while the second baseline extends Text2LIVE [2] which infers an edit mask for 2D image manipulation. To evaluate these baselines, we render a bare mesh from a view where the target localization region is clearly visible. We extract the 2D segmentation produced by the image baselines and use it to color the rendered image. Then we ask users to rate the highlight quality of both baselines and our 3D Highlighter result rendered from the same view in our perceptual study." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 312, + 287, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 407 + ], + "type": "text", + "content": "Our perceptual study reports quantitative results on the quality of highlights from both 3D Highlighter and baselines. Users were asked to rate each result from 1-5 on how effectively the highlight represents \"an [object] with a region corresponding to a [region] highlighted.\" Visual examples from our study are shown in the supplemental material (Fig. 21). In total, 33 users evaluated each method on 5 mesh and region combinations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 407, + 287, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 407, + 287, + 587 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 287, + 587 + ], + "type": "text", + "content": "Our 3D Highlighter achieved the highest ratings compared to the baselines (Tab. 1). LSeg is built for text-driven semantic segmentation and excels at segmenting entire objects within a scene. However, LSeg struggles to identify parts within a single object, leading to subpar performance on our highlighting task. Text2LIVE was not explicitly built for segmentation, however it does rely on inferring a continuously-valued edit mask (i.e. a soft-segmentation) when performing localized image editing. The edit mask is designed to produce high-quality image manipulations; however, it is not directly suitable for identifying the sharp segmentation boundaries required for our highlighting task. Qualitative comparisons and an additional quantitative comparison using a modified CLIP R-Precision metric are discussed in the supplemental material." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 595, + 208, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 595, + 208, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 595, + 208, + 608 + ], + "type": "text", + "content": "4.2. Robustness of 3D Highlighter" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 618, + 287, + 713 + ], + "type": "text", + "content": "Localization transfer. An important benefit of formulating 3D Highlighter as a neural field optimization is the ability to trivially transfer localization results between different meshings. This ability is useful for many tasks in geometry processing which require an object to be re-triangulated, simplified, subdivided, or otherwise remeshed. Localization transfer is possible since our neural highlighter is represented as a field over the shape and is independent of any" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 68, + 547, + 187 + ], + "blocks": [ + { + "bbox": [ + 307, + 68, + 547, + 187 + ], + "lines": [ + { + "bbox": [ + 307, + 68, + 547, + 187 + ], + "spans": [ + { + "bbox": [ + 307, + 68, + 547, + 187 + ], + "type": "image", + "image_path": "966054bc4a85eb1b71c9edcf7fe929a75c3553c5be242fe48ac20cedf257a5fb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 188, + 545, + 242 + ], + "lines": [ + { + "bbox": [ + 305, + 188, + 545, + 242 + ], + "spans": [ + { + "bbox": [ + 305, + 188, + 545, + 242 + ], + "type": "text", + "content": "Figure 8. Controlled stylization. Given three different stylizations of the same object, we use 3D Highlighter to select different regions and combine them together (Ours). Attempting to achieve this composition with a holistic approach leads to an undesirable result (Text2Mesh [25])." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 263, + 545, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 545, + 311 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 545, + 311 + ], + "type": "text", + "content": "specific meshing. Although the neural highlighter is trained on mesh vertices, the resulting network encodes a smooth field and produces meaningful outputs for any 3D point on (or near) the mesh surface." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 312, + 546, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 546, + 420 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 546, + 420 + ], + "type": "text", + "content": "In Fig. 9, we show an optimization of the 3D Highlighter on a single mesh triangulation (original) for the prompt 'shoes'. We then apply the already-optimized neural highlighter to remeshed (middle) and subdivided (right) versions of the original mesh, showing the transferability of the selected region to different triangulations. This result demonstrates how 3D Highlighter is independent of the input mesh and that, once we have a localization for one mesh, we can trivially transfer it to any other meshing of the same object." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "text", + "content": "Viewpoint robustness. Our method is robust to the primary view choice. This property is important for our localization task, as we may not know a priori which view is ideal. In Fig. 6, we perform our optimization using three different primary viewpoints: " + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "inline_equation", + "content": "0^{\\circ}" + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "inline_equation", + "content": "-90^{\\circ}" + }, + { + "bbox": [ + 304, + 422, + 547, + 530 + ], + "type": "text", + "content": " (viewpoints shown in blue). We then present predicted localizations, showing that for all three views, 3D Highlighter is able to accurately identify the target localization region, regardless of whether that region is visible from the primary view." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 530, + 545, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 530, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 545, + 566 + ], + "type": "text", + "content": "From the " + }, + { + "bbox": [ + 304, + 530, + 545, + 566 + ], + "type": "inline_equation", + "content": "-90^{\\circ}" + }, + { + "bbox": [ + 304, + 530, + 545, + 566 + ], + "type": "text", + "content": " primary view, the target region (the neck) is not visible. However, is still visible with a low probability for views sampled from the Gaussian distribution" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 583, + 544, + 666 + ], + "blocks": [ + { + "bbox": [ + 307, + 583, + 544, + 666 + ], + "lines": [ + { + "bbox": [ + 307, + 583, + 544, + 666 + ], + "spans": [ + { + "bbox": [ + 307, + 583, + 544, + 666 + ], + "type": "image", + "image_path": "f018918fbaf8736ed616c12a107abff622ccc97a3598f00583aa31289e4ad0d6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 666, + 545, + 710 + ], + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 710 + ], + "type": "text", + "content": "Figure 9. Localization transfer. We optimize our neural highlighter on one mesh (original) for the prompt 'shoes'. Once optimized, the network weights transfer the localization to different meshings of the same object (remeshed and subdivided)." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20935" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "around the primary view. This means that over the course of optimization, regions other than the neck are mostly seen while the target region is rarely visible. Nonetheless, our method manages to highlight the desired region, which implies its robustness to how frequently the target region for localization is seen. Furthermore, it shows that oversampling views where the target region is not visible does not negatively influence the optimization." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 175, + 214, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 175, + 214, + 188 + ], + "spans": [ + { + "bbox": [ + 47, + 175, + 214, + 188 + ], + "type": "text", + "content": "4.3. Applications of 3D Highlighter" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 193, + 287, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 193, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 287, + 335 + ], + "type": "text", + "content": "Selective editing. In Fig. 4, we show that it is possible to use 3D Highlighter to selectively edit a 3D object within a semantic region. This is applicable to techniques which incorporate global texture or material properties over the entire shape, such as in Text2Mesh [25] or MatCap [39]. Starting with different bare input meshes, we edit the entire shape using a global stylization technique [25]. Then, we use 3D Highlighter to select a text-specified region and incorporate the modifications only in the selected area. Thus 3D Highlighter provides direct control over where to stylize shapes, enabling users to obtain localized stylizations based on semantic cues." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 339, + 287, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 339, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 339, + 287, + 518 + ], + "type": "text", + "content": "Controlled stylization via composition. Achieving compositionality with language models is a challenging task [33]. For example, starting with a human mesh and using Text2Mesh [25] to stylize 'Iron Man with the head of Steve Jobs and Yeti legs', leads to muddled and undesirable results (Fig. 8, rightmost). Our method enables compositionality between different shape modifications by chaining simple concepts together (Fig. 8). Specifically, we decompose the desired modification into three separate attainable targets ('Iron Man', 'Steve Jobs', and 'Yeti'), which we stylize individually with Text2Mesh. We then utilize our 3D Highlighter to localize the text-specified regions. We achieve the desired composition by combining the highlighted regions together, obtaining clear boundaries between stylizations." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 521, + 287, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 287, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 287, + 582 + ], + "type": "text", + "content": "Semantic segmentation. In Fig. 10, we show that our technique is not restricted to hallucinated highlighting and is capable of localizing semantically-specified geometric regions. These text-driven localizations identify unique geometric parts without utilizing any 3D datasets or part labels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 587, + 214, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 587, + 214, + 601 + ], + "spans": [ + { + "bbox": [ + 47, + 587, + 214, + 601 + ], + "type": "text", + "content": "4.4. Components of 3D Highlighter" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "Ablation study. Several components are key for facilitating 3D Highlighter. We provide ablation results in Fig. 7 to demonstrate the effect of our design choices. First, using a direct optimization of the vertex color (direct) instead of optimizing a neural field results in splotchy highlight artifacts. Since the neural field has a spectral bias towards smooth solutions [32], omitting it leads to an undesired noisy output. Second, removing the probability weighted blending (no blend) and instead coloring vertices using only" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 322, + 72, + 378, + 168 + ], + "blocks": [ + { + "bbox": [ + 322, + 72, + 378, + 168 + ], + "lines": [ + { + "bbox": [ + 322, + 72, + 378, + 168 + ], + "spans": [ + { + "bbox": [ + 322, + 72, + 378, + 168 + ], + "type": "image", + "image_path": "5f07d2dbe7886e50bd891038211e2c20cd2aa37502ddaa9905c6454b084a0a90.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 170, + 361, + 179 + ], + "lines": [ + { + "bbox": [ + 341, + 170, + 361, + 179 + ], + "spans": [ + { + "bbox": [ + 341, + 170, + 361, + 179 + ], + "type": "text", + "content": "Arm" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 387, + 72, + 429, + 168 + ], + "blocks": [ + { + "bbox": [ + 387, + 72, + 429, + 168 + ], + "lines": [ + { + "bbox": [ + 387, + 72, + 429, + 168 + ], + "spans": [ + { + "bbox": [ + 387, + 72, + 429, + 168 + ], + "type": "image", + "image_path": "6b22d823e0eed84c95e938a67070eeed0862e956e2f688fba10eb3562f8f2beb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 169, + 419, + 179 + ], + "lines": [ + { + "bbox": [ + 397, + 169, + 419, + 179 + ], + "spans": [ + { + "bbox": [ + 397, + 169, + 419, + 179 + ], + "type": "text", + "content": "Slide" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 433, + 121, + 531, + 168 + ], + "blocks": [ + { + "bbox": [ + 433, + 121, + 531, + 168 + ], + "lines": [ + { + "bbox": [ + 433, + 121, + 531, + 168 + ], + "spans": [ + { + "bbox": [ + 433, + 121, + 531, + 168 + ], + "type": "image", + "image_path": "68ef7f752136e4660dafb1582c8a971b0bd35ab5640a501d31930cc7a2fb506d.jpg" + } + ] + } + ], + "index": 11, + 
"angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 468, + 169, + 506, + 180 + ], + "lines": [ + { + "bbox": [ + 468, + 169, + 506, + 180 + ], + "spans": [ + { + "bbox": [ + 468, + 169, + 506, + 180 + ], + "type": "text", + "content": "Propeller" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 181, + 545, + 213 + ], + "lines": [ + { + "bbox": [ + 305, + 181, + 545, + 213 + ], + "spans": [ + { + "bbox": [ + 305, + 181, + 545, + 213 + ], + "type": "text", + "content": "Figure 10. Semantic Segmentation. 3D Highlighter produces semantic segmentations for unique geometric parts without any 3D dataset or annotations." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 236, + 545, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 236, + 545, + 344 + ], + "spans": [ + { + "bbox": [ + 304, + 236, + 545, + 344 + ], + "type": "text", + "content": "two distinct values also produces a noisy highlight pattern. Without a continuous color blend, the gradients become ill-conditioned and unstable, leading to highlight artifacts and irregular localization boundaries. Lastly, similar to previous works [9, 25], we observe that without 2D perspective augmentations (no augs), 3D Highlighter outputs degenerate solutions. The ablation study emphasizes the importance of our key design choices in 3D Highlighter for its ability to highlight a coherent and localized region on the input shape." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 346, + 545, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 545, + 454 + ], + "type": "text", + "content": "Prompt formulation and CLIP understanding. Our prompt formulation combined with our coloring scheme results in the correct association between objects and their properties, a known challenge when using CLIP [33]. In Fig. 12, we analyze the CLIP score for two different prompts: 'gray chair with highlighted back' (left) and 'blue chair with red back' (right). For each prompt, we measure the CLIP similarity to renders of both the correct assignment and flipped assignment." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 455, + 545, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 455, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 304, + 455, + 545, + 551 + ], + "type": "text", + "content": "We observe that our prompt formulation ('gray chair with highlighted back') results in a higher average CLIP score for the correct assignment. In contrast, when specifying colors in the prompt ('blue chair with red back') and styling the mesh accordingly, we see higher CLIP scores for the flipped association. 
Using the same gray and yellow renders (left), we also compare to a prompt specifying colors ('gray chair with yellow back') and find that the higher" + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 331, + 567, + 521, + 654 + ], + "blocks": [ + { + "bbox": [ + 331, + 567, + 521, + 654 + ], + "lines": [ + { + "bbox": [ + 331, + 567, + 521, + 654 + ], + "spans": [ + { + "bbox": [ + 331, + 567, + 521, + 654 + ], + "type": "image", + "image_path": "97dddc40b0091821cfab65ba2c7e2cfb43f71a6956850708ff94d0d1f24f3e7e.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 655, + 545, + 710 + ], + "lines": [ + { + "bbox": [ + 304, + 655, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 304, + 655, + 545, + 710 + ], + "type": "text", + "content": "Figure 11. Network initialization. We optimize 3D Highlighter for the text prompt 'belt' using different initialization methods: using a default initialization where all output probabilities are near 0.5 (middle) or altering the final layer so that all outputs are 0 (left) or 1 (right). Initializing with 0 or 1 leads to an undesirable result." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20936" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 56, + 263, + 166 + ], + "blocks": [ + { + "bbox": [ + 70, + 56, + 263, + 166 + ], + "lines": [ + { + "bbox": [ + 70, + 56, + 263, + 166 + ], + "spans": [ + { + "bbox": [ + 70, + 56, + 263, + 166 + ], + "type": "image", + "image_path": "46a442d6d6c2c3000f719cefdc0f5284a2910bc7d071a06bbdd8e125c11897ff.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 168, + 289, + 247 + ], + "lines": [ + { + "bbox": [ + 46, + 168, + 289, + 247 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 289, + 247 + ], + "type": "text", + "content": "Figure 12. CLIP understanding. We examine CLIP similarity scores for several prompt formulations targeting the 'back' of the chair while using the correct color assignment and where the coloring is flipped. For the prompt 'gray chair with highlighted back' (left) we observe that the CLIP score is higher for the correct assignment. For the prompt 'blue chair with red back' (right) the CLIP score is higher for the flipped (incorrect) assignment." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 266, + 287, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 287, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 287, + 289 + ], + "type": "text", + "content": "CLIP score corresponds to the flipped selection (data not shown)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 290, + 288, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 290, + 288, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 290, + 288, + 386 + ], + "type": "text", + "content": "We also measure the CLIP scores for our standard prompt formulation: 'gray chair with highlighted back', replacing the yellow color in the rendering with other colors, such as red and blue, and find that the correct selection has a higher CLIP score (data not shown). To conclude, our prompt formulation (i.e., the use of the term 'highlighted') coincides with CLIP's understanding and 3D Highlighter is robust to the highlight color." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 287, + 508 + ], + "type": "text", + "content": "Network initialization. Initializing the network such that the object is partially highlighted (i.e., with highlight probability equal to 0.5) is important for obtaining desirable results. In Fig. 11, we show the optimization of our method for the target text prompt 'belt' using three different initializations. Our method (middle) initializes all output probabilities near 0.5 by random weight initialization of the network. We compare to initializing the output probabilities to 0 (left) or 1 (right), in which we set the weights of the last layer to 0, and the bias to 0 or 1, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 509, + 288, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 509, + 288, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 509, + 288, + 568 + ], + "type": "text", + "content": "For the initialization to both 0.5 and 1, a highlight color is uniformly present on the styled mesh, whereas with 0, the mesh is gray with no highlight. Consequently, we hypothesize that the presence of highlight color at initialization is important for CLIP's supervision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 575, + 125, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 575, + 125, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 575, + 125, + 588 + ], + "type": "text", + "content": "4.5. Limitations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 701 + ], + "type": "text", + "content": "3D Highlighter is robust to variations of the object specification in the target prompt. However, there should still be a logical connection between the 3D shape and its description. Fig. 13 shows results for a camel mesh and the target highlight 'shinguards'. For each optimization, we use a slightly different target prompt by varying the object specification. The prompts are of the form \"[object] with highlighted shinguards\", where [object] is replaced with camel, pig, animal, or chair." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "content": "In Fig. 
13, we observe that with object specifications" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 547, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 193 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 193 + ], + "type": "text", + "content": "that resemble the geometry of camel, such as pig and animal, 3D Highlighter accurately localizes the desired region. However, for a description that is incompatible with the object's geometry (i.e., referring to a camel as a chair), our method does not produce meaningful results. This result sheds light on 3D Highlighter's robustness to text descriptions: 3D Highlighter is able to reason about a mesh even when its description is not perfectly accurate, provided that it is sufficiently similar to the true description (i.e., referring to a camel mesh as a pig)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 214, + 384, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 214, + 384, + 227 + ], + "spans": [ + { + "bbox": [ + 306, + 214, + 384, + 227 + ], + "type": "text", + "content": "5. Conclusions" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 235, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 235, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 304, + 235, + 545, + 342 + ], + "type": "text", + "content": "We present a technique for highlighting semantic regions on meshes using text as input, without any 3D datasets or 3D pre-training. 3D Highlighter can reason about where to place a non-obviously related part on a 3D object (i.e. a hat on a candle). The ability to combine unconnected parts and objects together is reminiscent of ideas from image analogies [12, 22]. In this work, we show that we can identify part-concepts that are geometrically absent from a shape, giving rise to our hallucinated highlighting capability." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 342, + 546, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 546, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 546, + 437 + ], + "type": "text", + "content": "During neural optimization, our neural network infers a probability which we use to blend the highlight color onto the mesh. The network-predicted probabilities are general, and provide a soft-segmentation which we show can be used for a variety of different applications (Figs. 4 and 8). In the future, we are interested in extending our framework to obtain part correspondence between shapes that differ topologically but are semantically related." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 448, + 416, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 448, + 416, + 462 + ], + "spans": [ + { + "bbox": [ + 306, + 448, + 416, + 462 + ], + "type": "text", + "content": "6. Acknowledgments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 468, + 547, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 468, + 547, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 468, + 547, + 540 + ], + "type": "text", + "content": "We thank the University of Chicago for providing the AI cluster resources, services, and the professional support of the technical staff. This work was also supported in part by gifts from Adobe Research. 
Finally, we would like to thank Richard Liu, Avery Zhou, and the members of 3DL for their thorough and insightful feedback on our work." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 555, + 545, + 642 + ], + "blocks": [ + { + "bbox": [ + 307, + 555, + 545, + 642 + ], + "lines": [ + { + "bbox": [ + 307, + 555, + 545, + 642 + ], + "spans": [ + { + "bbox": [ + 307, + 555, + 545, + 642 + ], + "type": "image", + "image_path": "d466cfa6af2a35e8ea84c273f2ed61469a2e59581258127dfb4738c887104207.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 644, + 547, + 711 + ], + "lines": [ + { + "bbox": [ + 304, + 644, + 547, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 644, + 547, + 711 + ], + "type": "text", + "content": "Figure 13. Prompt generality. Our system is robust to certain variations in object specifications. We achieve desirable results for the text input 'camel with highlighted shinguards' (left), as well as for other variations ('pig' and 'animal'). If the object specification, such as 'chair', is incompatible with the input geometry, 3D Highlighter no longer produces meaningful results." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20937" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Shmuel Asafi, Avi Goren, and Daniel Cohen-Or. Weak convex decomposition by lines-of-sight. Computer graphics forum, 32(5):23-31, 2013. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 288, + 168 + ], + "type": "text", + "content": "[2] Omer Bar-Tal, Dolev Ofri-Amar, Rafail Fridman, Yoni Kasten, and Tali Dekel. Text2live: Text-driven layered image and video editing. arXiv preprint arXiv:2204.02491, 2022. 3, 6, 12, 13" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 287, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 287, + 225 + ], + "type": "text", + "content": "[3] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 
5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 227, + 287, + 281 + ], + "type": "text", + "content": "[4] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "spans": [ + { + "bbox": [ + 53, + 284, + 287, + 337 + ], + "type": "text", + "content": "[5] Zhiqin Chen, Kangxue Yin, Matthew Fisher, Siddhartha Chaudhuri, and Hao Zhang. Bae-net: Branched autoencoder for shape co-segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8490-8499, 2019. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 339, + 287, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 339, + 287, + 383 + ], + "spans": [ + { + "bbox": [ + 53, + 339, + 287, + 383 + ], + "type": "text", + "content": "[6] Nicu D Cornea, Deborah Silver, and Patrick Min. **Curve-skeleton properties, applications, and algorithms. IEEE Transactions on visualization and computer graphics*, 13(3):530, 2007. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 385, + 287, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 385, + 287, + 440 + ], + "spans": [ + { + "bbox": [ + 53, + 385, + 287, + 440 + ], + "type": "text", + "content": "[7] Boyang Deng, Kyle Genova, Soroosh Yazdani, Sofien Bouaziz, Geoffrey Hinton, and Andrea Tagliasacchi. Cvxnet: Learnable convex decomposition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 31-44, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 441, + 287, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 287, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 287, + 474 + ], + "type": "text", + "content": "[8] Tamal K Dey and Wulue Zhao. Approximating the medial axis from the voronoi diagram with a convergence guarantee. Algorithmica, 38(1):179-200, 2004. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 475, + 287, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 475, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 53, + 475, + 287, + 508 + ], + "type": "text", + "content": "[9] Kevin Frans, Lisa B. Soros, and Olaf Witkowski. Clipdraw: Exploring text-to-drawing synthesis through language-image encoders. ArXiv, abs/2106.14843, 2021. 5, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 287, + 552 + ], + "type": "text", + "content": "[10] Rao Fu, Xiao Zhan, Yiwen Chen, Daniel Ritchie, and Srinath Sridhar. Shapecrafter: A recursive text-conditioned 3d shape generation model. arXiv preprint arXiv:2207.09446, 2022. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 598 + ], + "type": "text", + "content": "[11] Rana Hanocka, Amir Hertz, Noa Fish, Raja Giryes, Shachar Fleishman, and Daniel Cohen-Or. MeshCNN: A network with an edge. ACM Transactions on Graphics (TOG), 38(4):90:1–90:12, 2019. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "text", + "content": "[12] Aaron Hertzmann, Charles E Jacobs, Nuria Oliver, Brian Curless, and David H Salesin. Image analogies. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 327-340, 2001. 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 646, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 667 + ], + "type": "text", + "content": "[13] Donald D Hoffman and Whitman A Richards. Parts of recognition. Cognition, 18(1-3):65-96, 1984. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[14] Yining Hong, Yilun Du, Chunru Lin, Josh Tenenbaum, and Chuang Gan. 3d concept grounding on neural fields. In Annual Conference on Neural Information Processing Systems, 2022. 3" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[15] Shi-Min Hu, Zheng-Ning Liu, Meng-Hao Guo, Junxiong Cai, Jiahui Huang, Tai-Jiang Mu, and Ralph R. Martin. Subdivision-based mesh convolution networks. ACM Trans. Graph., 41(3):25:1-25:16, 2022. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 119, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 173 + ], + "type": "text", + "content": "[16] Ajay Jain, Ben Mildenhall, Jonathan T Barron, Pieter Abbeel, and Ben Poole. Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 867-876, 2022. 3, 12" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 219 + ], + "type": "text", + "content": "[17] Oliver Van Kaick, Noa Fish, Yanir Kleiman, Shmuel Asafi, and Daniel Cohen-Or. Shape segmentation by approximate convexity analysis. ACM Transactions on Graphics (TOG), 34(1):1–11, 2014. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 221, + 545, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 221, + 545, + 265 + ], + "spans": [ + { + "bbox": [ + 307, + 221, + 545, + 265 + ], + "type": "text", + "content": "[18] Nasir Mohammad Khalid, Tianhao Xie, Eugene Belilovsky, and Popa Tiberiu. Clip-mesh: Generating textured meshes from text using pretrained image-text models. SIGGRAPH Asia 2022 Conference Papers, December 2022. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 267, + 545, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 267, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 307, + 267, + 545, + 300 + ], + "type": "text", + "content": "[19] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nef for editing via feature field distillation. arXiv, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 302, + 545, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 302, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 307, + 302, + 545, + 334 + ], + "type": "text", + "content": "[20] Alon Lahav and Ayellet Tal. Meshwalker: Deep mesh understanding by random walks. ACM Transactions on Graphics (TOG), 39(6):1-13, 2020. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 337, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 337, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 307, + 337, + 545, + 380 + ], + "type": "text", + "content": "[21] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic segmentation. In International Conference on Learning Representations, 2022. 3, 5, 6, 12, 13" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "type": "text", + "content": "[22] Jing Liao, Yuan Yao, Lu Yuan, Gang Hua, and Sing Bing Kang. Visual attribute transfer through deep image analogy. arXiv preprint arXiv:1705.01088, 2017. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 418, + 545, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 418, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 307, + 418, + 545, + 460 + ], + "type": "text", + "content": "[23] Jyh-Ming Lien and Nancy M Amato. Approximate convex decomposition of polyhedra. In Proceedings of the 2007 ACM symposium on Solid and physical modeling, pages 121-131, 2007. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 464, + 545, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 464, + 545, + 507 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 545, + 507 + ], + "type": "text", + "content": "[24] Haggai Maron, Meirav Galun, Noam Aigerman, Miri Trope, Nadav Dym, Ersin Yumer, Vladimir G Kim, and Yaron Lipman. Convolutional neural networks on surfaces via seamless toric covers. ACM Trans. Graph., 36(4):71-1, 2017. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "spans": [ + { + "bbox": [ + 307, + 510, + 545, + 563 + ], + "type": "text", + "content": "[25] Oscar Michel, Roi Bar-On, Richard Liu, Sagie Benaim, and Rana Hanocka. 
Text2mesh: Text-driven neural stylization for meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13492-13502, 2022. 3, 5, 6, 7, 11" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 567, + 545, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 609 + ], + "type": "text", + "content": "[26] Francesco Milano, Antonio Loquercio, Antoni Rosinol, Davide Scaramuzzi, and Luca Carlone. Primal-dual mesh convolutional neural networks. Advances in Neural Information Processing Systems, 33:952-963, 2020. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 612, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 612, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 612, + 545, + 656 + ], + "type": "text", + "content": "[27] Kaichun Mo, Shilin Zhu, Angel X. Chang, Li Yi, Subarna Tripathi, Leonidas J. Guibas, and Hao Su. Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding, 2018. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[28] Dong Huk Park, Samaneh Azadi, Xihui Liu, Trevor Darrell, and Anna Rohrbach. Benchmark for compositional text-to-image synthesis. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 12" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20938" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[29] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 286, + 148 + ], + "type": "text", + "content": "[30] Ben Poole, Ajay Jain, Jonathan T. Barron, and Ben Mildenhall. Dreamfusion: Text-to-3d using 2d diffusion. arXiv, 2022. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 150, + 286, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 150, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 49, + 150, + 286, + 205 + ], + "type": "text", + "content": "[31] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 
Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020, 2021. 1, 2, 3, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 206, + 286, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 206, + 286, + 292 + ], + "spans": [ + { + "bbox": [ + 49, + 206, + 286, + 292 + ], + "type": "text", + "content": "[32] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the spectral bias of neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 5301-5310. PMLR, 09-15 Jun 2019. 4, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 293, + 286, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 293, + 286, + 335 + ], + "spans": [ + { + "bbox": [ + 49, + 293, + 286, + 335 + ], + "type": "text", + "content": "[33] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022.7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 338, + 286, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 338, + 286, + 370 + ], + "spans": [ + { + "bbox": [ + 49, + 338, + 286, + 370 + ], + "type": "text", + "content": "[34] James Matthew Rehg. Toys4k 3d object dataset, 2022. https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k.5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 371, + 286, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 371, + 286, + 392 + ], + "spans": [ + { + "bbox": [ + 49, + 371, + 286, + 392 + ], + "type": "text", + "content": "[35] Ariel Shamir. A survey on mesh segmentation techniques. Computer graphics forum, 27(6):1539-1556, 2008. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 394, + 286, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 394, + 286, + 436 + ], + "spans": [ + { + "bbox": [ + 49, + 394, + 286, + 436 + ], + "type": "text", + "content": "[36] Nicholas Sharp, Souhaib Attaiki, Keenan Crane, and Maks Ovsjanikov. Diffusionnet: Discretization agnostic learning on surfaces. ACM Transactions on Graphics (TOG), 41(3):1-16, 2022. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 437, + 286, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 437, + 286, + 524 + ], + "spans": [ + { + "bbox": [ + 49, + 437, + 286, + 524 + ], + "type": "text", + "content": "[37] Weiwei Sun, Andrea Tagliasacchi, Boyang Deng, Sara Sabour, Soroosh Yazdani, Geoffrey E Hinton, and Kwang Moo Yi. Canonical capsules: Self-supervised capsules in canonical pose. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 24993-25005. Curran Associates, Inc., 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 525, + 286, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 525, + 286, + 579 + ], + "spans": [ + { + "bbox": [ + 49, + 525, + 286, + 579 + ], + "type": "text", + "content": "[38] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. 
Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. NeurIPS, 2020. 4, 12" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 581, + 286, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 581, + 286, + 613 + ], + "spans": [ + { + "bbox": [ + 49, + 581, + 286, + 613 + ], + "type": "text", + "content": "[39] Hideki Todo, Ken Anjyo, and Shun'Ichi Yokoyama. Litsphere extension for artistic rendering. Vis. Comput., 29(6-8):473-480, jun 2013. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 614, + 286, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 614, + 286, + 635 + ], + "spans": [ + { + "bbox": [ + 49, + 614, + 286, + 635 + ], + "type": "text", + "content": "[40] TurboSquid. Turbosquid 3d model repository, 2021. https://www.turbosquid.com/.5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 637, + 286, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 637, + 286, + 679 + ], + "spans": [ + { + "bbox": [ + 49, + 637, + 286, + 679 + ], + "type": "text", + "content": "[41] Oliver van Kaick, Andrea Tagliasacchi, Oana Sidi, Hao Zhang, Daniel Cohen-Or, Lior Wolf, and Ghassan Hamarneh. Prior knowledge for part correspondence. Computer Graphics Forum, 30(2):553–562, 2011. 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 681, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 681, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 681, + 286, + 712 + ], + "type": "text", + "content": "[42] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In Proceedings of the" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 441 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3835-3844, 2022. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 150 + ], + "type": "text", + "content": "[43] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 205 + ], + "type": "text", + "content": "[44] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Computer Graphics Forum, 2022. 
1, 4, 12" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 545, + 251 + ], + "type": "text", + "content": "[45] Li Yi, Hao Su, Xingwen Guo, and Leonidas J Guibas. Syncspeccnn: Synchronized spectral cnn for 3d shape segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2282-2290, 2017. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 253, + 545, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 253, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 308, + 253, + 545, + 306 + ], + "type": "text", + "content": "[46] Fenggen Yu, Kun Liu, Yan Zhang, Chenyang Zhu, and Kai Xu. Partnet: A recursive part decomposition network for fine-grained and hierarchical shape segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9491-9500, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 308, + 545, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 545, + 351 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 545, + 351 + ], + "type": "text", + "content": "[47] Qian Zheng, Zhuming Hao, Hui Huang, Kai Xu, Hao Zhang, Daniel Cohen-Or, and Baoquan Chen. Skeleton-intrinsic symmetrization of shapes. Computer Graphics Forum, 34(2):275-286, 2015. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 353, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 353, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 353, + 545, + 385 + ], + "type": "text", + "content": "[48] Qingnan Zhou and Alec Jacobson. Thingi10k: A dataset of 10,000 3d-printing models. arXiv preprint arXiv:1605.04797, 2016. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 387, + 545, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 387, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 308, + 387, + 545, + 441 + ], + "type": "text", + "content": "[49] Chenyang Zhu, Kai Xu, Siddhartha Chaudhuri, Li Yi, Leonidas J Guibas, and Hao Zhang. Adacoseg: Adaptive shape co-segmentation with group consistency loss. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8543-8552, 2020. 
3" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20939" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_content_list.json b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..2eee391e3d1fa56467659d01df95a3b9026243cd --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_content_list.json @@ -0,0 +1,1737 @@ +[ + { + "type": "text", + "text": "3D Human Keypoints Estimation from Point Clouds in the Wild without Human Labels", + "text_level": 1, + "bbox": [ + 163, + 128, + 807, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhenzhen Weng $^{1*}$ Alexander S. Gorban $^{2}$ Jingwei Ji $^{2}$ Mahyar Najibi $^{2}$ Yin Zhou $^{2}$ Dragomir Anguelov $^{2}$", + "bbox": [ + 189, + 202, + 776, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Stanford University $^{2}$ Waymo", + "bbox": [ + 351, + 255, + 611, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Training a 3D human keypoint detector from point clouds in a supervised manner requires large volumes of high quality labels. While it is relatively easy to capture large amounts of human point clouds, annotating 3D keypoints is expensive, subjective, error prone and especially difficult for long-tail cases (pedestrians with rare poses, scooterists, etc.). In this work, we propose GC-KPL - Geometry Consistency inspired Key Point Leaning, an approach for learning 3D human joint locations from point clouds without human labels. We achieve this by our novel unsupervised loss formulations that account for the structure and movement of the human body. We show that by training on a large training set from Waymo Open Dataset [21] without any human annotated keypoints, we are able to achieve reasonable performance as compared to the fully supervised approach. Further, the backbone benefits from the unsupervised training and is useful in downstream few-shot learning of keypoints, where fine-tuning on only 10 percent of the labeled training data gives comparable performance to fine-tuning on the entire set. We demonstrated that GC-KPL outperforms by a large margin over SoTA when trained on entire dataset and efficiently leverages large volumes of unlabeled data.", + "bbox": [ + 75, + 339, + 473, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 700, + 209, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Estimation of human pose in 3D is an important problem in computer vision and it has a wide range of applications including AR/VR, AI-assisted healthcare, and autonomous driving [4,29,32]. 
For autonomous systems, being able to perceive human poses from sensor data (e.g. Li-DAR point clouds) is particularly essential to reason about the surrounding environment and make safe maneuvers.", + "bbox": [ + 75, + 724, + 468, + 830 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite the high level of interest in human pose estimation in the wild, only few papers approached outdoor 3D keypoint detection using point cloud. A main reason is that", + "bbox": [ + 75, + 830, + 468, + 875 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dc011686fedb76da0d66d44f198e418de7265f187c27f44ef07788a14963edf1.jpg", + "image_caption": [ + "Figure 1. We present GC-KPL, a novel method for learning 3D human keypoints from in-the-wild point clouds without any human labels. We propose to learn keypoint locations using unsupervised losses that account for the structure and movement of the human body. The backbone learns useful semantics from unsupervised learning and can be used in downstream fine-tuning tasks to boost the performance of 3D keypoint estimation." + ], + "image_footnote": [], + "bbox": [ + 506, + 311, + 883, + 545 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "training a pedestrian pose estimation model requires large amount of high quality in-the-wild data with ground truth labels. Annotating 3D human keypoints on point cloud data is expensive, time consuming and error prone. Although there are a few existing point cloud datasets with ground truth human poses [11, 13, 21], they are limited in terms of the quantity of the 3D annotations and diversity of the data. Therefore, fully-supervised human keypoint detectors trained on such datasets do not generalize well for long tail cases. For this reason, previous approaches on pedestrian 3D keypoint estimation have mainly focused on utilizing 2D weak supervision [4, 32] which is easier to obtain, or leveraging signals from others modalities (e.g. RGB, depth) [29]. Nonetheless, there is a lot of useful information in the large amount of unlabeled LiDAR data that previous works on human pose estimation have not made an effort to utilize.", + "bbox": [ + 496, + 659, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Work done as an intern at Waymo.", + "bbox": [ + 94, + 886, + 285, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1158", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we propose a novel and effective method for learning 3D human keypoints from in-the-wild point clouds without using any manual labeled 3D keypoints. Our approach is built on top of the key observation that human skeletons are roughly centered within approximately rigid body parts and that the location and movement of the surface points should explain the movement of the skeleton and vice versa. 
To that end, we design novel unsupervised loss terms for learning locations of the 3D keypoints/skeleton within human point clouds which correspond to 3D locations of major joints of human body.", + "bbox": [ + 75, + 90, + 472, + 256 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In the proposed method, we first train a transformer-based regression model for predicting keypoints and a semantic segmentation model for localizing body parts on a synthetic data constructed from randomly posed SMPL human body model [15]. Then, we train on the entire Waymo Open Dataset [21] without using any 3D ground-truth annotation of human keypoints. Through unsupervised training, keypoint predictions are refined and the backbone learns useful information from large amount of unannotated data.", + "bbox": [ + 75, + 256, + 472, + 390 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we make the following contributions:", + "bbox": [ + 96, + 390, + 429, + 405 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present GC-KPL, a method for learning human 3D keypoints for in-the-wild point clouds without any manual keypoint annotations.", + "- Drawing insight from the structure and movement of the human body, we propose three effective and novel unsupervised losses for refining keypoints. We show that the proposed losses are effective for unsupervised keypoint learning on Waymo Open Dataset.", + "- Through downstream fine-tuning/few-shot experiments, we demonstrate that GC-KPL can be used as unsupervised representation learning for human point clouds, which opens up the possibility to utilize a practically infinite amounts of sensor data to improve human pose understanding in autonomous driving." + ], + "bbox": [ + 96, + 414, + 468, + 647 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 657, + 218, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 3D Human Keypoint Estimation from Points Clouds", + "text_level": 1, + "bbox": [ + 76, + 681, + 468, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "There have been a few works [19, 31, 34] about estimating 3D keypoints from clean and carefully-curated point clouds [6], but 3D keypoint estimation from in-the-wild point clouds is a much less studied problem. Due to the lack of ground-truth 3D human pose annotations paired with Li-DAR data, there has not been a lot of works on 3d human keypoint estimation from LiDAR information. Among the few point cloud datasets with 3D keypoint annotations, Li-DARHuman26M [13] captures long-range human motions with ground truth motion acquired by the IMU system and pose information derived from SMPL models fitted into point clouds. It is among the first few datasets which have", + "bbox": [ + 75, + 719, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "LiDAR point clouds synchronized with RGB images, but SMPL shape parameters are same for all 13 subjects and it does not feature in-the-wild pedestrians where there could be much more background noise and occlusion. PedX [11] offers 3D automatic pedestrian annotations obtained using model fitting on different modalities, gathered effectively from a single intersection with only 75 pedestrians (the second intersection has only 218 frames, labels for the third scene were not released). 
Waymo Open Dataset [21] has more than 3,500 subjects from over 1,000 different in-the-wild scenes with high-quality 2D and 3D manual annotations. Despite the existence of these datasets, the few works on 3D pose estimation from point clouds mostly rely on weak supervision. HPERL model [4] trains on 2D ground-truth pose annotations and uses a reprojection loss for the 3D pose regression task. Multi-modal model in [32] uses 2D labels on RGB images as weak supervision, and creates pseudo ground-truth 3D joint positions from the projection of annotated 2D joints. HUM3DIL [29] leverages RGB information with LiDAR points, by computing pixel-aligned multi-modal features with the 3D positions of the LiDAR signal. In contrast, our method does not use any RGB information or weak supervision.", + "bbox": [ + 496, + 90, + 893, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Unsupervised Keypoint Localization", + "text_level": 1, + "bbox": [ + 498, + 465, + 816, + 483 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "There are a number of works that aim to recover 3D keypoints using self-supervised geometric reasoning [12, 22], but they are limited to rigid objects. More recent unsupervised methods work for articulated objects from monocular RGB data [9, 10, 10, 18, 20, 24], multi-view data [16], or point clouds [27], where authors suggest to condition on the predicted keypoints and train a conditional generative model to supervise the keypoints through reconstruction losses. We propose a simpler pipeline where we apply our novel unsupervised losses to the predicted keypoints directly and do not require additional models besides the keypoint predictor itself.", + "bbox": [ + 496, + 488, + 890, + 670 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3. Self-supervised Learning for Point Clouds", + "text_level": 1, + "bbox": [ + 498, + 696, + 857, + 713 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Self-supervised representation learning has proven to be remarkably useful in language [3, 17] and 2D vision tasks [2, 7]. As LiDAR sensors become more affordable and common, there has been an increasing amount of research interest in self-supervised learning on 3D point clouds. Previous works proposed to learn representations of object or scene level point clouds through contrastive learning [8, 25, 30] or reconstruction [23, 26, 28, 33], which is useful in downstream classification or segmentation tasks. In contrast, our supervision signals come from the unique structure of the human body and our learned backbone is particularly useful in downstream human keypoint estimation tasks.", + "bbox": [ + 496, + 719, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "1159", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 89, + 169, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we describe our complete training pipeline which contains two stages. In the first stage, we initialize the model parameters on a synthetic dataset (Sec. 3.1). The purpose of Stage I is to warm-up the model with reasonable semantics. The second stage generalizes the model to the real-world data. In this stage, we use our unsupervised losses to refine the keypoint predictions on in-the-wild point clouds (Sec. 3.2). An overview of our pipeline is in Fig. 
2.", + "bbox": [ + 75, + 113, + 468, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Stage I: Initialization on Synthetic Data", + "text_level": 1, + "bbox": [ + 76, + 257, + 416, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this stage, we initialize the model on a synthetic dataset that is constructed by ray casting onto randomly posed human mesh models (SMPL [15]). We describe details of synthetic data generation in Supplementary.", + "bbox": [ + 75, + 279, + 468, + 339 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The goal of this stage is to train a model $f$ that takes a point cloud of a human $\\mathbf{P} \\in \\mathbb{R}^{N \\times 3}$ and outputs 3D locations of keypoints $\\hat{\\mathbf{Y}} \\in \\mathbb{R}^{(J + 1) \\times 3}$ , as well as soft body part assignments (or part segmentation) $\\hat{\\mathbf{W}} \\in \\mathbb{R}^{N \\times (J + 1)}$ that contains the probability of each point $i$ belonging to body part $j \\in [J]$ or the background.", + "bbox": [ + 75, + 339, + 468, + 429 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\{\\hat {\\mathbf {Y}}, \\hat {\\mathbf {W}} \\} = f (\\mathbf {P}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 438, + 468, + 455 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\forall i \\in [ N ], \\sum_ {j = 1} ^ {J + 1} \\hat {\\mathbf {W}} _ {i, j} = 1 \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 460, + 468, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Ground truth information about part segmentation $\\mathbf{W}$ and keypoint locations $\\mathbf{Y}$ are readily available for synthetic data. Hence, we can train the model by directly supervising the predicted keypoint through L2 loss,", + "bbox": [ + 75, + 507, + 468, + 569 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {k p} = \\left\\| \\hat {\\mathbf {Y}} - \\mathbf {Y} \\right\\| _ {2} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 578, + 468, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and predicted segmentation through cross entropy loss,", + "bbox": [ + 76, + 607, + 437, + 623 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {s e g}} = - \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {J + 1} \\mathbf {W} _ {i, j} \\log \\left(\\hat {\\mathbf {W}} _ {i, j}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 633, + 468, + 669 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Overall, we minimize", + "bbox": [ + 76, + 681, + 223, + 694 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {s y n}} = \\lambda_ {k p} \\mathcal {L} _ {\\mathrm {k p}} + \\lambda_ {\\text {s e g}} \\mathcal {L} _ {\\text {s e g}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 707, + 468, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Notably, in Sec. 4.6 we show that supervision in this stage is not required - ground truth $\\mathbf{W}$ and $\\mathbf{Y}$ can be replaced by surrogate ground truths to achieve comparable results.", + "bbox": [ + 75, + 734, + 468, + 780 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Stage II: Self-Supervised Learning on In-the-Wild Data", + "text_level": 1, + "bbox": [ + 76, + 787, + 468, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this stage, we further refine the network using unsupervised losses. The key insight behind the design of the losses is that the human body is composed of limbs, each of which is a rigid part. Therefore, points on a limb move with the limb and should stay roughly at the same location", + "bbox": [ + 75, + 825, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in each limb's local coordinate system. To account for this, we propose flow loss that encourages the points to stay in the same location (despite rotation around the limb) within each limb's local cylindrical coordinate.", + "bbox": [ + 496, + 90, + 890, + 150 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We start by formally defining the key ingredients in the following formulations. In our setup, a human skeleton $L$ is composed of limbs, each of which is connecting two keypoints. A limb $l = (y_{a}, y_{b}) \\in L$ is a line segment connecting the parent $y_{a}$ and child keypoints $y_{b}$ on this limb, and all surface points on this limb have segmentation label $a$ .", + "bbox": [ + 496, + 150, + 890, + 239 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "All three proposed losses are in terms of surface points in each predicted limb's local coordinate system. Therefore, we first convert all input points to each limbs' local cylindrical coordinate and compute the radial and axial coordinates. Specifically, we project point $p \\in \\mathbf{P}$ in global coordinate on to vector $\\overrightarrow{\\hat{y}_a\\hat{y}_b}$ , and calculate the norm of the projected vector", + "bbox": [ + 496, + 239, + 890, + 348 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {z} (p, \\hat {l}) = \\frac {\\left(p - \\hat {y} _ {a}\\right) \\cdot \\left(\\hat {y} _ {b} - \\hat {y} _ {a}\\right)}{\\| \\hat {y} _ {b} - \\hat {y} _ {a} \\| _ {2}} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 354, + 890, + 388 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and the distance between the point and $\\overrightarrow{\\hat{y}_a\\hat{y}_b}$", + "bbox": [ + 500, + 396, + 795, + 414 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {r} (p, \\hat {l}) = \\| p - \\hat {y} _ {a} - \\mathbf {z} (\\hat {y} _ {b} - \\hat {y} _ {a}, \\hat {l}) \\| _ {2} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 420, + 890, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For simplicity, we use $\\mathbf{z}_{\\hat{l}}(p)$ to represent $\\mathbf{z}(p,\\hat{l})$ , and $\\mathbf{r}_{\\hat{l}}(p)$ to represent $\\mathbf{r}(p,\\hat{l})$ in the following.", + "bbox": [ + 496, + 446, + 888, + 478 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Next, we describe the formulation of each loss function in detail.", + "bbox": [ + 496, + 478, + 888, + 506 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Flow Loss. Flow loss considers the predictions from two consecutive frames and encourages consistency of the radial and altitude components of all points with respect to scene flow - limbs should move between frames in a way to keep radial and axial coordinates for all points constant. 
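As a concrete illustration of the per-limb cylindrical coordinates of Eqs. (6) and (7), on which all of the unsupervised losses below are built, a minimal NumPy sketch is given here; the function and variable names are ours for illustration (the paper does not provide reference code), and Eq. (7) is read as the distance from a point to the limb axis:

import numpy as np

def limb_local_coords(points, y_a, y_b, eps=1e-8):
    # points: (N, 3) surface points in the global frame
    # y_a, y_b: (3,) parent and child keypoints of the limb l = (y_a, y_b)
    axis = y_b - y_a
    unit = axis / (np.linalg.norm(axis) + eps)            # limb direction
    rel = points - y_a                                    # points relative to the parent joint
    z = rel @ unit                                        # axial coordinate, Eq. (6)
    r = np.linalg.norm(rel - np.outer(z, unit), axis=1)   # radial coordinate, Eq. (7)
    return z, r

The flow, points-to-limb and symmetry terms that follow can then be expressed directly in terms of z and r together with the predicted part weights.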
Formally, we define the forward and backward flow losses $(\\mathcal{L}_{ff}$ and $\\mathcal{L}_{bf}$ respectively) for limbs $\\hat{l}^t = (\\hat{y}_a^t,\\hat{y}_b^t)$ and $\\hat{l}^{t + 1} = (\\hat{y}_a^{t + 1},\\hat{y}_b^{t + 1})$ for predicted keypoints for timestamp $t$ and $t + 1$ .", + "bbox": [ + 496, + 507, + 890, + 643 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {f f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {r} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right| + \\right. \\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right|) \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 651, + 890, + 705 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {b f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t + 1} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} t} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {r} _ {\\hat {l} t + 1} \\left(p _ {i} ^ {t + 1}\\right) \\right| + \\right. \\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t + 1}\\right) \\right|) \\tag {9} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 734, + 890, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$f^t$ is the forward flow for each point $p^t \\in \\mathbf{P}^t$ and $b^{t+1}$ is the backward flow for each point $p^{t+1} \\in \\mathbf{P}^{t+1}$ . We use Neural Scene Flow Prior [14] to estimate flow for two consecutive frames of points. The overall flow loss for frame $t$ is", + "bbox": [ + 496, + 792, + 890, + 854 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {f l o w}} = \\frac {1}{| L |} \\sum_ {\\hat {l} t} \\frac {\\mathcal {L} _ {f f} + \\mathcal {L} _ {b f}}{2} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 859, + 890, + 896 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "1160", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e2241ea31bfe3b1b334d4aec242957af7570a85f23e0d4f8a61bd8a2d7a27339.jpg", + "image_caption": [ + "Stage I: Initialization on Synthetic Data", + "Unsupervised Losses" + ], + "image_footnote": [], + "bbox": [ + 109, + 112, + 455, + 224 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e6c46fcd903a52cbf3d9ea1d82cf231f133ad9db0823c9be84fb08d9e6962afb.jpg", + "image_caption": [ + "Stage II: Unsupervised Learning on In-the-Wild Data" + ], + "image_footnote": [], + "bbox": [ + 500, + 116, + 883, + 220 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c19e32e7f6e091575f8c198f70f1411cfba5908a5377ce6ddceddb13810fe484.jpg", + "image_caption": [ + "Flow loss" + ], + "image_footnote": [ + "(a) After moving, points stay in the same place (despite rotation around axis) within each limb's local cylindrical coordinate system." + ], + "bbox": [ + 91, + 276, + 197, + 347 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ee82e58063d9c4b155af987824ad367617c17e974f3bc1a2ff60e862b13eb8fb.jpg", + "image_caption": [ + "Figure 2. 
Overview of our method. In Stage I, we warm-up the keypoint predictor and body part segmentation predictor on a small synthetic dataset. Then, in Stage II we refine the 3D keypoint predictions on a large in-the-wild dataset with unsupervised losses. The main losses are depicted on the bottom." + ], + "image_footnote": [], + "bbox": [ + 218, + 277, + 346, + 349 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4705f627c4c7da48ea1250dc5f5c6d455383ec604cac68cabb703e4aecea0e16.jpg", + "image_caption": [ + "Points-to-limb loss" + ], + "image_footnote": [ + "(b) Minimize points-to-limb distance to encourage the limb to stay within the body." + ], + "bbox": [ + 410, + 292, + 563, + 342 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9136ace268ca2b5d4dd47262bdc7e07315e5ff57d75fa2c86dd65ab0c40c1236.jpg", + "image_caption": [ + "Symmetry loss" + ], + "image_footnote": [ + "(c) Points are symmetrical around limb. (i.e. points with similar height z have similar radius r)" + ], + "bbox": [ + 689, + 297, + 831, + 340 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By design, the flow loss value is the same if the radial and axial values for all points in a local coordinate system are the same in consecutive frames. This would happen if a limb in both frames are shifted in their respective orthogonal direction by the same amount. Theoretically, it is unlikely to happen for all limbs, but empirically we observe that with flow loss alone the skeleton would move out of the point cloud. Therefore, we need additional losses to make the keypoints stay within the body.", + "bbox": [ + 75, + 419, + 468, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Points-to-Limb Loss. For a predicted limb $\\hat{l} = (\\hat{y}_a, \\hat{y}_b)$ , we want the points on this limb to be close to it. Hence, we introduce a points-to-limb (p2l) loss", + "bbox": [ + 75, + 554, + 468, + 599 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p 2 l} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\mathbf {d} \\left(p _ {i}, \\hat {l}\\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 604, + 468, + 637 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{d}$ is the Euclidean distance function between a point and a line segment. We sum over all points to get the overall points-to-limb loss,", + "bbox": [ + 75, + 648, + 468, + 694 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} = \\frac {1}{| L |} \\sum_ {\\hat {l}} \\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} ^ {\\hat {l}} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 693, + 468, + 728 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Symmetry Loss. Symmetry loss encourages the predicted limb $\\hat{l}$ to be in a position such that all points around this limb are roughly symmetrical around it. That is to say, points with similar axial coordinates $\\mathbf{z}_{\\hat{l}}$ should have similar radial values $\\mathbf{r}_{\\hat{l}}$ . 
To that end, we introduce symmetry loss,", + "bbox": [ + 75, + 739, + 468, + 815 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s y m} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\left(\\mathbf {r} _ {\\hat {l}} \\left(p _ {i}\\right) - \\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right)\\right) ^ {2} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 145, + 839, + 468, + 871 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\bar{\\mathbf{r}}_i(p_i)$ is the weighted mean of radial values of points", + "bbox": [ + 76, + 885, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "with similar axial coordinates as $p_i$ ,", + "bbox": [ + 500, + 420, + 736, + 435 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right) = \\frac {\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right) \\mathbf {r} _ {\\hat {l}} \\left(p _ {j}\\right)}{\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right)} \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 444, + 890, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$K_{h}$ is Gaussian kernel with bandwidth $h$ , i.e. $K_{h}(x,y) = e^{-(\\frac{x - y}{h})^{2}}$ . $\\hat{\\mathbf{W}}_{i*} \\in \\mathbb{R}^{J}$ is the $i_{th}$ row of $\\hat{\\mathbf{W}}$ , and the dot product $\\hat{\\mathbf{W}}_{i*} \\cdot \\hat{\\mathbf{W}}_{j*}$ measures the similarity of part assignment of point $i$ and $j$ , as we want the value of $\\bar{r}_i^k$ to be calculated using the points from the same part as point $i$ .", + "bbox": [ + 498, + 491, + 890, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The overall symmetry loss is over all points,", + "bbox": [ + 500, + 569, + 792, + 584 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s y m} = \\frac {1}{| L |} \\sum_ {l \\in L} \\mathcal {L} _ {s y m} ^ {l} \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 592, + 890, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Joint-to-Part Loss. 
In addition, we encourage each joint to be close to the center of the points on that part using a joint-to-part loss.", + "bbox": [ + 496, + 632, + 890, + 676 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {j 2 p} ^ {j} = \\left\\| \\hat {y} _ {j} - \\frac {\\sum_ {i} \\hat {\\mathbf {W}} _ {i j} p _ {i}}{\\sum_ {i} \\hat {\\mathbf {W}} _ {i j}} \\right\\| _ {2} \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 606, + 686, + 890, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We sum over all joints to get the overall joint-to-part loss.", + "bbox": [ + 500, + 732, + 888, + 748 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {j 2 p} = \\frac {1}{J} \\sum_ {j} \\mathcal {L} _ {j 2 p} ^ {j} \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 635, + 768, + 890, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that although the ground truth location of joints are not in the center of points on the corresponding part, keeping this loss is essential in making the unsupervised training more robust.", + "bbox": [ + 496, + 811, + 890, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In practice, jointly optimizing $\\hat{\\mathbf{W}}$ and $\\hat{\\mathbf{Y}}$ in Stage II leads to unstable training curves. Hence, we use the pre-trained", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "1161", + "bbox": [ + 483, + 944, + 513, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/94c477a0fef43640d8aee39e48a863e21dfb0607695fb10236540fe888404a00.jpg", + "image_caption": [ + "Figure 3. Effect of unsupervised losses on perturbed skeleton." + ], + "image_footnote": [], + "bbox": [ + 81, + 118, + 460, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "segmentation branch from Stage I to run segmentation inference to get the segmentation labels on all of the training samples in the beginning of Stage II, and $\\hat{\\mathbf{W}}$ is the one-hot encoding of the predicted segmentation labels.", + "bbox": [ + 75, + 306, + 468, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Segmentation Loss. Lastly, we notice that keeping the segmentation loss at this stage further regularizes the backbone and leads to better quantitative performance. We use the inferred segmentation $\\hat{\\mathbf{W}}$ as the surrogate ground truth and minimize cross entropy as in Eq. (4).", + "bbox": [ + 75, + 366, + 468, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training objective. The overall training objective during Stage II is to minimize", + "bbox": [ + 76, + 441, + 468, + 469 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} = \\lambda_ {f l o w} \\mathcal {L} _ {f l o w} + \\lambda_ {\\mathrm {p 2 l}} \\mathcal {L} _ {\\mathrm {p 2 l}} + \\lambda_ {s y m} \\mathcal {L} _ {s y m} \\\\ + \\lambda_ {\\mathrm {j} 2 \\mathrm {p}} \\mathcal {L} _ {\\mathrm {j} 2 \\mathrm {p}} + \\lambda_ {\\mathrm {s e g}} \\mathcal {L} _ {\\mathrm {s e g}} \\tag {18} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 484, + 468, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To illustrate the effect of the three unsupervised losses $(\\mathcal{L}_{flow}, \\mathcal{L}_{p2l}$ and $\\mathcal{L}_{sym})$ , we show the result of applying these losses on a perturbed ground truth skeleton (Fig. 3). 
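As a toy illustration in the spirit of Fig. 3, the sketch below perturbs a single ground-truth limb and refines its endpoints by gradient descent on simplified points-to-limb and symmetry terms (Eqs. (11) and (13)-(14) restricted to one limb with uniform part weights). This is our own reconstruction for intuition, not the authors' experiment or code; the actual method optimizes network parameters with all terms of Eq. (18) and the weights of Sec. 4.1.

import math
import torch

def limb_coords(points, y_a, y_b, eps=1e-8):
    # axial (z) and radial (r) coordinates w.r.t. the limb (y_a, y_b), Eqs. (6)-(7)
    axis = y_b - y_a
    unit = axis / (axis.norm() + eps)
    rel = points - y_a
    z = rel @ unit
    r = (rel - z[:, None] * unit).norm(dim=1)
    return z, r

def p2l_loss(points, y_a, y_b, eps=1e-8):
    # mean distance from each point to the limb segment (Eq. (11) with uniform weights)
    axis = y_b - y_a
    unit = axis / (axis.norm() + eps)
    z = (points - y_a) @ unit
    t = z.clamp(min=0.0).clamp(max=float(axis.norm()))
    closest = y_a + t[:, None] * unit
    return (points - closest).norm(dim=1).mean()

def sym_loss(points, y_a, y_b, h=0.1, eps=1e-8):
    # points at similar heights z should have similar radii r (Eqs. (13)-(14), single part)
    z, r = limb_coords(points, y_a, y_b)
    w = torch.exp(-((z[:, None] - z[None, :]) / h) ** 2)   # Gaussian kernel K_h
    r_bar = (w * r[None, :]).sum(dim=1) / (w.sum(dim=1) + eps)
    return ((r - r_bar) ** 2).mean()

torch.manual_seed(0)
# synthetic "limb": 200 points on a 5 cm radius cylinder around the true segment
gt_a = torch.tensor([0.0, 0.0, 0.0])
gt_b = torch.tensor([0.0, 0.0, 0.5])
height = torch.rand(200) * 0.5
angle = torch.rand(200) * 2.0 * math.pi
points = torch.stack([0.05 * torch.cos(angle), 0.05 * torch.sin(angle), height], dim=1)

# perturb the limb endpoints, then refine them with the unsupervised terms
y_a = (gt_a + 0.1 * torch.randn(3)).requires_grad_()
y_b = (gt_b + 0.1 * torch.randn(3)).requires_grad_()
optimizer = torch.optim.Adam([y_a, y_b], lr=1e-2)
for _ in range(300):
    optimizer.zero_grad()
    # 0.01 and 0.5 follow the lambda_p2l and lambda_sym values reported in Sec. 4.1
    loss = 0.01 * p2l_loss(points, y_a, y_b) + 0.5 * sym_loss(points, y_a, y_b)
    loss.backward()
    optimizer.step()
print((y_a - gt_a).norm().item(), (y_b - gt_b).norm().item())  # endpoint errors after refinement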
As shown, the proposed unsupervised losses effectively moves the perturbed skeleton to locations that are closer to ground truth.", + "bbox": [ + 75, + 526, + 468, + 617 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 628, + 209, + 645 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 651, + 294, + 667 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The predictor model $f$ consists of a transformer backbone with fully connected layers for predicting joints and segmentation respectively. We use the same transformer backbone as in HUM3DIL [29]. A fully connected layer is applied to the output of transformer head to regress the predicted $\\hat{W}$ and $\\hat{Y}$ respectively. There are 352,787 trainable parameters in total. We set the maximum number of input LiDAR points to 1024, and zero-pad or downsample the point clouds with fewer or more number of points. The flow is obtained using a self-supervised test-time optimization method [14]. The network is trained on 4 TPUs. We train Stage I for 200 epochs and Stage II for 75 epochs, both with batch size 32, base learning rate of $1e - 4$ , and exponential decay 0.9. Stage I and II each finishes in about 6 hours. The loss weights in Eq. (5) are $\\lambda_{kp} = 0.5$ and $\\lambda_{seg} = 1$ .", + "bbox": [ + 75, + 674, + 472, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The loss weights in Eq. (18) are $\\lambda_{flow} = 0.02$ , $\\lambda_{p2l} = 0.01$ , $\\lambda_{sym} = 0.5$ , $\\lambda_{j2p} = 2$ , and $\\lambda_{seg} = 0.5$ . The kernel bandwidth Eq. (14) is 0.1.", + "bbox": [ + 498, + 90, + 892, + 137 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Dataset and Metrics", + "text_level": 1, + "bbox": [ + 498, + 152, + 691, + 167 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We construct a synthetic dataset with 1,000 sequences of 16-frame raycasted point clouds for Stage I training. Each sequence starts with the same standing pose and ends in a random pose. We find that data augmentation is essential in Stage I training. To simulate real-world noisy background and occlusion, we apply various data augmentations to the synthetic data, including randomly downsample, random mask, add ground clusters, add background clusters, add a second person, add noise to each point, scale the person. We include examples of augmented synthetic data in Fig. 4.", + "bbox": [ + 496, + 174, + 893, + 338 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/46b0b01e293159ea5e03a7b82bbd0621d66fb8aa7fa3b31e0c0263f694d1b38a.jpg", + "image_caption": [ + "Figure 4. Data augmentations applied to the synthetic point clouds (colored by ground truth segmentation labels). Ground truth skeletons are shown in purple. Background points are in blue." + ], + "image_footnote": [], + "bbox": [ + 535, + 340, + 861, + 534 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Stage II, we train on the entire Waymo Open dataset (WOD) training set (with around 200,000 unlabeled samples). As the official WOD testing subset is hidden from the public, we randomly choose $50\\%$ of the validation set as the validation split, and the rest as the test split for benchmarking. We report average Mean Per Joint Position Error (MPJPE) on test set at the end of each stage. 
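Concretely, the evaluation metric used for this stage, visibility-masked MPJPE computed on Hungarian-matched keypoints as formalized in Eq. (19) and the matching note just below, could be sketched as follows; shapes and names are ours and the snippet is not taken from any released tooling:

import numpy as np
from scipy.optimize import linear_sum_assignment

def matched_mpjpe(pred, gt, vis):
    # pred, gt: (J, 3) predicted and annotated keypoints for one frame, in meters
    # vis: (J,) 0/1 visibility flags for the annotated keypoints
    cost = np.linalg.norm(pred[:, None, :] - gt[None, :, :], axis=-1)  # pairwise joint distances
    rows, cols = linear_sum_assignment(cost)   # Hungarian matching of predictions to labels
    err = np.linalg.norm(pred[rows] - gt[cols], axis=-1)
    keep = vis[cols].astype(bool)              # Eq. (19): only visible joints contribute
    return float(err[keep].mean()) if keep.any() else float("nan")

# dataset-level MPJPE: average the per-frame values over the evaluation split
# (eval_frames is a placeholder iterable of (pred, gt, vis) arrays)
# mpjpe = np.nanmean([matched_mpjpe(p, g, v) for p, g, v in eval_frames])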
Formally, for a single sample, let $\\hat{Y} \\in \\mathcal{R}^{J \\times 3}$ be the predicted keypoints, $Y \\in \\mathcal{R}^{J \\times 3}$ the ground truth keypoints, and $v \\in \\{0,1\\}^J$ the visibility indicator annotated per keypoint.", + "bbox": [ + 496, + 585, + 890, + 739 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {M P J P E} (Y, \\hat {Y}) = \\frac {1}{\\sum_ {j} v _ {j}} \\sum_ {j \\in [ J ]} v _ {j} \\| y _ {j} - \\hat {y} \\| _ {2} \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 755, + 890, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that in this Stage, we do Hungarian matching between the predicted and annotated keypoints per frame, and then report MPJPE on matched keypoints. We report matched MPJPE because the method is intended for scenarios where correspondence between keypoints in the unlabeled training data and downstream data is unknown.", + "bbox": [ + 496, + 809, + 892, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "1162", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/235df5a86dacfa377f2b945d9cac5e30d20a2b314a17b86314d035b487064d79.jpg", + "image_caption": [ + "Figure 5. Visualizations of predictions on WOD at the end of Stage I and Stage II. Points are colored by predicted segmentation labels. Ground truth keypoints are in green and predicted keypoints and skeletons are in red." + ], + "image_footnote": [], + "bbox": [ + 122, + 93, + 415, + 448 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Results", + "text_level": 1, + "bbox": [ + 76, + 512, + 171, + 526 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section we perform quantitative evaluation of GC-KPL at the end of Stage I and II in Tab. 2. Qualitative results are in Fig. 5. As shown, after first stage where we train on a synthetic dataset constructed from posed body models with carefully chosen data augmentations, we are able to predict reasonable human keypoints on in-the-wild point clouds. The second stage our novel unsupervised losses further refine the predicted keypoints.", + "bbox": [ + 75, + 534, + 468, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Downstream Task: Few-shot 3D Keypoint Learning", + "text_level": 1, + "bbox": [ + 76, + 667, + 468, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this experiment, we show that the backbone of our model benefits from unsupervised training on large amount of unlabeled data, and can be useful for downstream finetuning tasks. We start from our pre-trained backbone after Stage II, and fine-tune with annotated training samples from WOD by minimizing mean per joint error. We include few-shot experiments where we fine-tune with a extremely small amount of data (10% and 1% of the training set), to represent challenging scenarios where there is a limited amount of annotated data.", + "bbox": [ + 75, + 705, + 468, + 854 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We include the LiDAR-only version of HUM3DIL (a state-of-the-art model on WOD) [29] as a strong baseline. The quantitative results (Tab. 1) suggest that our back", + "bbox": [ + 75, + 854, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "bone learns useful information from the unlabeled in-the-wild data and enables a significant performance boost on the downstream tasks. 
Compared to a randomly initialized backbone as used in HUM3DIL, our backbone leads to over $2\\mathrm{cm}$ of decrease in MPJPE in downstream fine-tuning experiments, which is a significant improvement for the 3D human keypoint estimation task.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We visualize the predicted keypoints under different data regime in Fig. 6. As shown, models fine-tuned from our backbone is able to capture fine details on the arms and overall produces more accurate results than HUM3DIL.", + "bbox": [ + 496, + 196, + 890, + 255 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To the best of our knowledge, there does not exist previous works on completely unsupervised human keypoint estimation from point clouds. We additionally experiment with using a readout layer on top of the features learned by a state-of-the-art point cloud SSL method 3D-OAE [30], but the MPJPE is $15\\mathrm{cm}$ (compared to $10.10\\mathrm{cm}$ from GC-KPL). Hence we consider the baselines we adopt here strong and complete. In Sec. 4.6, we further challenge our method by comparing to the domain adaptation setup and demonstrate that the performance of GC-KPL is still superior.", + "bbox": [ + 496, + 255, + 892, + 405 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5. Domain adaptation", + "text_level": 1, + "bbox": [ + 500, + 412, + 684, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the configuration where we use ground truth labels in Stage I and unsupervised training in Stage II could be seen as a domain adaption (DA) technique. Thus it is useful to compare proposed method with a commonly-used domain adaptation method. We train the same backbone model using a mix of real and synthetic data and a gradient reversal layer (aka DA loss) [5] to help the network to learn domain invariant keypoint features. Results in Tab. 3 demonstrate that GC-KPL yields superior accuracy compared with the DA method (MPJPE 10.1 vs $11.35\\mathrm{cm}$ ).", + "bbox": [ + 496, + 435, + 890, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.6. Ablations", + "text_level": 1, + "bbox": [ + 500, + 593, + 609, + 607 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Effect of using GT bounding boxes in pre-processing. We cropped human point clouds from the entire scene by including only points within GT bounding boxes. We also conducted experiments where we train with detected bounding boxes from raw LiDAR scans using a SoTA 3D detector. Results suggest that GC-KPL is robust to noise in 3D detection, as there were no noticeable changes in metrics.", + "bbox": [ + 496, + 614, + 890, + 719 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Effect of synthetic dataset size. In our method Stage I serves as a model initialization step where we show that training on a small synthetic dataset (16,000 samples) with properly chosen data augmentations is suffice for the model to learn useful semantics. We further investigate the effect of synthetic dataset size during Stage I. We experiment with larger dataset sizes (160,000 and 1,600,000 samples) and observe that the effect of increasing synthetic dataset size is insignificant on $\\mathrm{MPJPE}_{\\mathrm{matched}}$ at the end of Stage I - it decreased from $17.7\\mathrm{cm}$ to $17.6\\mathrm{cm}$ . 
Lack of a notable improvements for larger dataset sizes is likely due to limited variability of generated poses in synthetic data (see Supple", + "bbox": [ + 496, + 720, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "1163", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8dc2fe390528e659e81192b9044aa880ff139f08e5825c8a30a66b34d0b122a9.jpg", + "image_caption": [ + "(a) Fine-tune on $100\\%$ training set" + ], + "image_footnote": [], + "bbox": [ + 122, + 89, + 419, + 407 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf2689fb1e1f498337db157c972e6efcc82d78b530c8b7c4f39d2e975113622b.jpg", + "image_caption": [ + "(b) Fine-tune on $10\\%$ training set" + ], + "image_footnote": [], + "bbox": [ + 421, + 90, + 627, + 407 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/561b3aee6120df91a4d59a2d47092628e9b66ea246cf2d73ff471e17621a1132.jpg", + "image_caption": [ + "(c) Fine-tune on $1\\%$ training set", + "Figure 6. Predicted keypoints from fine-tuning with different amount of annotated data. The points are colored by predicted segmentation labels by our model. Predicted keypoints are shown in red." + ], + "image_footnote": [], + "bbox": [ + 630, + 90, + 843, + 407 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/857d7c4785e6e810c95ce2fa60973eac22a2da6a5d7a2678ff5d5a7fe212d259.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodBackboneStage I supervised1% training set MPJPE cm. (gain)10% training set MPJPE cm. (gain)100% training set MPJPE cm. (gain)
HUM3DIL [29]Randomly initialized19.5716.3612.21
Pre-trained on synthetic only18.52 (-1.05)15.10 (-1.26)11.27 (-0.94)
GC-KPLPre-trained on 5,000 WOD-train17.87 (-1.70)14.51 (-1.85)10.73 (-1.48)
Pre-trained on 200,000 WOD-train17.80 (-1.77)14.30 (-2.06)10.60 (-1.61)
Pre-trained on 200,000 WOD-train17.20 (-2.37)13.40 (-2.96)10.10 (-2.11)
", + "bbox": [ + 78, + 477, + 890, + 599 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/2b724ef6ef1e37a1fcd2d5bca2ace917a76c092949837e058db89b07f4a1a1fb.jpg", + "table_caption": [ + "Table 1. Downstream fine-tuning results. Check marks in \"Stage I supervised\" mean that we use ground truth part labels in Stage I, otherwise we use KMeans labels." + ], + "table_footnote": [], + "table_body": "
Training data\( MPJPE_{matched} \) (↓)
Synthetic only17.70
5,000 WOD-train14.64
200,000 WOD-train13.92
", + "bbox": [ + 130, + 642, + 416, + 720 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1d0c9bc76f2ad60d39dfc5760a39ca04b7b42e24daf47ba7120fe11b67aabad4.jpg", + "table_caption": [ + "Table 2. Unsupervised learning (Stage II) results." + ], + "table_footnote": [], + "table_body": "
Domain distributionDA lossMPJPE (↓)
100% real12.21
50/50% real/synthetic12.08
50/50% real/synthetic11.35
", + "bbox": [ + 109, + 756, + 437, + 834 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Unsupervised domain adaptation results evaluated on WOD validation set.", + "bbox": [ + 75, + 844, + 467, + 868 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mental for details).", + "bbox": [ + 500, + 646, + 627, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of using ground truths on synthetic data. While our described pipeline does not use any kind of manual labels, we do use ground truth segmentation and keypoints on synthetic dataset in Stage I because they are readily available. Here we further experiment with a variation where we do not use any kind of ground truths in Stage I (first row in Tab. 4). Instead, we use KMeans clusters and cluster centers as surrogate ground truths for model initialization, similar to [1]. Note that we are able to establish correspondence between KMeans clusters from different samples due to the fact that in our data generation process, each synthetic sequence starts with the same starting standing pose. Hence, we can run KMeans clustering on the starting pose that is shared among all sequences, and for subsequent samples within each sequence, we do Hungarian matching using", + "bbox": [ + 496, + 660, + 890, + 888 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "1164", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5baa9cd0ea4f591b5697b46de878005b3516974af871b926c862c73b534bf5b7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Stage IStage II
No.Exp.\\( \\mathcal{L}_{kp} \\)\\( \\mathcal{L}_{seg} \\)\\( MPJPE_{matched} \\)\\( \\mathcal{L}_{j2p} \\)\\( \\mathcal{L}_{seg} \\)\\( \\mathcal{L}_{sym} \\)\\( \\mathcal{L}_{p2l} \\)\\( \\mathcal{L}_{flow} \\)\\( MPJPE_{matched} \\)
1Effect of using KMeans labels in Stage I19.214.5
2Effect of \\( \\mathcal{L}_{kp} \\) in Stage IN/A14.2
315.0
4Effect of warmup losses in Stage II14.2
515.2
630.1
715.6
825.7
9Effect of unsupervised losses in Stage II14.3
1014.9
1114.4
1214.9
Full model (GC-KPL)17.713.9
", + "bbox": [ + 76, + 88, + 893, + 308 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4. Ablations studies on the effect of individual loss term in our method. Experiments 3 through 12 are using both losses in Stage I. Full model is using GT labels for Stage I.", + "bbox": [ + 75, + 316, + 892, + 344 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "inter-cluster Chamfer distance to establish correspondence between clusters from consecutive frames. We observe that although initializing with surrogate ground truths leads to slightly inferior performance in Stage I, after training with the losses in Stage II the drop in performance is less visible. Overall, downstream fine-tuning performance is comparable to our best model (10.6/14.3/17.8 vs. 10.1/13.4/17.2 cm when fine-tuned on $100\\% / 10\\% / 1\\%$ of the data, see Tab. 1). This experiment suggests that method does not require any kind of ground truths, even during initialization stage.", + "bbox": [ + 75, + 354, + 468, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of Losses. In this section we further investigate the effect of each component in our pipeline (Tab. 4). First, we note that $\\mathcal{L}_{seg}$ in Stage I is essential because we need an initialized segmentation model to get the body part assignment for each point in order to calculate the losses in Stage II. Therefore, we only experiment with a variation of Stage I training without $\\mathcal{L}_{kp}$ , and we observe that $\\mathcal{L}_{kp}$ is useful in warming up the backbone for later stages. Next, we take the backbone from Stage I (trained with both $\\mathcal{L}_{kp}$ and $\\mathcal{L}_{seg}$ ), and study the effect of individual losses in Stage II. Experiments No. $3/4/5$ show that it is helpful to include $\\mathcal{L}_{j2p}$ and $\\mathcal{L}_{seg}$ while having all other three unsupervised losses. In experiments $6/7/8$ we take out $\\mathcal{L}_{j2p}$ and $\\mathcal{L}_{seg}$ , and investigate the effect of individual unsupervised losses. As shown the training becomes rather unstable if we further eliminate any of the three losses. We observe qualitatively that the metric worsens drastically because the limbs quickly move out of the human body. Experiments No. $3/4/5$ suggest that $\\mathcal{L}_{j2p}$ and $\\mathcal{L}_{seg}$ are useful regularizers that make sure the limbs stay within the body, and the unsupervised losses further improve the performance by refining the keypoint location.", + "bbox": [ + 75, + 505, + 472, + 823 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.7. Limitations and Future Work", + "text_level": 1, + "bbox": [ + 76, + 848, + 341, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The task of keypoint location could be considered as a dual problem for semantic segmentation. In this work we", + "bbox": [ + 76, + 869, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "use a simple segmentation network based on the same architecture as our keypoint estimation model. Using a superior segmentation model could lead to further improvements.", + "bbox": [ + 496, + 354, + 890, + 398 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The proposed flow loss depends on quality of the estimated flow of LiDAR points. In this work we used a simple but reasonable method to estimate flow between two frames of LiDAR points called Neural Scene Flow prior [14]. 
Quality of the unsupervised keypoint estimation could be improved by using a more advanced flow estimator tailored for point clouds on human body surfaces.", + "bbox": [ + 496, + 398, + 892, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lastly, we use a part of the HUM3DIL [29] model which takes only LiDAR point cloud as input. The full HUM3DIL model was designed for multi-modal inputs and attains better performance. Thus, another interesting direction is to leverage multi-modal inputs.", + "bbox": [ + 496, + 503, + 893, + 578 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 585, + 619, + 601 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we approached the problem of 3D human pose estimation using points clouds in-the-wild, introduced a method (GC-KPL) for learning 3D human keypoints from point clouds without using any manual 3D keypoint annotations. We shown that the proposed novel losses are effective for unsupervised keypoint learning on Waymo Open Dataset. Through downstream experiments we demonstrated that GC-KPL can additionally serve as a self-supervised representation method to learn from large quantity of in-the-wild human point clouds. In addition, GC-KPL compares favorably with a commonly used domain adaptation technique. The few-shot experiments empirically verified that using only $10\\%$ of available 3D keypoint annotation the fine-tuned model reached comparable performance to the state-of-the-art model training on the entire dataset. These results opens up exciting possibility to utilize massive amount of sensor data in autonomous driving to improve pedestrian 3D keypoint estimation.", + "bbox": [ + 496, + 609, + 893, + 882 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "1165", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In European Conference on Computer Vision, 2018. 7", + "[2] Mathilde Caron, Piotr Bojanowski, Julien Mairal, and Armand Joulin. Unsupervised pre-training of image features on non-curated data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2959-2968, 2019. 2", + "[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2", + "[4] Michael Fürst, Shriya TP Gupta, René Schuster, Oliver Wasenmüller, and Didier Stricker. Hperl: 3d human pose estimation from rgb and lidar. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 7321-7327. IEEE, 2021. 1, 2", + "[5] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 1180-1189, Lille, France, 07-09 Jul 2015. PMLR. 6", + "[6] Albert Haque, Boya Peng, Zelun Luo, Alexandre Alahi, Serena Yeung, and Li Fei-Fei. Towards viewpoint invariant 3d human pose estimation. In European conference on computer vision, pages 160–177. Springer, 2016. 
2", + "[7] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 2", + "[8] Siyuan Huang, Yichen Xie, Song-Chun Zhu, and Yixin Zhu. Spatio-temporal self-supervised representation learning for 3d point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6535-6545, 2021. 2", + "[9] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Unsupervised learning of object landmarks through conditional image generation. Advances in neural information processing systems, 31, 2018. 2", + "[10] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Self-supervised learning of interpretable keypoints from unlabelled videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8787-8797, 2020. 2", + "[11] Wonhui Kim, Manikandasriram Srinivasan Ramanagopal, Charles Barto, Ming-Yuan Yu, Karl Rosaen, Nick Goumas, Ram Vasudevan, and Matthew Johnson-Roberson. Pedx: Benchmark dataset for metric 3-d pose estimation of pedestrians in complex urban intersections. IEEE Robotics and Automation Letters, 4(2):1940-1947, 2019. 1, 2", + "[12] Jiaxin Li and Gim Hee Lee. Usip: Unsupervised stable interest point detection from 3d point clouds. In Proceedings of" + ], + "bbox": [ + 78, + 114, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "the IEEE/CVF international conference on computer vision, pages 361-370, 2019. 2", + "[13] Jialian Li, Jingyi Zhang, Zhiyong Wang, Siqi Shen, Chenglu Wen, Yuexin Ma, Lan Xu, Jingyi Yu, and Cheng Wang. Lidarcap: Long-range marker-less 3d human motion capture with lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20502-20512, 2022. 1, 2", + "[14] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 5, 8", + "[15] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. ACM transactions on graphics (TOG), 34(6):1-16, 2015. 2, 3", + "[16] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3d joints for re-posing of articulated objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3677–3687, 2022. 2", + "[17] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. 2", + "[18] Luca Schmidtke, Athanasios Vlontzos, Simon Ellershaw, Anna Lukens, Tomoki Arichi, and Bernhard Kainz. Unsupervised human pose estimation through transforming shape templates. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2484-2494, 2021. 2", + "[19] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 2", + "[20] Jennifer J Sun, Serim Ryou, Roni H Goldshmid, Brandon Weissboud, John O Dabiri, David J Anderson, Ann Kennedy, Yisong Yue, and Pietro Perona. Self-supervised keypoint discovery in behavioral videos. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2171-2180, 2022. 2", + "[21] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1, 2", + "[22] Supasorn Suwajanakorn, Noah Snavely, Jonathan J Tompson, and Mohammad Norouzi. Discovery of latent 3d keypoints via end-to-end geometric reasoning. Advances in neural information processing systems, 31, 2018. 2", + "[23] Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9782-9792, 2021. 2", + "[24] Yuefan Wu, Zeyuan Chen, Shaowei Liu, Zhongzheng Ren, and Shenlong Wang. Casa: Category-agnostic skeletal ani" + ], + "bbox": [ + 503, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "1166", + "bbox": [ + 483, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "mal reconstruction. arXiv preprint arXiv:2211.03568, 2022. 2", + "[25] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In European conference on computer vision, pages 574-591. Springer, 2020. 2", + "[26] Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. Foldingnet: Point cloud auto-encoder via deep grid deformation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 206–215, 2018. 2", + "[27] Yang You, Wenhai Liu, Yanjie Ze, Yong-Lu Li, Weiming Wang, and Cewu Lu. Ukpgan: A general self-supervised keypoint detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17042-17051, 2022. 2", + "[28] Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-bert: Pre-training 3d point cloud transformers with masked point modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19313-19322, 2022. 2", + "[29] Andrei Zanfir, Mihai Zanfir, Alex Gorban, Jingwei Ji, Yin Zhou, Dragomir Anguelov, and Cristian Sminchisescu. Hum3dil: Semi-supervised multi-modal 3d humanpose estimation for autonomous driving. In 6th Annual Conference on Robot Learning, 2022. 1, 2, 5, 6, 7, 8", + "[30] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 6", + "[31] Zihao Zhang, Lei Hu, Xiaoming Deng, and Shihong Xia. Weakly supervised adversarial learning for 3d human pose estimation from point clouds. IEEE transactions on visualization and computer graphics, 26(5):1851-1859, 2020. 2", + "[32] Jingxiao Zheng, Xinwei Shi, Alexander Gorban, Junhua Mao, Yang Song, Charles R Qi, Ting Liu, Visesh Chari, Andre Cornman, Yin Zhou, et al. Multi-modal 3d human pose estimation with 2d weak supervision in autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4478-4487, 2022. 1, 2", + "[33] Junsheng Zhou, Xin Wen, Yu-Shen Liu, Yi Fang, and Zhizhong Han. 
Self-supervised point cloud representation learning with occlusion auto-encoder. arXiv preprint arXiv:2203.14084, 2022. 2", + "[34] Yufan Zhou, Haiwei Dong, and Abdulmotaleb El Saddik. Learning to estimate 3d human pose from point cloud. IEEE Sensors Journal, 20(20):12334-12342, 2020. 2" + ], + "bbox": [ + 78, + 90, + 468, + 781 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "1167", + "bbox": [ + 483, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_model.json b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..15ae134a7d275d2b4b517d03617bb7d9e35ac8cc --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_model.json @@ -0,0 +1,2244 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.164, + 0.13, + 0.808, + 0.175 + ], + "angle": 0, + "content": "3D Human Keypoints Estimation from Point Clouds in the Wild without Human Labels" + }, + { + "type": "text", + "bbox": [ + 0.191, + 0.203, + 0.777, + 0.24 + ], + "angle": 0, + "content": "Zhenzhen Weng\\(^{1*}\\) Alexander S. Gorban\\(^{2}\\) Jingwei Ji\\(^{2}\\) Mahyar Najibi\\(^{2}\\) Yin Zhou\\(^{2}\\) Dragomir Anguelov\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.352, + 0.256, + 0.612, + 0.275 + ], + "angle": 0, + "content": "\\(^{1}\\)Stanford University \\(^{2}\\)Waymo" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.327 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.34, + 0.474, + 0.687 + ], + "angle": 0, + "content": "Training a 3D human keypoint detector from point clouds in a supervised manner requires large volumes of high quality labels. While it is relatively easy to capture large amounts of human point clouds, annotating 3D keypoints is expensive, subjective, error prone and especially difficult for long-tail cases (pedestrians with rare poses, scooterists, etc.). In this work, we propose GC-KPL - Geometry Consistency inspired Key Point Leaning, an approach for learning 3D human joint locations from point clouds without human labels. We achieve this by our novel unsupervised loss formulations that account for the structure and movement of the human body. We show that by training on a large training set from Waymo Open Dataset [21] without any human annotated keypoints, we are able to achieve reasonable performance as compared to the fully supervised approach. Further, the backbone benefits from the unsupervised training and is useful in downstream few-shot learning of keypoints, where fine-tuning on only 10 percent of the labeled training data gives comparable performance to fine-tuning on the entire set. 
We demonstrated that GC-KPL outperforms by a large margin over SoTA when trained on entire dataset and efficiently leverages large volumes of unlabeled data." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.702, + 0.21, + 0.717 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.726, + 0.47, + 0.831 + ], + "angle": 0, + "content": "Estimation of human pose in 3D is an important problem in computer vision and it has a wide range of applications including AR/VR, AI-assisted healthcare, and autonomous driving [4,29,32]. For autonomous systems, being able to perceive human poses from sensor data (e.g. Li-DAR point clouds) is particularly essential to reason about the surrounding environment and make safe maneuvers." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.831, + 0.47, + 0.876 + ], + "angle": 0, + "content": "Despite the high level of interest in human pose estimation in the wild, only few papers approached outdoor 3D keypoint detection using point cloud. A main reason is that" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.313, + 0.885, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.559, + 0.895, + 0.645 + ], + "angle": 0, + "content": "Figure 1. We present GC-KPL, a novel method for learning 3D human keypoints from in-the-wild point clouds without any human labels. We propose to learn keypoint locations using unsupervised losses that account for the structure and movement of the human body. The backbone learns useful semantics from unsupervised learning and can be used in downstream fine-tuning tasks to boost the performance of 3D keypoint estimation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.66, + 0.895, + 0.901 + ], + "angle": 0, + "content": "training a pedestrian pose estimation model requires large amount of high quality in-the-wild data with ground truth labels. Annotating 3D human keypoints on point cloud data is expensive, time consuming and error prone. Although there are a few existing point cloud datasets with ground truth human poses [11, 13, 21], they are limited in terms of the quantity of the 3D annotations and diversity of the data. Therefore, fully-supervised human keypoint detectors trained on such datasets do not generalize well for long tail cases. For this reason, previous approaches on pedestrian 3D keypoint estimation have mainly focused on utilizing 2D weak supervision [4, 32] which is easier to obtain, or leveraging signals from others modalities (e.g. RGB, depth) [29]. Nonetheless, there is a lot of useful information in the large amount of unlabeled LiDAR data that previous works on human pose estimation have not made an effort to utilize." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.286, + 0.9 + ], + "angle": 0, + "content": "*Work done as an intern at Waymo." + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1158" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.257 + ], + "angle": 0, + "content": "In this work, we propose a novel and effective method for learning 3D human keypoints from in-the-wild point clouds without using any manual labeled 3D keypoints. Our approach is built on top of the key observation that human skeletons are roughly centered within approximately rigid body parts and that the location and movement of the surface points should explain the movement of the skeleton and vice versa. 
To that end, we design novel unsupervised loss terms for learning locations of the 3D keypoints/skeleton within human point clouds which correspond to 3D locations of major joints of human body." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.257, + 0.473, + 0.391 + ], + "angle": 0, + "content": "In the proposed method, we first train a transformer-based regression model for predicting keypoints and a semantic segmentation model for localizing body parts on a synthetic data constructed from randomly posed SMPL human body model [15]. Then, we train on the entire Waymo Open Dataset [21] without using any 3D ground-truth annotation of human keypoints. Through unsupervised training, keypoint predictions are refined and the backbone learns useful information from large amount of unannotated data." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.391, + 0.431, + 0.406 + ], + "angle": 0, + "content": "In summary, we make the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.415, + 0.468, + 0.46 + ], + "angle": 0, + "content": "- We present GC-KPL, a method for learning human 3D keypoints for in-the-wild point clouds without any manual keypoint annotations." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.47, + 0.469, + 0.547 + ], + "angle": 0, + "content": "- Drawing insight from the structure and movement of the human body, we propose three effective and novel unsupervised losses for refining keypoints. We show that the proposed losses are effective for unsupervised keypoint learning on Waymo Open Dataset." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.557, + 0.469, + 0.648 + ], + "angle": 0, + "content": "- Through downstream fine-tuning/few-shot experiments, we demonstrate that GC-KPL can be used as unsupervised representation learning for human point clouds, which opens up the possibility to utilize a practically infinite amounts of sensor data to improve human pose understanding in autonomous driving." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.415, + 0.469, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.659, + 0.22, + 0.675 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.682, + 0.469, + 0.713 + ], + "angle": 0, + "content": "2.1. 3D Human Keypoint Estimation from Points Clouds" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.473, + 0.903 + ], + "angle": 0, + "content": "There have been a few works [19, 31, 34] about estimating 3D keypoints from clean and carefully-curated point clouds [6], but 3D keypoint estimation from in-the-wild point clouds is a much less studied problem. Due to the lack of ground-truth 3D human pose annotations paired with Li-DAR data, there has not been a lot of works on 3d human keypoint estimation from LiDAR information. Among the few point cloud datasets with 3D keypoint annotations, Li-DARHuman26M [13] captures long-range human motions with ground truth motion acquired by the IMU system and pose information derived from SMPL models fitted into point clouds. It is among the first few datasets which have" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.44 + ], + "angle": 0, + "content": "LiDAR point clouds synchronized with RGB images, but SMPL shape parameters are same for all 13 subjects and it does not feature in-the-wild pedestrians where there could be much more background noise and occlusion. 
PedX [11] offers 3D automatic pedestrian annotations obtained using model fitting on different modalities, gathered effectively from a single intersection with only 75 pedestrians (the second intersection has only 218 frames, labels for the third scene were not released). Waymo Open Dataset [21] has more than 3,500 subjects from over 1,000 different in-the-wild scenes with high-quality 2D and 3D manual annotations. Despite the existence of these datasets, the few works on 3D pose estimation from point clouds mostly rely on weak supervision. HPERL model [4] trains on 2D ground-truth pose annotations and uses a reprojection loss for the 3D pose regression task. Multi-modal model in [32] uses 2D labels on RGB images as weak supervision, and creates pseudo ground-truth 3D joint positions from the projection of annotated 2D joints. HUM3DIL [29] leverages RGB information with LiDAR points, by computing pixel-aligned multi-modal features with the 3D positions of the LiDAR signal. In contrast, our method does not use any RGB information or weak supervision." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.467, + 0.817, + 0.484 + ], + "angle": 0, + "content": "2.2. Unsupervised Keypoint Localization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.892, + 0.671 + ], + "angle": 0, + "content": "There are a number of works that aim to recover 3D keypoints using self-supervised geometric reasoning [12, 22], but they are limited to rigid objects. More recent unsupervised methods work for articulated objects from monocular RGB data [9, 10, 10, 18, 20, 24], multi-view data [16], or point clouds [27], where authors suggest to condition on the predicted keypoints and train a conditional generative model to supervise the keypoints through reconstruction losses. We propose a simpler pipeline where we apply our novel unsupervised losses to the predicted keypoints directly and do not require additional models besides the keypoint predictor itself." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.698, + 0.859, + 0.714 + ], + "angle": 0, + "content": "2.3. Self-supervised Learning for Point Clouds" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Self-supervised representation learning has proven to be remarkably useful in language [3, 17] and 2D vision tasks [2, 7]. As LiDAR sensors become more affordable and common, there has been an increasing amount of research interest in self-supervised learning on 3D point clouds. Previous works proposed to learn representations of object or scene level point clouds through contrastive learning [8, 25, 30] or reconstruction [23, 26, 28, 33], which is useful in downstream classification or segmentation tasks. In contrast, our supervision signals come from the unique structure of the human body and our learned backbone is particularly useful in downstream human keypoint estimation tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1159" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.17, + 0.106 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.114, + 0.47, + 0.25 + ], + "angle": 0, + "content": "In this section, we describe our complete training pipeline which contains two stages. In the first stage, we initialize the model parameters on a synthetic dataset (Sec. 3.1). The purpose of Stage I is to warm-up the model with reasonable semantics. 
The second stage generalizes the model to the real-world data. In this stage, we use our unsupervised losses to refine the keypoint predictions on in-the-wild point clouds (Sec. 3.2). An overview of our pipeline is in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.258, + 0.418, + 0.274 + ], + "angle": 0, + "content": "3.1. Stage I: Initialization on Synthetic Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.28, + 0.469, + 0.34 + ], + "angle": 0, + "content": "In this stage, we initialize the model on a synthetic dataset that is constructed by ray casting onto randomly posed human mesh models (SMPL [15]). We describe details of synthetic data generation in Supplementary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.34, + 0.469, + 0.43 + ], + "angle": 0, + "content": "The goal of this stage is to train a model \\( f \\) that takes a point cloud of a human \\( \\mathbf{P} \\in \\mathbb{R}^{N \\times 3} \\) and outputs 3D locations of keypoints \\( \\hat{\\mathbf{Y}} \\in \\mathbb{R}^{(J + 1) \\times 3} \\), as well as soft body part assignments (or part segmentation) \\( \\hat{\\mathbf{W}} \\in \\mathbb{R}^{N \\times (J + 1)} \\) that contains the probability of each point \\( i \\) belonging to body part \\( j \\in [J] \\) or the background." + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.439, + 0.469, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\{\\hat {\\mathbf {Y}}, \\hat {\\mathbf {W}} \\} = f (\\mathbf {P}) \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.461, + 0.469, + 0.499 + ], + "angle": 0, + "content": "\\[\n\\forall i \\in [ N ], \\sum_ {j = 1} ^ {J + 1} \\hat {\\mathbf {W}} _ {i, j} = 1 \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.469, + 0.57 + ], + "angle": 0, + "content": "Ground truth information about part segmentation \\(\\mathbf{W}\\) and keypoint locations \\(\\mathbf{Y}\\) are readily available for synthetic data. Hence, we can train the model by directly supervising the predicted keypoint through L2 loss," + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.579, + 0.469, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {k p} = \\left\\| \\hat {\\mathbf {Y}} - \\mathbf {Y} \\right\\| _ {2} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.608, + 0.438, + 0.624 + ], + "angle": 0, + "content": "and predicted segmentation through cross entropy loss," + }, + { + "type": "equation", + "bbox": [ + 0.164, + 0.634, + 0.469, + 0.67 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {s e g}} = - \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {J + 1} \\mathbf {W} _ {i, j} \\log \\left(\\hat {\\mathbf {W}} _ {i, j}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.682, + 0.224, + 0.695 + ], + "angle": 0, + "content": "Overall, we minimize" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.708, + 0.469, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {s y n}} = \\lambda_ {k p} \\mathcal {L} _ {\\mathrm {k p}} + \\lambda_ {\\text {s e g}} \\mathcal {L} _ {\\text {s e g}} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.469, + 0.781 + ], + "angle": 0, + "content": "Notably, in Sec. 4.6 we show that supervision in this stage is not required - ground truth \\(\\mathbf{W}\\) and \\(\\mathbf{Y}\\) can be replaced by surrogate ground truths to achieve comparable results." 
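To make the Stage I objective concrete, the following minimal NumPy sketch restates Eqs. (3)-(5), using the loss weights reported in Sec. 4.1 (\(\lambda_{kp} = 0.5\), \(\lambda_{seg} = 1\)). The function name, array layout, and the clipping constant are illustrative assumptions, not the authors' implementation.

```python
import numpy as np

def stage1_loss(Y_hat, Y, W_hat, W, lam_kp=0.5, lam_seg=1.0):
    """Supervised warm-up objective on synthetic data (Eqs. 3-5).

    Y_hat, Y : (J+1, 3) predicted / ground-truth keypoints.
    W_hat    : (N, J+1) predicted soft part assignments (rows sum to 1).
    W        : (N, J+1) one-hot ground-truth part labels.
    """
    l_kp = np.linalg.norm(Y_hat - Y)                          # Eq. (3): L2 keypoint loss
    l_seg = -np.sum(W * np.log(np.clip(W_hat, 1e-12, 1.0)))   # Eq. (4): cross entropy
    return lam_kp * l_kp + lam_seg * l_seg                    # Eq. (5): weighted sum
```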
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.788, + 0.469, + 0.818 + ], + "angle": 0, + "content": "3.2. Stage II: Self-Supervised Learning on In-the-Wild Data" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.469, + 0.901 + ], + "angle": 0, + "content": "In this stage, we further refine the network using unsupervised losses. The key insight behind the design of the losses is that the human body is composed of limbs, each of which is a rigid part. Therefore, points on a limb move with the limb and should stay roughly at the same location" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.151 + ], + "angle": 0, + "content": "in each limb's local coordinate system. To account for this, we propose flow loss that encourages the points to stay in the same location (despite rotation around the limb) within each limb's local cylindrical coordinate." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.151, + 0.892, + 0.241 + ], + "angle": 0, + "content": "We start by formally defining the key ingredients in the following formulations. In our setup, a human skeleton \\( L \\) is composed of limbs, each of which is connecting two keypoints. A limb \\( l = (y_{a}, y_{b}) \\in L \\) is a line segment connecting the parent \\( y_{a} \\) and child keypoints \\( y_{b} \\) on this limb, and all surface points on this limb have segmentation label \\( a \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.241, + 0.892, + 0.349 + ], + "angle": 0, + "content": "All three proposed losses are in terms of surface points in each predicted limb's local coordinate system. Therefore, we first convert all input points to each limbs' local cylindrical coordinate and compute the radial and axial coordinates. Specifically, we project point \\( p \\in \\mathbf{P} \\) in global coordinate on to vector \\( \\overrightarrow{\\hat{y}_a\\hat{y}_b} \\), and calculate the norm of the projected vector" + }, + { + "type": "equation", + "bbox": [ + 0.598, + 0.355, + 0.892, + 0.389 + ], + "angle": 0, + "content": "\\[\n\\mathbf {z} (p, \\hat {l}) = \\frac {\\left(p - \\hat {y} _ {a}\\right) \\cdot \\left(\\hat {y} _ {b} - \\hat {y} _ {a}\\right)}{\\| \\hat {y} _ {b} - \\hat {y} _ {a} \\| _ {2}} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.397, + 0.796, + 0.415 + ], + "angle": 0, + "content": "and the distance between the point and \\(\\overrightarrow{\\hat{y}_a\\hat{y}_b}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.421, + 0.891, + 0.439 + ], + "angle": 0, + "content": "\\[\n\\mathbf {r} (p, \\hat {l}) = \\| p - \\hat {y} _ {a} - \\mathbf {z} (\\hat {y} _ {b} - \\hat {y} _ {a}, \\hat {l}) \\| _ {2} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.447, + 0.89, + 0.479 + ], + "angle": 0, + "content": "For simplicity, we use \\(\\mathbf{z}_{\\hat{l}}(p)\\) to represent \\(\\mathbf{z}(p,\\hat{l})\\), and \\(\\mathbf{r}_{\\hat{l}}(p)\\) to represent \\(\\mathbf{r}(p,\\hat{l})\\) in the following." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.479, + 0.89, + 0.507 + ], + "angle": 0, + "content": "Next, we describe the formulation of each loss function in detail." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.508, + 0.892, + 0.644 + ], + "angle": 0, + "content": "Flow Loss. 
Flow loss considers the predictions from two consecutive frames and encourages consistency of the radial and altitude components of all points with respect to scene flow - limbs should move between frames in a way to keep radial and axial coordinates for all points constant. Formally, we define the forward and backward flow losses \\((\\mathcal{L}_{ff}\\) and \\(\\mathcal{L}_{bf}\\) respectively) for limbs \\(\\hat{l}^t = (\\hat{y}_a^t,\\hat{y}_b^t)\\) and \\(\\hat{l}^{t + 1} = (\\hat{y}_a^{t + 1},\\hat{y}_b^{t + 1})\\) for predicted keypoints for timestamp \\(t\\) and \\(t + 1\\)." + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.652, + 0.891, + 0.706 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {f f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {r} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right| + \\right. \\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right|) \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.735, + 0.891, + 0.789 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {b f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t + 1} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} t} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {r} _ {\\hat {l} t + 1} \\left(p _ {i} ^ {t + 1}\\right) \\right| + \\right. \\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t + 1}\\right) \\right|) \\tag {9} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.794, + 0.892, + 0.856 + ], + "angle": 0, + "content": "\\(f^t\\) is the forward flow for each point \\(p^t \\in \\mathbf{P}^t\\) and \\(b^{t+1}\\) is the backward flow for each point \\(p^{t+1} \\in \\mathbf{P}^{t+1}\\). We use Neural Scene Flow Prior [14] to estimate flow for two consecutive frames of points. 
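As a concrete reference for Eqs. (6)-(9), the sketch below computes the per-limb cylindrical coordinates and the forward flow term; the backward term is symmetric with the roles of frames \(t\) and \(t+1\) swapped, and Eq. (10) averages the two over all limbs. Here Eq. (7) is read as the perpendicular distance from the point to the limb axis, i.e. the residual after removing the axial projection. Function and argument names are illustrative, not taken from any released code.

```python
import numpy as np

def cyl_coords(p, y_a, y_b):
    """Axial (z) and radial (r) coordinates of point p w.r.t. limb (y_a, y_b), Eqs. (6)-(7)."""
    axis = y_b - y_a
    axis_len = np.linalg.norm(axis)
    z = np.dot(p - y_a, axis) / axis_len                   # Eq. (6): projection onto the limb axis
    r = np.linalg.norm((p - y_a) - z * axis / axis_len)    # Eq. (7): distance to the limb axis
    return z, r

def forward_flow_loss(points_t, flow_t, w_a_t, limb_t, limb_t1):
    """Eq. (8): forward flow consistency for one limb.

    points_t : (N, 3) points of frame t.
    flow_t   : (N, 3) estimated forward scene flow for frame t.
    w_a_t    : (N,) soft assignment of each point to this limb.
    limb_t, limb_t1 : (y_a, y_b) keypoint pairs predicted at t and t+1.
    """
    loss = 0.0
    for p, f, w in zip(points_t, flow_t, w_a_t):
        z_t, r_t = cyl_coords(p, *limb_t)          # coordinates before moving
        z_t1, r_t1 = cyl_coords(p + f, *limb_t1)   # coordinates after applying the flow
        loss += w * (abs(r_t1 - r_t) + abs(z_t1 - z_t))
    return loss / len(points_t)
```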
The overall flow loss for frame \\(t\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.861, + 0.891, + 0.897 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f l o w}} = \\frac {1}{| L |} \\sum_ {\\hat {l} t} \\frac {\\mathcal {L} _ {f f} + \\mathcal {L} _ {b f}}{2} \\tag {10}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1160" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.147, + 0.097, + 0.422, + 0.112 + ], + "angle": 0, + "content": "Stage I: Initialization on Synthetic Data" + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.113, + 0.456, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.096, + 0.86, + 0.112 + ], + "angle": 0, + "content": "Stage II: Unsupervised Learning on In-the-Wild Data" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.117, + 0.885, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.391, + 0.236, + 0.577, + 0.252 + ], + "angle": 0, + "content": "Unsupervised Losses" + }, + { + "type": "image_caption", + "bbox": [ + 0.178, + 0.258, + 0.236, + 0.269 + ], + "angle": 0, + "content": "Flow loss" + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.277, + 0.199, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.279, + 0.348, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.093, + 0.354, + 0.364, + 0.372 + ], + "angle": 0, + "content": "(a) After moving, points stay in the same place (despite rotation around axis) within each limb's local cylindrical coordinate system." + }, + { + "type": "image_caption", + "bbox": [ + 0.428, + 0.259, + 0.543, + 0.27 + ], + "angle": 0, + "content": "Points-to-limb loss" + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.293, + 0.565, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.392, + 0.353, + 0.597, + 0.371 + ], + "angle": 0, + "content": "(b) Minimize points-to-limb distance to encourage the limb to stay within the body." + }, + { + "type": "image_caption", + "bbox": [ + 0.716, + 0.26, + 0.806, + 0.272 + ], + "angle": 0, + "content": "Symmetry loss" + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.298, + 0.832, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.646, + 0.352, + 0.871, + 0.371 + ], + "angle": 0, + "content": "(c) Points are symmetrical around limb. (i.e. points with similar height z have similar radius r)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.383, + 0.89, + 0.409 + ], + "angle": 0, + "content": "Figure 2. Overview of our method. In Stage I, we warm-up the keypoint predictor and body part segmentation predictor on a small synthetic dataset. Then, in Stage II we refine the 3D keypoint predictions on a large in-the-wild dataset with unsupervised losses. The main losses are depicted on the bottom." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.42, + 0.469, + 0.555 + ], + "angle": 0, + "content": "By design, the flow loss value is the same if the radial and axial values for all points in a local coordinate system are the same in consecutive frames. This would happen if a limb in both frames are shifted in their respective orthogonal direction by the same amount. 
Theoretically, it is unlikely to happen for all limbs, but empirically we observe that with flow loss alone the skeleton would move out of the point cloud. Therefore, we need additional losses to make the keypoints stay within the body." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.555, + 0.469, + 0.6 + ], + "angle": 0, + "content": "Points-to-Limb Loss. For a predicted limb \\(\\hat{l} = (\\hat{y}_a, \\hat{y}_b)\\), we want the points on this limb to be close to it. Hence, we introduce a points-to-limb (p2l) loss" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.605, + 0.469, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p 2 l} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\mathbf {d} \\left(p _ {i}, \\hat {l}\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.65, + 0.469, + 0.695 + ], + "angle": 0, + "content": "where \\( \\mathbf{d} \\) is the Euclidean distance function between a point and a line segment. We sum over all points to get the overall points-to-limb loss," + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.694, + 0.469, + 0.729 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} = \\frac {1}{| L |} \\sum_ {\\hat {l}} \\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} ^ {\\hat {l}} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.74, + 0.469, + 0.816 + ], + "angle": 0, + "content": "Symmetry Loss. Symmetry loss encourages the predicted limb \\(\\hat{l}\\) to be in a position such that all points around this limb are roughly symmetrical around it. That is to say, points with similar axial coordinates \\(\\mathbf{z}_{\\hat{l}}\\) should have similar radial values \\(\\mathbf{r}_{\\hat{l}}\\). To that end, we introduce symmetry loss," + }, + { + "type": "equation", + "bbox": [ + 0.147, + 0.84, + 0.469, + 0.872 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s y m} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\left(\\mathbf {r} _ {\\hat {l}} \\left(p _ {i}\\right) - \\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right)\\right) ^ {2} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.886, + 0.469, + 0.902 + ], + "angle": 0, + "content": "where \\(\\bar{\\mathbf{r}}_i(p_i)\\) is the weighted mean of radial values of points" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.421, + 0.738, + 0.436 + ], + "angle": 0, + "content": "with similar axial coordinates as \\(p_i\\)," + }, + { + "type": "equation", + "bbox": [ + 0.506, + 0.445, + 0.892, + 0.485 + ], + "angle": 0, + "content": "\\[\n\\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right) = \\frac {\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right) \\mathbf {r} _ {\\hat {l}} \\left(p _ {j}\\right)}{\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right)} \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.492, + 0.892, + 0.569 + ], + "angle": 0, + "content": "\\(K_{h}\\) is Gaussian kernel with bandwidth \\(h\\), i.e. \\(K_{h}(x,y) = e^{-(\\frac{x - y}{h})^{2}}\\). 
\\(\\hat{\\mathbf{W}}_{i*} \\in \\mathbb{R}^{J}\\) is the \\(i_{th}\\) row of \\(\\hat{\\mathbf{W}}\\), and the dot product \\(\\hat{\\mathbf{W}}_{i*} \\cdot \\hat{\\mathbf{W}}_{j*}\\) measures the similarity of part assignment of point \\(i\\) and \\(j\\), as we want the value of \\(\\bar{r}_i^k\\) to be calculated using the points from the same part as point \\(i\\)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.57, + 0.794, + 0.585 + ], + "angle": 0, + "content": "The overall symmetry loss is over all points," + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.593, + 0.891, + 0.626 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s y m} = \\frac {1}{| L |} \\sum_ {l \\in L} \\mathcal {L} _ {s y m} ^ {l} \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.633, + 0.892, + 0.678 + ], + "angle": 0, + "content": "Joint-to-Part Loss. In addition, we encourage each joint to be close to the center of the points on that part using a joint-to-part loss." + }, + { + "type": "equation", + "bbox": [ + 0.607, + 0.688, + 0.891, + 0.726 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {j 2 p} ^ {j} = \\left\\| \\hat {y} _ {j} - \\frac {\\sum_ {i} \\hat {\\mathbf {W}} _ {i j} p _ {i}}{\\sum_ {i} \\hat {\\mathbf {W}} _ {i j}} \\right\\| _ {2} \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.733, + 0.89, + 0.749 + ], + "angle": 0, + "content": "We sum over all joints to get the overall joint-to-part loss." + }, + { + "type": "equation", + "bbox": [ + 0.636, + 0.769, + 0.891, + 0.803 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {j 2 p} = \\frac {1}{J} \\sum_ {j} \\mathcal {L} _ {j 2 p} ^ {j} \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.812, + 0.891, + 0.87 + ], + "angle": 0, + "content": "Note that although the ground truth location of joints are not in the center of points on the corresponding part, keeping this loss is essential in making the unsupervised training more robust." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.87, + 0.891, + 0.901 + ], + "angle": 0, + "content": "In practice, jointly optimizing \\(\\hat{\\mathbf{W}}\\) and \\(\\hat{\\mathbf{Y}}\\) in Stage II leads to unstable training curves. Hence, we use the pre-trained" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "1161" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.119, + 0.462, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.108, + 0.27, + 0.438, + 0.283 + ], + "angle": 0, + "content": "Figure 3. Effect of unsupervised losses on perturbed skeleton." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.307, + 0.47, + 0.367 + ], + "angle": 0, + "content": "segmentation branch from Stage I to run segmentation inference to get the segmentation labels on all of the training samples in the beginning of Stage II, and \\(\\hat{\\mathbf{W}}\\) is the one-hot encoding of the predicted segmentation labels." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.367, + 0.469, + 0.442 + ], + "angle": 0, + "content": "Segmentation Loss. Lastly, we notice that keeping the segmentation loss at this stage further regularizes the backbone and leads to better quantitative performance. We use the inferred segmentation \\(\\hat{\\mathbf{W}}\\) as the surrogate ground truth and minimize cross entropy as in Eq. (4)." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.442, + 0.469, + 0.47 + ], + "angle": 0, + "content": "Training objective. The overall training objective during Stage II is to minimize" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.485, + 0.469, + 0.521 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} = \\lambda_ {f l o w} \\mathcal {L} _ {f l o w} + \\lambda_ {\\mathrm {p 2 l}} \\mathcal {L} _ {\\mathrm {p 2 l}} + \\lambda_ {s y m} \\mathcal {L} _ {s y m} \\\\ + \\lambda_ {\\mathrm {j} 2 \\mathrm {p}} \\mathcal {L} _ {\\mathrm {j} 2 \\mathrm {p}} + \\lambda_ {\\mathrm {s e g}} \\mathcal {L} _ {\\mathrm {s e g}} \\tag {18} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.527, + 0.47, + 0.618 + ], + "angle": 0, + "content": "To illustrate the effect of the three unsupervised losses \\((\\mathcal{L}_{flow}, \\mathcal{L}_{p2l}\\) and \\(\\mathcal{L}_{sym})\\), we show the result of applying these losses on a perturbed ground truth skeleton (Fig. 3). As shown, the proposed unsupervised losses effectively moves the perturbed skeleton to locations that are closer to ground truth." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.629, + 0.21, + 0.646 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.652, + 0.295, + 0.669 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.473, + 0.903 + ], + "angle": 0, + "content": "The predictor model \\( f \\) consists of a transformer backbone with fully connected layers for predicting joints and segmentation respectively. We use the same transformer backbone as in HUM3DIL [29]. A fully connected layer is applied to the output of transformer head to regress the predicted \\( \\hat{W} \\) and \\( \\hat{Y} \\) respectively. There are 352,787 trainable parameters in total. We set the maximum number of input LiDAR points to 1024, and zero-pad or downsample the point clouds with fewer or more number of points. The flow is obtained using a self-supervised test-time optimization method [14]. The network is trained on 4 TPUs. We train Stage I for 200 epochs and Stage II for 75 epochs, both with batch size 32, base learning rate of \\( 1e - 4 \\), and exponential decay 0.9. Stage I and II each finishes in about 6 hours. The loss weights in Eq. (5) are \\( \\lambda_{kp} = 0.5 \\) and \\( \\lambda_{seg} = 1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.893, + 0.138 + ], + "angle": 0, + "content": "The loss weights in Eq. (18) are \\(\\lambda_{flow} = 0.02\\), \\(\\lambda_{p2l} = 0.01\\), \\(\\lambda_{sym} = 0.5\\), \\(\\lambda_{j2p} = 2\\), and \\(\\lambda_{seg} = 0.5\\). The kernel bandwidth Eq. (14) is 0.1." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.153, + 0.692, + 0.168 + ], + "angle": 0, + "content": "4.2. Dataset and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.175, + 0.894, + 0.339 + ], + "angle": 0, + "content": "We construct a synthetic dataset with 1,000 sequences of 16-frame raycasted point clouds for Stage I training. Each sequence starts with the same standing pose and ends in a random pose. We find that data augmentation is essential in Stage I training. To simulate real-world noisy background and occlusion, we apply various data augmentations to the synthetic data, including randomly downsample, random mask, add ground clusters, add background clusters, add a second person, add noise to each point, scale the person. 
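For illustration, a rough sketch of a few of these augmentations is given below (random downsampling, per-point noise, scaling of the person, and added ground clutter); all parameter values are placeholders, and the masking, background-cluster and second-person augmentations are omitted for brevity.

```python
import numpy as np

rng = np.random.default_rng(0)

def augment_person(points, keep_frac=0.7, noise_std=0.01, scale_range=(0.9, 1.1)):
    """Apply a subset of the listed augmentations to one synthetic person (N, 3)."""
    pts = points[rng.random(len(points)) < keep_frac]          # random downsample
    pts = pts + rng.normal(scale=noise_std, size=pts.shape)    # per-point noise
    pts = pts * rng.uniform(*scale_range)                      # scale the person
    ground_z = pts[:, 2].min()                                 # drop a small ground cluster nearby
    clutter = rng.uniform([-1.0, -1.0, 0.0], [1.0, 1.0, 0.05], size=(50, 3))
    clutter[:, 2] += ground_z
    return np.vstack([pts, clutter])
```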
We include examples of augmented synthetic data in Fig. 4." + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.342, + 0.862, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.548, + 0.893, + 0.585 + ], + "angle": 0, + "content": "Figure 4. Data augmentations applied to the synthetic point clouds (colored by ground truth segmentation labels). Ground truth skeletons are shown in purple. Background points are in blue." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.587, + 0.892, + 0.74 + ], + "angle": 0, + "content": "In Stage II, we train on the entire Waymo Open dataset (WOD) training set (with around 200,000 unlabeled samples). As the official WOD testing subset is hidden from the public, we randomly choose \\(50\\%\\) of the validation set as the validation split, and the rest as the test split for benchmarking. We report average Mean Per Joint Position Error (MPJPE) on test set at the end of each stage. Formally, for a single sample, let \\(\\hat{Y} \\in \\mathcal{R}^{J \\times 3}\\) be the predicted keypoints, \\(Y \\in \\mathcal{R}^{J \\times 3}\\) the ground truth keypoints, and \\(v \\in \\{0,1\\}^J\\) the visibility indicator annotated per keypoint." + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.756, + 0.892, + 0.793 + ], + "angle": 0, + "content": "\\[\n\\operatorname {M P J P E} (Y, \\hat {Y}) = \\frac {1}{\\sum_ {j} v _ {j}} \\sum_ {j \\in [ J ]} v _ {j} \\| y _ {j} - \\hat {y} \\| _ {2} \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.893, + 0.9 + ], + "angle": 0, + "content": "Note that in this Stage, we do Hungarian matching between the predicted and annotated keypoints per frame, and then report MPJPE on matched keypoints. We report matched MPJPE because the method is intended for scenarios where correspondence between keypoints in the unlabeled training data and downstream data is unknown." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1162" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.094, + 0.416, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.459, + 0.472, + 0.498 + ], + "angle": 0, + "content": "Figure 5. Visualizations of predictions on WOD at the end of Stage I and Stage II. Points are colored by predicted segmentation labels. Ground truth keypoints are in green and predicted keypoints and skeletons are in red." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.513, + 0.172, + 0.527 + ], + "angle": 0, + "content": "4.3. Results" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.535, + 0.47, + 0.656 + ], + "angle": 0, + "content": "In this section we perform quantitative evaluation of GC-KPL at the end of Stage I and II in Tab. 2. Qualitative results are in Fig. 5. As shown, after first stage where we train on a synthetic dataset constructed from posed body models with carefully chosen data augmentations, we are able to predict reasonable human keypoints on in-the-wild point clouds. The second stage our novel unsupervised losses further refine the predicted keypoints." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.669, + 0.47, + 0.701 + ], + "angle": 0, + "content": "4.4. 
Downstream Task: Few-shot 3D Keypoint Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.706, + 0.47, + 0.856 + ], + "angle": 0, + "content": "In this experiment, we show that the backbone of our model benefits from unsupervised training on large amount of unlabeled data, and can be useful for downstream finetuning tasks. We start from our pre-trained backbone after Stage II, and fine-tune with annotated training samples from WOD by minimizing mean per joint error. We include few-shot experiments where we fine-tune with a extremely small amount of data (10% and 1% of the training set), to represent challenging scenarios where there is a limited amount of annotated data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "We include the LiDAR-only version of HUM3DIL (a state-of-the-art model on WOD) [29] as a strong baseline. The quantitative results (Tab. 1) suggest that our back" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.197 + ], + "angle": 0, + "content": "bone learns useful information from the unlabeled in-the-wild data and enables a significant performance boost on the downstream tasks. Compared to a randomly initialized backbone as used in HUM3DIL, our backbone leads to over \\(2\\mathrm{cm}\\) of decrease in MPJPE in downstream fine-tuning experiments, which is a significant improvement for the 3D human keypoint estimation task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.197, + 0.892, + 0.256 + ], + "angle": 0, + "content": "We visualize the predicted keypoints under different data regime in Fig. 6. As shown, models fine-tuned from our backbone is able to capture fine details on the arms and overall produces more accurate results than HUM3DIL." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.256, + 0.893, + 0.406 + ], + "angle": 0, + "content": "To the best of our knowledge, there does not exist previous works on completely unsupervised human keypoint estimation from point clouds. We additionally experiment with using a readout layer on top of the features learned by a state-of-the-art point cloud SSL method 3D-OAE [30], but the MPJPE is \\(15\\mathrm{cm}\\) (compared to \\(10.10\\mathrm{cm}\\) from GC-KPL). Hence we consider the baselines we adopt here strong and complete. In Sec. 4.6, we further challenge our method by comparing to the domain adaptation setup and demonstrate that the performance of GC-KPL is still superior." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.414, + 0.686, + 0.43 + ], + "angle": 0, + "content": "4.5. Domain adaptation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.587 + ], + "angle": 0, + "content": "In the configuration where we use ground truth labels in Stage I and unsupervised training in Stage II could be seen as a domain adaption (DA) technique. Thus it is useful to compare proposed method with a commonly-used domain adaptation method. We train the same backbone model using a mix of real and synthetic data and a gradient reversal layer (aka DA loss) [5] to help the network to learn domain invariant keypoint features. Results in Tab. 3 demonstrate that GC-KPL yields superior accuracy compared with the DA method (MPJPE 10.1 vs \\(11.35\\mathrm{cm}\\))." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.594, + 0.611, + 0.608 + ], + "angle": 0, + "content": "4.6. 
Ablations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.615, + 0.892, + 0.72 + ], + "angle": 0, + "content": "Effect of using GT bounding boxes in pre-processing. We cropped human point clouds from the entire scene by including only points within GT bounding boxes. We also conducted experiments where we train with detected bounding boxes from raw LiDAR scans using a SoTA 3D detector. Results suggest that GC-KPL is robust to noise in 3D detection, as there were no noticeable changes in metrics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.721, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Effect of synthetic dataset size. In our method Stage I serves as a model initialization step where we show that training on a small synthetic dataset (16,000 samples) with properly chosen data augmentations is suffice for the model to learn useful semantics. We further investigate the effect of synthetic dataset size during Stage I. We experiment with larger dataset sizes (160,000 and 1,600,000 samples) and observe that the effect of increasing synthetic dataset size is insignificant on \\(\\mathrm{MPJPE}_{\\mathrm{matched}}\\) at the end of Stage I - it decreased from \\(17.7\\mathrm{cm}\\) to \\(17.6\\mathrm{cm}\\). Lack of a notable improvements for larger dataset sizes is likely due to limited variability of generated poses in synthetic data (see Supple" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1163" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.09, + 0.42, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.412, + 0.385, + 0.423 + ], + "angle": 0, + "content": "(a) Fine-tune on \\(100\\%\\) training set" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.091, + 0.628, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.443, + 0.412, + 0.593, + 0.423 + ], + "angle": 0, + "content": "(b) Fine-tune on \\(10\\%\\) training set" + }, + { + "type": "image", + "bbox": [ + 0.632, + 0.091, + 0.844, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.412, + 0.813, + 0.423 + ], + "angle": 0, + "content": "(c) Fine-tune on \\(1\\%\\) training set" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.439, + 0.894, + 0.465 + ], + "angle": 0, + "content": "Figure 6. Predicted keypoints from fine-tuning with different amount of annotated data. The points are colored by predicted segmentation labels by our model. Predicted keypoints are shown in red." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.478, + 0.891, + 0.6 + ], + "angle": 0, + "content": "
MethodBackboneStage I supervised1% training set MPJPE cm. (gain)10% training set MPJPE cm. (gain)100% training set MPJPE cm. (gain)
HUM3DIL [29]Randomly initialized19.5716.3612.21
Pre-trained on synthetic only18.52 (-1.05)15.10 (-1.26)11.27 (-0.94)
GC-KPLPre-trained on 5,000 WOD-train17.87 (-1.70)14.51 (-1.85)10.73 (-1.48)
Pre-trained on 200,000 WOD-train17.80 (-1.77)14.30 (-2.06)10.60 (-1.61)
Pre-trained on 200,000 WOD-train17.20 (-2.37)13.40 (-2.96)10.10 (-2.11)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.609, + 0.893, + 0.634 + ], + "angle": 0, + "content": "Table 1. Downstream fine-tuning results. Check marks in \"Stage I supervised\" mean that we use ground truth part labels in Stage I, otherwise we use KMeans labels." + }, + { + "type": "table", + "bbox": [ + 0.132, + 0.643, + 0.418, + 0.722 + ], + "angle": 0, + "content": "
Training data | MPJPE_matched (↓)
Synthetic only | 17.70
5,000 WOD-train | 14.64
200,000 WOD-train | 13.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.141, + 0.725, + 0.405, + 0.74 + ], + "angle": 0, + "content": "Table 2. Unsupervised learning (Stage II) results." + }, + { + "type": "table", + "bbox": [ + 0.11, + 0.757, + 0.439, + 0.835 + ], + "angle": 0, + "content": "
Domain distributionDA lossMPJPE (↓)
100% real12.21
50/50% real/synthetic12.08
50/50% real/synthetic11.35
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.845, + 0.468, + 0.869 + ], + "angle": 0, + "content": "Table 3. Unsupervised domain adaptation results evaluated on WOD validation set." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.647, + 0.628, + 0.66 + ], + "angle": 0, + "content": "mental for details)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.661, + 0.892, + 0.889 + ], + "angle": 0, + "content": "Effect of using ground truths on synthetic data. While our described pipeline does not use any kind of manual labels, we do use ground truth segmentation and keypoints on synthetic dataset in Stage I because they are readily available. Here we further experiment with a variation where we do not use any kind of ground truths in Stage I (first row in Tab. 4). Instead, we use KMeans clusters and cluster centers as surrogate ground truths for model initialization, similar to [1]. Note that we are able to establish correspondence between KMeans clusters from different samples due to the fact that in our data generation process, each synthetic sequence starts with the same starting standing pose. Hence, we can run KMeans clustering on the starting pose that is shared among all sequences, and for subsequent samples within each sequence, we do Hungarian matching using" + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1164" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.078, + 0.089, + 0.895, + 0.309 + ], + "angle": 0, + "content": "
Stage IStage II
No.Exp.\\( \\mathcal{L}_{kp} \\)\\( \\mathcal{L}_{seg} \\)\\( MPJPE_{matched} \\)\\( \\mathcal{L}_{j2p} \\)\\( \\mathcal{L}_{seg} \\)\\( \\mathcal{L}_{sym} \\)\\( \\mathcal{L}_{p2l} \\)\\( \\mathcal{L}_{flow} \\)\\( MPJPE_{matched} \\)
1Effect of using KMeans labels in Stage I19.214.5
2Effect of \\( \\mathcal{L}_{kp} \\) in Stage IN/A14.2
315.0
4Effect of warmup losses in Stage II14.2
515.2
630.1
715.6
825.7
9Effect of unsupervised losses in Stage II14.3
1014.9
1114.4
1214.9
Full model (GC-KPL)17.713.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.317, + 0.893, + 0.345 + ], + "angle": 0, + "content": "Table 4. Ablations studies on the effect of individual loss term in our method. Experiments 3 through 12 are using both losses in Stage I. Full model is using GT labels for Stage I." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.355, + 0.47, + 0.505 + ], + "angle": 0, + "content": "inter-cluster Chamfer distance to establish correspondence between clusters from consecutive frames. We observe that although initializing with surrogate ground truths leads to slightly inferior performance in Stage I, after training with the losses in Stage II the drop in performance is less visible. Overall, downstream fine-tuning performance is comparable to our best model (10.6/14.3/17.8 vs. 10.1/13.4/17.2 cm when fine-tuned on \\(100\\% / 10\\% / 1\\%\\) of the data, see Tab. 1). This experiment suggests that method does not require any kind of ground truths, even during initialization stage." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.473, + 0.824 + ], + "angle": 0, + "content": "Effect of Losses. In this section we further investigate the effect of each component in our pipeline (Tab. 4). First, we note that \\(\\mathcal{L}_{seg}\\) in Stage I is essential because we need an initialized segmentation model to get the body part assignment for each point in order to calculate the losses in Stage II. Therefore, we only experiment with a variation of Stage I training without \\(\\mathcal{L}_{kp}\\), and we observe that \\(\\mathcal{L}_{kp}\\) is useful in warming up the backbone for later stages. Next, we take the backbone from Stage I (trained with both \\(\\mathcal{L}_{kp}\\) and \\(\\mathcal{L}_{seg}\\)), and study the effect of individual losses in Stage II. Experiments No. \\(3/4/5\\) show that it is helpful to include \\(\\mathcal{L}_{j2p}\\) and \\(\\mathcal{L}_{seg}\\) while having all other three unsupervised losses. In experiments \\(6/7/8\\) we take out \\(\\mathcal{L}_{j2p}\\) and \\(\\mathcal{L}_{seg}\\), and investigate the effect of individual unsupervised losses. As shown the training becomes rather unstable if we further eliminate any of the three losses. We observe qualitatively that the metric worsens drastically because the limbs quickly move out of the human body. Experiments No. \\(3/4/5\\) suggest that \\(\\mathcal{L}_{j2p}\\) and \\(\\mathcal{L}_{seg}\\) are useful regularizers that make sure the limbs stay within the body, and the unsupervised losses further improve the performance by refining the keypoint location." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.849, + 0.342, + 0.865 + ], + "angle": 0, + "content": "4.7. Limitations and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The task of keypoint location could be considered as a dual problem for semantic segmentation. In this work we" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.355, + 0.892, + 0.399 + ], + "angle": 0, + "content": "use a simple segmentation network based on the same architecture as our keypoint estimation model. Using a superior segmentation model could lead to further improvements." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.4, + 0.893, + 0.503 + ], + "angle": 0, + "content": "The proposed flow loss depends on quality of the estimated flow of LiDAR points. 
In this work we used a simple but reasonable method to estimate flow between two frames of LiDAR points called Neural Scene Flow prior [14]. Quality of the unsupervised keypoint estimation could be improved by using a more advanced flow estimator tailored for point clouds on human body surfaces." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.894, + 0.579 + ], + "angle": 0, + "content": "Lastly, we use a part of the HUM3DIL [29] model which takes only LiDAR point cloud as input. The full HUM3DIL model was designed for multi-modal inputs and attains better performance. Thus, another interesting direction is to leverage multi-modal inputs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.586, + 0.62, + 0.602 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.895, + 0.883 + ], + "angle": 0, + "content": "In this work, we approached the problem of 3D human pose estimation using points clouds in-the-wild, introduced a method (GC-KPL) for learning 3D human keypoints from point clouds without using any manual 3D keypoint annotations. We shown that the proposed novel losses are effective for unsupervised keypoint learning on Waymo Open Dataset. Through downstream experiments we demonstrated that GC-KPL can additionally serve as a self-supervised representation method to learn from large quantity of in-the-wild human point clouds. In addition, GC-KPL compares favorably with a commonly used domain adaptation technique. The few-shot experiments empirically verified that using only \\(10\\%\\) of available 3D keypoint annotation the fine-tuned model reached comparable performance to the state-of-the-art model training on the entire dataset. These results opens up exciting possibility to utilize massive amount of sensor data in autonomous driving to improve pedestrian 3D keypoint estimation." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "1165" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In European Conference on Computer Vision, 2018. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.172, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Mathilde Caron, Piotr Bojanowski, Julien Mairal, and Armand Joulin. Unsupervised pre-training of image features on non-curated data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2959-2968, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.243, + 0.471, + 0.298 + ], + "angle": 0, + "content": "[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.301, + 0.471, + 0.37 + ], + "angle": 0, + "content": "[4] Michael Fürst, Shriya TP Gupta, René Schuster, Oliver Wasenmüller, and Didier Stricker. Hperl: 3d human pose estimation from rgb and lidar. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 7321-7327. IEEE, 2021. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.373, + 0.471, + 0.455 + ], + "angle": 0, + "content": "[5] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 1180-1189, Lille, France, 07-09 Jul 2015. PMLR. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.458, + 0.47, + 0.514 + ], + "angle": 0, + "content": "[6] Albert Haque, Boya Peng, Zelun Luo, Alexandre Alahi, Serena Yeung, and Li Fei-Fei. Towards viewpoint invariant 3d human pose estimation. In European conference on computer vision, pages 160–177. Springer, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.516, + 0.471, + 0.584 + ], + "angle": 0, + "content": "[7] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.587, + 0.471, + 0.655 + ], + "angle": 0, + "content": "[8] Siyuan Huang, Yichen Xie, Song-Chun Zhu, and Yixin Zhu. Spatio-temporal self-supervised representation learning for 3d point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6535-6545, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.659, + 0.47, + 0.714 + ], + "angle": 0, + "content": "[9] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Unsupervised learning of object landmarks through conditional image generation. Advances in neural information processing systems, 31, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.47, + 0.785 + ], + "angle": 0, + "content": "[10] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Self-supervised learning of interpretable keypoints from unlabelled videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8787-8797, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.47, + 0.87 + ], + "angle": 0, + "content": "[11] Wonhui Kim, Manikandasriram Srinivasan Ramanagopal, Charles Barto, Ming-Yuan Yu, Karl Rosaen, Nick Goumas, Ram Vasudevan, and Matthew Johnson-Roberson. Pedx: Benchmark dataset for metric 3-d pose estimation of pedestrians in complex urban intersections. IEEE Robotics and Automation Letters, 4(2):1940-1947, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[12] Jiaxin Li and Gim Hee Lee. Usip: Unsupervised stable interest point detection from 3d point clouds. In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "the IEEE/CVF international conference on computer vision, pages 361-370, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[13] Jialian Li, Jingyi Zhang, Zhiyong Wang, Siqi Shen, Chenglu Wen, Yuexin Ma, Lan Xu, Jingyi Yu, and Cheng Wang. Lidarcap: Long-range marker-less 3d human motion capture with lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20502-20512, 2022. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[14] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.25, + 0.892, + 0.305 + ], + "angle": 0, + "content": "[15] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. ACM transactions on graphics (TOG), 34(6):1-16, 2015. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.307, + 0.892, + 0.376 + ], + "angle": 0, + "content": "[16] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3d joints for re-posing of articulated objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3677–3687, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.378, + 0.892, + 0.42 + ], + "angle": 0, + "content": "[17] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.421, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[18] Luca Schmidtke, Athanasios Vlontzos, Simon Ellershaw, Anna Lukens, Tomoki Arichi, and Bernhard Kainz. Unsupervised human pose estimation through transforming shape templates. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2484-2494, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.505, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[19] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.576, + 0.892, + 0.659 + ], + "angle": 0, + "content": "[20] Jennifer J Sun, Serim Ryou, Roni H Goldshmid, Brandon Weissboud, John O Dabiri, David J Anderson, Ann Kennedy, Yisong Yue, and Pietro Perona. Self-supervised keypoint discovery in behavioral videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2171-2180, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.661, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[21] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[22] Supasorn Suwajanakorn, Noah Snavely, Jonathan J Tompson, and Mohammad Norouzi. Discovery of latent 3d keypoints via end-to-end geometric reasoning. Advances in neural information processing systems, 31, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[23] Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 9782-9792, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[24] Yuefan Wu, Zeyuan Chen, Shaowei Liu, Zhongzheng Ren, and Shenlong Wang. Casa: Category-agnostic skeletal ani" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1166" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "mal reconstruction. arXiv preprint arXiv:2211.03568, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.189 + ], + "angle": 0, + "content": "[25] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In European conference on computer vision, pages 574-591. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.469, + 0.248 + ], + "angle": 0, + "content": "[26] Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. Foldingnet: Point cloud auto-encoder via deep grid deformation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 206–215, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.25, + 0.469, + 0.317 + ], + "angle": 0, + "content": "[27] Yang You, Wenhai Liu, Yanjie Ze, Yong-Lu Li, Weiming Wang, and Cewu Lu. Ukpgan: A general self-supervised keypoint detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17042-17051, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.32, + 0.469, + 0.387 + ], + "angle": 0, + "content": "[28] Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-bert: Pre-training 3d point cloud transformers with masked point modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19313-19322, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.469, + 0.459 + ], + "angle": 0, + "content": "[29] Andrei Zanfir, Mihai Zanfir, Alex Gorban, Jingwei Ji, Yin Zhou, Dragomir Anguelov, and Cristian Sminchisescu. Hum3dil: Semi-supervised multi-modal 3d humanpose estimation for autonomous driving. In 6th Annual Conference on Robot Learning, 2022. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.461, + 0.469, + 0.528 + ], + "angle": 0, + "content": "[30] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.531, + 0.469, + 0.586 + ], + "angle": 0, + "content": "[31] Zihao Zhang, Lei Hu, Xiaoming Deng, and Shihong Xia. Weakly supervised adversarial learning for 3d human pose estimation from point clouds. IEEE transactions on visualization and computer graphics, 26(5):1851-1859, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.588, + 0.469, + 0.682 + ], + "angle": 0, + "content": "[32] Jingxiao Zheng, Xinwei Shi, Alexander Gorban, Junhua Mao, Yang Song, Charles R Qi, Ting Liu, Visesh Chari, Andre Cornman, Yin Zhou, et al. 
Multi-modal 3d human pose estimation with 2d weak supervision in autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4478-4487, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.686, + 0.469, + 0.739 + ], + "angle": 0, + "content": "[33] Junsheng Zhou, Xin Wen, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Self-supervised point cloud representation learning with occlusion auto-encoder. arXiv preprint arXiv:2203.14084, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.743, + 0.469, + 0.782 + ], + "angle": 0, + "content": "[34] Yufan Zhou, Haiwei Dong, and Abdulmotaleb El Saddik. Learning to estimate 3d human pose from point cloud. IEEE Sensors Journal, 20(20):12334-12342, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.782 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.484, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "1167" + } + ] +] \ No newline at end of file diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_origin.pdf b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..faf39f6e6caac4ed82157dea880e5eed021d51d6 --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/833a9b3e-a176-4092-b5fd-3122723612f3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac3548a8e7db80317476eb12c67911ceeeb94789e21e40d3113c3724ac83806f +size 1349055 diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/full.md b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/full.md new file mode 100644 index 0000000000000000000000000000000000000000..dfa3729e319a9c5b4bcb41231ebbc71655b759fd --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/full.md @@ -0,0 +1,354 @@ +# 3D Human Keypoints Estimation from Point Clouds in the Wild without Human Labels + +Zhenzhen Weng $^{1*}$ Alexander S. Gorban $^{2}$ Jingwei Ji $^{2}$ Mahyar Najibi $^{2}$ Yin Zhou $^{2}$ Dragomir Anguelov $^{2}$ + +$^{1}$ Stanford University $^{2}$ Waymo + +# Abstract + +Training a 3D human keypoint detector from point clouds in a supervised manner requires large volumes of high quality labels. While it is relatively easy to capture large amounts of human point clouds, annotating 3D keypoints is expensive, subjective, error prone and especially difficult for long-tail cases (pedestrians with rare poses, scooterists, etc.). In this work, we propose GC-KPL - Geometry Consistency inspired Key Point Leaning, an approach for learning 3D human joint locations from point clouds without human labels. We achieve this by our novel unsupervised loss formulations that account for the structure and movement of the human body. We show that by training on a large training set from Waymo Open Dataset [21] without any human annotated keypoints, we are able to achieve reasonable performance as compared to the fully supervised approach. 
Further, the backbone benefits from the unsupervised training and is useful in downstream few-shot learning of keypoints, where fine-tuning on only 10 percent of the labeled training data gives comparable performance to fine-tuning on the entire set. We demonstrated that GC-KPL outperforms by a large margin over SoTA when trained on entire dataset and efficiently leverages large volumes of unlabeled data. + +# 1. Introduction + +Estimation of human pose in 3D is an important problem in computer vision and it has a wide range of applications including AR/VR, AI-assisted healthcare, and autonomous driving [4,29,32]. For autonomous systems, being able to perceive human poses from sensor data (e.g. Li-DAR point clouds) is particularly essential to reason about the surrounding environment and make safe maneuvers. + +Despite the high level of interest in human pose estimation in the wild, only few papers approached outdoor 3D keypoint detection using point cloud. A main reason is that + +![](images/dc011686fedb76da0d66d44f198e418de7265f187c27f44ef07788a14963edf1.jpg) +Figure 1. We present GC-KPL, a novel method for learning 3D human keypoints from in-the-wild point clouds without any human labels. We propose to learn keypoint locations using unsupervised losses that account for the structure and movement of the human body. The backbone learns useful semantics from unsupervised learning and can be used in downstream fine-tuning tasks to boost the performance of 3D keypoint estimation. + +training a pedestrian pose estimation model requires large amount of high quality in-the-wild data with ground truth labels. Annotating 3D human keypoints on point cloud data is expensive, time consuming and error prone. Although there are a few existing point cloud datasets with ground truth human poses [11, 13, 21], they are limited in terms of the quantity of the 3D annotations and diversity of the data. Therefore, fully-supervised human keypoint detectors trained on such datasets do not generalize well for long tail cases. For this reason, previous approaches on pedestrian 3D keypoint estimation have mainly focused on utilizing 2D weak supervision [4, 32] which is easier to obtain, or leveraging signals from others modalities (e.g. RGB, depth) [29]. Nonetheless, there is a lot of useful information in the large amount of unlabeled LiDAR data that previous works on human pose estimation have not made an effort to utilize. + +In this work, we propose a novel and effective method for learning 3D human keypoints from in-the-wild point clouds without using any manual labeled 3D keypoints. Our approach is built on top of the key observation that human skeletons are roughly centered within approximately rigid body parts and that the location and movement of the surface points should explain the movement of the skeleton and vice versa. To that end, we design novel unsupervised loss terms for learning locations of the 3D keypoints/skeleton within human point clouds which correspond to 3D locations of major joints of human body. + +In the proposed method, we first train a transformer-based regression model for predicting keypoints and a semantic segmentation model for localizing body parts on a synthetic data constructed from randomly posed SMPL human body model [15]. Then, we train on the entire Waymo Open Dataset [21] without using any 3D ground-truth annotation of human keypoints. 
Through unsupervised training, keypoint predictions are refined and the backbone learns useful information from large amount of unannotated data. + +In summary, we make the following contributions: + +- We present GC-KPL, a method for learning human 3D keypoints for in-the-wild point clouds without any manual keypoint annotations. +- Drawing insight from the structure and movement of the human body, we propose three effective and novel unsupervised losses for refining keypoints. We show that the proposed losses are effective for unsupervised keypoint learning on Waymo Open Dataset. +- Through downstream fine-tuning/few-shot experiments, we demonstrate that GC-KPL can be used as unsupervised representation learning for human point clouds, which opens up the possibility to utilize a practically infinite amounts of sensor data to improve human pose understanding in autonomous driving. + +# 2. Related Work + +# 2.1. 3D Human Keypoint Estimation from Points Clouds + +There have been a few works [19, 31, 34] about estimating 3D keypoints from clean and carefully-curated point clouds [6], but 3D keypoint estimation from in-the-wild point clouds is a much less studied problem. Due to the lack of ground-truth 3D human pose annotations paired with Li-DAR data, there has not been a lot of works on 3d human keypoint estimation from LiDAR information. Among the few point cloud datasets with 3D keypoint annotations, Li-DARHuman26M [13] captures long-range human motions with ground truth motion acquired by the IMU system and pose information derived from SMPL models fitted into point clouds. It is among the first few datasets which have + +LiDAR point clouds synchronized with RGB images, but SMPL shape parameters are same for all 13 subjects and it does not feature in-the-wild pedestrians where there could be much more background noise and occlusion. PedX [11] offers 3D automatic pedestrian annotations obtained using model fitting on different modalities, gathered effectively from a single intersection with only 75 pedestrians (the second intersection has only 218 frames, labels for the third scene were not released). Waymo Open Dataset [21] has more than 3,500 subjects from over 1,000 different in-the-wild scenes with high-quality 2D and 3D manual annotations. Despite the existence of these datasets, the few works on 3D pose estimation from point clouds mostly rely on weak supervision. HPERL model [4] trains on 2D ground-truth pose annotations and uses a reprojection loss for the 3D pose regression task. Multi-modal model in [32] uses 2D labels on RGB images as weak supervision, and creates pseudo ground-truth 3D joint positions from the projection of annotated 2D joints. HUM3DIL [29] leverages RGB information with LiDAR points, by computing pixel-aligned multi-modal features with the 3D positions of the LiDAR signal. In contrast, our method does not use any RGB information or weak supervision. + +# 2.2. Unsupervised Keypoint Localization + +There are a number of works that aim to recover 3D keypoints using self-supervised geometric reasoning [12, 22], but they are limited to rigid objects. More recent unsupervised methods work for articulated objects from monocular RGB data [9, 10, 10, 18, 20, 24], multi-view data [16], or point clouds [27], where authors suggest to condition on the predicted keypoints and train a conditional generative model to supervise the keypoints through reconstruction losses. 
We propose a simpler pipeline where we apply our novel unsupervised losses to the predicted keypoints directly and do not require additional models besides the keypoint predictor itself. + +# 2.3. Self-supervised Learning for Point Clouds + +Self-supervised representation learning has proven to be remarkably useful in language [3, 17] and 2D vision tasks [2, 7]. As LiDAR sensors become more affordable and common, there has been an increasing amount of research interest in self-supervised learning on 3D point clouds. Previous works proposed to learn representations of object or scene level point clouds through contrastive learning [8, 25, 30] or reconstruction [23, 26, 28, 33], which is useful in downstream classification or segmentation tasks. In contrast, our supervision signals come from the unique structure of the human body and our learned backbone is particularly useful in downstream human keypoint estimation tasks. + +# 3. Method + +In this section, we describe our complete training pipeline which contains two stages. In the first stage, we initialize the model parameters on a synthetic dataset (Sec. 3.1). The purpose of Stage I is to warm-up the model with reasonable semantics. The second stage generalizes the model to the real-world data. In this stage, we use our unsupervised losses to refine the keypoint predictions on in-the-wild point clouds (Sec. 3.2). An overview of our pipeline is in Fig. 2. + +# 3.1. Stage I: Initialization on Synthetic Data + +In this stage, we initialize the model on a synthetic dataset that is constructed by ray casting onto randomly posed human mesh models (SMPL [15]). We describe details of synthetic data generation in Supplementary. + +The goal of this stage is to train a model $f$ that takes a point cloud of a human $\mathbf{P} \in \mathbb{R}^{N \times 3}$ and outputs 3D locations of keypoints $\hat{\mathbf{Y}} \in \mathbb{R}^{(J + 1) \times 3}$ , as well as soft body part assignments (or part segmentation) $\hat{\mathbf{W}} \in \mathbb{R}^{N \times (J + 1)}$ that contains the probability of each point $i$ belonging to body part $j \in [J]$ or the background. + +$$ +\{\hat {\mathbf {Y}}, \hat {\mathbf {W}} \} = f (\mathbf {P}) \tag {1} +$$ + +$$ +\forall i \in [ N ], \sum_ {j = 1} ^ {J + 1} \hat {\mathbf {W}} _ {i, j} = 1 \tag {2} +$$ + +Ground truth information about part segmentation $\mathbf{W}$ and keypoint locations $\mathbf{Y}$ are readily available for synthetic data. Hence, we can train the model by directly supervising the predicted keypoint through L2 loss, + +$$ +\mathcal {L} _ {k p} = \left\| \hat {\mathbf {Y}} - \mathbf {Y} \right\| _ {2} \tag {3} +$$ + +and predicted segmentation through cross entropy loss, + +$$ +\mathcal {L} _ {\text {s e g}} = - \sum_ {i = 1} ^ {N} \sum_ {j = 1} ^ {J + 1} \mathbf {W} _ {i, j} \log \left(\hat {\mathbf {W}} _ {i, j}\right) \tag {4} +$$ + +Overall, we minimize + +$$ +\mathcal {L} _ {\text {s y n}} = \lambda_ {k p} \mathcal {L} _ {\mathrm {k p}} + \lambda_ {\text {s e g}} \mathcal {L} _ {\text {s e g}} \tag {5} +$$ + +Notably, in Sec. 4.6 we show that supervision in this stage is not required - ground truth $\mathbf{W}$ and $\mathbf{Y}$ can be replaced by surrogate ground truths to achieve comparable results. + +# 3.2. Stage II: Self-Supervised Learning on In-the-Wild Data + +In this stage, we further refine the network using unsupervised losses. The key insight behind the design of the losses is that the human body is composed of limbs, each of which is a rigid part. 
Therefore, points on a limb move with the limb and should stay roughly at the same location + +in each limb's local coordinate system. To account for this, we propose flow loss that encourages the points to stay in the same location (despite rotation around the limb) within each limb's local cylindrical coordinate. + +We start by formally defining the key ingredients in the following formulations. In our setup, a human skeleton $L$ is composed of limbs, each of which is connecting two keypoints. A limb $l = (y_{a}, y_{b}) \in L$ is a line segment connecting the parent $y_{a}$ and child keypoints $y_{b}$ on this limb, and all surface points on this limb have segmentation label $a$ . + +All three proposed losses are in terms of surface points in each predicted limb's local coordinate system. Therefore, we first convert all input points to each limbs' local cylindrical coordinate and compute the radial and axial coordinates. Specifically, we project point $p \in \mathbf{P}$ in global coordinate on to vector $\overrightarrow{\hat{y}_a\hat{y}_b}$ , and calculate the norm of the projected vector + +$$ +\mathbf {z} (p, \hat {l}) = \frac {\left(p - \hat {y} _ {a}\right) \cdot \left(\hat {y} _ {b} - \hat {y} _ {a}\right)}{\| \hat {y} _ {b} - \hat {y} _ {a} \| _ {2}} \tag {6} +$$ + +and the distance between the point and $\overrightarrow{\hat{y}_a\hat{y}_b}$ + +$$ +\mathbf {r} (p, \hat {l}) = \| p - \hat {y} _ {a} - \mathbf {z} (\hat {y} _ {b} - \hat {y} _ {a}, \hat {l}) \| _ {2} \tag {7} +$$ + +For simplicity, we use $\mathbf{z}_{\hat{l}}(p)$ to represent $\mathbf{z}(p,\hat{l})$ , and $\mathbf{r}_{\hat{l}}(p)$ to represent $\mathbf{r}(p,\hat{l})$ in the following. + +Next, we describe the formulation of each loss function in detail. + +Flow Loss. Flow loss considers the predictions from two consecutive frames and encourages consistency of the radial and altitude components of all points with respect to scene flow - limbs should move between frames in a way to keep radial and axial coordinates for all points constant. Formally, we define the forward and backward flow losses $(\mathcal{L}_{ff}$ and $\mathcal{L}_{bf}$ respectively) for limbs $\hat{l}^t = (\hat{y}_a^t,\hat{y}_b^t)$ and $\hat{l}^{t + 1} = (\hat{y}_a^{t + 1},\hat{y}_b^{t + 1})$ for predicted keypoints for timestamp $t$ and $t + 1$ . + +$$ +\begin{array}{l} \mathcal {L} _ {f f} = \frac {1}{N} \sum_ {i} \hat {\mathbf {W}} _ {i a} ^ {t} \cdot \left(\left| \mathbf {r} _ {\hat {l} ^ {t + 1}} \left(p _ {i} ^ {t} + f _ {i} ^ {t}\right) - \mathbf {r} _ {\hat {l} ^ {t}} \left(p _ {i} ^ {t}\right) \right| + \right. \\ \left| \mathbf {z} _ {\hat {l} ^ {t + 1}} \left(p _ {i} ^ {t} + f _ {i} ^ {t}\right) - \mathbf {z} _ {\hat {l} ^ {t}} \left(p _ {i} ^ {t}\right) \right|) \tag {8} \\ \end{array} +$$ + +$$ +\begin{array}{l} \mathcal {L} _ {b f} = \frac {1}{N} \sum_ {i} \hat {\mathbf {W}} _ {i a} ^ {t + 1} \cdot \left(\left| \mathbf {r} _ {\hat {l} t} \left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\right) - \mathbf {r} _ {\hat {l} t + 1} \left(p _ {i} ^ {t + 1}\right) \right| + \right. \\ \left| \mathbf {z} _ {\hat {l} ^ {t}} \left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\right) - \mathbf {z} _ {\hat {l} ^ {t + 1}} \left(p _ {i} ^ {t + 1}\right) \right|) \tag {9} \\ \end{array} +$$ + +$f^t$ is the forward flow for each point $p^t \in \mathbf{P}^t$ and $b^{t+1}$ is the backward flow for each point $p^{t+1} \in \mathbf{P}^{t+1}$ . We use Neural Scene Flow Prior [14] to estimate flow for two consecutive frames of points. 
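For concreteness, the local cylindrical coordinates of Eqs. (6)-(7) and the per-limb forward consistency term of Eq. (8) can be sketched as below. This is a minimal NumPy illustration, not the authors' implementation; the array and function names are hypothetical, and the scene flow is assumed to be precomputed (e.g., with Neural Scene Flow Prior [14]).

```python
import numpy as np

def cylindrical_coords(points, y_a, y_b):
    """Axial (z) and radial (r) coordinates of points w.r.t. the limb (y_a, y_b), Eqs. (6)-(7)."""
    axis = y_b - y_a                                   # limb direction, shape (3,)
    unit = axis / np.linalg.norm(axis)
    z = (points - y_a) @ unit                          # signed projection onto the limb axis, (N,)
    foot = y_a + np.outer(z, unit)                     # foot of each projection on the axis, (N, 3)
    r = np.linalg.norm(points - foot, axis=1)          # distance to the limb axis, (N,)
    return z, r

def forward_flow_term(points_t, flow_t, limb_t, limb_t1, w_a_t):
    """Per-limb forward flow-consistency term of Eq. (8); w_a_t holds the soft assignments W_{ia}^t."""
    z_t, r_t = cylindrical_coords(points_t, *limb_t)                # coordinates w.r.t. frame t's limb
    z_t1, r_t1 = cylindrical_coords(points_t + flow_t, *limb_t1)    # flowed points w.r.t. frame t+1's limb
    return np.mean(w_a_t * (np.abs(r_t1 - r_t) + np.abs(z_t1 - z_t)))
```

The backward term of Eq. (9) is analogous, with the roles of the two frames swapped and the backward flow used in place of the forward flow.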
The overall flow loss for frame $t$ is + +$$ +\mathcal {L} _ {\text {f l o w}} = \frac {1}{| L |} \sum_ {\hat {l} t} \frac {\mathcal {L} _ {f f} + \mathcal {L} _ {b f}}{2} \tag {10} +$$ + +![](images/e2241ea31bfe3b1b334d4aec242957af7570a85f23e0d4f8a61bd8a2d7a27339.jpg) +Stage I: Initialization on Synthetic Data +Unsupervised Losses + +![](images/e6c46fcd903a52cbf3d9ea1d82cf231f133ad9db0823c9be84fb08d9e6962afb.jpg) +Stage II: Unsupervised Learning on In-the-Wild Data + +Flow loss +![](images/c19e32e7f6e091575f8c198f70f1411cfba5908a5377ce6ddceddb13810fe484.jpg) +(a) After moving, points stay in the same place (despite rotation around axis) within each limb's local cylindrical coordinate system. + +![](images/ee82e58063d9c4b155af987824ad367617c17e974f3bc1a2ff60e862b13eb8fb.jpg) +Figure 2. Overview of our method. In Stage I, we warm-up the keypoint predictor and body part segmentation predictor on a small synthetic dataset. Then, in Stage II we refine the 3D keypoint predictions on a large in-the-wild dataset with unsupervised losses. The main losses are depicted on the bottom. + +Points-to-limb loss +![](images/4705f627c4c7da48ea1250dc5f5c6d455383ec604cac68cabb703e4aecea0e16.jpg) +(b) Minimize points-to-limb distance to encourage the limb to stay within the body. + +Symmetry loss +![](images/9136ace268ca2b5d4dd47262bdc7e07315e5ff57d75fa2c86dd65ab0c40c1236.jpg) +(c) Points are symmetrical around limb. (i.e. points with similar height z have similar radius r) + +By design, the flow loss value is the same if the radial and axial values for all points in a local coordinate system are the same in consecutive frames. This would happen if a limb in both frames are shifted in their respective orthogonal direction by the same amount. Theoretically, it is unlikely to happen for all limbs, but empirically we observe that with flow loss alone the skeleton would move out of the point cloud. Therefore, we need additional losses to make the keypoints stay within the body. + +Points-to-Limb Loss. For a predicted limb $\hat{l} = (\hat{y}_a, \hat{y}_b)$ , we want the points on this limb to be close to it. Hence, we introduce a points-to-limb (p2l) loss + +$$ +\mathcal {L} _ {p 2 l} ^ {\hat {l}} = \frac {1}{N} \sum_ {i} \hat {\mathbf {W}} _ {i a} \mathbf {d} \left(p _ {i}, \hat {l}\right) \tag {11} +$$ + +where $\mathbf{d}$ is the Euclidean distance function between a point and a line segment. We sum over all points to get the overall points-to-limb loss, + +$$ +\mathcal {L} _ {\mathrm {p} 2 \mathrm {l}} = \frac {1}{| L |} \sum_ {\hat {l}} \mathcal {L} _ {\mathrm {p} 2 \mathrm {l}} ^ {\hat {l}} \tag {12} +$$ + +Symmetry Loss. Symmetry loss encourages the predicted limb $\hat{l}$ to be in a position such that all points around this limb are roughly symmetrical around it. That is to say, points with similar axial coordinates $\mathbf{z}_{\hat{l}}$ should have similar radial values $\mathbf{r}_{\hat{l}}$ . 
To that end, we introduce symmetry loss, + +$$ +\mathcal {L} _ {s y m} ^ {\hat {l}} = \frac {1}{N} \sum_ {i} \hat {\mathbf {W}} _ {i a} \left(\mathbf {r} _ {\hat {l}} \left(p _ {i}\right) - \bar {\mathbf {r}} _ {\hat {l}} \left(p _ {i}\right)\right) ^ {2} \tag {13} +$$ + +where $\bar{\mathbf{r}}_i(p_i)$ is the weighted mean of radial values of points + +with similar axial coordinates as $p_i$ , + +$$ +\bar {\mathbf {r}} _ {\hat {l}} \left(p _ {i}\right) = \frac {\sum_ {j} K _ {h} \left(\mathbf {z} _ {\hat {l}} \left(p _ {i}\right) , \mathbf {z} _ {\hat {l}} \left(p _ {j}\right)\right) \left(\hat {\mathbf {W}} _ {i *} \cdot \hat {\mathbf {W}} _ {j *}\right) \mathbf {r} _ {\hat {l}} \left(p _ {j}\right)}{\sum_ {j} K _ {h} \left(\mathbf {z} _ {\hat {l}} \left(p _ {i}\right) , \mathbf {z} _ {\hat {l}} \left(p _ {j}\right)\right) \left(\hat {\mathbf {W}} _ {i *} \cdot \hat {\mathbf {W}} _ {j *}\right)} \tag {14} +$$ + +$K_{h}$ is Gaussian kernel with bandwidth $h$ , i.e. $K_{h}(x,y) = e^{-(\frac{x - y}{h})^{2}}$ . $\hat{\mathbf{W}}_{i*} \in \mathbb{R}^{J}$ is the $i_{th}$ row of $\hat{\mathbf{W}}$ , and the dot product $\hat{\mathbf{W}}_{i*} \cdot \hat{\mathbf{W}}_{j*}$ measures the similarity of part assignment of point $i$ and $j$ , as we want the value of $\bar{r}_i^k$ to be calculated using the points from the same part as point $i$ . + +The overall symmetry loss is over all points, + +$$ +\mathcal {L} _ {s y m} = \frac {1}{| L |} \sum_ {l \in L} \mathcal {L} _ {s y m} ^ {l} \tag {15} +$$ + +Joint-to-Part Loss. In addition, we encourage each joint to be close to the center of the points on that part using a joint-to-part loss. + +$$ +\mathcal {L} _ {j 2 p} ^ {j} = \left\| \hat {y} _ {j} - \frac {\sum_ {i} \hat {\mathbf {W}} _ {i j} p _ {i}}{\sum_ {i} \hat {\mathbf {W}} _ {i j}} \right\| _ {2} \tag {16} +$$ + +We sum over all joints to get the overall joint-to-part loss. + +$$ +\mathcal {L} _ {j 2 p} = \frac {1}{J} \sum_ {j} \mathcal {L} _ {j 2 p} ^ {j} \tag {17} +$$ + +Note that although the ground truth location of joints are not in the center of points on the corresponding part, keeping this loss is essential in making the unsupervised training more robust. + +In practice, jointly optimizing $\hat{\mathbf{W}}$ and $\hat{\mathbf{Y}}$ in Stage II leads to unstable training curves. Hence, we use the pre-trained + +![](images/94c477a0fef43640d8aee39e48a863e21dfb0607695fb10236540fe888404a00.jpg) +Figure 3. Effect of unsupervised losses on perturbed skeleton. + +segmentation branch from Stage I to run segmentation inference to get the segmentation labels on all of the training samples in the beginning of Stage II, and $\hat{\mathbf{W}}$ is the one-hot encoding of the predicted segmentation labels. + +Segmentation Loss. Lastly, we notice that keeping the segmentation loss at this stage further regularizes the backbone and leads to better quantitative performance. We use the inferred segmentation $\hat{\mathbf{W}}$ as the surrogate ground truth and minimize cross entropy as in Eq. (4). + +Training objective. 
The overall training objective during Stage II is to minimize + +$$ +\begin{array}{l} \mathcal {L} = \lambda_ {f l o w} \mathcal {L} _ {f l o w} + \lambda_ {\mathrm {p 2 l}} \mathcal {L} _ {\mathrm {p 2 l}} + \lambda_ {s y m} \mathcal {L} _ {s y m} \\ + \lambda_ {\mathrm {j} 2 \mathrm {p}} \mathcal {L} _ {\mathrm {j} 2 \mathrm {p}} + \lambda_ {\mathrm {s e g}} \mathcal {L} _ {\mathrm {s e g}} \tag {18} \\ \end{array} +$$ + +To illustrate the effect of the three unsupervised losses $(\mathcal{L}_{flow}, \mathcal{L}_{p2l}$ and $\mathcal{L}_{sym})$ , we show the result of applying these losses on a perturbed ground truth skeleton (Fig. 3). As shown, the proposed unsupervised losses effectively moves the perturbed skeleton to locations that are closer to ground truth. + +# 4. Experiments + +# 4.1. Implementation Details + +The predictor model $f$ consists of a transformer backbone with fully connected layers for predicting joints and segmentation respectively. We use the same transformer backbone as in HUM3DIL [29]. A fully connected layer is applied to the output of transformer head to regress the predicted $\hat{W}$ and $\hat{Y}$ respectively. There are 352,787 trainable parameters in total. We set the maximum number of input LiDAR points to 1024, and zero-pad or downsample the point clouds with fewer or more number of points. The flow is obtained using a self-supervised test-time optimization method [14]. The network is trained on 4 TPUs. We train Stage I for 200 epochs and Stage II for 75 epochs, both with batch size 32, base learning rate of $1e - 4$ , and exponential decay 0.9. Stage I and II each finishes in about 6 hours. The loss weights in Eq. (5) are $\lambda_{kp} = 0.5$ and $\lambda_{seg} = 1$ . + +The loss weights in Eq. (18) are $\lambda_{flow} = 0.02$ , $\lambda_{p2l} = 0.01$ , $\lambda_{sym} = 0.5$ , $\lambda_{j2p} = 2$ , and $\lambda_{seg} = 0.5$ . The kernel bandwidth Eq. (14) is 0.1. + +# 4.2. Dataset and Metrics + +We construct a synthetic dataset with 1,000 sequences of 16-frame raycasted point clouds for Stage I training. Each sequence starts with the same standing pose and ends in a random pose. We find that data augmentation is essential in Stage I training. To simulate real-world noisy background and occlusion, we apply various data augmentations to the synthetic data, including randomly downsample, random mask, add ground clusters, add background clusters, add a second person, add noise to each point, scale the person. We include examples of augmented synthetic data in Fig. 4. + +![](images/46b0b01e293159ea5e03a7b82bbd0621d66fb8aa7fa3b31e0c0263f694d1b38a.jpg) +Figure 4. Data augmentations applied to the synthetic point clouds (colored by ground truth segmentation labels). Ground truth skeletons are shown in purple. Background points are in blue. + +In Stage II, we train on the entire Waymo Open dataset (WOD) training set (with around 200,000 unlabeled samples). As the official WOD testing subset is hidden from the public, we randomly choose $50\%$ of the validation set as the validation split, and the rest as the test split for benchmarking. We report average Mean Per Joint Position Error (MPJPE) on test set at the end of each stage. Formally, for a single sample, let $\hat{Y} \in \mathcal{R}^{J \times 3}$ be the predicted keypoints, $Y \in \mathcal{R}^{J \times 3}$ the ground truth keypoints, and $v \in \{0,1\}^J$ the visibility indicator annotated per keypoint. 
+ +$$ +\operatorname {M P J P E} (Y, \hat {Y}) = \frac {1}{\sum_ {j} v _ {j}} \sum_ {j \in [ J ]} v _ {j} \| y _ {j} - \hat {y} \| _ {2} \tag {19} +$$ + +Note that in this Stage, we do Hungarian matching between the predicted and annotated keypoints per frame, and then report MPJPE on matched keypoints. We report matched MPJPE because the method is intended for scenarios where correspondence between keypoints in the unlabeled training data and downstream data is unknown. + +![](images/235df5a86dacfa377f2b945d9cac5e30d20a2b314a17b86314d035b487064d79.jpg) +Figure 5. Visualizations of predictions on WOD at the end of Stage I and Stage II. Points are colored by predicted segmentation labels. Ground truth keypoints are in green and predicted keypoints and skeletons are in red. + +# 4.3. Results + +In this section we perform quantitative evaluation of GC-KPL at the end of Stage I and II in Tab. 2. Qualitative results are in Fig. 5. As shown, after first stage where we train on a synthetic dataset constructed from posed body models with carefully chosen data augmentations, we are able to predict reasonable human keypoints on in-the-wild point clouds. The second stage our novel unsupervised losses further refine the predicted keypoints. + +# 4.4. Downstream Task: Few-shot 3D Keypoint Learning + +In this experiment, we show that the backbone of our model benefits from unsupervised training on large amount of unlabeled data, and can be useful for downstream finetuning tasks. We start from our pre-trained backbone after Stage II, and fine-tune with annotated training samples from WOD by minimizing mean per joint error. We include few-shot experiments where we fine-tune with a extremely small amount of data (10% and 1% of the training set), to represent challenging scenarios where there is a limited amount of annotated data. + +We include the LiDAR-only version of HUM3DIL (a state-of-the-art model on WOD) [29] as a strong baseline. The quantitative results (Tab. 1) suggest that our back + +bone learns useful information from the unlabeled in-the-wild data and enables a significant performance boost on the downstream tasks. Compared to a randomly initialized backbone as used in HUM3DIL, our backbone leads to over $2\mathrm{cm}$ of decrease in MPJPE in downstream fine-tuning experiments, which is a significant improvement for the 3D human keypoint estimation task. + +We visualize the predicted keypoints under different data regime in Fig. 6. As shown, models fine-tuned from our backbone is able to capture fine details on the arms and overall produces more accurate results than HUM3DIL. + +To the best of our knowledge, there does not exist previous works on completely unsupervised human keypoint estimation from point clouds. We additionally experiment with using a readout layer on top of the features learned by a state-of-the-art point cloud SSL method 3D-OAE [30], but the MPJPE is $15\mathrm{cm}$ (compared to $10.10\mathrm{cm}$ from GC-KPL). Hence we consider the baselines we adopt here strong and complete. In Sec. 4.6, we further challenge our method by comparing to the domain adaptation setup and demonstrate that the performance of GC-KPL is still superior. + +# 4.5. Domain adaptation + +In the configuration where we use ground truth labels in Stage I and unsupervised training in Stage II could be seen as a domain adaption (DA) technique. Thus it is useful to compare proposed method with a commonly-used domain adaptation method. 
We train the same backbone model using a mix of real and synthetic data and a gradient reversal layer (aka DA loss) [5] to help the network to learn domain invariant keypoint features. Results in Tab. 3 demonstrate that GC-KPL yields superior accuracy compared with the DA method (MPJPE 10.1 vs $11.35\mathrm{cm}$ ). + +# 4.6. Ablations + +Effect of using GT bounding boxes in pre-processing. We cropped human point clouds from the entire scene by including only points within GT bounding boxes. We also conducted experiments where we train with detected bounding boxes from raw LiDAR scans using a SoTA 3D detector. Results suggest that GC-KPL is robust to noise in 3D detection, as there were no noticeable changes in metrics. + +Effect of synthetic dataset size. In our method Stage I serves as a model initialization step where we show that training on a small synthetic dataset (16,000 samples) with properly chosen data augmentations is suffice for the model to learn useful semantics. We further investigate the effect of synthetic dataset size during Stage I. We experiment with larger dataset sizes (160,000 and 1,600,000 samples) and observe that the effect of increasing synthetic dataset size is insignificant on $\mathrm{MPJPE}_{\mathrm{matched}}$ at the end of Stage I - it decreased from $17.7\mathrm{cm}$ to $17.6\mathrm{cm}$ . Lack of a notable improvements for larger dataset sizes is likely due to limited variability of generated poses in synthetic data (see Supple + +![](images/8dc2fe390528e659e81192b9044aa880ff139f08e5825c8a30a66b34d0b122a9.jpg) +(a) Fine-tune on $100\%$ training set + +![](images/bf2689fb1e1f498337db157c972e6efcc82d78b530c8b7c4f39d2e975113622b.jpg) +(b) Fine-tune on $10\%$ training set + +![](images/561b3aee6120df91a4d59a2d47092628e9b66ea246cf2d73ff471e17621a1132.jpg) +(c) Fine-tune on $1\%$ training set +Figure 6. Predicted keypoints from fine-tuning with different amount of annotated data. The points are colored by predicted segmentation labels by our model. Predicted keypoints are shown in red. + +
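Table 1 below reports MPJPE in centimeters under different backbone initializations. For reference, a minimal sketch of the metric of Eq. (19), with the per-keypoint visibility mask, could look as follows (NumPy; the names are illustrative, not the authors' code):

```python
import numpy as np

def mpjpe(pred, gt, visibility):
    """Mean per-joint position error over annotated (visible) keypoints, Eq. (19).

    pred, gt: (J, 3) predicted and ground-truth keypoint locations.
    visibility: (J,) 0/1 mask of keypoints that carry an annotation.
    """
    errors = np.linalg.norm(pred - gt, axis=1)          # per-joint Euclidean error
    return float((visibility * errors).sum() / visibility.sum())
```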
| Method | Backbone | Stage I supervised | 1% training set MPJPE cm. (gain) | 10% training set MPJPE cm. (gain) | 100% training set MPJPE cm. (gain) |
|---|---|---|---|---|---|
| HUM3DIL [29] | Randomly initialized | | 19.57 | 16.36 | 12.21 |
| | Pre-trained on synthetic only | | 18.52 (-1.05) | 15.10 (-1.26) | 11.27 (-0.94) |
| GC-KPL | Pre-trained on 5,000 WOD-train | | 17.87 (-1.70) | 14.51 (-1.85) | 10.73 (-1.48) |
| | Pre-trained on 200,000 WOD-train | | 17.80 (-1.77) | 14.30 (-2.06) | 10.60 (-1.61) |
| | Pre-trained on 200,000 WOD-train | ✓ | 17.20 (-2.37) | 13.40 (-2.96) | 10.10 (-2.11) |
+ +Table 1. Downstream fine-tuning results. Check marks in "Stage I supervised" mean that we use ground truth part labels in Stage I, otherwise we use KMeans labels. + +
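Table 2 below reports $\mathrm{MPJPE}_{\mathrm{matched}}$, i.e. MPJPE computed after Hungarian matching between predicted and annotated keypoints (Sec. 4.2). A self-contained sketch of that evaluation step, assuming a Euclidean-distance matching cost (the paper does not spell out the cost) and SciPy's assignment solver, might look like:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def matched_mpjpe(pred, gt, visibility):
    """MPJPE after Hungarian matching of predictions to annotations (Sec. 4.2).

    pred: (K, 3) predicted keypoints; gt: (J, 3) annotated keypoints;
    visibility: (J,) 0/1 mask over the annotated keypoints.
    """
    cost = np.linalg.norm(gt[:, None, :] - pred[None, :, :], axis=-1)  # (J, K) pairwise distances
    rows, cols = linear_sum_assignment(cost)                           # optimal one-to-one matching
    errors = cost[rows, cols]                                          # error of each matched pair
    vis = visibility[rows]
    return float((vis * errors).sum() / vis.sum())
```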
| Training data | $\mathrm{MPJPE}_{\mathrm{matched}}$ (↓) |
|---|---|
| Synthetic only | 17.70 |
| 5,000 WOD-train | 14.64 |
| 200,000 WOD-train | 13.92 |
+ +Table 2. Unsupervised learning (Stage II) results. + +
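Table 3 below evaluates the domain-adaptation baseline of Sec. 4.5, which mixes real and synthetic batches and trains a domain classifier through a gradient reversal layer [5]. One common PyTorch formulation of such a layer (a generic sketch, not the authors' implementation; `backbone_features` and `domain_classifier` in the usage comment are hypothetical) is:

```python
import torch

class GradReverse(torch.autograd.Function):
    """Identity in the forward pass; scales gradients by -lambda in the backward pass [5]."""

    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # reverse (and scale) the gradient flowing back into the feature extractor
        return -ctx.lambd * grad_output, None

def grad_reverse(x, lambd=1.0):
    return GradReverse.apply(x, lambd)

# Usage sketch: domain_logits = domain_classifier(grad_reverse(backbone_features))
# The backbone is pushed to make real and synthetic features indistinguishable,
# while the domain classifier is trained to tell them apart.
```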
| Domain distribution | DA loss | MPJPE (↓) |
|---|---|---|
| 100% real | | 12.21 |
| 50/50% real/synthetic | | 12.08 |
| 50/50% real/synthetic | ✓ | 11.35 |
+ +Table 3. Unsupervised domain adaptation results evaluated on WOD validation set. + +mental for details). + +Effect of using ground truths on synthetic data. While our described pipeline does not use any kind of manual labels, we do use ground truth segmentation and keypoints on synthetic dataset in Stage I because they are readily available. Here we further experiment with a variation where we do not use any kind of ground truths in Stage I (first row in Tab. 4). Instead, we use KMeans clusters and cluster centers as surrogate ground truths for model initialization, similar to [1]. Note that we are able to establish correspondence between KMeans clusters from different samples due to the fact that in our data generation process, each synthetic sequence starts with the same starting standing pose. Hence, we can run KMeans clustering on the starting pose that is shared among all sequences, and for subsequent samples within each sequence, we do Hungarian matching using + +
| No. | Exp. | Stage I $\mathcal{L}_{kp}$ | Stage I $\mathcal{L}_{seg}$ | Stage I $\mathrm{MPJPE}_{\mathrm{matched}}$ | Stage II $\mathcal{L}_{j2p}$ | Stage II $\mathcal{L}_{seg}$ | Stage II $\mathcal{L}_{sym}$ | Stage II $\mathcal{L}_{p2l}$ | Stage II $\mathcal{L}_{flow}$ | Stage II $\mathrm{MPJPE}_{\mathrm{matched}}$ |
|---|---|---|---|---|---|---|---|---|---|---|
| 1 | Effect of using KMeans labels in Stage I | | | 19.2 | | | | | | 14.5 |
| 2 | Effect of $\mathcal{L}_{kp}$ in Stage I | | | N/A | | | | | | 14.2 |
| 3 | | | | | | | | | | 15.0 |
| 4 | Effect of warmup losses in Stage II | | | | | | | | | 14.2 |
| 5 | | | | | | | | | | 15.2 |
| 6 | | | | | | | | | | 30.1 |
| 7 | | | | | | | | | | 15.6 |
| 8 | | | | | | | | | | 25.7 |
| 9 | Effect of unsupervised losses in Stage II | | | | | | | | | 14.3 |
| 10 | | | | | | | | | | 14.9 |
| 11 | | | | | | | | | | 14.4 |
| 12 | | | | | | | | | | 14.9 |
| | Full model (GC-KPL) | | | 17.7 | | | | | | 13.9 |
+ +Table 4. Ablations studies on the effect of individual loss term in our method. Experiments 3 through 12 are using both losses in Stage I. Full model is using GT labels for Stage I. + +inter-cluster Chamfer distance to establish correspondence between clusters from consecutive frames. We observe that although initializing with surrogate ground truths leads to slightly inferior performance in Stage I, after training with the losses in Stage II the drop in performance is less visible. Overall, downstream fine-tuning performance is comparable to our best model (10.6/14.3/17.8 vs. 10.1/13.4/17.2 cm when fine-tuned on $100\% / 10\% / 1\%$ of the data, see Tab. 1). This experiment suggests that method does not require any kind of ground truths, even during initialization stage. + +Effect of Losses. In this section we further investigate the effect of each component in our pipeline (Tab. 4). First, we note that $\mathcal{L}_{seg}$ in Stage I is essential because we need an initialized segmentation model to get the body part assignment for each point in order to calculate the losses in Stage II. Therefore, we only experiment with a variation of Stage I training without $\mathcal{L}_{kp}$ , and we observe that $\mathcal{L}_{kp}$ is useful in warming up the backbone for later stages. Next, we take the backbone from Stage I (trained with both $\mathcal{L}_{kp}$ and $\mathcal{L}_{seg}$ ), and study the effect of individual losses in Stage II. Experiments No. $3/4/5$ show that it is helpful to include $\mathcal{L}_{j2p}$ and $\mathcal{L}_{seg}$ while having all other three unsupervised losses. In experiments $6/7/8$ we take out $\mathcal{L}_{j2p}$ and $\mathcal{L}_{seg}$ , and investigate the effect of individual unsupervised losses. As shown the training becomes rather unstable if we further eliminate any of the three losses. We observe qualitatively that the metric worsens drastically because the limbs quickly move out of the human body. Experiments No. $3/4/5$ suggest that $\mathcal{L}_{j2p}$ and $\mathcal{L}_{seg}$ are useful regularizers that make sure the limbs stay within the body, and the unsupervised losses further improve the performance by refining the keypoint location. + +# 4.7. Limitations and Future Work + +The task of keypoint location could be considered as a dual problem for semantic segmentation. In this work we + +use a simple segmentation network based on the same architecture as our keypoint estimation model. Using a superior segmentation model could lead to further improvements. + +The proposed flow loss depends on quality of the estimated flow of LiDAR points. In this work we used a simple but reasonable method to estimate flow between two frames of LiDAR points called Neural Scene Flow prior [14]. Quality of the unsupervised keypoint estimation could be improved by using a more advanced flow estimator tailored for point clouds on human body surfaces. + +Lastly, we use a part of the HUM3DIL [29] model which takes only LiDAR point cloud as input. The full HUM3DIL model was designed for multi-modal inputs and attains better performance. Thus, another interesting direction is to leverage multi-modal inputs. + +# 5. Conclusion + +In this work, we approached the problem of 3D human pose estimation using points clouds in-the-wild, introduced a method (GC-KPL) for learning 3D human keypoints from point clouds without using any manual 3D keypoint annotations. We shown that the proposed novel losses are effective for unsupervised keypoint learning on Waymo Open Dataset. 
Through downstream experiments we demonstrated that GC-KPL can additionally serve as a self-supervised representation method to learn from large quantity of in-the-wild human point clouds. In addition, GC-KPL compares favorably with a commonly used domain adaptation technique. The few-shot experiments empirically verified that using only $10\%$ of available 3D keypoint annotation the fine-tuned model reached comparable performance to the state-of-the-art model training on the entire dataset. These results opens up exciting possibility to utilize massive amount of sensor data in autonomous driving to improve pedestrian 3D keypoint estimation. + +# References + +[1] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In European Conference on Computer Vision, 2018. 7 +[2] Mathilde Caron, Piotr Bojanowski, Julien Mairal, and Armand Joulin. Unsupervised pre-training of image features on non-curated data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2959-2968, 2019. 2 +[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2 +[4] Michael Fürst, Shriya TP Gupta, René Schuster, Oliver Wasenmüller, and Didier Stricker. Hperl: 3d human pose estimation from rgb and lidar. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 7321-7327. IEEE, 2021. 1, 2 +[5] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 1180-1189, Lille, France, 07-09 Jul 2015. PMLR. 6 +[6] Albert Haque, Boya Peng, Zelun Luo, Alexandre Alahi, Serena Yeung, and Li Fei-Fei. Towards viewpoint invariant 3d human pose estimation. In European conference on computer vision, pages 160–177. Springer, 2016. 2 +[7] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 2 +[8] Siyuan Huang, Yichen Xie, Song-Chun Zhu, and Yixin Zhu. Spatio-temporal self-supervised representation learning for 3d point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6535-6545, 2021. 2 +[9] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Unsupervised learning of object landmarks through conditional image generation. Advances in neural information processing systems, 31, 2018. 2 +[10] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Self-supervised learning of interpretable keypoints from unlabelled videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8787-8797, 2020. 2 +[11] Wonhui Kim, Manikandasriram Srinivasan Ramanagopal, Charles Barto, Ming-Yuan Yu, Karl Rosaen, Nick Goumas, Ram Vasudevan, and Matthew Johnson-Roberson. Pedx: Benchmark dataset for metric 3-d pose estimation of pedestrians in complex urban intersections. IEEE Robotics and Automation Letters, 4(2):1940-1947, 2019. 1, 2 +[12] Jiaxin Li and Gim Hee Lee. Usip: Unsupervised stable interest point detection from 3d point clouds. In Proceedings of + +the IEEE/CVF international conference on computer vision, pages 361-370, 2019. 
2 +[13] Jialian Li, Jingyi Zhang, Zhiyong Wang, Siqi Shen, Chenglu Wen, Yuexin Ma, Lan Xu, Jingyi Yu, and Cheng Wang. Lidarcap: Long-range marker-less 3d human motion capture with lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20502-20512, 2022. 1, 2 +[14] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 5, 8 +[15] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. ACM transactions on graphics (TOG), 34(6):1-16, 2015. 2, 3 +[16] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3d joints for re-posing of articulated objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3677–3687, 2022. 2 +[17] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. 2 +[18] Luca Schmidtke, Athanasios Vlontzos, Simon Ellershaw, Anna Lukens, Tomoki Arichi, and Bernhard Kainz. Unsupervised human pose estimation through transforming shape templates. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2484-2494, 2021. 2 +[19] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 2 +[20] Jennifer J Sun, Serim Ryou, Roni H Goldshmid, Brandon Weissboud, John O Dabiri, David J Anderson, Ann Kennedy, Yisong Yue, and Pietro Perona. Self-supervised keypoint discovery in behavioral videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2171-2180, 2022. 2 +[21] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1, 2 +[22] Supasorn Suwajanakorn, Noah Snavely, Jonathan J Tompson, and Mohammad Norouzi. Discovery of latent 3d keypoints via end-to-end geometric reasoning. Advances in neural information processing systems, 31, 2018. 2 +[23] Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9782-9792, 2021. 2 +[24] Yuefan Wu, Zeyuan Chen, Shaowei Liu, Zhongzheng Ren, and Shenlong Wang. Casa: Category-agnostic skeletal ani + +mal reconstruction. arXiv preprint arXiv:2211.03568, 2022. 2 +[25] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In European conference on computer vision, pages 574-591. Springer, 2020. 2 +[26] Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. Foldingnet: Point cloud auto-encoder via deep grid deformation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 206–215, 2018. 2 +[27] Yang You, Wenhai Liu, Yanjie Ze, Yong-Lu Li, Weiming Wang, and Cewu Lu. Ukpgan: A general self-supervised keypoint detector. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17042-17051, 2022. 2 +[28] Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-bert: Pre-training 3d point cloud transformers with masked point modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19313-19322, 2022. 2 +[29] Andrei Zanfir, Mihai Zanfir, Alex Gorban, Jingwei Ji, Yin Zhou, Dragomir Anguelov, and Cristian Sminchisescu. Hum3dil: Semi-supervised multi-modal 3d humanpose estimation for autonomous driving. In 6th Annual Conference on Robot Learning, 2022. 1, 2, 5, 6, 7, 8 +[30] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 6 +[31] Zihao Zhang, Lei Hu, Xiaoming Deng, and Shihong Xia. Weakly supervised adversarial learning for 3d human pose estimation from point clouds. IEEE transactions on visualization and computer graphics, 26(5):1851-1859, 2020. 2 +[32] Jingxiao Zheng, Xinwei Shi, Alexander Gorban, Junhua Mao, Yang Song, Charles R Qi, Ting Liu, Visesh Chari, Andre Cornman, Yin Zhou, et al. Multi-modal 3d human pose estimation with 2d weak supervision in autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4478-4487, 2022. 1, 2 +[33] Junsheng Zhou, Xin Wen, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Self-supervised point cloud representation learning with occlusion auto-encoder. arXiv preprint arXiv:2203.14084, 2022. 2 +[34] Yufan Zhou, Haiwei Dong, and Abdulmotaleb El Saddik. Learning to estimate 3d human pose from point cloud. IEEE Sensors Journal, 20(20):12334-12342, 2020. 
2 \ No newline at end of file diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/images.zip b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..6de812cc1eecef80a7dd924b7285e7755f5d4bab --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7869e80bef2dcb489e2302d50ab00c788994341298cace69a8ac52d38d9c92af +size 507700 diff --git a/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/layout.json b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..44693168d773bf1a034dfecfdae67d37c1e80165 --- /dev/null +++ b/2023/3D Human Keypoints Estimation From Point Clouds in the Wild Without Human Labels/layout.json @@ -0,0 +1,9003 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 100, + 102, + 494, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 102, + 494, + 138 + ], + "spans": [ + { + "bbox": [ + 100, + 102, + 494, + 138 + ], + "type": "text", + "content": "3D Human Keypoints Estimation from Point Clouds in the Wild without Human Labels" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "spans": [ + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": "Zhenzhen Weng" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": " Alexander S. 
Gorban" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": " Jingwei Ji" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": " Mahyar Najibi" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": " Yin Zhou" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "text", + "content": " Dragomir Anguelov" + }, + { + "bbox": [ + 116, + 160, + 475, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "spans": [ + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "type": "text", + "content": "Stanford University " + }, + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 215, + 202, + 374, + 217 + ], + "type": "text", + "content": "Waymo" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 258 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 269, + 290, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 269, + 290, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 290, + 544 + ], + "type": "text", + "content": "Training a 3D human keypoint detector from point clouds in a supervised manner requires large volumes of high quality labels. While it is relatively easy to capture large amounts of human point clouds, annotating 3D keypoints is expensive, subjective, error prone and especially difficult for long-tail cases (pedestrians with rare poses, scooterists, etc.). In this work, we propose GC-KPL - Geometry Consistency inspired Key Point Leaning, an approach for learning 3D human joint locations from point clouds without human labels. We achieve this by our novel unsupervised loss formulations that account for the structure and movement of the human body. We show that by training on a large training set from Waymo Open Dataset [21] without any human annotated keypoints, we are able to achieve reasonable performance as compared to the fully supervised approach. Further, the backbone benefits from the unsupervised training and is useful in downstream few-shot learning of keypoints, where fine-tuning on only 10 percent of the labeled training data gives comparable performance to fine-tuning on the entire set. We demonstrated that GC-KPL outperforms by a large margin over SoTA when trained on entire dataset and efficiently leverages large volumes of unlabeled data." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 555, + 128, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 555, + 128, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 555, + 128, + 567 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 574, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 574, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 574, + 287, + 658 + ], + "type": "text", + "content": "Estimation of human pose in 3D is an important problem in computer vision and it has a wide range of applications including AR/VR, AI-assisted healthcare, and autonomous driving [4,29,32]. For autonomous systems, being able to perceive human poses from sensor data (e.g. Li-DAR point clouds) is particularly essential to reason about the surrounding environment and make safe maneuvers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 658, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 658, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 658, + 287, + 693 + ], + "type": "text", + "content": "Despite the high level of interest in human pose estimation in the wild, only few papers approached outdoor 3D keypoint detection using point cloud. A main reason is that" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 247, + 541, + 432 + ], + "blocks": [ + { + "bbox": [ + 310, + 247, + 541, + 432 + ], + "lines": [ + { + "bbox": [ + 310, + 247, + 541, + 432 + ], + "spans": [ + { + "bbox": [ + 310, + 247, + 541, + 432 + ], + "type": "image", + "image_path": "dc011686fedb76da0d66d44f198e418de7265f187c27f44ef07788a14963edf1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 442, + 547, + 510 + ], + "lines": [ + { + "bbox": [ + 305, + 442, + 547, + 510 + ], + "spans": [ + { + "bbox": [ + 305, + 442, + 547, + 510 + ], + "type": "text", + "content": "Figure 1. We present GC-KPL, a novel method for learning 3D human keypoints from in-the-wild point clouds without any human labels. We propose to learn keypoint locations using unsupervised losses that account for the structure and movement of the human body. The backbone learns useful semantics from unsupervised learning and can be used in downstream fine-tuning tasks to boost the performance of 3D keypoint estimation." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 522, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 547, + 713 + ], + "type": "text", + "content": "training a pedestrian pose estimation model requires large amount of high quality in-the-wild data with ground truth labels. Annotating 3D human keypoints on point cloud data is expensive, time consuming and error prone. Although there are a few existing point cloud datasets with ground truth human poses [11, 13, 21], they are limited in terms of the quantity of the 3D annotations and diversity of the data. Therefore, fully-supervised human keypoint detectors trained on such datasets do not generalize well for long tail cases. For this reason, previous approaches on pedestrian 3D keypoint estimation have mainly focused on utilizing 2D weak supervision [4, 32] which is easier to obtain, or leveraging signals from others modalities (e.g. 
RGB, depth) [29]. Nonetheless, there is a lot of useful information in the large amount of unlabeled LiDAR data that previous works on human pose estimation have not made an effort to utilize." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 175, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 175, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 175, + 712 + ], + "type": "text", + "content": "*Work done as an intern at Waymo." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1158" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 203 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 203 + ], + "type": "text", + "content": "In this work, we propose a novel and effective method for learning 3D human keypoints from in-the-wild point clouds without using any manual labeled 3D keypoints. Our approach is built on top of the key observation that human skeletons are roughly centered within approximately rigid body parts and that the location and movement of the surface points should explain the movement of the skeleton and vice versa. To that end, we design novel unsupervised loss terms for learning locations of the 3D keypoints/skeleton within human point clouds which correspond to 3D locations of major joints of human body." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 203, + 289, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 203, + 289, + 309 + ], + "spans": [ + { + "bbox": [ + 46, + 203, + 289, + 309 + ], + "type": "text", + "content": "In the proposed method, we first train a transformer-based regression model for predicting keypoints and a semantic segmentation model for localizing body parts on a synthetic data constructed from randomly posed SMPL human body model [15]. Then, we train on the entire Waymo Open Dataset [21] without using any 3D ground-truth annotation of human keypoints. Through unsupervised training, keypoint predictions are refined and the backbone learns useful information from large amount of unannotated data." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 309, + 263, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 309, + 263, + 321 + ], + "spans": [ + { + "bbox": [ + 59, + 309, + 263, + 321 + ], + "type": "text", + "content": "In summary, we make the following contributions:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 328, + 287, + 513 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 59, + 328, + 286, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 328, + 286, + 364 + ], + "spans": [ + { + "bbox": [ + 59, + 328, + 286, + 364 + ], + "type": "text", + "content": "- We present GC-KPL, a method for learning human 3D keypoints for in-the-wild point clouds without any manual keypoint annotations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 372, + 287, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 372, + 287, + 433 + ], + "spans": [ + { + "bbox": [ + 59, + 372, + 287, + 433 + ], + "type": "text", + "content": "- Drawing insight from the structure and movement of the human body, we propose three effective and novel unsupervised losses for refining keypoints. We show that the proposed losses are effective for unsupervised keypoint learning on Waymo Open Dataset." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 441, + 287, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 441, + 287, + 513 + ], + "spans": [ + { + "bbox": [ + 59, + 441, + 287, + 513 + ], + "type": "text", + "content": "- Through downstream fine-tuning/few-shot experiments, we demonstrate that GC-KPL can be used as unsupervised representation learning for human point clouds, which opens up the possibility to utilize a practically infinite amounts of sensor data to improve human pose understanding in autonomous driving." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 521, + 134, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 521, + 134, + 534 + ], + "spans": [ + { + "bbox": [ + 47, + 521, + 134, + 534 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 540, + 287, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 287, + 564 + ], + "type": "text", + "content": "2.1. 3D Human Keypoint Estimation from Points Clouds" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 289, + 715 + ], + "type": "text", + "content": "There have been a few works [19, 31, 34] about estimating 3D keypoints from clean and carefully-curated point clouds [6], but 3D keypoint estimation from in-the-wild point clouds is a much less studied problem. Due to the lack of ground-truth 3D human pose annotations paired with Li-DAR data, there has not been a lot of works on 3d human keypoint estimation from LiDAR information. Among the few point cloud datasets with 3D keypoint annotations, Li-DARHuman26M [13] captures long-range human motions with ground truth motion acquired by the IMU system and pose information derived from SMPL models fitted into point clouds. 
It is among the first few datasets which have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 547, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 348 + ], + "type": "text", + "content": "LiDAR point clouds synchronized with RGB images, but SMPL shape parameters are same for all 13 subjects and it does not feature in-the-wild pedestrians where there could be much more background noise and occlusion. PedX [11] offers 3D automatic pedestrian annotations obtained using model fitting on different modalities, gathered effectively from a single intersection with only 75 pedestrians (the second intersection has only 218 frames, labels for the third scene were not released). Waymo Open Dataset [21] has more than 3,500 subjects from over 1,000 different in-the-wild scenes with high-quality 2D and 3D manual annotations. Despite the existence of these datasets, the few works on 3D pose estimation from point clouds mostly rely on weak supervision. HPERL model [4] trains on 2D ground-truth pose annotations and uses a reprojection loss for the 3D pose regression task. Multi-modal model in [32] uses 2D labels on RGB images as weak supervision, and creates pseudo ground-truth 3D joint positions from the projection of annotated 2D joints. HUM3DIL [29] leverages RGB information with LiDAR points, by computing pixel-aligned multi-modal features with the 3D positions of the LiDAR signal. In contrast, our method does not use any RGB information or weak supervision." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 369, + 500, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 500, + 383 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 500, + 383 + ], + "type": "text", + "content": "2.2. Unsupervised Keypoint Localization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 387, + 545, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 387, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 387, + 545, + 531 + ], + "type": "text", + "content": "There are a number of works that aim to recover 3D keypoints using self-supervised geometric reasoning [12, 22], but they are limited to rigid objects. More recent unsupervised methods work for articulated objects from monocular RGB data [9, 10, 10, 18, 20, 24], multi-view data [16], or point clouds [27], where authors suggest to condition on the predicted keypoints and train a conditional generative model to supervise the keypoints through reconstruction losses. We propose a simpler pipeline where we apply our novel unsupervised losses to the predicted keypoints directly and do not require additional models besides the keypoint predictor itself." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 552, + 525, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 552, + 525, + 565 + ], + "spans": [ + { + "bbox": [ + 305, + 552, + 525, + 565 + ], + "type": "text", + "content": "2.3. Self-supervised Learning for Point Clouds" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "content": "Self-supervised representation learning has proven to be remarkably useful in language [3, 17] and 2D vision tasks [2, 7]. 
As LiDAR sensors become more affordable and common, there has been an increasing amount of research interest in self-supervised learning on 3D point clouds. Previous works proposed to learn representations of object or scene level point clouds through contrastive learning [8, 25, 30] or reconstruction [23, 26, 28, 33], which is useful in downstream classification or segmentation tasks. In contrast, our supervision signals come from the unique structure of the human body and our learned backbone is particularly useful in downstream human keypoint estimation tasks." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1159" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 104, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 104, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 104, + 83 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 287, + 198 + ], + "type": "text", + "content": "In this section, we describe our complete training pipeline which contains two stages. In the first stage, we initialize the model parameters on a synthetic dataset (Sec. 3.1). The purpose of Stage I is to warm-up the model with reasonable semantics. The second stage generalizes the model to the real-world data. In this stage, we use our unsupervised losses to refine the keypoint predictions on in-the-wild point clouds (Sec. 3.2). An overview of our pipeline is in Fig. 2." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 204, + 255, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 204, + 255, + 217 + ], + "spans": [ + { + "bbox": [ + 47, + 204, + 255, + 217 + ], + "type": "text", + "content": "3.1. Stage I: Initialization on Synthetic Data" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 221, + 287, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 221, + 287, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 221, + 287, + 269 + ], + "type": "text", + "content": "In this stage, we initialize the model on a synthetic dataset that is constructed by ray casting onto randomly posed human mesh models (SMPL [15]). We describe details of synthetic data generation in Supplementary." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": "The goal of this stage is to train a model " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": " that takes a point cloud of a human " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\mathbf{P} \\in \\mathbb{R}^{N \\times 3}" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": " and outputs 3D locations of keypoints " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Y}} \\in \\mathbb{R}^{(J + 1) \\times 3}" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": ", as well as soft body part assignments (or part segmentation) " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}} \\in \\mathbb{R}^{N \\times (J + 1)}" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": " that contains the probability of each point " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": " belonging to body part " + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "inline_equation", + "content": "j \\in [J]" + }, + { + "bbox": [ + 46, + 269, + 287, + 340 + ], + "type": "text", + "content": " or the background." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 347, + 287, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 287, + 361 + ], + "type": "interline_equation", + "content": "\\{\\hat {\\mathbf {Y}}, \\hat {\\mathbf {W}} \\} = f (\\mathbf {P}) \\tag {1}", + "image_path": "a4df3286720b12b6d3073cbf5cc11fb1b8b773440ad7f09fd646513a773b7034.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 119, + 365, + 287, + 395 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 365, + 287, + 395 + ], + "spans": [ + { + "bbox": [ + 119, + 365, + 287, + 395 + ], + "type": "interline_equation", + "content": "\\forall i \\in [ N ], \\sum_ {j = 1} ^ {J + 1} \\hat {\\mathbf {W}} _ {i, j} = 1 \\tag {2}", + "image_path": "94276dd447d036c50d5a832862dbd6211f56550545a6c151abf9a18e467460c7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "text", + "content": "Ground truth information about part segmentation " + }, + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "text", + "content": " and keypoint locations " + }, + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}" + }, + { + "bbox": [ + 46, + 402, + 287, + 451 + ], + "type": "text", + "content": " are readily available for synthetic data. 
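As an illustrative reading of Eqs. (1)-(2) (not the authors' code), the sketch below shows the shape contract of the predictor f: an N x 3 point cloud in, (J+1) x 3 keypoints and an N x (J+1) row-stochastic part-assignment matrix out. The transformer backbone is replaced by a random stand-in; J = 14 is an arbitrary placeholder for the number of joints.

```python
import numpy as np

# Hypothetical stand-in for the predictor f in Eq. (1); it only illustrates the
# expected shapes, not the transformer backbone described in Sec. 4.1.
def predict_keypoints_and_parts(P: np.ndarray, J: int = 14, seed: int = 0):
    N = P.shape[0]
    rng = np.random.default_rng(seed)
    Y_hat = P.mean(axis=0) + 0.1 * rng.standard_normal((J + 1, 3))      # (J+1) x 3 keypoints
    logits = rng.standard_normal((N, J + 1))
    W_hat = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax: rows sum to 1, Eq. (2)
    return Y_hat, W_hat

P = np.random.rand(1024, 3)                   # a (padded) human point cloud
Y_hat, W_hat = predict_keypoints_and_parts(P)
assert np.allclose(W_hat.sum(axis=1), 1.0)    # Eq. (2) holds by construction
```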
Hence, we can train the model by directly supervising the predicted keypoint through L2 loss," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 458, + 287, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 458, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 458, + 287, + 472 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {k p} = \\left\\| \\hat {\\mathbf {Y}} - \\mathbf {Y} \\right\\| _ {2} \\tag {3}", + "image_path": "0d7bccbdd1bc87fb82877b10ff6e847a75a55233674f8653e52bcd339cc84fd3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 481, + 268, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 268, + 494 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 268, + 494 + ], + "type": "text", + "content": "and predicted segmentation through cross entropy loss," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 100, + 502, + 287, + 530 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 502, + 287, + 530 + ], + "spans": [ + { + "bbox": [ + 100, + 502, + 287, + 530 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s e g}} = - \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {J + 1} \\mathbf {W} _ {i, j} \\log \\left(\\hat {\\mathbf {W}} _ {i, j}\\right) \\tag {4}", + "image_path": "4c8c407398074dff97d9b995626f77ee77f905fcb23f685cd3b92705b052d5cb.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 540, + 137, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 137, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 137, + 550 + ], + "type": "text", + "content": "Overall, we minimize" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 560, + 287, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 560, + 287, + 574 + ], + "spans": [ + { + "bbox": [ + 113, + 560, + 287, + 574 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s y n}} = \\lambda_ {k p} \\mathcal {L} _ {\\mathrm {k p}} + \\lambda_ {\\text {s e g}} \\mathcal {L} _ {\\text {s e g}} \\tag {5}", + "image_path": "b7f8e6315ba662cffab2b4c9dd2963397e774e4dbafb2c04fb19e4180ab74593.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "text", + "content": "Notably, in Sec. 4.6 we show that supervision in this stage is not required - ground truth " + }, + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{Y}" + }, + { + "bbox": [ + 46, + 582, + 287, + 618 + ], + "type": "text", + "content": " can be replaced by surrogate ground truths to achieve comparable results." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 624, + 287, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 624, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 624, + 287, + 647 + ], + "type": "text", + "content": "3.2. 
Stage II: Self-Supervised Learning on In-the-Wild Data" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 287, + 713 + ], + "type": "text", + "content": "In this stage, we further refine the network using unsupervised losses. The key insight behind the design of the losses is that the human body is composed of limbs, each of which is a rigid part. Therefore, points on a limb move with the limb and should stay roughly at the same location" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 72, + 545, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 119 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 119 + ], + "type": "text", + "content": "in each limb's local coordinate system. To account for this, we propose flow loss that encourages the points to stay in the same location (despite rotation around the limb) within each limb's local cylindrical coordinate." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": "We start by formally defining the key ingredients in the following formulations. In our setup, a human skeleton " + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": " is composed of limbs, each of which is connecting two keypoints. A limb " + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "inline_equation", + "content": "l = (y_{a}, y_{b}) \\in L" + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": " is a line segment connecting the parent " + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "inline_equation", + "content": "y_{a}" + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": " and child keypoints " + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "inline_equation", + "content": "y_{b}" + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": " on this limb, and all surface points on this limb have segmentation label " + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 304, + 119, + 545, + 190 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "text", + "content": "All three proposed losses are in terms of surface points in each predicted limb's local coordinate system. Therefore, we first convert all input points to each limbs' local cylindrical coordinate and compute the radial and axial coordinates. 
Specifically, we project point " + }, + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "inline_equation", + "content": "p \\in \\mathbf{P}" + }, + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "text", + "content": " in global coordinate on to vector " + }, + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "inline_equation", + "content": "\\overrightarrow{\\hat{y}_a\\hat{y}_b}" + }, + { + "bbox": [ + 304, + 190, + 545, + 276 + ], + "type": "text", + "content": ", and calculate the norm of the projected vector" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 365, + 281, + 545, + 308 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 281, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 365, + 281, + 545, + 308 + ], + "type": "interline_equation", + "content": "\\mathbf {z} (p, \\hat {l}) = \\frac {\\left(p - \\hat {y} _ {a}\\right) \\cdot \\left(\\hat {y} _ {b} - \\hat {y} _ {a}\\right)}{\\| \\hat {y} _ {b} - \\hat {y} _ {a} \\| _ {2}} \\tag {6}", + "image_path": "761f9d90610063a68e8e9509df94b93e28b3579165e52d137bdcf287ff187fbf.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 314, + 487, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 314, + 487, + 328 + ], + "spans": [ + { + "bbox": [ + 306, + 314, + 487, + 328 + ], + "type": "text", + "content": "and the distance between the point and " + }, + { + "bbox": [ + 306, + 314, + 487, + 328 + ], + "type": "inline_equation", + "content": "\\overrightarrow{\\hat{y}_a\\hat{y}_b}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 356, + 333, + 545, + 347 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 333, + 545, + 347 + ], + "spans": [ + { + "bbox": [ + 356, + 333, + 545, + 347 + ], + "type": "interline_equation", + "content": "\\mathbf {r} (p, \\hat {l}) = \\| p - \\hat {y} _ {a} - \\mathbf {z} (\\hat {y} _ {b} - \\hat {y} _ {a}, \\hat {l}) \\| _ {2} \\tag {7}", + "image_path": "e01a7fcb26eb7382c7fa247e1dc556dafa523da02d35c9cab58ea7e8cddc9818.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "content": "For simplicity, we use " + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{\\hat{l}}(p)" + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "content": " to represent " + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{z}(p,\\hat{l})" + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{\\hat{l}}(p)" + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "content": " to represent " + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{r}(p,\\hat{l})" + }, + { + "bbox": [ + 304, + 354, + 544, + 379 + ], + "type": "text", + "content": " in the following." 
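A concrete NumPy reading of Eqs. (6)-(7) follows (an illustrative sketch, not the authors' code): the axial coordinate z is the projection of each point onto the limb direction, and the radial coordinate r is interpreted as the distance from the point to the limb axis.

```python
import numpy as np

# Axial (z) and radial (r) cylindrical coordinates of points w.r.t. a limb (y_a, y_b).
def limb_cylindrical_coords(points, y_a, y_b):
    axis = y_b - y_a
    u = axis / np.linalg.norm(axis)                    # unit limb direction
    rel = points - y_a
    z = rel @ u                                        # Eq. (6): projection onto the limb
    r = np.linalg.norm(rel - np.outer(z, u), axis=1)   # Eq. (7): distance to the limb axis
    return z, r

pts = np.random.rand(128, 3)
z, r = limb_cylindrical_coords(pts, np.zeros(3), np.array([0.0, 0.0, 0.5]))
```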
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 379, + 544, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 379, + 544, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 544, + 401 + ], + "type": "text", + "content": "Next, we describe the formulation of each loss function in detail." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": "Flow Loss. Flow loss considers the predictions from two consecutive frames and encourages consistency of the radial and altitude components of all points with respect to scene flow - limbs should move between frames in a way to keep radial and axial coordinates for all points constant. Formally, we define the forward and backward flow losses " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{ff}" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{bf}" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": " respectively) for limbs " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\hat{l}^t = (\\hat{y}_a^t,\\hat{y}_b^t)" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "\\hat{l}^{t + 1} = (\\hat{y}_a^{t + 1},\\hat{y}_b^{t + 1})" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": " for predicted keypoints for timestamp " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "inline_equation", + "content": "t + 1" + }, + { + "bbox": [ + 304, + 402, + 545, + 510 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 516, + 545, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 516, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 315, + 516, + 545, + 559 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {f f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {r} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right| + \\right. 
\\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t} + f _ {i} ^ {t}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t}\\right) \\right|) \\tag {8} \\\\ \\end{array}", + "image_path": "e7c8cbb01d91da482cfc828df5ebee595f0d04015fec66c896b8b930e833acc9.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 582, + 545, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 582, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 315, + 582, + 545, + 624 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {b f} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} ^ {t + 1} \\cdot \\left(\\left| \\mathbf {r} _ {\\hat {l} t} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {r} _ {\\hat {l} t + 1} \\left(p _ {i} ^ {t + 1}\\right) \\right| + \\right. \\\\ \\left| \\mathbf {z} _ {\\hat {l} ^ {t}} \\left(p _ {i} ^ {t + 1} + b _ {i} ^ {t + 1}\\right) - \\mathbf {z} _ {\\hat {l} ^ {t + 1}} \\left(p _ {i} ^ {t + 1}\\right) \\right|) \\tag {9} \\\\ \\end{array}", + "image_path": "f8d919667dbff5c934de479a82a827e248367fbeccd94806deab06f1c030fcfb.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "inline_equation", + "content": "f^t" + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "content": " is the forward flow for each point " + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "inline_equation", + "content": "p^t \\in \\mathbf{P}^t" + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "inline_equation", + "content": "b^{t+1}" + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "content": " is the backward flow for each point " + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "inline_equation", + "content": "p^{t+1} \\in \\mathbf{P}^{t+1}" + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "content": ". We use Neural Scene Flow Prior [14] to estimate flow for two consecutive frames of points. 
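The forward term of the flow loss (Eq. (8)) can be sketched as below (illustrative NumPy, assuming the cylindrical-coordinate helper from the previous sketch, re-declared here as `_cyl`); the weight is each point's soft assignment to the limb's parent joint a. The backward term (Eq. (9)) is symmetric.

```python
import numpy as np

def _cyl(points, y_a, y_b):
    # axial (z) and radial (r) coordinates w.r.t. the segment (y_a, y_b), as in Eqs. (6)-(7)
    u = (y_b - y_a) / np.linalg.norm(y_b - y_a)
    rel = points - y_a
    z = rel @ u
    return z, np.linalg.norm(rel - np.outer(z, u), axis=1)

# Forward flow-consistency term of Eq. (8) for one limb: points warped by the
# estimated scene flow should keep the same (r, z) limb coordinates in frame t+1.
def forward_flow_loss(points_t, flow_t, limb_t, limb_t1, w_ta):
    z0, r0 = _cyl(points_t, *limb_t)                # frame-t coordinates
    z1, r1 = _cyl(points_t + flow_t, *limb_t1)      # warped points, frame-(t+1) limb frame
    return np.mean(w_ta * (np.abs(r1 - r0) + np.abs(z1 - z0)))

# Eq. (9) swaps the roles of the two frames (backward flow); Eq. (10) averages
# the forward and backward terms over all limbs.
```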
The overall flow loss for frame " + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 628, + 545, + 677 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 370, + 681, + 545, + 710 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 681, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 370, + 681, + 545, + 710 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f l o w}} = \\frac {1}{| L |} \\sum_ {\\hat {l} t} \\frac {\\mathcal {L} _ {f f} + \\mathcal {L} _ {b f}}{2} \\tag {10}", + "image_path": "b55d46e63437934e2e72199f69ef1bbc0cdc1f11ddb933d4c667c904240f6fdb.jpg" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1160" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 89, + 279, + 178 + ], + "blocks": [ + { + "bbox": [ + 89, + 76, + 258, + 88 + ], + "lines": [ + { + "bbox": [ + 89, + 76, + 258, + 88 + ], + "spans": [ + { + "bbox": [ + 89, + 76, + 258, + 88 + ], + "type": "text", + "content": "Stage I: Initialization on Synthetic Data" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 89, + 279, + 178 + ], + "lines": [ + { + "bbox": [ + 67, + 89, + 279, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 279, + 178 + ], + "type": "image", + "image_path": "e2241ea31bfe3b1b334d4aec242957af7570a85f23e0d4f8a61bd8a2d7a27339.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 186, + 353, + 199 + ], + "lines": [ + { + "bbox": [ + 239, + 186, + 353, + 199 + ], + "spans": [ + { + "bbox": [ + 239, + 186, + 353, + 199 + ], + "type": "text", + "content": "Unsupervised Losses" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 92, + 541, + 175 + ], + "blocks": [ + { + "bbox": [ + 299, + 76, + 526, + 88 + ], + "lines": [ + { + "bbox": [ + 299, + 76, + 526, + 88 + ], + "spans": [ + { + "bbox": [ + 299, + 76, + 526, + 88 + ], + "type": "text", + "content": "Stage II: Unsupervised Learning on In-the-Wild Data" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 92, + 541, + 175 + ], + "lines": [ + { + "bbox": [ + 306, + 92, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 306, + 92, + 541, + 175 + ], + "type": "image", + "image_path": "e6c46fcd903a52cbf3d9ea1d82cf231f133ad9db0823c9be84fb08d9e6962afb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 56, + 219, + 121, + 275 + ], + "blocks": [ + { + "bbox": [ + 108, + 204, + 144, + 213 + ], + "lines": [ + { + "bbox": [ + 108, + 204, + 144, + 213 + ], + "spans": [ + { + "bbox": [ + 108, + 204, + 144, + 213 + ], + "type": "text", + "content": "Flow loss" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 219, + 121, + 275 + ], + "lines": [ + { + "bbox": [ + 56, + 219, + 121, + 275 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 121, + 275 + ], + 
"type": "image", + "image_path": "c19e32e7f6e091575f8c198f70f1411cfba5908a5377ce6ddceddb13810fe484.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 280, + 222, + 294 + ], + "lines": [ + { + "bbox": [ + 56, + 280, + 222, + 294 + ], + "spans": [ + { + "bbox": [ + 56, + 280, + 222, + 294 + ], + "type": "text", + "content": "(a) After moving, points stay in the same place (despite rotation around axis) within each limb's local cylindrical coordinate system." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 134, + 220, + 212, + 277 + ], + "blocks": [ + { + "bbox": [ + 134, + 220, + 212, + 277 + ], + "lines": [ + { + "bbox": [ + 134, + 220, + 212, + 277 + ], + "spans": [ + { + "bbox": [ + 134, + 220, + 212, + 277 + ], + "type": "image", + "image_path": "ee82e58063d9c4b155af987824ad367617c17e974f3bc1a2ff60e862b13eb8fb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 303, + 544, + 323 + ], + "lines": [ + { + "bbox": [ + 46, + 303, + 544, + 323 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 544, + 323 + ], + "type": "text", + "content": "Figure 2. Overview of our method. In Stage I, we warm-up the keypoint predictor and body part segmentation predictor on a small synthetic dataset. Then, in Stage II we refine the 3D keypoint predictions on a large in-the-wild dataset with unsupervised losses. The main losses are depicted on the bottom." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 251, + 232, + 345, + 271 + ], + "blocks": [ + { + "bbox": [ + 261, + 205, + 332, + 213 + ], + "lines": [ + { + "bbox": [ + 261, + 205, + 332, + 213 + ], + "spans": [ + { + "bbox": [ + 261, + 205, + 332, + 213 + ], + "type": "text", + "content": "Points-to-limb loss" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 251, + 232, + 345, + 271 + ], + "lines": [ + { + "bbox": [ + 251, + 232, + 345, + 271 + ], + "spans": [ + { + "bbox": [ + 251, + 232, + 345, + 271 + ], + "type": "image", + "image_path": "4705f627c4c7da48ea1250dc5f5c6d455383ec604cac68cabb703e4aecea0e16.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 279, + 365, + 293 + ], + "lines": [ + { + "bbox": [ + 239, + 279, + 365, + 293 + ], + "spans": [ + { + "bbox": [ + 239, + 279, + 365, + 293 + ], + "type": "text", + "content": "(b) Minimize points-to-limb distance to encourage the limb to stay within the body." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 422, + 236, + 509, + 270 + ], + "blocks": [ + { + "bbox": [ + 438, + 205, + 493, + 215 + ], + "lines": [ + { + "bbox": [ + 438, + 205, + 493, + 215 + ], + "spans": [ + { + "bbox": [ + 438, + 205, + 493, + 215 + ], + "type": "text", + "content": "Symmetry loss" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 422, + 236, + 509, + 270 + ], + "lines": [ + { + "bbox": [ + 422, + 236, + 509, + 270 + ], + "spans": [ + { + "bbox": [ + 422, + 236, + 509, + 270 + ], + "type": "image", + "image_path": "9136ace268ca2b5d4dd47262bdc7e07315e5ff57d75fa2c86dd65ab0c40c1236.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 278, + 533, + 293 + ], + "lines": [ + { + "bbox": [ + 395, + 278, + 533, + 293 + ], + "spans": [ + { + "bbox": [ + 395, + 278, + 533, + 293 + ], + "type": "text", + "content": "(c) Points are symmetrical around limb. (i.e. points with similar height z have similar radius r)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 332, + 287, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 332, + 287, + 439 + ], + "spans": [ + { + "bbox": [ + 46, + 332, + 287, + 439 + ], + "type": "text", + "content": "By design, the flow loss value is the same if the radial and axial values for all points in a local coordinate system are the same in consecutive frames. This would happen if a limb in both frames are shifted in their respective orthogonal direction by the same amount. Theoretically, it is unlikely to happen for all limbs, but empirically we observe that with flow loss alone the skeleton would move out of the point cloud. Therefore, we need additional losses to make the keypoints stay within the body." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 439, + 287, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 439, + 287, + 475 + ], + "spans": [ + { + "bbox": [ + 46, + 439, + 287, + 475 + ], + "type": "text", + "content": "Points-to-Limb Loss. For a predicted limb " + }, + { + "bbox": [ + 46, + 439, + 287, + 475 + ], + "type": "inline_equation", + "content": "\\hat{l} = (\\hat{y}_a, \\hat{y}_b)" + }, + { + "bbox": [ + 46, + 439, + 287, + 475 + ], + "type": "text", + "content": ", we want the points on this limb to be close to it. 
Hence, we introduce a points-to-limb (p2l) loss" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 113, + 479, + 287, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 479, + 287, + 505 + ], + "spans": [ + { + "bbox": [ + 113, + 479, + 287, + 505 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p 2 l} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\mathbf {d} \\left(p _ {i}, \\hat {l}\\right) \\tag {11}", + "image_path": "c3ad198a5fc9c3d24c4ad5a72f2764b3a59c3f63601c818f399e6d1edcd96616.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 514, + 287, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 550 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 514, + 287, + 550 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 514, + 287, + 550 + ], + "type": "text", + "content": " is the Euclidean distance function between a point and a line segment. We sum over all points to get the overall points-to-limb loss," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 130, + 549, + 287, + 577 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 549, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 130, + 549, + 287, + 577 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} = \\frac {1}{| L |} \\sum_ {\\hat {l}} \\mathcal {L} _ {\\mathrm {p} 2 \\mathrm {l}} ^ {\\hat {l}} \\tag {12}", + "image_path": "b287b950a4313e0459be8a9c678c0242d749c30d348a5aeaaab143914cfd42a1.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "text", + "content": "Symmetry Loss. Symmetry loss encourages the predicted limb " + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "inline_equation", + "content": "\\hat{l}" + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "text", + "content": " to be in a position such that all points around this limb are roughly symmetrical around it. That is to say, points with similar axial coordinates " + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{\\hat{l}}" + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "text", + "content": " should have similar radial values " + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_{\\hat{l}}" + }, + { + "bbox": [ + 46, + 586, + 287, + 646 + ], + "type": "text", + "content": ". 
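A minimal NumPy sketch of the points-to-limb loss in Eqs. (11)-(12), assuming d(p, l) is the distance from a point to the limb segment (illustrative, not the authors' code):

```python
import numpy as np

def point_to_segment_dist(points, y_a, y_b):
    # Euclidean distance d(p, l) from each point to the limb segment (y_a, y_b)
    seg = y_b - y_a
    t = np.clip(((points - y_a) @ seg) / (seg @ seg), 0.0, 1.0)  # clamp onto the segment
    closest = y_a + t[:, None] * seg
    return np.linalg.norm(points - closest, axis=1)

def points_to_limb_loss(points, y_a, y_b, w_a):
    # Eq. (11): soft-assignment-weighted mean point-to-limb distance for one limb
    return np.mean(w_a * point_to_segment_dist(points, y_a, y_b))

# Eq. (12) averages points_to_limb_loss over all predicted limbs.
```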
To that end, we introduce symmetry loss," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 89, + 665, + 287, + 690 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 665, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 89, + 665, + 287, + 690 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s y m} ^ {\\hat {l}} = \\frac {1}{N} \\sum_ {i} \\hat {\\mathbf {W}} _ {i a} \\left(\\mathbf {r} _ {\\hat {l}} \\left(p _ {i}\\right) - \\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right)\\right) ^ {2} \\tag {13}", + "image_path": "b6a5ac6f7730858a87767efd9c77dc2e55f3baf2a2693c145e56881ed0a0ec91.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\bar{\\mathbf{r}}_i(p_i)" + }, + { + "bbox": [ + 47, + 701, + 287, + 714 + ], + "type": "text", + "content": " is the weighted mean of radial values of points" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 306, + 333, + 451, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 333, + 451, + 345 + ], + "spans": [ + { + "bbox": [ + 306, + 333, + 451, + 345 + ], + "type": "text", + "content": "with similar axial coordinates as " + }, + { + "bbox": [ + 306, + 333, + 451, + 345 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 306, + 333, + 451, + 345 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 309, + 352, + 545, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 352, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 309, + 352, + 545, + 384 + ], + "type": "interline_equation", + "content": "\\bar {\\mathbf {r}} _ {\\hat {l}} \\left(p _ {i}\\right) = \\frac {\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right) \\mathbf {r} _ {\\hat {l}} \\left(p _ {j}\\right)}{\\sum_ {j} K _ {h} \\left(\\mathbf {z} _ {\\hat {l}} \\left(p _ {i}\\right) , \\mathbf {z} _ {\\hat {l}} \\left(p _ {j}\\right)\\right) \\left(\\hat {\\mathbf {W}} _ {i *} \\cdot \\hat {\\mathbf {W}} _ {j *}\\right)} \\tag {14}", + "image_path": "e983d4e2a2cb9affe2449766e8543ebf5513073eee658548884dcb2cd72a0a09.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "K_{h}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " is Gaussian kernel with bandwidth " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": ", i.e. " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "K_{h}(x,y) = e^{-(\\frac{x - y}{h})^{2}}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}_{i*} \\in \\mathbb{R}^{J}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "i_{th}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " row of " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": ", and the dot product " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}_{i*} \\cdot \\hat{\\mathbf{W}}_{j*}" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " measures the similarity of part assignment of point " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": ", as we want the value of " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\bar{r}_i^k" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": " to be calculated using the points from the same part as point " + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 389, + 545, + 450 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 451, + 485, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 451, + 485, + 463 + ], + "spans": [ + { + "bbox": [ + 306, + 451, + 485, + 463 + ], + "type": "text", + "content": "The overall symmetry loss is over all points," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 382, + 469, + 545, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 469, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 382, + 469, + 545, + 495 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s y m} = \\frac {1}{| L |} \\sum_ {l \\in L} \\mathcal {L} _ {s y m} ^ {l} \\tag {15}", + "image_path": "e2c955c426d342eeab7e37f201e2c98c2c9a8f911efe3f6e56594532b1aa48c6.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 501, + 545, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 501, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 501, + 545, + 536 + ], + "type": "text", + "content": "Joint-to-Part Loss. In addition, we encourage each joint to be close to the center of the points on that part using a joint-to-part loss." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 371, + 544, + 545, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 371, + 544, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 371, + 544, + 545, + 574 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {j 2 p} ^ {j} = \\left\\| \\hat {y} _ {j} - \\frac {\\sum_ {i} \\hat {\\mathbf {W}} _ {i j} p _ {i}}{\\sum_ {i} \\hat {\\mathbf {W}} _ {i j}} \\right\\| _ {2} \\tag {16}", + "image_path": "a540a4d06aea7345529e560d1b415f01e5c55f19c759b30870d2282e7376fae0.jpg" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 306, + 580, + 544, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 580, + 544, + 593 + ], + "spans": [ + { + "bbox": [ + 306, + 580, + 544, + 593 + ], + "type": "text", + "content": "We sum over all joints to get the overall joint-to-part loss." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 389, + 609, + 545, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 609, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 389, + 609, + 545, + 635 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {j 2 p} = \\frac {1}{J} \\sum_ {j} \\mathcal {L} _ {j 2 p} ^ {j} \\tag {17}", + "image_path": "ca99dc22f73b3382af572ce278bd58f245ad8ee379550dc4b31a2cbb8d64b748.jpg" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 304, + 643, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 643, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 643, + 545, + 689 + ], + "type": "text", + "content": "Note that although the ground truth location of joints are not in the center of points on the corresponding part, keeping this loss is essential in making the unsupervised training more robust." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "In practice, jointly optimizing " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{Y}}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " in Stage II leads to unstable training curves. 
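Similarly, a short sketch of the joint-to-part loss in Eqs. (16)-(17) (illustrative NumPy, not the authors' code):

```python
import numpy as np

def joint_to_part_loss(Y_hat, points, W_hat, eps=1e-8):
    # Eqs. (16)-(17): pull each predicted joint toward the soft-assignment-weighted
    # centroid of the points on its part, then average over joints.
    losses = []
    for j in range(Y_hat.shape[0]):
        w = W_hat[:, j]
        centroid = (w[:, None] * points).sum(axis=0) / (w.sum() + eps)
        losses.append(np.linalg.norm(Y_hat[j] - centroid))
    return float(np.mean(losses))
```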
Hence, we use the pre-trained" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 314, + 757 + ], + "type": "text", + "content": "1161" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 94, + 282, + 205 + ], + "blocks": [ + { + "bbox": [ + 50, + 94, + 282, + 205 + ], + "lines": [ + { + "bbox": [ + 50, + 94, + 282, + 205 + ], + "spans": [ + { + "bbox": [ + 50, + 94, + 282, + 205 + ], + "type": "image", + "image_path": "94c477a0fef43640d8aee39e48a863e21dfb0607695fb10236540fe888404a00.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 213, + 268, + 224 + ], + "lines": [ + { + "bbox": [ + 66, + 213, + 268, + 224 + ], + "spans": [ + { + "bbox": [ + 66, + 213, + 268, + 224 + ], + "type": "text", + "content": "Figure 3. Effect of unsupervised losses on perturbed skeleton." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 243, + 287, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 243, + 287, + 290 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 287, + 290 + ], + "type": "text", + "content": "segmentation branch from Stage I to run segmentation inference to get the segmentation labels on all of the training samples in the beginning of Stage II, and " + }, + { + "bbox": [ + 46, + 243, + 287, + 290 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}" + }, + { + "bbox": [ + 46, + 243, + 287, + 290 + ], + "type": "text", + "content": " is the one-hot encoding of the predicted segmentation labels." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 290, + 287, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 290, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 290, + 287, + 350 + ], + "type": "text", + "content": "Segmentation Loss. Lastly, we notice that keeping the segmentation loss at this stage further regularizes the backbone and leads to better quantitative performance. We use the inferred segmentation " + }, + { + "bbox": [ + 46, + 290, + 287, + 350 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{W}}" + }, + { + "bbox": [ + 46, + 290, + 287, + 350 + ], + "type": "text", + "content": " as the surrogate ground truth and minimize cross entropy as in Eq. (4)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 350, + 287, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 350, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 47, + 350, + 287, + 372 + ], + "type": "text", + "content": "Training objective. 
The overall training objective during Stage II is to minimize" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 384, + 287, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 384, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 56, + 384, + 287, + 412 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} = \\lambda_ {f l o w} \\mathcal {L} _ {f l o w} + \\lambda_ {\\mathrm {p 2 l}} \\mathcal {L} _ {\\mathrm {p 2 l}} + \\lambda_ {s y m} \\mathcal {L} _ {s y m} \\\\ + \\lambda_ {\\mathrm {j} 2 \\mathrm {p}} \\mathcal {L} _ {\\mathrm {j} 2 \\mathrm {p}} + \\lambda_ {\\mathrm {s e g}} \\mathcal {L} _ {\\mathrm {s e g}} \\tag {18} \\\\ \\end{array}", + "image_path": "d6d8bfd963665cd6bbccff0f13ba0564d56318d87220b9e285add880712318e8.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "spans": [ + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "text", + "content": "To illustrate the effect of the three unsupervised losses " + }, + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{flow}, \\mathcal{L}_{p2l}" + }, + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{sym})" + }, + { + "bbox": [ + 46, + 417, + 287, + 489 + ], + "type": "text", + "content": ", we show the result of applying these losses on a perturbed ground truth skeleton (Fig. 3). As shown, the proposed unsupervised losses effectively moves the perturbed skeleton to locations that are closer to ground truth." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 498, + 128, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 498, + 128, + 511 + ], + "spans": [ + { + "bbox": [ + 47, + 498, + 128, + 511 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 516, + 180, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 180, + 529 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 180, + 529 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": "The predictor model " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": " consists of a transformer backbone with fully connected layers for predicting joints and segmentation respectively. We use the same transformer backbone as in HUM3DIL [29]. A fully connected layer is applied to the output of transformer head to regress the predicted " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\hat{W}" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\hat{Y}" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": " respectively. 
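The Stage II objective of Eq. (18) is a weighted sum of the five terms; a one-function sketch, with default weights mirroring the values reported in Sec. 4.1:

```python
# Eq. (18): weighted combination of the Stage II losses.
def stage2_objective(l_flow, l_p2l, l_sym, l_j2p, l_seg,
                     lam_flow=0.02, lam_p2l=0.01, lam_sym=0.5, lam_j2p=2.0, lam_seg=0.5):
    return (lam_flow * l_flow + lam_p2l * l_p2l + lam_sym * l_sym
            + lam_j2p * l_j2p + lam_seg * l_seg)
```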
There are 352,787 trainable parameters in total. We set the maximum number of input LiDAR points to 1024, and zero-pad or downsample the point clouds with fewer or more number of points. The flow is obtained using a self-supervised test-time optimization method [14]. The network is trained on 4 TPUs. We train Stage I for 200 epochs and Stage II for 75 epochs, both with batch size 32, base learning rate of " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": ", and exponential decay 0.9. Stage I and II each finishes in about 6 hours. The loss weights in Eq. (5) are " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\lambda_{kp} = 0.5" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\lambda_{seg} = 1" + }, + { + "bbox": [ + 46, + 534, + 289, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": "The loss weights in Eq. (18) are " + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "inline_equation", + "content": "\\lambda_{flow} = 0.02" + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "inline_equation", + "content": "\\lambda_{p2l} = 0.01" + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "inline_equation", + "content": "\\lambda_{sym} = 0.5" + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "inline_equation", + "content": "\\lambda_{j2p} = 2" + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "inline_equation", + "content": "\\lambda_{seg} = 0.5" + }, + { + "bbox": [ + 305, + 72, + 546, + 109 + ], + "type": "text", + "content": ". The kernel bandwidth Eq. (14) is 0.1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 121, + 423, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 121, + 423, + 133 + ], + "spans": [ + { + "bbox": [ + 305, + 121, + 423, + 133 + ], + "type": "text", + "content": "4.2. Dataset and Metrics" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 138, + 547, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 138, + 547, + 268 + ], + "spans": [ + { + "bbox": [ + 304, + 138, + 547, + 268 + ], + "type": "text", + "content": "We construct a synthetic dataset with 1,000 sequences of 16-frame raycasted point clouds for Stage I training. Each sequence starts with the same standing pose and ends in a random pose. We find that data augmentation is essential in Stage I training. 
To simulate real-world noisy background and occlusion, we apply various data augmentations to the synthetic data, including randomly downsample, random mask, add ground clusters, add background clusters, add a second person, add noise to each point, scale the person. We include examples of augmented synthetic data in Fig. 4." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 328, + 270, + 527, + 423 + ], + "blocks": [ + { + "bbox": [ + 328, + 270, + 527, + 423 + ], + "lines": [ + { + "bbox": [ + 328, + 270, + 527, + 423 + ], + "spans": [ + { + "bbox": [ + 328, + 270, + 527, + 423 + ], + "type": "image", + "image_path": "46b0b01e293159ea5e03a7b82bbd0621d66fb8aa7fa3b31e0c0263f694d1b38a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 434, + 546, + 463 + ], + "lines": [ + { + "bbox": [ + 304, + 434, + 546, + 463 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 546, + 463 + ], + "type": "text", + "content": "Figure 4. Data augmentations applied to the synthetic point clouds (colored by ground truth segmentation labels). Ground truth skeletons are shown in purple. Background points are in blue." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "content": "In Stage II, we train on the entire Waymo Open dataset (WOD) training set (with around 200,000 unlabeled samples). As the official WOD testing subset is hidden from the public, we randomly choose " + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "content": " of the validation set as the validation split, and the rest as the test split for benchmarking. We report average Mean Per Joint Position Error (MPJPE) on test set at the end of each stage. Formally, for a single sample, let " + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "inline_equation", + "content": "\\hat{Y} \\in \\mathcal{R}^{J \\times 3}" + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "content": " be the predicted keypoints, " + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "inline_equation", + "content": "Y \\in \\mathcal{R}^{J \\times 3}" + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "content": " the ground truth keypoints, and " + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "inline_equation", + "content": "v \\in \\{0,1\\}^J" + }, + { + "bbox": [ + 304, + 464, + 545, + 586 + ], + "type": "text", + "content": " the visibility indicator annotated per keypoint." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 344, + 598, + 545, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 598, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 344, + 598, + 545, + 628 + ], + "type": "interline_equation", + "content": "\\operatorname {M P J P E} (Y, \\hat {Y}) = \\frac {1}{\\sum_ {j} v _ {j}} \\sum_ {j \\in [ J ]} v _ {j} \\| y _ {j} - \\hat {y} \\| _ {2} \\tag {19}", + "image_path": "789a5343c79f19d698715753e71ad1ffb2bab1b70082173956ed974da4578b74.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 641, + 546, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 546, + 712 + ], + "type": "text", + "content": "Note that in this Stage, we do Hungarian matching between the predicted and annotated keypoints per frame, and then report MPJPE on matched keypoints. We report matched MPJPE because the method is intended for scenarios where correspondence between keypoints in the unlabeled training data and downstream data is unknown." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1162" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 74, + 254, + 355 + ], + "blocks": [ + { + "bbox": [ + 75, + 74, + 254, + 355 + ], + "lines": [ + { + "bbox": [ + 75, + 74, + 254, + 355 + ], + "spans": [ + { + "bbox": [ + 75, + 74, + 254, + 355 + ], + "type": "image", + "image_path": "235df5a86dacfa377f2b945d9cac5e30d20a2b314a17b86314d035b487064d79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 363, + 288, + 394 + ], + "lines": [ + { + "bbox": [ + 46, + 363, + 288, + 394 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 288, + 394 + ], + "type": "text", + "content": "Figure 5. Visualizations of predictions on WOD at the end of Stage I and Stage II. Points are colored by predicted segmentation labels. Ground truth keypoints are in green and predicted keypoints and skeletons are in red." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 406, + 105, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 406, + 105, + 417 + ], + "spans": [ + { + "bbox": [ + 47, + 406, + 105, + 417 + ], + "type": "text", + "content": "4.3. Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 423, + 287, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 423, + 287, + 519 + ], + "spans": [ + { + "bbox": [ + 46, + 423, + 287, + 519 + ], + "type": "text", + "content": "In this section we perform quantitative evaluation of GC-KPL at the end of Stage I and II in Tab. 2. Qualitative results are in Fig. 5. As shown, after first stage where we train on a synthetic dataset constructed from posed body models with carefully chosen data augmentations, we are able to predict reasonable human keypoints on in-the-wild point clouds. The second stage our novel unsupervised losses further refine the predicted keypoints." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 529, + 287, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 287, + 555 + ], + "type": "text", + "content": "4.4. Downstream Task: Few-shot 3D Keypoint Learning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 559, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 287, + 677 + ], + "type": "text", + "content": "In this experiment, we show that the backbone of our model benefits from unsupervised training on large amount of unlabeled data, and can be useful for downstream finetuning tasks. We start from our pre-trained backbone after Stage II, and fine-tune with annotated training samples from WOD by minimizing mean per joint error. We include few-shot experiments where we fine-tune with a extremely small amount of data (10% and 1% of the training set), to represent challenging scenarios where there is a limited amount of annotated data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": "We include the LiDAR-only version of HUM3DIL (a state-of-the-art model on WOD) [29] as a strong baseline. The quantitative results (Tab. 1) suggest that our back" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "bone learns useful information from the unlabeled in-the-wild data and enables a significant performance boost on the downstream tasks. Compared to a randomly initialized backbone as used in HUM3DIL, our backbone leads to over " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "2\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " of decrease in MPJPE in downstream fine-tuning experiments, which is a significant improvement for the 3D human keypoint estimation task." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 156, + 545, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 156, + 545, + 202 + ], + "spans": [ + { + "bbox": [ + 304, + 156, + 545, + 202 + ], + "type": "text", + "content": "We visualize the predicted keypoints under different data regime in Fig. 6. As shown, models fine-tuned from our backbone is able to capture fine details on the arms and overall produces more accurate results than HUM3DIL." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "spans": [ + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "text", + "content": "To the best of our knowledge, there does not exist previous works on completely unsupervised human keypoint estimation from point clouds. 
We additionally experiment with using a readout layer on top of the features learned by a state-of-the-art point cloud SSL method 3D-OAE [30], but the MPJPE is " + }, + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "inline_equation", + "content": "15\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "text", + "content": " (compared to " + }, + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "inline_equation", + "content": "10.10\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 202, + 546, + 321 + ], + "type": "text", + "content": " from GC-KPL). Hence we consider the baselines we adopt here strong and complete. In Sec. 4.6, we further challenge our method by comparing to the domain adaptation setup and demonstrate that the performance of GC-KPL is still superior." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 327, + 419, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 327, + 419, + 340 + ], + "spans": [ + { + "bbox": [ + 306, + 327, + 419, + 340 + ], + "type": "text", + "content": "4.5. Domain adaptation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": "The configuration where we use ground truth labels in Stage I and unsupervised training in Stage II could be seen as a domain adaptation (DA) technique. Thus it is useful to compare the proposed method with a commonly-used domain adaptation method. We train the same backbone model using a mix of real and synthetic data and a gradient reversal layer (aka DA loss) [5] to help the network learn domain-invariant keypoint features. Results in Tab. 3 demonstrate that GC-KPL yields superior accuracy compared with the DA method (MPJPE 10.1 vs " + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "inline_equation", + "content": "11.35\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 345, + 545, + 464 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 470, + 373, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 470, + 373, + 481 + ], + "spans": [ + { + "bbox": [ + 306, + 470, + 373, + 481 + ], + "type": "text", + "content": "4.6. Ablations" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 487, + 545, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 487, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 304, + 487, + 545, + 570 + ], + "type": "text", + "content": "Effect of using GT bounding boxes in pre-processing. We cropped human point clouds from the entire scene by including only points within GT bounding boxes. We also conducted experiments where we train with detected bounding boxes from raw LiDAR scans using a SoTA 3D detector. Results suggest that GC-KPL is robust to noise in 3D detection, as there were no noticeable changes in metrics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "text", + "content": "Effect of synthetic dataset size. 
In our method, Stage I serves as a model initialization step, where we show that training on a small synthetic dataset (16,000 samples) with properly chosen data augmentations is sufficient for the model to learn useful semantics. We further investigate the effect of synthetic dataset size during Stage I. We experiment with larger dataset sizes (160,000 and 1,600,000 samples) and observe that the effect of increasing synthetic dataset size is insignificant on " + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{MPJPE}_{\\mathrm{matched}}" + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "text", + "content": " at the end of Stage I - it decreased from " + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "inline_equation", + "content": "17.7\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "inline_equation", + "content": "17.6\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 571, + 546, + 713 + ], + "type": "text", + "content": ". The lack of a notable improvement for larger dataset sizes is likely due to the limited variability of generated poses in the synthetic data (see Supple" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1163" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 71, + 257, + 323 + ], + "blocks": [ + { + "bbox": [ + 75, + 71, + 257, + 323 + ], + "lines": [ + { + "bbox": [ + 75, + 71, + 257, + 323 + ], + "spans": [ + { + "bbox": [ + 75, + 71, + 257, + 323 + ], + "type": "image", + "image_path": "8dc2fe390528e659e81192b9044aa880ff139f08e5825c8a30a66b34d0b122a9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 139, + 326, + 235, + 335 + ], + "lines": [ + { + "bbox": [ + 139, + 326, + 235, + 335 + ], + "spans": [ + { + "bbox": [ + 139, + 326, + 235, + 335 + ], + "type": "text", + "content": "(a) Fine-tune on " + }, + { + "bbox": [ + 139, + 326, + 235, + 335 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 139, + 326, + 235, + 335 + ], + "type": "text", + "content": " training set" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 258, + 72, + 384, + 323 + ], + "blocks": [ + { + "bbox": [ + 258, + 72, + 384, + 323 + ], + "lines": [ + { + "bbox": [ + 258, + 72, + 384, + 323 + ], + "spans": [ + { + "bbox": [ + 258, + 72, + 384, + 323 + ], + "type": "image", + "image_path": "bf2689fb1e1f498337db157c972e6efcc82d78b530c8b7c4f39d2e975113622b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 271, + 326, + 362, + 335 + ], + "lines": [ + { + "bbox": [ + 271, + 326, + 362, + 335 + ], + "spans": [ + { + "bbox": [ + 271, + 326, + 362, + 335 + ], + "type": "text", + "content": "(b) Fine-tune on " + }, + { + "bbox": [ + 271, + 326, + 362, + 335 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 271, + 326, + 362, + 335 + ], + "type": "text", + "content": " training set" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": 
"image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 386, + 72, + 516, + 323 + ], + "blocks": [ + { + "bbox": [ + 386, + 72, + 516, + 323 + ], + "lines": [ + { + "bbox": [ + 386, + 72, + 516, + 323 + ], + "spans": [ + { + "bbox": [ + 386, + 72, + 516, + 323 + ], + "type": "image", + "image_path": "561b3aee6120df91a4d59a2d47092628e9b66ea246cf2d73ff471e17621a1132.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 409, + 326, + 497, + 335 + ], + "lines": [ + { + "bbox": [ + 409, + 326, + 497, + 335 + ], + "spans": [ + { + "bbox": [ + 409, + 326, + 497, + 335 + ], + "type": "text", + "content": "(c) Fine-tune on " + }, + { + "bbox": [ + 409, + 326, + 497, + 335 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 409, + 326, + 497, + 335 + ], + "type": "text", + "content": " training set" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 347, + 547, + 368 + ], + "lines": [ + { + "bbox": [ + 46, + 347, + 547, + 368 + ], + "spans": [ + { + "bbox": [ + 46, + 347, + 547, + 368 + ], + "type": "text", + "content": "Figure 6. Predicted keypoints from fine-tuning with different amount of annotated data. The points are colored by predicted segmentation labels by our model. Predicted keypoints are shown in red." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 48, + 378, + 545, + 475 + ], + "blocks": [ + { + "bbox": [ + 48, + 378, + 545, + 475 + ], + "lines": [ + { + "bbox": [ + 48, + 378, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 48, + 378, + 545, + 475 + ], + "type": "table", + "html": "
MethodBackboneStage I supervised1% training set MPJPE cm. (gain)10% training set MPJPE cm. (gain)100% training set MPJPE cm. (gain)
HUM3DIL [29]Randomly initialized19.5716.3612.21
Pre-trained on synthetic only18.52 (-1.05)15.10 (-1.26)11.27 (-0.94)
GC-KPLPre-trained on 5,000 WOD-train17.87 (-1.70)14.51 (-1.85)10.73 (-1.48)
Pre-trained on 200,000 WOD-train17.80 (-1.77)14.30 (-2.06)10.60 (-1.61)
Pre-trained on 200,000 WOD-train17.20 (-2.37)13.40 (-2.96)10.10 (-2.11)
", + "image_path": "857d7c4785e6e810c95ce2fa60973eac22a2da6a5d7a2678ff5d5a7fe212d259.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 80, + 509, + 255, + 571 + ], + "blocks": [ + { + "bbox": [ + 46, + 482, + 546, + 502 + ], + "lines": [ + { + "bbox": [ + 46, + 482, + 546, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 482, + 546, + 502 + ], + "type": "text", + "content": "Table 1. Downstream fine-tuning results. Check marks in \"Stage I supervised\" mean that we use ground truth part labels in Stage I, otherwise we use KMeans labels." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 80, + 509, + 255, + 571 + ], + "lines": [ + { + "bbox": [ + 80, + 509, + 255, + 571 + ], + "spans": [ + { + "bbox": [ + 80, + 509, + 255, + 571 + ], + "type": "table", + "html": "
Training dataMPJPEmatchd (↓)
Synthetic only17.70
5,000 WOD-train14.64
200,000 WOD-train13.92
", + "image_path": "2b724ef6ef1e37a1fcd2d5bca2ace917a76c092949837e058db89b07f4a1a1fb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 67, + 599, + 268, + 661 + ], + "blocks": [ + { + "bbox": [ + 86, + 574, + 247, + 586 + ], + "lines": [ + { + "bbox": [ + 86, + 574, + 247, + 586 + ], + "spans": [ + { + "bbox": [ + 86, + 574, + 247, + 586 + ], + "type": "text", + "content": "Table 2. Unsupervised learning (Stage II) results." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 67, + 599, + 268, + 661 + ], + "lines": [ + { + "bbox": [ + 67, + 599, + 268, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 599, + 268, + 661 + ], + "type": "table", + "html": "
Domain distributionDA lossMPJPE (↓)
100% real12.21
50/50% real/synthetic12.08
50/50% real/synthetic11.35
", + "image_path": "1d0c9bc76f2ad60d39dfc5760a39ca04b7b42e24daf47ba7120fe11b67aabad4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 669, + 286, + 688 + ], + "lines": [ + { + "bbox": [ + 46, + 669, + 286, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 669, + 286, + 688 + ], + "type": "text", + "content": "Table 3. Unsupervised domain adaptation results evaluated on WOD validation set." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 512, + 384, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 512, + 384, + 522 + ], + "spans": [ + { + "bbox": [ + 306, + 512, + 384, + 522 + ], + "type": "text", + "content": "mental for details)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 523, + 545, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 523, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 523, + 545, + 704 + ], + "type": "text", + "content": "Effect of using ground truths on synthetic data. While our described pipeline does not use any kind of manual labels, we do use ground truth segmentation and keypoints on synthetic dataset in Stage I because they are readily available. Here we further experiment with a variation where we do not use any kind of ground truths in Stage I (first row in Tab. 4). Instead, we use KMeans clusters and cluster centers as surrogate ground truths for model initialization, similar to [1]. Note that we are able to establish correspondence between KMeans clusters from different samples due to the fact that in our data generation process, each synthetic sequence starts with the same starting standing pose. Hence, we can run KMeans clustering on the starting pose that is shared among all sequences, and for subsequent samples within each sequence, we do Hungarian matching using" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1164" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 70, + 547, + 244 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 244 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 244 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 244 + ], + "type": "table", + "html": "
Stage IStage II
No.Exp.\\( \\mathcal{L}_{kp} \\)\\( \\mathcal{L}_{seg} \\)\\( MPJPE_{matched} \\)\\( \\mathcal{L}_{j2p} \\)\\( \\mathcal{L}_{seg} \\)\\( \\mathcal{L}_{sym} \\)\\( \\mathcal{L}_{p2l} \\)\\( \\mathcal{L}_{flow} \\)\\( MPJPE_{matched} \\)
1Effect of using KMeans labels in Stage I19.214.5
2Effect of \\( \\mathcal{L}_{kp} \\) in Stage IN/A14.2
315.0
4Effect of warmup losses in Stage II14.2
515.2
630.1
715.6
825.7
9Effect of unsupervised losses in Stage II14.3
1014.9
1114.4
1214.9
Full model (GC-KPL)17.713.9
", + "image_path": "5baa9cd0ea4f591b5697b46de878005b3516974af871b926c862c73b534bf5b7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 251, + 546, + 273 + ], + "lines": [ + { + "bbox": [ + 46, + 251, + 546, + 273 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 546, + 273 + ], + "type": "text", + "content": "Table 4. Ablations studies on the effect of individual loss term in our method. Experiments 3 through 12 are using both losses in Stage I. Full model is using GT labels for Stage I." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 281, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 287, + 399 + ], + "type": "text", + "content": "inter-cluster Chamfer distance to establish correspondence between clusters from consecutive frames. We observe that although initializing with surrogate ground truths leads to slightly inferior performance in Stage I, after training with the losses in Stage II the drop in performance is less visible. Overall, downstream fine-tuning performance is comparable to our best model (10.6/14.3/17.8 vs. 10.1/13.4/17.2 cm when fine-tuned on " + }, + { + "bbox": [ + 46, + 281, + 287, + 399 + ], + "type": "inline_equation", + "content": "100\\% / 10\\% / 1\\%" + }, + { + "bbox": [ + 46, + 281, + 287, + 399 + ], + "type": "text", + "content": " of the data, see Tab. 1). This experiment suggests that method does not require any kind of ground truths, even during initialization stage." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": "Effect of Losses. In this section we further investigate the effect of each component in our pipeline (Tab. 4). First, we note that " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " in Stage I is essential because we need an initialized segmentation model to get the body part assignment for each point in order to calculate the losses in Stage II. Therefore, we only experiment with a variation of Stage I training without " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{kp}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": ", and we observe that " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{kp}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " is useful in warming up the backbone for later stages. Next, we take the backbone from Stage I (trained with both " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{kp}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": "), and study the effect of individual losses in Stage II. Experiments No. 
" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "3/4/5" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " show that it is helpful to include " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{j2p}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " while having all other three unsupervised losses. In experiments " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "6/7/8" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " we take out " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{j2p}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": ", and investigate the effect of individual unsupervised losses. As shown the training becomes rather unstable if we further eliminate any of the three losses. We observe qualitatively that the metric worsens drastically because the limbs quickly move out of the human body. Experiments No. " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "3/4/5" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " suggest that " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{j2p}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{seg}" + }, + { + "bbox": [ + 46, + 400, + 289, + 652 + ], + "type": "text", + "content": " are useful regularizers that make sure the limbs stay within the body, and the unsupervised losses further improve the performance by refining the keypoint location." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 672, + 209, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 672, + 209, + 685 + ], + "spans": [ + { + "bbox": [ + 47, + 672, + 209, + 685 + ], + "type": "text", + "content": "4.7. Limitations and Future Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 715 + ], + "type": "text", + "content": "The task of keypoint location could be considered as a dual problem for semantic segmentation. In this work we" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 281, + 545, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 545, + 316 + ], + "type": "text", + "content": "use a simple segmentation network based on the same architecture as our keypoint estimation model. Using a superior segmentation model could lead to further improvements." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 316, + 546, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 546, + 398 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 546, + 398 + ], + "type": "text", + "content": "The proposed flow loss depends on quality of the estimated flow of LiDAR points. In this work we used a simple but reasonable method to estimate flow between two frames of LiDAR points called Neural Scene Flow prior [14]. Quality of the unsupervised keypoint estimation could be improved by using a more advanced flow estimator tailored for point clouds on human body surfaces." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 399, + 547, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 547, + 458 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 547, + 458 + ], + "type": "text", + "content": "Lastly, we use a part of the HUM3DIL [29] model which takes only LiDAR point cloud as input. The full HUM3DIL model was designed for multi-modal inputs and attains better performance. Thus, another interesting direction is to leverage multi-modal inputs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 464, + 379, + 476 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 464, + 379, + 476 + ], + "spans": [ + { + "bbox": [ + 306, + 464, + 379, + 476 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 483, + 547, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 699 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 699 + ], + "type": "text", + "content": "In this work, we approached the problem of 3D human pose estimation using points clouds in-the-wild, introduced a method (GC-KPL) for learning 3D human keypoints from point clouds without using any manual 3D keypoint annotations. We shown that the proposed novel losses are effective for unsupervised keypoint learning on Waymo Open Dataset. Through downstream experiments we demonstrated that GC-KPL can additionally serve as a self-supervised representation method to learn from large quantity of in-the-wild human point clouds. In addition, GC-KPL compares favorably with a commonly used domain adaptation technique. The few-shot experiments empirically verified that using only " + }, + { + "bbox": [ + 304, + 483, + 547, + 699 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 483, + 547, + 699 + ], + "type": "text", + "content": " of available 3D keypoint annotation the fine-tuned model reached comparable performance to the state-of-the-art model training on the entire dataset. These results opens up exciting possibility to utilize massive amount of sensor data in autonomous driving to improve pedestrian 3D keypoint estimation." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "1165" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In European Conference on Computer Vision, 2018. 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 136, + 288, + 190 + ], + "type": "text", + "content": "[2] Mathilde Caron, Piotr Bojanowski, Julien Mairal, and Armand Joulin. Unsupervised pre-training of image features on non-curated data. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2959-2968, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 192, + 288, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 288, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 288, + 236 + ], + "type": "text", + "content": "[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 238, + 288, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 238, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 53, + 238, + 288, + 293 + ], + "type": "text", + "content": "[4] Michael Fürst, Shriya TP Gupta, René Schuster, Oliver Wasenmüller, and Didier Stricker. Hperl: 3d human pose estimation from rgb and lidar. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 7321-7327. IEEE, 2021. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 295, + 288, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 295, + 288, + 360 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 288, + 360 + ], + "type": "text", + "content": "[5] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 1180-1189, Lille, France, 07-09 Jul 2015. PMLR. 
6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 362, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 362, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 362, + 287, + 407 + ], + "type": "text", + "content": "[6] Albert Haque, Boya Peng, Zelun Luo, Alexandre Alahi, Serena Yeung, and Li Fei-Fei. Towards viewpoint invariant 3d human pose estimation. In European conference on computer vision, pages 160–177. Springer, 2016. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 408, + 288, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 408, + 288, + 462 + ], + "spans": [ + { + "bbox": [ + 53, + 408, + 288, + 462 + ], + "type": "text", + "content": "[7] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 464, + 288, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 288, + 518 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 288, + 518 + ], + "type": "text", + "content": "[8] Siyuan Huang, Yichen Xie, Song-Chun Zhu, and Yixin Zhu. Spatio-temporal self-supervised representation learning for 3d point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6535-6545, 2021. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "type": "text", + "content": "[9] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Unsupervised learning of object landmarks through conditional image generation. Advances in neural information processing systems, 31, 2018. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 567, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 621 + ], + "type": "text", + "content": "[10] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Self-supervised learning of interpretable keypoints from unlabelled videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8787-8797, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "text", + "content": "[11] Wonhui Kim, Manikandasriram Srinivasan Ramanagopal, Charles Barto, Ming-Yuan Yu, Karl Rosaen, Nick Goumas, Ram Vasudevan, and Matthew Johnson-Roberson. Pedx: Benchmark dataset for metric 3-d pose estimation of pedestrians in complex urban intersections. IEEE Robotics and Automation Letters, 4(2):1940-1947, 2019. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "text", + "content": "[12] Jiaxin Li and Gim Hee Lee. Usip: Unsupervised stable interest point detection from 3d point clouds. 
In Proceedings of" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "the IEEE/CVF international conference on computer vision, pages 361-370, 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 96, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 547, + 162 + ], + "type": "text", + "content": "[13] Jialian Li, Jingyi Zhang, Zhiyong Wang, Siqi Shen, Chenglu Wen, Yuexin Ma, Lan Xu, Jingyi Yu, and Cheng Wang. Lidarcap: Long-range marker-less 3d human motion capture with lidar point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20502-20512, 2022. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 163, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 545, + 196 + ], + "type": "text", + "content": "[14] Xueqian Li, Jhony Kaesemodel Pontes, and Simon Lucey. Neural scene flow prior. Advances in Neural Information Processing Systems, 34:7838-7851, 2021. 3, 5, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 198, + 545, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 198, + 545, + 241 + ], + "spans": [ + { + "bbox": [ + 308, + 198, + 545, + 241 + ], + "type": "text", + "content": "[15] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. ACM transactions on graphics (TOG), 34(6):1-16, 2015. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 243, + 545, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 243, + 545, + 297 + ], + "spans": [ + { + "bbox": [ + 308, + 243, + 545, + 297 + ], + "type": "text", + "content": "[16] Atsuhiro Noguchi, Umar Iqbal, Jonathan Tremblay, Tatsuya Harada, and Orazio Gallo. Watch it move: Unsupervised discovery of 3d joints for re-posing of articulated objects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3677–3687, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 299, + 545, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 299, + 545, + 332 + ], + "spans": [ + { + "bbox": [ + 308, + 299, + 545, + 332 + ], + "type": "text", + "content": "[17] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 333, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 333, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 308, + 333, + 545, + 398 + ], + "type": "text", + "content": "[18] Luca Schmidtke, Athanasios Vlontzos, Simon Ellershaw, Anna Lukens, Tomoki Arichi, and Bernhard Kainz. Unsupervised human pose estimation through transforming shape templates. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2484-2494, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 399, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 453 + ], + "type": "text", + "content": "[19] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 456, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 545, + 521 + ], + "type": "text", + "content": "[20] Jennifer J Sun, Serim Ryou, Roni H Goldshmid, Brandon Weissboud, John O Dabiri, David J Anderson, Ann Kennedy, Yisong Yue, and Pietro Perona. Self-supervised keypoint discovery in behavioral videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2171-2180, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 523, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 523, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 308, + 523, + 545, + 589 + ], + "type": "text", + "content": "[21] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 634 + ], + "type": "text", + "content": "[22] Supasorn Suwajanakorn, Noah Snavely, Jonathan J Tompson, and Mohammad Norouzi. Discovery of latent 3d keypoints via end-to-end geometric reasoning. Advances in neural information processing systems, 31, 2018. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 689 + ], + "type": "text", + "content": "[23] Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9782-9792, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 714 + ], + "type": "text", + "content": "[24] Yuefan Wu, Zeyuan Chen, Shaowei Liu, Zhongzheng Ren, and Shenlong Wang. 
Casa: Category-agnostic skeletal ani" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 749, + 315, + 757 + ], + "type": "text", + "content": "1166" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 619 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 286, + 94 + ], + "type": "text", + "content": "mal reconstruction. arXiv preprint arXiv:2211.03568, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 149 + ], + "type": "text", + "content": "[25] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In European conference on computer vision, pages 574-591. Springer, 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 196 + ], + "type": "text", + "content": "[26] Yaoqing Yang, Chen Feng, Yiru Shen, and Dong Tian. Foldingnet: Point cloud auto-encoder via deep grid deformation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 206–215, 2018. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 287, + 251 + ], + "type": "text", + "content": "[27] Yang You, Wenhai Liu, Yanjie Ze, Yong-Lu Li, Weiming Wang, and Cewu Lu. Ukpgan: A general self-supervised keypoint detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17042-17051, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 253, + 287, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 287, + 306 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 287, + 306 + ], + "type": "text", + "content": "[28] Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-bert: Pre-training 3d point cloud transformers with masked point modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19313-19322, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 308, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 287, + 363 + ], + "type": "text", + "content": "[29] Andrei Zanfir, Mihai Zanfir, Alex Gorban, Jingwei Ji, Yin Zhou, Dragomir Anguelov, and Cristian Sminchisescu. Hum3dil: Semi-supervised multi-modal 3d humanpose estimation for autonomous driving. In 6th Annual Conference on Robot Learning, 2022. 
1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 365, + 287, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 418 + ], + "type": "text", + "content": "[30] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 420, + 287, + 464 + ], + "type": "text", + "content": "[31] Zihao Zhang, Lei Hu, Xiaoming Deng, and Shihong Xia. Weakly supervised adversarial learning for 3d human pose estimation from point clouds. IEEE transactions on visualization and computer graphics, 26(5):1851-1859, 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 465, + 287, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 465, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 48, + 465, + 287, + 540 + ], + "type": "text", + "content": "[32] Jingxiao Zheng, Xinwei Shi, Alexander Gorban, Junhua Mao, Yang Song, Charles R Qi, Ting Liu, Visesh Chari, Andre Cornman, Yin Zhou, et al. Multi-modal 3d human pose estimation with 2d weak supervision in autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4478-4487, 2022. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 543, + 287, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 543, + 287, + 585 + ], + "spans": [ + { + "bbox": [ + 48, + 543, + 287, + 585 + ], + "type": "text", + "content": "[33] Junsheng Zhou, Xin Wen, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Self-supervised point cloud representation learning with occlusion auto-encoder. arXiv preprint arXiv:2203.14084, 2022. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 588, + 287, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 588, + 287, + 619 + ], + "spans": [ + { + "bbox": [ + 48, + 588, + 287, + 619 + ], + "type": "text", + "content": "[34] Yufan Zhou, Haiwei Dong, and Abdulmotaleb El Saddik. Learning to estimate 3d human pose from point cloud. IEEE Sensors Journal, 20(20):12334-12342, 2020. 
2" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 296, + 748, + 315, + 757 + ], + "type": "text", + "content": "1167" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_content_list.json b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..569cc8e2b01f8d55d54f48d50246d816d9d3129e --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_content_list.json @@ -0,0 +1,2034 @@ +[ + { + "type": "text", + "text": "3D Human Mesh Estimation from Virtual Markers", + "text_level": 1, + "bbox": [ + 225, + 130, + 743, + 150 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoxuan Ma $^{1}$ Jiajun Su $^{1}$ Chunyu Wang $^{3*}$ Wentao Zhu $^{1}$ Yizhou Wang $^{1,2,4}$", + "bbox": [ + 150, + 178, + 818, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ School of Computer Science, Center on Frontiers of Computing Studies, Peking University \n $^{2}$ Inst. for Artificial Intelligence, Peking University \n $^{3}$ Microsoft Research Asia \n $^{4}$ Nat'l Eng. Research Center of Visual Technology", + "bbox": [ + 120, + 210, + 848, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{maxiaoxuan, sujiajun, wtzhu, yizhou.wang}@pku.edu.cn, chnuwa@microsoft.com", + "bbox": [ + 153, + 303, + 818, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 352, + 313, + 368 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by the success of volumetric 3D pose estimation, some recent human mesh estimators propose to estimate 3D skeletons as intermediate representations, from which, the dense 3D meshes are regressed by exploiting the mesh topology. However, body shape information is lost in extracting skeletons, leading to mediocre performance. The advanced motion capture systems solve the problem by placing dense physical markers on the body surface, which allows to extract realistic meshes from their non-rigid motions. However, they cannot be applied to wild images without markers. In this work, we present an intermediate representation, named virtual markers, which learns 64 landmark keypoints on the body surface based on the large-scale mocap data in a generative style, mimicking the effects of physical markers. The virtual markers can be accurately detected from wild images and can reconstruct the intact meshes with realistic shapes by simple interpolation. Our approach outperforms the state-of-the-art methods on three datasets. In particular, it surpasses the existing methods by a notable margin on the SURREAL dataset, which has diverse body shapes. Code is available at https://github.com/ShirleyMaxx/VirtualMarker.", + "bbox": [ + 75, + 385, + 473, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 746, + 209, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D human mesh estimation aims to estimate the 3D positions of the mesh vertices that are on the body surface. The task has attracted a lot of attention from the computer vision and computer graphics communities [3, 10, 18, 24, 26, 29, 34, 36, 41, 49] because it can benefit many applications such as virtual reality [14]. Recently, the deep learning-based methods [7, 18, 28] have significantly", + "bbox": [ + 75, + 771, + 470, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9802a17301884443af2cf8a67de51599d2994b1a4602483b08ea496814f3ece9.jpg", + "image_caption": [ + "Figure 1. Mesh estimation results on four examples with different body shapes. Pose2Mesh [7] which uses 3D skeletons as the intermediate representation fails to predict accurate shapes. Our virtual marker-based method obtains accurate estimates." + ], + "image_footnote": [], + "bbox": [ + 535, + 352, + 861, + 683 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "advanced the accuracy on the benchmark datasets.", + "bbox": [ + 500, + 763, + 831, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The pioneer methods [18,49] propose to regress the pose and shape parameters of the mesh models such as SMPL [35] directly from images. While straightforward, their accuracy is usually lower than the state-of-the-arts. The first reason is that the mapping from the image features to the model parameters is highly non-linear and suffers from image-model misalignment [28]. Besides, existing mesh datasets [15,27,37,52] are small and limited to simple labo", + "bbox": [ + 496, + 780, + 895, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author", + "bbox": [ + 94, + 887, + 220, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "534", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ratory environments due to the complex capturing process. The lack of sufficient training data severely limits its performance.", + "bbox": [ + 76, + 90, + 470, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, some works [25, 38] begin to formulate mesh estimation as a dense 3D keypoint detection task inspired by the success of volumetric pose estimation [42, 43, 45, 48, 57, 63]. For example, in [25, 38], the authors propose to regress the 3D positions of all vertices. However, it is computationally expensive because it has more than several thousand vertices. Moon and Lee [38] improve the efficiency by decomposing the 3D heatmaps into multiple 1D heatmaps at the cost of mediocre accuracy. Choi et al. [7] propose to first detect a sparser set of skeleton joints in the images, from which the dense 3D meshes are regressed by exploiting the mesh topology. The methods along this direction have attracted increasing attention [7, 28, 53] due to two reasons. 
First, the proxy task of 3D skeleton estimation can leverage the abundant 2D pose datasets which notably improves the accuracy. Second, mesh regression from the skeletons is efficient. However, important information about the body shapes is lost in extracting the 3D skeletons, which is largely overlooked previously. As a result, different types of body shapes, such as lean or obese, cannot be accurately estimated (see Figure 1).", + "bbox": [ + 75, + 138, + 470, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The professional marker-based motion capture (mocap) method MoSh [34] places physical markers on the body surface and explore their subtle non-rigid motions to extract meshes with accurate shapes. However, the physical markers limit the approach to be used in laboratory environments. We are inspired to think whether we can identify a set of landmarks on the mesh as virtual markers, e.g., elbow and wrist, that can be detected from wild images, and allow to recover accurate body shapes? The desired virtual markers should satisfy several requirements. First, the number of markers should be much smaller than that of the mesh vertices so that we can use volumetric representations to efficiently estimate their 3D positions. Second, the markers should capture the mesh topology so that the intact mesh can be accurately regressed from them. Third, the virtual markers have distinguishable visual patterns so that they can be detected from images.", + "bbox": [ + 75, + 459, + 470, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we present a learning algorithm based on archetypal analysis [12] to identify a subset of mesh vertices as the virtual markers that try to satisfy the above requirements to the best extent. Figure 2 shows that the learned virtual markers coarsely outline the body shape and pose which paves the way for estimating meshes with accurate shapes. Then we present a simple framework for 3D mesh estimation on top of the representation as shown in Figure 3. It first learns a 3D keypoint estimation network based on [45] to detect the 3D positions of the virtual markers. Then we recover the intact mesh simply by interpolating them. The interpolation weights are pre-trained in the representation", + "bbox": [ + 75, + 719, + 470, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "learning step and will be adjusted by a light network based on the prediction confidences of the virtual markers for each image.", + "bbox": [ + 496, + 90, + 890, + 135 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We extensively evaluate our approach on three benchmark datasets. It consistently outperforms the state-of-the-art methods on all of them. In particular, it achieves a significant gain on the SURREAL dataset [51] which has a variety of body shapes. Our ablation study also validates the advantages of the virtual marker representation in terms of recovering accurate shapes. Finally, the method shows decent generalization ability and generates visually appealing results for the wild images.", + "bbox": [ + 496, + 136, + 893, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 287, + 635, + 304 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 
Optimization-based mesh estimation", + "text_level": 1, + "bbox": [ + 500, + 313, + 815, + 329 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Before deep learning dominates this field, 3D human mesh estimation [2, 27, 34, 40, 58] is mainly optimization-based, which optimizes the parameters of the human mesh models to match the observations. For example, Loper et al. [34] propose MoSh that optimizes the SMPL parameters to align the mesh with the 3D marker positions. It is usually used to get GT 3D meshes for benchmark datasets because of its high accuracy. Later works propose to optimize the model parameters or mesh vertices based on 2D image cues [2, 11, 27, 40, 58]. They extract intermediate representations such as 2D skeletons from the images and optimize the mesh model by minimizing the discrepancy between the model projection and the intermediate representations such as the 2D skeletons. These methods are usually sensitive to initialization and suffer from local optimum.", + "bbox": [ + 496, + 335, + 893, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Learning-based mesh estimation", + "text_level": 1, + "bbox": [ + 500, + 574, + 784, + 589 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, most works follow the learning-based framework and have achieved promising results. Deep networks [18, 24, 26, 36, 49] are used to regress the SMPL parameters from image features. However, learning the mapping from the image space to the parameter space is highly nonlinear [38]. In addition, they suffer from the misalignment between the meshes and image pixels [60]. These problems make it difficult to learn an accurate yet generalizable model.", + "bbox": [ + 496, + 598, + 893, + 718 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Some works propose to introduce proxy tasks to get intermediate representations first, hoping to alleviate the learning difficulty. In particular, intermediate representations of physical markers [59], IUV images [55,60-62], body part segmentation masks [23,27,39,50] and body skeletons [7,28,47,53] have been proposed. In particular, THUNDR [59] first estimates the 3D locations of physical markers from images and then reconstructs the mesh from the 3D markers. The physical markers can be interpreted as a simplified representation of body shape and pose. Although it is very accurate, it cannot be applied to wild images without markers. In contrast, body skeleton is a popular human representation", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "535", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e284d5f27134f54524d381aedddf79673639cc90ce42d653625aa64ad2afe0d4.jpg", + "image_caption": [ + "Figure 2. Left: The learned virtual markers (blue balls) in the back and front views. The grey balls mean they are invisible in the front view. The virtual markers act similarly to physical body markers and approximately outline the body shape. Right: Mesh estimation results by our approach, from left to right are input image, estimated 3D mesh overlayed on the image, and three different viewpoints showing the estimated 3D mesh with our intermediate predicted virtual markers (blue balls), respectively." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 88, + 478, + 229 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f69b18821bf52c010f0513102762f9d306e115d3ac5d7d71172e968edab7b793.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 88, + 851, + 231 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "that can be robustly detected from wild images. Choi et al. [7] propose to first estimate the 3D skeletons, and then estimate the intact mesh from them. However, accurate body shapes are difficult to be recovered from the oversimplified 3D skeletons.", + "bbox": [ + 75, + 310, + 470, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work belongs to the learning-based class and is related to works that use physical markers or skeletons as intermediate representations. But different from them, we propose a novel intermediate representation, named virtual markers, which is more expressive to reduce the ambiguity in pose and shape estimation than body skeletons and can be applied to wild images.", + "bbox": [ + 75, + 385, + 470, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 505, + 168, + 520 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we describe the details of our approach. First, Section 3.1 introduces how we learn the virtual marker representation from mocap data. Then we present the overall framework for mesh estimation from an image in Section 3.2. At last, Section 3.3 discusses the loss functions and training details.", + "bbox": [ + 75, + 530, + 468, + 621 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. The virtual marker representation", + "text_level": 1, + "bbox": [ + 76, + 631, + 377, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We represent a mesh by a vector of vertex positions $\\mathbf{x} \\in \\mathbb{R}^{3M}$ where $M$ is the number of mesh vertices. Denote a mocap dataset such as [15] with $N$ meshes as $\\widehat{\\mathbf{X}} = [\\mathbf{x}_1, \\dots, \\mathbf{x}_N] \\in \\mathbb{R}^{3M \\times N}$ . To unveil the latent structure among vertices, we reshape it to $\\mathbf{X} \\in \\mathbb{R}^{3N \\times M}$ with each column $\\mathbf{x}_i \\in \\mathbb{R}^{3N}$ representing all possible positions of the $i^{\\text{th}}$ vertex in the dataset [15].", + "bbox": [ + 75, + 654, + 468, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The rank of $\\mathbf{X}$ is smaller than $M$ because the mesh representation is smooth and redundant where some vertices can be accurately reconstructed by the others. While it seems natural to apply PCA [17] to $\\mathbf{X}$ to compute the eigenvectors as virtual markers for reconstructing others, there is no guarantee that the virtual markers correspond to the mesh vertices, making them difficult to be detected from images. Instead, we aim to learn $K$ virtual markers $\\mathbf{Z} = [\\mathbf{z}_1,\\dots,\\mathbf{z}_K]\\in \\mathbb{R}^{3N\\times K}$ that try to satisfy the follow", + "bbox": [ + 75, + 763, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/b639b5ec35efba53e27e609d318a2846cf61c1f6c4540b6f9ec5a58220aa7c2a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Type | Formula | Reconst. Error (mm) ↓
Original | ||X - XBA||_F^2 | 11.67
Symmetric | ||X - XB^sym A^sym||_F^2 | 10.98
", + "bbox": [ + 519, + 306, + 875, + 353 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. The reconstruction errors using the original and the symmetric sets of markers on the H3.6M dataset [15], respectively. The errors are small indicating that they are sufficiently expressive and can reconstruct all vertices accurately.", + "bbox": [ + 498, + 364, + 893, + 419 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ing two requirements to the greatest extent. First, they can accurately reconstruct the intact mesh $\\mathbf{X}$ by their linear combinations: $\\mathbf{X} = \\mathbf{Z}\\mathbf{A}$ , where $\\mathbf{A} \\in \\mathbb{R}^{K \\times M}$ is a coefficient matrix that encodes the spatial relationship between the virtual markers and the mesh vertices. Second, they should have distinguishable visual patterns in images so that they can be easily detected from images. Ideally, they can be on the body surface as the meshes.", + "bbox": [ + 496, + 431, + 893, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We apply archetypal analysis [4, 12] to learn $\\mathbf{Z}$ by minimizing a reconstruction error with two additional constraints: (1) each vertex $\\mathbf{x}_i$ can be reconstructed by convex combinations of $\\mathbf{Z}$ , and (2) each marker $\\mathbf{z}_i$ should be convex combinations of the mesh vertices $\\mathbf{X}$ :", + "bbox": [ + 496, + 553, + 893, + 628 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\substack {\\boldsymbol {\\alpha} _ {i} \\in \\Delta_ {K} \\text {for} 1 \\leq i \\leq M, \\\\ \\boldsymbol {\\beta} _ {j} \\in \\Delta_ {M} \\text {for} 1 \\leq j \\leq K}} \\| \\mathbf {X} - \\mathbf {X B A} \\| _ {F} ^ {2}, \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 641, + 890, + 675 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{A} = [\\pmb{\\alpha}_1, \\dots, \\pmb{\\alpha}_M] \\in \\mathbb{R}^{K \\times M}$ , each $\\pmb{\\alpha}$ resides in the simplex $\\Delta_K \\triangleq \\{\\pmb{\\alpha} \\in \\mathbb{R}^K \\text{ s.t. } \\pmb{\\alpha} \\succeq 0 \\text{ and } ||\\pmb{\\alpha}||_1 = 1\\}$ , and $\\mathbf{B} = [\\beta_1, \\dots, \\beta_K] \\in \\mathbb{R}^{M \\times K}$ , $\\beta_j \\in \\Delta_M$ . We adopt Active-set algorithm [4] to solve objective (1) and obtain the learned virtual markers $\\mathbf{Z} = \\mathbf{X}\\mathbf{B} \\in \\mathbb{R}^{3N \\times K}$ . As shown in [4, 12], the two constraints encourage the virtual markers $\\mathbf{Z}$ to unveil the latent structure among vertices, therefore they learn to be close to the extreme points of the mesh and located on the body surface as much as possible.", + "bbox": [ + 496, + 686, + 893, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Post-processing. Since human body is left-right symmetric, we adjust $\\mathbf{Z}$ to reflect the property. We first replace each $\\mathbf{z}_i\\in$ $\\mathbf{Z}$ by its nearest vertex on the mesh and obtain $\\widetilde{\\mathbf{Z}}\\in \\mathbb{R}^{3\\times K}$ . This step allows us to compute the left or right counterpart", + "bbox": [ + 496, + 839, + 893, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "536", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/733021ed3ea7358bb09d5c51c395e53373c9f6e4de354a829e9c325bc3dccae1.jpg", + "image_caption": [ + "Figure 3. Overview of our framework. 
Given an input image $\\mathbf{I}$ , it first estimates the 3D positions $\\hat{\\mathbf{P}}$ of the virtual markers. Then we update the coefficient matrix $\\hat{\\mathbf{A}}$ based on the estimation confidence scores $\\mathbf{C}$ of the virtual markers. Finally, the complete human mesh can be simply recovered by linear multiplication $\\hat{\\mathbf{M}} = \\hat{\\mathbf{P}}\\hat{\\mathbf{A}}$ ." + ], + "image_footnote": [], + "bbox": [ + 125, + 90, + 836, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of each marker. Then we replace the markers in the right body with the symmetric vertices in the left body and obtain the symmetric markers $\\widetilde{\\mathbf{Z}}^{sym} \\in \\mathbb{R}^{3 \\times K}$ . Finally we update $\\mathbf{B}$ and $\\mathbf{A}$ by minimizing $||\\mathbf{X} - \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}\\widetilde{\\mathbf{A}}^{sym}||_F^2$ subject to $\\widetilde{\\mathbf{Z}}^{sym} = \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}$ . More details are elaborated in the supplementary.", + "bbox": [ + 75, + 324, + 470, + 416 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 2 shows the virtual markers learned on the mocap dataset [15] after post-processing. They are similar to the physical markers and approximately outline the body shape which agrees with our expectations. They are roughly evenly distributed on the surface of the body, and some of them are located close to the body keypoints, which have distinguishable visual patterns to be accurately detected. Table 1 shows the reconstruction errors of using original markers $\\mathbf{XB}$ and the symmetric markers $\\widetilde{\\mathbf{XB}}^{sym}$ . Both can reconstruct meshes accurately.", + "bbox": [ + 75, + 416, + 473, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Mesh estimation framework", + "text_level": 1, + "bbox": [ + 76, + 575, + 330, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "On top of the virtual markers, we present a simple yet effective framework for end-to-end 3D human mesh estimation from a single image. As shown in Figure 3, it consists of two branches. The first branch uses a volumetric CNN [45] to estimate the 3D positions $\\hat{\\mathbf{P}}$ of the markers, and the second branch reconstructs the full mesh $\\hat{\\mathbf{M}}$ by predicting a coefficient matrix $\\hat{\\mathbf{A}}$ :", + "bbox": [ + 75, + 598, + 470, + 702 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {M}} = \\hat {\\mathbf {P}} \\hat {\\mathbf {A}}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 703, + 468, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We will describe the two branches in more detail.", + "bbox": [ + 76, + 724, + 403, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D marker estimation. We train a neural network to estimate a 3D heatmap $\\hat{\\mathbf{H}} = [\\hat{\\mathbf{H}}_1, \\dots, \\hat{\\mathbf{H}}_K] \\in \\mathbb{R}^{K \\times D \\times H \\times W}$ from an image. The heatmap encodes per-voxel likelihood of each marker. There are $D \\times H \\times W$ voxels in total which are used to discretize the 3D space. 
The 3D position $\\hat{\\mathbf{P}}_z \\in \\mathbb{R}^3$ of each marker is computed as the center of mass of the corresponding heatmap $\\hat{\\mathbf{H}}_z$ [45] as follows:", + "bbox": [ + 75, + 755, + 470, + 859 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {P}} _ {z} = \\sum_ {d = 1} ^ {D} \\sum_ {h = 1} ^ {H} \\sum_ {w = 1} ^ {W} (d, h, w) \\cdot \\hat {\\mathbf {H}} _ {z} (d, h, w). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 866, + 468, + 904 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The positions of all markers are represented as $\\hat{\\mathbf{P}} = [\\hat{\\mathbf{P}}_1, \\hat{\\mathbf{P}}_2, \\dots, \\hat{\\mathbf{P}}_K]$ .", + "bbox": [ + 498, + 324, + 890, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Interpolation. Ideally, if we have accurate estimates for all virtual markers $\\hat{\\mathbf{P}}$ , then we can recover the complete mesh by simply multiplying $\\hat{\\mathbf{P}}$ with a fixed coefficient matrix $\\tilde{\\mathbf{A}}^{sym}$ with sufficient accuracy as validated in Table 1. However, in practice, some markers may have large estimation errors because they may be occluded in the monocular setting. Note that this happens frequently. For example, the markers in the back will be occluded when a person is facing the camera. As a result, inaccurate markers positions may bring large errors to the final mesh if we directly multiply them with the fixed matrix $\\tilde{\\mathbf{A}}^{sym}$ .", + "bbox": [ + 496, + 369, + 893, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our solution is to rely more on those accurately detected markers. To that end, we propose to update the coefficient matrix based on the estimation confidence scores of the markers. In practice, we simply take the heatmap score at the estimated positions of each marker, i.e. $\\hat{\\mathbf{H}}_z(\\hat{\\mathbf{P}}_z)$ , and feed them to a single fully-connected layer to obtain the coefficient matrix $\\hat{\\mathbf{A}}$ . Then the mesh is reconstructed by $\\hat{\\mathbf{M}} = \\hat{\\mathbf{P}}\\hat{\\mathbf{A}}$ .", + "bbox": [ + 496, + 536, + 893, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Training", + "text_level": 1, + "bbox": [ + 500, + 666, + 604, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We train the whole network end-to-end in a supervised way. The overall loss function is defined as:", + "bbox": [ + 496, + 690, + 890, + 720 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\lambda_ {v m} \\mathcal {L} _ {v m} + \\lambda_ {c} \\mathcal {L} _ {\\text {c o n f}} + \\lambda_ {m} \\mathcal {L} _ {\\text {m e s h}}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 573, + 731, + 890, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Virtual marker loss. We define $\\mathcal{L}_{vm}$ as the $L_{1}$ distance between the predicted 3D virtual markers $\\hat{\\mathbf{P}}$ and the GT $\\hat{\\mathbf{P}}^{*}$ as follows:", + "bbox": [ + 496, + 757, + 890, + 800 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {v m} = \\left\\| \\hat {\\mathbf {P}} - \\hat {\\mathbf {P}} ^ {*} \\right\\| _ {1}. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 630, + 801, + 890, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that it is easy to get GT markers $\\hat{\\mathbf{P}}^*$ from GT meshes as stated in Section 3.1 without additional manual annotations.", + "bbox": [ + 496, + 824, + 890, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Confidence loss. We also require that the 3D heatmaps have reasonable shapes, therefore, the heatmap score at the", + "bbox": [ + 496, + 869, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "537", + "bbox": [ + 485, + 944, + 509, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "voxel containing the GT marker position $\\hat{\\mathbf{P}}_z^*$ should have the maximum value as in the previous work [16]:", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {c o n f}} = - \\sum_ {z = 1} ^ {K} \\log \\left(\\hat {\\mathbf {H}} _ {z} \\left(\\hat {\\mathbf {P}} _ {z} ^ {*}\\right)\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 128, + 468, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Mesh loss. Following [38], we define $\\mathcal{L}_{mesh}$ as a weighted sum of four losses:", + "bbox": [ + 76, + 172, + 468, + 202 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {m e s h}} = \\mathcal {L} _ {\\text {v e r t e x}} + \\mathcal {L} _ {\\text {p o s e}} + \\mathcal {L} _ {\\text {n o r m a l}} + \\lambda_ {e} \\mathcal {L} _ {\\text {e d g e}}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 212, + 468, + 229 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Vertex coordinate loss. We adopt $L_{1}$ loss between predicted 3D mesh coordinates $\\hat{\\mathbf{M}}$ with GT mesh $\\hat{\\mathbf{M}}^{*}$ as:", + "bbox": [ + 91, + 238, + 468, + 281 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {v e r t e x}} = \\left\\| \\hat {\\mathbf {M}} - \\hat {\\mathbf {M}} ^ {*} \\right\\| _ {1}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 282, + 468, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Pose loss. We use $L_{1}$ loss between the 3D landmark joints regressed from mesh $\\hat{\\mathbf{M}}\\mathcal{I}$ and the GT joints $\\hat{\\mathbf{J}}^{*}$ as:", + "bbox": [ + 91, + 306, + 468, + 349 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {p o s e}} = \\left\\| \\hat {\\mathbf {M}} \\mathcal {J} - \\hat {\\mathbf {J}} ^ {*} \\right\\| _ {1}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 214, + 349, + 468, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{J} \\in \\mathbb{R}^{M \\times J}$ is a pre-defined joint regression matrix in SMPL model [2].", + "bbox": [ + 107, + 369, + 468, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- Surface losses. 
To improve surface smoothness [54], we supervise the normal vector of a triangle face with GT normal vectors by $\\mathcal{L}_{\\text{normal}}$ and the edge length of the predicted mesh with GT length by $\\mathcal{L}_{\\text{edge}}$ :", + "bbox": [ + 91, + 409, + 468, + 470 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {n o r m a l} = \\sum_ {f} \\sum_ {\\{i, j \\} \\subset f} \\left| \\left\\langle \\frac {\\hat {\\mathbf {M}} _ {i} - \\hat {\\mathbf {M}} _ {j}}{\\| \\hat {\\mathbf {M}} _ {i} - \\hat {\\mathbf {M}} _ {j} \\| _ {2}}, \\hat {\\mathbf {n}} _ {f} ^ {*} \\right\\rangle \\right|, \\\\ \\mathcal {L} _ {\\text {e d g e}} = \\sum_ {f} \\sum_ {\\{i, j \\} \\subset f} \\left| \\| \\hat {\\mathbf {M}} _ {i} - \\hat {\\mathbf {M}} _ {j} \\| _ {2} - \\| \\hat {\\mathbf {M}} _ {i} ^ {*} - \\hat {\\mathbf {M}} _ {j} ^ {*} \\| _ {2} \\right|. \\tag {10} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 477, + 468, + 563 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $f$ and $\\hat{\\mathbf{n}}_f^*$ denote a triangle face in the mesh and its GT unit normal vector, respectively. $\\hat{\\mathbf{M}}_i$ denote the $i^{th}$ vertex of $\\hat{\\mathbf{M}}$ . * denotes GT.", + "bbox": [ + 107, + 564, + 468, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 626, + 209, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets and metrics", + "text_level": 1, + "bbox": [ + 76, + 650, + 272, + 666 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "H3.6M [15]. We use (S1, S5, S6, S7, S8) for training and (S9, S11) for testing. As in [7, 18, 31, 32], we report MPJPE and PA-MPJPE for poses that are derived from the estimated meshes. We also report Mean Per Vertex Error (MPVE) for the whole mesh.", + "bbox": [ + 75, + 672, + 468, + 748 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3DPW [52] is collected in natural scenes. Following the previous works [23, 31, 32, 59], we use the train set of 3DPW to learn the model and evaluate on the test set. The same evaluation metrics as H3.6M are used.", + "bbox": [ + 75, + 763, + 468, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "SURREAL [51] is a large-scale synthetic dataset with GT SMPL annotations and has diverse samples in terms of body shapes, backgrounds, etc. We use its training set to train a model and evaluate the test split following [7].", + "bbox": [ + 75, + 839, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 90, + 715, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We learn 64 virtual markers on the H3.6M [15] training set. We use the same set of markers for all datasets instead of learning a separate set on each dataset. Following [7, 18, 22, 25, 31, 32, 38, 59], we conduct mix-training by using MPI-INF-3DHP [37], UP-3D [27], and COCO [33] training set for experiments on the H3.6M and 3DPW datasets. We adapt a 3D pose estimator [45] with HRNet-W48 [44] as the image feature backbone for estimating the 3D virtual markers. We set the number of voxels in each dimension to be 64, i.e. $D = H = W = 64$ for 3D heatmaps. Following [18, 25, 38], we crop every single human region from the input image and resize it to $256 \\times 256$ . 
We use Adam [21] optimizer to train the whole framework for 40 epochs with a batch size of 32. The learning rates for the two branches are set to $5 \\times 10^{-4}$ and $1 \\times 10^{-3}$ , respectively, which are decreased by half after the $30^{th}$ epoch. Please refer to the supplementary for more details.", + "bbox": [ + 496, + 117, + 893, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Comparison to the State-of-the-arts", + "text_level": 1, + "bbox": [ + 498, + 396, + 808, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results on H3.6M. Table 2 compares our approach to the state-of-the-art methods on the H3.6M dataset. Our method achieves competitive or superior performance. In particular, it outperforms the methods that use skeletons (Pose2Mesh [7], DSD-SATN [47]), body markers (THUNDR) [59], or IUV image [60, 62] as proxy representations, demonstrating the effectiveness of the virtual marker representation.", + "bbox": [ + 496, + 422, + 893, + 545 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results on 3DPW. We compare our method to the state-of-the-art methods on the 3DPW dataset in Table 2. Our approach achieves state-of-the-art results among all the methods, validating the advantages of the virtual marker representation over the skeleton representation used in Pose2Mesh [7], DSD-SATN [47], and other representations like IUV image used in PyMAF [62]. In particular, our approach outperforms I2L-MeshNet [38], METRO [31], and Mesh Graphormer [32] by a notable margin, which suggests that virtual markers are more suitable and effective representations than detecting all vertices directly as most of them are not discriminative enough to be accurately detected.", + "bbox": [ + 496, + 564, + 893, + 746 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Results on SURREAL. This dataset has more diverse samples in terms of body shapes. The results are shown in Table 3. Our approach outperforms the state-of-the-art methods by a notable margin, especially in terms of MPVE. Figure 1 shows some challenging cases without cherry-picking. The skeleton representation loses the body shape information so the method [7] can only recover mean shapes. In contrast, our approach generates much more accurate mesh estimation results.", + "bbox": [ + 496, + 763, + 893, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "538", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/3e60115631d9a9b87c93f799eb428171e6ffd71035da3ca6ea4baa6c3e3e164a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodIntermediate RepresentationH3.6M3DPW
MPVE↓MPJPE↓PA-MPJPE↓MPVE↓MPJPE↓PA-MPJPE↓
† Arnab et al. [1] CVPR'192D skeleton-77.854.3--72.2
† HMMR [19] CVPR'19---56.9139.3116.572.6
† DSD-SATN [47] ICCV'193D skeleton-59.142.4--69.5
† VIBE [22] CVPR'20--65.941.599.182.951.9
† TCMR [6] CVPR'21--62.341.1102.986.552.7
† MAED [53] ICCV'213D skeleton-56.338.792.679.145.7
SMPLify [2] ECCV'162D skeleton--82.3---
HMR [18] CVPR'18-96.188.056.8152.7130.081.3
GraphCMR [25] CVPR'193D vertices--50.1--70.2
SPIN [24] ICCV'19---41.1116.496.959.2
DenseRac [55] ICCV'19IUV image-76.848.0---
DecoMR [60] CVPR'20IUV image-60.639.3---
ExPose [9] ECCV'20-----93.460.7
Pose2Mesh [7] ECCV'203D skeleton85.364.946.3106.388.958.3
I2L-MeshNet [38] ECCV'203D vertices65.155.741.1110.193.257.7
PC-HMR [36] AAAI'213D skeleton---108.687.866.9
HybrIK [28] CVPR'213D skeleton65.754.434.586.574.145.0
METRO [31] CVPR'213D vertices-54.036.788.277.147.9
ROMP [46] ICCV'21----108.391.354.9
Mesh Graphormer [32] ICCV'213D vertices-51.234.587.774.745.6
PARE [23] ICCV'21Segmentation---88.674.546.5
THUNDR [59] ICCV'213D markers-55.039.888.074.851.5
PyMaf [62] ICCV'21IUV image-57.740.5110.192.858.9
ProHMR [26] ICCV'21---41.2--59.8
OCHMR [20] CVPR'222D heatmap---107.189.758.3
3DCrowdNet [8] CVPR'223D skeleton---98.381.751.5
CLIFF [30] ECCV'22--47.132.781.269.043.0
FastMETRO [5] ECCV'223D vertices-52.233.784.173.544.6
VisDB [56] ECCV'223D vertices-51.034.585.573.544.9
OursVirtual marker58.047.332.077.967.541.3
", + "bbox": [ + 133, + 87, + 841, + 534 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b041165650b1125387e2e7fb6be62468adf43889f28768a7475e69a2e07f3514.jpg", + "table_caption": [ + "Table 2. Comparison to the state-of-the-arts on H3.6M [15] and 3DPW [52] datasets. $\\dagger$ means using temporal cues. The methods are not strictly comparable because they may have different backbones and training datasets. We provide the numbers only to show proof-of-concept results." + ], + "table_footnote": [], + "table_body": "
MethodIntermediate RepresentationMPVE↓MPJPE↓PA-MPJPE↓
HMR [18] CVPR'18-85.173.655.4
BodyNet [50] ECCV'18Skel. + Seg.65.8--
GraphCMR [25] CVPR'193D vertices103.287.463.2
SPIN [24] ICCV'19-82.366.743.7
DecoMR [60] CVPR'20IUV image68.952.043.0
Pose2Mesh [7] ECCV'203D skeleton68.856.639.6
PC-HMR [36] AAAI'213D skeleton59.851.737.9
* DynaBOA [13] TPAMI'22-70.755.234.0
OursVirtual marker44.736.928.9
", + "bbox": [ + 81, + 595, + 480, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Ablation study", + "text_level": 1, + "bbox": [ + 76, + 816, + 227, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Virtual marker representation. We compare our method to two baselines in Table 4. First, in baseline (a), we replace the virtual markers of our method with the skeleton representation. The rest are kept the same as ours (c). Our", + "bbox": [ + 75, + 839, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7de4e7354de823738a5b974fb63cdbe563832aeb963f7f14daa4c5589fb52edd.jpg", + "table_caption": [ + "Table 3. Comparison to the state-of-the-arts on SURREAL [51] dataset. * means training on the test split with 2D supervisions. \"Skel. + Seg.\" means using skeleton and segmentation together." + ], + "table_footnote": [], + "table_body": "
No.Intermediate RepresentationMPVE↓
H3.6MSURREAL
(a)Skeleton64.453.6
(b)Rand virtual marker63.050.1
(c)Virtual marker58.044.7
", + "bbox": [ + 532, + 595, + 862, + 667 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. Ablation study of the virtual marker representation for our approach on H3.6M and SURREAL datasets. \"Skeleton\" means the sparse landmark joint representation is used. \"Rand virtual marker\" means the virtual markers are randomly selected from all the vertices without learning. (c) is our method, where the learned virtual markers are used.", + "bbox": [ + 496, + 678, + 890, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "method achieves a much lower MPVE than the baseline (a), demonstrating that the virtual markers help to estimate body shapes more accurately than the skeletons. In baseline (b), we randomly sample 64 from the 6890 mesh vertices as virtual markers. We repeat the experiment five times and report the average number. We can see that the result is worse than ours, which is because the randomly selected vertices may not be expressive to reconstruct the other vertices or can not", + "bbox": [ + 496, + 779, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "539", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d75d2439e70f101e0f0597491573606a5244e0ef6442b20606bf112db785d5f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 86, + 94, + 163, + 155 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bf585a9744263c3b1f8ef02f1b7b9305bd4896c825be18a1c6af970551c93ab4.jpg", + "image_caption": [ + "Image" + ], + "image_footnote": [], + "bbox": [ + 86, + 159, + 163, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/19daadfeebe7f76537c8a3a3c8981aa125ceb53693b9da1fd227c199c98eff81.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 169, + 95, + 251, + 155 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1a19c634fcd5bca6dec6a8569b592597c50cd1a6d2d511187f43e8d79d844853.jpg", + "image_caption": [ + "Pose2Mesh" + ], + "image_footnote": [], + "bbox": [ + 169, + 157, + 250, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dee4dae334b9c78aa64931c0fc114c493247ad4ebeb923c379d47b11a00fa1d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 95, + 292, + 155 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a2e01121534df4ea145f12232adcfe1c366ed57133f647ac77f9b15ecea858f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 157, + 295, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/514d2b4ec42e40e039e3a34b8bfde4f4a522a242b99341d2e424280812502d09.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 302, + 95, + 377, + 155 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/32d455f6b7f7ced9a2dabed08523e3dba17fc81385ca7145ba610be177ed1d76.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 302, + 157, + 377, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/740c31dda70dd2dfc4f69814bcdf139af247eef60dcd3014a76169cc041e0de6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 383, + 95, + 423, + 154 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9228de83b9f0fa68f5c48466135bfaa09c81e68d3d48d03ad9e480eb2a5b2417.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 383, + 157, + 423, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + 
"img_path": "images/48a9c41dc89b424c693fad759f6285598f6ace7d6fbd08d7deeb2ee89383e1bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 441, + 94, + 460, + 152 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8281acdc46bbc40959de252d6c15a2ecfc0325139a723c9c13a160d5bf5716fa.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 441, + 157, + 460, + 220 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0d1fcf6f789f2e3f61fa2d4631812b595c7c583ebc42d6c5c0d38c6f884f0451.jpg", + "image_caption": [ + "Figure 4. Mesh estimation results of different methods on H3.6M test set. Our method with virtual marker representation gets better shape estimation results than Pose2Mesh which uses skeleton representation. Note the waistline of the body and the thickness of the arm." + ], + "image_footnote": [], + "bbox": [ + 96, + 324, + 200, + 406 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fba7ae4d1224de8fd8e9c29591ea274341b5f73ae951ad2b036a0bc75ac30c1f.jpg", + "image_caption": [ + "Figure 5. Visualization of the learned virtual markers of different numbers of $K = 16, 32, 96$ , from left to right, respectively." + ], + "image_footnote": [], + "bbox": [ + 223, + 325, + 333, + 409 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/24d97febde435c7046067f49afbce89a10d520d7f36e8496eb2346e7e89ca6f5.jpg", + "image_caption": [ + "Figure 6. Mesh estimation comparison results when using (a) fixed coefficient matrix $\\tilde{\\mathbf{A}}^{sym}$ , and (b) updated $\\hat{\\mathbf{A}}$ . Please zoom in to better see the details." + ], + "image_footnote": [], + "bbox": [ + 348, + 325, + 457, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "be accurately detected from images as they lack distinguishable visual patterns. The results validate the effectiveness of our learning strategy.", + "bbox": [ + 75, + 482, + 470, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 1 shows some qualitative results on the SURREAL test set. The meshes estimated by the baseline which uses skeleton representation, i.e. Pose2Mesh [7], have inaccurate body shapes. This is reasonable because the skeleton is oversimplified and has very limited capability to recover shapes. Instead, it implicitly learns a mean shape for the whole training dataset. In contrast, the mesh estimated by using virtual markers has much better quality due to its strong representation power and therefore can handle different body shapes elegantly. Figure 4 also shows some qualitative results on the H3.6M test set. For clarity, we draw the intermediate representation (blue balls) in it as well.", + "bbox": [ + 75, + 529, + 470, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Number of virtual markers. We evaluate how the number of virtual markers affects estimation quality on H3.6M [15] dataset. Figure 5 visualizes the learned virtual markers, which are all located on the body surface and close to the extreme points of the mesh. This is expected as mentioned in Section 3.1. Table 5 (GT) shows the mesh reconstruction results when we have GT 3D positions of the virtual markers in objective (1). When we increase the number of virtual markers, both mesh reconstruction error (MPVE) and the regressed landmark joint error (MPJPE) steadily decrease. This is expected because using more virtual markers improves the representation power. 
However, using more", + "bbox": [ + 75, + 719, + 472, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/56c79e16e81e45c7e6d1d56026c8e17d0cd746b6c6758f569f49e072d1060fc0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
KGTDet
MPVE↓MPJPE↓MPVE↓MPJPE↓
1646.839.858.747.8
3220.114.258.248.3
6411.07.558.047.3
969.95.659.648.2
", + "bbox": [ + 519, + 88, + 875, + 170 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Ablation study of the different number of virtual markers $(K)$ on H3.6M [15] dataset. (GT) Mesh reconstruction results when GT 3D positions of the virtual markers are used in objective (1). (Det) Mesh estimation results obtained by our proposed framework when we use different numbers of virtual markers $(K)$ .", + "bbox": [ + 498, + 180, + 893, + 250 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8b6485047caa0e196514516f49ae19921554e134ca1a6a5c0473633923679ae5.jpg", + "image_caption": [ + "Input Image" + ], + "image_footnote": [], + "bbox": [ + 503, + 266, + 602, + 348 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ef7044907723ac23cdf036a13bc811d5915811b698b4fe28fceb6378677a97f8.jpg", + "image_caption": [ + "(a) Using fixed coefficient matrix" + ], + "image_footnote": [], + "bbox": [ + 617, + 266, + 754, + 348 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2103297200699a6c36f2488db301fee592207ba6bd91cd061bd3179919384d4a.jpg", + "image_caption": [ + "(b) Using updated coefficient matrix" + ], + "image_footnote": [], + "bbox": [ + 763, + 267, + 897, + 348 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "virtual markers cannot guarantee smaller estimation errors when we need to estimate the virtual marker positions from images as in our method. This is because the additional virtual markers may have large estimation errors which affect the mesh estimation result. The results are shown in Table 5 (Det). Increasing the number of virtual markers $K$ steadily reduces the MPVE errors when $K$ is smaller than 96. However, if we keep increasing $K$ , the error begins to increase. This is mainly because some of the newly introduced virtual markers are difficult to detect from images and therefore bring errors to mesh estimation.", + "bbox": [ + 496, + 452, + 893, + 618 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Coefficient matrix. We compare our method to a baseline which uses the fixed coefficient matrix $\\widetilde{\\mathbf{A}}^{sym}$ . We show the quality comparison in Figure 6. We can see that the estimated mesh by a fixed coefficient matrix (a) has mostly correct pose and shape but there are also some artifacts on the mesh while using the updated coefficient matrix (b) can get better mesh estimation results. As shown in Table 6, using a fixed coefficient matrix gets larger MPVE and MPJPE errors than using the updated coefficient matrix. This is caused by the estimation errors of virtual markers when occlusion happens, which is inevitable since the virtual markers on the back will be self-occluded by the front body. As a result, inaccurate marker positions would bring large errors to the final mesh estimates if we directly use the fixed matrix.", + "bbox": [ + 496, + 625, + 893, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Qualitative Results", + "text_level": 1, + "bbox": [ + 500, + 845, + 684, + 863 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 7 (top) presents some meshes estimated by our approach on natural images from the 3DPW test set. 
The", + "bbox": [ + 500, + 869, + 893, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "540", + "bbox": [ + 485, + 945, + 511, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1466ae8ff36d2cb4d89cf6d32c584932755aa511355777bf8a4c9ac1680f2ec3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 103, + 171, + 189 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c09ecdf5e66aeac82f858beefb5178321d062416a75d2827b29e4e993cebabbd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 103, + 254, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b347b4823adb72f1beb89e53b693ee0f13a218c96f5a781942ec2f82522704de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 103, + 328, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/671c4765f76e5082ec0a322174d690ced937e8c119ae69e2ba70eeb4e1af914a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 103, + 403, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/91221dcf2a8def63b76923650cc63a8b12304b472cd57d79ba610a8f2dfea940.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 408, + 103, + 480, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/46828dff4792f15e0d3078870b51e27a8d808a785bca38b4597abc521593d694.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 103, + 558, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0c363e8fa3f8e66654e0a9cca2f9b642d653bd3bbb3da1fa6ef432fad29dd5e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 103, + 627, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/53bf9295864d34d05d4c8e0ecae174a83cede494dba0449491e2723a0e105885.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 632, + 103, + 697, + 188 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7d9b766b435982a945e5a0a41cb93a99292b6733c7a9709752a1cdd802da3505.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 95, + 880, + 191 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/14c41b8498a35d95bb93c324efee4de6fa93d65e487b12f84b40d8e84a0eb762.jpg", + "image_caption": [ + "Figure 7. Top: Meshes estimated by our approach on images from 3DPW test set. The rightmost case in the dashed box shows a typical failure. Bottom: Meshes estimated by our approach on Internet images with challenging cases (extreme shapes or in a long dress)." 
+ ], + "image_footnote": [], + "bbox": [ + 91, + 194, + 171, + 279 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dd7af12bc3988e0f0949f3dbd6a4a20820bb0cdabd3fc01e999491084d64a442.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 174, + 194, + 254, + 279 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3e480c67116134bf80778ea845b63acd5634c368cddaab6692f4f964b863dbca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 194, + 328, + 279 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d94ebada5ba03a75357488123772aab188e2b33f39c5e5ace79f1a7506558026.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 333, + 194, + 403, + 279 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9e3043cdf52b4064082f78724e98235bea1d6dc37d8371b1b1d738120157d0aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 194, + 482, + 279 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eeb1139646c7446dfe1947b26b0799e68b409a8a66aa334b64ae2c55eb1e3cba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 194, + 560, + 277 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f404861c5961e317b732009e6f4db114f4125dde743a31ffccbdecbbfb0507a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 194, + 627, + 277 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8184ce0f5f56930eb841d6ef7ab4c10c460ee662953ed1b388455cf4ef0b2132.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 633, + 194, + 697, + 277 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c554d8af59e261ebd3a6ab99776dc49058d643a3dd257d189b66367e7ba57236.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 700, + 194, + 787, + 277 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d52d300af7ba7d41ce034ecd3811ccd8d0c76d3f0ab129dd6fec70a9be375d7e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 790, + 194, + 879, + 279 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4a69f90cd761277b0f642a517ccd028f738e33b2d6525c661b9831b59650d747.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
No.MethodFixed AsymUpdated AMPVE↓MPJPE↓
(a)Ours (fixed)64.751.6
(b)Ours58.047.3
", + "bbox": [ + 96, + 333, + 452, + 385 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Ablation study of the coefficient matrix for our approach on H3.6M dataset. \"fixed\" means using the fixed coefficient matrix $\\tilde{\\mathbf{A}}^{sym}$ to reconstruct the mesh.", + "bbox": [ + 76, + 395, + 468, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "rightmost case shows a typical failure where our method has a wrong pose estimate of the left leg due to heavy occlusion. We can see that the failure is constrained to the local region and the rest of the body still gets accurate estimates. We further analyze how inaccurate virtual markers would affect the mesh estimation, i.e. when part of human body is occluded or truncated. According to the finally learned coefficient matrix $\\hat{\\mathbf{A}}$ of our model, we highlight the relationship weights among virtual markers and all vertices in Figure 8. We can see that our model actually learns local and sparse dependency between each vertex and the virtual markers, e.g. for each vertex, the virtual markers that contribute the most are in a near range as shown in Figure 8 (b). Therefore, in inference, if a virtual marker has inaccurate position estimation due to occlusion or truncation, the dependent vertices may have inaccurate estimates, while the rest will be barely affected. Figure 2 (right) shows more examples where occlusion or truncation occurs, and our method can still get accurate or reasonable estimates robustly. Note that when truncation occurs, our method still guesses the positions of the truncated virtual markers.", + "bbox": [ + 75, + 472, + 472, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 7 (bottom) shows our estimated meshes on challenging cases, which indicates the strong generalization ability of our model on diverse postures and actions in natural scenes. Please refer to the supplementary for more quality results. Note that since the datasets do not provide supervision of head orientation, face expression, hands, or feet, the estimates of these parts are just in canonical poses inevitably.", + "bbox": [ + 75, + 795, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f77b347f26e1845c88e5275ea6153d190ca64e6431719b2841008da21507bd7f.jpg", + "image_caption": [ + "Figure 8. (a) For each virtual marker (represented by a star), we highlight the top 30 most affected vertices (represented by a colored dot) based on average coefficient matrix $\\hat{\\mathbf{A}}$ . (b) For each vertex (dot), we highlight the top 3 virtual markers (star) that contribute the most. We can see that the dependency has a strong locality which improves the robustness when some virtual markers cannot be accurately detected." + ], + "image_footnote": [], + "bbox": [ + 542, + 337, + 671, + 441 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d9dfe504d7a86215e536183b1b742b4dce25ee797a320c746254582265d0cfa5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 337, + 846, + 441 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Apart from that, most errors are due to inaccurate 3D virtual marker estimation which may be addressed using more powerful estimators or more diverse training datasets in the future.", + "bbox": [ + 496, + 566, + 893, + 626 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion", + "text_level": 1, + "bbox": [ + 500, + 640, + 617, + 656 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we present a novel intermediate representation Virtual Marker, which is more expressive than the prevailing skeleton representation and more accessible than physical markers. It can reconstruct 3D meshes more accurately and efficiently, especially in handling diverse body shapes. Besides, the coefficient matrix in the virtual marker representation encodes spatial relationships among mesh vertices which allows the method to implicitly explore structure priors of human body. It achieves better mesh estimation results than the state-of-the-art methods and shows advanced generalization potential in spite of its simplicity.", + "bbox": [ + 496, + 666, + 893, + 832 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 845, + 660, + 862 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by MOST-2022ZD0114900 and NSFC-62061136001.", + "bbox": [ + 500, + 869, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "541", + "bbox": [ + 485, + 945, + 509, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Anurag Arnab, Carl Doersch, and Andrew Zisserman. Exploiting temporal context for 3d human pose estimation in the wild. In CVPR, pages 3395-3404, 2019.", + "[2] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In ECCV, pages 561-578, 2016.", + "[3] Ronan Boulic, Pascal Becheiraz, Luc Emerging, and Daniel Thalmann. Integration of motion control techniques for virtual human and avatar real-time animation. In Proceedings of the ACM symposium on Virtual reality software and technology, pages 111-118, 1997.", + "[4] Yuansi Chen, Julien Mairal, and Zaid Harchaoui. Fast and robust archetypal analysis for representation learning. In CVPR, pages 1478-1485, 2014.", + "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In ECCV, 2022.", + "[6] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3d human pose and shape from a video. In CVPR, pages 1964-1973, 2021.", + "[7] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2mesh: Graph convolutional network for 3d human pose and mesh recovery from a 2d human pose. In ECCV, pages 769-787, 2020.", + "[8] Hongsuk Choi, Gyeongsik Moon, JoonKyu Park, and Kyoung Mu Lee. Learning to estimate robust 3d human mesh from in-the-wild crowded scenes. In CVPR, pages 1475-1484, June 2022.", + "[9] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J Black. Monocular expressive body regression through body-driven attention. In ECCV, pages 20-40, 2020.", + "[10] Hai Ci, Mingdong Wu, Wentao Zhu, Xiaoxuan Ma, Hao Dong, Fangwei Zhong, and Yizhou Wang. Gfpose: Learning 3d human pose prior with gradient fields. arXiv preprint arXiv:2212.08641, 2022.", + "[11] Enric Corona, Gerard Pons-Moll, Guillem Alenyà, and Francesc Moreno-Noguer. Learned vertex descent: a new direction for 3d human model fitting. In ECCV, pages 146-165. 
Springer, 2022.", + "[12] Adele Cutler and Leo Breiman. Archetypal analysis. Technometrics, 36(4):338-347, 1994.", + "[13] Shanyan Guan, Jingwei Xu, Michelle Z He, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Out-of-domain human mesh reconstruction via dynamic bilevel online adaptation. IEEE TPAMI, 2022.", + "[14] Yinghao Huang, Federica Bogo, Christoph Lassner, Angjoo Kanazawa, Peter V Gehler, Javier Romero, Ijaz Akhter, and Michael J Black. Towards accurate marker-less human shape and pose estimation over time. In 3DV, pages 421-430, 2017.", + "[15] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE TPAMI, 36(7):1325-1339, 2013." + ], + "bbox": [ + 76, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Karim Iskakov, Egor Burkov, Victor Lempitsky, and Yury Malkov. Learnable triangulation of human pose. In ICCV, pages 7718-7727, 2019.", + "[17] Ian T Jolliffe. Principal components in regression analysis. In Principal component analysis, pages 129-155. Springer, 1986.", + "[18] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, pages 7122-7131, 2018.", + "[19] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3d human dynamics from video. In CVPR, pages 5614-5623, 2019.", + "[20] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In CVPR, pages 1715-1725, June 2022.", + "[21] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015.", + "[22] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In CVPR, pages 5253-5263, 2020.", + "[23] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation. In ICCV, pages 11127-11137, October 2021.", + "[24] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In ICCV, pages 2252-2261, 2019.", + "[25] Nikos Kolotouros, Georgios Pavlakos, and Kostas Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In CVPR, pages 4501-4510, 2019.", + "[26] Nikos Kolotouros, Georgios Pavlakos, Dinesh Jayaraman, and Kostas Daniilidis. Probabilistic modeling for human mesh recovery. In ICCV, pages 11605-11614, October 2021.", + "[27] Christoph Lassner, Javier Romero, Martin Kiefel, Federica Bogo, Michael J Black, and Peter V Gehler. Unite the people: Closing the loop between 3d and 2d human representations. In CVPR, pages 6050-6059, 2017.", + "[28] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. Hybrik: A hybrid analytical-neural inverse kinematics solution for 3d human pose and shape estimation. In CVPR, pages 3383-3393, 2021.", + "[29] Yong-Lu Li, Liang Xu, Xinpeng Liu, Xijie Huang, Yue Xu, Shiyi Wang, Hao-Shu Fang, Ze Ma, Mingyang Chen, and Cewu Lu. Pastanet: Toward human activity knowledge engine. In CVPR, pages 382-391, 2020.", + "[30] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. Cliff: Carrying location information in full frames into human pose and shape estimation. In ECCV, 2022.", + "[31] Kevin Lin, Lijuan Wang, and Zicheng Liu. 
End-to-end human pose and mesh reconstruction with transformers. In CVPR, pages 1954-1963, 2021.", + "[32] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In ICCV, pages 12939-12948, 2021.", + "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "542", + "bbox": [ + 486, + 945, + 511, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zitnick. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014.", + "[34] Matthew Loper, Naureen Mahmood, and Michael J Black. Mosh: Motion and shape capture from sparse markers. TOG, 33(6):1-13, 2014.", + "[35] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multi-person linear model. TOG, 34(6):1-16, 2015.", + "[36] Tianyu Luan, Yali Wang, Junhao Zhang, Zhe Wang, Zhipeng Zhou, and Yu Qiao. Pc-hmr: Pose calibration for 3d human mesh recovery from 2d images/videos. In AAAI, pages 2269-2276, 2021.", + "[37] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, pages 506-516, 2017.", + "[38] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In ECCV, pages 752-768, 2020.", + "[39] Mohamed Omran, Christoph Lassner, Gerard Pons-Moll, Peter Gehler, and Bernt Schiele. Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In 3DV, pages 484-494. IEEE, 2018.", + "[40] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In CVPR, pages 10975-10985, 2019.", + "[41] Liliana Lo Presti and Marco La Cascia. 3d skeleton-based human action classification: A survey. Pattern Recognition, 53:130-147, 2016.", + "[42] Haibo Qiu, Chunyu Wang, Jingdong Wang, Naiyan Wang, and Wenjun Zeng. Cross view fusion for 3d human pose estimation. In ICCV, pages 4342-4351, 2019.", + "[43] Jiajun Su, Chunyu Wang, Xiaoxuan Ma, Wenjun Zeng, and Yizhou Wang. Virtualpose: Learning generalizable 3d human pose models from virtual data. In ECCV, pages 55-71. Springer, 2022.", + "[44] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, pages 5693-5703, 2019.", + "[45] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, pages 529-545, 2018.", + "[46] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3d people. In ICCV, pages 11179-11188, 2021.", + "[47] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In ICCV, pages 5349-5358, 2019.", + "[48] Hanyue Tu, Chunyu Wang, and Wenjun Zeng. Voxelpose: Towards multi-camera 3d human pose estimation in wild environment. In ECCV, pages 197-212. Springer, 2020.", + "[49] Hsiao-Yu Tung, Hsiao-Wei Tung, Ersin Yumer, and Katerina Fragkiadaki. Self-supervised learning of motion capture. In NIPS, volume 30, 2017."
+ ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[50] Gul Varol, Duygu Ceylan, Bryan Russell, Jimei Yang, Ersin Yumer, Ivan Laptev, and Cordelia Schmid. Bodynet: Volumetric inference of 3d human body shapes. In ECCV, pages 20-36, 2018.", + "[51] Gul Varol, Javier Romero, Xavier Martin, Naureen Mahmood, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning from synthetic humans. In CVPR, pages 109-117, 2017.", + "[52] Timo von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In ECCV, pages 601-617, 2018.", + "[53] Ziniu Wan, Zhengjia Li, Maoqing Tian, Jianbo Liu, Shuai Yi, and Hongsheng Li. Encoder-decoder with multi-level attention for 3d human shape and pose estimation. In ICCV, pages 13033-13042, 2021.", + "[54] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In ECCV, pages 52-67, 2018.", + "[55] Yuanlu Xu, Song-Chun Zhu, and Tony Tung. Denserac: Joint 3d pose and shape estimation by dense render-and-compare. In ICCV, pages 7760-7770, 2019.", + "[56] Chun-Han Yao, Jimei Yang, Duygu Ceylan, Yi Zhou, Yang Zhou, and Ming-Hsuan Yang. Learning visibility for robust dense human body estimation. In ECCV, 2022.", + "[57] Hang Ye, Wentao Zhu, Chunyu Wang, Rujie Wu, and Yizhou Wang. Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In ECCV, pages 142-159. Springer, 2022.", + "[58] Andrei Zanfir, Elisabeta Marinoiu, and Cristian Sminchisescu. Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In CVPR, pages 2148-2157, 2018.", + "[59] Mihai Zanfir, Andrei Zanfir, Eduard Gabriel Bazavan, William T Freeman, Rahul Sukthankar, and Cristian Sminchiescu. Thundr: Transformer-based 3d human reconstruction with markers. In ICCV, pages 12971-12980, 2021.", + "[60] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3d human mesh regression with dense correspondence. In CVPR, pages 7054-7063, 2020.", + "[61] Hongwen Zhang, Jie Cao, Guo Lu, Wanli Ouyang, and Zhenan Sun. Learning 3d human shape and pose from dense body parts. IEEE TPAMI, 44(5):2610-2627, 2022.", + "[62] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In ICCV, pages 11446-11456, 2021.", + "[63] Yifu Zhang, Chunyu Wang, Xinggang Wang, Wenyu Liu, and Wenjun Zeng. Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE TPAMI, 45(2):2613-2626, 2022." 
+ ], + "bbox": [ + 501, + 92, + 893, + 799 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "543", + "bbox": [ + 486, + 945, + 509, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_model.json b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..be9678773e0c9f3ce8170952fd050801cc1d7e4e --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_model.json @@ -0,0 +1,2706 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.227, + 0.131, + 0.745, + 0.151 + ], + "angle": 0, + "content": "3D Human Mesh Estimation from Virtual Markers" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.179, + 0.819, + 0.2 + ], + "angle": 0, + "content": "Xiaoxuan Ma\\(^{1}\\) Jiajun Su\\(^{1}\\) Chunyu Wang\\(^{3*}\\) Wentao Zhu\\(^{1}\\) Yizhou Wang\\(^{1,2,4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.211, + 0.849, + 0.293 + ], + "angle": 0, + "content": "\\(^{1}\\) School of Computer Science, Center on Frontiers of Computing Studies, Peking University \n\\(^{2}\\) Inst. for Artificial Intelligence, Peking University \n\\(^{3}\\) Microsoft Research Asia \n\\(^{4}\\) Nat'l Eng. Research Center of Visual Technology" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.304, + 0.819, + 0.318 + ], + "angle": 0, + "content": "{maxiaoxuan, sujiajun, wtzhu, yizhou.wang}@pku.edu.cn, chnuwa@microsoft.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.353, + 0.314, + 0.369 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.386, + 0.474, + 0.718 + ], + "angle": 0, + "content": "Inspired by the success of volumetric 3D pose estimation, some recent human mesh estimators propose to estimate 3D skeletons as intermediate representations, from which, the dense 3D meshes are regressed by exploiting the mesh topology. However, body shape information is lost in extracting skeletons, leading to mediocre performance. The advanced motion capture systems solve the problem by placing dense physical markers on the body surface, which allows to extract realistic meshes from their non-rigid motions. However, they cannot be applied to wild images without markers. In this work, we present an intermediate representation, named virtual markers, which learns 64 landmark keypoints on the body surface based on the large-scale mocap data in a generative style, mimicking the effects of physical markers. The virtual markers can be accurately detected from wild images and can reconstruct the intact meshes with realistic shapes by simple interpolation. Our approach outperforms the state-of-the-art methods on three datasets. In particular, it surpasses the existing methods by a notable margin on the SURREAL dataset, which has diverse body shapes. Code is available at https://github.com/ShirleyMaxx/VirtualMarker." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.747, + 0.21, + 0.763 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.772, + 0.471, + 0.879 + ], + "angle": 0, + "content": "3D human mesh estimation aims to estimate the 3D positions of the mesh vertices that are on the body surface. The task has attracted a lot of attention from the computer vision and computer graphics communities [3, 10, 18, 24, 26, 29, 34, 36, 41, 49] because it can benefit many applications such as virtual reality [14]. Recently, the deep learning-based methods [7, 18, 28] have significantly" + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.353, + 0.862, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.696, + 0.896, + 0.752 + ], + "angle": 0, + "content": "Figure 1. Mesh estimation results on four examples with different body shapes. Pose2Mesh [7] which uses 3D skeletons as the intermediate representation fails to predict accurate shapes. Our virtual marker-based method obtains accurate estimates." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.764, + 0.833, + 0.779 + ], + "angle": 0, + "content": "advanced the accuracy on the benchmark datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.896, + 0.901 + ], + "angle": 0, + "content": "The pioneer methods [18,49] propose to regress the pose and shape parameters of the mesh models such as SMPL [35] directly from images. While straightforward, their accuracy is usually lower than the state-of-the-arts. The first reason is that the mapping from the image features to the model parameters is highly non-linear and suffers from image-model misalignment [28]. Besides, existing mesh datasets [15,27,37,52] are small and limited to simple labo" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.221, + 0.9 + ], + "angle": 0, + "content": "*Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "534" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.471, + 0.135 + ], + "angle": 0, + "content": "ratory environments due to the complex capturing process. The lack of sufficient training data severely limits its performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.14, + 0.471, + 0.456 + ], + "angle": 0, + "content": "Recently, some works [25, 38] begin to formulate mesh estimation as a dense 3D keypoint detection task inspired by the success of volumetric pose estimation [42, 43, 45, 48, 57, 63]. For example, in [25, 38], the authors propose to regress the 3D positions of all vertices. However, it is computationally expensive because it has more than several thousand vertices. Moon and Lee [38] improve the efficiency by decomposing the 3D heatmaps into multiple 1D heatmaps at the cost of mediocre accuracy. Choi et al. [7] propose to first detect a sparser set of skeleton joints in the images, from which the dense 3D meshes are regressed by exploiting the mesh topology. The methods along this direction have attracted increasing attention [7, 28, 53] due to two reasons. First, the proxy task of 3D skeleton estimation can leverage the abundant 2D pose datasets which notably improves the accuracy. Second, mesh regression from the skeletons is efficient. However, important information about the body shapes is lost in extracting the 3D skeletons, which is largely overlooked previously. 
As a result, different types of body shapes, such as lean or obese, cannot be accurately estimated (see Figure 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.46, + 0.471, + 0.717 + ], + "angle": 0, + "content": "The professional marker-based motion capture (mocap) method MoSh [34] places physical markers on the body surface and explore their subtle non-rigid motions to extract meshes with accurate shapes. However, the physical markers limit the approach to be used in laboratory environments. We are inspired to think whether we can identify a set of landmarks on the mesh as virtual markers, e.g., elbow and wrist, that can be detected from wild images, and allow to recover accurate body shapes? The desired virtual markers should satisfy several requirements. First, the number of markers should be much smaller than that of the mesh vertices so that we can use volumetric representations to efficiently estimate their 3D positions. Second, the markers should capture the mesh topology so that the intact mesh can be accurately regressed from them. Third, the virtual markers have distinguishable visual patterns so that they can be detected from images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.471, + 0.901 + ], + "angle": 0, + "content": "In this work, we present a learning algorithm based on archetypal analysis [12] to identify a subset of mesh vertices as the virtual markers that try to satisfy the above requirements to the best extent. Figure 2 shows that the learned virtual markers coarsely outline the body shape and pose which paves the way for estimating meshes with accurate shapes. Then we present a simple framework for 3D mesh estimation on top of the representation as shown in Figure 3. It first learns a 3D keypoint estimation network based on [45] to detect the 3D positions of the virtual markers. Then we recover the intact mesh simply by interpolating them. The interpolation weights are pre-trained in the representation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.136 + ], + "angle": 0, + "content": "learning step and will be adjusted by a light network based on the prediction confidences of the virtual markers for each image." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.137, + 0.895, + 0.273 + ], + "angle": 0, + "content": "We extensively evaluate our approach on three benchmark datasets. It consistently outperforms the state-of-the-art methods on all of them. In particular, it achieves a significant gain on the SURREAL dataset [51] which has a variety of body shapes. Our ablation study also validates the advantages of the virtual marker representation in terms of recovering accurate shapes. Finally, the method shows decent generalization ability and generates visually appealing results for the wild images." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.289, + 0.637, + 0.305 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.314, + 0.816, + 0.33 + ], + "angle": 0, + "content": "2.1. Optimization-based mesh estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.337, + 0.895, + 0.563 + ], + "angle": 0, + "content": "Before deep learning dominates this field, 3D human mesh estimation [2, 27, 34, 40, 58] is mainly optimization-based, which optimizes the parameters of the human mesh models to match the observations. For example, Loper et al. [34] propose MoSh that optimizes the SMPL parameters to align the mesh with the 3D marker positions. 
It is usually used to get GT 3D meshes for benchmark datasets because of its high accuracy. Later works propose to optimize the model parameters or mesh vertices based on 2D image cues [2, 11, 27, 40, 58]. They extract intermediate representations such as 2D skeletons from the images and optimize the mesh model by minimizing the discrepancy between the model projection and the intermediate representations such as the 2D skeletons. These methods are usually sensitive to initialization and suffer from local optimum." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.575, + 0.785, + 0.59 + ], + "angle": 0, + "content": "2.2. Learning-based mesh estimation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.599, + 0.895, + 0.719 + ], + "angle": 0, + "content": "Recently, most works follow the learning-based framework and have achieved promising results. Deep networks [18, 24, 26, 36, 49] are used to regress the SMPL parameters from image features. However, learning the mapping from the image space to the parameter space is highly nonlinear [38]. In addition, they suffer from the misalignment between the meshes and image pixels [60]. These problems make it difficult to learn an accurate yet generalizable model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Some works propose to introduce proxy tasks to get intermediate representations first, hoping to alleviate the learning difficulty. In particular, intermediate representations of physical markers [59], IUV images [55,60-62], body part segmentation masks [23,27,39,50] and body skeletons [7,28,47,53] have been proposed. In particular, THUNDR [59] first estimates the 3D locations of physical markers from images and then reconstructs the mesh from the 3D markers. The physical markers can be interpreted as a simplified representation of body shape and pose. Although it is very accurate, it cannot be applied to wild images without markers. In contrast, body skeleton is a popular human representation" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "535" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.089, + 0.48, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.089, + 0.852, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.243, + 0.895, + 0.3 + ], + "angle": 0, + "content": "Figure 2. Left: The learned virtual markers (blue balls) in the back and front views. The grey balls mean they are invisible in the front view. The virtual markers act similarly to physical body markers and approximately outline the body shape. Right: Mesh estimation results by our approach, from left to right are input image, estimated 3D mesh overlayed on the image, and three different viewpoints showing the estimated 3D mesh with our intermediate predicted virtual markers (blue balls), respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.471, + 0.385 + ], + "angle": 0, + "content": "that can be robustly detected from wild images. Choi et al. [7] propose to first estimate the 3D skeletons, and then estimate the intact mesh from them. However, accurate body shapes are difficult to be recovered from the oversimplified 3D skeletons." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.386, + 0.471, + 0.493 + ], + "angle": 0, + "content": "Our work belongs to the learning-based class and is related to works that use physical markers or skeletons as intermediate representations. But different from them, we propose a novel intermediate representation, named virtual markers, which is more expressive to reduce the ambiguity in pose and shape estimation than body skeletons and can be applied to wild images." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.506, + 0.169, + 0.521 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.531, + 0.47, + 0.622 + ], + "angle": 0, + "content": "In this section, we describe the details of our approach. First, Section 3.1 introduces how we learn the virtual marker representation from mocap data. Then we present the overall framework for mesh estimation from an image in Section 3.2. At last, Section 3.3 discusses the loss functions and training details." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.632, + 0.379, + 0.648 + ], + "angle": 0, + "content": "3.1. The virtual marker representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.655, + 0.47, + 0.765 + ], + "angle": 0, + "content": "We represent a mesh by a vector of vertex positions \\(\\mathbf{x} \\in \\mathbb{R}^{3M}\\) where \\(M\\) is the number of mesh vertices. Denote a mocap dataset such as [15] with \\(N\\) meshes as \\(\\widehat{\\mathbf{X}} = [\\mathbf{x}_1, \\dots, \\mathbf{x}_N] \\in \\mathbb{R}^{3M \\times N}\\). To unveil the latent structure among vertices, we reshape it to \\(\\mathbf{X} \\in \\mathbb{R}^{3N \\times M}\\) with each column \\(\\mathbf{x}_i \\in \\mathbb{R}^{3N}\\) representing all possible positions of the \\(i^{\\text{th}}\\) vertex in the dataset [15]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.902 + ], + "angle": 0, + "content": "The rank of \\(\\mathbf{X}\\) is smaller than \\(M\\) because the mesh representation is smooth and redundant where some vertices can be accurately reconstructed by the others. While it seems natural to apply PCA [17] to \\(\\mathbf{X}\\) to compute the eigenvectors as virtual markers for reconstructing others, there is no guarantee that the virtual markers correspond to the mesh vertices, making them difficult to be detected from images. Instead, we aim to learn \\(K\\) virtual markers \\(\\mathbf{Z} = [\\mathbf{z}_1,\\dots,\\mathbf{z}_K]\\in \\mathbb{R}^{3N\\times K}\\) that try to satisfy the follow" + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.308, + 0.876, + 0.354 + ], + "angle": 0, + "content": "
TypeFormulaReconst. Error (mm) ↓
Original||X - XBA||2F11.67
Symmetric||X - XBsymAsym||2F10.98
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.365, + 0.894, + 0.42 + ], + "angle": 0, + "content": "Table 1. The reconstruction errors using the original and the symmetric sets of markers on the H3.6M dataset [15], respectively. The errors are small indicating that they are sufficiently expressive and can reconstruct all vertices accurately." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.433, + 0.894, + 0.553 + ], + "angle": 0, + "content": "ing two requirements to the greatest extent. First, they can accurately reconstruct the intact mesh \\(\\mathbf{X}\\) by their linear combinations: \\(\\mathbf{X} = \\mathbf{Z}\\mathbf{A}\\), where \\(\\mathbf{A} \\in \\mathbb{R}^{K \\times M}\\) is a coefficient matrix that encodes the spatial relationship between the virtual markers and the mesh vertices. Second, they should have distinguishable visual patterns in images so that they can be easily detected from images. Ideally, they can be on the body surface as the meshes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.894, + 0.629 + ], + "angle": 0, + "content": "We apply archetypal analysis [4, 12] to learn \\(\\mathbf{Z}\\) by minimizing a reconstruction error with two additional constraints: (1) each vertex \\(\\mathbf{x}_i\\) can be reconstructed by convex combinations of \\(\\mathbf{Z}\\), and (2) each marker \\(\\mathbf{z}_i\\) should be convex combinations of the mesh vertices \\(\\mathbf{X}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.642, + 0.892, + 0.676 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\substack {\\boldsymbol {\\alpha} _ {i} \\in \\Delta_ {K} \\text {for} 1 \\leq i \\leq M, \\\\ \\boldsymbol {\\beta} _ {j} \\in \\Delta_ {M} \\text {for} 1 \\leq j \\leq K}} \\| \\mathbf {X} - \\mathbf {X B A} \\| _ {F} ^ {2}, \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.688, + 0.894, + 0.826 + ], + "angle": 0, + "content": "where \\(\\mathbf{A} = [\\pmb{\\alpha}_1, \\dots, \\pmb{\\alpha}_M] \\in \\mathbb{R}^{K \\times M}\\), each \\(\\pmb{\\alpha}\\) resides in the simplex \\(\\Delta_K \\triangleq \\{\\pmb{\\alpha} \\in \\mathbb{R}^K \\text{ s.t. } \\pmb{\\alpha} \\succeq 0 \\text{ and } ||\\pmb{\\alpha}||_1 = 1\\}\\), and \\(\\mathbf{B} = [\\beta_1, \\dots, \\beta_K] \\in \\mathbb{R}^{M \\times K}\\), \\(\\beta_j \\in \\Delta_M\\). We adopt Active-set algorithm [4] to solve objective (1) and obtain the learned virtual markers \\(\\mathbf{Z} = \\mathbf{X}\\mathbf{B} \\in \\mathbb{R}^{3N \\times K}\\). As shown in [4, 12], the two constraints encourage the virtual markers \\(\\mathbf{Z}\\) to unveil the latent structure among vertices, therefore they learn to be close to the extreme points of the mesh and located on the body surface as much as possible." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Post-processing. Since human body is left-right symmetric, we adjust \\(\\mathbf{Z}\\) to reflect the property. We first replace each \\(\\mathbf{z}_i\\in\\) \\(\\mathbf{Z}\\) by its nearest vertex on the mesh and obtain \\(\\widetilde{\\mathbf{Z}}\\in \\mathbb{R}^{3\\times K}\\). 
This step allows us to compute the left or right counterpart" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "536" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.126, + 0.091, + 0.837, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.27, + 0.895, + 0.313 + ], + "angle": 0, + "content": "Figure 3. Overview of our framework. Given an input image \\(\\mathbf{I}\\), it first estimates the 3D positions \\(\\hat{\\mathbf{P}}\\) of the virtual markers. Then we update the coefficient matrix \\(\\hat{\\mathbf{A}}\\) based on the estimation confidence scores \\(\\mathbf{C}\\) of the virtual markers. Finally, the complete human mesh can be simply recovered by linear multiplication \\(\\hat{\\mathbf{M}} = \\hat{\\mathbf{P}}\\hat{\\mathbf{A}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.325, + 0.471, + 0.417 + ], + "angle": 0, + "content": "of each marker. Then we replace the markers in the right body with the symmetric vertices in the left body and obtain the symmetric markers \\(\\widetilde{\\mathbf{Z}}^{sym} \\in \\mathbb{R}^{3 \\times K}\\). Finally we update \\(\\mathbf{B}\\) and \\(\\mathbf{A}\\) by minimizing \\(||\\mathbf{X} - \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}\\widetilde{\\mathbf{A}}^{sym}||_F^2\\) subject to \\(\\widetilde{\\mathbf{Z}}^{sym} = \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}\\). More details are elaborated in the supplementary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.417, + 0.474, + 0.569 + ], + "angle": 0, + "content": "Figure 2 shows the virtual markers learned on the mocap dataset [15] after post-processing. They are similar to the physical markers and approximately outline the body shape which agrees with our expectations. They are roughly evenly distributed on the surface of the body, and some of them are located close to the body keypoints, which have distinguishable visual patterns to be accurately detected. Table 1 shows the reconstruction errors of using original markers \\(\\mathbf{XB}\\) and the symmetric markers \\(\\widetilde{\\mathbf{XB}}^{sym}\\). Both can reconstruct meshes accurately." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.576, + 0.331, + 0.59 + ], + "angle": 0, + "content": "3.2. Mesh estimation framework" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.472, + 0.703 + ], + "angle": 0, + "content": "On top of the virtual markers, we present a simple yet effective framework for end-to-end 3D human mesh estimation from a single image. As shown in Figure 3, it consists of two branches. The first branch uses a volumetric CNN [45] to estimate the 3D positions \\(\\hat{\\mathbf{P}}\\) of the markers, and the second branch reconstructs the full mesh \\(\\hat{\\mathbf{M}}\\) by predicting a coefficient matrix \\(\\hat{\\mathbf{A}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.704, + 0.47, + 0.719 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {M}} = \\hat {\\mathbf {P}} \\hat {\\mathbf {A}}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.726, + 0.405, + 0.74 + ], + "angle": 0, + "content": "We will describe the two branches in more detail." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.756, + 0.471, + 0.861 + ], + "angle": 0, + "content": "3D marker estimation. 
We train a neural network to estimate a 3D heatmap \\(\\hat{\\mathbf{H}} = [\\hat{\\mathbf{H}}_1, \\dots, \\hat{\\mathbf{H}}_K] \\in \\mathbb{R}^{K \\times D \\times H \\times W}\\) from an image. The heatmap encodes per-voxel likelihood of each marker. There are \\(D \\times H \\times W\\) voxels in total which are used to discretize the 3D space. The 3D position \\(\\hat{\\mathbf{P}}_z \\in \\mathbb{R}^3\\) of each marker is computed as the center of mass of the corresponding heatmap \\(\\hat{\\mathbf{H}}_z\\) [45] as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.867, + 0.47, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {P}} _ {z} = \\sum_ {d = 1} ^ {D} \\sum_ {h = 1} ^ {H} \\sum_ {w = 1} ^ {W} (d, h, w) \\cdot \\hat {\\mathbf {H}} _ {z} (d, h, w). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.325, + 0.892, + 0.356 + ], + "angle": 0, + "content": "The positions of all markers are represented as \\(\\hat{\\mathbf{P}} = [\\hat{\\mathbf{P}}_1, \\hat{\\mathbf{P}}_2, \\dots, \\hat{\\mathbf{P}}_K]\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.37, + 0.895, + 0.534 + ], + "angle": 0, + "content": "Interpolation. Ideally, if we have accurate estimates for all virtual markers \\(\\hat{\\mathbf{P}}\\), then we can recover the complete mesh by simply multiplying \\(\\hat{\\mathbf{P}}\\) with a fixed coefficient matrix \\(\\tilde{\\mathbf{A}}^{sym}\\) with sufficient accuracy as validated in Table 1. However, in practice, some markers may have large estimation errors because they may be occluded in the monocular setting. Note that this happens frequently. For example, the markers in the back will be occluded when a person is facing the camera. As a result, inaccurate marker positions may bring large errors to the final mesh if we directly multiply them with the fixed matrix \\(\\tilde{\\mathbf{A}}^{sym}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.894, + 0.656 + ], + "angle": 0, + "content": "Our solution is to rely more on those accurately detected markers. To that end, we propose to update the coefficient matrix based on the estimation confidence scores of the markers. In practice, we simply take the heatmap score at the estimated positions of each marker, i.e. \\(\\hat{\\mathbf{H}}_z(\\hat{\\mathbf{P}}_z)\\), and feed them to a single fully-connected layer to obtain the coefficient matrix \\(\\hat{\\mathbf{A}}\\). Then the mesh is reconstructed by \\(\\hat{\\mathbf{M}} = \\hat{\\mathbf{P}}\\hat{\\mathbf{A}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.667, + 0.605, + 0.683 + ], + "angle": 0, + "content": "3.3. Training" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.691, + 0.892, + 0.721 + ], + "angle": 0, + "content": "We train the whole network end-to-end in a supervised way. The overall loss function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.574, + 0.732, + 0.892, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\lambda_{vm} \\mathcal{L}_{vm} + \\lambda_{c} \\mathcal{L}_{\\text{conf}} + \\lambda_{m} \\mathcal{L}_{\\text{mesh}}. \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.758, + 0.891, + 0.801 + ], + "angle": 0, + "content": "Virtual marker loss. 
We define \\(\\mathcal{L}_{vm}\\) as the \\(L_{1}\\) distance between the predicted 3D virtual markers \\(\\hat{\\mathbf{P}}\\) and the GT \\(\\hat{\\mathbf{P}}^{*}\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.632, + 0.802, + 0.892, + 0.818 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{vm} = \\left\\| \\hat{\\mathbf{P}} - \\hat{\\mathbf{P}}^{*} \\right\\|_{1}. \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.892, + 0.855 + ], + "angle": 0, + "content": "Note that it is easy to get GT markers \\(\\hat{\\mathbf{P}}^*\\) from GT meshes as stated in Section 3.1 without additional manual annotations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Confidence loss. We also require that the 3D heatmaps have reasonable shapes, therefore, the heatmap score at the" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.945, + 0.511, + 0.957 + ], + "angle": 0, + "content": "537" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.47, + 0.122 + ], + "angle": 0, + "content": "voxel containing the GT marker position \\(\\hat{\\mathbf{P}}_z^*\\) should have the maximum value as in the previous work [16]:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.129, + 0.47, + 0.166 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{conf}} = - \\sum_{z = 1}^{K} \\log \\left(\\hat{\\mathbf{H}}_{z} \\left(\\hat{\\mathbf{P}}_{z}^{*}\\right)\\right). \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.173, + 0.469, + 0.203 + ], + "angle": 0, + "content": "Mesh loss. Following [38], we define \\(\\mathcal{L}_{mesh}\\) as a weighted sum of four losses:" + }, + { + "type": "equation", + "bbox": [ + 0.098, + 0.213, + 0.47, + 0.231 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{mesh}} = \\mathcal{L}_{\\text{vertex}} + \\mathcal{L}_{\\text{pose}} + \\mathcal{L}_{\\text{normal}} + \\lambda_{e} \\mathcal{L}_{\\text{edge}}. \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.239, + 0.469, + 0.282 + ], + "angle": 0, + "content": "- Vertex coordinate loss. We adopt \\(L_{1}\\) loss between the predicted 3D mesh coordinates \\(\\hat{\\mathbf{M}}\\) and the GT mesh \\(\\hat{\\mathbf{M}}^{*}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.283, + 0.47, + 0.299 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{vertex}} = \\left\\| \\hat{\\mathbf{M}} - \\hat{\\mathbf{M}}^{*} \\right\\|_{1}. \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.307, + 0.469, + 0.35 + ], + "angle": 0, + "content": "- Pose loss. We use \\(L_{1}\\) loss between the 3D landmark joints regressed from mesh \\(\\hat{\\mathbf{M}}\\mathcal{J}\\) and the GT joints \\(\\hat{\\mathbf{J}}^{*}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.215, + 0.351, + 0.469, + 0.367 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{pose}} = \\left\\| \\hat{\\mathbf{M}} \\mathcal{J} - \\hat{\\mathbf{J}}^{*} \\right\\|_{1}, \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.37, + 0.469, + 0.402 + ], + "angle": 0, + "content": "where \\(\\mathcal{J} \\in \\mathbb{R}^{M \\times J}\\) is a pre-defined joint regression matrix in SMPL model [2]." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.41, + 0.47, + 0.472 + ], + "angle": 0, + "content": "- Surface losses. 
To improve surface smoothness [54], we supervise the normal vector of a triangle face with GT normal vectors by \\(\\mathcal{L}_{\\text{normal}}\\) and the edge length of the predicted mesh with GT length by \\(\\mathcal{L}_{\\text{edge}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.478, + 0.47, + 0.564 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal{L}_{\\text{normal}} = \\sum_{f} \\sum_{\\{i, j\\} \\subset f} \\left| \\left\\langle \\frac{\\hat{\\mathbf{M}}_{i} - \\hat{\\mathbf{M}}_{j}}{\\| \\hat{\\mathbf{M}}_{i} - \\hat{\\mathbf{M}}_{j} \\|_{2}}, \\hat{\\mathbf{n}}_{f}^{*} \\right\\rangle \\right|, \\\\ \\mathcal{L}_{\\text{edge}} = \\sum_{f} \\sum_{\\{i, j\\} \\subset f} \\left| \\| \\hat{\\mathbf{M}}_{i} - \\hat{\\mathbf{M}}_{j} \\|_{2} - \\| \\hat{\\mathbf{M}}_{i}^{*} - \\hat{\\mathbf{M}}_{j}^{*} \\|_{2} \\right|. \\tag{10} \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.565, + 0.469, + 0.613 + ], + "angle": 0, + "content": "where \\(f\\) and \\(\\hat{\\mathbf{n}}_f^*\\) denote a triangle face in the mesh and its GT unit normal vector, respectively. \\(\\hat{\\mathbf{M}}_i\\) denotes the \\(i^{th}\\) vertex of \\(\\hat{\\mathbf{M}}\\). * denotes GT." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.627, + 0.21, + 0.644 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.651, + 0.274, + 0.667 + ], + "angle": 0, + "content": "4.1. Datasets and metrics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.469, + 0.749 + ], + "angle": 0, + "content": "H3.6M [15]. We use (S1, S5, S6, S7, S8) for training and (S9, S11) for testing. As in [7, 18, 31, 32], we report MPJPE and PA-MPJPE for poses that are derived from the estimated meshes. We also report Mean Per Vertex Error (MPVE) for the whole mesh." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.825 + ], + "angle": 0, + "content": "3DPW [52] is collected in natural scenes. Following the previous works [23, 31, 32, 59], we use the train set of 3DPW to learn the model and evaluate on the test set. The same evaluation metrics as H3.6M are used." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.469, + 0.902 + ], + "angle": 0, + "content": "SURREAL [51] is a large-scale synthetic dataset with GT SMPL annotations and has diverse samples in terms of body shapes, backgrounds, etc. We use its training set to train a model and evaluate the test split following [7]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.717, + 0.107 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.118, + 0.894, + 0.375 + ], + "angle": 0, + "content": "We learn 64 virtual markers on the H3.6M [15] training set. We use the same set of markers for all datasets instead of learning a separate set on each dataset. Following [7, 18, 22, 25, 31, 32, 38, 59], we conduct mix-training by using MPI-INF-3DHP [37], UP-3D [27], and COCO [33] training set for experiments on the H3.6M and 3DPW datasets. We adapt a 3D pose estimator [45] with HRNet-W48 [44] as the image feature backbone for estimating the 3D virtual markers. We set the number of voxels in each dimension to be 64, i.e. \\( D = H = W = 64 \\) for 3D heatmaps. Following [18, 25, 38], we crop every single human region from the input image and resize it to \\( 256 \\times 256 \\). 
We use Adam [21] optimizer to train the whole framework for 40 epochs with a batch size of 32. The learning rates for the two branches are set to \\( 5 \\times 10^{-4} \\) and \\( 1 \\times 10^{-3} \\), respectively, which are decreased by half after the \\( 30^{th} \\) epoch. Please refer to the supplementary for more details." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.397, + 0.809, + 0.413 + ], + "angle": 0, + "content": "4.3. Comparison to the State-of-the-arts" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.424, + 0.894, + 0.546 + ], + "angle": 0, + "content": "Results on H3.6M. Table 2 compares our approach to the state-of-the-art methods on the H3.6M dataset. Our method achieves competitive or superior performance. In particular, it outperforms the methods that use skeletons (Pose2Mesh [7], DSD-SATN [47]), body markers (THUNDR) [59], or IUV image [60, 62] as proxy representations, demonstrating the effectiveness of the virtual marker representation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.565, + 0.894, + 0.747 + ], + "angle": 0, + "content": "Results on 3DPW. We compare our method to the state-of-the-art methods on the 3DPW dataset in Table 2. Our approach achieves state-of-the-art results among all the methods, validating the advantages of the virtual marker representation over the skeleton representation used in Pose2Mesh [7], DSD-SATN [47], and other representations like IUV image used in PyMAF [62]. In particular, our approach outperforms I2L-MeshNet [38], METRO [31], and Mesh Graphormer [32] by a notable margin, which suggests that virtual markers are more suitable and effective representations than detecting all vertices directly as most of them are not discriminative enough to be accurately detected." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.894, + 0.9 + ], + "angle": 0, + "content": "Results on SURREAL. This dataset has more diverse samples in terms of body shapes. The results are shown in Table 3. Our approach outperforms the state-of-the-art methods by a notable margin, especially in terms of MPVE. Figure 1 shows some challenging cases without cherry-picking. The skeleton representation loses the body shape information so the method [7] can only recover mean shapes. In contrast, our approach generates much more accurate mesh estimation results." + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "538" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.134, + 0.088, + 0.842, + 0.535 + ], + "angle": 0, + "content": "
MethodIntermediate RepresentationH3.6M3DPW
MPVE↓MPJPE↓PA-MPJPE↓MPVE↓MPJPE↓PA-MPJPE↓
† Arnab et al. [1] CVPR'192D skeleton-77.854.3--72.2
† HMMR [19] CVPR'19---56.9139.3116.572.6
† DSD-SATN [47] ICCV'193D skeleton-59.142.4--69.5
† VIBE [22] CVPR'20--65.941.599.182.951.9
† TCMR [6] CVPR'21--62.341.1102.986.552.7
† MAED [53] ICCV'213D skeleton-56.338.792.679.145.7
SMPLify [2] ECCV'162D skeleton--82.3---
HMR [18] CVPR'18-96.188.056.8152.7130.081.3
GraphCMR [25] CVPR'193D vertices--50.1--70.2
SPIN [24] ICCV'19---41.1116.496.959.2
DenseRac [55] ICCV'19IUV image-76.848.0---
DecoMR [60] CVPR'20IUV image-60.639.3---
ExPose [9] ECCV'20-----93.460.7
Pose2Mesh [7] ECCV'203D skeleton85.364.946.3106.388.958.3
I2L-MeshNet [38] ECCV'203D vertices65.155.741.1110.193.257.7
PC-HMR [36] AAAI'213D skeleton---108.687.866.9
HybrIK [28] CVPR'213D skeleton65.754.434.586.574.145.0
METRO [31] CVPR'213D vertices-54.036.788.277.147.9
ROMP [46] ICCV'21----108.391.354.9
Mesh Graphormer [32] ICCV'213D vertices-51.234.587.774.745.6
PARE [23] ICCV'21Segmentation---88.674.546.5
THUNDR [59] ICCV'213D markers-55.039.888.074.851.5
PyMaf [62] ICCV'21IUV image-57.740.5110.192.858.9
ProHMR [26] ICCV'21---41.2--59.8
OCHMR [20] CVPR'222D heatmap---107.189.758.3
3DCrowdNet [8] CVPR'223D skeleton---98.381.751.5
CLIFF [30] ECCV'22--47.132.781.269.043.0
FastMETRO [5] ECCV'223D vertices-52.233.784.173.544.6
VisDB [56] ECCV'223D vertices-51.034.585.573.544.9
OursVirtual marker58.047.332.077.967.541.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.544, + 0.895, + 0.586 + ], + "angle": 0, + "content": "Table 2. Comparison to the state-of-the-arts on H3.6M [15] and 3DPW [52] datasets. \\( \\dagger \\) means using temporal cues. The methods are not strictly comparable because they may have different backbones and training datasets. We provide the numbers only to show proof-of-concept results." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.597, + 0.482, + 0.751 + ], + "angle": 0, + "content": "
MethodIntermediate RepresentationMPVE↓MPJPE↓PA-MPJPE↓
HMR [18] CVPR'18-85.173.655.4
BodyNet [50] ECCV'18Skel. + Seg.65.8--
GraphCMR [25] CVPR'193D vertices103.287.463.2
SPIN [24] ICCV'19-82.366.743.7
DecoMR [60] CVPR'20IUV image68.952.043.0
Pose2Mesh [7] ECCV'203D skeleton68.856.639.6
PC-HMR [36] AAAI'213D skeleton59.851.737.9
* DynaBOA [13] TPAMI'22-70.755.234.0
OursVirtual marker44.736.928.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.762, + 0.47, + 0.807 + ], + "angle": 0, + "content": "Table 3. Comparison to the state-of-the-arts on SURREAL [51] dataset. * means training on the test split with 2D supervisions. \"Skel. + Seg.\" means using skeleton and segmentation together." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.817, + 0.228, + 0.833 + ], + "angle": 0, + "content": "4.4. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Virtual marker representation. We compare our method to two baselines in Table 4. First, in baseline (a), we replace the virtual markers of our method with the skeleton representation. The rest are kept the same as ours (c). Our" + }, + { + "type": "table", + "bbox": [ + 0.533, + 0.596, + 0.864, + 0.669 + ], + "angle": 0, + "content": "
No.Intermediate RepresentationMPVE↓
H3.6MSURREAL
(a)Skeleton64.453.6
(b)Rand virtual marker63.050.1
(c)Virtual marker58.044.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.679, + 0.892, + 0.763 + ], + "angle": 0, + "content": "Table 4. Ablation study of the virtual marker representation for our approach on H3.6M and SURREAL datasets. \"Skeleton\" means the sparse landmark joint representation is used. \"Rand virtual marker\" means the virtual markers are randomly selected from all the vertices without learning. (c) is our method, where the learned virtual markers are used." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.901 + ], + "angle": 0, + "content": "method achieves a much lower MPVE than the baseline (a), demonstrating that the virtual markers help to estimate body shapes more accurately than the skeletons. In baseline (b), we randomly sample 64 from the 6890 mesh vertices as virtual markers. We repeat the experiment five times and report the average number. We can see that the result is worse than ours, which is because the randomly selected vertices may not be expressive to reconstruct the other vertices or can not" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.957 + ], + "angle": 0, + "content": "539" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.095, + 0.165, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.16, + 0.165, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.223, + 0.142, + 0.232 + ], + "angle": 0, + "content": "Image" + }, + { + "type": "image", + "bbox": [ + 0.171, + 0.096, + 0.252, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.171, + 0.159, + 0.251, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.223, + 0.264, + 0.232 + ], + "angle": 0, + "content": "Pose2Mesh" + }, + { + "type": "image", + "bbox": [ + 0.257, + 0.096, + 0.294, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.258, + 0.159, + 0.297, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.096, + 0.379, + 0.156 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.159, + 0.379, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.361, + 0.223, + 0.385, + 0.232 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.385, + 0.096, + 0.424, + 0.155 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.385, + 0.159, + 0.424, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.442, + 0.095, + 0.462, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.442, + 0.159, + 0.462, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.442, + 0.223, + 0.458, + 0.232 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.248, + 0.473, + 0.317 + ], + "angle": 0, + "content": "Figure 4. Mesh estimation results of different methods on H3.6M test set. Our method with virtual marker representation gets better shape estimation results than Pose2Mesh which uses skeleton representation. Note the waistline of the body and the thickness of the arm." 
+ }, + { + "type": "image", + "bbox": [ + 0.097, + 0.325, + 0.201, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.326, + 0.334, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.326, + 0.458, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.424, + 0.471, + 0.453 + ], + "angle": 0, + "content": "Figure 5. Visualization of the learned virtual markers of different numbers of \\( K = 16, 32, 96 \\), from left to right, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.483, + 0.471, + 0.529 + ], + "angle": 0, + "content": "be accurately detected from images as they lack distinguishable visual patterns. The results validate the effectiveness of our learning strategy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.53, + 0.471, + 0.712 + ], + "angle": 0, + "content": "Figure 1 shows some qualitative results on the SURREAL test set. The meshes estimated by the baseline which uses skeleton representation, i.e. Pose2Mesh [7], have inaccurate body shapes. This is reasonable because the skeleton is oversimplified and has very limited capability to recover shapes. Instead, it implicitly learns a mean shape for the whole training dataset. In contrast, the mesh estimated by using virtual markers has much better quality due to its strong representation power and therefore can handle different body shapes elegantly. Figure 4 also shows some qualitative results on the H3.6M test set. For clarity, we draw the intermediate representation (blue balls) in it as well." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Number of virtual markers. We evaluate how the number of virtual markers affects estimation quality on H3.6M [15] dataset. Figure 5 visualizes the learned virtual markers, which are all located on the body surface and close to the extreme points of the mesh. This is expected as mentioned in Section 3.1. Table 5 (GT) shows the mesh reconstruction results when we have GT 3D positions of the virtual markers in objective (1). When we increase the number of virtual markers, both mesh reconstruction error (MPVE) and the regressed landmark joint error (MPJPE) steadily decrease. This is expected because using more virtual markers improves the representation power. However, using more" + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.089, + 0.877, + 0.171 + ], + "angle": 0, + "content": "
KGTDet
MPVE↓MPJPE↓MPVE↓MPJPE↓
1646.839.858.747.8
3220.114.258.248.3
6411.07.558.047.3
969.95.659.648.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.181, + 0.895, + 0.251 + ], + "angle": 0, + "content": "Table 5. Ablation study of the different number of virtual markers \\((K)\\) on H3.6M [15] dataset. (GT) Mesh reconstruction results when GT 3D positions of the virtual markers are used in objective (1). (Det) Mesh estimation results obtained by our proposed framework when we use different numbers of virtual markers \\((K)\\)." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.267, + 0.603, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.352, + 0.583, + 0.362 + ], + "angle": 0, + "content": "Input Image" + }, + { + "type": "image", + "bbox": [ + 0.618, + 0.267, + 0.755, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.623, + 0.352, + 0.706, + 0.37 + ], + "angle": 0, + "content": "(a) Using fixed coefficient matrix" + }, + { + "type": "image", + "bbox": [ + 0.764, + 0.268, + 0.898, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.352, + 0.854, + 0.37 + ], + "angle": 0, + "content": "(b) Using updated coefficient matrix" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.383, + 0.893, + 0.424 + ], + "angle": 0, + "content": "Figure 6. Mesh estimation comparison results when using (a) fixed coefficient matrix \\(\\tilde{\\mathbf{A}}^{sym}\\), and (b) updated \\(\\hat{\\mathbf{A}}\\). Please zoom in to better see the details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.895, + 0.619 + ], + "angle": 0, + "content": "virtual markers cannot guarantee smaller estimation errors when we need to estimate the virtual marker positions from images as in our method. This is because the additional virtual markers may have large estimation errors which affect the mesh estimation result. The results are shown in Table 5 (Det). Increasing the number of virtual markers \\( K \\) steadily reduces the MPVE errors when \\( K \\) is smaller than 96. However, if we keep increasing \\( K \\), the error begins to increase. This is mainly because some of the newly introduced virtual markers are difficult to detect from images and therefore bring errors to mesh estimation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.625, + 0.895, + 0.837 + ], + "angle": 0, + "content": "Coefficient matrix. We compare our method to a baseline which uses the fixed coefficient matrix \\(\\widetilde{\\mathbf{A}}^{sym}\\). We show the quality comparison in Figure 6. We can see that the estimated mesh by a fixed coefficient matrix (a) has mostly correct pose and shape but there are also some artifacts on the mesh while using the updated coefficient matrix (b) can get better mesh estimation results. As shown in Table 6, using a fixed coefficient matrix gets larger MPVE and MPJPE errors than using the updated coefficient matrix. This is caused by the estimation errors of virtual markers when occlusion happens, which is inevitable since the virtual markers on the back will be self-occluded by the front body. As a result, inaccurate marker positions would bring large errors to the final mesh estimates if we directly use the fixed matrix." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.685, + 0.864 + ], + "angle": 0, + "content": "4.5. 
Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Figure 7 (top) presents some meshes estimated by our approach on natural images from the 3DPW test set. The" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "540" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.104, + 0.172, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.104, + 0.255, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.26, + 0.104, + 0.33, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.104, + 0.405, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.104, + 0.482, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.104, + 0.56, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.104, + 0.629, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.104, + 0.699, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.096, + 0.882, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.195, + 0.172, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.195, + 0.256, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.26, + 0.195, + 0.33, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.195, + 0.405, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.195, + 0.483, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.488, + 0.195, + 0.561, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.195, + 0.629, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.195, + 0.698, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.195, + 0.788, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.792, + 0.195, + 0.88, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.292, + 0.892, + 0.321 + ], + "angle": 0, + "content": "Figure 7. Top: Meshes estimated by our approach on images from 3DPW test set. The rightmost case in the dashed box shows a typical failure. Bottom: Meshes estimated by our approach on Internet images with challenging cases (extreme shapes or in a long dress)." + }, + { + "type": "table", + "bbox": [ + 0.098, + 0.334, + 0.453, + 0.386 + ], + "angle": 0, + "content": "
No.MethodFixed AsymUpdated AMPVE↓MPJPE↓
(a)Ours (fixed)64.751.6
(b)Ours58.047.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.396, + 0.47, + 0.437 + ], + "angle": 0, + "content": "Table 6. Ablation study of the coefficient matrix for our approach on H3.6M dataset. \"fixed\" means using the fixed coefficient matrix \\(\\tilde{\\mathbf{A}}^{sym}\\) to reconstruct the mesh." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.473, + 0.473, + 0.79 + ], + "angle": 0, + "content": "rightmost case shows a typical failure where our method has a wrong pose estimate of the left leg due to heavy occlusion. We can see that the failure is constrained to the local region and the rest of the body still gets accurate estimates. We further analyze how inaccurate virtual markers would affect the mesh estimation, i.e. when part of human body is occluded or truncated. According to the finally learned coefficient matrix \\(\\hat{\\mathbf{A}}\\) of our model, we highlight the relationship weights among virtual markers and all vertices in Figure 8. We can see that our model actually learns local and sparse dependency between each vertex and the virtual markers, e.g. for each vertex, the virtual markers that contribute the most are in a near range as shown in Figure 8 (b). Therefore, in inference, if a virtual marker has inaccurate position estimation due to occlusion or truncation, the dependent vertices may have inaccurate estimates, while the rest will be barely affected. Figure 2 (right) shows more examples where occlusion or truncation occurs, and our method can still get accurate or reasonable estimates robustly. Note that when truncation occurs, our method still guesses the positions of the truncated virtual markers." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Figure 7 (bottom) shows our estimated meshes on challenging cases, which indicates the strong generalization ability of our model on diverse postures and actions in natural scenes. Please refer to the supplementary for more quality results. Note that since the datasets do not provide supervision of head orientation, face expression, hands, or feet, the estimates of these parts are just in canonical poses inevitably." + }, + { + "type": "image", + "bbox": [ + 0.544, + 0.338, + 0.673, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.338, + 0.848, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.455, + 0.892, + 0.553 + ], + "angle": 0, + "content": "Figure 8. (a) For each virtual marker (represented by a star), we highlight the top 30 most affected vertices (represented by a colored dot) based on average coefficient matrix \\(\\hat{\\mathbf{A}}\\). (b) For each vertex (dot), we highlight the top 3 virtual markers (star) that contribute the most. We can see that the dependency has a strong locality which improves the robustness when some virtual markers cannot be accurately detected." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.894, + 0.627 + ], + "angle": 0, + "content": "Apart from that, most errors are due to inaccurate 3D virtual marker estimation which may be addressed using more powerful estimators or more diverse training datasets in the future." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.641, + 0.619, + 0.657 + ], + "angle": 0, + "content": "5. 
Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.667, + 0.895, + 0.833 + ], + "angle": 0, + "content": "In this paper, we present a novel intermediate representation Virtual Marker, which is more expressive than the prevailing skeleton representation and more accessible than physical markers. It can reconstruct 3D meshes more accurately and efficiently, especially in handling diverse body shapes. Besides, the coefficient matrix in the virtual marker representation encodes spatial relationships among mesh vertices which allows the method to implicitly explore structure priors of human body. It achieves better mesh estimation results than the state-of-the-art methods and shows advanced generalization potential in spite of its simplicity." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.846, + 0.661, + 0.863 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.9 + ], + "angle": 0, + "content": "This work was supported by MOST-2022ZD0114900 and NSFC-62061136001." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.946, + 0.51, + 0.957 + ], + "angle": 0, + "content": "541" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.156 + ], + "angle": 0, + "content": "[1] Anurag Arnab, Carl Doersch, and Andrew Zisserman. Exploiting temporal context for 3d human pose estimation in the wild. In CVPR, pages 3395-3404, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.472, + 0.213 + ], + "angle": 0, + "content": "[2] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In ECCV, pages 561-578, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.213, + 0.471, + 0.284 + ], + "angle": 0, + "content": "[3] Ronan Boulic, Pascal Becheiraz, Luc Emerging, and Daniel Thalmann. Integration of motion control techniques for virtual human and avatar real-time animation. In Proceedings of the ACM symposium on Virtual reality software and technology, pages 111-118, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.471, + 0.325 + ], + "angle": 0, + "content": "[4] Yuansi Chen, Julien Mairal, and Zaid Harchaoui. Fast and robust archetypal analysis for representation learning. In CVPR, pages 1478-1485, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.471, + 0.368 + ], + "angle": 0, + "content": "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In ECCV, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.471, + 0.422 + ], + "angle": 0, + "content": "[6] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3d human pose and shape from a video. In CVPR, pages 1964-1973, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.424, + 0.471, + 0.478 + ], + "angle": 0, + "content": "[7] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2mesh: Graph convolutional network for 3d human pose and mesh recovery from a 2d human pose. In ECCV, pages 769-787, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.48, + 0.471, + 0.535 + ], + "angle": 0, + "content": "[8] Hongsuk Choi, Gyeongsik Moon, JoonKyu Park, and Kyoung Mu Lee. Learning to estimate robust 3d human mesh from in-the-wild crowded scenes. In CVPR, pages 1475-1484, June 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.536, + 0.471, + 0.592 + ], + "angle": 0, + "content": "[9] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J Black. Monocular expressive body regression through body-driven attention. In ECCV, pages 20-40, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.592, + 0.471, + 0.647 + ], + "angle": 0, + "content": "[10] Hai Ci, Mingdong Wu, Wentao Zhu, Xiaoxuan Ma, Hao Dong, Fangwei Zhong, and Yizhou Wang. Gfpose: Learning 3d human pose prior with gradient fields. arXiv preprint arXiv:2212.08641, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.649, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[11] Enric Corona, Gerard Pons-Moll, Guillem Alenyà, and Francesc Moreno-Noguer. Learned vertex descent: a new direction for 3d human model fitting. In ECCV, pages 146-165. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.705, + 0.471, + 0.732 + ], + "angle": 0, + "content": "[12] Adele Cutler and Leo Breiman. Archetypal analysis. Technometrics, 36(4):338-347, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.733, + 0.471, + 0.788 + ], + "angle": 0, + "content": "[13] Shanyan Guan, Jingwei Xu, Michelle Z He, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Out-of-domain human mesh reconstruction via dynamic bilevel online adaptation. IEEE TPAMI, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.789, + 0.471, + 0.845 + ], + "angle": 0, + "content": "[14] Yinghao Huang, Federica Bogo, Christoph Lassner, Angjoo Kanazawa, Peter V Gehler, Javier Romero, Ijaz Akhter, and Michael J Black. Towards accurate marker-less human shape and pose estimation over time. In 3DV, pages 421-430, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.846, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[15] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE TPAMI, 36(7):1325-1339, 2013." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.135 + ], + "angle": 0, + "content": "[16] Karim Iskakov, Egor Burkov, Victor Lempitsky, and Yury Malkov. Learnable triangulation of human pose. In ICCV, pages 7718-7727, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[17] Ian T Jolliffe. Principal components in regression analysis. In Principal component analysis, pages 129-155. Springer, 1986." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.178, + 0.893, + 0.22 + ], + "angle": 0, + "content": "[18] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, pages 7122-7131, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.894, + 0.262 + ], + "angle": 0, + "content": "[19] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3d human dynamics from video. In CVPR, pages 5614-5623, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.894, + 0.304 + ], + "angle": 0, + "content": "[20] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In CVPR, pages 1715-1725, June 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.893, + 0.334 + ], + "angle": 0, + "content": "[21] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.894, + 0.376 + ], + "angle": 0, + "content": "[22] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In CVPR, pages 5253-5263, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.893, + 0.432 + ], + "angle": 0, + "content": "[23] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation. In ICCV, pages 11127-11137, October 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.433, + 0.893, + 0.489 + ], + "angle": 0, + "content": "[24] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In ICCV, pages 2252-2261, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.893, + 0.532 + ], + "angle": 0, + "content": "[25] Nikos Kolotouros, Georgios Pavlakos, and Kostas Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In CVPR, pages 4501-4510, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.893, + 0.575 + ], + "angle": 0, + "content": "[26] Nikos Kolotouros, Georgios Pavlakos, Dinesh Jayaraman, and Kostas Daniilidis. Probabilistic modeling for human mesh recovery. In ICCV, pages 11605-11614, October 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.893, + 0.631 + ], + "angle": 0, + "content": "[27] Christoph Lassner, Javier Romero, Martin Kiefel, Federica Bogo, Michael J Black, and Peter V Gehler. Unite the people: Closing the loop between 3d and 2d human representations. In CVPR, pages 6050-6059, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.893, + 0.688 + ], + "angle": 0, + "content": "[28] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. Hybrik: A hybrid analytical-neural inverse kinematics solution for 3d human pose and shape estimation. In CVPR, pages 3383-3393, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.893, + 0.744 + ], + "angle": 0, + "content": "[29] Yong-Lu Li, Liang Xu, Xinpeng Liu, Xijie Huang, Yue Xu, Shiyi Wang, Hao-Shu Fang, Ze Ma, Mingyang Chen, and Cewu Lu. Pastanet: Toward human activity knowledge engine. In CVPR, pages 382-391, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.745, + 0.893, + 0.799 + ], + "angle": 0, + "content": "[30] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. Cliff: Carrying location information in full frames into human pose and shape estimation. In ECCV, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.801, + 0.893, + 0.843 + ], + "angle": 0, + "content": "[31] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, pages 1954-1963, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.845, + 0.893, + 0.872 + ], + "angle": 0, + "content": "[32] Kevin Lin, Lijuan Wang, and Zicheng Liu. 
Mesh graphormer. In ICCV, pages 12939-12948, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.893, + 0.901 + ], + "angle": 0, + "content": "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.512, + 0.957 + ], + "angle": 0, + "content": "542" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "Zitnick. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.472, + 0.162 + ], + "angle": 0, + "content": "[34] Matthew Loper, Naureen Mahmood, and Michael J Black. Mosh: Motion and shape capture from sparse markers. TOG, 33(6):1-13, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.471, + 0.204 + ], + "angle": 0, + "content": "[35] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. TOG, 34(6):1-16, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.205, + 0.472, + 0.259 + ], + "angle": 0, + "content": "[36] Tianyu Luan, Yali Wang, Junhao Zhang, Zhe Wang, Zhipeng Zhou, and Yu Qiao. Pc-hmr: Pose calibration for 3d human mesh recovery from 2d images/videos. In AAAI, pages 2269-2276, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.472, + 0.316 + ], + "angle": 0, + "content": "[37] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, pages 506-516, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.317, + 0.471, + 0.37 + ], + "angle": 0, + "content": "[38] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Imageto-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In ECCV, pages 752-768, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.471, + 0.427 + ], + "angle": 0, + "content": "[39] Mohamed Omran, Christoph Lassner, Gerard Pons-Moll, Peter Gehler, and Bernt Schiele. Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In 3DV, pages 484-494. IEEE, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.428, + 0.471, + 0.495 + ], + "angle": 0, + "content": "[40] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In CVPR, pages 10975-10985, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.471, + 0.537 + ], + "angle": 0, + "content": "[41] Liliana Lo Presti and Marco La Cascia. 3d skeleton-based human action classification: A survey. Pattern Recognition, 53:130-147, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.471, + 0.58 + ], + "angle": 0, + "content": "[42] Haibo Qiu, Chunyu Wang, Jingdong Wang, Naiyan Wang, and Wenjun Zeng. Cross view fusion for 3d human pose estimation. In ICCV, pages 4342-4351, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.581, + 0.471, + 0.636 + ], + "angle": 0, + "content": "[43] Jiajun Su, Chunyu Wang, Xiaoxuan Ma, Wenjun Zeng, and Yizhou Wang. 
Virtualpose: Learning generalizable 3d human pose models from virtual data. In ECCV, pages 55-71. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.637, + 0.471, + 0.678 + ], + "angle": 0, + "content": "[44] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, pages 5693-5703, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.471, + 0.718 + ], + "angle": 0, + "content": "[45] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, pages 529-545, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.471, + 0.761 + ], + "angle": 0, + "content": "[46] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3d people. In ICCV, pages 11179-11188, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.471, + 0.816 + ], + "angle": 0, + "content": "[47] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In ICCV, pages 5349-5358, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.471, + 0.859 + ], + "angle": 0, + "content": "[48] Hanyue Tu, Chunyu Wang, and Wenjun Zeng. Voxelpose: Towards multi-camera 3d human pose estimation in wild environment. In ECCV, pages 197-212. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[49] Hsiao-Yu Tung, Hsiao-Wei Tung, Ersin Yumer, and Katerina Fragkiadaki. Self-supervised learning of motion capture. In NIPS, volume 30, 2017." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[50] Gul Varol, Duygu Ceylan, Bryan Russell, Jimei Yang, Ersin Yumer, Ivan Laptev, and Cordelia Schmid. Bodynet: Volumetric inference of 3d human body shapes. In ECCV, pages 20-36, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[51] Gul Varol, Javier Romero, Xavier Martin, Naureen Mahmood, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning from synthetic humans. In CVPR, pages 109-117, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.893, + 0.248 + ], + "angle": 0, + "content": "[52] Timo von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In ECCV, pages 601-617, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.894, + 0.304 + ], + "angle": 0, + "content": "[53] Ziniu Wan, Zhengjia Li, Maoqing Tian, Jianbo Liu, Shuai Yi, and Hongsheng Li. Encoder-decoder with multi-level attention for 3d human shape and pose estimation. In ICCV, pages 13033-13042, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.893, + 0.347 + ], + "angle": 0, + "content": "[54] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In ECCV, pages 52-67, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.893, + 0.389 + ], + "angle": 0, + "content": "[55] Yuanlu Xu, Song-Chun Zhu, and Tony Tung. Denserac: Joint 3d pose and shape estimation by dense render-and-compare. 
In ICCV, pages 7760-7770, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.893, + 0.432 + ], + "angle": 0, + "content": "[56] Chun-Han Yao, Jimei Yang, Duygu Ceylan, Yi Zhou, Yang Zhou, and Ming-Hsuan Yang. Learning visibility for robust dense human body estimation. In ECCV, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.893, + 0.489 + ], + "angle": 0, + "content": "[57] Hang Ye, Wentao Zhu, Chunyu Wang, Rujie Wu, and Yizhou Wang. Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In ECCV, pages 142-159. Springer, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.893, + 0.545 + ], + "angle": 0, + "content": "[58] Andrei Zanfir, Elisabeta Marinoiu, and Cristian Sminchisescu. Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In CVPR, pages 2148-2157, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.547, + 0.893, + 0.602 + ], + "angle": 0, + "content": "[59] Mihai Zanfir, Andrei Zanfir, Eduard Gabriel Bazavan, William T Freeman, Rahul Sukthankar, and Cristian Sminchiescu. Thundr: Transformer-based 3d human reconstruction with markers. In ICCV, pages 12971-12980, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.893, + 0.645 + ], + "angle": 0, + "content": "[60] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3d human mesh regression with dense correspondence. In CVPR, pages 7054-7063, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.893, + 0.688 + ], + "angle": 0, + "content": "[61] Hongwen Zhang, Jie Cao, Guo Lu, Wanli Ouyang, and Zhenan Sun. Learning 3d human shape and pose from dense body parts. IEEE TPAMI, 44(5):2610-2627, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.893, + 0.744 + ], + "angle": 0, + "content": "[62] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In ICCV, pages 11446-11456, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.894, + 0.8 + ], + "angle": 0, + "content": "[63] Yifu Zhang, Chunyu Wang, Xinggang Wang, Wenyu Liu, and Wenjun Zeng. Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE TPAMI, 45(2):2613-2626, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.946, + 0.511, + 0.956 + ], + "angle": 0, + "content": "543" + } + ] +] \ No newline at end of file diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_origin.pdf b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b1543e809e3026026c14428127011e8f72207605 --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/067f420e-7fdc-4668-8983-b6715ae47be7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a03cc7cfefad19b02d0769bbd2512d332e9454e355f076740ecf14341bcdae7 +size 4742746 diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/full.md b/2023/3D Human Mesh Estimation From Virtual Markers/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f80408364096d996a42a4b5631180ff17b58581f --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/full.md @@ -0,0 +1,401 @@ +# 3D Human Mesh Estimation from Virtual Markers + +Xiaoxuan Ma $^{1}$ Jiajun Su $^{1}$ Chunyu Wang $^{3*}$ Wentao Zhu $^{1}$ Yizhou Wang $^{1,2,4}$ + +$^{1}$ School of Computer Science, Center on Frontiers of Computing Studies, Peking University + $^{2}$ Inst. for Artificial Intelligence, Peking University + $^{3}$ Microsoft Research Asia + $^{4}$ Nat'l Eng. Research Center of Visual Technology + +{maxiaoxuan, sujiajun, wtzhu, yizhou.wang}@pku.edu.cn, chnuwa@microsoft.com + +# Abstract + +Inspired by the success of volumetric 3D pose estimation, some recent human mesh estimators propose to estimate 3D skeletons as intermediate representations, from which, the dense 3D meshes are regressed by exploiting the mesh topology. However, body shape information is lost in extracting skeletons, leading to mediocre performance. The advanced motion capture systems solve the problem by placing dense physical markers on the body surface, which allows to extract realistic meshes from their non-rigid motions. However, they cannot be applied to wild images without markers. In this work, we present an intermediate representation, named virtual markers, which learns 64 landmark keypoints on the body surface based on the large-scale mocap data in a generative style, mimicking the effects of physical markers. The virtual markers can be accurately detected from wild images and can reconstruct the intact meshes with realistic shapes by simple interpolation. Our approach outperforms the state-of-the-art methods on three datasets. In particular, it surpasses the existing methods by a notable margin on the SURREAL dataset, which has diverse body shapes. Code is available at https://github.com/ShirleyMaxx/VirtualMarker. + +# 1. Introduction + +3D human mesh estimation aims to estimate the 3D positions of the mesh vertices that are on the body surface. The task has attracted a lot of attention from the computer vision and computer graphics communities [3, 10, 18, 24, 26, 29, 34, 36, 41, 49] because it can benefit many applications such as virtual reality [14]. Recently, the deep learning-based methods [7, 18, 28] have significantly + +![](images/9802a17301884443af2cf8a67de51599d2994b1a4602483b08ea496814f3ece9.jpg) +Figure 1. Mesh estimation results on four examples with different body shapes. 
Pose2Mesh [7] which uses 3D skeletons as the intermediate representation fails to predict accurate shapes. Our virtual marker-based method obtains accurate estimates. + +advanced the accuracy on the benchmark datasets. + +The pioneer methods [18,49] propose to regress the pose and shape parameters of the mesh models such as SMPL [35] directly from images. While straightforward, their accuracy is usually lower than the state-of-the-arts. The first reason is that the mapping from the image features to the model parameters is highly non-linear and suffers from image-model misalignment [28]. Besides, existing mesh datasets [15,27,37,52] are small and limited to simple labo + +ratory environments due to the complex capturing process. The lack of sufficient training data severely limits its performance. + +Recently, some works [25, 38] begin to formulate mesh estimation as a dense 3D keypoint detection task inspired by the success of volumetric pose estimation [42, 43, 45, 48, 57, 63]. For example, in [25, 38], the authors propose to regress the 3D positions of all vertices. However, it is computationally expensive because it has more than several thousand vertices. Moon and Lee [38] improve the efficiency by decomposing the 3D heatmaps into multiple 1D heatmaps at the cost of mediocre accuracy. Choi et al. [7] propose to first detect a sparser set of skeleton joints in the images, from which the dense 3D meshes are regressed by exploiting the mesh topology. The methods along this direction have attracted increasing attention [7, 28, 53] due to two reasons. First, the proxy task of 3D skeleton estimation can leverage the abundant 2D pose datasets which notably improves the accuracy. Second, mesh regression from the skeletons is efficient. However, important information about the body shapes is lost in extracting the 3D skeletons, which is largely overlooked previously. As a result, different types of body shapes, such as lean or obese, cannot be accurately estimated (see Figure 1). + +The professional marker-based motion capture (mocap) method MoSh [34] places physical markers on the body surface and explore their subtle non-rigid motions to extract meshes with accurate shapes. However, the physical markers limit the approach to be used in laboratory environments. We are inspired to think whether we can identify a set of landmarks on the mesh as virtual markers, e.g., elbow and wrist, that can be detected from wild images, and allow to recover accurate body shapes? The desired virtual markers should satisfy several requirements. First, the number of markers should be much smaller than that of the mesh vertices so that we can use volumetric representations to efficiently estimate their 3D positions. Second, the markers should capture the mesh topology so that the intact mesh can be accurately regressed from them. Third, the virtual markers have distinguishable visual patterns so that they can be detected from images. + +In this work, we present a learning algorithm based on archetypal analysis [12] to identify a subset of mesh vertices as the virtual markers that try to satisfy the above requirements to the best extent. Figure 2 shows that the learned virtual markers coarsely outline the body shape and pose which paves the way for estimating meshes with accurate shapes. Then we present a simple framework for 3D mesh estimation on top of the representation as shown in Figure 3. It first learns a 3D keypoint estimation network based on [45] to detect the 3D positions of the virtual markers. 
Then we recover the intact mesh simply by interpolating them. The interpolation weights are pre-trained in the representation + +learning step and will be adjusted by a light network based on the prediction confidences of the virtual markers for each image. + +We extensively evaluate our approach on three benchmark datasets. It consistently outperforms the state-of-the-art methods on all of them. In particular, it achieves a significant gain on the SURREAL dataset [51] which has a variety of body shapes. Our ablation study also validates the advantages of the virtual marker representation in terms of recovering accurate shapes. Finally, the method shows decent generalization ability and generates visually appealing results for the wild images. + +# 2. Related work + +# 2.1. Optimization-based mesh estimation + +Before deep learning dominates this field, 3D human mesh estimation [2, 27, 34, 40, 58] is mainly optimization-based, which optimizes the parameters of the human mesh models to match the observations. For example, Loper et al. [34] propose MoSh that optimizes the SMPL parameters to align the mesh with the 3D marker positions. It is usually used to get GT 3D meshes for benchmark datasets because of its high accuracy. Later works propose to optimize the model parameters or mesh vertices based on 2D image cues [2, 11, 27, 40, 58]. They extract intermediate representations such as 2D skeletons from the images and optimize the mesh model by minimizing the discrepancy between the model projection and the intermediate representations such as the 2D skeletons. These methods are usually sensitive to initialization and suffer from local optimum. + +# 2.2. Learning-based mesh estimation + +Recently, most works follow the learning-based framework and have achieved promising results. Deep networks [18, 24, 26, 36, 49] are used to regress the SMPL parameters from image features. However, learning the mapping from the image space to the parameter space is highly nonlinear [38]. In addition, they suffer from the misalignment between the meshes and image pixels [60]. These problems make it difficult to learn an accurate yet generalizable model. + +Some works propose to introduce proxy tasks to get intermediate representations first, hoping to alleviate the learning difficulty. In particular, intermediate representations of physical markers [59], IUV images [55,60-62], body part segmentation masks [23,27,39,50] and body skeletons [7,28,47,53] have been proposed. In particular, THUNDR [59] first estimates the 3D locations of physical markers from images and then reconstructs the mesh from the 3D markers. The physical markers can be interpreted as a simplified representation of body shape and pose. Although it is very accurate, it cannot be applied to wild images without markers. In contrast, body skeleton is a popular human representation + +![](images/e284d5f27134f54524d381aedddf79673639cc90ce42d653625aa64ad2afe0d4.jpg) +Figure 2. Left: The learned virtual markers (blue balls) in the back and front views. The grey balls mean they are invisible in the front view. The virtual markers act similarly to physical body markers and approximately outline the body shape. Right: Mesh estimation results by our approach, from left to right are input image, estimated 3D mesh overlayed on the image, and three different viewpoints showing the estimated 3D mesh with our intermediate predicted virtual markers (blue balls), respectively. 
+ +![](images/f69b18821bf52c010f0513102762f9d306e115d3ac5d7d71172e968edab7b793.jpg) + +that can be robustly detected from wild images. Choi et al. [7] propose to first estimate the 3D skeletons, and then estimate the intact mesh from them. However, accurate body shapes are difficult to be recovered from the oversimplified 3D skeletons. + +Our work belongs to the learning-based class and is related to works that use physical markers or skeletons as intermediate representations. But different from them, we propose a novel intermediate representation, named virtual markers, which is more expressive to reduce the ambiguity in pose and shape estimation than body skeletons and can be applied to wild images. + +# 3. Method + +In this section, we describe the details of our approach. First, Section 3.1 introduces how we learn the virtual marker representation from mocap data. Then we present the overall framework for mesh estimation from an image in Section 3.2. At last, Section 3.3 discusses the loss functions and training details. + +# 3.1. The virtual marker representation + +We represent a mesh by a vector of vertex positions $\mathbf{x} \in \mathbb{R}^{3M}$ where $M$ is the number of mesh vertices. Denote a mocap dataset such as [15] with $N$ meshes as $\widehat{\mathbf{X}} = [\mathbf{x}_1, \dots, \mathbf{x}_N] \in \mathbb{R}^{3M \times N}$ . To unveil the latent structure among vertices, we reshape it to $\mathbf{X} \in \mathbb{R}^{3N \times M}$ with each column $\mathbf{x}_i \in \mathbb{R}^{3N}$ representing all possible positions of the $i^{\text{th}}$ vertex in the dataset [15]. + +The rank of $\mathbf{X}$ is smaller than $M$ because the mesh representation is smooth and redundant where some vertices can be accurately reconstructed by the others. While it seems natural to apply PCA [17] to $\mathbf{X}$ to compute the eigenvectors as virtual markers for reconstructing others, there is no guarantee that the virtual markers correspond to the mesh vertices, making them difficult to be detected from images. Instead, we aim to learn $K$ virtual markers $\mathbf{Z} = [\mathbf{z}_1,\dots,\mathbf{z}_K]\in \mathbb{R}^{3N\times K}$ that try to satisfy the follow + +
| Type | Formula | Reconst. Error (mm) ↓ |
| --- | --- | --- |
| Original | $\lVert \mathbf{X} - \mathbf{X}\mathbf{B}\mathbf{A} \rVert_F^2$ | 11.67 |
| Symmetric | $\lVert \mathbf{X} - \mathbf{X}\widetilde{\mathbf{B}}^{sym}\widetilde{\mathbf{A}}^{sym} \rVert_F^2$ | 10.98 |
+ +Table 1. The reconstruction errors using the original and the symmetric sets of markers on the H3.6M dataset [15], respectively. The errors are small indicating that they are sufficiently expressive and can reconstruct all vertices accurately. + +ing two requirements to the greatest extent. First, they can accurately reconstruct the intact mesh $\mathbf{X}$ by their linear combinations: $\mathbf{X} = \mathbf{Z}\mathbf{A}$ , where $\mathbf{A} \in \mathbb{R}^{K \times M}$ is a coefficient matrix that encodes the spatial relationship between the virtual markers and the mesh vertices. Second, they should have distinguishable visual patterns in images so that they can be easily detected from images. Ideally, they can be on the body surface as the meshes. + +We apply archetypal analysis [4, 12] to learn $\mathbf{Z}$ by minimizing a reconstruction error with two additional constraints: (1) each vertex $\mathbf{x}_i$ can be reconstructed by convex combinations of $\mathbf{Z}$ , and (2) each marker $\mathbf{z}_i$ should be convex combinations of the mesh vertices $\mathbf{X}$ : + +$$ +\min _ {\substack {\boldsymbol {\alpha} _ {i} \in \Delta_ {K} \text {for} 1 \leq i \leq M, \\ \boldsymbol {\beta} _ {j} \in \Delta_ {M} \text {for} 1 \leq j \leq K}} \| \mathbf {X} - \mathbf {X B A} \| _ {F} ^ {2}, \tag{1} +$$ + +where $\mathbf{A} = [\pmb{\alpha}_1, \dots, \pmb{\alpha}_M] \in \mathbb{R}^{K \times M}$ , each $\pmb{\alpha}$ resides in the simplex $\Delta_K \triangleq \{\pmb{\alpha} \in \mathbb{R}^K \text{ s.t. } \pmb{\alpha} \succeq 0 \text{ and } ||\pmb{\alpha}||_1 = 1\}$ , and $\mathbf{B} = [\beta_1, \dots, \beta_K] \in \mathbb{R}^{M \times K}$ , $\beta_j \in \Delta_M$ . We adopt Active-set algorithm [4] to solve objective (1) and obtain the learned virtual markers $\mathbf{Z} = \mathbf{X}\mathbf{B} \in \mathbb{R}^{3N \times K}$ . As shown in [4, 12], the two constraints encourage the virtual markers $\mathbf{Z}$ to unveil the latent structure among vertices, therefore they learn to be close to the extreme points of the mesh and located on the body surface as much as possible. + +Post-processing. Since human body is left-right symmetric, we adjust $\mathbf{Z}$ to reflect the property. We first replace each $\mathbf{z}_i\in$ $\mathbf{Z}$ by its nearest vertex on the mesh and obtain $\widetilde{\mathbf{Z}}\in \mathbb{R}^{3\times K}$ . This step allows us to compute the left or right counterpart + +![](images/733021ed3ea7358bb09d5c51c395e53373c9f6e4de354a829e9c325bc3dccae1.jpg) +Figure 3. Overview of our framework. Given an input image $\mathbf{I}$ , it first estimates the 3D positions $\hat{\mathbf{P}}$ of the virtual markers. Then we update the coefficient matrix $\hat{\mathbf{A}}$ based on the estimation confidence scores $\mathbf{C}$ of the virtual markers. Finally, the complete human mesh can be simply recovered by linear multiplication $\hat{\mathbf{M}} = \hat{\mathbf{P}}\hat{\mathbf{A}}$ . + +of each marker. Then we replace the markers in the right body with the symmetric vertices in the left body and obtain the symmetric markers $\widetilde{\mathbf{Z}}^{sym} \in \mathbb{R}^{3 \times K}$ . Finally we update $\mathbf{B}$ and $\mathbf{A}$ by minimizing $||\mathbf{X} - \mathbf{X}\widetilde{\mathbf{B}}^{sym}\widetilde{\mathbf{A}}^{sym}||_F^2$ subject to $\widetilde{\mathbf{Z}}^{sym} = \mathbf{X}\widetilde{\mathbf{B}}^{sym}$ . More details are elaborated in the supplementary. + +Figure 2 shows the virtual markers learned on the mocap dataset [15] after post-processing. 
They are similar to the physical markers and approximately outline the body shape which agrees with our expectations. They are roughly evenly distributed on the surface of the body, and some of them are located close to the body keypoints, which have distinguishable visual patterns to be accurately detected. Table 1 shows the reconstruction errors of using original markers $\mathbf{XB}$ and the symmetric markers $\widetilde{\mathbf{XB}}^{sym}$ . Both can reconstruct meshes accurately. + +# 3.2. Mesh estimation framework + +On top of the virtual markers, we present a simple yet effective framework for end-to-end 3D human mesh estimation from a single image. As shown in Figure 3, it consists of two branches. The first branch uses a volumetric CNN [45] to estimate the 3D positions $\hat{\mathbf{P}}$ of the markers, and the second branch reconstructs the full mesh $\hat{\mathbf{M}}$ by predicting a coefficient matrix $\hat{\mathbf{A}}$ : + +$$ +\hat {\mathbf {M}} = \hat {\mathbf {P}} \hat {\mathbf {A}}. \tag {2} +$$ + +We will describe the two branches in more detail. + +3D marker estimation. We train a neural network to estimate a 3D heatmap $\hat{\mathbf{H}} = [\hat{\mathbf{H}}_1, \dots, \hat{\mathbf{H}}_K] \in \mathbb{R}^{K \times D \times H \times W}$ from an image. The heatmap encodes per-voxel likelihood of each marker. There are $D \times H \times W$ voxels in total which are used to discretize the 3D space. The 3D position $\hat{\mathbf{P}}_z \in \mathbb{R}^3$ of each marker is computed as the center of mass of the corresponding heatmap $\hat{\mathbf{H}}_z$ [45] as follows: + +$$ +\hat {\mathbf {P}} _ {z} = \sum_ {d = 1} ^ {D} \sum_ {h = 1} ^ {H} \sum_ {w = 1} ^ {W} (d, h, w) \cdot \hat {\mathbf {H}} _ {z} (d, h, w). \tag {3} +$$ + +The positions of all markers are represented as $\hat{\mathbf{P}} = [\hat{\mathbf{P}}_1, \hat{\mathbf{P}}_2, \dots, \hat{\mathbf{P}}_K]$ . + +Interpolation. Ideally, if we have accurate estimates for all virtual markers $\hat{\mathbf{P}}$ , then we can recover the complete mesh by simply multiplying $\hat{\mathbf{P}}$ with a fixed coefficient matrix $\tilde{\mathbf{A}}^{sym}$ with sufficient accuracy as validated in Table 1. However, in practice, some markers may have large estimation errors because they may be occluded in the monocular setting. Note that this happens frequently. For example, the markers in the back will be occluded when a person is facing the camera. As a result, inaccurate markers positions may bring large errors to the final mesh if we directly multiply them with the fixed matrix $\tilde{\mathbf{A}}^{sym}$ . + +Our solution is to rely more on those accurately detected markers. To that end, we propose to update the coefficient matrix based on the estimation confidence scores of the markers. In practice, we simply take the heatmap score at the estimated positions of each marker, i.e. $\hat{\mathbf{H}}_z(\hat{\mathbf{P}}_z)$ , and feed them to a single fully-connected layer to obtain the coefficient matrix $\hat{\mathbf{A}}$ . Then the mesh is reconstructed by $\hat{\mathbf{M}} = \hat{\mathbf{P}}\hat{\mathbf{A}}$ . + +# 3.3. Training + +We train the whole network end-to-end in a supervised way. The overall loss function is defined as: + +$$ +\mathcal {L} = \lambda_ {v m} \mathcal {L} _ {v m} + \lambda_ {c} \mathcal {L} _ {\text {c o n f}} + \lambda_ {m} \mathcal {L} _ {\text {m e s h}}. \tag {4} +$$ + +Virtual marker loss. 
We define $\mathcal{L}_{vm}$ as the $L_{1}$ distance between the predicted 3D virtual markers $\hat{\mathbf{P}}$ and the GT $\hat{\mathbf{P}}^{*}$ as follows: + +$$ +\mathcal {L} _ {v m} = \left\| \hat {\mathbf {P}} - \hat {\mathbf {P}} ^ {*} \right\| _ {1}. \tag {5} +$$ + +Note that it is easy to get GT markers $\hat{\mathbf{P}}^*$ from GT meshes as stated in Section 3.1 without additional manual annotations. + +Confidence loss. We also require that the 3D heatmaps have reasonable shapes, therefore, the heatmap score at the + +voxel containing the GT marker position $\hat{\mathbf{P}}_z^*$ should have the maximum value as in the previous work [16]: + +$$ +\mathcal {L} _ {\text {c o n f}} = - \sum_ {z = 1} ^ {K} \log \left(\hat {\mathbf {H}} _ {z} \left(\hat {\mathbf {P}} _ {z} ^ {*}\right)\right). \tag {6} +$$ + +Mesh loss. Following [38], we define $\mathcal{L}_{mesh}$ as a weighted sum of four losses: + +$$ +\mathcal {L} _ {\text {m e s h}} = \mathcal {L} _ {\text {v e r t e x}} + \mathcal {L} _ {\text {p o s e}} + \mathcal {L} _ {\text {n o r m a l}} + \lambda_ {e} \mathcal {L} _ {\text {e d g e}}. \tag {7} +$$ + +- Vertex coordinate loss. We adopt $L_{1}$ loss between predicted 3D mesh coordinates $\hat{\mathbf{M}}$ with GT mesh $\hat{\mathbf{M}}^{*}$ as: + +$$ +\mathcal {L} _ {\text {v e r t e x}} = \left\| \hat {\mathbf {M}} - \hat {\mathbf {M}} ^ {*} \right\| _ {1}. \tag {8} +$$ + +- Pose loss. We use $L_{1}$ loss between the 3D landmark joints regressed from mesh $\hat{\mathbf{M}}\mathcal{I}$ and the GT joints $\hat{\mathbf{J}}^{*}$ as: + +$$ +\mathcal {L} _ {\text {p o s e}} = \left\| \hat {\mathbf {M}} \mathcal {J} - \hat {\mathbf {J}} ^ {*} \right\| _ {1}, \tag {9} +$$ + +where $\mathcal{J} \in \mathbb{R}^{M \times J}$ is a pre-defined joint regression matrix in SMPL model [2]. + +- Surface losses. To improve surface smoothness [54], we supervise the normal vector of a triangle face with GT normal vectors by $\mathcal{L}_{\text{normal}}$ and the edge length of the predicted mesh with GT length by $\mathcal{L}_{\text{edge}}$ : + +$$ +\begin{array}{l} \mathcal {L} _ {n o r m a l} = \sum_ {f} \sum_ {\{i, j \} \subset f} \left| \left\langle \frac {\hat {\mathbf {M}} _ {i} - \hat {\mathbf {M}} _ {j}}{\| \hat {\mathbf {M}} _ {i} - \hat {\mathbf {M}} _ {j} \| _ {2}}, \hat {\mathbf {n}} _ {f} ^ {*} \right\rangle \right|, \\ \mathcal {L} _ {\text {e d g e}} = \sum_ {f} \sum_ {\{i, j \} \subset f} \left| \| \hat {\mathbf {M}} _ {i} - \hat {\mathbf {M}} _ {j} \| _ {2} - \| \hat {\mathbf {M}} _ {i} ^ {*} - \hat {\mathbf {M}} _ {j} ^ {*} \| _ {2} \right|. \tag {10} \\ \end{array} +$$ + +where $f$ and $\hat{\mathbf{n}}_f^*$ denote a triangle face in the mesh and its GT unit normal vector, respectively. $\hat{\mathbf{M}}_i$ denote the $i^{th}$ vertex of $\hat{\mathbf{M}}$ . * denotes GT. + +# 4. Experiments + +# 4.1. Datasets and metrics + +H3.6M [15]. We use (S1, S5, S6, S7, S8) for training and (S9, S11) for testing. As in [7, 18, 31, 32], we report MPJPE and PA-MPJPE for poses that are derived from the estimated meshes. We also report Mean Per Vertex Error (MPVE) for the whole mesh. + +3DPW [52] is collected in natural scenes. Following the previous works [23, 31, 32, 59], we use the train set of 3DPW to learn the model and evaluate on the test set. The same evaluation metrics as H3.6M are used. + +SURREAL [51] is a large-scale synthetic dataset with GT SMPL annotations and has diverse samples in terms of body shapes, backgrounds, etc. 
We use its training set to train a model and evaluate the test split following [7]. + +# 4.2. Implementation Details + +We learn 64 virtual markers on the H3.6M [15] training set. We use the same set of markers for all datasets instead of learning a separate set on each dataset. Following [7, 18, 22, 25, 31, 32, 38, 59], we conduct mix-training by using MPI-INF-3DHP [37], UP-3D [27], and COCO [33] training set for experiments on the H3.6M and 3DPW datasets. We adapt a 3D pose estimator [45] with HRNet-W48 [44] as the image feature backbone for estimating the 3D virtual markers. We set the number of voxels in each dimension to be 64, i.e. $D = H = W = 64$ for 3D heatmaps. Following [18, 25, 38], we crop every single human region from the input image and resize it to $256 \times 256$ . We use Adam [21] optimizer to train the whole framework for 40 epochs with a batch size of 32. The learning rates for the two branches are set to $5 \times 10^{-4}$ and $1 \times 10^{-3}$ , respectively, which are decreased by half after the $30^{th}$ epoch. Please refer to the supplementary for more details. + +# 4.3. Comparison to the State-of-the-arts + +Results on H3.6M. Table 2 compares our approach to the state-of-the-art methods on the H3.6M dataset. Our method achieves competitive or superior performance. In particular, it outperforms the methods that use skeletons (Pose2Mesh [7], DSD-SATN [47]), body markers (THUNDR) [59], or IUV image [60, 62] as proxy representations, demonstrating the effectiveness of the virtual marker representation. + +Results on 3DPW. We compare our method to the state-of-the-art methods on the 3DPW dataset in Table 2. Our approach achieves state-of-the-art results among all the methods, validating the advantages of the virtual marker representation over the skeleton representation used in Pose2Mesh [7], DSD-SATN [47], and other representations like IUV image used in PyMAF [62]. In particular, our approach outperforms I2L-MeshNet [38], METRO [31], and Mesh Graphormer [32] by a notable margin, which suggests that virtual markers are more suitable and effective representations than detecting all vertices directly as most of them are not discriminative enough to be accurately detected. + +Results on SURREAL. This dataset has more diverse samples in terms of body shapes. The results are shown in Table 3. Our approach outperforms the state-of-the-art methods by a notable margin, especially in terms of MPVE. Figure 1 shows some challenging cases without cherry-picking. The skeleton representation loses the body shape information so the method [7] can only recover mean shapes. In contrast, our approach generates much more accurate mesh estimation results. + +
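For reference, the three metrics reported in the tables below can be computed from a predicted and a ground-truth mesh as in the following minimal NumPy sketch. The joint regressor is the pre-defined SMPL regression matrix (used here with shape $J \times M$); the function names and the Procrustes helper are illustrative rather than the exact evaluation code.

```python
import numpy as np

def mpve(pred_mesh, gt_mesh):
    # Mean Per Vertex Error: mean Euclidean distance over all M mesh vertices.
    return np.linalg.norm(pred_mesh - gt_mesh, axis=-1).mean()

def mpjpe(pred_mesh, gt_mesh, J_regressor):
    # MPJPE on the landmark joints regressed from the meshes (J_regressor has shape J x M).
    pred_joints = J_regressor @ pred_mesh   # (J, 3)
    gt_joints = J_regressor @ gt_mesh
    return np.linalg.norm(pred_joints - gt_joints, axis=-1).mean()

def pa_mpjpe(pred_joints, gt_joints):
    # PA-MPJPE: MPJPE after similarity (Procrustes) alignment of the prediction to the ground truth.
    mu_p, mu_g = pred_joints.mean(0), gt_joints.mean(0)
    p, g = pred_joints - mu_p, gt_joints - mu_g
    U, s, Vt = np.linalg.svd(p.T @ g)
    d = np.sign(np.linalg.det(Vt.T @ U.T))   # guard against a reflection
    D = np.diag([1.0, 1.0, d])
    R = Vt.T @ D @ U.T
    scale = (s * np.diag(D)).sum() / (p ** 2).sum()
    aligned = scale * p @ R.T + mu_g
    return np.linalg.norm(aligned - gt_joints, axis=-1).mean()
```

All three values are in the units of the input meshes and are reported in millimeters in the tables.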
| Method | Intermediate Representation | MPVE↓ (H3.6M) | MPJPE↓ (H3.6M) | PA-MPJPE↓ (H3.6M) | MPVE↓ (3DPW) | MPJPE↓ (3DPW) | PA-MPJPE↓ (3DPW) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| † Arnab et al. [1] CVPR'19 | 2D skeleton | - | 77.8 | 54.3 | - | - | 72.2 |
| † HMMR [19] CVPR'19 | - | - | - | 56.9 | 139.3 | 116.5 | 72.6 |
| † DSD-SATN [47] ICCV'19 | 3D skeleton | - | 59.1 | 42.4 | - | - | 69.5 |
| † VIBE [22] CVPR'20 | - | - | 65.9 | 41.5 | 99.1 | 82.9 | 51.9 |
| † TCMR [6] CVPR'21 | - | - | 62.3 | 41.1 | 102.9 | 86.5 | 52.7 |
| † MAED [53] ICCV'21 | 3D skeleton | - | 56.3 | 38.7 | 92.6 | 79.1 | 45.7 |
| SMPLify [2] ECCV'16 | 2D skeleton | - | - | 82.3 | - | - | - |
| HMR [18] CVPR'18 | - | 96.1 | 88.0 | 56.8 | 152.7 | 130.0 | 81.3 |
| GraphCMR [25] CVPR'19 | 3D vertices | - | - | 50.1 | - | - | 70.2 |
| SPIN [24] ICCV'19 | - | - | - | 41.1 | 116.4 | 96.9 | 59.2 |
| DenseRac [55] ICCV'19 | IUV image | - | 76.8 | 48.0 | - | - | - |
| DecoMR [60] CVPR'20 | IUV image | - | 60.6 | 39.3 | - | - | - |
| ExPose [9] ECCV'20 | - | - | - | - | - | 93.4 | 60.7 |
| Pose2Mesh [7] ECCV'20 | 3D skeleton | 85.3 | 64.9 | 46.3 | 106.3 | 88.9 | 58.3 |
| I2L-MeshNet [38] ECCV'20 | 3D vertices | 65.1 | 55.7 | 41.1 | 110.1 | 93.2 | 57.7 |
| PC-HMR [36] AAAI'21 | 3D skeleton | - | - | - | 108.6 | 87.8 | 66.9 |
| HybrIK [28] CVPR'21 | 3D skeleton | 65.7 | 54.4 | 34.5 | 86.5 | 74.1 | 45.0 |
| METRO [31] CVPR'21 | 3D vertices | - | 54.0 | 36.7 | 88.2 | 77.1 | 47.9 |
| ROMP [46] ICCV'21 | - | - | - | - | 108.3 | 91.3 | 54.9 |
| Mesh Graphormer [32] ICCV'21 | 3D vertices | - | 51.2 | 34.5 | 87.7 | 74.7 | 45.6 |
| PARE [23] ICCV'21 | Segmentation | - | - | - | 88.6 | 74.5 | 46.5 |
| THUNDR [59] ICCV'21 | 3D markers | - | 55.0 | 39.8 | 88.0 | 74.8 | 51.5 |
| PyMaf [62] ICCV'21 | IUV image | - | 57.7 | 40.5 | 110.1 | 92.8 | 58.9 |
| ProHMR [26] ICCV'21 | - | - | - | 41.2 | - | - | 59.8 |
| OCHMR [20] CVPR'22 | 2D heatmap | - | - | - | 107.1 | 89.7 | 58.3 |
| 3DCrowdNet [8] CVPR'22 | 3D skeleton | - | - | - | 98.3 | 81.7 | 51.5 |
| CLIFF [30] ECCV'22 | - | - | 47.1 | 32.7 | 81.2 | 69.0 | 43.0 |
| FastMETRO [5] ECCV'22 | 3D vertices | - | 52.2 | 33.7 | 84.1 | 73.5 | 44.6 |
| VisDB [56] ECCV'22 | 3D vertices | - | 51.0 | 34.5 | 85.5 | 73.5 | 44.9 |
| Ours | Virtual marker | 58.0 | 47.3 | 32.0 | 77.9 | 67.5 | 41.3 |
+ +Table 2. Comparison to the state-of-the-arts on H3.6M [15] and 3DPW [52] datasets. $\dagger$ means using temporal cues. The methods are not strictly comparable because they may have different backbones and training datasets. We provide the numbers only to show proof-of-concept results. + +
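For context on how these numbers are produced at test time, the following is a minimal PyTorch-style sketch of the inference path of Section 3.2: a soft-argmax over the 3D heatmaps gives the marker positions $\hat{\mathbf{P}}$ (Eq. (3)), their heatmap scores are fed to a single fully-connected layer to produce the coefficient matrix $\hat{\mathbf{A}}$, and the mesh follows as $\hat{\mathbf{M}} = \hat{\mathbf{P}}\hat{\mathbf{A}}$ (Eq. (2)). The module layout, tensor names, and the exact shape of the fully-connected layer are assumptions for illustration.

```python
import torch
import torch.nn as nn

def soft_argmax_3d(heatmaps):
    # heatmaps: (B, K, D, H, W), each marker slice normalized to sum to 1 (Eq. (3)).
    B, K, D, H, W = heatmaps.shape
    grid_d = torch.arange(D, dtype=heatmaps.dtype, device=heatmaps.device).view(1, 1, D, 1, 1)
    grid_h = torch.arange(H, dtype=heatmaps.dtype, device=heatmaps.device).view(1, 1, 1, H, 1)
    grid_w = torch.arange(W, dtype=heatmaps.dtype, device=heatmaps.device).view(1, 1, 1, 1, W)
    d = (heatmaps * grid_d).sum(dim=(2, 3, 4))   # expected voxel index along each axis
    h = (heatmaps * grid_h).sum(dim=(2, 3, 4))
    w = (heatmaps * grid_w).sum(dim=(2, 3, 4))
    return torch.stack([w, h, d], dim=-1)        # (B, K, 3) marker positions P_hat

class MarkerInterpolation(nn.Module):
    # Predicts the coefficient matrix A_hat from marker confidences and interpolates the mesh.
    def __init__(self, K=64, M=6890):
        super().__init__()
        self.K, self.M = K, M
        self.fc = nn.Linear(K, K * M)   # one plausible reading of the "single fully-connected layer"

    def forward(self, P_hat, conf):
        # P_hat: (B, K, 3) marker positions; conf: (B, K) heatmap scores at those positions.
        A_hat = self.fc(conf).view(-1, self.K, self.M)      # (B, K, M)
        mesh = torch.einsum('bkc,bkm->bmc', P_hat, A_hat)   # M_hat = P_hat A_hat, shape (B, M, 3)
        return mesh
```

Letting the coefficients depend on the per-marker confidences is what allows the model to rely more on accurately detected markers when others are occluded, which is exactly the behavior ablated in Table 6.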
| Method | Intermediate Representation | MPVE↓ | MPJPE↓ | PA-MPJPE↓ |
| --- | --- | --- | --- | --- |
| HMR [18] CVPR'18 | - | 85.1 | 73.6 | 55.4 |
| BodyNet [50] ECCV'18 | Skel. + Seg. | 65.8 | - | - |
| GraphCMR [25] CVPR'19 | 3D vertices | 103.2 | 87.4 | 63.2 |
| SPIN [24] ICCV'19 | - | 82.3 | 66.7 | 43.7 |
| DecoMR [60] CVPR'20 | IUV image | 68.9 | 52.0 | 43.0 |
| Pose2Mesh [7] ECCV'20 | 3D skeleton | 68.8 | 56.6 | 39.6 |
| PC-HMR [36] AAAI'21 | 3D skeleton | 59.8 | 51.7 | 37.9 |
| * DynaBOA [13] TPAMI'22 | - | 70.7 | 55.2 | 34.0 |
| Ours | Virtual marker | 44.7 | 36.9 | 28.9 |
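The next subsection contrasts these learned virtual markers with randomly chosen vertices (Table 4, rows (b) and (c)). For intuition about the learning step itself, objective (1) of Section 3.1 can be approximated with a simple projected alternating scheme, sketched below. The paper adopts the Active-set solver of [4], so this is only an illustrative stand-in; the initialization, learning rate, and iteration count are assumptions.

```python
import numpy as np

def project_simplex(v):
    # Euclidean projection of a vector onto the probability simplex.
    u = np.sort(v)[::-1]
    css = np.cumsum(u)
    rho = np.nonzero(u + (1.0 - css) / (np.arange(len(v)) + 1.0) > 0)[0][-1]
    theta = (css[rho] - 1.0) / (rho + 1.0)
    return np.maximum(v - theta, 0.0)

def learn_virtual_markers(X, K=64, n_iter=200, lr=1e-4):
    # X: (3N, M) stacked vertex positions; returns B (M, K) and A (K, M) as in objective (1).
    M = X.shape[1]
    rng = np.random.default_rng(0)
    B = np.apply_along_axis(project_simplex, 0, rng.random((M, K)))
    A = np.apply_along_axis(project_simplex, 0, rng.random((K, M)))
    for _ in range(n_iter):
        Z = X @ B                                        # current virtual markers, (3N, K)
        A -= lr * (Z.T @ (Z @ A - X))                    # gradient step on A ...
        A = np.apply_along_axis(project_simplex, 0, A)   # ... then project each column onto the simplex
        B -= lr * (X.T @ ((X @ B @ A - X) @ A.T))        # gradient step on B ...
        B = np.apply_along_axis(project_simplex, 0, B)   # ... then project each column onto the simplex
    return B, A
```

The virtual markers are then $\mathbf{Z} = \mathbf{X}\mathbf{B}$; the snapping to the nearest mesh vertices and the left-right symmetrization described in Section 3.1 are applied afterwards.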
+ +# 4.4. Ablation study + +Virtual marker representation. We compare our method to two baselines in Table 4. First, in baseline (a), we replace the virtual markers of our method with the skeleton representation. The rest are kept the same as ours (c). Our + +Table 3. Comparison to the state-of-the-arts on SURREAL [51] dataset. * means training on the test split with 2D supervisions. "Skel. + Seg." means using skeleton and segmentation together. + +
| No. | Intermediate Representation | MPVE↓ (H3.6M) | MPVE↓ (SURREAL) |
| --- | --- | --- | --- |
| (a) | Skeleton | 64.4 | 53.6 |
| (b) | Rand virtual marker | 63.0 | 50.1 |
| (c) | Virtual marker | 58.0 | 44.7 |
+ +Table 4. Ablation study of the virtual marker representation for our approach on H3.6M and SURREAL datasets. "Skeleton" means the sparse landmark joint representation is used. "Rand virtual marker" means the virtual markers are randomly selected from all the vertices without learning. (c) is our method, where the learned virtual markers are used. + +method achieves a much lower MPVE than the baseline (a), demonstrating that the virtual markers help to estimate body shapes more accurately than the skeletons. In baseline (b), we randomly sample 64 from the 6890 mesh vertices as virtual markers. We repeat the experiment five times and report the average number. We can see that the result is worse than ours, which is because the randomly selected vertices may not be expressive to reconstruct the other vertices or can not + +![](images/d75d2439e70f101e0f0597491573606a5244e0ef6442b20606bf112db785d5f7.jpg) + +![](images/bf585a9744263c3b1f8ef02f1b7b9305bd4896c825be18a1c6af970551c93ab4.jpg) +Image + +![](images/19daadfeebe7f76537c8a3a3c8981aa125ceb53693b9da1fd227c199c98eff81.jpg) + +![](images/1a19c634fcd5bca6dec6a8569b592597c50cd1a6d2d511187f43e8d79d844853.jpg) +Pose2Mesh + +![](images/dee4dae334b9c78aa64931c0fc114c493247ad4ebeb923c379d47b11a00fa1d7.jpg) + +![](images/a2e01121534df4ea145f12232adcfe1c366ed57133f647ac77f9b15ecea858f1.jpg) + +![](images/514d2b4ec42e40e039e3a34b8bfde4f4a522a242b99341d2e424280812502d09.jpg) + +![](images/32d455f6b7f7ced9a2dabed08523e3dba17fc81385ca7145ba610be177ed1d76.jpg) +Ours + +![](images/740c31dda70dd2dfc4f69814bcdf139af247eef60dcd3014a76169cc041e0de6.jpg) + +![](images/9228de83b9f0fa68f5c48466135bfaa09c81e68d3d48d03ad9e480eb2a5b2417.jpg) + +![](images/48a9c41dc89b424c693fad759f6285598f6ace7d6fbd08d7deeb2ee89383e1bf.jpg) + +![](images/8281acdc46bbc40959de252d6c15a2ecfc0325139a723c9c13a160d5bf5716fa.jpg) +GT + +![](images/0d1fcf6f789f2e3f61fa2d4631812b595c7c583ebc42d6c5c0d38c6f884f0451.jpg) +Figure 4. Mesh estimation results of different methods on H3.6M test set. Our method with virtual marker representation gets better shape estimation results than Pose2Mesh which uses skeleton representation. Note the waistline of the body and the thickness of the arm. + +![](images/fba7ae4d1224de8fd8e9c29591ea274341b5f73ae951ad2b036a0bc75ac30c1f.jpg) +Figure 5. Visualization of the learned virtual markers of different numbers of $K = 16, 32, 96$ , from left to right, respectively. + +![](images/24d97febde435c7046067f49afbce89a10d520d7f36e8496eb2346e7e89ca6f5.jpg) +Figure 6. Mesh estimation comparison results when using (a) fixed coefficient matrix $\tilde{\mathbf{A}}^{sym}$ , and (b) updated $\hat{\mathbf{A}}$ . Please zoom in to better see the details. + +be accurately detected from images as they lack distinguishable visual patterns. The results validate the effectiveness of our learning strategy. + +Figure 1 shows some qualitative results on the SURREAL test set. The meshes estimated by the baseline which uses skeleton representation, i.e. Pose2Mesh [7], have inaccurate body shapes. This is reasonable because the skeleton is oversimplified and has very limited capability to recover shapes. Instead, it implicitly learns a mean shape for the whole training dataset. In contrast, the mesh estimated by using virtual markers has much better quality due to its strong representation power and therefore can handle different body shapes elegantly. Figure 4 also shows some qualitative results on the H3.6M test set. 
For clarity, we draw the intermediate representation (blue balls) in it as well. + +Number of virtual markers. We evaluate how the number of virtual markers affects estimation quality on H3.6M [15] dataset. Figure 5 visualizes the learned virtual markers, which are all located on the body surface and close to the extreme points of the mesh. This is expected as mentioned in Section 3.1. Table 5 (GT) shows the mesh reconstruction results when we have GT 3D positions of the virtual markers in objective (1). When we increase the number of virtual markers, both mesh reconstruction error (MPVE) and the regressed landmark joint error (MPJPE) steadily decrease. This is expected because using more virtual markers improves the representation power. However, using more + +
| K | MPVE↓ (GT) | MPJPE↓ (GT) | MPVE↓ (Det) | MPJPE↓ (Det) |
| --- | --- | --- | --- | --- |
| 16 | 46.8 | 39.8 | 58.7 | 47.8 |
| 32 | 20.1 | 14.2 | 58.2 | 48.3 |
| 64 | 11.0 | 7.5 | 58.0 | 47.3 |
| 96 | 9.9 | 5.6 | 59.6 | 48.2 |
+ +Table 5. Ablation study of the different number of virtual markers $(K)$ on H3.6M [15] dataset. (GT) Mesh reconstruction results when GT 3D positions of the virtual markers are used in objective (1). (Det) Mesh estimation results obtained by our proposed framework when we use different numbers of virtual markers $(K)$ . + +![](images/8b6485047caa0e196514516f49ae19921554e134ca1a6a5c0473633923679ae5.jpg) +Input Image + +![](images/ef7044907723ac23cdf036a13bc811d5915811b698b4fe28fceb6378677a97f8.jpg) +(a) Using fixed coefficient matrix + +![](images/2103297200699a6c36f2488db301fee592207ba6bd91cd061bd3179919384d4a.jpg) +(b) Using updated coefficient matrix + +virtual markers cannot guarantee smaller estimation errors when we need to estimate the virtual marker positions from images as in our method. This is because the additional virtual markers may have large estimation errors which affect the mesh estimation result. The results are shown in Table 5 (Det). Increasing the number of virtual markers $K$ steadily reduces the MPVE errors when $K$ is smaller than 96. However, if we keep increasing $K$ , the error begins to increase. This is mainly because some of the newly introduced virtual markers are difficult to detect from images and therefore bring errors to mesh estimation. + +Coefficient matrix. We compare our method to a baseline which uses the fixed coefficient matrix $\widetilde{\mathbf{A}}^{sym}$ . We show the quality comparison in Figure 6. We can see that the estimated mesh by a fixed coefficient matrix (a) has mostly correct pose and shape but there are also some artifacts on the mesh while using the updated coefficient matrix (b) can get better mesh estimation results. As shown in Table 6, using a fixed coefficient matrix gets larger MPVE and MPJPE errors than using the updated coefficient matrix. This is caused by the estimation errors of virtual markers when occlusion happens, which is inevitable since the virtual markers on the back will be self-occluded by the front body. As a result, inaccurate marker positions would bring large errors to the final mesh estimates if we directly use the fixed matrix. + +# 4.5. Qualitative Results + +Figure 7 (top) presents some meshes estimated by our approach on natural images from the 3DPW test set. The + +![](images/1466ae8ff36d2cb4d89cf6d32c584932755aa511355777bf8a4c9ac1680f2ec3.jpg) + +![](images/c09ecdf5e66aeac82f858beefb5178321d062416a75d2827b29e4e993cebabbd.jpg) + +![](images/b347b4823adb72f1beb89e53b693ee0f13a218c96f5a781942ec2f82522704de.jpg) + +![](images/671c4765f76e5082ec0a322174d690ced937e8c119ae69e2ba70eeb4e1af914a.jpg) + +![](images/91221dcf2a8def63b76923650cc63a8b12304b472cd57d79ba610a8f2dfea940.jpg) + +![](images/46828dff4792f15e0d3078870b51e27a8d808a785bca38b4597abc521593d694.jpg) + +![](images/0c363e8fa3f8e66654e0a9cca2f9b642d653bd3bbb3da1fa6ef432fad29dd5e7.jpg) + +![](images/53bf9295864d34d05d4c8e0ecae174a83cede494dba0449491e2723a0e105885.jpg) + +![](images/7d9b766b435982a945e5a0a41cb93a99292b6733c7a9709752a1cdd802da3505.jpg) + +![](images/14c41b8498a35d95bb93c324efee4de6fa93d65e487b12f84b40d8e84a0eb762.jpg) +Figure 7. Top: Meshes estimated by our approach on images from 3DPW test set. The rightmost case in the dashed box shows a typical failure. Bottom: Meshes estimated by our approach on Internet images with challenging cases (extreme shapes or in a long dress). 
+ +![](images/dd7af12bc3988e0f0949f3dbd6a4a20820bb0cdabd3fc01e999491084d64a442.jpg) + +![](images/3e480c67116134bf80778ea845b63acd5634c368cddaab6692f4f964b863dbca.jpg) + +![](images/d94ebada5ba03a75357488123772aab188e2b33f39c5e5ace79f1a7506558026.jpg) + +![](images/9e3043cdf52b4064082f78724e98235bea1d6dc37d8371b1b1d738120157d0aa.jpg) + +![](images/eeb1139646c7446dfe1947b26b0799e68b409a8a66aa334b64ae2c55eb1e3cba.jpg) + +![](images/f404861c5961e317b732009e6f4db114f4125dde743a31ffccbdecbbfb0507a2.jpg) + +![](images/8184ce0f5f56930eb841d6ef7ab4c10c460ee662953ed1b388455cf4ef0b2132.jpg) + +![](images/c554d8af59e261ebd3a6ab99776dc49058d643a3dd257d189b66367e7ba57236.jpg) + +![](images/d52d300af7ba7d41ce034ecd3811ccd8d0c76d3f0ab129dd6fec70a9be375d7e.jpg) + +
| No. | Method | Fixed $\widetilde{\mathbf{A}}^{sym}$ | Updated $\hat{\mathbf{A}}$ | MPVE↓ | MPJPE↓ |
|---|---|---|---|---|---|
| (a) | Ours (fixed) | ✓ | | 64.7 | 51.6 |
| (b) | Ours | | ✓ | 58.0 | 47.3 |

Table 6. Ablation study of the coefficient matrix for our approach on the H3.6M dataset. "fixed" means using the fixed coefficient matrix $\tilde{\mathbf{A}}^{sym}$ to reconstruct the mesh.
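For reference, the interpolation step compared in Table 6 is a single matrix product, $\hat{\mathbf{M}} = \hat{\mathbf{P}}\hat{\mathbf{A}}$ (Eq. (2)); in the updated variant the only learnable part is the fully-connected layer that maps the per-marker heatmap confidences to the coefficient matrix (Section 3.2). Below is a minimal PyTorch sketch of this step, assuming the straightforward reading that the layer outputs the full $K \times M$ matrix; the module and argument names are ours, not the official implementation.

```python
import torch
import torch.nn as nn

class MarkerInterpolation(nn.Module):
    """Recover mesh vertices from estimated virtual markers: M_hat = P_hat A_hat."""

    def __init__(self, A_sym: torch.Tensor, use_fixed: bool = False):
        super().__init__()
        K, M = A_sym.shape
        self.register_buffer("A_sym", A_sym)   # fixed (K, M) coefficient matrix
        self.fc = nn.Linear(K, K * M)          # confidences -> updated coefficients
        self.use_fixed = use_fixed             # True reproduces row (a) of Table 6

    def forward(self, markers: torch.Tensor, conf: torch.Tensor) -> torch.Tensor:
        # markers: (B, K, 3) estimated 3D virtual marker positions P_hat
        # conf:    (B, K)    heatmap scores at the estimated marker positions
        B, K, _ = markers.shape
        if self.use_fixed:
            A_hat = self.A_sym.unsqueeze(0).expand(B, -1, -1)    # (B, K, M)
        else:
            A_hat = self.fc(conf).view(B, K, -1)                 # (B, K, M)
        # Each mesh vertex is a weighted combination of the K markers.
        return torch.einsum("bkd,bkm->bmd", markers, A_hat)      # (B, M, 3)
```

Row (b) of Table 6 corresponds to `use_fixed=False`, trained end-to-end with the rest of the network.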
The rightmost case in Figure 7 (top) shows a typical failure, where our method estimates a wrong pose for the left leg due to heavy occlusion. The failure is confined to the local region, and the rest of the body still receives accurate estimates. We further analyze how inaccurate virtual markers affect the mesh estimate, i.e., when part of the human body is occluded or truncated. Based on the learned coefficient matrix $\hat{\mathbf{A}}$ of our model, we visualize the weights between the virtual markers and all vertices in Figure 8. The model learns a local and sparse dependency between each vertex and the virtual markers: for each vertex, the markers that contribute the most lie in a nearby region, as shown in Figure 8 (b). Therefore, at inference time, if a virtual marker is localized inaccurately due to occlusion or truncation, only the vertices that depend on it may be affected, while the rest are barely influenced. Figure 2 (right) shows more examples with occlusion or truncation, where our method still produces accurate or reasonable estimates. Note that when truncation occurs, our method still guesses the positions of the truncated virtual markers.

Figure 7 (bottom) shows our estimated meshes on challenging cases, which indicates the strong generalization ability of our model to diverse postures and actions in natural scenes. Please refer to the supplementary material for more qualitative results. Note that since the datasets do not provide supervision for head orientation, facial expression, hands, or feet, the estimates of these parts inevitably remain in canonical poses. Apart from that, most errors are due to inaccurate 3D virtual marker estimation, which may be addressed with more powerful estimators or more diverse training datasets in the future.

![](images/f77b347f26e1845c88e5275ea6153d190ca64e6431719b2841008da21507bd7f.jpg)
Figure 8. (a) For each virtual marker (represented by a star), we highlight the top 30 most affected vertices (colored dots) based on the averaged coefficient matrix $\hat{\mathbf{A}}$. (b) For each vertex (dot), we highlight the top 3 virtual markers (stars) that contribute the most. The dependency has a strong locality, which improves robustness when some virtual markers cannot be accurately detected.

![](images/d9dfe504d7a86215e536183b1b742b4dce25ee797a320c746254582265d0cfa5.jpg)
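The locality shown in Figure 8 can be read directly from the learned coefficient matrix: panel (b) keeps, for every vertex, the markers with the largest interpolation weights, and panel (a) keeps, for every marker, the vertices it influences most. A small NumPy sketch of this bookkeeping is given below; the names are ours, and the cutoffs (3 markers, 30 vertices) simply follow the figure caption.

```python
import numpy as np

def marker_vertex_dependencies(A_hat, top_markers=3, top_vertices=30):
    """Summarize which markers drive which vertices, as visualized in Figure 8.

    A_hat: (K, M) averaged coefficient matrix; entry (k, m) is the weight of
           virtual marker k in the interpolation of vertex m.
    """
    # Figure 8 (b): for each vertex (column), the markers with the largest weights.
    markers_per_vertex = np.argsort(-A_hat, axis=0)[:top_markers].T      # (M, top_markers)
    # Figure 8 (a): for each marker (row), the vertices with the largest weights.
    vertices_per_marker = np.argsort(-A_hat, axis=1)[:, :top_vertices]   # (K, top_vertices)
    return markers_per_vertex, vertices_per_marker
```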
# 5. Conclusion

In this paper, we present a novel intermediate representation, the virtual markers, which is more expressive than the prevailing skeleton representation and more accessible than physical markers. It allows 3D meshes to be reconstructed more accurately and efficiently, especially for diverse body shapes. Moreover, the coefficient matrix of the virtual marker representation encodes spatial relationships among mesh vertices, which lets the method implicitly exploit structural priors of the human body. Despite its simplicity, the approach achieves better mesh estimation results than the state-of-the-art methods and shows strong generalization ability.

# Acknowledgement

This work was supported by MOST-2022ZD0114900 and NSFC-62061136001.

# References

[1] Anurag Arnab, Carl Doersch, and Andrew Zisserman. Exploiting temporal context for 3d human pose estimation in the wild. In CVPR, pages 3395-3404, 2019.
[2] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In ECCV, pages 561-578, 2016.
[3] Ronan Boulic, Pascal Becheiraz, Luc Emering, and Daniel Thalmann. Integration of motion control techniques for virtual human and avatar real-time animation. In Proceedings of the ACM symposium on Virtual reality software and technology, pages 111-118, 1997.
[4] Yuansi Chen, Julien Mairal, and Zaid Harchaoui. Fast and robust archetypal analysis for representation learning. In CVPR, pages 1478-1485, 2014.
[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In ECCV, 2022.
[6] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3d human pose and shape from a video. In CVPR, pages 1964-1973, 2021.
[7] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2mesh: Graph convolutional network for 3d human pose and mesh recovery from a 2d human pose. In ECCV, pages 769-787, 2020.
[8] Hongsuk Choi, Gyeongsik Moon, JoonKyu Park, and Kyoung Mu Lee. Learning to estimate robust 3d human mesh from in-the-wild crowded scenes. In CVPR, pages 1475-1484, June 2022.
[9] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J Black. Monocular expressive body regression through body-driven attention. In ECCV, pages 20-40, 2020.
[10] Hai Ci, Mingdong Wu, Wentao Zhu, Xiaoxuan Ma, Hao Dong, Fangwei Zhong, and Yizhou Wang. Gfpose: Learning 3d human pose prior with gradient fields. arXiv preprint arXiv:2212.08641, 2022.
[11] Enric Corona, Gerard Pons-Moll, Guillem Alenyà, and Francesc Moreno-Noguer. Learned vertex descent: a new direction for 3d human model fitting. In ECCV, pages 146-165. Springer, 2022.
[12] Adele Cutler and Leo Breiman. Archetypal analysis. Technometrics, 36(4):338-347, 1994.
[13] Shanyan Guan, Jingwei Xu, Michelle Z He, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Out-of-domain human mesh reconstruction via dynamic bilevel online adaptation. IEEE TPAMI, 2022.
[14] Yinghao Huang, Federica Bogo, Christoph Lassner, Angjoo Kanazawa, Peter V Gehler, Javier Romero, Ijaz Akhter, and Michael J Black. Towards accurate marker-less human shape and pose estimation over time. In 3DV, pages 421-430, 2017.
[15] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE TPAMI, 36(7):1325-1339, 2013.
[16] Karim Iskakov, Egor Burkov, Victor Lempitsky, and Yury Malkov. Learnable triangulation of human pose. In ICCV, pages 7718-7727, 2019.
[17] Ian T Jolliffe. Principal components in regression analysis. In Principal component analysis, pages 129-155. Springer, 1986.
[18] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, pages 7122-7131, 2018.
[19] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3d human dynamics from video. In CVPR, pages 5614-5623, 2019.
[20] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In CVPR, pages 1715-1725, June 2022.
[21] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015.
[22] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black.
Vibe: Video inference for human body pose and shape estimation. In CVPR, pages 5253-5263, 2020. +[23] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation. In ICCV, pages 11127-11137, October 2021. +[24] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In ICCV, pages 2252-2261, 2019. +[25] Nikos Kolotouros, Georgios Pavlakos, and Kostas Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In CVPR, pages 4501-4510, 2019. +[26] Nikos Kolotouros, Georgios Pavlakos, Dinesh Jayaraman, and Kostas Daniilidis. Probabilistic modeling for human mesh recovery. In ICCV, pages 11605-11614, October 2021. +[27] Christoph Lassner, Javier Romero, Martin Kiefel, Federica Bogo, Michael J Black, and Peter V Gehler. Unite the people: Closing the loop between 3d and 2d human representations. In CVPR, pages 6050-6059, 2017. +[28] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. Hybrik: A hybrid analytical-neural inverse kinematics solution for 3d human pose and shape estimation. In CVPR, pages 3383-3393, 2021. +[29] Yong-Lu Li, Liang Xu, Xinpeng Liu, Xijie Huang, Yue Xu, Shiyi Wang, Hao-Shu Fang, Ze Ma, Mingyang Chen, and Cewu Lu. Pastanet: Toward human activity knowledge engine. In CVPR, pages 382-391, 2020. +[30] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. Cliff: Carrying location information in full frames into human pose and shape estimation. In ECCV, 2022. +[31] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, pages 1954-1963, 2021. +[32] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In ICCV, pages 12939-12948, 2021. +[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence + +Zitnick. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014. +[34] Matthew Loper, Naureen Mahmood, and Michael J Black. Mosh: Motion and shape capture from sparse markers. TOG, 33(6):1-13, 2014. +[35] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. TOG, 34(6):1-16, 2015. +[36] Tianyu Luan, Yali Wang, Junhao Zhang, Zhe Wang, Zhipeng Zhou, and Yu Qiao. Pc-hmr: Pose calibration for 3d human mesh recovery from 2d images/videos. In AAAI, pages 2269-2276, 2021. +[37] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, pages 506-516, 2017. +[38] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Imageto-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In ECCV, pages 752-768, 2020. +[39] Mohamed Omran, Christoph Lassner, Gerard Pons-Moll, Peter Gehler, and Bernt Schiele. Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In 3DV, pages 484-494. IEEE, 2018. +[40] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In CVPR, pages 10975-10985, 2019. +[41] Liliana Lo Presti and Marco La Cascia. 3d skeleton-based human action classification: A survey. 
Pattern Recognition, 53:130-147, 2016. +[42] Haibo Qiu, Chunyu Wang, Jingdong Wang, Naiyan Wang, and Wenjun Zeng. Cross view fusion for 3d human pose estimation. In ICCV, pages 4342-4351, 2019. +[43] Jiajun Su, Chunyu Wang, Xiaoxuan Ma, Wenjun Zeng, and Yizhou Wang. Virtualpose: Learning generalizable 3d human pose models from virtual data. In ECCV, pages 55-71. Springer, 2022. +[44] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, pages 5693-5703, 2019. +[45] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, pages 529-545, 2018. +[46] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3d people. In ICCV, pages 11179-11188, 2021. +[47] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In ICCV, pages 5349-5358, 2019. +[48] Hanyue Tu, Chunyu Wang, and Wenjun Zeng. Voxelpose: Towards multi-camera 3d human pose estimation in wild environment. In ECCV, pages 197-212. Springer, 2020. +[49] Hsiao-Yu Tung, Hsiao-Wei Tung, Ersin Yumer, and Katerina Fragkiadaki. Self-supervised learning of motion capture. In NIPS, volume 30, 2017. + +[50] Gul Varol, Duygu Ceylan, Bryan Russell, Jimei Yang, Ersin Yumer, Ivan Laptev, and Cordelia Schmid. Bodynet: Volumetric inference of 3d human body shapes. In ECCV, pages 20-36, 2018. +[51] Gul Varol, Javier Romero, Xavier Martin, Naureen Mahmood, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning from synthetic humans. In CVPR, pages 109-117, 2017. +[52] Timo von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In ECCV, pages 601-617, 2018. +[53] Ziniu Wan, Zhengjia Li, Maoqing Tian, Jianbo Liu, Shuai Yi, and Hongsheng Li. Encoder-decoder with multi-level attention for 3d human shape and pose estimation. In ICCV, pages 13033-13042, 2021. +[54] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In ECCV, pages 52-67, 2018. +[55] Yuanlu Xu, Song-Chun Zhu, and Tony Tung. Denserac: Joint 3d pose and shape estimation by dense render-and-compare. In ICCV, pages 7760-7770, 2019. +[56] Chun-Han Yao, Jimei Yang, Duygu Ceylan, Yi Zhou, Yang Zhou, and Ming-Hsuan Yang. Learning visibility for robust dense human body estimation. In ECCV, 2022. +[57] Hang Ye, Wentao Zhu, Chunyu Wang, Rujie Wu, and Yizhou Wang. Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In ECCV, pages 142-159. Springer, 2022. +[58] Andrei Zanfir, Elisabeta Marinoiu, and Cristian Sminchisescu. Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In CVPR, pages 2148-2157, 2018. +[59] Mihai Zanfir, Andrei Zanfir, Eduard Gabriel Bazavan, William T Freeman, Rahul Sukthankar, and Cristian Sminchiescu. Thundr: Transformer-based 3d human reconstruction with markers. In ICCV, pages 12971-12980, 2021. +[60] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3d human mesh regression with dense correspondence. In CVPR, pages 7054-7063, 2020. +[61] Hongwen Zhang, Jie Cao, Guo Lu, Wanli Ouyang, and Zhenan Sun. Learning 3d human shape and pose from dense body parts. IEEE TPAMI, 44(5):2610-2627, 2022. 
+[62] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In ICCV, pages 11446-11456, 2021. +[63] Yifu Zhang, Chunyu Wang, Xinggang Wang, Wenyu Liu, and Wenjun Zeng. Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE TPAMI, 45(2):2613-2626, 2022. \ No newline at end of file diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/images.zip b/2023/3D Human Mesh Estimation From Virtual Markers/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..08bec50fb5f623c44f433e803b96043a1ce6ff16 --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1749567a65f1c3774efc3d028df05f358e32ab1700cbf2b45eeb1dadae92a726 +size 619202 diff --git a/2023/3D Human Mesh Estimation From Virtual Markers/layout.json b/2023/3D Human Mesh Estimation From Virtual Markers/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ee393d26343a98128fc64c0070f90c05f9f11426 --- /dev/null +++ b/2023/3D Human Mesh Estimation From Virtual Markers/layout.json @@ -0,0 +1,10786 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 138, + 103, + 455, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 103, + 455, + 119 + ], + "spans": [ + { + "bbox": [ + 138, + 103, + 455, + 119 + ], + "type": "text", + "content": "3D Human Mesh Estimation from Virtual Markers" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "spans": [ + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "content": "Xiaoxuan Ma" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "content": " Jiajun Su" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "content": " Chunyu Wang" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "content": " Wentao Zhu" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "text", + "content": " Yizhou Wang" + }, + { + "bbox": [ + 92, + 141, + 501, + 158 + ], + "type": "inline_equation", + "content": "^{1,2,4}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "spans": [ + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "text", + "content": " School of Computer Science, Center on Frontiers of Computing Studies, Peking University \n" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "text", + "content": " Inst. 
for Artificial Intelligence, Peking University \n" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "text", + "content": " Microsoft Research Asia \n" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 74, + 167, + 519, + 232 + ], + "type": "text", + "content": " Nat'l Eng. Research Center of Visual Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 94, + 240, + 501, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 240, + 501, + 251 + ], + "spans": [ + { + "bbox": [ + 94, + 240, + 501, + 251 + ], + "type": "text", + "content": "{maxiaoxuan, sujiajun, wtzhu, yizhou.wang}@pku.edu.cn, chnuwa@microsoft.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 279, + 192, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 279, + 192, + 292 + ], + "spans": [ + { + "bbox": [ + 143, + 279, + 192, + 292 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 305, + 290, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 290, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 290, + 568 + ], + "type": "text", + "content": "Inspired by the success of volumetric 3D pose estimation, some recent human mesh estimators propose to estimate 3D skeletons as intermediate representations, from which, the dense 3D meshes are regressed by exploiting the mesh topology. However, body shape information is lost in extracting skeletons, leading to mediocre performance. The advanced motion capture systems solve the problem by placing dense physical markers on the body surface, which allows to extract realistic meshes from their non-rigid motions. However, they cannot be applied to wild images without markers. In this work, we present an intermediate representation, named virtual markers, which learns 64 landmark keypoints on the body surface based on the large-scale mocap data in a generative style, mimicking the effects of physical markers. The virtual markers can be accurately detected from wild images and can reconstruct the intact meshes with realistic shapes by simple interpolation. Our approach outperforms the state-of-the-art methods on three datasets. In particular, it surpasses the existing methods by a notable margin on the SURREAL dataset, which has diverse body shapes. Code is available at https://github.com/ShirleyMaxx/VirtualMarker." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 591, + 128, + 604 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 611, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 611, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 611, + 288, + 696 + ], + "type": "text", + "content": "3D human mesh estimation aims to estimate the 3D positions of the mesh vertices that are on the body surface. The task has attracted a lot of attention from the computer vision and computer graphics communities [3, 10, 18, 24, 26, 29, 34, 36, 41, 49] because it can benefit many applications such as virtual reality [14]. 
Recently, the deep learning-based methods [7, 18, 28] have significantly" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 328, + 279, + 527, + 541 + ], + "blocks": [ + { + "bbox": [ + 328, + 279, + 527, + 541 + ], + "lines": [ + { + "bbox": [ + 328, + 279, + 527, + 541 + ], + "spans": [ + { + "bbox": [ + 328, + 279, + 527, + 541 + ], + "type": "image", + "image_path": "9802a17301884443af2cf8a67de51599d2994b1a4602483b08ea496814f3ece9.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 551, + 548, + 595 + ], + "lines": [ + { + "bbox": [ + 305, + 551, + 548, + 595 + ], + "spans": [ + { + "bbox": [ + 305, + 551, + 548, + 595 + ], + "type": "text", + "content": "Figure 1. Mesh estimation results on four examples with different body shapes. Pose2Mesh [7] which uses 3D skeletons as the intermediate representation fails to predict accurate shapes. Our virtual marker-based method obtains accurate estimates." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 605, + 509, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 605, + 509, + 616 + ], + "spans": [ + { + "bbox": [ + 306, + 605, + 509, + 616 + ], + "type": "text", + "content": "advanced the accuracy on the benchmark datasets." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 618, + 548, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 548, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 548, + 713 + ], + "type": "text", + "content": "The pioneer methods [18,49] propose to regress the pose and shape parameters of the mesh models such as SMPL [35] directly from images. While straightforward, their accuracy is usually lower than the state-of-the-arts. The first reason is that the mapping from the image features to the model parameters is highly non-linear and suffers from image-model misalignment [28]. Besides, existing mesh datasets [15,27,37,52] are small and limited to simple labo" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 135, + 712 + ], + "type": "text", + "content": "*Corresponding author" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "534" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 106 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 106 + ], + "type": "text", + "content": "ratory environments due to the complex capturing process. The lack of sufficient training data severely limits its performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 110, + 288, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 288, + 361 + ], + "type": "text", + "content": "Recently, some works [25, 38] begin to formulate mesh estimation as a dense 3D keypoint detection task inspired by the success of volumetric pose estimation [42, 43, 45, 48, 57, 63]. For example, in [25, 38], the authors propose to regress the 3D positions of all vertices. However, it is computationally expensive because it has more than several thousand vertices. Moon and Lee [38] improve the efficiency by decomposing the 3D heatmaps into multiple 1D heatmaps at the cost of mediocre accuracy. Choi et al. [7] propose to first detect a sparser set of skeleton joints in the images, from which the dense 3D meshes are regressed by exploiting the mesh topology. The methods along this direction have attracted increasing attention [7, 28, 53] due to two reasons. First, the proxy task of 3D skeleton estimation can leverage the abundant 2D pose datasets which notably improves the accuracy. Second, mesh regression from the skeletons is efficient. However, important information about the body shapes is lost in extracting the 3D skeletons, which is largely overlooked previously. As a result, different types of body shapes, such as lean or obese, cannot be accurately estimated (see Figure 1)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 364, + 288, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 364, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 364, + 288, + 567 + ], + "type": "text", + "content": "The professional marker-based motion capture (mocap) method MoSh [34] places physical markers on the body surface and explore their subtle non-rigid motions to extract meshes with accurate shapes. However, the physical markers limit the approach to be used in laboratory environments. We are inspired to think whether we can identify a set of landmarks on the mesh as virtual markers, e.g., elbow and wrist, that can be detected from wild images, and allow to recover accurate body shapes? The desired virtual markers should satisfy several requirements. First, the number of markers should be much smaller than that of the mesh vertices so that we can use volumetric representations to efficiently estimate their 3D positions. 
Second, the markers should capture the mesh topology so that the intact mesh can be accurately regressed from them. Third, the virtual markers have distinguishable visual patterns so that they can be detected from images." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 288, + 713 + ], + "type": "text", + "content": "In this work, we present a learning algorithm based on archetypal analysis [12] to identify a subset of mesh vertices as the virtual markers that try to satisfy the above requirements to the best extent. Figure 2 shows that the learned virtual markers coarsely outline the body shape and pose which paves the way for estimating meshes with accurate shapes. Then we present a simple framework for 3D mesh estimation on top of the representation as shown in Figure 3. It first learns a 3D keypoint estimation network based on [45] to detect the 3D positions of the virtual markers. Then we recover the intact mesh simply by interpolating them. The interpolation weights are pre-trained in the representation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 107 + ], + "type": "text", + "content": "learning step and will be adjusted by a light network based on the prediction confidences of the virtual markers for each image." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 108, + 547, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 108, + 547, + 216 + ], + "spans": [ + { + "bbox": [ + 304, + 108, + 547, + 216 + ], + "type": "text", + "content": "We extensively evaluate our approach on three benchmark datasets. It consistently outperforms the state-of-the-art methods on all of them. In particular, it achieves a significant gain on the SURREAL dataset [51] which has a variety of body shapes. Our ablation study also validates the advantages of the virtual marker representation in terms of recovering accurate shapes. Finally, the method shows decent generalization ability and generates visually appealing results for the wild images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 228, + 389, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 228, + 389, + 241 + ], + "spans": [ + { + "bbox": [ + 306, + 228, + 389, + 241 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 248, + 499, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 248, + 499, + 261 + ], + "spans": [ + { + "bbox": [ + 306, + 248, + 499, + 261 + ], + "type": "text", + "content": "2.1. Optimization-based mesh estimation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 266, + 547, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 547, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 547, + 445 + ], + "type": "text", + "content": "Before deep learning dominates this field, 3D human mesh estimation [2, 27, 34, 40, 58] is mainly optimization-based, which optimizes the parameters of the human mesh models to match the observations. For example, Loper et al. [34] propose MoSh that optimizes the SMPL parameters to align the mesh with the 3D marker positions. 
It is usually used to get GT 3D meshes for benchmark datasets because of its high accuracy. Later works propose to optimize the model parameters or mesh vertices based on 2D image cues [2, 11, 27, 40, 58]. They extract intermediate representations such as 2D skeletons from the images and optimize the mesh model by minimizing the discrepancy between the model projection and the intermediate representations such as the 2D skeletons. These methods are usually sensitive to initialization and suffer from local optimum." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 455, + 480, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 455, + 480, + 467 + ], + "spans": [ + { + "bbox": [ + 306, + 455, + 480, + 467 + ], + "type": "text", + "content": "2.2. Learning-based mesh estimation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 474, + 547, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 474, + 547, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 474, + 547, + 569 + ], + "type": "text", + "content": "Recently, most works follow the learning-based framework and have achieved promising results. Deep networks [18, 24, 26, 36, 49] are used to regress the SMPL parameters from image features. However, learning the mapping from the image space to the parameter space is highly nonlinear [38]. In addition, they suffer from the misalignment between the meshes and image pixels [60]. These problems make it difficult to learn an accurate yet generalizable model." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "Some works propose to introduce proxy tasks to get intermediate representations first, hoping to alleviate the learning difficulty. In particular, intermediate representations of physical markers [59], IUV images [55,60-62], body part segmentation masks [23,27,39,50] and body skeletons [7,28,47,53] have been proposed. In particular, THUNDR [59] first estimates the 3D locations of physical markers from images and then reconstructs the mesh from the 3D markers. The physical markers can be interpreted as a simplified representation of body shape and pose. Although it is very accurate, it cannot be applied to wild images without markers. 
In contrast, body skeleton is a popular human representation" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "535" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 70, + 293, + 182 + ], + "blocks": [ + { + "bbox": [ + 73, + 70, + 293, + 182 + ], + "lines": [ + { + "bbox": [ + 73, + 70, + 293, + 182 + ], + "spans": [ + { + "bbox": [ + 73, + 70, + 293, + 182 + ], + "type": "image", + "image_path": "e284d5f27134f54524d381aedddf79673639cc90ce42d653625aa64ad2afe0d4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 192, + 547, + 237 + ], + "lines": [ + { + "bbox": [ + 45, + 192, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 45, + 192, + 547, + 237 + ], + "type": "text", + "content": "Figure 2. Left: The learned virtual markers (blue balls) in the back and front views. The grey balls mean they are invisible in the front view. The virtual markers act similarly to physical body markers and approximately outline the body shape. Right: Mesh estimation results by our approach, from left to right are input image, estimated 3D mesh overlayed on the image, and three different viewpoints showing the estimated 3D mesh with our intermediate predicted virtual markers (blue balls), respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 296, + 70, + 521, + 183 + ], + "blocks": [ + { + "bbox": [ + 296, + 70, + 521, + 183 + ], + "lines": [ + { + "bbox": [ + 296, + 70, + 521, + 183 + ], + "spans": [ + { + "bbox": [ + 296, + 70, + 521, + 183 + ], + "type": "image", + "image_path": "f69b18821bf52c010f0513102762f9d306e115d3ac5d7d71172e968edab7b793.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 246, + 288, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 288, + 304 + ], + "type": "text", + "content": "that can be robustly detected from wild images. Choi et al. [7] propose to first estimate the 3D skeletons, and then estimate the intact mesh from them. However, accurate body shapes are difficult to be recovered from the oversimplified 3D skeletons." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 305, + 288, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 288, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 288, + 390 + ], + "type": "text", + "content": "Our work belongs to the learning-based class and is related to works that use physical markers or skeletons as intermediate representations. But different from them, we propose a novel intermediate representation, named virtual markers, which is more expressive to reduce the ambiguity in pose and shape estimation than body skeletons and can be applied to wild images." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 400, + 103, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 400, + 103, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 400, + 103, + 412 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 420, + 287, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 420, + 287, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 420, + 287, + 492 + ], + "type": "text", + "content": "In this section, we describe the details of our approach. First, Section 3.1 introduces how we learn the virtual marker representation from mocap data. Then we present the overall framework for mesh estimation from an image in Section 3.2. At last, Section 3.3 discusses the loss functions and training details." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 500, + 231, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 500, + 231, + 513 + ], + "spans": [ + { + "bbox": [ + 47, + 500, + 231, + 513 + ], + "type": "text", + "content": "3.1. The virtual marker representation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": "We represent a mesh by a vector of vertex positions " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^{3M}" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " is the number of mesh vertices. Denote a mocap dataset such as [15] with " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " meshes as " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{X}} = [\\mathbf{x}_1, \\dots, \\mathbf{x}_N] \\in \\mathbb{R}^{3M \\times N}" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": ". To unveil the latent structure among vertices, we reshape it to " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{3N \\times M}" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " with each column " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i \\in \\mathbb{R}^{3N}" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " representing all possible positions of the " + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "inline_equation", + "content": "i^{\\text{th}}" + }, + { + "bbox": [ + 46, + 518, + 287, + 605 + ], + "type": "text", + "content": " vertex in the dataset [15]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": "The rank of " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " is smaller than " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " because the mesh representation is smooth and redundant where some vertices can be accurately reconstructed by the others. While it seems natural to apply PCA [17] to " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " to compute the eigenvectors as virtual markers for reconstructing others, there is no guarantee that the virtual markers correspond to the mesh vertices, making them difficult to be detected from images. Instead, we aim to learn " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " virtual markers " + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Z} = [\\mathbf{z}_1,\\dots,\\mathbf{z}_K]\\in \\mathbb{R}^{3N\\times K}" + }, + { + "bbox": [ + 46, + 605, + 288, + 714 + ], + "type": "text", + "content": " that try to satisfy the follow" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 243, + 536, + 280 + ], + "blocks": [ + { + "bbox": [ + 318, + 243, + 536, + 280 + ], + "lines": [ + { + "bbox": [ + 318, + 243, + 536, + 280 + ], + "spans": [ + { + "bbox": [ + 318, + 243, + 536, + 280 + ], + "type": "table", + "html": "
TypeFormulaReconst. Error (mm) ↓
Original||X - XBA||2F11.67
Symmetric||X - XBsymAsym||2F10.98
", + "image_path": "b639b5ec35efba53e27e609d318a2846cf61c1f6c4540b6f9ec5a58220aa7c2a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "lines": [ + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "spans": [ + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "type": "text", + "content": "Table 1. The reconstruction errors using the original and the symmetric sets of markers on the H3.6M dataset [15], respectively. The errors are small indicating that they are sufficiently expressive and can reconstruct all vertices accurately." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "text", + "content": "ing two requirements to the greatest extent. First, they can accurately reconstruct the intact mesh " + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "text", + "content": " by their linear combinations: " + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{X} = \\mathbf{Z}\\mathbf{A}" + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\in \\mathbb{R}^{K \\times M}" + }, + { + "bbox": [ + 304, + 342, + 547, + 437 + ], + "type": "text", + "content": " is a coefficient matrix that encodes the spatial relationship between the virtual markers and the mesh vertices. Second, they should have distinguishable visual patterns in images so that they can be easily detected from images. Ideally, they can be on the body surface as the meshes." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": "We apply archetypal analysis [4, 12] to learn " + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": " by minimizing a reconstruction error with two additional constraints: (1) each vertex " + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": " can be reconstructed by convex combinations of " + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": ", and (2) each marker " + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_i" + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": " should be convex combinations of the mesh vertices " + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 304, + 438, + 547, + 498 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 352, + 508, + 545, + 535 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 508, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 352, + 508, + 545, + 535 + ], + "type": "interline_equation", + "content": "\\min _ {\\substack {\\boldsymbol {\\alpha} _ {i} \\in \\Delta_ {K} \\text {for} 1 \\leq i \\leq M, \\\\ \\boldsymbol {\\beta} _ {j} \\in \\Delta_ {M} \\text {for} 1 \\leq j \\leq K}} \\| \\mathbf {X} - \\mathbf {X B A} \\| _ {F} ^ {2}, \\tag{1}", + "image_path": "34fdea6cb7ffba66dd98e62c5fed5cc3369ab6897617698a1ce1b858c1640440.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = [\\pmb{\\alpha}_1, \\dots, \\pmb{\\alpha}_M] \\in \\mathbb{R}^{K \\times M}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": ", each " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\pmb{\\alpha}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": " resides in the simplex " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\Delta_K \\triangleq \\{\\pmb{\\alpha} \\in \\mathbb{R}^K \\text{ s.t. 
} \\pmb{\\alpha} \\succeq 0 \\text{ and } ||\\pmb{\\alpha}||_1 = 1\\}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{B} = [\\beta_1, \\dots, \\beta_K] \\in \\mathbb{R}^{M \\times K}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\beta_j \\in \\Delta_M" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": ". We adopt Active-set algorithm [4] to solve objective (1) and obtain the learned virtual markers " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{Z} = \\mathbf{X}\\mathbf{B} \\in \\mathbb{R}^{3N \\times K}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": ". As shown in [4, 12], the two constraints encourage the virtual markers " + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 304, + 544, + 547, + 654 + ], + "type": "text", + "content": " to unveil the latent structure among vertices, therefore they learn to be close to the extreme points of the mesh and located on the body surface as much as possible." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": "Post-processing. Since human body is left-right symmetric, we adjust " + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": " to reflect the property. We first replace each " + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_i\\in" + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": " by its nearest vertex on the mesh and obtain " + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{Z}}\\in \\mathbb{R}^{3\\times K}" + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": ". 
This step allows us to compute the left or right counterpart" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "536" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 77, + 72, + 512, + 205 + ], + "blocks": [ + { + "bbox": [ + 77, + 72, + 512, + 205 + ], + "lines": [ + { + "bbox": [ + 77, + 72, + 512, + 205 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 512, + 205 + ], + "type": "image", + "image_path": "733021ed3ea7358bb09d5c51c395e53373c9f6e4de354a829e9c325bc3dccae1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "lines": [ + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "spans": [ + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": "Figure 3. Overview of our framework. Given an input image " + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": ", it first estimates the 3D positions " + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": " of the virtual markers. Then we update the coefficient matrix " + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{A}}" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": " based on the estimation confidence scores " + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{C}" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": " of the virtual markers. Finally, the complete human mesh can be simply recovered by linear multiplication " + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{M}} = \\hat{\\mathbf{P}}\\hat{\\mathbf{A}}" + }, + { + "bbox": [ + 46, + 213, + 547, + 247 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": "of each marker. Then we replace the markers in the right body with the symmetric vertices in the left body and obtain the symmetric markers " + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{Z}}^{sym} \\in \\mathbb{R}^{3 \\times K}" + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": ". 
Finally we update " + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": " by minimizing " + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "inline_equation", + "content": "||\\mathbf{X} - \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}\\widetilde{\\mathbf{A}}^{sym}||_F^2" + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": " subject to " + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{Z}}^{sym} = \\mathbf{X}\\widetilde{\\mathbf{B}}^{sym}" + }, + { + "bbox": [ + 46, + 257, + 288, + 330 + ], + "type": "text", + "content": ". More details are elaborated in the supplementary." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "text", + "content": "Figure 2 shows the virtual markers learned on the mocap dataset [15] after post-processing. They are similar to the physical markers and approximately outline the body shape which agrees with our expectations. They are roughly evenly distributed on the surface of the body, and some of them are located close to the body keypoints, which have distinguishable visual patterns to be accurately detected. Table 1 shows the reconstruction errors of using original markers " + }, + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "inline_equation", + "content": "\\mathbf{XB}" + }, + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "text", + "content": " and the symmetric markers " + }, + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{XB}}^{sym}" + }, + { + "bbox": [ + 46, + 330, + 290, + 450 + ], + "type": "text", + "content": ". Both can reconstruct meshes accurately." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 456, + 202, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 202, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 202, + 467 + ], + "type": "text", + "content": "3.2. Mesh estimation framework" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 474, + 288, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 288, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 288, + 556 + ], + "type": "text", + "content": "On top of the virtual markers, we present a simple yet effective framework for end-to-end 3D human mesh estimation from a single image. As shown in Figure 3, it consists of two branches. 
3.2. Mesh estimation framework

On top of the virtual markers, we present a simple yet effective framework for end-to-end 3D human mesh estimation from a single image. As shown in Figure 3, it consists of two branches. The first branch uses a volumetric CNN [45] to estimate the 3D positions $\hat{\mathbf{P}}$ of the markers, and the second branch reconstructs the full mesh $\hat{\mathbf{M}}$ by predicting a coefficient matrix $\hat{\mathbf{A}}$:

$$\hat{\mathbf{M}} = \hat{\mathbf{P}} \hat{\mathbf{A}}. \tag{2}$$

We will describe the two branches in more detail.

3D marker estimation. We train a neural network to estimate a 3D heatmap $\hat{\mathbf{H}} = [\hat{\mathbf{H}}_1, \dots, \hat{\mathbf{H}}_K] \in \mathbb{R}^{K \times D \times H \times W}$ from an image. The heatmap encodes the per-voxel likelihood of each marker. There are $D \times H \times W$ voxels in total, which are used to discretize the 3D space. The 3D position $\hat{\mathbf{P}}_z \in \mathbb{R}^3$ of each marker is computed as the center of mass of the corresponding heatmap $\hat{\mathbf{H}}_z$ [45] as follows:

$$\hat{\mathbf{P}}_z = \sum_{d=1}^{D} \sum_{h=1}^{H} \sum_{w=1}^{W} (d, h, w) \cdot \hat{\mathbf{H}}_z(d, h, w). \tag{3}$$

The positions of all markers are represented as $\hat{\mathbf{P}} = [\hat{\mathbf{P}}_1, \hat{\mathbf{P}}_2, \dots, \hat{\mathbf{P}}_K]$.
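For illustration, here is a minimal NumPy sketch of the center-of-mass readout in Eq. (3). It assumes each $\hat{\mathbf{H}}_z$ is non-negative and sums to one over the voxel grid (e.g. after a softmax); the function name and the toy data are illustrative, not the paper's code.

```python
import numpy as np

def soft_argmax_3d(heatmaps):
    """Center-of-mass readout of Eq. (3).

    heatmaps: (K, D, H, W) array, one normalized voxel distribution per marker.
    Returns (K, 3) marker positions in voxel coordinates (d, h, w).
    """
    K, D, H, W = heatmaps.shape
    d = np.arange(D, dtype=float)
    h = np.arange(H, dtype=float)
    w = np.arange(W, dtype=float)
    # Expected coordinate along each axis under the per-marker distribution.
    pd = np.einsum("kdhw,d->k", heatmaps, d)
    ph = np.einsum("kdhw,h->k", heatmaps, h)
    pw = np.einsum("kdhw,w->k", heatmaps, w)
    return np.stack([pd, ph, pw], axis=1)

# Toy usage: 4 markers on a 64^3 grid (the paper uses K = 64 and D = H = W = 64).
logits = np.random.randn(4, 64, 64, 64)
probs = np.exp(logits - logits.max(axis=(1, 2, 3), keepdims=True))
probs /= probs.sum(axis=(1, 2, 3), keepdims=True)
P_hat = soft_argmax_3d(probs)   # (4, 3)
```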
Interpolation. Ideally, if we have accurate estimates for all virtual markers $\hat{\mathbf{P}}$, then we can recover the complete mesh by simply multiplying $\hat{\mathbf{P}}$ with a fixed coefficient matrix $\widetilde{\mathbf{A}}^{sym}$ with sufficient accuracy, as validated in Table 1. However, in practice, some markers may have large estimation errors because they may be occluded in the monocular setting. Note that this happens frequently. For example, the markers on the back will be occluded when a person is facing the camera. As a result, inaccurate marker positions may bring large errors to the final mesh if we directly multiply them with the fixed matrix $\widetilde{\mathbf{A}}^{sym}$.

Our solution is to rely more on those accurately detected markers. To that end, we propose to update the coefficient matrix based on the estimation confidence scores of the markers. In practice, we simply take the heatmap score at the estimated position of each marker, i.e. $\hat{\mathbf{H}}_z(\hat{\mathbf{P}}_z)$, and feed them to a single fully-connected layer to obtain the coefficient matrix $\hat{\mathbf{A}}$. Then the mesh is reconstructed by $\hat{\mathbf{M}} = \hat{\mathbf{P}}\hat{\mathbf{A}}$.
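The confidence-driven update can be sketched in NumPy as below, assuming the per-marker confidence is the heatmap value $\hat{\mathbf{H}}_z(\hat{\mathbf{P}}_z)$ and that a single fully-connected layer maps the $K$ confidences to the $K \times M$ coefficient matrix. The toy vertex count and the random weights are placeholders, not the trained model.

```python
import numpy as np

rng = np.random.default_rng(0)
K, M = 64, 100      # markers and a toy vertex count; the SMPL mesh actually has 6890 vertices

# Stand-ins for the detector outputs: marker positions (3 x K) and the
# heatmap value read out at each estimated position, used as confidence.
P_hat = rng.standard_normal((3, K))
conf = rng.random(K)

# A single fully-connected layer mapping the K confidences to a K x M coefficient matrix.
W_fc = rng.standard_normal((K, K * M)) * 0.01
b_fc = np.zeros(K * M)
A_hat = (conf @ W_fc + b_fc).reshape(K, M)

# Eq. (2): the full mesh as a linear combination of the detected markers.
M_hat = P_hat @ A_hat      # (3, M) vertex coordinates
```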
3.3. Training

We train the whole network end-to-end in a supervised way. The overall loss function is defined as:

$$\mathcal{L} = \lambda_{vm} \mathcal{L}_{vm} + \lambda_{c} \mathcal{L}_{\text{conf}} + \lambda_{m} \mathcal{L}_{\text{mesh}}. \tag{4}$$

Virtual marker loss. We define $\mathcal{L}_{vm}$ as the $L_1$ distance between the predicted 3D virtual markers $\hat{\mathbf{P}}$ and the GT $\hat{\mathbf{P}}^{*}$ as follows:

$$\mathcal{L}_{vm} = \left\| \hat{\mathbf{P}} - \hat{\mathbf{P}}^{*} \right\|_1. \tag{5}$$

Note that it is easy to get GT markers $\hat{\mathbf{P}}^{*}$ from GT meshes as stated in Section 3.1 without additional manual annotations.

Confidence loss. We also require that the 3D heatmaps have reasonable shapes; therefore, the heatmap score at the voxel containing the GT marker position $\hat{\mathbf{P}}_z^{*}$ should have the maximum value, as in the previous work [16]:

$$\mathcal{L}_{\text{conf}} = -\sum_{z=1}^{K} \log\left(\hat{\mathbf{H}}_z\left(\hat{\mathbf{P}}_z^{*}\right)\right). \tag{6}$$

Mesh loss. Following [38], we define $\mathcal{L}_{\text{mesh}}$ as a weighted sum of four losses:

$$\mathcal{L}_{\text{mesh}} = \mathcal{L}_{\text{vertex}} + \mathcal{L}_{\text{pose}} + \mathcal{L}_{\text{normal}} + \lambda_{e} \mathcal{L}_{\text{edge}}. \tag{7}$$

- Vertex coordinate loss. We adopt an $L_1$ loss between the predicted 3D mesh coordinates $\hat{\mathbf{M}}$ and the GT mesh $\hat{\mathbf{M}}^{*}$:

$$\mathcal{L}_{\text{vertex}} = \left\| \hat{\mathbf{M}} - \hat{\mathbf{M}}^{*} \right\|_1. \tag{8}$$

- Pose loss. We use an $L_1$ loss between the 3D landmark joints regressed from the mesh, $\hat{\mathbf{M}}\mathcal{J}$, and the GT joints $\hat{\mathbf{J}}^{*}$:

$$\mathcal{L}_{\text{pose}} = \left\| \hat{\mathbf{M}}\mathcal{J} - \hat{\mathbf{J}}^{*} \right\|_1, \tag{9}$$

where $\mathcal{J} \in \mathbb{R}^{M \times J}$ is a pre-defined joint regression matrix in the SMPL model [2].

- Surface losses. To improve surface smoothness [54], we supervise the normal vector of each triangle face with the GT normal vectors by $\mathcal{L}_{\text{normal}}$, and the edge lengths of the predicted mesh with the GT lengths by $\mathcal{L}_{\text{edge}}$:

$$\begin{aligned} \mathcal{L}_{\text{normal}} &= \sum_{f} \sum_{\{i, j\} \subset f} \left| \left\langle \frac{\hat{\mathbf{M}}_i - \hat{\mathbf{M}}_j}{\|\hat{\mathbf{M}}_i - \hat{\mathbf{M}}_j\|_2}, \hat{\mathbf{n}}_f^{*} \right\rangle \right|, \\ \mathcal{L}_{\text{edge}} &= \sum_{f} \sum_{\{i, j\} \subset f} \left| \|\hat{\mathbf{M}}_i - \hat{\mathbf{M}}_j\|_2 - \|\hat{\mathbf{M}}_i^{*} - \hat{\mathbf{M}}_j^{*}\|_2 \right|, \end{aligned} \tag{10}$$

where $f$ and $\hat{\mathbf{n}}_f^{*}$ denote a triangle face in the mesh and its GT unit normal vector, respectively. $\hat{\mathbf{M}}_i$ denotes the $i^{th}$ vertex of $\hat{\mathbf{M}}$, and $*$ denotes GT.
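Putting Eqs. (4)–(10) together, here is a hedged NumPy sketch of the full training loss. The tensor layouts (markers as (K, 3), vertices as (V, 3), joint regressor as (V, J)) and the unit loss weights are illustrative assumptions rather than the paper's exact implementation.

```python
import numpy as np

def l1(a, b):
    return np.abs(a - b).sum()

def surface_losses(M_hat, M_gt, faces):
    """L_normal and L_edge of Eq. (10). M_hat, M_gt: (V, 3); faces: (F, 3) vertex indices."""
    L_normal, L_edge = 0.0, 0.0
    for f in faces:
        e1, e2 = M_gt[f[1]] - M_gt[f[0]], M_gt[f[2]] - M_gt[f[0]]
        n_gt = np.cross(e1, e2)
        n_gt /= np.linalg.norm(n_gt) + 1e-9                      # GT unit normal of the face
        for i, j in [(f[0], f[1]), (f[0], f[2]), (f[1], f[2])]:
            edge_hat = M_hat[i] - M_hat[j]
            len_hat = np.linalg.norm(edge_hat) + 1e-9
            len_gt = np.linalg.norm(M_gt[i] - M_gt[j])
            L_normal += abs(np.dot(edge_hat / len_hat, n_gt))    # predicted edge vs GT normal
            L_edge += abs(len_hat - len_gt)                      # edge-length consistency
    return L_normal, L_edge

def total_loss(P_hat, P_gt, H_hat, vox_gt, M_hat, M_gt, J_reg, J_gt, faces,
               lam_vm=1.0, lam_c=1.0, lam_m=1.0, lam_e=1.0):
    L_vm = l1(P_hat, P_gt)                                       # Eq. (5)
    k = np.arange(H_hat.shape[0])
    L_conf = -np.log(H_hat[k, vox_gt[:, 0], vox_gt[:, 1], vox_gt[:, 2]] + 1e-9).sum()  # Eq. (6)
    L_vertex = l1(M_hat, M_gt)                                   # Eq. (8)
    L_pose = l1(J_reg.T @ M_hat, J_gt)                           # Eq. (9): regressed joints vs GT
    L_normal, L_edge = surface_losses(M_hat, M_gt, faces)        # Eq. (10)
    L_mesh = L_vertex + L_pose + L_normal + lam_e * L_edge       # Eq. (7)
    return lam_vm * L_vm + lam_c * L_conf + lam_m * L_mesh       # Eq. (4)
```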
4. Experiments

4.1. Datasets and metrics

H3.6M [15]. We use (S1, S5, S6, S7, S8) for training and (S9, S11) for testing. As in [7, 18, 31, 32], we report MPJPE and PA-MPJPE for poses that are derived from the estimated meshes. We also report Mean Per Vertex Error (MPVE) for the whole mesh.

3DPW [52] is collected in natural scenes. Following the previous works [23, 31, 32, 59], we use the train set of 3DPW to learn the model and evaluate on the test set. The same evaluation metrics as H3.6M are used.

SURREAL [51] is a large-scale synthetic dataset with GT SMPL annotations and has diverse samples in terms of body shapes, backgrounds, etc. We use its training set to train a model and evaluate on the test split following [7].

4.2. Implementation Details

We learn 64 virtual markers on the H3.6M [15] training set. We use the same set of markers for all datasets instead of learning a separate set on each dataset. Following [7, 18, 22, 25, 31, 32, 38, 59], we conduct mix-training by using the MPI-INF-3DHP [37], UP-3D [27], and COCO [33] training sets for experiments on the H3.6M and 3DPW datasets. We adapt a 3D pose estimator [45] with HRNet-W48 [44] as the image feature backbone for estimating the 3D virtual markers. We set the number of voxels in each dimension to be 64, i.e. $D = H = W = 64$ for the 3D heatmaps. Following [18, 25, 38], we crop every single human region from the input image and resize it to $256 \times 256$. We use the Adam [21] optimizer to train the whole framework for 40 epochs with a batch size of 32. The learning rates for the two branches are set to $5 \times 10^{-4}$ and $1 \times 10^{-3}$, respectively, and are decreased by half after the $30^{th}$ epoch. Please refer to the supplementary for more details.
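For concreteness, here is a rough PyTorch-style sketch of the optimization schedule described above (Adam, 40 epochs, batch size 32, per-branch learning rates of 5e-4 and 1e-3, both halved after epoch 30). The two `torch.nn.Linear` modules are only placeholders for the marker and coefficient branches, not the actual architecture.

```python
import torch

# Placeholder modules standing in for the two branches; the real ones are a
# volumetric CNN with an HRNet-W48 backbone and a single fully-connected layer.
marker_branch = torch.nn.Linear(2048, 64 * 3)
coeff_branch = torch.nn.Linear(64, 64 * 6890)

optimizer = torch.optim.Adam([
    {"params": marker_branch.parameters(), "lr": 5e-4},
    {"params": coeff_branch.parameters(), "lr": 1e-3},
])
# Halve both learning rates after the 30th of 40 epochs.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30], gamma=0.5)

for epoch in range(40):
    # ... iterate 256x256 crops in mini-batches of 32, compute the loss of Eq. (4),
    # then optimizer.zero_grad(); loss.backward(); optimizer.step()
    scheduler.step()
```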
4.3. Comparison to the State-of-the-arts

Results on H3.6M. Table 2 compares our approach to the state-of-the-art methods on the H3.6M dataset. Our method achieves competitive or superior performance. In particular, it outperforms the methods that use skeletons (Pose2Mesh [7], DSD-SATN [47]), body markers (THUNDR [59]), or IUV images [60, 62] as proxy representations, demonstrating the effectiveness of the virtual marker representation.

Results on 3DPW. We compare our method to the state-of-the-art methods on the 3DPW dataset in Table 2. Our approach achieves state-of-the-art results among all the methods, validating the advantages of the virtual marker representation over the skeleton representation used in Pose2Mesh [7] and DSD-SATN [47], and over other representations like the IUV image used in PyMAF [62]. In particular, our approach outperforms I2L-MeshNet [38], METRO [31], and Mesh Graphormer [32] by a notable margin, which suggests that virtual markers are a more suitable and effective representation than detecting all vertices directly, as most vertices are not discriminative enough to be accurately detected.

Results on SURREAL. This dataset has more diverse samples in terms of body shapes. The results are shown in Table 3. Our approach outperforms the state-of-the-art methods by a notable margin, especially in terms of MPVE. Figure 1 shows some challenging cases without cherry-picking. The skeleton representation loses the body shape information, so the method [7] can only recover mean shapes. In contrast, our approach generates much more accurate mesh estimation results.

| Method | Intermediate Representation | H3.6M MPVE↓ | H3.6M MPJPE↓ | H3.6M PA-MPJPE↓ | 3DPW MPVE↓ | 3DPW MPJPE↓ | 3DPW PA-MPJPE↓ |
|---|---|---|---|---|---|---|---|
| † Arnab et al. [1] CVPR'19 | 2D skeleton | - | 77.8 | 54.3 | - | - | 72.2 |
| † HMMR [19] CVPR'19 | - | - | - | 56.9 | 139.3 | 116.5 | 72.6 |
| † DSD-SATN [47] ICCV'19 | 3D skeleton | - | 59.1 | 42.4 | - | - | 69.5 |
| † VIBE [22] CVPR'20 | - | - | 65.9 | 41.5 | 99.1 | 82.9 | 51.9 |
| † TCMR [6] CVPR'21 | - | - | 62.3 | 41.1 | 102.9 | 86.5 | 52.7 |
| † MAED [53] ICCV'21 | 3D skeleton | - | 56.3 | 38.7 | 92.6 | 79.1 | 45.7 |
| SMPLify [2] ECCV'16 | 2D skeleton | - | - | 82.3 | - | - | - |
| HMR [18] CVPR'18 | - | 96.1 | 88.0 | 56.8 | 152.7 | 130.0 | 81.3 |
| GraphCMR [25] CVPR'19 | 3D vertices | - | - | 50.1 | - | - | 70.2 |
| SPIN [24] ICCV'19 | - | - | - | 41.1 | 116.4 | 96.9 | 59.2 |
| DenseRac [55] ICCV'19 | IUV image | - | 76.8 | 48.0 | - | - | - |
| DecoMR [60] CVPR'20 | IUV image | - | 60.6 | 39.3 | - | - | - |
| ExPose [9] ECCV'20 | - | - | - | - | - | 93.4 | 60.7 |
| Pose2Mesh [7] ECCV'20 | 3D skeleton | 85.3 | 64.9 | 46.3 | 106.3 | 88.9 | 58.3 |
| I2L-MeshNet [38] ECCV'20 | 3D vertices | 65.1 | 55.7 | 41.1 | 110.1 | 93.2 | 57.7 |
| PC-HMR [36] AAAI'21 | 3D skeleton | - | - | - | 108.6 | 87.8 | 66.9 |
| HybrIK [28] CVPR'21 | 3D skeleton | 65.7 | 54.4 | 34.5 | 86.5 | 74.1 | 45.0 |
| METRO [31] CVPR'21 | 3D vertices | - | 54.0 | 36.7 | 88.2 | 77.1 | 47.9 |
| ROMP [46] ICCV'21 | - | - | - | - | 108.3 | 91.3 | 54.9 |
| Mesh Graphormer [32] ICCV'21 | 3D vertices | - | 51.2 | 34.5 | 87.7 | 74.7 | 45.6 |
| PARE [23] ICCV'21 | Segmentation | - | - | - | 88.6 | 74.5 | 46.5 |
| THUNDR [59] ICCV'21 | 3D markers | - | 55.0 | 39.8 | 88.0 | 74.8 | 51.5 |
| PyMAF [62] ICCV'21 | IUV image | - | 57.7 | 40.5 | 110.1 | 92.8 | 58.9 |
| ProHMR [26] ICCV'21 | - | - | - | 41.2 | - | - | 59.8 |
| OCHMR [20] CVPR'22 | 2D heatmap | - | - | - | 107.1 | 89.7 | 58.3 |
| 3DCrowdNet [8] CVPR'22 | 3D skeleton | - | - | - | 98.3 | 81.7 | 51.5 |
| CLIFF [30] ECCV'22 | - | - | 47.1 | 32.7 | 81.2 | 69.0 | 43.0 |
| FastMETRO [5] ECCV'22 | 3D vertices | - | 52.2 | 33.7 | 84.1 | 73.5 | 44.6 |
| VisDB [56] ECCV'22 | 3D vertices | - | 51.0 | 34.5 | 85.5 | 73.5 | 44.9 |
| Ours | Virtual marker | 58.0 | 47.3 | 32.0 | 77.9 | 67.5 | 41.3 |

Table 2. Comparison to the state-of-the-arts on H3.6M [15] and 3DPW [52] datasets. † means using temporal cues. The methods are not strictly comparable because they may have different backbones and training datasets. We provide the numbers only to show proof-of-concept results.
| Method | Intermediate Representation | MPVE↓ | MPJPE↓ | PA-MPJPE↓ |
|---|---|---|---|---|
| HMR [18] CVPR'18 | - | 85.1 | 73.6 | 55.4 |
| BodyNet [50] ECCV'18 | Skel. + Seg. | 65.8 | - | - |
| GraphCMR [25] CVPR'19 | 3D vertices | 103.2 | 87.4 | 63.2 |
| SPIN [24] ICCV'19 | - | 82.3 | 66.7 | 43.7 |
| DecoMR [60] CVPR'20 | IUV image | 68.9 | 52.0 | 43.0 |
| Pose2Mesh [7] ECCV'20 | 3D skeleton | 68.8 | 56.6 | 39.6 |
| PC-HMR [36] AAAI'21 | 3D skeleton | 59.8 | 51.7 | 37.9 |
| * DynaBOA [13] TPAMI'22 | - | 70.7 | 55.2 | 34.0 |
| Ours | Virtual marker | 44.7 | 36.9 | 28.9 |

Table 3. Comparison to the state-of-the-arts on SURREAL [51] dataset. * means training on the test split with 2D supervisions. "Skel. + Seg." means using skeleton and segmentation together.

4.4. Ablation study

| No. | Intermediate Representation | MPVE↓ (H3.6M) | MPVE↓ (SURREAL) |
|---|---|---|---|
| (a) | Skeleton | 64.4 | 53.6 |
| (b) | Rand virtual marker | 63.0 | 50.1 |
| (c) | Virtual marker | 58.0 | 44.7 |

Table 4. Ablation study of the virtual marker representation for our approach on H3.6M and SURREAL datasets. "Skeleton" means the sparse landmark joint representation is used. "Rand virtual marker" means the virtual markers are randomly selected from all the vertices without learning. (c) is our method, where the learned virtual markers are used.

Virtual marker representation. We compare our method to two baselines in Table 4. First, in baseline (a), we replace the virtual markers of our method with the skeleton representation. The rest are kept the same as ours (c). Our method achieves a much lower MPVE than the baseline (a), demonstrating that the virtual markers help to estimate body shapes more accurately than the skeletons. In baseline (b), we randomly sample 64 from the 6890 mesh vertices as virtual markers. We repeat the experiment five times and report the average number. We can see that the result is worse than ours, which is because the randomly selected vertices may not be expressive enough to reconstruct the other vertices or cannot be accurately detected from images as they lack distinguishable visual patterns. The results validate the effectiveness of our learning strategy.

Figure 4. Mesh estimation results of different methods on the H3.6M test set (columns: Image, Pose2Mesh, Ours, GT). Our method with the virtual marker representation gets better shape estimation results than Pose2Mesh, which uses the skeleton representation. Note the waistline of the body and the thickness of the arm.

Figure 5. Visualization of the learned virtual markers for different numbers $K = 16, 32, 96$, from left to right, respectively.

Figure 1 shows some qualitative results on the SURREAL test set. The meshes estimated by the baseline which uses the skeleton representation, i.e. Pose2Mesh [7], have inaccurate body shapes. This is reasonable because the skeleton is oversimplified and has very limited capability to recover shapes. Instead, it implicitly learns a mean shape for the whole training dataset. In contrast, the mesh estimated by using virtual markers has much better quality due to its strong representation power and can therefore handle different body shapes elegantly. Figure 4 also shows some qualitative results on the H3.6M test set. For clarity, we draw the intermediate representation (blue balls) in it as well.
| K | GT MPVE↓ | GT MPJPE↓ | Det MPVE↓ | Det MPJPE↓ |
|---|---|---|---|---|
| 16 | 46.8 | 39.8 | 58.7 | 47.8 |
| 32 | 20.1 | 14.2 | 58.2 | 48.3 |
| 64 | 11.0 | 7.5 | 58.0 | 47.3 |
| 96 | 9.9 | 5.6 | 59.6 | 48.2 |

Table 5. Ablation study of the different number of virtual markers $(K)$ on H3.6M [15] dataset. (GT) Mesh reconstruction results when GT 3D positions of the virtual markers are used in objective (1). (Det) Mesh estimation results obtained by our proposed framework when we use different numbers of virtual markers $(K)$.

Number of virtual markers. We evaluate how the number of virtual markers affects estimation quality on the H3.6M [15] dataset. Figure 5 visualizes the learned virtual markers, which are all located on the body surface and close to the extreme points of the mesh. This is expected, as mentioned in Section 3.1. Table 5 (GT) shows the mesh reconstruction results when we have GT 3D positions of the virtual markers in objective (1). When we increase the number of virtual markers, both the mesh reconstruction error (MPVE) and the regressed landmark joint error (MPJPE) steadily decrease. This is expected because using more virtual markers improves the representation power. However, using more virtual markers cannot guarantee smaller estimation errors when we need to estimate the virtual marker positions from images, as in our method. This is because the additional virtual markers may have large estimation errors which affect the mesh estimation result. The results are shown in Table 5 (Det). Increasing the number of virtual markers $K$ steadily reduces the MPVE errors when $K$ is smaller than 96. However, if we keep increasing $K$, the error begins to increase. This is mainly because some of the newly introduced virtual markers are difficult to detect from images and therefore bring errors to mesh estimation.

Figure 6. Mesh estimation comparison results when using (a) the fixed coefficient matrix $\widetilde{\mathbf{A}}^{sym}$ and (b) the updated $\hat{\mathbf{A}}$. Please zoom in to better see the details.

Coefficient matrix. We compare our method to a baseline which uses the fixed coefficient matrix $\widetilde{\mathbf{A}}^{sym}$. We show the qualitative comparison in Figure 6. We can see that the mesh estimated with a fixed coefficient matrix (a) has a mostly correct pose and shape, but there are also some artifacts on the mesh, while using the updated coefficient matrix (b) gives better mesh estimation results. As shown in Table 6, using a fixed coefficient matrix gets larger MPVE and MPJPE errors than using the updated coefficient matrix. This is caused by the estimation errors of virtual markers when occlusion happens, which is inevitable since the virtual markers on the back will be self-occluded by the front body. As a result, inaccurate marker positions would bring large errors to the final mesh estimates if we directly use the fixed matrix.
The" + } + ] + } + ], + "index": 36 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 313, + 757 + ], + "type": "text", + "content": "540" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 82, + 105, + 150 + ], + "blocks": [ + { + "bbox": [ + 56, + 82, + 105, + 150 + ], + "lines": [ + { + "bbox": [ + 56, + 82, + 105, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 82, + 105, + 150 + ], + "type": "image", + "image_path": "1466ae8ff36d2cb4d89cf6d32c584932755aa511355777bf8a4c9ac1680f2ec3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 107, + 82, + 156, + 149 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 156, + 149 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 156, + 149 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 156, + 149 + ], + "type": "image", + "image_path": "c09ecdf5e66aeac82f858beefb5178321d062416a75d2827b29e4e993cebabbd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 159, + 82, + 201, + 149 + ], + "blocks": [ + { + "bbox": [ + 159, + 82, + 201, + 149 + ], + "lines": [ + { + "bbox": [ + 159, + 82, + 201, + 149 + ], + "spans": [ + { + "bbox": [ + 159, + 82, + 201, + 149 + ], + "type": "image", + "image_path": "b347b4823adb72f1beb89e53b693ee0f13a218c96f5a781942ec2f82522704de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 205, + 82, + 247, + 149 + ], + "blocks": [ + { + "bbox": [ + 205, + 82, + 247, + 149 + ], + "lines": [ + { + "bbox": [ + 205, + 82, + 247, + 149 + ], + "spans": [ + { + "bbox": [ + 205, + 82, + 247, + 149 + ], + "type": "image", + "image_path": "671c4765f76e5082ec0a322174d690ced937e8c119ae69e2ba70eeb4e1af914a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 250, + 82, + 294, + 149 + ], + "blocks": [ + { + "bbox": [ + 250, + 82, + 294, + 149 + ], + "lines": [ + { + "bbox": [ + 250, + 82, + 294, + 149 + ], + "spans": [ + { + "bbox": [ + 250, + 82, + 294, + 149 + ], + "type": "image", + "image_path": "91221dcf2a8def63b76923650cc63a8b12304b472cd57d79ba610a8f2dfea940.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 298, + 82, + 342, + 149 + ], + "blocks": [ + { + "bbox": [ + 298, + 82, + 342, + 149 + ], + "lines": [ + { + "bbox": [ + 298, + 82, + 342, + 149 + ], + "spans": [ + { + "bbox": [ + 298, + 82, + 342, + 149 + ], + "type": "image", + "image_path": "46828dff4792f15e0d3078870b51e27a8d808a785bca38b4597abc521593d694.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 345, + 82, + 384, + 149 + ], + "blocks": [ + { + "bbox": [ + 345, + 82, + 384, + 149 + ], + "lines": [ + { + "bbox": [ + 345, + 82, + 384, + 149 + ], + "spans": [ + { + "bbox": [ + 345, + 82, + 384, + 149 + ], + "type": "image", + "image_path": "0c363e8fa3f8e66654e0a9cca2f9b642d653bd3bbb3da1fa6ef432fad29dd5e7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 6 + }, + { + "type": "image", + "bbox": [ + 387, + 82, + 427, + 149 + ], + "blocks": [ + { + "bbox": [ + 387, + 82, + 427, + 149 + ], + "lines": [ + { + "bbox": [ + 387, + 82, + 427, + 149 + ], + "spans": [ + { + "bbox": [ + 387, + 82, + 427, + 149 + ], + "type": "image", + "image_path": "53bf9295864d34d05d4c8e0ecae174a83cede494dba0449491e2723a0e105885.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 436, + 76, + 539, + 152 + ], + "blocks": [ + { + "bbox": [ + 436, + 76, + 539, + 152 + ], + "lines": [ + { + "bbox": [ + 436, + 76, + 539, + 152 + ], + "spans": [ + { + "bbox": [ + 436, + 76, + 539, + 152 + ], + "type": "image", + "image_path": "7d9b766b435982a945e5a0a41cb93a99292b6733c7a9709752a1cdd802da3505.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 56, + 154, + 105, + 221 + ], + "blocks": [ + { + "bbox": [ + 56, + 154, + 105, + 221 + ], + "lines": [ + { + "bbox": [ + 56, + 154, + 105, + 221 + ], + "spans": [ + { + "bbox": [ + 56, + 154, + 105, + 221 + ], + "type": "image", + "image_path": "14c41b8498a35d95bb93c324efee4de6fa93d65e487b12f84b40d8e84a0eb762.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 231, + 545, + 254 + ], + "lines": [ + { + "bbox": [ + 46, + 231, + 545, + 254 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 545, + 254 + ], + "type": "text", + "content": "Figure 7. Top: Meshes estimated by our approach on images from 3DPW test set. The rightmost case in the dashed box shows a typical failure. Bottom: Meshes estimated by our approach on Internet images with challenging cases (extreme shapes or in a long dress)." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 107, + 154, + 156, + 221 + ], + "blocks": [ + { + "bbox": [ + 107, + 154, + 156, + 221 + ], + "lines": [ + { + "bbox": [ + 107, + 154, + 156, + 221 + ], + "spans": [ + { + "bbox": [ + 107, + 154, + 156, + 221 + ], + "type": "image", + "image_path": "dd7af12bc3988e0f0949f3dbd6a4a20820bb0cdabd3fc01e999491084d64a442.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 159, + 154, + 201, + 221 + ], + "blocks": [ + { + "bbox": [ + 159, + 154, + 201, + 221 + ], + "lines": [ + { + "bbox": [ + 159, + 154, + 201, + 221 + ], + "spans": [ + { + "bbox": [ + 159, + 154, + 201, + 221 + ], + "type": "image", + "image_path": "3e480c67116134bf80778ea845b63acd5634c368cddaab6692f4f964b863dbca.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 204, + 154, + 247, + 221 + ], + "blocks": [ + { + "bbox": [ + 204, + 154, + 247, + 221 + ], + "lines": [ + { + "bbox": [ + 204, + 154, + 247, + 221 + ], + "spans": [ + { + "bbox": [ + 204, + 154, + 247, + 221 + ], + "type": "image", + "image_path": "d94ebada5ba03a75357488123772aab188e2b33f39c5e5ace79f1a7506558026.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 251, + 154, + 295, + 221 + ], + "blocks": [ + { + "bbox": [ + 251, + 154, + 295, + 221 + ], + "lines": [ + { + "bbox": [ + 251, + 154, + 295, + 221 + ], + "spans": [ + { + "bbox": [ + 251, + 154, + 295, + 221 + ], + "type": "image", + "image_path": "9e3043cdf52b4064082f78724e98235bea1d6dc37d8371b1b1d738120157d0aa.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 298, + 154, + 343, + 220 + ], + "blocks": [ + { + "bbox": [ + 298, + 154, + 343, + 220 + ], + "lines": [ + { + "bbox": [ + 298, + 154, + 343, + 220 + ], + "spans": [ + { + "bbox": [ + 298, + 154, + 343, + 220 + ], + "type": "image", + "image_path": "eeb1139646c7446dfe1947b26b0799e68b409a8a66aa334b64ae2c55eb1e3cba.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 345, + 154, + 384, + 220 + ], + "blocks": [ + { + "bbox": [ + 345, + 154, + 384, + 220 + ], + "lines": [ + { + "bbox": [ + 345, + 154, + 384, + 220 + ], + "spans": [ + { + "bbox": [ + 345, + 154, + 384, + 220 + ], + "type": "image", + "image_path": "f404861c5961e317b732009e6f4db114f4125dde743a31ffccbdecbbfb0507a2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 388, + 154, + 427, + 220 + ], + "blocks": [ + { + "bbox": [ + 388, + 154, + 427, + 220 + ], + "lines": [ + { + "bbox": [ + 388, + 154, + 427, + 220 + ], + "spans": [ + { + "bbox": [ + 388, + 154, + 427, + 220 + ], + "type": "image", + "image_path": "8184ce0f5f56930eb841d6ef7ab4c10c460ee662953ed1b388455cf4ef0b2132.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 429, + 154, + 482, + 220 + ], + "blocks": [ + { + "bbox": [ + 429, + 154, + 482, + 220 + ], + "lines": [ + { + "bbox": [ + 429, + 154, + 482, + 220 + ], + "spans": [ + { + "bbox": [ + 429, + 154, + 482, + 220 + ], + "type": "image", + 
"image_path": "c554d8af59e261ebd3a6ab99776dc49058d643a3dd257d189b66367e7ba57236.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 484, + 154, + 538, + 221 + ], + "blocks": [ + { + "bbox": [ + 484, + 154, + 538, + 221 + ], + "lines": [ + { + "bbox": [ + 484, + 154, + 538, + 221 + ], + "spans": [ + { + "bbox": [ + 484, + 154, + 538, + 221 + ], + "type": "image", + "image_path": "d52d300af7ba7d41ce034ecd3811ccd8d0c76d3f0ab129dd6fec70a9be375d7e.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 59, + 264, + 277, + 305 + ], + "blocks": [ + { + "bbox": [ + 59, + 264, + 277, + 305 + ], + "lines": [ + { + "bbox": [ + 59, + 264, + 277, + 305 + ], + "spans": [ + { + "bbox": [ + 59, + 264, + 277, + 305 + ], + "type": "table", + "html": "
<table><tr><td>No.</td><td>Method</td><td>Fixed A^sym</td><td>Updated A</td><td>MPVE↓</td><td>MPJPE↓</td></tr><tr><td>(a)</td><td>Ours (fixed)</td><td>✓</td><td></td><td>64.7</td><td>51.6</td></tr><tr><td>(b)</td><td>Ours</td><td></td><td>✓</td><td>58.0</td><td>47.3</td></tr></table>
", + "image_path": "4a69f90cd761277b0f642a517ccd028f738e33b2d6525c661b9831b59650d747.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 313, + 287, + 346 + ], + "lines": [ + { + "bbox": [ + 47, + 313, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 47, + 313, + 287, + 346 + ], + "type": "text", + "content": "Table 6. Ablation study of the coefficient matrix for our approach on H3.6M dataset. \"fixed\" means using the fixed coefficient matrix " + }, + { + "bbox": [ + 47, + 313, + 287, + 346 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{A}}^{sym}" + }, + { + "bbox": [ + 47, + 313, + 287, + 346 + ], + "type": "text", + "content": " to reconstruct the mesh." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 374, + 289, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 374, + 289, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 374, + 289, + 625 + ], + "type": "text", + "content": "rightmost case shows a typical failure where our method has a wrong pose estimate of the left leg due to heavy occlusion. We can see that the failure is constrained to the local region and the rest of the body still gets accurate estimates. We further analyze how inaccurate virtual markers would affect the mesh estimation, i.e. when part of human body is occluded or truncated. According to the finally learned coefficient matrix " + }, + { + "bbox": [ + 46, + 374, + 289, + 625 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{A}}" + }, + { + "bbox": [ + 46, + 374, + 289, + 625 + ], + "type": "text", + "content": " of our model, we highlight the relationship weights among virtual markers and all vertices in Figure 8. We can see that our model actually learns local and sparse dependency between each vertex and the virtual markers, e.g. for each vertex, the virtual markers that contribute the most are in a near range as shown in Figure 8 (b). Therefore, in inference, if a virtual marker has inaccurate position estimation due to occlusion or truncation, the dependent vertices may have inaccurate estimates, while the rest will be barely affected. Figure 2 (right) shows more examples where occlusion or truncation occurs, and our method can still get accurate or reasonable estimates robustly. Note that when truncation occurs, our method still guesses the positions of the truncated virtual markers." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "type": "text", + "content": "Figure 7 (bottom) shows our estimated meshes on challenging cases, which indicates the strong generalization ability of our model on diverse postures and actions in natural scenes. Please refer to the supplementary for more quality results. Note that since the datasets do not provide supervision of head orientation, face expression, hands, or feet, the estimates of these parts are just in canonical poses inevitably." 
+ } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 332, + 267, + 411, + 350 + ], + "blocks": [ + { + "bbox": [ + 332, + 267, + 411, + 350 + ], + "lines": [ + { + "bbox": [ + 332, + 267, + 411, + 350 + ], + "spans": [ + { + "bbox": [ + 332, + 267, + 411, + 350 + ], + "type": "image", + "image_path": "f77b347f26e1845c88e5275ea6153d190ca64e6431719b2841008da21507bd7f.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 360, + 545, + 437 + ], + "lines": [ + { + "bbox": [ + 304, + 360, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 360, + 545, + 437 + ], + "type": "text", + "content": "Figure 8. (a) For each virtual marker (represented by a star), we highlight the top 30 most affected vertices (represented by a colored dot) based on average coefficient matrix " + }, + { + "bbox": [ + 304, + 360, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{A}}" + }, + { + "bbox": [ + 304, + 360, + 545, + 437 + ], + "type": "text", + "content": ". (b) For each vertex (dot), we highlight the top 3 virtual markers (star) that contribute the most. We can see that the dependency has a strong locality which improves the robustness when some virtual markers cannot be accurately detected." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 436, + 267, + 518, + 350 + ], + "blocks": [ + { + "bbox": [ + 436, + 267, + 518, + 350 + ], + "lines": [ + { + "bbox": [ + 436, + 267, + 518, + 350 + ], + "spans": [ + { + "bbox": [ + 436, + 267, + 518, + 350 + ], + "type": "image", + "image_path": "d9dfe504d7a86215e536183b1b742b4dce25ee797a320c746254582265d0cfa5.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 449, + 547, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 547, + 496 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 547, + 496 + ], + "type": "text", + "content": "Apart from that, most errors are due to inaccurate 3D virtual marker estimation which may be addressed using more powerful estimators or more diverse training datasets in the future." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 306, + 507, + 378, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 507, + 378, + 520 + ], + "spans": [ + { + "bbox": [ + 306, + 507, + 378, + 520 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 304, + 528, + 547, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 528, + 547, + 659 + ], + "spans": [ + { + "bbox": [ + 304, + 528, + 547, + 659 + ], + "type": "text", + "content": "In this paper, we present a novel intermediate representation Virtual Marker, which is more expressive than the prevailing skeleton representation and more accessible than physical markers. It can reconstruct 3D meshes more accurately and efficiently, especially in handling diverse body shapes. Besides, the coefficient matrix in the virtual marker representation encodes spatial relationships among mesh vertices which allows the method to implicitly explore structure priors of human body. It achieves better mesh estimation results than the state-of-the-art methods and shows advanced generalization potential in spite of its simplicity." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 306, + 670, + 404, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 670, + 404, + 683 + ], + "spans": [ + { + "bbox": [ + 306, + 670, + 404, + 683 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 712 + ], + "type": "text", + "content": "This work was supported by MOST-2022ZD0114900 and NSFC-62061136001." + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 297, + 749, + 312, + 757 + ], + "type": "text", + "content": "541" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "text", + "content": "[1] Anurag Arnab, Carl Doersch, and Andrew Zisserman. Exploiting temporal context for 3d human pose estimation in the wild. In CVPR, pages 3395-3404, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 288, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 288, + 168 + ], + "type": "text", + "content": "[2] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In ECCV, pages 561-578, 2016." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 168, + 288, + 224 + ], + "type": "text", + "content": "[3] Ronan Boulic, Pascal Becheiraz, Luc Emerging, and Daniel Thalmann. Integration of motion control techniques for virtual human and avatar real-time animation. In Proceedings of the ACM symposium on Virtual reality software and technology, pages 111-118, 1997." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 257 + ], + "type": "text", + "content": "[4] Yuansi Chen, Julien Mairal, and Zaid Harchaoui. Fast and robust archetypal analysis for representation learning. In CVPR, pages 1478-1485, 2014." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 288, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 291 + ], + "type": "text", + "content": "[5] Junhyeong Cho, Kim Youwang, and Tae-Hyun Oh. Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In ECCV, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 291, + 288, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 288, + 334 + ], + "type": "text", + "content": "[6] Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Beyond static features for temporally consistent 3d human pose and shape from a video. In CVPR, pages 1964-1973, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 335, + 288, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 288, + 378 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 288, + 378 + ], + "type": "text", + "content": "[7] Hongsuk Choi, Gyeongsik Moon, and Kyoung Mu Lee. Pose2mesh: Graph convolutional network for 3d human pose and mesh recovery from a 2d human pose. In ECCV, pages 769-787, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 53, + 380, + 288, + 423 + ], + "type": "text", + "content": "[8] Hongsuk Choi, Gyeongsik Moon, JoonKyu Park, and Kyoung Mu Lee. Learning to estimate robust 3d human mesh from in-the-wild crowded scenes. In CVPR, pages 1475-1484, June 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 424, + 288, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 424, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 53, + 424, + 288, + 468 + ], + "type": "text", + "content": "[9] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J Black. Monocular expressive body regression through body-driven attention. In ECCV, pages 20-40, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 468, + 288, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 468, + 288, + 512 + ], + "spans": [ + { + "bbox": [ + 47, + 468, + 288, + 512 + ], + "type": "text", + "content": "[10] Hai Ci, Mingdong Wu, Wentao Zhu, Xiaoxuan Ma, Hao Dong, Fangwei Zhong, and Yizhou Wang. Gfpose: Learning 3d human pose prior with gradient fields. arXiv preprint arXiv:2212.08641, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 514, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 288, + 557 + ], + "type": "text", + "content": "[11] Enric Corona, Gerard Pons-Moll, Guillem Alenyà, and Francesc Moreno-Noguer. Learned vertex descent: a new direction for 3d human model fitting. In ECCV, pages 146-165. Springer, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 558, + 288, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 558, + 288, + 579 + ], + "spans": [ + { + "bbox": [ + 47, + 558, + 288, + 579 + ], + "type": "text", + "content": "[12] Adele Cutler and Leo Breiman. Archetypal analysis. Technometrics, 36(4):338-347, 1994." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 580, + 288, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 580, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 580, + 288, + 624 + ], + "type": "text", + "content": "[13] Shanyan Guan, Jingwei Xu, Michelle Z He, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Out-of-domain human mesh reconstruction via dynamic bilevel online adaptation. IEEE TPAMI, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 624, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 624, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 47, + 624, + 288, + 669 + ], + "type": "text", + "content": "[14] Yinghao Huang, Federica Bogo, Christoph Lassner, Angjoo Kanazawa, Peter V Gehler, Javier Romero, Ijaz Akhter, and Michael J Black. Towards accurate marker-less human shape and pose estimation over time. In 3DV, pages 421-430, 2017." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 288, + 713 + ], + "type": "text", + "content": "[15] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE TPAMI, 36(7):1325-1339, 2013." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 546, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 546, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 546, + 106 + ], + "type": "text", + "content": "[16] Karim Iskakov, Egor Burkov, Victor Lempitsky, and Yury Malkov. Learnable triangulation of human pose. In ICCV, pages 7718-7727, 2019." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 107, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 547, + 139 + ], + "type": "text", + "content": "[17] Ian T Jolliffe. Principal components in regression analysis. In Principal component analysis, pages 129-155. Springer, 1986." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 140, + 546, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 546, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 546, + 174 + ], + "type": "text", + "content": "[18] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, pages 7122-7131, 2018." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 175, + 547, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 547, + 207 + ], + "type": "text", + "content": "[19] Angjoo Kanazawa, Jason Y Zhang, Panna Felsen, and Jitendra Malik. Learning 3d human dynamics from video. In CVPR, pages 5614-5623, 2019." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 208, + 547, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 547, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 547, + 240 + ], + "type": "text", + "content": "[20] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In CVPR, pages 1715-1725, June 2022." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 241, + 546, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 546, + 264 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 546, + 264 + ], + "type": "text", + "content": "[21] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "type": "text", + "content": "[22] Muhammed Kocabas, Nikos Athanasiou, and Michael J Black. Vibe: Video inference for human body pose and shape estimation. In CVPR, pages 5253-5263, 2020." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 298, + 546, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 546, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 546, + 342 + ], + "type": "text", + "content": "[23] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. Pare: Part attention regressor for 3d human body estimation. In ICCV, pages 11127-11137, October 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 342, + 546, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 342, + 546, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 342, + 546, + 387 + ], + "type": "text", + "content": "[24] Nikos Kolotouros, Georgios Pavlakos, Michael J Black, and Kostas Daniilidis. Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In ICCV, pages 2252-2261, 2019." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 388, + 546, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 546, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 546, + 421 + ], + "type": "text", + "content": "[25] Nikos Kolotouros, Georgios Pavlakos, and Kostas Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In CVPR, pages 4501-4510, 2019." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 422, + 546, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 546, + 455 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 546, + 455 + ], + "type": "text", + "content": "[26] Nikos Kolotouros, Georgios Pavlakos, Dinesh Jayaraman, and Kostas Daniilidis. Probabilistic modeling for human mesh recovery. In ICCV, pages 11605-11614, October 2021." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 546, + 499 + ], + "type": "text", + "content": "[27] Christoph Lassner, Javier Romero, Martin Kiefel, Federica Bogo, Michael J Black, and Peter V Gehler. Unite the people: Closing the loop between 3d and 2d human representations. In CVPR, pages 6050-6059, 2017." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 500, + 546, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 546, + 544 + ], + "type": "text", + "content": "[28] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. Hybrik: A hybrid analytical-neural inverse kinematics solution for 3d human pose and shape estimation. In CVPR, pages 3383-3393, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "type": "text", + "content": "[29] Yong-Lu Li, Liang Xu, Xinpeng Liu, Xijie Huang, Yue Xu, Shiyi Wang, Hao-Shu Fang, Ze Ma, Mingyang Chen, and Cewu Lu. Pastanet: Toward human activity knowledge engine. In CVPR, pages 382-391, 2020." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 590, + 546, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 546, + 632 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 546, + 632 + ], + "type": "text", + "content": "[30] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. Cliff: Carrying location information in full frames into human pose and shape estimation. In ECCV, 2022." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 634, + 546, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 634, + 546, + 667 + ], + "spans": [ + { + "bbox": [ + 307, + 634, + 546, + 667 + ], + "type": "text", + "content": "[31] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, pages 1954-1963, 2021." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 669, + 546, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 669, + 546, + 690 + ], + "spans": [ + { + "bbox": [ + 307, + 669, + 546, + 690 + ], + "type": "text", + "content": "[32] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In ICCV, pages 12939-12948, 2021." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 691, + 546, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 546, + 713 + ], + "type": "text", + "content": "[33] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 313, + 757 + ], + "type": "text", + "content": "542" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "Zitnick. Microsoft coco: Common objects in context. In ECCV, pages 740-755, 2014." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 288, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 288, + 128 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 288, + 128 + ], + "type": "text", + "content": "[34] Matthew Loper, Naureen Mahmood, and Michael J Black. Mosh: Motion and shape capture from sparse markers. TOG, 33(6):1-13, 2014." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 129, + 288, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 161 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 161 + ], + "type": "text", + "content": "[35] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. TOG, 34(6):1-16, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 162, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 162, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 162, + 288, + 205 + ], + "type": "text", + "content": "[36] Tianyu Luan, Yali Wang, Junhao Zhang, Zhe Wang, Zhipeng Zhou, and Yu Qiao. Pc-hmr: Pose calibration for 3d human mesh recovery from 2d images/videos. In AAAI, pages 2269-2276, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 205, + 288, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 288, + 250 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 288, + 250 + ], + "type": "text", + "content": "[37] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, pages 506-516, 2017." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 251, + 288, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 251, + 288, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 251, + 288, + 293 + ], + "type": "text", + "content": "[38] Gyeongsik Moon and Kyoung Mu Lee. I2l-meshnet: Imageto-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In ECCV, pages 752-768, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 294, + 288, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 288, + 338 + ], + "type": "text", + "content": "[39] Mohamed Omran, Christoph Lassner, Gerard Pons-Moll, Peter Gehler, and Bernt Schiele. Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In 3DV, pages 484-494. IEEE, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 338, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 338, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 338, + 288, + 392 + ], + "type": "text", + "content": "[40] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed AA Osman, Dimitrios Tzionas, and Michael J Black. Expressive body capture: 3d hands, face, and body from a single image. In CVPR, pages 10975-10985, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 393, + 288, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 288, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 288, + 425 + ], + "type": "text", + "content": "[41] Liliana Lo Presti and Marco La Cascia. 
3d skeleton-based human action classification: A survey. Pattern Recognition, 53:130-147, 2016." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 426, + 288, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 288, + 459 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 288, + 459 + ], + "type": "text", + "content": "[42] Haibo Qiu, Chunyu Wang, Jingdong Wang, Naiyan Wang, and Wenjun Zeng. Cross view fusion for 3d human pose estimation. In ICCV, pages 4342-4351, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 460, + 288, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 460, + 288, + 503 + ], + "spans": [ + { + "bbox": [ + 48, + 460, + 288, + 503 + ], + "type": "text", + "content": "[43] Jiajun Su, Chunyu Wang, Xiaoxuan Ma, Wenjun Zeng, and Yizhou Wang. Virtualpose: Learning generalizable 3d human pose models from virtual data. In ECCV, pages 55-71. Springer, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 504, + 288, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 504, + 288, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 504, + 288, + 536 + ], + "type": "text", + "content": "[44] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, pages 5693-5703, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 537, + 288, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 288, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 288, + 568 + ], + "type": "text", + "content": "[45] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, pages 529-545, 2018." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 570, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 288, + 602 + ], + "type": "text", + "content": "[46] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J Black, and Tao Mei. Monocular, one-stage, regression of multiple 3d people. In ICCV, pages 11179-11188, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 603, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 288, + 646 + ], + "type": "text", + "content": "[47] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In ICCV, pages 5349-5358, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 680 + ], + "type": "text", + "content": "[48] Hanyue Tu, Chunyu Wang, and Wenjun Zeng. Voxelpose: Towards multi-camera 3d human pose estimation in wild environment. In ECCV, pages 197-212. Springer, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "type": "text", + "content": "[49] Hsiao-Yu Tung, Hsiao-Wei Tung, Ersin Yumer, and Katerina Fragkiadaki. Self-supervised learning of motion capture. In NIPS, volume 30, 2017." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 633 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[50] Gul Varol, Duygu Ceylan, Bryan Russell, Jimei Yang, Ersin Yumer, Ivan Laptev, and Cordelia Schmid. Bodynet: Volumetric inference of 3d human body shapes. In ECCV, pages 20-36, 2018." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 118, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 150 + ], + "type": "text", + "content": "[51] Gul Varol, Javier Romero, Xavier Martin, Naureen Mahmood, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning from synthetic humans. In CVPR, pages 109-117, 2017." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 152, + 546, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 546, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 546, + 196 + ], + "type": "text", + "content": "[52] Timo von Marcard, Roberto Henschel, Michael J Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3d human pose in the wild using imus and a moving camera. In ECCV, pages 601-617, 2018." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 197, + 547, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 547, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 547, + 240 + ], + "type": "text", + "content": "[53] Ziniu Wan, Zhengjia Li, Maoqing Tian, Jianbo Liu, Shuai Yi, and Hongsheng Li. Encoder-decoder with multi-level attention for 3d human shape and pose estimation. In ICCV, pages 13033-13042, 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 546, + 274 + ], + "type": "text", + "content": "[54] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In ECCV, pages 52-67, 2018." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 276, + 546, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 546, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 546, + 308 + ], + "type": "text", + "content": "[55] Yuanlu Xu, Song-Chun Zhu, and Tony Tung. Denserac: Joint 3d pose and shape estimation by dense render-and-compare. In ICCV, pages 7760-7770, 2019." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 309, + 546, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 546, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 546, + 342 + ], + "type": "text", + "content": "[56] Chun-Han Yao, Jimei Yang, Duygu Ceylan, Yi Zhou, Yang Zhou, and Ming-Hsuan Yang. Learning visibility for robust dense human body estimation. In ECCV, 2022." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 343, + 546, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 546, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 546, + 387 + ], + "type": "text", + "content": "[57] Hang Ye, Wentao Zhu, Chunyu Wang, Rujie Wu, and Yizhou Wang. Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In ECCV, pages 142-159. Springer, 2022." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 388, + 546, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 546, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 546, + 431 + ], + "type": "text", + "content": "[58] Andrei Zanfir, Elisabeta Marinoiu, and Cristian Sminchisescu. Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In CVPR, pages 2148-2157, 2018." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 433, + 546, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 433, + 546, + 476 + ], + "spans": [ + { + "bbox": [ + 307, + 433, + 546, + 476 + ], + "type": "text", + "content": "[59] Mihai Zanfir, Andrei Zanfir, Eduard Gabriel Bazavan, William T Freeman, Rahul Sukthankar, and Cristian Sminchiescu. Thundr: Transformer-based 3d human reconstruction with markers. In ICCV, pages 12971-12980, 2021." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 546, + 510 + ], + "type": "text", + "content": "[60] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3d human mesh regression with dense correspondence. In CVPR, pages 7054-7063, 2020." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 546, + 544 + ], + "type": "text", + "content": "[61] Hongwen Zhang, Jie Cao, Guo Lu, Wanli Ouyang, and Zhenan Sun. Learning 3d human shape and pose from dense body parts. IEEE TPAMI, 44(5):2610-2627, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 546, + 589 + ], + "type": "text", + "content": "[62] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In ICCV, pages 11446-11456, 2021." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 590, + 547, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 547, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 547, + 633 + ], + "type": "text", + "content": "[63] Yifu Zhang, Chunyu Wang, Xinggang Wang, Wenyu Liu, and Wenjun Zeng. Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE TPAMI, 45(2):2613-2626, 2022." 
+ } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "spans": [ + { + "bbox": [ + 298, + 749, + 312, + 757 + ], + "type": "text", + "content": "543" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_content_list.json b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f57de36bdb239174992a7cff5aa32059adc48cc8 --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_content_list.json @@ -0,0 +1,1503 @@ +[ + { + "type": "text", + "text": "3D Human Pose Estimation with Spatio-Temporal Criss-cross Attention*", + "text_level": 1, + "bbox": [ + 122, + 130, + 854, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, Ting Yao \nHefei University of Technology, Anhui, China HiDream.ai Inc \nUniversity of Science and Technology of China, Anhui, China", + "bbox": [ + 207, + 180, + 759, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zhenhuat@foxmail.com, zhaofanqiu@gmail.com, haoyanbin@hotmail.com", + "bbox": [ + 196, + 237, + 772, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hongrc.hfut@gmail.com, tingyao.ustc@gmail.com", + "bbox": [ + 282, + 253, + 684, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 304, + 313, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent transformer-based solutions have shown great success in 3D human pose estimation. Nevertheless, to calculate the joint-to-joint affinity matrix, the computational cost has a quadratic growth with the increasing number of joints. Such drawback becomes even worse especially for pose estimation in a video sequence, which necessitates spatio-temporal correlation spanning over the entire video. In this paper, we facilitate the issue by decomposing correlation learning into space and time, and present a novel Spatio-Temporal Criss-cross attention (STC) block. Technically, STC first slices its input feature into two partitions evenly along the channel dimension, followed by performing spatial and temporal attention respectively on each partition. STC then models the interactions between joints in an identical frame and joints in an identical trajectory simultaneously by concatenating the outputs from attention layers. On this basis, we devise STCFoer by stacking multiple STC blocks and further integrate a new Structure-enhanced Positional Embedding (SPE) into STCFoer to take the structure of human body into consideration. The embedding function consists of two components: spatio-temporal convolution around neighboring joints to capture local structure, and part-aware embedding to indicate which part each joint belongs to. Extensive experiments are conducted on Human3.6M and MPI-INF-3DHP benchmarks, and superior results are reported when comparing to the state-of-the-art approaches. 
More remarkably, STCFormer achieves to-date the best published performance: $40.5\mathrm{mm}$ P1 error on the challenging Human3.6M dataset.", + "bbox": [ + 75, + 335, + 473, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 805, + 209, + 821 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D human pose estimation has attracted intensive research attention in CV community due to its great poten", + "bbox": [ + 76, + 832, + 468, + 863 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4f0d200e406775b9281a28760a9bbd17664497eecc4975874d69b3601a198ab0.jpg", + "image_caption": [ + "Figure 1. Modeling spatio-temporal correlation for 3D human pose estimation by (a) utilizing spatio-temporal attention on all joints in the entire video, (b) separating the framework into two steps that respectively capture spatial and temporal context, and (c) our Spatio-Temporal Criss-cross attention (STC), i.e., a two-pathway block that models spatial and temporal information in parallel. In the visualization of receptive field, the covered joints of each attention strategy are marked as red nodes." + ], + "image_footnote": [], + "bbox": [ + 501, + 304, + 890, + 468 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tial in numerous applications such as human-robot interaction [20, 43], virtual reality [11] and motion prediction [27, 28]. The typical monocular solution is a two-stage pipeline, which first extracts 2D keypoints by 2D human pose detectors (e.g., [7] and [41]), and then lifts 2D coordinates into 3D space [31]. Despite its simplicity, the second stage is an ill-posed problem which lacks the depth prior, and suffers from the ambiguity problem.", + "bbox": [ + 496, + 597, + 890, + 718 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To mitigate this issue, several progresses propose to aggregate the temporal cues in a video sequence to promote pose estimation by grid convolutions [15,26,35], graph convolutions [4, 47] and multi-layer perceptrons [6, 21]. Recently, Transformer structure has emerged as a dominant architecture in both NLP and CV fields [8,24,45,49], and also demonstrated high capability in modeling spatio-temporal correlation for 3D human pose estimation [13, 22, 23, 25, 48, 52, 54]. Figure 1(a) illustrates a straightforward way to exploit the transformer architecture for directly learning spatio-temporal correlation between all joints in the entire video sequence. However, the computational cost of calcu", + "bbox": [ + 496, + 719, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*This work is supported by the National Natural Science Foundation of China under Grants 61932009.", + "bbox": [ + 76, + 875, + 470, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4790", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lating the joint-to-joint affinity matrix in the self-attention has a quadratic growth along the increase of number of frames, making such solution unpractical for model training. As a result, most transformer structures employ a two-step alternative, as shown in Figure 1(b), which encodes spatial information for each frame first and then aggregates the feature sequence by temporal transformer. Note that we take spatial transformer as the frame encoder as an example in the figure. This strategy basically mines the correlation across frame-level features but seldom explores the relation between joints across different frames.", + "bbox": [ + 75, + 90, + 472, + 257 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a novel two-pathway attention mechanism, namely Spatio-Temporal Criss-cross attention (STC), that models spatial and temporal information in parallel, as depicted in Figure 1(c). Concretely, STC first slices the input joint features into two partitions evenly with respect to the channel dimension. On each partition, a Multihead Self-Attention (MSA) is implemented to encapsulate the context along space or time axis. In between, the space pathway computes the affinity between joints in each frame independently, and the time pathway correlates the identical joint moving across different frames, i.e., the trajectory. Then, STC recombines the learnt contexts from two pathways, and mixes the information across channels by MultiLayer Perceptrons (MLP). By doing so, the receptive field is like a criss cross of spatial and temporal axes, and the computational cost is $\\mathcal{O}(T^2 S) + \\mathcal{O}(TS^2)$ . That is much lower than $\\mathcal{O}(T^2 S^2)$ of fully spatio-temporal attention, where $T$ and $S$ denote the number of frames and joints, respectively.", + "bbox": [ + 75, + 265, + 472, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By stacking multiple STC blocks, we devise a new architecture — STCFormer for 3D human pose estimation. Furthermore, we delve into the crucial design of positional embedding in STCFormer in the context of pose estimation. The observations that joints in the same body part are either highly relevant (static part) or not relevant but containing moving patterns (dynamic part) motivate us to design a new Structure-enhanced Positional Embedding (SPE). SPE consists of two embedding functions for the static and dynamic part, respectively. A part-aware embedding is to describe the static part by indicating which part each joint belongs to, and a spatio-temporal convolution around neighboring joints aims to capture dynamic structure in local window.", + "bbox": [ + 75, + 545, + 472, + 743 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize the main contributions of this work as follows. First, STC is a new type of decomposed spatiotemporal attention for 3D human pose estimation in an economic and effective way. 
Second, STCFormer is a novel transformer architecture by stacking multiple STC blocks and integrating the structure-enhanced positional embedding. Extensive experiments conducted on Human3.6M and MPI-INF-3DHP datasets demonstrate that STCFormer with much fewer parameters achieves superior performances than the state-of-the-art techniques.", + "bbox": [ + 75, + 750, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 640, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Monocular 3D human pose estimation. Monocular 3D human pose estimation is to re-localize human body joints in 3D space from the input single view 2D data, i.e., image or 2D coordinates. The early works [1, 2, 17] develop various graphical or restrictive methods to explore the dependencies of human skeleton and perspective relationships across spaces. With the development of deep learning, several deep neural networks [5, 10, 19, 31, 34, 42, 44, 53] are devised for 3D human pose estimation, and can be categorized into one-stage and two-stage directions. The one-stage approaches directly regress the 3D pose from the input image, and necessitate a large number of image-pose paired data and powerful computing resources [19, 34, 42]. The two-stage methods first exploit off-the-shelf 2D pose detectors [7, 33, 41] to estimate 2D joint coordinates, and then lift the 2D coordinates into 3D space by the fully-connected network [31], grid convolutional network [5], recurrent neural network [10], or graph convolutional network [53]. Although the two-stage methods alleviate the requirement of image-pose pairs, they still heavily suffer from the depth ambiguities problem, which is intrinsically ill-posed due to the lack of depth information.", + "bbox": [ + 496, + 114, + 893, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D pose estimation from video sequence. To overcome the limitation of depth ambiguities, the advances involve temporal context from neighboring frames to improve 3D coordinates regression. For example, Pavllo et al. [35] propose a temporal fully-convolutional network (TCN) to model the local context by convoluting the neighboring frames. Later, Liu et al. [26] extend the TCN by introducing an attention mechanism to adaptively identify the significant frames/poses over a sequence. After that, Chen et al. [6] decompose the pose estimation into bone length and bone direction prediction. Instead of the aforementioned methods based on temporal aggregation, latter works [4, 16, 46] utilize the spatio-temporal graph convolutional network to model the spatial and temporal correlations across joints simultaneously.", + "bbox": [ + 496, + 448, + 893, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Transformer-based methods. In addition to the traditional convolutional networks, transformer architectures are also exploited to model spatio-temporal correlation [13,22,23,29,30,37,50,51,54]. In particular, Zheng et al. [54] design a concatenation architecture of several spatial transformer encoders and temporal transformer encoders in PoseFormer. MHFormer [23] proposes to generate multiple hypothesis representations for a pose with the spatial transformer encoder and then model multi-level global correlations with different temporal transformer blocks. StridedFormer [22] and CrossFormer [13] introduce locality by integrating the 1D temporal convolution and 1D spatial convolution, respectively. 
More recently, the joint-wise inconsistency of motion patterns is highlighted in [48, 52], and encourages to model spatial and temporal information si", + "bbox": [ + 496, + 674, + 895, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4791", + "bbox": [ + 482, + 944, + 513, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "multaneously. PATA [48] groups the joints with similar motion patterns and calculates the intra-part temporal correlation. Similarly, MixSTE [52] uses multiple separated spatial transformer blocks and temporal transformer blocks to model the spatial and temporal correlation iteratively.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work also falls into the category of transformer-based method for 3D human pose estimation. The aforementioned transformers mainly model spatial and temporal information respectively in different stages of the networks. In view that the joint motion is a state of coexistence of space and time, such separation may result in insufficient learning of moving patterns. In contrast, our STC block is a two-pathway design that models spatial and temporal dependencies in parallel, which are then mixed through MLP. Moreover, a new positional embedding function is deliberately devised to explore the local structure of human body.", + "bbox": [ + 75, + 169, + 470, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Spatio-Temporal Criss-cross Transformer", + "text_level": 1, + "bbox": [ + 76, + 351, + 449, + 369 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminary - Transformer", + "text_level": 1, + "bbox": [ + 76, + 377, + 321, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We begin this section by reviewing the transformer architecture [45] as the basis of our proposal. Transformer is a versatile representation learning architecture, and mainly consists of two components: Multi-head Self-Attention module (MSA) and Feed-Forward Network (FFN). MSA calculates the token-to-token affinity matrix and propagates the information across different tokens. Formally, given $N$ input tokens with $C$ channels, MSA is formulated as", + "bbox": [ + 75, + 402, + 468, + 522 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nM S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t m a x} \\left(\\frac {\\mathbf {Q} \\cdot \\mathbf {K} ^ {T}}{\\sqrt {C}}\\right) \\cdot \\mathbf {V}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 536, + 468, + 570 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{Q},\\mathbf{K},\\mathbf{V}\\in \\mathbb{R}^{N\\times C}$ denote the queries, keys and values obtained by linearly mapping the input tokens. Note that we omit the multi-head separation here for simplicity. FFN contains a Multi-Layer Perceptrons (MLP), i.e., a nonlinear mapping with two linear layer plus a GELU [14] activation in between. 
The output of MLP is computed by", + "bbox": [ + 75, + 584, + 468, + 675 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nM L P (\\mathbf {H}) = G E L U \\left(\\mathbf {H} \\cdot \\mathbf {W} _ {\\mathbf {1}}\\right) \\cdot \\mathbf {W} _ {\\mathbf {2}}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 150, + 689, + 468, + 704 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{H} \\in \\mathbb{R}^{N \\times C}$ is the input tokens of MLP, $\\mathbf{W}_1 \\in \\mathbb{R}^{C \\times \\hat{C}}$ and $\\mathbf{W}_2 \\in \\mathbb{R}^{\\hat{C} \\times C}$ are the projection matrices. With these, each transformer block is constructed by utilizing MSA and MLP in order with shortcut connection:", + "bbox": [ + 76, + 717, + 468, + 780 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q}, \\mathbf {K}, \\mathbf {V} = F C (L N (\\mathbf {X})) ,\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 791, + 359, + 806 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Y} = M S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) + \\mathbf {X}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 809, + 468, + 824 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Z} = M L P (L N (\\mathbf {Y})) + \\mathbf {Y},\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 827, + 359, + 842 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $FC$ is linear projection of the input tokens $\\mathbf{X}$ , and $LN$ denotes Layer Norm [3]. The output $\\mathbf{Z}$ serves as the input to the next block until the last one.", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f7ade926fdf0c5aedbee014565be6bfb3d06ec7afe0ac6780cc16ce8e6d2b1fb.jpg", + "image_caption": [ + "a)", + "Figure 2. An overview of our proposed Spatio-Temporal Crisscross Transformer (STCFormer). (a) It mainly consists of $L$ sequential STC blocks. Each block aggregates the context across tokens by spatio-temporal criss-cross attention, and non-linearly maps each token by Multi-Layer Perceptrons (MLP). (b) The architecture of our STC block and the Structure-enhanced Positional Embedding (SPE)." + ], + "image_footnote": [], + "bbox": [ + 511, + 89, + 661, + 292 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0df3cf8ca38bfb41dcf4c90200521e885db71c17c0ec70edef9fc902334e6302.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 676, + 89, + 882, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Overall Architecture", + "text_level": 1, + "bbox": [ + 500, + 407, + 694, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 2 depicts an overview of the proposed STC-Former, which mainly includes three stages: a joint-based embedding, stacked STC blocks and a regression head. The joint-based embedding projects the input 2D coordinates of each joint into feature space. STC blocks aggregate the spatio-temporal context, and update the representation of each joint. Based on the learnt features, the 3D coordinates are estimated by a regression head.", + "bbox": [ + 496, + 431, + 890, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Joint-based embedding. 
Given a 2D pose sequence as $\\mathbf{P}_{2D} \\in \\mathbb{R}^{T \\times N \\times 2}$ , where $T$ and $N$ denote the number of frames and the number of body joints in each frame, respectively, we first project $\\mathbf{P}_{2D}$ to high-dimensional embeddings by a joint-based embedding layer. This layer applies an FC layer to each 2D coordinate independently followed by a GELU activation. As such, the joint-based embedding layer produces the features with the shape of $T \\times N \\times C$ . Note that in the previous transformer [22], the embedding layer projects all joint coordinates in each frame into a single vector, reducing the computational cost of the subsequent transformer blocks while losing the spatial discrimination. Ours is different in that the spatial dimension $N$ is maintained, and the computational cost is also reduced by spatio-temporal criss-cross attention.", + "bbox": [ + 496, + 553, + 890, + 779 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "STC blocks. The STC block originates from the transformer block in Eq.(3), and replaces the original MSA layer with spatio-temporal criss-cross attention. In addition, a new positional embedding function, i.e., Structure-enhanced Positional Embedding (SPE), is integrated into the STC block for better descriptive capability of local structures. Section 3.3 and Section 3.4 will elaborate STC and SPE, respectively.", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4792", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Regression head. A liner regression head is finally established upon the STC blocks to estimate the 3D pose coordinates $\\hat{\\mathbf{P}}_{3D} \\in \\mathbb{R}^{T \\times N \\times 3}$ . The whole architecture is optimized by minimizing the Mean Squared Error (MSE) between $\\hat{\\mathbf{P}}_{3D}$ and the ground-truth 3D coordinates $\\mathbf{P}_{3D}$ as", + "bbox": [ + 76, + 90, + 468, + 167 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\left\\| \\hat {\\mathbf {P}} _ {3 D} - \\mathbf {P} _ {3 D} \\right\\| ^ {2}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 175, + 468, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Spatio-Temporal Criss-cross Attention", + "text_level": 1, + "bbox": [ + 76, + 208, + 408, + 224 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "STC aims to model the spatio-temporal dependencies between joints in an efficient way to avoid the quadratic computation cost of fully spatio-temporal attention. Inspired by the group contextualization strategy [12] which separates the channels into several paralleled groups and applies different feature contextualization operations to them respectively, we propose to capture the spatial and temporal context on different channels in parallel. 
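To make the three-stage pipeline of Section 3.2 concrete, here is a minimal sketch of the joint-based embedding, the linear regression head and the MSE objective of Eq.(4). The module names are illustrative placeholders rather than the released implementation; only the shapes and layer types follow the text (2D input, C channels kept per joint, 3D output).

import torch
import torch.nn as nn

class JointEmbedding(nn.Module):
    # projects each 2D joint coordinate to a C-dim feature, keeping the (T, N) layout
    def __init__(self, C):
        super().__init__()
        self.fc = nn.Linear(2, C)
        self.act = nn.GELU()

    def forward(self, p2d):            # p2d: (B, T, N, 2)
        return self.act(self.fc(p2d))  # (B, T, N, C)

class RegressionHead(nn.Module):
    # linear head that regresses the 3D coordinates from the learnt features
    def __init__(self, C):
        super().__init__()
        self.fc = nn.Linear(C, 3)

    def forward(self, feat):           # feat: (B, T, N, C)
        return self.fc(feat)           # (B, T, N, 3)

def pose_loss(pred_3d, gt_3d):
    # Eq.(4): mean squared error between the estimated and ground-truth 3D poses
    return ((pred_3d - gt_3d) ** 2).mean()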
Different from the axial convolution in [12,36,38], we exploit axis-specific multihead self-attention in STC for spatial or temporal context, which is more powerful for correlation learning.", + "bbox": [ + 75, + 231, + 468, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Concretely, the input embedding $\\mathbf{X} \\in \\mathbb{R}^{T \\times N \\times C}$ are firstly mapped to queries $\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times C}$ , keys $\\mathbf{K} \\in \\mathbb{R}^{T \\times N \\times C}$ , and values $\\mathbf{V} \\in \\mathbb{R}^{T \\times N \\times C}$ , which are then evenly divided into two groups along the channel dimension. For notation clarity, we denote the divided feature matrix as time group $\\{\\mathbf{Q}_T, \\mathbf{K}_T, \\mathbf{V}_T\\}$ and space group $\\{\\mathbf{Q}_S, \\mathbf{K}_S, \\mathbf{V}_S\\}$ . Next, the temporal and spatial correlations are calculated in two separate self-attention modules.", + "bbox": [ + 76, + 397, + 468, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Temporal correlation represents the relation between the joints in an identical trajectory moving across different frames. To achieve this, we implement an axis-specific MSA, named $MSA_{T}$ , which computes the attention affinities in Eq.(1) between joints across the temporal dimension. Hence, the output of temporal attention is measured as", + "bbox": [ + 75, + 518, + 468, + 609 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} _ {\\mathbf {T}} = M S A _ {T} \\left(\\mathbf {Q} _ {\\mathbf {T}}, \\mathbf {K} _ {\\mathbf {T}}, \\mathbf {V} _ {\\mathbf {T}}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 619, + 468, + 635 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Spatial correlation is the connection between joints in an identical frame. These joints indicate different body parts in one frame, which are intrinsically relevant due to the prior of body skeleton. Similar to temporal attention, we devise $MSAS_{S}$ as an axis-specific MSA component on spatial dimension. Therefore, the output of spatial attention is formulated as", + "bbox": [ + 75, + 643, + 468, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} _ {\\mathbf {S}} = M S A _ {S} \\left(\\mathbf {Q} _ {\\mathbf {S}}, \\mathbf {K} _ {\\mathbf {S}}, \\mathbf {V} _ {\\mathbf {S}}\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 760, + 468, + 775 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The above two correlation modules process in parallel and follow the self-attention regime for feature contextualization. They compute the token-to-token affinities by contextualizing from a specific axial perspective, and complement to each other. Thus, we concatenate the outputs from both attention layers along the channel dimension:", + "bbox": [ + 75, + 785, + 468, + 875 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} = c a t \\left(\\mathbf {H} _ {\\mathbf {T}}, \\mathbf {H} _ {\\mathbf {S}}\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 886, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b0dde1ddefd622e779fec833910527d81fed9daa5fd23846e6298ad8727f8300.jpg", + "image_caption": [ + "Figure 3. (a) The coefficient matrix of the motion trajectory of different joints. (b) The body joints are divided into five parts, denoted as $g_{*}$ . 
The part with high/low relevance is colored as light/dark blue, respectively. The motion data is generated by actor S6 performing greeting action in the training set of Human3.6M." + ], + "image_footnote": [], + "bbox": [ + 504, + 88, + 890, + 228 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $cat$ performs the concatenation. The resultant receptive field of STC is like a criss cross of spatial and temporal axes, and stacking multiple STC blocks is able to approximate the fully spatio-temporal attention.", + "bbox": [ + 496, + 305, + 890, + 366 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Structure-enhanced Positional Embedding", + "text_level": 1, + "bbox": [ + 500, + 375, + 861, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "One of the crucial factors in the transformer is the positional embedding, which indicates the position of each token absolutely or relatively. For the positional embedding function in STCFormer, we delve into the inherent property of joints, i.e., the local structure, and propose Structure-enhanced Positional Embedding (SPE). Figure 3 depicts the motivation of SPE. Here, we group the body joints into five parts according to the dynamic chain structure of the human body:", + "bbox": [ + 496, + 398, + 890, + 518 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} g_{0} = \\{\\text{hip}, \\text{spine}, \\text{thorax}, \\text{neck}, \\text{head}\\} \\\\ g_{1} = \\{\\text{right\\_hip}, \\text{right\\_knee}, \\text{right\\_feet}\\} \\\\ g_{2} = \\{\\text{left\\_hip}, \\text{left\\_knee}, \\text{left\\_feet}\\} \\tag {8} \\\\ g_{3} = \\{\\text{right\\_shoulder}, \\text{right\\_elbow}, \\text{right\\_wrist}\\} \\\\ g_{4} = \\{\\text{left\\_shoulder}, \\text{left\\_elbow}, \\text{left\\_wrist}\\} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 535, + 529, + 890, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The trajectories of joints in the static part ( $g_0, g_3$ and $g_4$ in the figure) are highly relevant. We devise a part-aware positional embedding to indicate which part each joint belongs to. The joints in the same part are attached with the same embedding vector. In particular, a learnable dictionary is constructed to assign embedding vectors to different joints according to their group index. Given the group index $\mathbf{g} \in [0,1,2,3,4]^{T \times N}$ of joints, the learnable dictionary $\mathbf{D} \in \mathbb{R}^{5 \times \frac{C}{2}}$ converts the indexes to embedding vectors as", + "bbox": [ + 496, + 622, + 890, + 758 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S P E} _ {1} = \\mathbf {D} (\\mathbf {g}). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 768, + 890, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Nevertheless, in the dynamic part, i.e., the parts with relative movements ( $g_{1}, g_{2}$ in the figure), the trajectories of joints are not relevant. Simply assigning the same embedding vector to these joints ignores the motion patterns in the dynamic part. Hence, we propose to exploit a spatio-temporal convolution around the neighboring joints to capture the local structure. 
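The part-aware embedding SPE_1 of Eq.(9) only needs an integer part index per joint and a learnable five-entry dictionary. A minimal sketch follows; the joint-name-to-part mapping mirrors the grouping in Eq.(8), but the concrete joint ordering is an assumption that depends on the 2D detector, so treat it as illustrative.

import torch
import torch.nn as nn

# grouping of Eq.(8); the joint order of the 2D detector decides how this is indexed
PART_OF = {
    "hip": 0, "spine": 0, "thorax": 0, "neck": 0, "head": 0,
    "right_hip": 1, "right_knee": 1, "right_feet": 1,
    "left_hip": 2, "left_knee": 2, "left_feet": 2,
    "right_shoulder": 3, "right_elbow": 3, "right_wrist": 3,
    "left_shoulder": 4, "left_elbow": 4, "left_wrist": 4,
}

def part_index(joint_names, B, T):
    # g in Eq.(9): one part id per joint, broadcast over the batch and time axes
    g = torch.tensor([PART_OF[name] for name in joint_names])  # (N,)
    return g.view(1, 1, -1).expand(B, T, -1)                   # (B, T, N)

class SPE1(nn.Module):
    def __init__(self, C):
        super().__init__()
        self.dictionary = nn.Embedding(5, C // 2)  # D in Eq.(9)

    def forward(self, g):              # g: (B, T, N) integer tensor
        return self.dictionary(g)      # (B, T, N, C//2)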
Formally, given the values $\mathbf{V} \in \mathbb{R}^{T \times N \times \frac{C}{2}}$ in", + "bbox": [ + 496, + 795, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4793", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1 Pseudo-code of STC with SPE (PyTorch-like)" + ], + "code_body": "# x: input tensor of shape (B, T, N, C)\n# p: part index tensor of shape (B, T, N), values in [0, 4]\n# MSA(q, k, v, dim): axis-specific multi-head self-attention along the given dim\nimport torch\nimport torch.nn as nn\n\nclass STC(nn.Module):\n    def __init__(self, C):\n        super().__init__()\n        self.linear = nn.Linear(C, 3 * C)\n        # SPE_1: part-aware embedding, one learnable vector per body part\n        self.embedding1 = nn.Embedding(5, C // 2)\n        # SPE_2: depth-wise 3x3 spatio-temporal convolution\n        self.embedding2 = nn.Conv2d(C // 2, C // 2, kernel_size=3, padding=1, groups=C // 2)\n\n    def spe2(self, v):\n        # v is channel-last, so permute around the channel-first Conv2d\n        return self.embedding2(v.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)\n\n    def forward(self, x, p):\n        Q, K, V = self.linear(x).chunk(3, dim=3)\n        Q_t, Q_s = Q.chunk(2, dim=3)  # split channels into time/space groups\n        K_t, K_s = K.chunk(2, dim=3)\n        V_t, V_s = V.chunk(2, dim=3)\n        H_t = MSA(Q_t, K_t, V_t, dim=1)  # temporal attention, Eq.(5)\n        H_s = MSA(Q_s, K_s, V_s, dim=2)  # spatial attention, Eq.(6)\n        H_t = H_t + self.embedding1(p) + self.spe2(V_t)\n        H_s = H_s + self.embedding1(p) + self.spe2(V_s)\n        H = torch.cat((H_t, H_s), dim=3)  # Eq.(11)\n        return H", + "guess_lang": "python", + "bbox": [ + 76, + 111, + 459, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "STC block, we treat $\mathbf{V}$ as a 2D (i.e., space and time) feature map, and utilize a 2D convolution on the neighboring joints:", + "bbox": [ + 75, + 405, + 468, + 436 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S P E} _ {2} (\\mathbf {V}) = \\operatorname {c o n v 2 d} (\\mathbf {V}), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 449, + 468, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $conv2d$ is a $3 \times 3$ convolution operation. Although the two SPE functions are designed respectively for the static part and the dynamic part, we utilize the two functions concurrently on all joints, leaving out the requirement of static/dynamic judgment. The duet of the two SPE functions is able to deal with parts with various moving patterns.", + "bbox": [ + 75, + 467, + 468, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By injecting the proposed SPE functions into STC, the equation of STC is reformulated as", + "bbox": [ + 75, + 558, + 468, + 588 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbf {H} _ {\\mathbf {T}} = M S A _ {T} \\left(\\mathbf {Q} _ {\\mathbf {T}}, \\mathbf {K} _ {\\mathbf {T}}, \\mathbf {V} _ {\\mathbf {T}}\\right) + \\mathbf {S P E} _ {1} + \\mathbf {S P E} _ {2} (\\mathbf {V} _ {\\mathbf {T}}), \\\\ \\mathbf {H} _ {\\mathbf {S}} = M S A _ {S} \\left(\\mathbf {Q} _ {\\mathbf {S}}, \\mathbf {K} _ {\\mathbf {S}}, \\mathbf {V} _ {\\mathbf {S}}\\right) + \\mathbf {S P E} _ {1} + \\mathbf {S P E} _ {2} (\\mathbf {V} _ {\\mathbf {S}}), \\tag {11} \\\\ \\mathbf {H} = c a t \\left(\\mathbf {H} _ {\\mathbf {T}}, \\mathbf {H} _ {\\mathbf {S}}\\right). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 594, + 468, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation. The proposed STC plus SPE in Eq.(11) can be readily implemented with a few lines of code in Python. We detail an example of the code in Algorithm 1 based on the PyTorch platform. Here, we reuse the pre-defined MSA and MLP functions of the standard transformer. The SPE is implemented by constructing the default Embedding and Conv2d layers.", + "bbox": [ + 75, + 650, + 468, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. 
Experiments", + "text_level": 1, + "bbox": [ + 76, + 768, + 209, + 786 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We comprehensively evaluate the proposed STCFoer architecture on two large-scale datasets, i.e., Human3.6M [18] and MPI-INF-3DHP [32].", + "bbox": [ + 75, + 794, + 468, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Datasets and Evaluation Metrics", + "text_level": 1, + "bbox": [ + 76, + 847, + 362, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Human3.6M is currently the most popular benchmark for indoor 3D human pose estimation, which contains 11", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "subjects performing 15 typical actions, leading to 3.6 million video frames in total. Following the standard protocol, we use subjects 1, 5, 6, 7, and 8 for training, and subjects 9 and 11 for evaluation. The Mean Per Joint Position Error (MPJPE) is used to measure the error under two protocols: Protocol 1 (referred to as P1) computes MPJPE between the estimated pose and the ground truth after aligning their root joints (hip); Protocol 2 (referred to as P2) calculates Procrustes-MPJPE, where the ground truth and the pose prediction are further aligned through a rigid transformation. We also compute the MPJPE distribution of pose to evaluate the overall precision of the reconstructed skeletons. MPI-INF-3DHP is a recently proposed large-scale dataset, which consists of three scenes, i.e., green screen, non-green screen, and outdoor. By using 14 cameras, the dataset records 8 actors performing 8 activities for the training set and 7 activities for evaluation. Following the previous works [6, 39, 54], we adopt the MPJPE (P1), percentage of correct keypoints (PCK) with $150\\mathrm{mm}$ , and area under the curve (AUC) results as the evaluation metrics.", + "bbox": [ + 496, + 90, + 890, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Implementation Details", + "text_level": 1, + "bbox": [ + 498, + 404, + 718, + 420 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our model is implemented with PyTorch toolkit and runs on a server with one GTX 2080Ti GPU. In the experiments, two kinds of input 2D pose sequences are utilized including the pre-estimated 2D pose by the pre-trained CPN [7] and the real 2D pose (ground truth). For model training, we set each mini-batch as 128 sequences. The network parameters are optimized for 20 epochs by Adam optimizer with basic learning rate of 0.001 and decayed by 0.96 after each epoch. We consider the repeat time $L$ of modules, the hidden embedding channel $C$ , and the number of head $H$ in attention block as free parameters that we tailor to the scale of network. The performances of the standard version STCFormaler with $\\{L = 6, C = 256, H = 8\\}$ and the large version STCFormaler-L with $\\{L = 6, C = 512, H = 8\\}$ are both reported.", + "bbox": [ + 496, + 428, + 890, + 654 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Performance Comparison on Human3.6M", + "text_level": 1, + "bbox": [ + 498, + 666, + 859, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare with several state-of-the-art techniques on Human3.6M dataset. Table 1 summarizes the performance comparisons in terms of P1 and P2 errors taking the pre-estimated 2D poses (CPN) as input, and the number of sampled frames T per video is also given for each method. 
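For reference, the P1 numbers reported in these tables are the MPJPE after aligning the root joints, which can be computed as in the following generic sketch (joint index 0 is assumed to be the root hip; Protocol 2 would additionally apply a rigid Procrustes alignment before the same distance computation, which is omitted here).

import torch

def mpjpe_p1(pred, gt, root=0):
    # pred, gt: (T, N, 3) poses in millimetres
    pred = pred - pred[:, root:root + 1]    # align the root joint (hip)
    gt = gt - gt[:, root:root + 1]
    return (pred - gt).norm(dim=-1).mean()  # mean per-joint position error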
In general, the longer input sequence leads to the lower regression error. Overall, STCFormer-L with $T = 243$ input frames achieves the new state-of-the-art performances with P1 error of $40.5\\mathrm{mm}$ and P2 error of $31.8\\mathrm{mm}$ . Benefiting from the proposed STC attention module, STCFormer-L outperforms StridedFormer [22], PATA [48] and MixSTE [52] with $T = 243$ frames, which are also based on transformer architecture, by the P1 error drop of $3.2\\mathrm{mm}$ , $2.6\\mathrm{mm}$ and $0.4\\mathrm{mm}$ , respectively. Comparing to the best competitor", + "bbox": [ + 496, + 689, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4794", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/33307fa95cef6d717bdc4c1797e8eb56d94d3357502e3cd621f382f44899c6dc.jpg", + "table_caption": [ + "Table 1. Performance comparisons in terms of P1 error (mm) and P2 error (mm) with the state-of-the-art methods on Human3.6M dataset. The 2D pose input is estimated by CPN [7]. The best result and runner-up result in each column are marked in red and blue, respectively. \\* denotes the post-processing module proposed in [4]. $T$ is the number of sampled frames from each video." + ], + "table_footnote": [], + "table_body": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2041.844.841.144.947.454.143.442.256.263.645.343.545.331.332.245.1
UGCN [46] (T=96)*ECCV'2040.242.542.641.146.756.741.442.356.260.446.342.246.231.731.044.5
PoseFormer [54] (T=81)ICCV'2141.544.839.842.546.551.642.142.053.360.745.543.346.131.832.244.3
Shan et al. [40] (T=243)ACM MM'2140.844.541.442.746.355.641.841.953.760.845.041.544.830.831.944.3
Anatomy3D [6] (T=243)TCSVT'2141.443.540.142.946.651.941.742.353.960.245.441.746.031.532.744.1
Einfalt et al. [9] (T=351)*arXiv'2239.643.840.242.446.553.942.342.555.762.345.143.044.730.130.844.2
StridedFormer [22] (T=243)*TMM'2240.343.340.242.345.652.341.840.555.960.644.243.044.230.030.243.7
CrossFormer [13] (T=81)arXiv'2240.744.140.841.545.852.841.240.855.361.944.941.844.629.231.143.7
PATA [48] (T=243)TIP'2239.942.740.342.345.052.840.439.356.961.244.141.342.828.429.343.1
MHFormer [23] (T=351)CVPR'2239.243.140.140.944.951.240.641.353.560.343.741.143.829.830.643.0
P-STMO [39] (T=243)ECCV'2238.942.740.441.145.649.740.939.955.559.444.942.242.729.429.442.8
MixSTE [52] (T=81)CVPR'2239.843.038.640.143.450.640.641.452.256.743.840.843.929.430.342.4
MixSTE [52] (T=243)CVPR'2237.640.937.339.742.349.940.139.851.755.042.139.841.027.927.940.9
STCFormer (T=81)40.643.038.340.243.552.640.340.151.857.742.839.842.328.029.542.0
STCFormer (T=243)39.641.637.438.843.151.139.139.751.457.441.838.540.727.128.641.0
STCFormer-L (T=243)38.441.236.838.042.750.538.738.252.556.841.838.440.226.227.740.5
P2PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2032.335.233.335.835.941.533.232.744.650.937.032.437.025.227.235.6
UGCN [46] (T=96)*ECCV'2031.834.335.433.535.441.731.131.644.449.036.432.235.024.923.034.5
PoseFormer [54] (T=81)ICCV'2134.136.134.437.236.442.234.433.645.052.537.433.837.825.627.336.5
Shan et al. [40] (T=243)ACM MM'2132.536.233.235.335.642.132.631.942.647.936.632.134.824.225.835.0
Anatomy3D [6] (T=243)TCSVT'2132.635.132.835.436.340.432.432.342.749.036.832.436.024.926.535.0
Einfalt et al. [9] (T=351)*arXiv'2232.736.133.436.036.142.033.333.145.450.737.034.135.924.425.435.7
StridedFormer [22] (T=243)*TMM'2232.735.532.535.435.941.633.031.945.150.136.333.535.123.925.035.2
MHFormer [23] (T=351)CVPR'2231.534.932.833.635.339.632.032.243.548.736.432.634.323.925.134.4
P-STMO [39] (T=243)ECCV'2231.335.232.933.935.439.332.531.544.648.236.332.934.423.823.934.4
CrossFormer [13] (T=81)arXiv'2231.434.632.633.734.339.731.631.044.349.335.931.334.423.425.534.3
PATA [48] (T=243)TIP'2231.234.131.933.833.939.531.630.045.448.135.031.133.522.423.633.7
MixSTE [52] (T=81)CVPR'2232.034.231.733.734.439.232.031.842.946.935.532.034.423.625.233.9
MixSTE [52] (T=243)CVPR'2230.833.130.331.833.139.131.130.542.544.534.030.832.722.122.932.6
STCFormer (T=81)30.433.831.131.733.539.530.830.041.845.834.330.132.821.923.432.7
STCFormer (T=243)29.533.230.631.033.038.030.429.441.845.233.629.531.621.322.632.0
STCFormer-L (T=243)29.333.030.730.632.738.229.728.842.245.033.329.431.520.922.331.8
", + "bbox": [ + 80, + 133, + 888, + 481 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b7c5f83f398442bf281b2c20706d24a78e7238d2d686f3b48dc36dd8f68df70d.jpg", + "table_caption": [ + "Table 2. Performance comparisons in terms of P1 error (mm) with the state-of-the-art methods on Human3.6M dataset. The models take the ground-truth 2D pose as input. The best result and runner-up result in each column are marked in red and blue, respectively. “*” denotes the post-processing module proposed in [4]. $T$ is the number of sampled frames from each video." + ], + "table_footnote": [], + "table_body": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2034.537.133.634.232.937.139.635.840.741.433.033.833.026.626.934.7
PoseFormer [54] (T=81)ICCV'2130.033.629.931.030.233.334.831.437.838.631.731.529.023.323.131.3
Shan et al. [40] (T=243)ACM MM'2129.530.828.829.130.735.231.727.834.536.030.329.428.924.124.730.1
MHFormer [23] (T=351)CVPR'2227.732.129.128.930.033.933.031.237.039.330.031.029.422.223.030.5
P-STMO [39] (T=243)ECCV'2228.530.128.627.929.833.231.327.836.037.429.729.528.121.021.029.3
StridedFormer [22] (T=243) *TMM'2227.129.426.527.128.633.030.726.838.234.729.129.826.819.119.828.5
CrossFormer [13] (T=81)arXiv'2226.030.026.826.228.031.030.429.635.437.128.427.326.720.519.928.3
PATA [48] (T=243)TIP'2225.825.223.323.524.027.427.924.429.330.124.924.123.318.619.724.7
MixSTE [52] (T=81)CVPR'2225.627.824.525.724.929.928.627.429.929.026.125.025.218.719.925.9
MixSTE [52] (T=243)CVPR'2221.622.020.421.020.824.324.721.926.924.921.221.520.814.715.721.6
STCFormer (T=81)26.226.523.424.625.028.628.324.630.933.725.725.324.618.619.725.7
STCFormer (T=81) *25.925.922.724.024.627.527.623.130.131.525.124.723.818.419.625.0
STCFormer (T=243)21.422.621.021.323.826.024.220.028.928.022.321.420.114.215.022.0
STCFormer (T=243) *20.821.820.020.623.425.023.619.327.826.121.620.619.514.315.121.3
", + "bbox": [ + 80, + 532, + 890, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "MixSTE [52], our STCFormer consistently obtains better precision across different numbers of input frames, and only demands around half of the parameters (18.9M v.s. 33.6M). The results verify the advantages of STC attention as an economic and effective way to decompose the full spatiotemporal attention. More importantly, the series of STC-Former reaches to-date the best reported performances in 10 out of 15 categories.", + "bbox": [ + 75, + 698, + 468, + 819 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2 further details the comparisons between STC-Former and the state-of-the-art models with the groundtruth 2D pose as input. This setting excludes the noise from 2D pose estimation, and measures the upper bound of 2D-to-3D lifting models. Accordingly, the P1 errors are obvi", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ously decreased across different methods by replacing the CPN-estimated 2D pose with the ground-truth 2D pose, but the performance trends are still similar. STCFoermer with post-processing attains the best P1 error of $21.3\\mathrm{mm}$ , which is $0.3\\mathrm{mm}$ lower than the best competitor MixSTE, validating the impact of STCFoermer with different types of input.", + "bbox": [ + 496, + 698, + 890, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In addition to the mean error, we also compare the error distribution of STCFormer and baseline methods in Figure 4. In this experiment, the methods take the estimated 2D poses by CPN of 27 frames as input. Compared to the recent transformer-based approaches including Strided-Former [22], P-STMO [39], and MHFormer [23], our STC-Former leads to the highest number of samples with error", + "bbox": [ + 496, + 795, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4795", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6da9641e3ca76d3e20113352b13c63f274d9ec12df88c637d044af5559dfd990.jpg", + "image_caption": [ + "Figure 4. Error distribution of the estimated 3D poses on Human3.6M. The horizontal axis represents the error interval, and the vertical axis is the proportion of poses with error in the interval." + ], + "image_footnote": [], + "bbox": [ + 86, + 90, + 454, + 212 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "less than $35\\mathrm{mm}$ , and the lowest number of those with error larger than $45\\mathrm{mm}$ . This again confirm the advances of STCFoermer for not only obtaining the lowest average error but also better distribution across different ranges of error.", + "bbox": [ + 75, + 263, + 470, + 325 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Performance Comparison on MPI-INF-3DHP", + "text_level": 1, + "bbox": [ + 76, + 330, + 465, + 348 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To verify the generalization of 3D pose estimation models, we then test the performance on MPI-INF-3DHP dataset, which contains more complex backgrounds. Following previous works [23, 39, 52], the ground-truth 2D poses are taken as input. In view of the shorter video sequence, we set the number of input frames as 9, 27 or 81. Table 3 lists the performance comparisons. 
Similar to the observations on Human3.6M, our STCFormer with $T = 81$ reaches the to-date best reported performance with PCK of $98.7\%$, AUC of $83.9\%$ and P1 error of $23.1\mathrm{mm}$, outperforming the current state-of-the-art models by a large margin of $0.8\%$ in PCK, $8.1\%$ in AUC and $9.1\mathrm{mm}$ in P1 error. In particular, STCFormer shows better generalization ability and surpasses MixSTE [52] by a much larger P1 error margin on MPI-INF-3DHP (31.8mm) than on Human3.6M ($0.3\mathrm{mm}$). This highlights the efficacy of our method on the more complicated data.", + "bbox": [ + 75, + 354, + 468, + 597 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 604, + 230, + 622 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For a more in-depth analysis of our STCFormer, we further conduct a series of ablation studies on the Human3.6M dataset using the CPN-estimated 2D poses as input.", + "bbox": [ + 75, + 628, + 468, + 674 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The first group of experiments is to verify how well our STCFormer works with different numbers of input frames. Table 4 shows the detailed comparisons in terms of P1 error. A general performance tendency is observed that increasing $T$ leads to monotonic performance improvement. Among the competitive methods, our STCFormer consistently exhibits the best results across the 27-frame, 81-frame and 243-frame settings. The leading performances demonstrate the ability of STCFormer to deal with different lengths of video sequences. More remarkably, STCFormer-L has $43.7\%$ fewer parameters and spends $43.6\%$ fewer FLOPs than the runner-up MixSTE.", + "bbox": [ + 75, + 674, + 468, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The second ablation study assesses the performance impact of different design components. In this experiment, the models take the estimated 2D poses by CPN of 27 frames", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/74490c012557a14f19781086b79d44cbb034b1ea214e96e0df0e7a13cdabd603.jpg", + "table_caption": [ + "Table 3. Performance comparisons in terms of PCK, AUC and P1 with the state-of-the-art methods on MPI-INF-3DHP dataset. Here, the higher PCK, the higher AUC and the lower P1 indicate the better regressions. The best result in each column is marked in red. $T$ is the number of sampled frames from each video." + ], + "table_footnote": [], + "table_body": "
MethodPublicationPCK ↑AUC ↑P1(mm) ↓
UGCN [46](T=96)ECCV'2086.962.168.1
Anatomy3D [6] (T=81)TCSVT'2187.853.879.1
PoseFormer [54] (T=9)ICCV'2188.656.477.1
Hu et al. [16] (T=96)ACM MM'2197.969.542.5
CrossFormer [13] (T=9)arXiv'2289.157.576.3
PATA [48] (T=243)TIP'2290.357.869.4
MHFormer [23] (T=9)CVPR'2293.863.358.0
MixSTE [52] (T=27)CVPR'2294.466.554.9
Einfalt et al. [9] (T=81)arXiv'2295.467.646.9
P-STMO [39] (T=81)ECCV'2297.975.832.2
STCFormer (T=9)98.281.528.2
STCFormer (T=27)98.483.424.2
STCFormer (T=81)98.783.923.1
", + "bbox": [ + 503, + 161, + 879, + 349 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/37196c3bf7a0d2abaa3bd5e0f23a8f35b450c33eb70fa6b817d80c9f658d9ae3.jpg", + "table_caption": [ + "Table 4. The P1 error comparisons with different number of sampled frame $(T)$ on Human3.6M dataset. The best result in each column is marked in red." + ], + "table_footnote": [], + "table_body": "
MethodFrames TParametersFLOPs (M)P1(mm)
StridedFormer [22]274.01M16346.9
P-STMO [39]274.6M16446.1
MHFormer [23]2718.92M100045.9
MixSTE [52]2733.61M1540245.1
STCFormer274.75M217344.1
StridedFormer [22]814.06M39245.4
P-STMO [39]815.4M49344.1
MHFormer [23]8119.67M156144.5
MixSTE [52]8133.61M4620842.7
STCFormer814.75M652042.0
StridedFormer [22]2434.23M137244.0
P-STMO [39]2436.7M173742.8
MHFormer [23]24324.72M481243.2
MixSTE [52]24333.61M13862340.9
STCFormer2434.75M1956141.0
STCFormer-L24318.91M7810740.5
", + "bbox": [ + 504, + 402, + 890, + 612 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "as input. Spatial Attention and Temporal Attention solely exploit the spatial pathway and temporal pathway, respectively. STC only contains both pathways but without the positional embedding. $\\mathbf{SPE}_1$ , $\\mathbf{SPE}_2$ and $\\mathbf{SPE}$ represent the two SPE positional embeddings and their combination, respectively. Table 5 details the contribution of each component towards the overall performance. STC only by considering both spatial and temporal correlations leads to the error drop over solely utilizing spatial attention and temporal attention by $218.5\\mathrm{mm}$ and $10.6\\mathrm{mm}$ , respectively. The result indicates the importance of modeling the correlations along two axes in parallel. The three positional embedding strategies, i.e., $\\mathbf{SPE}_1$ , $\\mathbf{SPE}_2$ and $\\mathbf{SPE}$ , further contribute $0.6\\mathrm{mm}$ , $12.1\\mathrm{mm}$ and $12.9\\mathrm{mm}$ of error drop, respectively, proving the advances of involving the structure information.", + "bbox": [ + 496, + 626, + 890, + 852 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In addition to the proposed $\\mathrm{SPE}_1$ , we explore three other positional embedding functions, i.e., Absolute Positional Embedding (APE), Centrality Positional Embedding", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4796", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c8388f26c987fe17b472250cc0ac60a04c69e256e8954b65912738e1aa4a6f38.jpg", + "table_caption": [ + "Table 5. Performance contribution of each component in the proposed STCFormer on Human3.6M dataset." + ], + "table_footnote": [], + "table_body": "
STCSPE1SPE2P1 (mm)
Spatial Attention#1275.5
Temporal Attention#267.6
STC only#357.0
+SPE1#456.4
+SPE2#544.9
+SPE#644.1
", + "bbox": [ + 106, + 119, + 439, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "(CPE), and Symmetric Positional Embedding (SyPE). We refer the readers to read the supplementary materials for more details. In Table 6, we assess the performance impact of different positional embedding functions. In this experiment, the models take the estimated 2D poses by CPN of 9 frames as input. And the comparisons empirically show the superiority of the used $\\mathrm{SPE}_1$ (48.3mm vs. 48.7mm, 49.9mm, and 49.2mm).", + "bbox": [ + 75, + 224, + 470, + 345 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f2038d7d3e118f5fb82eadf1851cbb146fe40382a7b4083066dd06acdc0c803a.jpg", + "table_caption": [ + "Table 6. The P1 error comparisons with different positional embedding functions on Human3.6M dataset. The \"Baseline\" denotes the STCFoermer without $\\mathrm{SPE}_1$ . The best result in each column is marked in red." + ], + "table_footnote": [], + "table_body": "
P1 (mm)
Baseline#148.7
+SPE1#248.3
+APE#348.9
+CPE#449.6
+SyPE#549.2
", + "bbox": [ + 197, + 417, + 349, + 496 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.6. Qualitative Analysis", + "text_level": 1, + "bbox": [ + 76, + 513, + 267, + 530 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we validate our STCFoermer through attention visualization and 3D human pose estimation visualization. The examples are randomly selected from the evaluation set of Human3.6M.", + "bbox": [ + 75, + 537, + 468, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Attention visualization. We visualize the spatial attention map and temporal attention map from the last STC block of STCFormaler in Figure 5. As expected, the spatial attention map (Figure 5(a)) shows that our model learns different patterns between joints from the videos of different actions. Moreover, the temporal attention map in Figure 5(b) illustrates strong correlation across adjacent frames owing to the continuity of human actions.", + "bbox": [ + 75, + 598, + 468, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Result visualization on Human3.6M. Figure 6 showcases 3D human pose estimation results by STCFormaler and the recent transformer-based approaches including Strided-Former [22], MHFormer [23] and P-STMO [39]. The three examples are randomly selected from the walking, posing and sitting actions in Human3.6M dataset. For each method, we draw the estimated 3D human pose and the ground-truth 3D coordinates in one figure, and calculate the average error. Overall, our STCFormaler shows better reconstruction results across all three samples than the other three methods. Particularly, for the challenging action with complicated pose articulation like \"sitting\" (the third row), STC", + "bbox": [ + 75, + 719, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f4d7222c2cb3396df890ce6c3df23e8f1495e8fcaa74ec225a68a90150916539.jpg", + "image_caption": [ + "Figure 5. Visualizations of attention maps from the spatial and temporal attention modules in STCFormer. The x-axis and y-axis correspond to the queries and the predicted outputs, respectively." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 691, + 233 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/172d75ea0c43e4f480276dfa409191e3d6e591614cc92ad58b8def51ea5e8a16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 717, + 89, + 893, + 234 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/02eecdb6fe905f7c695b7edd35b58239fff5340110c2acebbde9b879fe152920.jpg", + "image_caption": [ + "Figure 6. Examples of 3D pose estimation by StridedFormer [22], MHFormer [23], P-STMO [39] and our STCFormer. The gray skeleton is the ground-truth 3D pose. Blue, orange and green skeletons represent the left part, right part and torso of the estimated human body, respectively." + ], + "image_footnote": [], + "bbox": [ + 506, + 301, + 890, + 483 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Former still estimates the 3D coordinates accurately and reconstructs the structurally plausible 3D pose.", + "bbox": [ + 496, + 580, + 890, + 612 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. 
Conclusion", + "text_level": 1, + "bbox": [ + 500, + 643, + 617, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have presented Spatio-Temporal Criss-cross Transformer (STCFormer), which explores spatial correlation and temporal correlation in a two-pathway manner for 3D human pose estimation in videos. Particularly, STCFormer is built by stacking several STC blocks, each of which separates the joint features into two groups along the channel dimension, and models the spatial and temporal interactions on each group, respectively. By doing so, the receptive field of STC block is like a criss cross of spatial and temporal axes. Moreover, the STCFormer exploits the dynamic chain structure of human body to model local context, resulting in a new positional embedding function. The experiments conducted on two benchmarks demonstrate the effectiveness of STCFormer and good generalization ability compared to the state-of-the-art techniques.", + "bbox": [ + 496, + 674, + 892, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4797", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ankur Agarwal and Bill Triggs. Recovering 3d human pose from monocular images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 28(1):44-58, 2005. 2", + "[2] Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele. Pictorial structures revisited: People detection and articulated pose estimation. In CVPR, 2009. 2", + "[3] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 3", + "[4] Yujun Cai, Liuhao Ge, Jun Liu, Jianfei Cai, Tat-Jen Cham, Junsong Yuan, and Nadia Magnenat Thalmann. Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In ICCV, 2019. 1, 2, 6", + "[5] Ching-Hang Chen and Deva Ramanan. 3d human pose estimation= 2d pose estimation+ matching. In CVPR, 2017. 2", + "[6] Tianlang Chen, Chen Fang, Xiaohui Shen, Yiheng Zhu, Zhili Chen, and Jiebo Luo. Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology, 32(1):198-209, 2021. 1, 2, 5, 6, 7", + "[7] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In CVPR, 2018. 1, 2, 5, 6", + "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 1", + "[9] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. arXiv preprint arXiv:2210.06110, 2022. 6, 7", + "[10] Hao-Shu Fang, Yuanlu Xu, Wenguan Wang, Xiaobai Liu, and Song-Chun Zhu. Learning pose grammar to encode human body configuration for 3d pose estimation. In AAAI, 2018. 2", + "[11] Nate Hagbi, Oriel Bergig, Jihad El-Sana, and Mark Billinghurst. Shape recognition and pose estimation for mobile augmented reality. IEEE Transactions on Visualization and Computer Graphics, 17(10):1369-1379, 2010. 1", + "[12] Yanbin Hao, Hao Zhang, Chong-Wah Ngo, and Xiangnan He. 
Group contextualization for video recognition. arXiv preprint arXiv:2203.09694, 2022. 4", + "[13] Mohammed Hassanin, Abdelwahed Khamiss, Mohammed Bennamoun, Farid Boussaid, and Ibrahim Radwan. Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387, 2022. 1, 2, 6, 7", + "[14] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 3", + "[15] Mir Rayat Imtiaz Hossain and James J Little. Exploiting temporal information for 3d human pose estimation. In ECCV, 2018. 1" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Wenbo Hu, Changgong Zhang, Fangneng Zhan, Lei Zhang, and Tien-Tsin Wong. Conditional directed graph convolution for 3d human pose estimation. In ACM MM, 2021. 2, 7", + "[17] Catalin Ionescu, Fuxin Li, and Cristian Sminchisescu. Latent structured models for human pose estimation. In ICCV, 2011. 2", + "[18] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2013. 5", + "[19] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, 2018. 2", + "[20] Branislav Kisacanin, Vladimir Pavlovic, and Thomas S Huang. Real-time vision for human-computer interaction. Springer Science & Business Media, 2005. 1", + "[21] Kyoungoh Lee, Inwooong Lee, and Sanghoon Lee. Propagating lstm: 3d pose estimation based on joint interdependency. In ECCV, 2018. 1", + "[22] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia, 2022. 1, 2, 3, 5, 6, 7, 8", + "[23] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In CVPR, 2022. 1, 2, 6, 7, 8", + "[24] Yehao Li, Ting Yao, Yingwei Pan, and Tao Mei. Contextual transformer networks for visual recognition. IEEE Trans. on PAMI, 2022. 1", + "[25] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, 2021. 1", + "[26] Ruixu Liu, Ju Shen, He Wang, Chen Chen, Sen-ching Cheung, and Vijayan Asari. Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In CVPR, 2020. 1, 2, 6", + "[27] Zhenguang Liu, Pengxiang Su, Shuang Wu, Xuanjing Shen, Haipeng Chen, Yanbin Hao, and Meng Wang. Motion prediction using trajectory cues. In ICCV, 2021. 1", + "[28] Zhenguang Liu, Shuang Wu, Shuyuan Jin, Qi Liu, Shouling Ji, Shijian Lu, and Li Cheng. Investigating pose representations and motion contexts modeling for 3d motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1", + "[29] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Jiebo Luo, and Tao Mei. Stand-alone inter-frame attention in video models. In CVPR, 2022. 2", + "[30] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Chong-Wah Ngo, and Tao Mei. Dynamic temporal filtering in video models. In ECCV, 2022. 2", + "[31] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In ICCV, 2017. 
1, 2", + "[32] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, 2017. 5" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4798", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[33] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In ECCV, 2016. 2", + "[34] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3d human pose. In CVPR, 2017. 2", + "[35] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In CVPR, 2019. 1, 2", + "[36] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In ICCV, 2017. 4", + "[37] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, and Tao Mei. Mlp-3d: A mlp-like 3d architecture with grouped time mixing. In CVPR, 2022. 2", + "[38] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, Xinmei Tian, and Tao Mei. Learning spatio-temporal representation with local and global diffusion. In CVPR, 2019. 4", + "[39] Wenkang Shan, Zhenhua Liu, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. arXiv preprint arXiv:2203.07628, 2022. 5, 6, 7, 8", + "[40] Wenkang Shan, Haopeng Lu, Shanshe Wang, Xinfeng Zhang, and Wen Gao. Improving robustness and accuracy via relative information encoding in 3d human pose estimation. In ACM MM, 2021. 6", + "[41] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, 2019. 1, 2", + "[42] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2", + "[43] Mikael Svenstrup, Soren Tranberg, Hans Jorgen Andersen, and Thomas Bak. Pose estimation and adaptive robot behaviour for human-robot interaction. In ICRA, 2009. 1", + "[44] Zhenhua Tang, Jia Li, Yanbin Hao, and Richang Hong. Mlp-jcg: Multi-layer perceptron with joint-coordinate gating for efficient 3d human pose estimation. IEEE Transactions on Multimedia, 2023.", + "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017. 1, 3", + "[46] Jingbo Wang, Sijie Yan, Yuanjun Xiong, and Dahua Lin. Motion guided 3d pose estimation from videos. In ECCV, 2020. 2, 6, 7", + "[47] Tianhan Xu and Wataru Takano. Graph stacked hourglass networks for 3d human pose estimation. In CVPR, 2021. 1", + "[48] Youze Xue, Jiansheng Chen, Xiangming Gu, Huimin Ma, and Hongbing Ma. Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing, 31, 2022. 1, 2, 3, 5, 6, 7", + "[49] Ting Yao, Yingwei Pan, Yehao Li, Chong-Wah Ngo, and Tao Mei. Wave-vit: Unifying wavelet and transformers for visual representation learning. In ECCV, 2022. 1", + "[50] Hao Zhang, Lechao Cheng, Yanbin Hao, and Chong-wah Ngo. Long-term leap attention, short-term periodic shift for video classification. In ACM MM, 2022. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[51] Hao Zhang, Yanbin Hao, and Chong-Wah Ngo. Token shift transformer for video classification. In ACM MM, 2021. 2", + "[52] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Junsong Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. arXiv preprint arXiv:2203.00859, 2022. 1, 2, 3, 5, 6, 7", + "[53] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In CVPR, 2019. 2", + "[54] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In ICCV, 2021. 1, 2, 5, 6, 7" + ], + "bbox": [ + 501, + 92, + 890, + 273 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4799", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_model.json b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bd760d6351388d8aa1f25aecd2716a07ec35102e --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_model.json @@ -0,0 +1,2123 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.131, + 0.856, + 0.154 + ], + "angle": 0, + "content": "3D Human Pose Estimation with Spatio-Temporal Criss-cross Attention*" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.181, + 0.761, + 0.234 + ], + "angle": 0, + "content": "Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, Ting Yao \nHefei University of Technology, Anhui, China HiDream.ai Inc \nUniversity of Science and Technology of China, Anhui, China" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.238, + 0.774, + 0.253 + ], + "angle": 0, + "content": "zhenhuat@foxmail.com, zhaofanqiu@gmail.com, haoyanbin@hotmail.com" + }, + { + "type": "text", + "bbox": [ + 0.284, + 0.255, + 0.686, + 0.27 + ], + "angle": 0, + "content": "hongrc.hfut@gmail.com, tingyao.ustc@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.305, + 0.314, + 0.32 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.474, + 0.775 + ], + "angle": 0, + "content": "Recent transformer-based solutions have shown great success in 3D human pose estimation. Nevertheless, to calculate the joint-to-joint affinity matrix, the computational cost has a quadratic growth with the increasing number of joints. Such drawback becomes even worse especially for pose estimation in a video sequence, which necessitates spatio-temporal correlation spanning over the entire video. 
In this paper, we facilitate the issue by decomposing correlation learning into space and time, and present a novel Spatio-Temporal Criss-cross attention (STC) block. Technically, STC first slices its input feature into two partitions evenly along the channel dimension, followed by performing spatial and temporal attention respectively on each partition. STC then models the interactions between joints in an identical frame and joints in an identical trajectory simultaneously by concatenating the outputs from attention layers. On this basis, we devise STCFoer by stacking multiple STC blocks and further integrate a new Structure-enhanced Positional Embedding (SPE) into STCFoer to take the structure of human body into consideration. The embedding function consists of two components: spatio-temporal convolution around neighboring joints to capture local structure, and part-aware embedding to indicate which part each joint belongs to. Extensive experiments are conducted on Human3.6M and MPI-INF-3DHP benchmarks, and superior results are reported when comparing to the state-of-the-art approaches. More remarkably, STCFoer achieves to-date the best published performance: \\(40.5\\mathrm{mm}\\) P1 error on the challenging Human3.6M dataset." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.806, + 0.21, + 0.822 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.833, + 0.47, + 0.864 + ], + "angle": 0, + "content": "3D human pose estimation has attracted intensive research attention in CV community due to its great poten" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.305, + 0.891, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.476, + 0.895, + 0.588 + ], + "angle": 0, + "content": "Figure 1. Modeling spatio-temporal correlation for 3D human pose estimation by (a) utilizing spatio-temporal attention on all joints in the entire video, (b) separating the framework into two steps that respectively capture spatial and temporal context, and (c) our Spatio-Temporal Criss-cross attention (STC), i.e., a two-pathway block that models spatial and temporal information in parallel. In the visualization of receptive field, the covered joints of each attention strategy is marked as red nodes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.719 + ], + "angle": 0, + "content": "tial in numerous applications such as human-robot interaction [20, 43], virtual reality [11] and motion prediction [27, 28]. The typical monocular solution is a two-stage pipeline, which first extracts 2D keypoints by 2D human pose detectors (e.g., [7] and [41]), and then lifts 2D coordinates into 3D space [31]. Despite its simplicity, the second stage is an ill-posed problem which lacks the depth prior, and suffers from the ambiguity problem." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.893, + 0.901 + ], + "angle": 0, + "content": "To mitigate this issue, several progresses propose to aggregate the temporal cues in a video sequence to promote pose estimation by grid convolutions [15,26,35], graph convolutions [4, 47] and multi-layer perceptrons [6, 21]. Recently, Transformer structure has emerged as a dominant architecture in both NLP and CV fields [8,24,45,49], and also demonstrated high capability in modeling spatio-temporal correlation for 3D human pose estimation [13, 22, 23, 25, 48, 52, 54]. 
Figure 1(a) illustrates a straightforward way to exploit the transformer architecture for directly learning spatio-temporal correlation between all joints in the entire video sequence. However, the computational cost of calcu" + }, + { + "type": "page_footnote", + "bbox": [ + 0.077, + 0.875, + 0.471, + 0.9 + ], + "angle": 0, + "content": "*This work is supported by the National Natural Science Foundation of China under Grants 61932009." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4790" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.258 + ], + "angle": 0, + "content": "lating the joint-to-joint affinity matrix in the self-attention has a quadratic growth along the increase of number of frames, making such solution unpractical for model training. As a result, most transformer structures employ a two-step alternative, as shown in Figure 1(b), which encodes spatial information for each frame first and then aggregates the feature sequence by temporal transformer. Note that we take spatial transformer as the frame encoder as an example in the figure. This strategy basically mines the correlation across frame-level features but seldom explores the relation between joints across different frames." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.266, + 0.473, + 0.538 + ], + "angle": 0, + "content": "In this paper, we propose a novel two-pathway attention mechanism, namely Spatio-Temporal Criss-cross attention (STC), that models spatial and temporal information in parallel, as depicted in Figure 1(c). Concretely, STC first slices the input joint features into two partitions evenly with respect to the channel dimension. On each partition, a Multihead Self-Attention (MSA) is implemented to encapsulate the context along space or time axis. In between, the space pathway computes the affinity between joints in each frame independently, and the time pathway correlates the identical joint moving across different frames, i.e., the trajectory. Then, STC recombines the learnt contexts from two pathways, and mixes the information across channels by MultiLayer Perceptrons (MLP). By doing so, the receptive field is like a criss cross of spatial and temporal axes, and the computational cost is \\(\\mathcal{O}(T^2 S) + \\mathcal{O}(TS^2)\\). That is much lower than \\(\\mathcal{O}(T^2 S^2)\\) of fully spatio-temporal attention, where \\(T\\) and \\(S\\) denote the number of frames and joints, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.546, + 0.473, + 0.744 + ], + "angle": 0, + "content": "By stacking multiple STC blocks, we devise a new architecture — STCFormer for 3D human pose estimation. Furthermore, we delve into the crucial design of positional embedding in STCFormer in the context of pose estimation. The observations that joints in the same body part are either highly relevant (static part) or not relevant but containing moving patterns (dynamic part) motivate us to design a new Structure-enhanced Positional Embedding (SPE). SPE consists of two embedding functions for the static and dynamic part, respectively. A part-aware embedding is to describe the static part by indicating which part each joint belongs to, and a spatio-temporal convolution around neighboring joints aims to capture dynamic structure in local window." 
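As a rough sense of scale for these costs, assuming the standard 17-joint skeleton used for Human3.6M and $T = 243$ frames: the full spatio-temporal affinity matrix has $(TS)^2 = (243 \cdot 17)^2 \approx 1.7 \times 10^7$ entries per attention layer, whereas the criss-cross decomposition needs only $T^2S + TS^2 = 243^2 \cdot 17 + 243 \cdot 17^2 \approx 1.1 \times 10^6$, roughly a $16\times$ reduction.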
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.473, + 0.903 + ], + "angle": 0, + "content": "We summarize the main contributions of this work as follows. First, STC is a new type of decomposed spatio-temporal attention for 3D human pose estimation in an economical and effective way. Second, STCFormer is a novel transformer architecture built by stacking multiple STC blocks and integrating the structure-enhanced positional embedding. Extensive experiments conducted on the Human3.6M and MPI-INF-3DHP datasets demonstrate that STCFormer, with much fewer parameters, achieves superior performance to the state-of-the-art techniques." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.642, + 0.107 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.116, + 0.895, + 0.448 + ], + "angle": 0, + "content": "Monocular 3D human pose estimation. Monocular 3D human pose estimation is to localize human body joints in 3D space from the input single-view 2D data, i.e., an image or 2D coordinates. The early works [1, 2, 17] develop various graphical or restrictive methods to explore the dependencies of the human skeleton and perspective relationships across spaces. With the development of deep learning, several deep neural networks [5, 10, 19, 31, 34, 42, 44, 53] are devised for 3D human pose estimation, and can be categorized into one-stage and two-stage directions. The one-stage approaches directly regress the 3D pose from the input image, and necessitate a large number of image-pose paired data and powerful computing resources [19, 34, 42]. The two-stage methods first exploit off-the-shelf 2D pose detectors [7, 33, 41] to estimate 2D joint coordinates, and then lift the 2D coordinates into 3D space by a fully-connected network [31], grid convolutional network [5], recurrent neural network [10], or graph convolutional network [53]. Although the two-stage methods alleviate the requirement of image-pose pairs, they still heavily suffer from the depth ambiguity problem, which is intrinsically ill-posed due to the lack of depth information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.449, + 0.895, + 0.674 + ], + "angle": 0, + "content": "3D pose estimation from video sequence. To overcome the limitation of depth ambiguities, later advances involve temporal context from neighboring frames to improve 3D coordinate regression. For example, Pavllo et al. [35] propose a temporal fully-convolutional network (TCN) to model the local context by convolving over the neighboring frames. Later, Liu et al. [26] extend the TCN by introducing an attention mechanism to adaptively identify the significant frames/poses over a sequence. After that, Chen et al. [6] decompose the pose estimation into bone length and bone direction prediction. Instead of the aforementioned methods based on temporal aggregation, later works [4, 16, 46] utilize the spatio-temporal graph convolutional network to model the spatial and temporal correlations across joints simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Transformer-based methods. In addition to the traditional convolutional networks, transformer architectures are also exploited to model spatio-temporal correlation [13,22,23,29,30,37,50,51,54]. In particular, Zheng et al. [54] design a concatenation architecture of several spatial transformer encoders and temporal transformer encoders in PoseFormer.
MHFormer [23] proposes to generate multiple hypothesis representations for a pose with the spatial transformer encoder and then model multi-level global correlations with different temporal transformer blocks. StridedFormer [22] and CrossFormer [13] introduce locality by integrating the 1D temporal convolution and 1D spatial convolution, respectively. More recently, the joint-wise inconsistency of motion patterns is highlighted in [48, 52], and encourages to model spatial and temporal information si" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.958 + ], + "angle": 0, + "content": "4791" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "multaneously. PATA [48] groups the joints with similar motion patterns and calculates the intra-part temporal correlation. Similarly, MixSTE [52] uses multiple separated spatial transformer blocks and temporal transformer blocks to model the spatial and temporal correlation iteratively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.17, + 0.471, + 0.336 + ], + "angle": 0, + "content": "Our work also falls into the category of transformer-based method for 3D human pose estimation. The aforementioned transformers mainly model spatial and temporal information respectively in different stages of the networks. In view that the joint motion is a state of coexistence of space and time, such separation may result in insufficient learning of moving patterns. In contrast, our STC block is a two-pathway design that models spatial and temporal dependencies in parallel, which are then mixed through MLP. Moreover, a new positional embedding function is deliberately devised to explore the local structure of human body." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.352, + 0.45, + 0.37 + ], + "angle": 0, + "content": "3. Spatio-Temporal Criss-cross Transformer" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.378, + 0.322, + 0.394 + ], + "angle": 0, + "content": "3.1. Preliminary - Transformer" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.403, + 0.469, + 0.523 + ], + "angle": 0, + "content": "We begin this section by reviewing the transformer architecture [45] as the basis of our proposal. Transformer is a versatile representation learning architecture, and mainly consists of two components: Multi-head Self-Attention module (MSA) and Feed-Forward Network (FFN). MSA calculates the token-to-token affinity matrix and propagates the information across different tokens. Formally, given \\( N \\) input tokens with \\( C \\) channels, MSA is formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.102, + 0.537, + 0.47, + 0.571 + ], + "angle": 0, + "content": "\\[\nM S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t m a x} \\left(\\frac {\\mathbf {Q} \\cdot \\mathbf {K} ^ {T}}{\\sqrt {C}}\\right) \\cdot \\mathbf {V}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.585, + 0.469, + 0.676 + ], + "angle": 0, + "content": "where \\(\\mathbf{Q},\\mathbf{K},\\mathbf{V}\\in \\mathbb{R}^{N\\times C}\\) denote the queries, keys and values obtained by linearly mapping the input tokens. Note that we omit the multi-head separation here for simplicity. FFN contains a Multi-Layer Perceptrons (MLP), i.e., a nonlinear mapping with two linear layer plus a GELU [14] activation in between. 
The output of MLP is computed by" + }, + { + "type": "equation", + "bbox": [ + 0.151, + 0.69, + 0.469, + 0.705 + ], + "angle": 0, + "content": "\\[\nM L P (\\mathbf {H}) = G E L U \\left(\\mathbf {H} \\cdot \\mathbf {W} _ {\\mathbf {1}}\\right) \\cdot \\mathbf {W} _ {\\mathbf {2}}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.718, + 0.469, + 0.781 + ], + "angle": 0, + "content": "where \\(\\mathbf{H} \\in \\mathbb{R}^{N \\times C}\\) is the input tokens of MLP, \\(\\mathbf{W}_1 \\in \\mathbb{R}^{C \\times \\hat{C}}\\) and \\(\\mathbf{W}_2 \\in \\mathbb{R}^{\\hat{C} \\times C}\\) are the projection matrices. With these, each transformer block is constructed by utilizing MSA and MLP in order with shortcut connection:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.792, + 0.36, + 0.808 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q}, \\mathbf {K}, \\mathbf {V} = F C (L N (\\mathbf {X})) ,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.81, + 0.469, + 0.825 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Y} = M S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) + \\mathbf {X}, \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.184, + 0.828, + 0.361, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Z} = M L P (L N (\\mathbf {Y})) + \\mathbf {Y},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(FC\\) is linear projection of the input tokens \\(\\mathbf{X}\\), and \\(LN\\) denotes Layer Norm [3]. The output \\(\\mathbf{Z}\\) serves as the input to the next block until the last one." + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.09, + 0.663, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.295, + 0.595, + 0.302 + ], + "angle": 0, + "content": "a)" + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.09, + 0.883, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.774, + 0.294, + 0.788, + 0.302 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.303, + 0.892, + 0.4 + ], + "angle": 0, + "content": "Figure 2. An overview of our proposed Spatio-Temporal Crisscross Transformer (STCFormer). (a) It mainly consists of \\(L\\) sequential STC blocks. Each block aggregates the context across tokens by spatio-temporal criss-cross attention, and non-linearly maps each token by Multi-Layer Perceptrons (MLP). (b) The architecture of our STC block and the Structure-enhanced Positional Embedding (SPE)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.409, + 0.696, + 0.423 + ], + "angle": 0, + "content": "3.2. Overall Architecture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.553 + ], + "angle": 0, + "content": "Figure 2 depicts an overview of the proposed STC-Former, which mainly includes three stages: a joint-based embedding, stacked STC blocks and a regression head. The joint-based embedding projects the input 2D coordinates of each joint into feature space. STC blocks aggregate the spatio-temporal context, and update the representation of each joint. Based on the learnt features, the 3D coordinates are estimated by a regression head." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.554, + 0.892, + 0.78 + ], + "angle": 0, + "content": "Joint-based embedding. 
Given a 2D pose sequence as \\(\\mathbf{P}_{2D} \\in \\mathbb{R}^{T \\times N \\times 2}\\), where \\(T\\) and \\(N\\) denote the number of frames and the number of body joints in each frame, respectively, we first project \\(\\mathbf{P}_{2D}\\) to high-dimensional embeddings by a joint-based embedding layer. This layer applies an FC layer to each 2D coordinate independently followed by a GELU activation. As such, the joint-based embedding layer produces the features with the shape of \\(T \\times N \\times C\\). Note that in the previous transformer [22], the embedding layer projects all joint coordinates in each frame into a single vector, reducing the computational cost of the subsequent transformer blocks while losing the spatial discrimination. Ours is different in that the spatial dimension \\(N\\) is maintained, and the computational cost is also reduced by spatio-temporal criss-cross attention." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "STC blocks. The STC block originates from the transformer block in Eq.(3), and replaces the original MSA layer with spatio-temporal criss-cross attention. In addition, a new positional embedding function, i.e., Structure-enhanced Positional Embedding (SPE), is integrated into the STC block for better descriptive capability of local structures. Section 3.3 and Section 3.4 will elaborate STC and SPE, respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4792" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "Regression head. A liner regression head is finally established upon the STC blocks to estimate the 3D pose coordinates \\(\\hat{\\mathbf{P}}_{3D} \\in \\mathbb{R}^{T \\times N \\times 3}\\). The whole architecture is optimized by minimizing the Mean Squared Error (MSE) between \\(\\hat{\\mathbf{P}}_{3D}\\) and the ground-truth 3D coordinates \\(\\mathbf{P}_{3D}\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.176, + 0.47, + 0.202 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\left\\| \\hat {\\mathbf {P}} _ {3 D} - \\mathbf {P} _ {3 D} \\right\\| ^ {2}. \\tag {4}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.209, + 0.409, + 0.226 + ], + "angle": 0, + "content": "3.3. Spatio-Temporal Criss-cross Attention" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.469, + 0.398 + ], + "angle": 0, + "content": "STC aims to model the spatio-temporal dependencies between joints in an efficient way to avoid the quadratic computation cost of fully spatio-temporal attention. Inspired by the group contextualization strategy [12] which separates the channels into several paralleled groups and applies different feature contextualization operations to them respectively, we propose to capture the spatial and temporal context on different channels in parallel. Different from the axial convolution in [12,36,38], we exploit axis-specific multihead self-attention in STC for spatial or temporal context, which is more powerful for correlation learning." 
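Before detailing the attention itself, the overall pipeline of Section 3.2 (per-joint embedding, stacked STC blocks, and a linear regression head trained with the MSE loss of Eq. (4)) can be summarized with the following minimal sketch. It is a hypothetical skeleton for illustration; the class, argument and helper names are assumptions and this is not the released implementation.

```python
import torch
import torch.nn as nn

class STCFormerSkeleton(nn.Module):
    """Minimal sketch of the pipeline described above: joint-based embedding ->
    L STC blocks -> regression head (hypothetical names, not the authors' code)."""
    def __init__(self, num_joints=17, channels=256, num_blocks=6, stc_block_cls=None):
        super().__init__()
        # Joint-based embedding: an FC applied to each 2D coordinate independently, then GELU,
        # so the spatial dimension N is kept and the output has shape T x N x C.
        self.embed = nn.Sequential(nn.Linear(2, channels), nn.GELU())
        blocks = [stc_block_cls(channels) for _ in range(num_blocks)] if stc_block_cls else []
        self.blocks = nn.ModuleList(blocks)
        # Linear regression head estimating 3D coordinates per joint and frame.
        self.head = nn.Linear(channels, 3)

    def forward(self, p2d):                # p2d: (B, T, N, 2) input 2D pose sequence
        x = self.embed(p2d)                # (B, T, N, C)
        for blk in self.blocks:
            x = blk(x)                     # STC blocks keep the (B, T, N, C) shape
        return self.head(x)                # (B, T, N, 3) estimated 3D pose

# Training objective of Eq. (4): MSE between predicted and ground-truth 3D poses.
def pose_loss(pred_3d, gt_3d):
    return ((pred_3d - gt_3d) ** 2).mean()
```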
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.398, + 0.469, + 0.519 + ], + "angle": 0, + "content": "Concretely, the input embedding \\(\\mathbf{X} \\in \\mathbb{R}^{T \\times N \\times C}\\) are firstly mapped to queries \\(\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times C}\\), keys \\(\\mathbf{K} \\in \\mathbb{R}^{T \\times N \\times C}\\), and values \\(\\mathbf{V} \\in \\mathbb{R}^{T \\times N \\times C}\\), which are then evenly divided into two groups along the channel dimension. For notation clarity, we denote the divided feature matrix as time group \\(\\{\\mathbf{Q}_T, \\mathbf{K}_T, \\mathbf{V}_T\\}\\) and space group \\(\\{\\mathbf{Q}_S, \\mathbf{K}_S, \\mathbf{V}_S\\}\\). Next, the temporal and spatial correlations are calculated in two separate self-attention modules." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.469, + 0.61 + ], + "angle": 0, + "content": "Temporal correlation represents the relation between the joints in an identical trajectory moving across different frames. To achieve this, we implement an axis-specific MSA, named \\(MSA_{T}\\), which computes the attention affinities in Eq.(1) between joints across the temporal dimension. Hence, the output of temporal attention is measured as" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.62, + 0.469, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} _ {\\mathbf {T}} = M S A _ {T} \\left(\\mathbf {Q} _ {\\mathbf {T}}, \\mathbf {K} _ {\\mathbf {T}}, \\mathbf {V} _ {\\mathbf {T}}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.645, + 0.469, + 0.75 + ], + "angle": 0, + "content": "Spatial correlation is the connection between joints in an identical frame. These joints indicate different body parts in one frame, which are intrinsically relevant due to the prior of body skeleton. Similar to temporal attention, we devise \\(MSAS_{S}\\) as an axis-specific MSA component on spatial dimension. Therefore, the output of spatial attention is formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.175, + 0.761, + 0.469, + 0.776 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} _ {\\mathbf {S}} = M S A _ {S} \\left(\\mathbf {Q} _ {\\mathbf {S}}, \\mathbf {K} _ {\\mathbf {S}}, \\mathbf {V} _ {\\mathbf {S}}\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.786, + 0.469, + 0.876 + ], + "angle": 0, + "content": "The above two correlation modules process in parallel and follow the self-attention regime for feature contextualization. They compute the token-to-token affinities by contextualizing from a specific axial perspective, and complement to each other. Thus, we concatenate the outputs from both attention layers along the channel dimension:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.887, + 0.469, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} = c a t \\left(\\mathbf {H} _ {\\mathbf {T}}, \\mathbf {H} _ {\\mathbf {S}}\\right), \\tag {7}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.089, + 0.892, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.229, + 0.892, + 0.298 + ], + "angle": 0, + "content": "Figure 3. (a) The coefficient matrix of the motion trajectory of different joints. (b) The body joints are divided into five parts, denoted as \\( g_{*} \\). The part with high/low relevance is colored as light/dark blue, respectively. The motion data is generated by actor S6 performing greeting action in the training set of Human3.6M." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.306, + 0.892, + 0.367 + ], + "angle": 0, + "content": "where \( cat \) performs the concatenation. The resultant receptive field of STC is like a criss cross of the spatial and temporal axes, and stacking multiple STC blocks is able to approximate the full spatio-temporal attention." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.376, + 0.862, + 0.392 + ], + "angle": 0, + "content": "3.4. Structure-enhanced Positional Embedding" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.892, + 0.52 + ], + "angle": 0, + "content": "One of the crucial factors in transformers is the positional embedding, which indicates the position of each token absolutely or relatively. For the positional embedding function in STCFormer, we delve into the inherent property of joints, i.e., the local structure, and propose the Structure-enhanced Positional Embedding (SPE). Figure 3 depicts the motivation of SPE. Here, we group the body joints into five parts according to the dynamic chain structure of the human body:" + }, + { + "type": "equation", + "bbox": [ + 0.537, + 0.53, + 0.891, + 0.614 + ], + "angle": 0, + "content": "\[\n\begin{array}{l} g_{0} = \{\text{hip}, \text{spine}, \text{thorax}, \text{neck}, \text{head}\} \\ g_{1} = \{\text{right\_hip}, \text{right\_knee}, \text{right\_feet}\} \\ g_{2} = \{\text{left\_hip}, \text{left\_knee}, \text{left\_feet}\} \tag{8} \\ g_{3} = \{\text{right\_shoulder}, \text{right\_elbow}, \text{right\_wrist}\} \\ g_{4} = \{\text{left\_shoulder}, \text{left\_elbow}, \text{left\_wrist}\} \end{array}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.623, + 0.892, + 0.76 + ], + "angle": 0, + "content": "The trajectories of joints in the static parts (\(g_0, g_3\) and \(g_4\) in the figure) are highly relevant. We devise a part-aware positional embedding to indicate which part each joint belongs to. The joints in the same part are attached with the same embedding vector. In particular, a learnable dictionary is constructed to assign embedding vectors to different joints according to their group index. Given the group indices \(\mathbf{g} \in [0,1,2,3,4]^{T \times N}\) of the joints, the learnable dictionary \(\mathbf{D} \in \mathbb{R}^{5 \times \frac{C}{2}}\) converts the indices to embedding vectors as" + }, + { + "type": "equation", + "bbox": [ + 0.642, + 0.77, + 0.891, + 0.786 + ], + "angle": 0, + "content": "\[\n\mathbf{SPE}_{1} = \mathbf{D}(\mathbf{g}). \tag{9}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Nevertheless, in the dynamic parts, i.e., parts with relative movements (\(g_{1}, g_{2}\) in the figure), the trajectories of joints are not relevant. Simply assigning the same embedding vector to these joints ignores the motion patterns in the dynamic parts. Hence, we propose to exploit a spatio-temporal convolution around the neighboring joints to capture the local structure.
Formally, given the values \(\mathbf{V} \in \mathbb{R}^{T \times N \times \frac{C}{2}}\) in" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4793" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.079, + 0.092, + 0.442, + 0.106 + ], + "angle": 0, + "content": "Algorithm 1 Pseudo-code of STC with SPE (PyTorch-like)" + }, + { + "type": "code", + "bbox": [ + 0.078, + 0.112, + 0.46, + 0.379 + ], + "angle": 0, + "content": "# x: input tensor of shape (B, T, N, C) \n# p: part index (B, T, N) in [0, 4] \n# MSA: axis-specific multi-head self-attention \nself.linear = nn.Linear(C, 3 * C) \nself.embedding1 = nn.Embedding(5, C // 2) \n# the channel-last convolution over the (T, N) grid \nself.embedding2 = nn.Conv2d(C // 2, C // 2, kernel_size=3, padding=1, groups=C // 2) \ndef STC(x, p): \n    Q, K, V = self.linear(x).chunk(3, dim=3) \n    Q_t, Q_s = Q.chunk(2, dim=3) \n    K_t, K_s = K.chunk(2, dim=3) \n    V_t, V_s = V.chunk(2, dim=3) \n    H_t = MSA(Q_t, K_t, V_t, dim=1)  # temporal attention per trajectory \n    H_s = MSA(Q_s, K_s, V_s, dim=2)  # spatial attention per frame \n    H_t = H_t + self.embedding1(p) + self.embedding2(V_t) \n    H_s = H_s + self.embedding1(p) + self.embedding2(V_s) \n    H = torch.cat([H_t, H_s], dim=3) \n    return H" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.406, + 0.47, + 0.437 + ], + "angle": 0, + "content": "STC block, we treat \(\mathbf{V}\) as a 2D (i.e., space and time) feature map, and utilize a 2D convolution on the neighboring joints:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.45, + 0.469, + 0.466 + ], + "angle": 0, + "content": "\[\n\mathbf{SPE}_{2}(\mathbf{V}) = \operatorname{conv2d}(\mathbf{V}), \tag{10}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.468, + 0.469, + 0.558 + ], + "angle": 0, + "content": "where \( conv2d \) is a \( 3 \times 3 \) convolution operation. Although the two SPE functions are designed respectively for the static parts and the dynamic parts, we utilize the two functions concurrently on all joints, leaving out the requirement of a static/dynamic judgment. The duet of the two SPE functions is able to deal with parts with various moving patterns." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.559, + 0.469, + 0.589 + ], + "angle": 0, + "content": "By injecting the proposed SPE functions into STC, the equation of STC is reformulated as" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.595, + 0.469, + 0.645 + ], + "angle": 0, + "content": "\[\n\begin{array}{l} \mathbf{H}_{\mathbf{T}} = MSA_{T}\left(\mathbf{Q}_{\mathbf{T}}, \mathbf{K}_{\mathbf{T}}, \mathbf{V}_{\mathbf{T}}\right) + \mathbf{SPE}_{1} + \mathbf{SPE}_{2}(\mathbf{V}_{\mathbf{T}}), \\ \mathbf{H}_{\mathbf{S}} = MSA_{S}\left(\mathbf{Q}_{\mathbf{S}}, \mathbf{K}_{\mathbf{S}}, \mathbf{V}_{\mathbf{S}}\right) + \mathbf{SPE}_{1} + \mathbf{SPE}_{2}(\mathbf{V}_{\mathbf{S}}), \tag{11} \\ \mathbf{H} = cat\left(\mathbf{H}_{\mathbf{T}}, \mathbf{H}_{\mathbf{S}}\right). \end{array}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.651, + 0.47, + 0.757 + ], + "angle": 0, + "content": "Implementation. The proposed STC plus SPE in Eq.(11) can be readily implemented with a few lines of code in Python. We detail an example of the code in Algorithm 1 based on the PyTorch platform. Here, we execute the pre-defined MSA and MLP functions of the standard transformer. The SPE is implemented by constructing the default Embedding layer and Conv2d layer."
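Algorithm 1 leaves two pieces undefined: the axis-specific attention MSA(Q, K, V, dim=...) and the concrete part indices fed to the two SPE layers. The sketch below fills both in under stated assumptions (per-head scaling in the attention, a Human3.6M-style 17-joint ordering for the part indices of Eq. (8), and a grouped channel-last convolution for Eq. (10)); it is an illustrative reading of the text, not the authors' released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def axis_msa(q, k, v, dim, num_heads=8):
    """Axis-specific multi-head self-attention assumed by Eq. (5)/(6) and Algorithm 1.
    q, k, v: (B, T, N, C_half); dim=1 attends along time (per trajectory),
    dim=2 along space (per frame). Per-head scaling is an assumption."""
    assert dim in (1, 2)
    if dim == 1:                                              # bring the attended axis next to channels
        q, k, v = (t.transpose(1, 2) for t in (q, k, v))      # (B, N, T, C_half)
    B, O, L, C = q.shape                                      # attention runs over the L axis
    hd = C // num_heads
    def heads(t):                                             # (B*O, num_heads, L, hd)
        return t.reshape(B * O, L, num_heads, hd).transpose(1, 2)
    qh, kh, vh = heads(q), heads(k), heads(v)
    attn = F.softmax(qh @ kh.transpose(-2, -1) / hd ** 0.5, dim=-1)
    out = (attn @ vh).transpose(1, 2).reshape(B, O, L, C)     # merge heads
    return out.transpose(1, 2) if dim == 1 else out           # back to (B, T, N, C_half)

# Assumed Human3.6M-style 17-joint ordering; only the five-part grouping of Eq. (8)
# comes from the text, the index order itself is an assumption.
PART_INDEX = torch.tensor([0,            # hip                          -> g0
                           1, 1, 1,      # right hip / knee / foot      -> g1
                           2, 2, 2,      # left hip / knee / foot       -> g2
                           0, 0, 0, 0,   # spine, thorax, neck, head    -> g0
                           4, 4, 4,      # left shoulder/elbow/wrist    -> g4
                           3, 3, 3])     # right shoulder/elbow/wrist   -> g3

class SPE(nn.Module):
    """Structure-enhanced positional embedding of Eqs. (9)-(10) for one pathway."""
    def __init__(self, c_half):
        super().__init__()
        self.part_embed = nn.Embedding(5, c_half)              # SPE_1: learnable dictionary D
        self.local_conv = nn.Conv2d(c_half, c_half, 3,         # SPE_2: 3x3 conv over the T x N grid
                                    padding=1, groups=c_half)

    def forward(self, v, part_idx):                            # v: (B, T, N, C/2)
        spe1 = self.part_embed(part_idx)                        # (T, N, C/2), broadcast over the batch
        spe2 = self.local_conv(v.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        return spe1 + spe2
```

In this reading, Algorithm 1's MSA(Q_t, K_t, V_t, dim=1) maps to axis_msa(Q_t, K_t, V_t, dim=1), and self.embedding1(p) plus self.embedding2(V_t) correspond to the two terms summed in SPE.forward.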
+ }, + { + "type": "title", + "bbox": [ + 0.077, + 0.769, + 0.21, + 0.787 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.469, + 0.84 + ], + "angle": 0, + "content": "We comprehensively evaluate the proposed STCFormer architecture on two large-scale datasets, i.e., Human3.6M [18] and MPI-INF-3DHP [32]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.848, + 0.364, + 0.863 + ], + "angle": 0, + "content": "4.1. Datasets and Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Human3.6M is currently the most popular benchmark for indoor 3D human pose estimation, which contains 11" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.394 + ], + "angle": 0, + "content": "subjects performing 15 typical actions, leading to 3.6 million video frames in total. Following the standard protocol, we use subjects 1, 5, 6, 7, and 8 for training, and subjects 9 and 11 for evaluation. The Mean Per Joint Position Error (MPJPE) is used to measure the error under two protocols: Protocol 1 (referred to as P1) computes MPJPE between the estimated pose and the ground truth after aligning their root joints (hip); Protocol 2 (referred to as P2) calculates Procrustes-MPJPE, where the ground truth and the pose prediction are further aligned through a rigid transformation. We also compute the MPJPE distribution of poses to evaluate the overall precision of the reconstructed skeletons. MPI-INF-3DHP is a recently proposed large-scale dataset, which consists of three scenes, i.e., green screen, non-green screen, and outdoor. By using 14 cameras, the dataset records 8 actors performing 8 activities for the training set and 7 activities for evaluation. Following the previous works [6, 39, 54], we adopt the MPJPE (P1), percentage of correct keypoints (PCK) within \(150\mathrm{mm}\), and area under the curve (AUC) results as the evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.405, + 0.719, + 0.421 + ], + "angle": 0, + "content": "4.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.429, + 0.892, + 0.655 + ], + "angle": 0, + "content": "Our model is implemented with the PyTorch toolkit and runs on a server with one GTX 2080Ti GPU. In the experiments, two kinds of input 2D pose sequences are utilized, including the 2D poses pre-estimated by the pre-trained CPN [7] and the real 2D poses (ground truth). For model training, we set each mini-batch as 128 sequences. The network parameters are optimized for 20 epochs by the Adam optimizer with a base learning rate of 0.001, decayed by a factor of 0.96 after each epoch. We consider the number \( L \) of repeated modules, the hidden embedding channels \( C \), and the number of heads \( H \) in the attention block as free parameters that we tailor to the scale of the network. The performances of the standard version STCFormer with \( \{L = 6, C = 256, H = 8\} \) and the large version STCFormer-L with \( \{L = 6, C = 512, H = 8\} \) are both reported." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.667, + 0.86, + 0.683 + ], + "angle": 0, + "content": "4.3. Performance Comparison on Human3.6M" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We compare with several state-of-the-art techniques on the Human3.6M dataset.
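For reference, the two error protocols used in the tables that follow (P1 and P2, defined in Section 4.1) can be computed as in this minimal sketch. The root-joint index and the use of a similarity (scale-included) Procrustes alignment for P2 are assumptions on top of the text.

```python
import numpy as np

def mpjpe_p1(pred, gt, root=0):
    """Protocol 1: MPJPE (mm) after aligning the root joints; pred, gt: (F, J, 3).
    Treating joint 0 as the hip/root is an assumption."""
    pred = pred - pred[:, root:root + 1]
    gt = gt - gt[:, root:root + 1]
    return np.linalg.norm(pred - gt, axis=-1).mean()

def _procrustes_align(p, g):
    """Similarity alignment of one pose p onto g (both (J, 3)); including scale is an assumption."""
    mu_p, mu_g = p.mean(0), g.mean(0)
    X, Y = p - mu_p, g - mu_g
    U, S, Vt = np.linalg.svd(X.T @ Y)
    D = np.eye(3)
    if np.linalg.det(U @ Vt) < 0:          # avoid an improper rotation (reflection)
        D[2, 2] = -1.0
    R = U @ D @ Vt
    s = (S * np.diag(D)).sum() / (X ** 2).sum()
    return s * X @ R + mu_g

def mpjpe_p2(pred, gt):
    """Protocol 2: Procrustes-aligned MPJPE averaged over frames."""
    return float(np.mean([np.linalg.norm(_procrustes_align(p, g) - g, axis=-1).mean()
                          for p, g in zip(pred, gt)]))
```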
Table 1 summarizes the performance comparisons in terms of P1 and P2 errors taking the pre-estimated 2D poses (CPN) as input, and the number of sampled frames T per video is also given for each method. In general, the longer input sequence leads to the lower regression error. Overall, STCFormer-L with \\(T = 243\\) input frames achieves the new state-of-the-art performances with P1 error of \\(40.5\\mathrm{mm}\\) and P2 error of \\(31.8\\mathrm{mm}\\). Benefiting from the proposed STC attention module, STCFormer-L outperforms StridedFormer [22], PATA [48] and MixSTE [52] with \\(T = 243\\) frames, which are also based on transformer architecture, by the P1 error drop of \\(3.2\\mathrm{mm}\\), \\(2.6\\mathrm{mm}\\) and \\(0.4\\mathrm{mm}\\), respectively. Comparing to the best competitor" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4794" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.09, + 0.892, + 0.132 + ], + "angle": 0, + "content": "Table 1. Performance comparisons in terms of P1 error (mm) and P2 error (mm) with the state-of-the-art methods on Human3.6M dataset. The 2D pose input is estimated by CPN [7]. The best result and runner-up result in each column are marked in red and blue, respectively. \\* denotes the post-processing module proposed in [4]. \\(T\\) is the number of sampled frames from each video." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.135, + 0.89, + 0.482 + ], + "angle": 0, + "content": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2041.844.841.144.947.454.143.442.256.263.645.343.545.331.332.245.1
UGCN [46] (T=96)*ECCV'2040.242.542.641.146.756.741.442.356.260.446.342.246.231.731.044.5
PoseFormer [54] (T=81)ICCV'2141.544.839.842.546.551.642.142.053.360.745.543.346.131.832.244.3
Shan et al. [40] (T=243)ACM MM'2140.844.541.442.746.355.641.841.953.760.845.041.544.830.831.944.3
Anatomy3D [6] (T=243)TCVST'2141.443.540.142.946.651.941.742.353.960.245.441.746.031.532.744.1
Einfalt et al. [9] (T=351)*arXiv'2239.643.840.242.446.553.942.342.555.762.345.143.044.730.130.844.2
StridedFormer [22] (T=243)*TMM'2240.343.340.242.345.652.341.840.555.960.644.243.044.230.030.243.7
CrossFormer [13] (T=81)arXiv'2240.744.140.841.545.852.841.240.855.361.944.941.844.629.231.143.7
PATA [48] (T=243)TIP'2239.942.740.342.345.052.840.439.356.961.244.141.342.828.429.343.1
MHFormer [23] (T=351)CVPR'2239.243.140.140.944.951.240.641.353.560.343.741.143.829.830.643.0
P-STMO [39] (T=243)ECCV'2238.942.740.441.145.649.740.939.955.559.444.942.242.729.429.442.8
MixSTE [52] (T=81)CVPR'2239.843.038.640.143.450.640.641.452.256.743.840.843.929.430.342.4
MixSTE [52] (T=243)CVPR'2237.640.937.339.742.349.940.139.851.755.042.139.841.027.927.940.9
STCFormer (T=81)40.643.038.340.243.552.640.340.151.857.742.839.842.328.029.542.0
STCFormer (T=243)39.641.637.438.843.151.139.139.751.457.441.838.540.727.128.641.0
STCFormer-L (T=243)38.441.236.838.042.750.538.738.252.556.841.838.440.226.227.740.5
P2PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2032.335.233.335.835.941.533.232.744.650.937.032.437.025.227.235.6
UGCN [46] (T=96)*ECCV'2031.834.335.433.535.441.731.131.644.449.036.432.235.024.923.034.5
PoseFormer [54] (T=81)ICCV'2134.136.134.437.236.442.234.433.645.052.537.433.837.825.627.336.5
Shan et al. [40] (T=243)ACM MM'2132.536.233.235.335.642.132.631.942.647.936.632.134.824.225.835.0
Anatomy3D [6] (T=243)TCSVT'2132.635.132.835.436.340.432.432.342.749.036.832.436.024.926.535.0
Einfalt et al. [9] (T=351)*arXiv'2232.736.133.436.036.142.033.333.145.450.737.034.135.924.425.435.7
StridedFormer [22] (T=243)*TMM'2232.735.532.535.435.941.633.031.945.150.136.333.535.123.925.035.2
MHFormer [23] (T=351)CVPR'2231.534.932.833.635.339.632.032.243.548.736.432.634.323.925.134.4
P-STMO [39] (T=243)ECCV'2231.335.232.933.935.439.332.531.544.648.236.332.934.423.823.934.4
CrossFormer [13] (T=81)arXiv'2231.434.632.633.734.339.731.631.044.349.335.931.334.423.425.534.3
PATA [48] (T=243)TIP'2231.234.131.933.833.939.531.630.045.448.135.031.133.522.423.633.7
MixSTE [52] (T=81)CVPR'2232.034.231.733.734.439.232.031.842.946.935.532.034.423.625.233.9
MixSTE [52] (T=243)CVPR'2230.833.130.331.833.139.131.130.542.544.534.030.832.722.122.932.6
STCFormer (T=81)30.433.831.131.733.539.530.830.041.845.834.330.132.821.923.432.7
STCFormer (T=243)29.533.230.631.033.038.030.429.441.845.233.629.531.621.322.632.0
STCFormer-L (T=243)29.333.030.730.632.738.229.728.842.245.033.329.431.520.922.331.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.489, + 0.892, + 0.53 + ], + "angle": 0, + "content": "Table 2. Performance comparisons in terms of P1 error (mm) with the state-of-the-art methods on Human3.6M dataset. The models take the ground-truth 2D pose as input. The best result and runner-up result in each column are marked in red and blue, respectively. “*” denotes the post-processing module proposed in [4]. \\(T\\) is the number of sampled frames from each video." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.534, + 0.892, + 0.689 + ], + "angle": 0, + "content": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2034.537.133.634.232.937.139.635.840.741.433.033.833.026.626.934.7
PoseFormer [54] (T=81)ICCV'2130.033.629.931.030.233.334.831.437.838.631.731.529.023.323.131.3
Shan et al. [40] (T=243)ACM MM'2129.530.828.829.130.735.231.727.834.536.030.329.428.924.124.730.1
MHFormer [23] (T=351)CVPR'2227.732.129.128.930.033.933.031.237.039.330.031.029.422.223.030.5
P-STMO [39] (T=243)ECCV'2228.530.128.627.929.833.231.327.836.037.429.729.528.121.021.029.3
StridedFormer [22] (T=243) *TMM'2227.129.426.527.128.633.030.726.838.234.729.129.826.819.119.828.5
CrossFormer [13] (T=81)arXiv'2226.030.026.826.228.031.030.429.635.437.128.427.326.720.519.928.3
PATA [48] (T=243)TIP'2225.825.223.323.524.027.427.924.429.330.124.924.123.318.619.724.7
MixSTE [52] (T=81)CVPR'2225.627.824.525.724.929.928.627.429.929.026.125.025.218.719.925.9
MixSTE [52] (T=243)CVPR'2221.622.020.421.020.824.324.721.926.924.921.221.520.814.715.721.6
STCFormer (T=81)26.226.523.424.625.028.628.324.630.933.725.725.324.618.619.725.7
STCFormer (T=81) *25.925.922.724.024.627.527.623.130.131.525.124.723.818.419.625.0
STCFormer (T=243)21.422.621.021.323.826.024.220.028.928.022.321.420.114.215.022.0
STCFormer (T=243) *20.821.820.020.623.425.023.619.327.826.121.620.619.514.315.121.3
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.699, + 0.47, + 0.82 + ], + "angle": 0, + "content": "MixSTE [52], our STCFormer consistently obtains better precision across different numbers of input frames, and only demands around half of the parameters (18.9M v.s. 33.6M). The results verify the advantages of STC attention as an economic and effective way to decompose the full spatiotemporal attention. More importantly, the series of STC-Former reaches to-date the best reported performances in 10 out of 15 categories." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Table 2 further details the comparisons between STC-Former and the state-of-the-art models with the groundtruth 2D pose as input. This setting excludes the noise from 2D pose estimation, and measures the upper bound of 2D-to-3D lifting models. Accordingly, the P1 errors are obvi" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.699, + 0.892, + 0.79 + ], + "angle": 0, + "content": "ously decreased across different methods by replacing the CPN-estimated 2D pose with the ground-truth 2D pose, but the performance trends are still similar. STCFoermer with post-processing attains the best P1 error of \\(21.3\\mathrm{mm}\\), which is \\(0.3\\mathrm{mm}\\) lower than the best competitor MixSTE, validating the impact of STCFoermer with different types of input." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.893, + 0.901 + ], + "angle": 0, + "content": "In addition to the mean error, we also compare the error distribution of STCFormer and baseline methods in Figure 4. In this experiment, the methods take the estimated 2D poses by CPN of 27 frames as input. Compared to the recent transformer-based approaches including Strided-Former [22], P-STMO [39], and MHFormer [23], our STC-Former leads to the highest number of samples with error" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4795" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.092, + 0.455, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.215, + 0.47, + 0.258 + ], + "angle": 0, + "content": "Figure 4. Error distribution of the estimated 3D poses on Human3.6M. The horizontal axis represents the error interval, and the vertical axis is the proportion of poses with error in the interval." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.264, + 0.471, + 0.326 + ], + "angle": 0, + "content": "less than \\(35\\mathrm{mm}\\), and the lowest number of those with error larger than \\(45\\mathrm{mm}\\). This again confirm the advances of STCFoermer for not only obtaining the lowest average error but also better distribution across different ranges of error." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.332, + 0.466, + 0.349 + ], + "angle": 0, + "content": "4.4. Performance Comparison on MPI-INF-3DHP" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.356, + 0.47, + 0.598 + ], + "angle": 0, + "content": "To verify the generalization of 3D pose estimation models, we then test the performance on MPI-INF-3DHP dataset, which contains more complex backgrounds. Following previous works [23, 39, 52], the ground-truth 2D poses are taken as input. In view of the shorter video sequence, we set the number of input frames as 9, 27 or 81. Table 3 lists the performance comparisons. 
Similar to the observations on Human3.6M, our STCFormer with \( T = 81 \) reaches the best reported performance to date, with PCK of \( 98.7\% \), AUC of \( 83.9\% \) and P1 error of \( 23.1\mathrm{mm} \), outperforming the current state-of-the-art models by a large margin of \( 0.8\% \) in PCK, \( 8.1\% \) in AUC and \( 9.1\mathrm{mm} \) in P1 error. In particular, STCFormer shows better generalization ability and surpasses MixSTE [52] by a much larger P1 error drop (31.8mm), against \( 0.3\mathrm{mm} \) on Human3.6M. This highlights the efficacy of our method on the more complicated data." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.606, + 0.231, + 0.623 + ], + "angle": 0, + "content": "4.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.47, + 0.675 + ], + "angle": 0, + "content": "For a more in-depth analysis of our STCFormer, we further conduct a series of ablation studies on the Human3.6M dataset using the CPN-estimated 2D poses as input." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.469, + 0.856 + ], + "angle": 0, + "content": "The first group of experiments is to verify how well our STCFormer works with different numbers of input frames. Table 4 shows the detailed comparisons in terms of P1 error. A general performance tendency is observed that increasing \( T \) leads to monotonic performance improvement. Among the competitive methods, our STCFormer consistently exhibits the best results across the 27-frame, 81-frame and 243-frame settings. The leading performances demonstrate the ability of STCFormer to deal with different lengths of video sequences. More remarkably, STCFormer-L has \( 43.7\% \) fewer parameters and requires \( 43.6\% \) fewer FLOPs than the runner-up MixSTE." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "The second ablation study assesses the performance impact of different design components. In this experiment, the models take the estimated 2D poses by CPN of 27 frames" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.089, + 0.892, + 0.159 + ], + "angle": 0, + "content": "Table 3. Performance comparisons in terms of PCK, AUC and P1 with the state-of-the-art methods on MPI-INF-3DHP dataset. Here, a higher PCK, a higher AUC and a lower P1 indicate better regression. The best result in each column is marked in red. \(T\) is the number of sampled frames from each video." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.162, + 0.88, + 0.35 + ], + "angle": 0, + "content": "
MethodPublicationPCK ↑AUC ↑P1(mm) ↓
UGCN [46](T=96)ECCV'2086.962.168.1
Anatomy3D [6] (T=81)TCSVT'2187.853.879.1
PoseFormer [54] (T=9)ICCV'2188.656.477.1
Hu et al. [16] (T=96)ACM MM'2197.969.542.5
CrossFormer [13] (T=9)arXiv'2289.157.576.3
PATA [48] (T=243)TIP'2290.357.869.4
MHFormer [23] (T=9)CVPR'2293.863.358.0
MixSTE [52] (T=27)CVPR'2294.466.554.9
Einfalt et al. [9] (T=81)arXiv'2295.467.646.9
P-STMO [39] (T=81)ECCV'2297.975.832.2
STCFormer (T=9)98.281.528.2
STCFormer (T=27)98.483.424.2
STCFormer (T=81)98.783.923.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.359, + 0.892, + 0.401 + ], + "angle": 0, + "content": "Table 4. The P1 error comparisons with different number of sampled frame \\((T)\\) on Human3.6M dataset. The best result in each column is marked in red." + }, + { + "type": "table", + "bbox": [ + 0.506, + 0.404, + 0.891, + 0.613 + ], + "angle": 0, + "content": "
MethodFrames TParametersFLOPs (M)P1(mm)
StridedFormer [22]274.01M16346.9
P-STMO [39]274.6M16446.1
MHFormer [23]2718.92M100045.9
MixSTE [52]2733.61M1540245.1
STCFormer274.75M217344.1
StridedFormer [22]814.06M39245.4
P-STMO [39]815.4M49344.1
MHFormer [23]8119.67M156144.5
MixSTE [52]8133.61M4620842.7
STCFormer814.75M652042.0
StridedFormer [22]2434.23M137244.0
P-STMO [39]2436.7M173742.8
MHFormer [23]24324.72M481243.2
MixSTE [52]24333.61M13862340.9
STCFormer2434.75M1956141.0
STCFormer-L24318.91M7810740.5
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.853 + ], + "angle": 0, + "content": "as input. Spatial Attention and Temporal Attention solely exploit the spatial pathway and temporal pathway, respectively. STC only contains both pathways but without the positional embedding. \\(\\mathbf{SPE}_1\\), \\(\\mathbf{SPE}_2\\) and \\(\\mathbf{SPE}\\) represent the two SPE positional embeddings and their combination, respectively. Table 5 details the contribution of each component towards the overall performance. STC only by considering both spatial and temporal correlations leads to the error drop over solely utilizing spatial attention and temporal attention by \\(218.5\\mathrm{mm}\\) and \\(10.6\\mathrm{mm}\\), respectively. The result indicates the importance of modeling the correlations along two axes in parallel. The three positional embedding strategies, i.e., \\(\\mathbf{SPE}_1\\), \\(\\mathbf{SPE}_2\\) and \\(\\mathbf{SPE}\\), further contribute \\(0.6\\mathrm{mm}\\), \\(12.1\\mathrm{mm}\\) and \\(12.9\\mathrm{mm}\\) of error drop, respectively, proving the advances of involving the structure information." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In addition to the proposed \\(\\mathrm{SPE}_1\\), we explore three other positional embedding functions, i.e., Absolute Positional Embedding (APE), Centrality Positional Embedding" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4796" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.09, + 0.47, + 0.117 + ], + "angle": 0, + "content": "Table 5. Performance contribution of each component in the proposed STCFormer on Human3.6M dataset." + }, + { + "type": "table", + "bbox": [ + 0.107, + 0.121, + 0.44, + 0.217 + ], + "angle": 0, + "content": "
STCSPE1SPE2P1 (mm)
Spatial Attention#1275.5
Temporal Attention#267.6
STC only#357.0
+SPE1#456.4
+SPE2#544.9
+SPE#644.1
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.225, + 0.471, + 0.346 + ], + "angle": 0, + "content": "(CPE), and Symmetric Positional Embedding (SyPE). We refer the readers to read the supplementary materials for more details. In Table 6, we assess the performance impact of different positional embedding functions. In this experiment, the models take the estimated 2D poses by CPN of 9 frames as input. And the comparisons empirically show the superiority of the used \\(\\mathrm{SPE}_1\\) (48.3mm vs. 48.7mm, 49.9mm, and 49.2mm)." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.36, + 0.47, + 0.414 + ], + "angle": 0, + "content": "Table 6. The P1 error comparisons with different positional embedding functions on Human3.6M dataset. The \"Baseline\" denotes the STCFoermer without \\(\\mathrm{SPE}_1\\). The best result in each column is marked in red." + }, + { + "type": "table", + "bbox": [ + 0.199, + 0.418, + 0.35, + 0.497 + ], + "angle": 0, + "content": "
P1 (mm)
Baseline#148.7
+SPE1#248.3
+APE#348.9
+CPE#449.6
+SyPE#549.2
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.514, + 0.269, + 0.531 + ], + "angle": 0, + "content": "4.6. Qualitative Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.538, + 0.47, + 0.597 + ], + "angle": 0, + "content": "In this section, we validate our STCFoermer through attention visualization and 3D human pose estimation visualization. The examples are randomly selected from the evaluation set of Human3.6M." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.469, + 0.718 + ], + "angle": 0, + "content": "Attention visualization. We visualize the spatial attention map and temporal attention map from the last STC block of STCFormaler in Figure 5. As expected, the spatial attention map (Figure 5(a)) shows that our model learns different patterns between joints from the videos of different actions. Moreover, the temporal attention map in Figure 5(b) illustrates strong correlation across adjacent frames owing to the continuity of human actions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Result visualization on Human3.6M. Figure 6 showcases 3D human pose estimation results by STCFormaler and the recent transformer-based approaches including Strided-Former [22], MHFormer [23] and P-STMO [39]. The three examples are randomly selected from the walking, posing and sitting actions in Human3.6M dataset. For each method, we draw the estimated 3D human pose and the ground-truth 3D coordinates in one figure, and calculate the average error. Overall, our STCFormaler shows better reconstruction results across all three samples than the other three methods. Particularly, for the challenging action with complicated pose articulation like \"sitting\" (the third row), STC" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.692, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.718, + 0.09, + 0.895, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.243, + 0.893, + 0.285 + ], + "angle": 0, + "content": "Figure 5. Visualizations of attention maps from the spatial and temporal attention modules in STCFormer. The x-axis and y-axis correspond to the queries and the predicted outputs, respectively." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.302, + 0.891, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.492, + 0.892, + 0.563 + ], + "angle": 0, + "content": "Figure 6. Examples of 3D pose estimation by StridedFormer [22], MHFormer [23], P-STMO [39] and our STCFormer. The gray skeleton is the ground-truth 3D pose. Blue, orange and green skeletons represent the left part, right part and torso of the estimated human body, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.891, + 0.613 + ], + "angle": 0, + "content": "Former still estimates the 3D coordinates accurately and reconstructs the structurally plausible 3D pose." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.644, + 0.619, + 0.658 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.675, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We have presented Spatio-Temporal Criss-cross Transformer (STCFormer), which explores spatial correlation and temporal correlation in a two-pathway manner for 3D human pose estimation in videos. 
Particularly, STCFormer is built by stacking several STC blocks, each of which separates the joint features into two groups along the channel dimension, and models the spatial and temporal interactions on each group, respectively. By doing so, the receptive field of STC block is like a criss cross of spatial and temporal axes. Moreover, the STCFormer exploits the dynamic chain structure of human body to model local context, resulting in a new positional embedding function. The experiments conducted on two benchmarks demonstrate the effectiveness of STCFormer and good generalization ability compared to the state-of-the-art techniques." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4797" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.175, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Ankur Agarwal and Bill Triggs. Recovering 3d human pose from monocular images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 28(1):44-58, 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.173, + 0.47, + 0.214 + ], + "angle": 0, + "content": "[2] Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele. Pictorial structures revisited: People detection and articulated pose estimation. In CVPR, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.216, + 0.469, + 0.257 + ], + "angle": 0, + "content": "[3] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.259, + 0.469, + 0.315 + ], + "angle": 0, + "content": "[4] Yujun Cai, Liuhao Ge, Jun Liu, Jianfei Cai, Tat-Jen Cham, Junsong Yuan, and Nadia Magnenat Thalmann. Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In ICCV, 2019. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.317, + 0.469, + 0.356 + ], + "angle": 0, + "content": "[5] Ching-Hang Chen and Deva Ramanan. 3d human pose estimation= 2d pose estimation+ matching. In CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.36, + 0.469, + 0.428 + ], + "angle": 0, + "content": "[6] Tianlang Chen, Chen Fang, Xiaohui Shen, Yiheng Zhu, Zhili Chen, and Jiebo Luo. Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology, 32(1):198-209, 2021. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.431, + 0.469, + 0.471 + ], + "angle": 0, + "content": "[7] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In CVPR, 2018. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.474, + 0.469, + 0.543 + ], + "angle": 0, + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.545, + 0.469, + 0.599 + ], + "angle": 0, + "content": "[9] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. 
arXiv preprint arXiv:2210.06110, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.602, + 0.469, + 0.655 + ], + "angle": 0, + "content": "[10] Hao-Shu Fang, Yuanlu Xu, Wenguan Wang, Xiaobai Liu, and Song-Chun Zhu. Learning pose grammar to encode human body configuration for 3d pose estimation. In AAAI, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.659, + 0.469, + 0.714 + ], + "angle": 0, + "content": "[11] Nate Hagbi, Oriel Bergig, Jihad El-Sana, and Mark Billinghurst. Shape recognition and pose estimation for mobile augmented reality. IEEE Transactions on Visualization and Computer Graphics, 17(10):1369-1379, 2010. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.716, + 0.469, + 0.757 + ], + "angle": 0, + "content": "[12] Yanbin Hao, Hao Zhang, Chong-Wah Ngo, and Xiangnan He. Group contextualization for video recognition. arXiv preprint arXiv:2203.09694, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.759, + 0.469, + 0.827 + ], + "angle": 0, + "content": "[13] Mohammed Hassanin, Abdelwahed Khamiss, Mohammed Bennamoun, Farid Boussaid, and Ibrahim Radwan. Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387, 2022. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.83, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[14] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[15] Mir Rayat Imtiaz Hossain and James J Little. Exploiting temporal information for 3d human pose estimation. In ECCV, 2018. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[16] Wenbo Hu, Changgong Zhang, Fangneng Zhan, Lei Zhang, and Tien-Tsin Wong. Conditional directed graph convolution for 3d human pose estimation. In ACM MM, 2021. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.174 + ], + "angle": 0, + "content": "[17] Catalin Ionescu, Fuxin Li, and Cristian Sminchisescu. Latent structured models for human pose estimation. In ICCV, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[18] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.892, + 0.286 + ], + "angle": 0, + "content": "[19] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.289, + 0.892, + 0.328 + ], + "angle": 0, + "content": "[20] Branislav Kisacanin, Vladimir Pavlovic, and Thomas S Huang. Real-time vision for human-computer interaction. Springer Science & Business Media, 2005. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.331, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[21] Kyoungoh Lee, Inwooong Lee, and Sanghoon Lee. Propagating lstm: 3d pose estimation based on joint interdependency. In ECCV, 2018. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.426 + ], + "angle": 0, + "content": "[22] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia, 2022. 1, 2, 3, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.468 + ], + "angle": 0, + "content": "[23] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In CVPR, 2022. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.47, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[24] Yehao Li, Ting Yao, Yingwei Pan, and Tao Mei. Contextual transformer networks for visual recognition. IEEE Trans. on PAMI, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.512, + 0.892, + 0.551 + ], + "angle": 0, + "content": "[25] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.554, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[26] Ruixu Liu, Ju Shen, He Wang, Chen Chen, Sen-ching Cheung, and Vijayan Asari. Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In CVPR, 2020. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.609, + 0.892, + 0.649 + ], + "angle": 0, + "content": "[27] Zhenguang Liu, Pengxiang Su, Shuang Wu, Xuanjing Shen, Haipeng Chen, Yanbin Hao, and Meng Wang. Motion prediction using trajectory cues. In ICCV, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.651, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[28] Zhenguang Liu, Shuang Wu, Shuyuan Jin, Qi Liu, Shouling Ji, Shijian Lu, and Li Cheng. Investigating pose representations and motion contexts modeling for 3d motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.72, + 0.892, + 0.76 + ], + "angle": 0, + "content": "[29] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Jiebo Luo, and Tao Mei. Stand-alone inter-frame attention in video models. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.763, + 0.892, + 0.801 + ], + "angle": 0, + "content": "[30] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Chong-Wah Ngo, and Tao Mei. Dynamic temporal filtering in video models. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[31] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In ICCV, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[32] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, 2017. 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4798" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.133 + ], + "angle": 0, + "content": "[33] Alejandro Newell, Kaiyu Yang, and Jia Deng. 
Stacked hourglass networks for human pose estimation. In ECCV, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[34] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3d human pose. In CVPR, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.179, + 0.469, + 0.233 + ], + "angle": 0, + "content": "[35] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In CVPR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.235, + 0.469, + 0.275 + ], + "angle": 0, + "content": "[36] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In ICCV, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.277, + 0.469, + 0.317 + ], + "angle": 0, + "content": "[37] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, and Tao Mei. Mlp-3d: A mlp-like 3d architecture with grouped time mixing. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.32, + 0.469, + 0.361 + ], + "angle": 0, + "content": "[38] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, Xinmei Tian, and Tao Mei. Learning spatio-temporal representation with local and global diffusion. In CVPR, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.363, + 0.469, + 0.417 + ], + "angle": 0, + "content": "[39] Wenkang Shan, Zhenhua Liu, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. arXiv preprint arXiv:2203.07628, 2022. 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.42, + 0.469, + 0.473 + ], + "angle": 0, + "content": "[40] Wenkang Shan, Haopeng Lu, Shanshe Wang, Xinfeng Zhang, and Wen Gao. Improving robustness and accuracy via relative information encoding in 3d human pose estimation. In ACM MM, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.476, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[41] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.518, + 0.469, + 0.546 + ], + "angle": 0, + "content": "[42] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.548, + 0.469, + 0.588 + ], + "angle": 0, + "content": "[43] Mikael Svenstrup, Soren Tranberg, Hans Jorgen Andersen, and Thomas Bak. Pose estimation and adaptive robot behaviour for human-robot interaction. In ICRA, 2009. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.59, + 0.469, + 0.644 + ], + "angle": 0, + "content": "[44] Zhenhua Tang, Jia Li, Yanbin Hao, and Richang Hong. Mlp-jcg: Multi-layer perceptron with joint-coordinate gating for efficient 3d human pose estimation. IEEE Transactions on Multimedia, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.647, + 0.469, + 0.687 + ], + "angle": 0, + "content": "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017. 
1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.689, + 0.469, + 0.73 + ], + "angle": 0, + "content": "[46] Jingbo Wang, Sijie Yan, Yuanjun Xiong, and Dahua Lin. Motion guided 3d pose estimation from videos. In ECCV, 2020. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.733, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[47] Tianhan Xu and Wataru Takano. Graph stacked hourglass networks for 3d human pose estimation. In CVPR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.761, + 0.469, + 0.815 + ], + "angle": 0, + "content": "[48] Youze Xue, Jiansheng Chen, Xiangming Gu, Huimin Ma, and Hongbing Ma. Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing, 31, 2022. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.817, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[49] Ting Yao, Yingwei Pan, Yehao Li, Chong-Wah Ngo, and Tao Mei. Wave-vit: Unifying wavelet and transformers for visual representation learning. In ECCV, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.86, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[50] Hao Zhang, Lechao Cheng, Yanbin Hao, and Chong-wah Ngo. Long-term leap attention, short-term periodic shift for video classification. In ACM MM, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.119 + ], + "angle": 0, + "content": "[51] Hao Zhang, Yanbin Hao, and Chong-Wah Ngo. Token shift transformer for video classification. In ACM MM, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[52] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Junsong Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. arXiv preprint arXiv:2203.00859, 2022. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[53] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.222, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[54] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In ICCV, 2021. 
1, 2, 5, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4799" + } + ] +] \ No newline at end of file diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_origin.pdf b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f9d2dac45a9976756d26b508267917df4ce7cd04 --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/54678f96-220e-4220-837c-0b75958caa1b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bdb1bde47a6d3a19788257f56ec852b6f00c6a6b86e025124b786ff54d75118 +size 3637533 diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/full.md b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/full.md new file mode 100644 index 0000000000000000000000000000000000000000..618573054cd4c0fedf2e2fc68a29f387dfcb1c0c --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/full.md @@ -0,0 +1,335 @@ +# 3D Human Pose Estimation with Spatio-Temporal Criss-cross Attention* + +Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, Ting Yao +Hefei University of Technology, Anhui, China HiDream.ai Inc +University of Science and Technology of China, Anhui, China + +zhenhuat@foxmail.com, zhaofanqiu@gmail.com, haoyanbin@hotmail.com + +hongrc.hfut@gmail.com, tingyao.ustc@gmail.com + +# Abstract + +Recent transformer-based solutions have shown great success in 3D human pose estimation. Nevertheless, to calculate the joint-to-joint affinity matrix, the computational cost has a quadratic growth with the increasing number of joints. Such drawback becomes even worse especially for pose estimation in a video sequence, which necessitates spatio-temporal correlation spanning over the entire video. In this paper, we facilitate the issue by decomposing correlation learning into space and time, and present a novel Spatio-Temporal Criss-cross attention (STC) block. Technically, STC first slices its input feature into two partitions evenly along the channel dimension, followed by performing spatial and temporal attention respectively on each partition. STC then models the interactions between joints in an identical frame and joints in an identical trajectory simultaneously by concatenating the outputs from attention layers. On this basis, we devise STCFoer by stacking multiple STC blocks and further integrate a new Structure-enhanced Positional Embedding (SPE) into STCFoer to take the structure of human body into consideration. The embedding function consists of two components: spatio-temporal convolution around neighboring joints to capture local structure, and part-aware embedding to indicate which part each joint belongs to. Extensive experiments are conducted on Human3.6M and MPI-INF-3DHP benchmarks, and superior results are reported when comparing to the state-of-the-art approaches. More remarkably, STCFoer achieves to-date the best published performance: $40.5\mathrm{mm}$ P1 error on the challenging Human3.6M dataset. + +# 1. 
Introduction + +3D human pose estimation has attracted intensive research attention in CV community due to its great poten + +![](images/4f0d200e406775b9281a28760a9bbd17664497eecc4975874d69b3601a198ab0.jpg) +Figure 1. Modeling spatio-temporal correlation for 3D human pose estimation by (a) utilizing spatio-temporal attention on all joints in the entire video, (b) separating the framework into two steps that respectively capture spatial and temporal context, and (c) our Spatio-Temporal Criss-cross attention (STC), i.e., a two-pathway block that models spatial and temporal information in parallel. In the visualization of receptive field, the covered joints of each attention strategy is marked as red nodes. + +tial in numerous applications such as human-robot interaction [20, 43], virtual reality [11] and motion prediction [27, 28]. The typical monocular solution is a two-stage pipeline, which first extracts 2D keypoints by 2D human pose detectors (e.g., [7] and [41]), and then lifts 2D coordinates into 3D space [31]. Despite its simplicity, the second stage is an ill-posed problem which lacks the depth prior, and suffers from the ambiguity problem. + +To mitigate this issue, several progresses propose to aggregate the temporal cues in a video sequence to promote pose estimation by grid convolutions [15,26,35], graph convolutions [4, 47] and multi-layer perceptrons [6, 21]. Recently, Transformer structure has emerged as a dominant architecture in both NLP and CV fields [8,24,45,49], and also demonstrated high capability in modeling spatio-temporal correlation for 3D human pose estimation [13, 22, 23, 25, 48, 52, 54]. Figure 1(a) illustrates a straightforward way to exploit the transformer architecture for directly learning spatio-temporal correlation between all joints in the entire video sequence. However, the computational cost of calcu + +lating the joint-to-joint affinity matrix in the self-attention has a quadratic growth along the increase of number of frames, making such solution unpractical for model training. As a result, most transformer structures employ a two-step alternative, as shown in Figure 1(b), which encodes spatial information for each frame first and then aggregates the feature sequence by temporal transformer. Note that we take spatial transformer as the frame encoder as an example in the figure. This strategy basically mines the correlation across frame-level features but seldom explores the relation between joints across different frames. + +In this paper, we propose a novel two-pathway attention mechanism, namely Spatio-Temporal Criss-cross attention (STC), that models spatial and temporal information in parallel, as depicted in Figure 1(c). Concretely, STC first slices the input joint features into two partitions evenly with respect to the channel dimension. On each partition, a Multihead Self-Attention (MSA) is implemented to encapsulate the context along space or time axis. In between, the space pathway computes the affinity between joints in each frame independently, and the time pathway correlates the identical joint moving across different frames, i.e., the trajectory. Then, STC recombines the learnt contexts from two pathways, and mixes the information across channels by MultiLayer Perceptrons (MLP). By doing so, the receptive field is like a criss cross of spatial and temporal axes, and the computational cost is $\mathcal{O}(T^2 S) + \mathcal{O}(TS^2)$ . 
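For a rough sense of scale, consider $T = 243$ frames and $S = 17$ joints (the Human3.6M skeleton used later): the criss-cross attention computes $T^{2}S + TS^{2} = 243^{2} \times 17 + 243 \times 17^{2} \approx 1.1 \times 10^{6}$ affinity entries per block, whereas attending over all joint-frame pairs would require $(TS)^{2} = (243 \times 17)^{2} \approx 1.7 \times 10^{7}$, roughly a $16\times$ reduction.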
That is much lower than $\mathcal{O}(T^2 S^2)$ of fully spatio-temporal attention, where $T$ and $S$ denote the number of frames and joints, respectively. + +By stacking multiple STC blocks, we devise a new architecture — STCFormer for 3D human pose estimation. Furthermore, we delve into the crucial design of positional embedding in STCFormer in the context of pose estimation. The observations that joints in the same body part are either highly relevant (static part) or not relevant but containing moving patterns (dynamic part) motivate us to design a new Structure-enhanced Positional Embedding (SPE). SPE consists of two embedding functions for the static and dynamic part, respectively. A part-aware embedding is to describe the static part by indicating which part each joint belongs to, and a spatio-temporal convolution around neighboring joints aims to capture dynamic structure in local window. + +We summarize the main contributions of this work as follows. First, STC is a new type of decomposed spatiotemporal attention for 3D human pose estimation in an economic and effective way. Second, STCFormaler is a novel transformer architecture by stacking multiple STC blocks and integrating the structure-enhanced positional embedding. Extensive experiments conducted on Human3.6M and MPI-INF-3DHP datasets demonstrate that STCFormaler with much less parameters achieves superior performances than the state-of-the-art techniques. + +# 2. Related Work + +Monocular 3D human pose estimation. Monocular 3D human pose estimation is to re-localize human body joints in 3D space from the input single view 2D data, i.e., image or 2D coordinates. The early works [1, 2, 17] develop various graphical or restrictive methods to explore the dependencies of human skeleton and perspective relationships across spaces. With the development of deep learning, several deep neural networks [5, 10, 19, 31, 34, 42, 44, 53] are devised for 3D human pose estimation, and can be categorized into one-stage and two-stage directions. The one-stage approaches directly regress the 3D pose from the input image, and necessitate a large number of image-pose paired data and powerful computing resources [19, 34, 42]. The two-stage methods first exploit off-the-shelf 2D pose detectors [7, 33, 41] to estimate 2D joint coordinates, and then lift the 2D coordinates into 3D space by the fully-connected network [31], grid convolutional network [5], recurrent neural network [10], or graph convolutional network [53]. Although the two-stage methods alleviate the requirement of image-pose pairs, they still heavily suffer from the depth ambiguities problem, which is intrinsically ill-posed due to the lack of depth information. + +3D pose estimation from video sequence. To overcome the limitation of depth ambiguities, the advances involve temporal context from neighboring frames to improve 3D coordinates regression. For example, Pavllo et al. [35] propose a temporal fully-convolutional network (TCN) to model the local context by convoluting the neighboring frames. Later, Liu et al. [26] extend the TCN by introducing an attention mechanism to adaptively identify the significant frames/poses over a sequence. After that, Chen et al. [6] decompose the pose estimation into bone length and bone direction prediction. Instead of the aforementioned methods based on temporal aggregation, latter works [4, 16, 46] utilize the spatio-temporal graph convolutional network to model the spatial and temporal correlations across joints simultaneously. 
+ +Transformer-based methods. In addition to the traditional convolutional networks, transformer architectures are also be exploited to model spatio-temporal correlation [13,22,23,29,30,37,50,51,54]. In particular, Zheng et al. [54] design a concatenation architecture of several spatial transformer encoders and temporal transformer encoders in PoseFormer. MHFormer [23] proposes to generate multiple hypothesis representations for a pose with the spatial transformer encoder and then model multi-level global correlations with different temporal transformer blocks. StridedFormer [22] and CrossFormer [13] introduce locality by integrating the 1D temporal convolution and 1D spatial convolution, respectively. More recently, the joint-wise inconsistency of motion patterns is highlighted in [48, 52], and encourages to model spatial and temporal information si + +multaneously. PATA [48] groups the joints with similar motion patterns and calculates the intra-part temporal correlation. Similarly, MixSTE [52] uses multiple separated spatial transformer blocks and temporal transformer blocks to model the spatial and temporal correlation iteratively. + +Our work also falls into the category of transformer-based method for 3D human pose estimation. The aforementioned transformers mainly model spatial and temporal information respectively in different stages of the networks. In view that the joint motion is a state of coexistence of space and time, such separation may result in insufficient learning of moving patterns. In contrast, our STC block is a two-pathway design that models spatial and temporal dependencies in parallel, which are then mixed through MLP. Moreover, a new positional embedding function is deliberately devised to explore the local structure of human body. + +# 3. Spatio-Temporal Criss-cross Transformer + +# 3.1. Preliminary - Transformer + +We begin this section by reviewing the transformer architecture [45] as the basis of our proposal. Transformer is a versatile representation learning architecture, and mainly consists of two components: Multi-head Self-Attention module (MSA) and Feed-Forward Network (FFN). MSA calculates the token-to-token affinity matrix and propagates the information across different tokens. Formally, given $N$ input tokens with $C$ channels, MSA is formulated as + +$$ +M S A (\mathbf {Q}, \mathbf {K}, \mathbf {V}) = \operatorname {S o f t m a x} \left(\frac {\mathbf {Q} \cdot \mathbf {K} ^ {T}}{\sqrt {C}}\right) \cdot \mathbf {V}, \tag {1} +$$ + +where $\mathbf{Q},\mathbf{K},\mathbf{V}\in \mathbb{R}^{N\times C}$ denote the queries, keys and values obtained by linearly mapping the input tokens. Note that we omit the multi-head separation here for simplicity. FFN contains a Multi-Layer Perceptrons (MLP), i.e., a nonlinear mapping with two linear layer plus a GELU [14] activation in between. The output of MLP is computed by + +$$ +M L P (\mathbf {H}) = G E L U \left(\mathbf {H} \cdot \mathbf {W} _ {\mathbf {1}}\right) \cdot \mathbf {W} _ {\mathbf {2}}, \tag {2} +$$ + +where $\mathbf{H} \in \mathbb{R}^{N \times C}$ is the input tokens of MLP, $\mathbf{W}_1 \in \mathbb{R}^{C \times \hat{C}}$ and $\mathbf{W}_2 \in \mathbb{R}^{\hat{C} \times C}$ are the projection matrices. 
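As a point of reference, Eq.(1) and Eq.(2) can be sketched in a few lines of PyTorch. This is a minimal single-head version that omits the multi-head split, the query/key/value projections and dropout; the function and variable names are ours for illustration, not from a released implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def msa(Q, K, V):
    # Eq.(1): token-to-token affinity (softmax over keys), then value aggregation.
    C = Q.shape[-1]
    attn = F.softmax(Q @ K.transpose(-2, -1) / C ** 0.5, dim=-1)  # (N, N) affinity matrix
    return attn @ V

class MLP(nn.Module):
    # Eq.(2): two linear projections with a GELU activation in between.
    def __init__(self, C, C_hidden):
        super().__init__()
        self.w1 = nn.Linear(C, C_hidden, bias=False)
        self.w2 = nn.Linear(C_hidden, C, bias=False)

    def forward(self, H):
        return self.w2(F.gelu(self.w1(H)))
```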
With these, each transformer block is constructed by utilizing MSA and MLP in order with shortcut connection: + +$$ +\mathbf {Q}, \mathbf {K}, \mathbf {V} = F C (L N (\mathbf {X})) , +$$ + +$$ +\mathbf {Y} = M S A (\mathbf {Q}, \mathbf {K}, \mathbf {V}) + \mathbf {X}, \tag {3} +$$ + +$$ +\mathbf {Z} = M L P (L N (\mathbf {Y})) + \mathbf {Y}, +$$ + +where $FC$ is linear projection of the input tokens $\mathbf{X}$ , and $LN$ denotes Layer Norm [3]. The output $\mathbf{Z}$ serves as the input to the next block until the last one. + +![](images/f7ade926fdf0c5aedbee014565be6bfb3d06ec7afe0ac6780cc16ce8e6d2b1fb.jpg) +a) +Figure 2. An overview of our proposed Spatio-Temporal Crisscross Transformer (STCFormer). (a) It mainly consists of $L$ sequential STC blocks. Each block aggregates the context across tokens by spatio-temporal criss-cross attention, and non-linearly maps each token by Multi-Layer Perceptrons (MLP). (b) The architecture of our STC block and the Structure-enhanced Positional Embedding (SPE). + +![](images/0df3cf8ca38bfb41dcf4c90200521e885db71c17c0ec70edef9fc902334e6302.jpg) +(b) + +# 3.2. Overall Architecture + +Figure 2 depicts an overview of the proposed STC-Former, which mainly includes three stages: a joint-based embedding, stacked STC blocks and a regression head. The joint-based embedding projects the input 2D coordinates of each joint into feature space. STC blocks aggregate the spatio-temporal context, and update the representation of each joint. Based on the learnt features, the 3D coordinates are estimated by a regression head. + +Joint-based embedding. Given a 2D pose sequence as $\mathbf{P}_{2D} \in \mathbb{R}^{T \times N \times 2}$ , where $T$ and $N$ denote the number of frames and the number of body joints in each frame, respectively, we first project $\mathbf{P}_{2D}$ to high-dimensional embeddings by a joint-based embedding layer. This layer applies an FC layer to each 2D coordinate independently followed by a GELU activation. As such, the joint-based embedding layer produces the features with the shape of $T \times N \times C$ . Note that in the previous transformer [22], the embedding layer projects all joint coordinates in each frame into a single vector, reducing the computational cost of the subsequent transformer blocks while losing the spatial discrimination. Ours is different in that the spatial dimension $N$ is maintained, and the computational cost is also reduced by spatio-temporal criss-cross attention. + +STC blocks. The STC block originates from the transformer block in Eq.(3), and replaces the original MSA layer with spatio-temporal criss-cross attention. In addition, a new positional embedding function, i.e., Structure-enhanced Positional Embedding (SPE), is integrated into the STC block for better descriptive capability of local structures. Section 3.3 and Section 3.4 will elaborate STC and SPE, respectively. + +Regression head. A liner regression head is finally established upon the STC blocks to estimate the 3D pose coordinates $\hat{\mathbf{P}}_{3D} \in \mathbb{R}^{T \times N \times 3}$ . The whole architecture is optimized by minimizing the Mean Squared Error (MSE) between $\hat{\mathbf{P}}_{3D}$ and the ground-truth 3D coordinates $\mathbf{P}_{3D}$ as + +$$ +\mathcal {L} = \left\| \hat {\mathbf {P}} _ {3 D} - \mathbf {P} _ {3 D} \right\| ^ {2}. \tag {4} +$$ + +# 3.3. 
Spatio-Temporal Criss-cross Attention + +STC aims to model the spatio-temporal dependencies between joints in an efficient way to avoid the quadratic computation cost of fully spatio-temporal attention. Inspired by the group contextualization strategy [12] which separates the channels into several paralleled groups and applies different feature contextualization operations to them respectively, we propose to capture the spatial and temporal context on different channels in parallel. Different from the axial convolution in [12,36,38], we exploit axis-specific multihead self-attention in STC for spatial or temporal context, which is more powerful for correlation learning. + +Concretely, the input embedding $\mathbf{X} \in \mathbb{R}^{T \times N \times C}$ are firstly mapped to queries $\mathbf{Q} \in \mathbb{R}^{T \times N \times C}$ , keys $\mathbf{K} \in \mathbb{R}^{T \times N \times C}$ , and values $\mathbf{V} \in \mathbb{R}^{T \times N \times C}$ , which are then evenly divided into two groups along the channel dimension. For notation clarity, we denote the divided feature matrix as time group $\{\mathbf{Q}_T, \mathbf{K}_T, \mathbf{V}_T\}$ and space group $\{\mathbf{Q}_S, \mathbf{K}_S, \mathbf{V}_S\}$ . Next, the temporal and spatial correlations are calculated in two separate self-attention modules. + +Temporal correlation represents the relation between the joints in an identical trajectory moving across different frames. To achieve this, we implement an axis-specific MSA, named $MSA_{T}$ , which computes the attention affinities in Eq.(1) between joints across the temporal dimension. Hence, the output of temporal attention is measured as + +$$ +\mathbf {H} _ {\mathbf {T}} = M S A _ {T} \left(\mathbf {Q} _ {\mathbf {T}}, \mathbf {K} _ {\mathbf {T}}, \mathbf {V} _ {\mathbf {T}}\right). \tag {5} +$$ + +Spatial correlation is the connection between joints in an identical frame. These joints indicate different body parts in one frame, which are intrinsically relevant due to the prior of body skeleton. Similar to temporal attention, we devise $MSAS_{S}$ as an axis-specific MSA component on spatial dimension. Therefore, the output of spatial attention is formulated as + +$$ +\mathbf {H} _ {\mathbf {S}} = M S A _ {S} \left(\mathbf {Q} _ {\mathbf {S}}, \mathbf {K} _ {\mathbf {S}}, \mathbf {V} _ {\mathbf {S}}\right). \tag {6} +$$ + +The above two correlation modules process in parallel and follow the self-attention regime for feature contextualization. They compute the token-to-token affinities by contextualizing from a specific axial perspective, and complement to each other. Thus, we concatenate the outputs from both attention layers along the channel dimension: + +$$ +\mathbf {H} = c a t \left(\mathbf {H} _ {\mathbf {T}}, \mathbf {H} _ {\mathbf {S}}\right), \tag {7} +$$ + +![](images/b0dde1ddefd622e779fec833910527d81fed9daa5fd23846e6298ad8727f8300.jpg) +Figure 3. (a) The coefficient matrix of the motion trajectory of different joints. (b) The body joints are divided into five parts, denoted as $g_{*}$ . The part with high/low relevance is colored as light/dark blue, respectively. The motion data is generated by actor S6 performing greeting action in the training set of Human3.6M. + +where $cat$ performs the concatenation. The resultant receptive field of STC is like a criss cross of spatial and temporal axes, and stacking multiple STC blocks is able to approximate the fully spatio-temporal attention. + +# 3.4. 
Structure-enhanced Positional Embedding

One of the crucial factors in a transformer is the positional embedding, which indicates the position of each token absolutely or relatively. For the positional embedding function in STCFormer, we delve into an inherent property of the joints, i.e., the local structure, and propose the Structure-enhanced Positional Embedding (SPE). Figure 3 depicts the motivation of SPE. Here, we group the body joints into five parts according to the dynamic chain structure of the human body:

$$
\begin{array}{l}
g_{0} = \{\text{hip}, \text{spine}, \text{thorax}, \text{neck}, \text{head}\} \\
g_{1} = \{\text{right\_hip}, \text{right\_knee}, \text{right\_feet}\} \\
g_{2} = \{\text{left\_hip}, \text{left\_knee}, \text{left\_feet}\} \\
g_{3} = \{\text{right\_shoulder}, \text{right\_elbow}, \text{right\_wrist}\} \\
g_{4} = \{\text{left\_shoulder}, \text{left\_elbow}, \text{left\_wrist}\}
\end{array} \tag{8}
$$

The trajectories of joints in a static part ($g_0$, $g_3$ and $g_4$ in the figure) are highly relevant. We devise a part-aware positional embedding to indicate which part each joint belongs to, and the joints in the same part are attached with the same embedding vector. In particular, a learnable dictionary is constructed to assign an embedding vector to each joint according to its group index. Given the group indices $\mathbf{g} \in [0,1,2,3,4]^{T \times N}$ of the joints, the learnable dictionary $\mathbf{D} \in \mathbb{R}^{5 \times \frac{C}{2}}$ converts the indices to embedding vectors as

$$
\mathbf{SPE}_{1} = \mathbf{D}(\mathbf{g}). \tag{9}
$$

Nevertheless, in a dynamic part, i.e., a part with relative movements ($g_{1}$ and $g_{2}$ in the figure), the trajectories of the joints are not relevant. Simply assigning the same embedding vector to these joints ignores the motion patterns in the dynamic part. Hence, we propose to exploit a spatio-temporal convolution around the neighboring joints to capture the local structure. Formally, given the values $\mathbf{V} \in \mathbb{R}^{T \times N \times \frac{C}{2}}$ in the STC block, we treat $\mathbf{V}$ as a 2D (i.e., space and time) feature map, and utilize a 2D convolution on the neighboring joints:

$$
\mathbf{SPE}_{2}(\mathbf{V}) = \operatorname{conv2d}(\mathbf{V}), \tag{10}
$$

where $conv2d$ is a $3 \times 3$ convolution operation. Although the two SPE functions are designed for the static parts and the dynamic parts, respectively, we apply both functions concurrently on all joints, leaving out the requirement of a static/dynamic judgment. The duet of the two SPE functions is able to deal with parts exhibiting various moving patterns.

Algorithm 1 Pseudo-code of STC with SPE (PyTorch-like)
```python
# x: input tensor of shape (B, T, N, C)
# p: part index of shape (B, T, N), values in [0, 4]
# MSA: axis-specific multi-head self-attention applied along `dim`
self.linear = nn.Linear(C, 3 * C)
self.embedding1 = nn.Embedding(5, C // 2)
# channel-last 3x3 convolution over the (T, N) grid (a permute to NCHW is implied)
self.embedding2 = nn.Conv2d(C // 2, C // 2, kernel_size=3, padding=1, groups=C // 2)

def STC(self, x, p):
    Q, K, V = self.linear(x).chunk(3, dim=3)
    Q_t, Q_s = Q.chunk(2, dim=3)
    K_t, K_s = K.chunk(2, dim=3)
    V_t, V_s = V.chunk(2, dim=3)
    H_t = MSA(Q_t, K_t, V_t, dim=1)  # temporal attention, Eq.(5)
    H_s = MSA(Q_s, K_s, V_s, dim=2)  # spatial attention, Eq.(6)
    H_t = H_t + self.embedding1(p) + self.embedding2(V_t)  # add SPE, Eq.(11)
    H_s = H_s + self.embedding1(p) + self.embedding2(V_s)
    H = torch.cat((H_t, H_s), dim=3)  # concatenate the two pathways, Eq.(7)
    return H
```
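To make the tensor shapes behind Eq.(9) and Eq.(10) concrete, the following self-contained snippet sketches the two SPE terms on dummy data. The joint count, channel width and layer names are illustrative assumptions for this sketch, not the released implementation:

```python
import torch
import torch.nn as nn

B, T, N, C = 2, 27, 17, 256                   # batch, frames, joints, channels (assumed sizes)
part = torch.randint(0, 5, (B, T, N))         # group index g in {0, ..., 4}, cf. Eq.(8)
V_half = torch.randn(B, T, N, C // 2)         # the values of one attention pathway

spe1 = nn.Embedding(5, C // 2)                # learnable dictionary D, Eq.(9)
spe2 = nn.Conv2d(C // 2, C // 2, kernel_size=3, padding=1, groups=C // 2)  # 3x3 conv, Eq.(10)

e1 = spe1(part)                                             # (B, T, N, C//2): one vector per part
e2 = spe2(V_half.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)   # conv over the (T, N) grid, same shape
print(e1.shape, e2.shape)                                   # both torch.Size([2, 27, 17, 128])
```

Both terms keep the $T \times N \times \frac{C}{2}$ layout of the pathway they are added to, so they can simply be summed with the attention output as in Eq.(11).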
+ +By injecting the proposed SPE function into STC, the equation of STC is reformulated as + +$$ +\begin{array}{l} \mathbf {H} _ {\mathbf {T}} = M S A _ {T} \left(\mathbf {Q} _ {\mathbf {T}}, \mathbf {K} _ {\mathbf {T}}, \mathbf {V} _ {\mathbf {T}}\right) + \mathbf {S P E} _ {1} + \mathbf {S P E} _ {2} (\mathbf {V} _ {\mathbf {T}}), \\ \mathbf {H} _ {\mathbf {S}} = M S A _ {S} \left(\mathbf {Q} _ {\mathbf {S}}, \mathbf {K} _ {\mathbf {S}}, \mathbf {V} _ {\mathbf {S}}\right) + \mathbf {S P E} _ {1} + \mathbf {S P E} _ {2} (\mathbf {V} _ {\mathbf {S}}), \tag {11} \\ \mathbf {H} = c a t \left(\mathbf {H} _ {\mathbf {T}}, \mathbf {H} _ {\mathbf {S}}\right). \\ \end{array} +$$ + +Implementation. The proposed STC plus SPE in Eq.(11) can be readily implemented with a few lines of codes in Python. We detail an example of the codes in Algorithm 1 based on PyTorch platform. Here, we execute the pre-defined MSA and MLP function in the standard transformer. The SPE is implemented by constructing the default Embedding layer and Conv2d layer. + +# 4. Experiments + +We comprehensively evaluate the proposed STCFoer architecture on two large-scale datasets, i.e., Human3.6M [18] and MPI-INF-3DHP [32]. + +# 4.1. Datasets and Evaluation Metrics + +Human3.6M is currently the most popular benchmark for indoor 3D human pose estimation, which contains 11 + +subjects performing 15 typical actions, leading to 3.6 million video frames in total. Following the standard protocol, we use subjects 1, 5, 6, 7, and 8 for training, and subjects 9 and 11 for evaluation. The Mean Per Joint Position Error (MPJPE) is used to measure the error under two protocols: Protocol 1 (referred to as P1) computes MPJPE between the estimated pose and the ground truth after aligning their root joints (hip); Protocol 2 (referred to as P2) calculates Procrustes-MPJPE, where the ground truth and the pose prediction are further aligned through a rigid transformation. We also compute the MPJPE distribution of pose to evaluate the overall precision of the reconstructed skeletons. MPI-INF-3DHP is a recently proposed large-scale dataset, which consists of three scenes, i.e., green screen, non-green screen, and outdoor. By using 14 cameras, the dataset records 8 actors performing 8 activities for the training set and 7 activities for evaluation. Following the previous works [6, 39, 54], we adopt the MPJPE (P1), percentage of correct keypoints (PCK) with $150\mathrm{mm}$ , and area under the curve (AUC) results as the evaluation metrics. + +# 4.2. Implementation Details + +Our model is implemented with PyTorch toolkit and runs on a server with one GTX 2080Ti GPU. In the experiments, two kinds of input 2D pose sequences are utilized including the pre-estimated 2D pose by the pre-trained CPN [7] and the real 2D pose (ground truth). For model training, we set each mini-batch as 128 sequences. The network parameters are optimized for 20 epochs by Adam optimizer with basic learning rate of 0.001 and decayed by 0.96 after each epoch. We consider the repeat time $L$ of modules, the hidden embedding channel $C$ , and the number of head $H$ in attention block as free parameters that we tailor to the scale of network. The performances of the standard version STCFormaler with $\{L = 6, C = 256, H = 8\}$ and the large version STCFormaler-L with $\{L = 6, C = 512, H = 8\}$ are both reported. + +# 4.3. Performance Comparison on Human3.6M + +We compare with several state-of-the-art techniques on Human3.6M dataset. 
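As a quick reference for the P1 and P2 numbers reported in the remainder of this section, the two protocols of Section 4.1 can be sketched as follows. This is a minimal NumPy illustration that assumes the root (hip) joint sits at index 0 and uses a simplified similarity alignment (the reflection case is ignored); it is not the official evaluation script:

```python
import numpy as np

def mpjpe(pred, gt):
    # Protocol 1 (P1): mean per-joint position error after aligning the root joints.
    pred = pred - pred[:1]
    gt = gt - gt[:1]
    return np.linalg.norm(pred - gt, axis=-1).mean()

def p_mpjpe(pred, gt):
    # Protocol 2 (P2): MPJPE after a rigid (Procrustes) alignment of the prediction to the ground truth.
    mu_p, mu_g = pred.mean(0, keepdims=True), gt.mean(0, keepdims=True)
    X, Y = pred - mu_p, gt - mu_g
    U, S, Vt = np.linalg.svd(X.T @ Y)       # 3x3 cross-covariance
    R = U @ Vt                              # optimal rotation (reflection correction omitted)
    scale = S.sum() / (X ** 2).sum()        # optimal scale
    return np.linalg.norm(scale * X @ R + mu_g - gt, axis=-1).mean()

pred = np.random.randn(17, 3)               # 17 estimated joints of one frame (toy values)
gt = np.random.randn(17, 3)
print(mpjpe(pred, gt), p_mpjpe(pred, gt))   # per-frame errors; report the mean over all frames
```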
Table 1 summarizes the performance comparisons in terms of P1 and P2 errors taking the pre-estimated 2D poses (CPN) as input, and the number of sampled frames T per video is also given for each method. In general, the longer input sequence leads to the lower regression error. Overall, STCFormer-L with $T = 243$ input frames achieves the new state-of-the-art performances with P1 error of $40.5\mathrm{mm}$ and P2 error of $31.8\mathrm{mm}$ . Benefiting from the proposed STC attention module, STCFormer-L outperforms StridedFormer [22], PATA [48] and MixSTE [52] with $T = 243$ frames, which are also based on transformer architecture, by the P1 error drop of $3.2\mathrm{mm}$ , $2.6\mathrm{mm}$ and $0.4\mathrm{mm}$ , respectively. Comparing to the best competitor + +Table 1. Performance comparisons in terms of P1 error (mm) and P2 error (mm) with the state-of-the-art methods on Human3.6M dataset. The 2D pose input is estimated by CPN [7]. The best result and runner-up result in each column are marked in red and blue, respectively. \* denotes the post-processing module proposed in [4]. $T$ is the number of sampled frames from each video. + +
| P1 | Publication | Dir. | Dis. | Eat. | Gre. | Phone | Photo | Pose | Purch. | Sit. | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Liu et al. [26] (T=243) | CVPR'20 | 41.8 | 44.8 | 41.1 | 44.9 | 47.4 | 54.1 | 43.4 | 42.2 | 56.2 | 63.6 | 45.3 | 43.5 | 45.3 | 31.3 | 32.2 | 45.1 |
| UGCN [46] (T=96)* | ECCV'20 | 40.2 | 42.5 | 42.6 | 41.1 | 46.7 | 56.7 | 41.4 | 42.3 | 56.2 | 60.4 | 46.3 | 42.2 | 46.2 | 31.7 | 31.0 | 44.5 |
| PoseFormer [54] (T=81) | ICCV'21 | 41.5 | 44.8 | 39.8 | 42.5 | 46.5 | 51.6 | 42.1 | 42.0 | 53.3 | 60.7 | 45.5 | 43.3 | 46.1 | 31.8 | 32.2 | 44.3 |
| Shan et al. [40] (T=243) | ACM MM'21 | 40.8 | 44.5 | 41.4 | 42.7 | 46.3 | 55.6 | 41.8 | 41.9 | 53.7 | 60.8 | 45.0 | 41.5 | 44.8 | 30.8 | 31.9 | 44.3 |
| Anatomy3D [6] (T=243) | TCSVT'21 | 41.4 | 43.5 | 40.1 | 42.9 | 46.6 | 51.9 | 41.7 | 42.3 | 53.9 | 60.2 | 45.4 | 41.7 | 46.0 | 31.5 | 32.7 | 44.1 |
| Einfalt et al. [9] (T=351)* | arXiv'22 | 39.6 | 43.8 | 40.2 | 42.4 | 46.5 | 53.9 | 42.3 | 42.5 | 55.7 | 62.3 | 45.1 | 43.0 | 44.7 | 30.1 | 30.8 | 44.2 |
| StridedFormer [22] (T=243)* | TMM'22 | 40.3 | 43.3 | 40.2 | 42.3 | 45.6 | 52.3 | 41.8 | 40.5 | 55.9 | 60.6 | 44.2 | 43.0 | 44.2 | 30.0 | 30.2 | 43.7 |
| CrossFormer [13] (T=81) | arXiv'22 | 40.7 | 44.1 | 40.8 | 41.5 | 45.8 | 52.8 | 41.2 | 40.8 | 55.3 | 61.9 | 44.9 | 41.8 | 44.6 | 29.2 | 31.1 | 43.7 |
| PATA [48] (T=243) | TIP'22 | 39.9 | 42.7 | 40.3 | 42.3 | 45.0 | 52.8 | 40.4 | 39.3 | 56.9 | 61.2 | 44.1 | 41.3 | 42.8 | 28.4 | 29.3 | 43.1 |
| MHFormer [23] (T=351) | CVPR'22 | 39.2 | 43.1 | 40.1 | 40.9 | 44.9 | 51.2 | 40.6 | 41.3 | 53.5 | 60.3 | 43.7 | 41.1 | 43.8 | 29.8 | 30.6 | 43.0 |
| P-STMO [39] (T=243) | ECCV'22 | 38.9 | 42.7 | 40.4 | 41.1 | 45.6 | 49.7 | 40.9 | 39.9 | 55.5 | 59.4 | 44.9 | 42.2 | 42.7 | 29.4 | 29.4 | 42.8 |
| MixSTE [52] (T=81) | CVPR'22 | 39.8 | 43.0 | 38.6 | 40.1 | 43.4 | 50.6 | 40.6 | 41.4 | 52.2 | 56.7 | 43.8 | 40.8 | 43.9 | 29.4 | 30.3 | 42.4 |
| MixSTE [52] (T=243) | CVPR'22 | 37.6 | 40.9 | 37.3 | 39.7 | 42.3 | 49.9 | 40.1 | 39.8 | 51.7 | 55.0 | 42.1 | 39.8 | 41.0 | 27.9 | 27.9 | 40.9 |
| STCFormer (T=81) | | 40.6 | 43.0 | 38.3 | 40.2 | 43.5 | 52.6 | 40.3 | 40.1 | 51.8 | 57.7 | 42.8 | 39.8 | 42.3 | 28.0 | 29.5 | 42.0 |
| STCFormer (T=243) | | 39.6 | 41.6 | 37.4 | 38.8 | 43.1 | 51.1 | 39.1 | 39.7 | 51.4 | 57.4 | 41.8 | 38.5 | 40.7 | 27.1 | 28.6 | 41.0 |
| STCFormer-L (T=243) | | 38.4 | 41.2 | 36.8 | 38.0 | 42.7 | 50.5 | 38.7 | 38.2 | 52.5 | 56.8 | 41.8 | 38.4 | 40.2 | 26.2 | 27.7 | 40.5 |

| P2 | Publication | Dir. | Dis. | Eat. | Gre. | Phone | Photo | Pose | Purch. | Sit. | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Liu et al. [26] (T=243) | CVPR'20 | 32.3 | 35.2 | 33.3 | 35.8 | 35.9 | 41.5 | 33.2 | 32.7 | 44.6 | 50.9 | 37.0 | 32.4 | 37.0 | 25.2 | 27.2 | 35.6 |
| UGCN [46] (T=96)* | ECCV'20 | 31.8 | 34.3 | 35.4 | 33.5 | 35.4 | 41.7 | 31.1 | 31.6 | 44.4 | 49.0 | 36.4 | 32.2 | 35.0 | 24.9 | 23.0 | 34.5 |
| PoseFormer [54] (T=81) | ICCV'21 | 34.1 | 36.1 | 34.4 | 37.2 | 36.4 | 42.2 | 34.4 | 33.6 | 45.0 | 52.5 | 37.4 | 33.8 | 37.8 | 25.6 | 27.3 | 36.5 |
| Shan et al. [40] (T=243) | ACM MM'21 | 32.5 | 36.2 | 33.2 | 35.3 | 35.6 | 42.1 | 32.6 | 31.9 | 42.6 | 47.9 | 36.6 | 32.1 | 34.8 | 24.2 | 25.8 | 35.0 |
| Anatomy3D [6] (T=243) | TCSVT'21 | 32.6 | 35.1 | 32.8 | 35.4 | 36.3 | 40.4 | 32.4 | 32.3 | 42.7 | 49.0 | 36.8 | 32.4 | 36.0 | 24.9 | 26.5 | 35.0 |
| Einfalt et al. [9] (T=351)* | arXiv'22 | 32.7 | 36.1 | 33.4 | 36.0 | 36.1 | 42.0 | 33.3 | 33.1 | 45.4 | 50.7 | 37.0 | 34.1 | 35.9 | 24.4 | 25.4 | 35.7 |
| StridedFormer [22] (T=243)* | TMM'22 | 32.7 | 35.5 | 32.5 | 35.4 | 35.9 | 41.6 | 33.0 | 31.9 | 45.1 | 50.1 | 36.3 | 33.5 | 35.1 | 23.9 | 25.0 | 35.2 |
| MHFormer [23] (T=351) | CVPR'22 | 31.5 | 34.9 | 32.8 | 33.6 | 35.3 | 39.6 | 32.0 | 32.2 | 43.5 | 48.7 | 36.4 | 32.6 | 34.3 | 23.9 | 25.1 | 34.4 |
| P-STMO [39] (T=243) | ECCV'22 | 31.3 | 35.2 | 32.9 | 33.9 | 35.4 | 39.3 | 32.5 | 31.5 | 44.6 | 48.2 | 36.3 | 32.9 | 34.4 | 23.8 | 23.9 | 34.4 |
| CrossFormer [13] (T=81) | arXiv'22 | 31.4 | 34.6 | 32.6 | 33.7 | 34.3 | 39.7 | 31.6 | 31.0 | 44.3 | 49.3 | 35.9 | 31.3 | 34.4 | 23.4 | 25.5 | 34.3 |
| PATA [48] (T=243) | TIP'22 | 31.2 | 34.1 | 31.9 | 33.8 | 33.9 | 39.5 | 31.6 | 30.0 | 45.4 | 48.1 | 35.0 | 31.1 | 33.5 | 22.4 | 23.6 | 33.7 |
| MixSTE [52] (T=81) | CVPR'22 | 32.0 | 34.2 | 31.7 | 33.7 | 34.4 | 39.2 | 32.0 | 31.8 | 42.9 | 46.9 | 35.5 | 32.0 | 34.4 | 23.6 | 25.2 | 33.9 |
| MixSTE [52] (T=243) | CVPR'22 | 30.8 | 33.1 | 30.3 | 31.8 | 33.1 | 39.1 | 31.1 | 30.5 | 42.5 | 44.5 | 34.0 | 30.8 | 32.7 | 22.1 | 22.9 | 32.6 |
| STCFormer (T=81) | | 30.4 | 33.8 | 31.1 | 31.7 | 33.5 | 39.5 | 30.8 | 30.0 | 41.8 | 45.8 | 34.3 | 30.1 | 32.8 | 21.9 | 23.4 | 32.7 |
| STCFormer (T=243) | | 29.5 | 33.2 | 30.6 | 31.0 | 33.0 | 38.0 | 30.4 | 29.4 | 41.8 | 45.2 | 33.6 | 29.5 | 31.6 | 21.3 | 22.6 | 32.0 |
| STCFormer-L (T=243) | | 29.3 | 33.0 | 30.7 | 30.6 | 32.7 | 38.2 | 29.7 | 28.8 | 42.2 | 45.0 | 33.3 | 29.4 | 31.5 | 20.9 | 22.3 | 31.8 |
+ +Table 2. Performance comparisons in terms of P1 error (mm) with the state-of-the-art methods on Human3.6M dataset. The models take the ground-truth 2D pose as input. The best result and runner-up result in each column are marked in red and blue, respectively. “*” denotes the post-processing module proposed in [4]. $T$ is the number of sampled frames from each video. + +
| P1 | Publication | Dir. | Dis. | Eat. | Gre. | Phone | Photo | Pose | Purch. | Sit. | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Liu et al. [26] (T=243) | CVPR'20 | 34.5 | 37.1 | 33.6 | 34.2 | 32.9 | 37.1 | 39.6 | 35.8 | 40.7 | 41.4 | 33.0 | 33.8 | 33.0 | 26.6 | 26.9 | 34.7 |
| PoseFormer [54] (T=81) | ICCV'21 | 30.0 | 33.6 | 29.9 | 31.0 | 30.2 | 33.3 | 34.8 | 31.4 | 37.8 | 38.6 | 31.7 | 31.5 | 29.0 | 23.3 | 23.1 | 31.3 |
| Shan et al. [40] (T=243) | ACM MM'21 | 29.5 | 30.8 | 28.8 | 29.1 | 30.7 | 35.2 | 31.7 | 27.8 | 34.5 | 36.0 | 30.3 | 29.4 | 28.9 | 24.1 | 24.7 | 30.1 |
| MHFormer [23] (T=351) | CVPR'22 | 27.7 | 32.1 | 29.1 | 28.9 | 30.0 | 33.9 | 33.0 | 31.2 | 37.0 | 39.3 | 30.0 | 31.0 | 29.4 | 22.2 | 23.0 | 30.5 |
| P-STMO [39] (T=243) | ECCV'22 | 28.5 | 30.1 | 28.6 | 27.9 | 29.8 | 33.2 | 31.3 | 27.8 | 36.0 | 37.4 | 29.7 | 29.5 | 28.1 | 21.0 | 21.0 | 29.3 |
| StridedFormer [22] (T=243)* | TMM'22 | 27.1 | 29.4 | 26.5 | 27.1 | 28.6 | 33.0 | 30.7 | 26.8 | 38.2 | 34.7 | 29.1 | 29.8 | 26.8 | 19.1 | 19.8 | 28.5 |
| CrossFormer [13] (T=81) | arXiv'22 | 26.0 | 30.0 | 26.8 | 26.2 | 28.0 | 31.0 | 30.4 | 29.6 | 35.4 | 37.1 | 28.4 | 27.3 | 26.7 | 20.5 | 19.9 | 28.3 |
| PATA [48] (T=243) | TIP'22 | 25.8 | 25.2 | 23.3 | 23.5 | 24.0 | 27.4 | 27.9 | 24.4 | 29.3 | 30.1 | 24.9 | 24.1 | 23.3 | 18.6 | 19.7 | 24.7 |
| MixSTE [52] (T=81) | CVPR'22 | 25.6 | 27.8 | 24.5 | 25.7 | 24.9 | 29.9 | 28.6 | 27.4 | 29.9 | 29.0 | 26.1 | 25.0 | 25.2 | 18.7 | 19.9 | 25.9 |
| MixSTE [52] (T=243) | CVPR'22 | 21.6 | 22.0 | 20.4 | 21.0 | 20.8 | 24.3 | 24.7 | 21.9 | 26.9 | 24.9 | 21.2 | 21.5 | 20.8 | 14.7 | 15.7 | 21.6 |
| STCFormer (T=81) | | 26.2 | 26.5 | 23.4 | 24.6 | 25.0 | 28.6 | 28.3 | 24.6 | 30.9 | 33.7 | 25.7 | 25.3 | 24.6 | 18.6 | 19.7 | 25.7 |
| STCFormer (T=81)* | | 25.9 | 25.9 | 22.7 | 24.0 | 24.6 | 27.5 | 27.6 | 23.1 | 30.1 | 31.5 | 25.1 | 24.7 | 23.8 | 18.4 | 19.6 | 25.0 |
| STCFormer (T=243) | | 21.4 | 22.6 | 21.0 | 21.3 | 23.8 | 26.0 | 24.2 | 20.0 | 28.9 | 28.0 | 22.3 | 21.4 | 20.1 | 14.2 | 15.0 | 22.0 |
| STCFormer (T=243)* | | 20.8 | 21.8 | 20.0 | 20.6 | 23.4 | 25.0 | 23.6 | 19.3 | 27.8 | 26.1 | 21.6 | 20.6 | 19.5 | 14.3 | 15.1 | 21.3 |
MixSTE [52], our STCFormer consistently obtains better precision across different numbers of input frames, and only demands around half of the parameters (18.9M vs. 33.6M). The results verify the advantages of STC attention as an economic and effective way to decompose the full spatio-temporal attention. More importantly, the STCFormer series reaches the to-date best reported performance in 10 out of 15 categories.

Table 2 further details the comparisons between STCFormer and the state-of-the-art models with the ground-truth 2D pose as input. This setting excludes the noise from 2D pose estimation, and measures the upper bound of 2D-to-3D lifting models. Accordingly, the P1 errors are obviously decreased across different methods by replacing the CPN-estimated 2D pose with the ground-truth 2D pose, but the performance trends are still similar. STCFormer with post-processing attains the best P1 error of $21.3\mathrm{mm}$, which is $0.3\mathrm{mm}$ lower than the best competitor MixSTE, validating the impact of STCFormer with different types of input.

In addition to the mean error, we also compare the error distribution of STCFormer and the baseline methods in Figure 4. In this experiment, the methods take the estimated 2D poses by CPN of 27 frames as input. Compared to the recent transformer-based approaches including StridedFormer [22], P-STMO [39], and MHFormer [23], our STCFormer leads to the highest number of samples with error

![](images/6da9641e3ca76d3e20113352b13c63f274d9ec12df88c637d044af5559dfd990.jpg)
Figure 4. Error distribution of the estimated 3D poses on Human3.6M. The horizontal axis represents the error interval, and the vertical axis is the proportion of poses with error in the interval.

less than $35\mathrm{mm}$, and the lowest number of those with error larger than $45\mathrm{mm}$. This again confirms the advantage of STCFormer: it not only obtains the lowest average error but also a better distribution across different ranges of error.

# 4.4. Performance Comparison on MPI-INF-3DHP

To verify the generalization of 3D pose estimation models, we then test the performance on the MPI-INF-3DHP dataset, which contains more complex backgrounds. Following previous works [23, 39, 52], the ground-truth 2D poses are taken as input. In view of the shorter video sequences, we set the number of input frames to 9, 27 or 81. Table 3 lists the performance comparisons. Similar to the observations on Human3.6M, our STCFormer with $T = 81$ reaches the to-date best reported performance with PCK of $98.7\%$, AUC of $83.9\%$ and P1 error of $23.1\mathrm{mm}$, outperforming the current state-of-the-art models by a large margin of $0.8\%$ in PCK, $8.1\%$ in AUC and $9.1\mathrm{mm}$ in P1 error. In particular, STCFormer shows better generalization ability and surpasses MixSTE [52] by a much larger P1 error drop ($31.8\mathrm{mm}$), compared with the $0.3\mathrm{mm}$ drop on Human3.6M. This highlights the efficacy of our method on the more complicated data.

# 4.5. Ablation Study

For a more in-depth analysis of our STCFormer, we further conduct a series of ablation studies on the Human3.6M dataset using the CPN-estimated 2D poses as input.

The first group of experiments is to verify how well our STCFormer works with different numbers of input frames. Table 4 shows the detailed comparisons in terms of P1 error. A general tendency is observed that increasing $T$ leads to monotonic performance improvement.
Among the competitive methods, our STCFormer consistently exhibits the best results across the 27-frame, 81-frame and 243-frame settings. The leading performances demonstrate the ability of STCFormer to deal with different lengths of video sequences. More remarkably, STCFormer-L has $43.7\%$ fewer parameters and requires $43.6\%$ fewer FLOPs than the runner-up MixSTE.

The second ablation study assesses the performance impact of different design components. In this experiment, the models take the estimated 2D poses by CPN of 27 frames

Table 3. Performance comparisons in terms of PCK, AUC and P1 with the state-of-the-art methods on MPI-INF-3DHP dataset. Here, higher PCK, higher AUC and lower P1 indicate better results. The best result in each column is marked in red. $T$ is the number of sampled frames from each video.
| Method | Publication | PCK ↑ | AUC ↑ | P1 (mm) ↓ |
| --- | --- | --- | --- | --- |
| UGCN [46] (T=96) | ECCV'20 | 86.9 | 62.1 | 68.1 |
| Anatomy3D [6] (T=81) | TCSVT'21 | 87.8 | 53.8 | 79.1 |
| PoseFormer [54] (T=9) | ICCV'21 | 88.6 | 56.4 | 77.1 |
| Hu et al. [16] (T=96) | ACM MM'21 | 97.9 | 69.5 | 42.5 |
| CrossFormer [13] (T=9) | arXiv'22 | 89.1 | 57.5 | 76.3 |
| PATA [48] (T=243) | TIP'22 | 90.3 | 57.8 | 69.4 |
| MHFormer [23] (T=9) | CVPR'22 | 93.8 | 63.3 | 58.0 |
| MixSTE [52] (T=27) | CVPR'22 | 94.4 | 66.5 | 54.9 |
| Einfalt et al. [9] (T=81) | arXiv'22 | 95.4 | 67.6 | 46.9 |
| P-STMO [39] (T=81) | ECCV'22 | 97.9 | 75.8 | 32.2 |
| STCFormer (T=9) | | 98.2 | 81.5 | 28.2 |
| STCFormer (T=27) | | 98.4 | 83.4 | 24.2 |
| STCFormer (T=81) | | 98.7 | 83.9 | 23.1 |
+ +Table 4. The P1 error comparisons with different number of sampled frame $(T)$ on Human3.6M dataset. The best result in each column is marked in red. + +
| Method | Frames T | Parameters | FLOPs (M) | P1 (mm) |
| --- | --- | --- | --- | --- |
| StridedFormer [22] | 27 | 4.01M | 163 | 46.9 |
| P-STMO [39] | 27 | 4.6M | 164 | 46.1 |
| MHFormer [23] | 27 | 18.92M | 1000 | 45.9 |
| MixSTE [52] | 27 | 33.61M | 15402 | 45.1 |
| STCFormer | 27 | 4.75M | 2173 | 44.1 |
| StridedFormer [22] | 81 | 4.06M | 392 | 45.4 |
| P-STMO [39] | 81 | 5.4M | 493 | 44.1 |
| MHFormer [23] | 81 | 19.67M | 1561 | 44.5 |
| MixSTE [52] | 81 | 33.61M | 46208 | 42.7 |
| STCFormer | 81 | 4.75M | 6520 | 42.0 |
| StridedFormer [22] | 243 | 4.23M | 1372 | 44.0 |
| P-STMO [39] | 243 | 6.7M | 1737 | 42.8 |
| MHFormer [23] | 243 | 24.72M | 4812 | 43.2 |
| MixSTE [52] | 243 | 33.61M | 138623 | 40.9 |
| STCFormer | 243 | 4.75M | 19561 | 41.0 |
| STCFormer-L | 243 | 18.91M | 78107 | 40.5 |
+ +as input. Spatial Attention and Temporal Attention solely exploit the spatial pathway and temporal pathway, respectively. STC only contains both pathways but without the positional embedding. $\mathbf{SPE}_1$ , $\mathbf{SPE}_2$ and $\mathbf{SPE}$ represent the two SPE positional embeddings and their combination, respectively. Table 5 details the contribution of each component towards the overall performance. STC only by considering both spatial and temporal correlations leads to the error drop over solely utilizing spatial attention and temporal attention by $218.5\mathrm{mm}$ and $10.6\mathrm{mm}$ , respectively. The result indicates the importance of modeling the correlations along two axes in parallel. The three positional embedding strategies, i.e., $\mathbf{SPE}_1$ , $\mathbf{SPE}_2$ and $\mathbf{SPE}$ , further contribute $0.6\mathrm{mm}$ , $12.1\mathrm{mm}$ and $12.9\mathrm{mm}$ of error drop, respectively, proving the advances of involving the structure information. + +In addition to the proposed $\mathrm{SPE}_1$ , we explore three other positional embedding functions, i.e., Absolute Positional Embedding (APE), Centrality Positional Embedding + +Table 5. Performance contribution of each component in the proposed STCFormer on Human3.6M dataset. + +
| | STC | SPE$_1$ | SPE$_2$ | P1 (mm) |
| --- | --- | --- | --- | --- |
| #1 Spatial Attention | | | | 275.5 |
| #2 Temporal Attention | | | | 67.6 |
| #3 STC only | ✓ | | | 57.0 |
| #4 +SPE$_1$ | ✓ | ✓ | | 56.4 |
| #5 +SPE$_2$ | ✓ | | ✓ | 44.9 |
| #6 +SPE | ✓ | ✓ | ✓ | 44.1 |
(CPE), and Symmetric Positional Embedding (SyPE). We refer readers to the supplementary materials for more details. In Table 6, we assess the performance impact of the different positional embedding functions. In this experiment, the models take the estimated 2D poses by CPN of 9 frames as input. The comparisons empirically show the superiority of the adopted $\mathrm{SPE}_1$ (48.3mm vs. 48.7mm, 49.9mm, and 49.2mm).

Table 6. The P1 error comparisons with different positional embedding functions on Human3.6M dataset. The "Baseline" denotes STCFormer without $\mathrm{SPE}_1$. The best result in each column is marked in red.
| Method | P1 (mm) |
| --- | --- |
| #1 Baseline | 48.7 |
| #2 +SPE$_1$ | 48.3 |
| #3 +APE | 48.9 |
| #4 +CPE | 49.6 |
| #5 +SyPE | 49.2 |
+ +# 4.6. Qualitative Analysis + +In this section, we validate our STCFoermer through attention visualization and 3D human pose estimation visualization. The examples are randomly selected from the evaluation set of Human3.6M. + +Attention visualization. We visualize the spatial attention map and temporal attention map from the last STC block of STCFormaler in Figure 5. As expected, the spatial attention map (Figure 5(a)) shows that our model learns different patterns between joints from the videos of different actions. Moreover, the temporal attention map in Figure 5(b) illustrates strong correlation across adjacent frames owing to the continuity of human actions. + +Result visualization on Human3.6M. Figure 6 showcases 3D human pose estimation results by STCFormaler and the recent transformer-based approaches including Strided-Former [22], MHFormer [23] and P-STMO [39]. The three examples are randomly selected from the walking, posing and sitting actions in Human3.6M dataset. For each method, we draw the estimated 3D human pose and the ground-truth 3D coordinates in one figure, and calculate the average error. Overall, our STCFormaler shows better reconstruction results across all three samples than the other three methods. Particularly, for the challenging action with complicated pose articulation like "sitting" (the third row), STC + +![](images/f4d7222c2cb3396df890ce6c3df23e8f1495e8fcaa74ec225a68a90150916539.jpg) +Figure 5. Visualizations of attention maps from the spatial and temporal attention modules in STCFormer. The x-axis and y-axis correspond to the queries and the predicted outputs, respectively. + +![](images/172d75ea0c43e4f480276dfa409191e3d6e591614cc92ad58b8def51ea5e8a16.jpg) + +![](images/02eecdb6fe905f7c695b7edd35b58239fff5340110c2acebbde9b879fe152920.jpg) +Figure 6. Examples of 3D pose estimation by StridedFormer [22], MHFormer [23], P-STMO [39] and our STCFormer. The gray skeleton is the ground-truth 3D pose. Blue, orange and green skeletons represent the left part, right part and torso of the estimated human body, respectively. + +Former still estimates the 3D coordinates accurately and reconstructs the structurally plausible 3D pose. + +# 5. Conclusion + +We have presented Spatio-Temporal Criss-cross Transformer (STCFormer), which explores spatial correlation and temporal correlation in a two-pathway manner for 3D human pose estimation in videos. Particularly, STCFormer is built by stacking several STC blocks, each of which separates the joint features into two groups along the channel dimension, and models the spatial and temporal interactions on each group, respectively. By doing so, the receptive field of STC block is like a criss cross of spatial and temporal axes. Moreover, the STCFormer exploits the dynamic chain structure of human body to model local context, resulting in a new positional embedding function. The experiments conducted on two benchmarks demonstrate the effectiveness of STCFormer and good generalization ability compared to the state-of-the-art techniques. + +# References + +[1] Ankur Agarwal and Bill Triggs. Recovering 3d human pose from monocular images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 28(1):44-58, 2005. 2 +[2] Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele. Pictorial structures revisited: People detection and articulated pose estimation. In CVPR, 2009. 2 +[3] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 
3 +[4] Yujun Cai, Liuhao Ge, Jun Liu, Jianfei Cai, Tat-Jen Cham, Junsong Yuan, and Nadia Magnenat Thalmann. Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In ICCV, 2019. 1, 2, 6 +[5] Ching-Hang Chen and Deva Ramanan. 3d human pose estimation= 2d pose estimation+ matching. In CVPR, 2017. 2 +[6] Tianlang Chen, Chen Fang, Xiaohui Shen, Yiheng Zhu, Zhili Chen, and Jiebo Luo. Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology, 32(1):198-209, 2021. 1, 2, 5, 6, 7 +[7] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In CVPR, 2018. 1, 2, 5, 6 +[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 1 +[9] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. arXiv preprint arXiv:2210.06110, 2022. 6, 7 +[10] Hao-Shu Fang, Yuanlu Xu, Wenguan Wang, Xiaobai Liu, and Song-Chun Zhu. Learning pose grammar to encode human body configuration for 3d pose estimation. In AAAI, 2018. 2 +[11] Nate Hagbi, Oriel Bergig, Jihad El-Sana, and Mark Billinghurst. Shape recognition and pose estimation for mobile augmented reality. IEEE Transactions on Visualization and Computer Graphics, 17(10):1369-1379, 2010. 1 +[12] Yanbin Hao, Hao Zhang, Chong-Wah Ngo, and Xiangnan He. Group contextualization for video recognition. arXiv preprint arXiv:2203.09694, 2022. 4 +[13] Mohammed Hassanin, Abdelwahed Khamiss, Mohammed Bennamoun, Farid Boussaid, and Ibrahim Radwan. Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387, 2022. 1, 2, 6, 7 +[14] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 3 +[15] Mir Rayat Imtiaz Hossain and James J Little. Exploiting temporal information for 3d human pose estimation. In ECCV, 2018. 1 + +[16] Wenbo Hu, Changgong Zhang, Fangneng Zhan, Lei Zhang, and Tien-Tsin Wong. Conditional directed graph convolution for 3d human pose estimation. In ACM MM, 2021. 2, 7 +[17] Catalin Ionescu, Fuxin Li, and Cristian Sminchisescu. Latent structured models for human pose estimation. In ICCV, 2011. 2 +[18] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2013. 5 +[19] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, 2018. 2 +[20] Branislav Kisacanin, Vladimir Pavlovic, and Thomas S Huang. Real-time vision for human-computer interaction. Springer Science & Business Media, 2005. 1 +[21] Kyoungoh Lee, Inwooong Lee, and Sanghoon Lee. Propagating lstm: 3d pose estimation based on joint interdependency. In ECCV, 2018. 1 +[22] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia, 2022. 1, 2, 3, 5, 6, 7, 8 +[23] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. 
Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In CVPR, 2022. 1, 2, 6, 7, 8 +[24] Yehao Li, Ting Yao, Yingwei Pan, and Tao Mei. Contextual transformer networks for visual recognition. IEEE Trans. on PAMI, 2022. 1 +[25] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, 2021. 1 +[26] Ruixu Liu, Ju Shen, He Wang, Chen Chen, Sen-ching Cheung, and Vijayan Asari. Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In CVPR, 2020. 1, 2, 6 +[27] Zhenguang Liu, Pengxiang Su, Shuang Wu, Xuanjing Shen, Haipeng Chen, Yanbin Hao, and Meng Wang. Motion prediction using trajectory cues. In ICCV, 2021. 1 +[28] Zhenguang Liu, Shuang Wu, Shuyuan Jin, Qi Liu, Shouling Ji, Shijian Lu, and Li Cheng. Investigating pose representations and motion contexts modeling for 3d motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1 +[29] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Jiebo Luo, and Tao Mei. Stand-alone inter-frame attention in video models. In CVPR, 2022. 2 +[30] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Chong-Wah Ngo, and Tao Mei. Dynamic temporal filtering in video models. In ECCV, 2022. 2 +[31] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In ICCV, 2017. 1, 2 +[32] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, 2017. 5 + +[33] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In ECCV, 2016. 2 +[34] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3d human pose. In CVPR, 2017. 2 +[35] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 3d human pose estimation in video with temporal convolutions and semi-supervised training. In CVPR, 2019. 1, 2 +[36] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In ICCV, 2017. 4 +[37] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, and Tao Mei. Mlp-3d: A mlp-like 3d architecture with grouped time mixing. In CVPR, 2022. 2 +[38] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, Xinmei Tian, and Tao Mei. Learning spatio-temporal representation with local and global diffusion. In CVPR, 2019. 4 +[39] Wenkang Shan, Zhenhua Liu, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. arXiv preprint arXiv:2203.07628, 2022. 5, 6, 7, 8 +[40] Wenkang Shan, Haopeng Lu, Shanshe Wang, Xinfeng Zhang, and Wen Gao. Improving robustness and accuracy via relative information encoding in 3d human pose estimation. In ACM MM, 2021. 6 +[41] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, 2019. 1, 2 +[42] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2 +[43] Mikael Svenstrup, Soren Tranberg, Hans Jorgen Andersen, and Thomas Bak. Pose estimation and adaptive robot behaviour for human-robot interaction. In ICRA, 2009. 1 +[44] Zhenhua Tang, Jia Li, Yanbin Hao, and Richang Hong. Mlp-jcg: Multi-layer perceptron with joint-coordinate gating for efficient 3d human pose estimation. 
IEEE Transactions on Multimedia, 2023. +[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017. 1, 3 +[46] Jingbo Wang, Sijie Yan, Yuanjun Xiong, and Dahua Lin. Motion guided 3d pose estimation from videos. In ECCV, 2020. 2, 6, 7 +[47] Tianhan Xu and Wataru Takano. Graph stacked hourglass networks for 3d human pose estimation. In CVPR, 2021. 1 +[48] Youze Xue, Jiansheng Chen, Xiangming Gu, Huimin Ma, and Hongbing Ma. Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing, 31, 2022. 1, 2, 3, 5, 6, 7 +[49] Ting Yao, Yingwei Pan, Yehao Li, Chong-Wah Ngo, and Tao Mei. Wave-vit: Unifying wavelet and transformers for visual representation learning. In ECCV, 2022. 1 +[50] Hao Zhang, Lechao Cheng, Yanbin Hao, and Chong-wah Ngo. Long-term leap attention, short-term periodic shift for video classification. In ACM MM, 2022. 2 + +[51] Hao Zhang, Yanbin Hao, and Chong-Wah Ngo. Token shift transformer for video classification. In ACM MM, 2021. 2 +[52] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Junsong Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. arXiv preprint arXiv:2203.00859, 2022. 1, 2, 3, 5, 6, 7 +[53] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In CVPR, 2019. 2 +[54] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In ICCV, 2021. 1, 2, 5, 6, 7 \ No newline at end of file diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/images.zip b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..59ded211631e3cd3a453bc940a1d744308b8f26c --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a46a3e63a82ad301c8de838496fa9c96075627c2295780f3fb3941ef6a8fe3f0 +size 806640 diff --git a/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/layout.json b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1e683ee98aaa8f9de8d2201f8d3d7420ae87d2e7 --- /dev/null +++ b/2023/3D Human Pose Estimation With Spatio-Temporal Criss-Cross Attention/layout.json @@ -0,0 +1,8344 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 103, + 523, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 103, + 523, + 121 + ], + "spans": [ + { + "bbox": [ + 75, + 103, + 523, + 121 + ], + "type": "text", + "content": "3D Human Pose Estimation with Spatio-Temporal Criss-cross Attention*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 143, + 465, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 143, + 465, + 185 + ], + "spans": [ + { + "bbox": [ + 127, + 143, + 465, + 185 + ], + "type": "text", + "content": "Zhenhua Tang, Zhaofan Qiu, Yanbin Hao, Richang Hong, Ting Yao \nHefei University of Technology, Anhui, China HiDream.ai Inc \nUniversity of Science and Technology of China, Anhui, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 188, + 473, + 200 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 188, + 473, + 200 + ], + "spans": [ + { + "bbox": [ + 120, + 188, + 473, + 200 + ], + "type": "text", + "content": "zhenhuat@foxmail.com, zhaofanqiu@gmail.com, haoyanbin@hotmail.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 173, + 201, + 419, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 201, + 419, + 213 + ], + "spans": [ + { + "bbox": [ + 173, + 201, + 419, + 213 + ], + "type": "text", + "content": "hongrc.hfut@gmail.com, tingyao.ustc@gmail.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "spans": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 266, + 290, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 290, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 613 + ], + "type": "text", + "content": "Recent transformer-based solutions have shown great success in 3D human pose estimation. Nevertheless, to calculate the joint-to-joint affinity matrix, the computational cost has a quadratic growth with the increasing number of joints. Such drawback becomes even worse especially for pose estimation in a video sequence, which necessitates spatio-temporal correlation spanning over the entire video. In this paper, we facilitate the issue by decomposing correlation learning into space and time, and present a novel Spatio-Temporal Criss-cross attention (STC) block. Technically, STC first slices its input feature into two partitions evenly along the channel dimension, followed by performing spatial and temporal attention respectively on each partition. STC then models the interactions between joints in an identical frame and joints in an identical trajectory simultaneously by concatenating the outputs from attention layers. On this basis, we devise STCFoer by stacking multiple STC blocks and further integrate a new Structure-enhanced Positional Embedding (SPE) into STCFoer to take the structure of human body into consideration. The embedding function consists of two components: spatio-temporal convolution around neighboring joints to capture local structure, and part-aware embedding to indicate which part each joint belongs to. Extensive experiments are conducted on Human3.6M and MPI-INF-3DHP benchmarks, and superior results are reported when comparing to the state-of-the-art approaches. More remarkably, STCFoer achieves to-date the best published performance: " + }, + { + "bbox": [ + 46, + 266, + 290, + 613 + ], + "type": "inline_equation", + "content": "40.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 266, + 290, + 613 + ], + "type": "text", + "content": " P1 error on the challenging Human3.6M dataset." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 638, + 128, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 638, + 128, + 651 + ], + "spans": [ + { + "bbox": [ + 47, + 638, + 128, + 651 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 659, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 287, + 684 + ], + "type": "text", + "content": "3D human pose estimation has attracted intensive research attention in CV community due to its great poten" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 241, + 545, + 371 + ], + "blocks": [ + { + "bbox": [ + 307, + 241, + 545, + 371 + ], + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 371 + ], + "type": "image", + "image_path": "4f0d200e406775b9281a28760a9bbd17664497eecc4975874d69b3601a198ab0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 376, + 547, + 465 + ], + "lines": [ + { + "bbox": [ + 304, + 376, + 547, + 465 + ], + "spans": [ + { + "bbox": [ + 304, + 376, + 547, + 465 + ], + "type": "text", + "content": "Figure 1. Modeling spatio-temporal correlation for 3D human pose estimation by (a) utilizing spatio-temporal attention on all joints in the entire video, (b) separating the framework into two steps that respectively capture spatial and temporal context, and (c) our Spatio-Temporal Criss-cross attention (STC), i.e., a two-pathway block that models spatial and temporal information in parallel. In the visualization of receptive field, the covered joints of each attention strategy is marked as red nodes." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 473, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 569 + ], + "type": "text", + "content": "tial in numerous applications such as human-robot interaction [20, 43], virtual reality [11] and motion prediction [27, 28]. The typical monocular solution is a two-stage pipeline, which first extracts 2D keypoints by 2D human pose detectors (e.g., [7] and [41]), and then lifts 2D coordinates into 3D space [31]. Despite its simplicity, the second stage is an ill-posed problem which lacks the depth prior, and suffers from the ambiguity problem." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 546, + 713 + ], + "type": "text", + "content": "To mitigate this issue, several progresses propose to aggregate the temporal cues in a video sequence to promote pose estimation by grid convolutions [15,26,35], graph convolutions [4, 47] and multi-layer perceptrons [6, 21]. Recently, Transformer structure has emerged as a dominant architecture in both NLP and CV fields [8,24,45,49], and also demonstrated high capability in modeling spatio-temporal correlation for 3D human pose estimation [13, 22, 23, 25, 48, 52, 54]. Figure 1(a) illustrates a straightforward way to exploit the transformer architecture for directly learning spatio-temporal correlation between all joints in the entire video sequence. 
However, the computational cost of calcu" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "type": "text", + "content": "*This work is supported by the National Natural Science Foundation of China under Grants 61932009." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4790" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 204 + ], + "type": "text", + "content": "lating the joint-to-joint affinity matrix in the self-attention has a quadratic growth along the increase of number of frames, making such solution unpractical for model training. As a result, most transformer structures employ a two-step alternative, as shown in Figure 1(b), which encodes spatial information for each frame first and then aggregates the feature sequence by temporal transformer. Note that we take spatial transformer as the frame encoder as an example in the figure. This strategy basically mines the correlation across frame-level features but seldom explores the relation between joints across different frames." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "spans": [ + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "content": "In this paper, we propose a novel two-pathway attention mechanism, namely Spatio-Temporal Criss-cross attention (STC), that models spatial and temporal information in parallel, as depicted in Figure 1(c). Concretely, STC first slices the input joint features into two partitions evenly with respect to the channel dimension. On each partition, a Multihead Self-Attention (MSA) is implemented to encapsulate the context along space or time axis. In between, the space pathway computes the affinity between joints in each frame independently, and the time pathway correlates the identical joint moving across different frames, i.e., the trajectory. Then, STC recombines the learnt contexts from two pathways, and mixes the information across channels by MultiLayer Perceptrons (MLP). 
By doing so, the receptive field is like a criss cross of spatial and temporal axes, and the computational cost is " + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T^2 S) + \\mathcal{O}(TS^2)" + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "content": ". That is much lower than " + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(T^2 S^2)" + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "content": " of fully spatio-temporal attention, where " + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 210, + 289, + 426 + ], + "type": "text", + "content": " denote the number of frames and joints, respectively." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 432, + 289, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 432, + 289, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 432, + 289, + 589 + ], + "type": "text", + "content": "By stacking multiple STC blocks, we devise a new architecture — STCFormer for 3D human pose estimation. Furthermore, we delve into the crucial design of positional embedding in STCFormer in the context of pose estimation. The observations that joints in the same body part are either highly relevant (static part) or not relevant but containing moving patterns (dynamic part) motivate us to design a new Structure-enhanced Positional Embedding (SPE). SPE consists of two embedding functions for the static and dynamic part, respectively. A part-aware embedding is to describe the static part by indicating which part each joint belongs to, and a spatio-temporal convolution around neighboring joints aims to capture dynamic structure in local window." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 289, + 715 + ], + "type": "text", + "content": "We summarize the main contributions of this work as follows. First, STC is a new type of decomposed spatiotemporal attention for 3D human pose estimation in an economic and effective way. Second, STCFormaler is a novel transformer architecture by stacking multiple STC blocks and integrating the structure-enhanced positional embedding. Extensive experiments conducted on Human3.6M and MPI-INF-3DHP datasets demonstrate that STCFormaler with much less parameters achieves superior performances than the state-of-the-art techniques." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 392, + 84 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 91, + 547, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 547, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 547, + 354 + ], + "type": "text", + "content": "Monocular 3D human pose estimation. 
Monocular 3D human pose estimation is to re-localize human body joints in 3D space from the input single view 2D data, i.e., image or 2D coordinates. The early works [1, 2, 17] develop various graphical or restrictive methods to explore the dependencies of human skeleton and perspective relationships across spaces. With the development of deep learning, several deep neural networks [5, 10, 19, 31, 34, 42, 44, 53] are devised for 3D human pose estimation, and can be categorized into one-stage and two-stage directions. The one-stage approaches directly regress the 3D pose from the input image, and necessitate a large number of image-pose paired data and powerful computing resources [19, 34, 42]. The two-stage methods first exploit off-the-shelf 2D pose detectors [7, 33, 41] to estimate 2D joint coordinates, and then lift the 2D coordinates into 3D space by the fully-connected network [31], grid convolutional network [5], recurrent neural network [10], or graph convolutional network [53]. Although the two-stage methods alleviate the requirement of image-pose pairs, they still heavily suffer from the depth ambiguities problem, which is intrinsically ill-posed due to the lack of depth information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 355, + 547, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 355, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 355, + 547, + 533 + ], + "type": "text", + "content": "3D pose estimation from video sequence. To overcome the limitation of depth ambiguities, the advances involve temporal context from neighboring frames to improve 3D coordinates regression. For example, Pavllo et al. [35] propose a temporal fully-convolutional network (TCN) to model the local context by convoluting the neighboring frames. Later, Liu et al. [26] extend the TCN by introducing an attention mechanism to adaptively identify the significant frames/poses over a sequence. After that, Chen et al. [6] decompose the pose estimation into bone length and bone direction prediction. Instead of the aforementioned methods based on temporal aggregation, latter works [4, 16, 46] utilize the spatio-temporal graph convolutional network to model the spatial and temporal correlations across joints simultaneously." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 534, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 548, + 715 + ], + "type": "text", + "content": "Transformer-based methods. In addition to the traditional convolutional networks, transformer architectures are also be exploited to model spatio-temporal correlation [13,22,23,29,30,37,50,51,54]. In particular, Zheng et al. [54] design a concatenation architecture of several spatial transformer encoders and temporal transformer encoders in PoseFormer. MHFormer [23] proposes to generate multiple hypothesis representations for a pose with the spatial transformer encoder and then model multi-level global correlations with different temporal transformer blocks. StridedFormer [22] and CrossFormer [13] introduce locality by integrating the 1D temporal convolution and 1D spatial convolution, respectively. 
More recently, the joint-wise inconsistency of motion patterns is highlighted in [48, 52], and encourages to model spatial and temporal information si" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "text", + "content": "4791" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "multaneously. PATA [48] groups the joints with similar motion patterns and calculates the intra-part temporal correlation. Similarly, MixSTE [52] uses multiple separated spatial transformer blocks and temporal transformer blocks to model the spatial and temporal correlation iteratively." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 134, + 288, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 134, + 288, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 134, + 288, + 266 + ], + "type": "text", + "content": "Our work also falls into the category of transformer-based method for 3D human pose estimation. The aforementioned transformers mainly model spatial and temporal information respectively in different stages of the networks. In view that the joint motion is a state of coexistence of space and time, such separation may result in insufficient learning of moving patterns. In contrast, our STC block is a two-pathway design that models spatial and temporal dependencies in parallel, which are then mixed through MLP. Moreover, a new positional embedding function is deliberately devised to explore the local structure of human body." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 278, + 275, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 275, + 293 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 275, + 293 + ], + "type": "text", + "content": "3. Spatio-Temporal Criss-cross Transformer" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 299, + 197, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 197, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 197, + 312 + ], + "type": "text", + "content": "3.1. Preliminary - Transformer" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "text", + "content": "We begin this section by reviewing the transformer architecture [45] as the basis of our proposal. Transformer is a versatile representation learning architecture, and mainly consists of two components: Multi-head Self-Attention module (MSA) and Feed-Forward Network (FFN). MSA calculates the token-to-token affinity matrix and propagates the information across different tokens. 
Formally, given " + }, + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "text", + "content": " input tokens with " + }, + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 319, + 287, + 414 + ], + "type": "text", + "content": " channels, MSA is formulated as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 425, + 287, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 425, + 287, + 452 + ], + "spans": [ + { + "bbox": [ + 62, + 425, + 287, + 452 + ], + "type": "interline_equation", + "content": "M S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t m a x} \\left(\\frac {\\mathbf {Q} \\cdot \\mathbf {K} ^ {T}}{\\sqrt {C}}\\right) \\cdot \\mathbf {V}, \\tag {1}", + "image_path": "60dbbeb986594ff6997fa69df7015fdcdffeb5c2cc86f44db3505e09e0e4b7ff.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 463, + 287, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 463, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 46, + 463, + 287, + 535 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 463, + 287, + 535 + ], + "type": "inline_equation", + "content": "\\mathbf{Q},\\mathbf{K},\\mathbf{V}\\in \\mathbb{R}^{N\\times C}" + }, + { + "bbox": [ + 46, + 463, + 287, + 535 + ], + "type": "text", + "content": " denote the queries, keys and values obtained by linearly mapping the input tokens. Note that we omit the multi-head separation here for simplicity. FFN contains a Multi-Layer Perceptrons (MLP), i.e., a nonlinear mapping with two linear layer plus a GELU [14] activation in between. The output of MLP is computed by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 92, + 546, + 287, + 558 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 546, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 92, + 546, + 287, + 558 + ], + "type": "interline_equation", + "content": "M L P (\\mathbf {H}) = G E L U \\left(\\mathbf {H} \\cdot \\mathbf {W} _ {\\mathbf {1}}\\right) \\cdot \\mathbf {W} _ {\\mathbf {2}}, \\tag {2}", + "image_path": "14d86983fbafd3493cf00b9f39eef1d5f9d63ffa8aa2c4742f466870d6ae66c7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "text", + "content": " is the input tokens of MLP, " + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_1 \\in \\mathbb{R}^{C \\times \\hat{C}}" + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_2 \\in \\mathbb{R}^{\\hat{C} \\times C}" + }, + { + "bbox": [ + 47, + 568, + 287, + 618 + ], + "type": "text", + "content": " are the projection matrices. 
With these, each transformer block is constructed by utilizing MSA and MLP in order with shortcut connection:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 627, + 220, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 627, + 220, + 639 + ], + "spans": [ + { + "bbox": [ + 110, + 627, + 220, + 639 + ], + "type": "interline_equation", + "content": "\\mathbf {Q}, \\mathbf {K}, \\mathbf {V} = F C (L N (\\mathbf {X})) ,", + "image_path": "4b122df6f70fcf6f6f902fc9ce9576ebd453799ca631e827c7d3512d52b29021.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 112, + 641, + 287, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 641, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 112, + 641, + 287, + 653 + ], + "type": "interline_equation", + "content": "\\mathbf {Y} = M S A (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) + \\mathbf {X}, \\tag {3}", + "image_path": "ed19ab40d94ba8591e48a220d5d7e2b3392a7e4b3495433c429a87dddedbdadf.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 112, + 655, + 220, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 655, + 220, + 667 + ], + "spans": [ + { + "bbox": [ + 112, + 655, + 220, + 667 + ], + "type": "interline_equation", + "content": "\\mathbf {Z} = M L P (L N (\\mathbf {Y})) + \\mathbf {Y},", + "image_path": "9bdc6b99a316bd31918846b93c9a6fdc4d9239198ff988619483f9cc9f6debeb.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "FC" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " is linear projection of the input tokens " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "LN" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " denotes Layer Norm [3]. The output " + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}" + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": " serves as the input to the next block until the last one." 
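For readers who want to map Eqs. (1)-(3) onto code, the following is a minimal sketch of one pre-LN transformer block in PyTorch. The class name, the hyper-parameters, and the use of nn.MultiheadAttention (whose internal per-head scaling differs slightly from the 1/sqrt(C) written in Eq. (1)) are illustrative assumptions, not the paper's implementation.

import torch
import torch.nn as nn

class TransformerBlock(nn.Module):
    # Illustrative pre-LN block following Eqs. (1)-(3): MSA + residual, then MLP (FFN) + residual.
    def __init__(self, C=256, num_heads=8, mlp_ratio=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(C)
        self.ln2 = nn.LayerNorm(C)
        self.msa = nn.MultiheadAttention(C, num_heads, batch_first=True)   # Eq. (1)
        self.mlp = nn.Sequential(nn.Linear(C, mlp_ratio * C), nn.GELU(),
                                 nn.Linear(mlp_ratio * C, C))              # Eq. (2)

    def forward(self, x):              # x: (batch, N tokens, C channels)
        h = self.ln1(x)                # Q, K, V are projected from LN(X) inside nn.MultiheadAttention
        y = self.msa(h, h, h)[0] + x   # Eq. (3): Y = MSA(Q, K, V) + X
        z = self.mlp(self.ln2(y)) + y  # Eq. (3): Z = MLP(LN(Y)) + Y
        return z

x = torch.randn(2, 17, 256)            # e.g. 17 joint tokens with C = 256
print(TransformerBlock()(x).shape)     # torch.Size([2, 17, 256])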
+ } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 313, + 71, + 405, + 232 + ], + "blocks": [ + { + "bbox": [ + 313, + 71, + 405, + 232 + ], + "lines": [ + { + "bbox": [ + 313, + 71, + 405, + 232 + ], + "spans": [ + { + "bbox": [ + 313, + 71, + 405, + 232 + ], + "type": "image", + "image_path": "f7ade926fdf0c5aedbee014565be6bfb3d06ec7afe0ac6780cc16ce8e6d2b1fb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 233, + 364, + 239 + ], + "lines": [ + { + "bbox": [ + 359, + 233, + 364, + 239 + ], + "spans": [ + { + "bbox": [ + 359, + 233, + 364, + 239 + ], + "type": "text", + "content": "a)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "text", + "content": "Figure 2. An overview of our proposed Spatio-Temporal Crisscross Transformer (STCFormer). (a) It mainly consists of " + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 305, + 239, + 545, + 316 + ], + "type": "text", + "content": " sequential STC blocks. Each block aggregates the context across tokens by spatio-temporal criss-cross attention, and non-linearly maps each token by Multi-Layer Perceptrons (MLP). (b) The architecture of our STC block and the Structure-enhanced Positional Embedding (SPE)." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 414, + 71, + 540, + 232 + ], + "blocks": [ + { + "bbox": [ + 414, + 71, + 540, + 232 + ], + "lines": [ + { + "bbox": [ + 414, + 71, + 540, + 232 + ], + "spans": [ + { + "bbox": [ + 414, + 71, + 540, + 232 + ], + "type": "image", + "image_path": "0df3cf8ca38bfb41dcf4c90200521e885db71c17c0ec70edef9fc902334e6302.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 473, + 232, + 482, + 239 + ], + "lines": [ + { + "bbox": [ + 473, + 232, + 482, + 239 + ], + "spans": [ + { + "bbox": [ + 473, + 232, + 482, + 239 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 323, + 425, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 323, + 425, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 323, + 425, + 335 + ], + "type": "text", + "content": "3.2. Overall Architecture" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 342, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 437 + ], + "type": "text", + "content": "Figure 2 depicts an overview of the proposed STC-Former, which mainly includes three stages: a joint-based embedding, stacked STC blocks and a regression head. The joint-based embedding projects the input 2D coordinates of each joint into feature space. STC blocks aggregate the spatio-temporal context, and update the representation of each joint. Based on the learnt features, the 3D coordinates are estimated by a regression head." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": "Joint-based embedding. Given a 2D pose sequence as " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_{2D} \\in \\mathbb{R}^{T \\times N \\times 2}" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": " denote the number of frames and the number of body joints in each frame, respectively, we first project " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_{2D}" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": " to high-dimensional embeddings by a joint-based embedding layer. This layer applies an FC layer to each 2D coordinate independently followed by a GELU activation. As such, the joint-based embedding layer produces the features with the shape of " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "T \\times N \\times C" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": ". Note that in the previous transformer [22], the embedding layer projects all joint coordinates in each frame into a single vector, reducing the computational cost of the subsequent transformer blocks while losing the spatial discrimination. Ours is different in that the spatial dimension " + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 438, + 545, + 617 + ], + "type": "text", + "content": " is maintained, and the computational cost is also reduced by spatio-temporal criss-cross attention." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "STC blocks. The STC block originates from the transformer block in Eq.(3), and replaces the original MSA layer with spatio-temporal criss-cross attention. In addition, a new positional embedding function, i.e., Structure-enhanced Positional Embedding (SPE), is integrated into the STC block for better descriptive capability of local structures. Section 3.3 and Section 3.4 will elaborate STC and SPE, respectively." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4792" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "text", + "content": "Regression head. A liner regression head is finally established upon the STC blocks to estimate the 3D pose coordinates " + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}_{3D} \\in \\mathbb{R}^{T \\times N \\times 3}" + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "text", + "content": ". The whole architecture is optimized by minimizing the Mean Squared Error (MSE) between " + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}_{3D}" + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "text", + "content": " and the ground-truth 3D coordinates " + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_{3D}" + }, + { + "bbox": [ + 47, + 72, + 287, + 133 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 122, + 139, + 287, + 159 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 139, + 287, + 159 + ], + "spans": [ + { + "bbox": [ + 122, + 139, + 287, + 159 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\left\\| \\hat {\\mathbf {P}} _ {3 D} - \\mathbf {P} _ {3 D} \\right\\| ^ {2}. \\tag {4}", + "image_path": "2cbd21222758d33cc576177e6d1f1a55de1f3133e4bdf281a5a77059d6d08bcc.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 165, + 250, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 165, + 250, + 178 + ], + "spans": [ + { + "bbox": [ + 47, + 165, + 250, + 178 + ], + "type": "text", + "content": "3.3. Spatio-Temporal Criss-cross Attention" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 183, + 287, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 183, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 287, + 315 + ], + "type": "text", + "content": "STC aims to model the spatio-temporal dependencies between joints in an efficient way to avoid the quadratic computation cost of fully spatio-temporal attention. Inspired by the group contextualization strategy [12] which separates the channels into several paralleled groups and applies different feature contextualization operations to them respectively, we propose to capture the spatial and temporal context on different channels in parallel. Different from the axial convolution in [12,36,38], we exploit axis-specific multihead self-attention in STC for spatial or temporal context, which is more powerful for correlation learning." 
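As a quick recap before the STC details, the snippet below sketches the two lightweight ends of the pipeline described so far: the joint-based embedding (a per-joint FC plus GELU producing a T x N x C feature map) and the linear regression head trained with the MSE objective of Eq. (4). The batch size, T, N and C values are illustrative only, and the L stacked STC blocks that would sit in between are omitted.

import torch
import torch.nn as nn

C = 256
embed = nn.Sequential(nn.Linear(2, C), nn.GELU())  # joint-based embedding: each 2D coordinate -> C channels
head = nn.Linear(C, 3)                             # linear regression head: per-joint features -> 3D coordinates

p2d = torch.randn(128, 27, 17, 2)                  # (batch, T frames, N joints, xy); dummy values
x = embed(p2d)                                     # (128, 27, 17, C); the stacked STC blocks would refine x here
p3d_hat = head(x)                                  # (128, 27, 17, 3) estimated 3D pose sequence
p3d_gt = torch.randn_like(p3d_hat)                 # placeholder ground truth
loss = ((p3d_hat - p3d_gt) ** 2).mean()            # Eq. (4): MSE between prediction and ground truth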
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": "Concretely, the input embedding " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{T \\times N \\times C}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": " are firstly mapped to queries " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} \\in \\mathbb{R}^{T \\times N \\times C}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": ", keys " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{K} \\in \\mathbb{R}^{T \\times N \\times C}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": ", and values " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{V} \\in \\mathbb{R}^{T \\times N \\times C}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": ", which are then evenly divided into two groups along the channel dimension. For notation clarity, we denote the divided feature matrix as time group " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_T, \\mathbf{K}_T, \\mathbf{V}_T\\}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": " and space group " + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_S, \\mathbf{K}_S, \\mathbf{V}_S\\}" + }, + { + "bbox": [ + 47, + 315, + 287, + 411 + ], + "type": "text", + "content": ". Next, the temporal and spatial correlations are calculated in two separate self-attention modules." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 411, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 287, + 483 + ], + "type": "text", + "content": "Temporal correlation represents the relation between the joints in an identical trajectory moving across different frames. To achieve this, we implement an axis-specific MSA, named " + }, + { + "bbox": [ + 46, + 411, + 287, + 483 + ], + "type": "inline_equation", + "content": "MSA_{T}" + }, + { + "bbox": [ + 46, + 411, + 287, + 483 + ], + "type": "text", + "content": ", which computes the attention affinities in Eq.(1) between joints across the temporal dimension. Hence, the output of temporal attention is measured as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 491, + 287, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 287, + 503 + ], + "type": "interline_equation", + "content": "\\mathbf {H} _ {\\mathbf {T}} = M S A _ {T} \\left(\\mathbf {Q} _ {\\mathbf {T}}, \\mathbf {K} _ {\\mathbf {T}}, \\mathbf {V} _ {\\mathbf {T}}\\right). 
\\tag {5}", + "image_path": "9f7299cdd22c0f7aa529957938898f783fe49f78a71c9cf034dda5a9b385c5fe.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 510, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 594 + ], + "type": "text", + "content": "Spatial correlation is the connection between joints in an identical frame. These joints indicate different body parts in one frame, which are intrinsically relevant due to the prior of body skeleton. Similar to temporal attention, we devise " + }, + { + "bbox": [ + 46, + 510, + 287, + 594 + ], + "type": "inline_equation", + "content": "MSAS_{S}" + }, + { + "bbox": [ + 46, + 510, + 287, + 594 + ], + "type": "text", + "content": " as an axis-specific MSA component on spatial dimension. Therefore, the output of spatial attention is formulated as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 602, + 287, + 614 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 602, + 287, + 614 + ], + "spans": [ + { + "bbox": [ + 107, + 602, + 287, + 614 + ], + "type": "interline_equation", + "content": "\\mathbf {H} _ {\\mathbf {S}} = M S A _ {S} \\left(\\mathbf {Q} _ {\\mathbf {S}}, \\mathbf {K} _ {\\mathbf {S}}, \\mathbf {V} _ {\\mathbf {S}}\\right). \\tag {6}", + "image_path": "35cac3cf9dfb0b4453dce28e9cf7aaac11ee892a4096c090b41f7787cfae54aa.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 622, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 622, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 622, + 287, + 693 + ], + "type": "text", + "content": "The above two correlation modules process in parallel and follow the self-attention regime for feature contextualization. They compute the token-to-token affinities by contextualizing from a specific axial perspective, and complement to each other. Thus, we concatenate the outputs from both attention layers along the channel dimension:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 125, + 702, + 287, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 702, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 125, + 702, + 287, + 714 + ], + "type": "interline_equation", + "content": "\\mathbf {H} = c a t \\left(\\mathbf {H} _ {\\mathbf {T}}, \\mathbf {H} _ {\\mathbf {S}}\\right), \\tag {7}", + "image_path": "707792c3392530a21eb9cd29d411f01e165cc1ecaeb87141f0b79d8263794ffb.jpg" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 309, + 70, + 545, + 181 + ], + "blocks": [ + { + "bbox": [ + 309, + 70, + 545, + 181 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 545, + 181 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 545, + 181 + ], + "type": "image", + "image_path": "b0dde1ddefd622e779fec833910527d81fed9daa5fd23846e6298ad8727f8300.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 181, + 545, + 236 + ], + "lines": [ + { + "bbox": [ + 304, + 181, + 545, + 236 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 545, + 236 + ], + "type": "text", + "content": "Figure 3. (a) The coefficient matrix of the motion trajectory of different joints. 
(b) The body joints are divided into five parts, denoted as " + }, + { + "bbox": [ + 304, + 181, + 545, + 236 + ], + "type": "inline_equation", + "content": "g_{*}" + }, + { + "bbox": [ + 304, + 181, + 545, + 236 + ], + "type": "text", + "content": ". The part with high/low relevance is colored as light/dark blue, respectively. The motion data is generated by actor S6 performing greeting action in the training set of Human3.6M." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 242, + 545, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 242, + 545, + 290 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 545, + 290 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 242, + 545, + 290 + ], + "type": "inline_equation", + "content": "cat" + }, + { + "bbox": [ + 304, + 242, + 545, + 290 + ], + "type": "text", + "content": " performs the concatenation. The resultant receptive field of STC is like a criss cross of spatial and temporal axes, and stacking multiple STC blocks is able to approximate the fully spatio-temporal attention." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 297, + 527, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 527, + 310 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 527, + 310 + ], + "type": "text", + "content": "3.4. Structure-enhanced Positional Embedding" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 411 + ], + "type": "text", + "content": "One of the crucial factor in transformer is positional embedding, which indicates the position of each token absolutely or relatively. For the positional embedding function in STCFormer, we delve into the inherent property of joints, i.e., the local structure, and propose Structure-enhanced Positional Embedding (SPE). Figure 3 depicts the motivation of SPE. 
Here, we group the body joints into five parts according to the dynamic chain structure of human body:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 328, + 419, + 545, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 419, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 328, + 419, + 545, + 486 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} g _ {0} = \\{h i p, s p i n e, t h r o a x, n e c k, h e a d \\} \\\\ g _ {1} = \\{r i g h t _ {-} h i p, r i g h t _ {-} k n e e, r i g h t _ {-} f e e t \\} \\\\ g _ {2} = \\left\\{l e f t - h i p, l e f t - k n e e, l e f t - f e e t \\right\\} \\tag {8} \\\\ g _ {3} = \\{r i g h t \\_ s h o u l d e r, r i g h t \\_ e l b o w, r i g h t \\_ w r i s t \\} \\\\ g _ {4} = \\{\\text {l e f t - s h o u l d e r}, \\text {l e f t - e l b o w}, \\text {l e f t - w r i s t} \\} \\\\ \\end{array}", + "image_path": "b06c34fc826259c173602d607e654e83c4a2f62014ff90e8a521d506fc34c311.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "content": "The trajectories of joints in the static part (" + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "inline_equation", + "content": "g_0, g_3" + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "inline_equation", + "content": "g_4" + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "content": " in the figure) are highly relevant. We devise a part-ware positional embedding to indicate which part each joint belongs to. The joints in the same part are attached with the same embedding vector. In particular, a learnable dictionary is constructed to assign embedding vector to different joints according to their group index. Given the group index " + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{g} \\in [0,1,2,3,4]^{T \\times N}" + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "content": " of joints, the learnable dictionary " + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{D} \\in \\mathbb{R}^{5 \\times \\frac{C}{2}}" + }, + { + "bbox": [ + 304, + 493, + 545, + 601 + ], + "type": "text", + "content": " convert the indexes to embedding vectors as" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 392, + 609, + 545, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 609, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 392, + 609, + 545, + 622 + ], + "type": "interline_equation", + "content": "\\mathbf {S P E} _ {1} = \\mathbf {D} (\\mathbf {g}). 
\\tag {9}", + "image_path": "8b322fd33651c5758646cf18ebba18e5f3f08d084666e72acb7571c6f29b16ca.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": "Nevertheless, in the dynamic part, i.e., part with relative movements (" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "inline_equation", + "content": "g_{1}, g_{2}" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": " in the figure), the trajectories of joints are not relevant. Simply assigning the same embedding vector to these joints ignores the motion patterns in the dynamic part. Hence, we propose to exploit a spatio-temporal convolution around the neighboring joints to capture the local structure. Formally, given the values " + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{V} \\in \\mathbb{R}^{T \\times N \\times \\frac{C}{2}}" + }, + { + "bbox": [ + 304, + 630, + 545, + 713 + ], + "type": "text", + "content": " in" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4793" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 47, + 88, + 281, + 300 + ], + "blocks": [ + { + "bbox": [ + 48, + 72, + 270, + 83 + ], + "lines": [ + { + "bbox": [ + 48, + 72, + 270, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 270, + 83 + ], + "type": "text", + "content": "Algorithm 1 Pseudo-code of STC with SPE (PyTorch-like)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 47, + 88, + 281, + 300 + ], + "lines": [ + { + "bbox": [ + 47, + 88, + 281, + 300 + ], + "spans": [ + { + "bbox": [ + 47, + 88, + 281, + 300 + ], + "type": "text", + "content": "x: input tensor of shape (B, T, N, C) \n# p: part index (B, T, N) in [0, 4] \n# MSA: axis-specific multi-head self-attention \nself.Linear = nn.Linear(C, 3C) \nself_embedding1 = nn.Embedding(5, C//2) \n# the channel-last convolution \nself_embedding2 = nn.Conv2d(C//2, C//2, k=3, g=C//2) \ndef STC(x, p): \n Q, K, V = self.linear(x).chunk(3, dim=3) \n Q_t, Q_s = Q.chunk(2, dim=3) \n K_t, K_s = K.chunk(2, dim=3) \n V_t, V_s = V.chunk(2, dim=3) \n H_t = MSA(Q_t, K_t, V_t, dim=1) \n H_s = MSA(Q_s, K_s, V_s, dim=2) \n H_t += self_embedding1(p) + self_embedding2(V_t) \n H_s += self_embedding1(p) + self_embedding2(V_s) \n H = torch.cat(H_t, H_s, dim=3) \nreturn H" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 46, + 321, + 287, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 321, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 46, + 321, + 287, + 346 + ], + "type": "text", + "content": "STC block, we treat " + }, + { + "bbox": [ + 46, + 321, + 287, + 346 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 46, + 321, + 287, + 346 + ], + "type": "text", + "content": " as 2D (i.e., space and time) feature map, and utilize 2D 
convolution on the neighboring joints:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 356, + 287, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 356, + 287, + 369 + ], + "spans": [ + { + "bbox": [ + 115, + 356, + 287, + 369 + ], + "type": "interline_equation", + "content": "\\mathbf {S P E} _ {2} (\\mathbf {V}) = \\operatorname {c o n v 2 d} (\\mathbf {V}), \\tag {10}", + "image_path": "128cbe5e44e59a9de0404bae94f3c72d03452afdbd036788f41af8966c415b50.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "inline_equation", + "content": "conv2d" + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": " convolution operation. Although the two SPE functions are designed respectively for static part and dynamic part, we utilize the two functions concurrently on all joints leaving out the requirement of static/dynamic judgment. The duet of two SPE functions is able to deal with the parts with various moving patterns." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 442, + 287, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 442, + 287, + 466 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 287, + 466 + ], + "type": "text", + "content": "By injecting the proposed SPE function into STC, the equation of STC is reformulated as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 471, + 287, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 471, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 52, + 471, + 287, + 510 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbf {H} _ {\\mathbf {T}} = M S A _ {T} \\left(\\mathbf {Q} _ {\\mathbf {T}}, \\mathbf {K} _ {\\mathbf {T}}, \\mathbf {V} _ {\\mathbf {T}}\\right) + \\mathbf {S P E} _ {1} + \\mathbf {S P E} _ {2} (\\mathbf {V} _ {\\mathbf {T}}), \\\\ \\mathbf {H} _ {\\mathbf {S}} = M S A _ {S} \\left(\\mathbf {Q} _ {\\mathbf {S}}, \\mathbf {K} _ {\\mathbf {S}}, \\mathbf {V} _ {\\mathbf {S}}\\right) + \\mathbf {S P E} _ {1} + \\mathbf {S P E} _ {2} (\\mathbf {V} _ {\\mathbf {S}}), \\tag {11} \\\\ \\mathbf {H} = c a t \\left(\\mathbf {H} _ {\\mathbf {T}}, \\mathbf {H} _ {\\mathbf {S}}\\right). \\\\ \\end{array}", + "image_path": "97d3c074963ba94a699a57a493d79e5841dbf439bca70da670c8737ab8dbcbbe.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 515, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 515, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 46, + 515, + 287, + 599 + ], + "type": "text", + "content": "Implementation. The proposed STC plus SPE in Eq.(11) can be readily implemented with a few lines of codes in Python. We detail an example of the codes in Algorithm 1 based on PyTorch platform. Here, we execute the pre-defined MSA and MLP function in the standard transformer. The SPE is implemented by constructing the default Embedding layer and Conv2d layer." 
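Since the pseudo-code of Algorithm 1 is sketch-level (for example, the mixed self.linear/self_embedding naming and the argument list of torch.cat would not run as written), the following is a self-contained re-working of the STC attention with SPE from Eq. (11). The explicit batch dimension, the single-group attention (multi-head splitting omitted for brevity), and the depthwise 3x3 reading of the "channel-last convolution" are assumptions made for illustration, not the authors' exact code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class STCAttention(nn.Module):
    # Criss-cross attention of Eq. (11): temporal and spatial attention on two channel halves, plus SPE_1/SPE_2.
    def __init__(self, C=256):
        super().__init__()
        assert C % 2 == 0
        self.qkv = nn.Linear(C, 3 * C)                          # shared Q/K/V projection
        self.part_embed = nn.Embedding(5, C // 2)               # SPE_1: one learnable vector per body part g_0..g_4
        self.local_conv = nn.Conv2d(C // 2, C // 2, 3,
                                    padding=1, groups=C // 2)   # SPE_2: 3x3 convolution over the (T, N) grid

    @staticmethod
    def _attend(q, k, v):
        # plain self-attention along the middle dimension; multi-head splitting omitted for brevity
        att = (q @ k.transpose(-2, -1)) / (q.shape[-1] ** 0.5)
        return F.softmax(att, dim=-1) @ v

    def _spe(self, v, part_idx):
        conv = self.local_conv(v.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)   # SPE_2(V)
        return self.part_embed(part_idx) + conv                             # SPE_1 + SPE_2(V)

    def forward(self, x, part_idx):
        # x: (B, T, N, C) joint tokens; part_idx: (B, T, N) integer part labels in [0, 4]
        B, T, N, C = x.shape
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        (q_t, q_s), (k_t, k_s), (v_t, v_s) = (t.chunk(2, dim=-1) for t in (q, k, v))

        # temporal pathway: attention over the T frames of every joint trajectory (Eq. (5))
        h_t = self._attend(q_t.transpose(1, 2).reshape(B * N, T, C // 2),
                           k_t.transpose(1, 2).reshape(B * N, T, C // 2),
                           v_t.transpose(1, 2).reshape(B * N, T, C // 2))
        h_t = h_t.reshape(B, N, T, C // 2).transpose(1, 2) + self._spe(v_t, part_idx)

        # spatial pathway: attention over the N joints inside every frame (Eq. (6))
        h_s = self._attend(q_s.reshape(B * T, N, C // 2),
                           k_s.reshape(B * T, N, C // 2),
                           v_s.reshape(B * T, N, C // 2))
        h_s = h_s.reshape(B, T, N, C // 2) + self._spe(v_s, part_idx)

        return torch.cat([h_t, h_s], dim=-1)                    # Eq. (7)/(11): concatenate the two halves

x = torch.randn(2, 27, 17, 256)                                 # dummy (B, T, N, C) tokens
p = torch.randint(0, 5, (2, 27, 17))                            # dummy part indices
print(STCAttention()(x, p).shape)                               # torch.Size([2, 27, 17, 256])

As in Eq. (3), the surrounding STC block would apply LayerNorm before this attention and follow it with a residual connection and the MLP.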
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 609, + 128, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 609, + 128, + 623 + ], + "spans": [ + { + "bbox": [ + 47, + 609, + 128, + 623 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 287, + 665 + ], + "type": "text", + "content": "We comprehensively evaluate the proposed STCFoer architecture on two large-scale datasets, i.e., Human3.6M [18] and MPI-INF-3DHP [32]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 671, + 222, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 671, + 222, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 671, + 222, + 683 + ], + "type": "text", + "content": "4.1. Datasets and Evaluation Metrics" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "Human3.6M is currently the most popular benchmark for indoor 3D human pose estimation, which contains 11" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": "subjects performing 15 typical actions, leading to 3.6 million video frames in total. Following the standard protocol, we use subjects 1, 5, 6, 7, and 8 for training, and subjects 9 and 11 for evaluation. The Mean Per Joint Position Error (MPJPE) is used to measure the error under two protocols: Protocol 1 (referred to as P1) computes MPJPE between the estimated pose and the ground truth after aligning their root joints (hip); Protocol 2 (referred to as P2) calculates Procrustes-MPJPE, where the ground truth and the pose prediction are further aligned through a rigid transformation. We also compute the MPJPE distribution of pose to evaluate the overall precision of the reconstructed skeletons. MPI-INF-3DHP is a recently proposed large-scale dataset, which consists of three scenes, i.e., green screen, non-green screen, and outdoor. By using 14 cameras, the dataset records 8 actors performing 8 activities for the training set and 7 activities for evaluation. Following the previous works [6, 39, 54], we adopt the MPJPE (P1), percentage of correct keypoints (PCK) with " + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "inline_equation", + "content": "150\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": ", and area under the curve (AUC) results as the evaluation metrics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 320, + 440, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 320, + 440, + 333 + ], + "spans": [ + { + "bbox": [ + 305, + 320, + 440, + 333 + ], + "type": "text", + "content": "4.2. 
Implementation Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": "Our model is implemented with PyTorch toolkit and runs on a server with one GTX 2080Ti GPU. In the experiments, two kinds of input 2D pose sequences are utilized including the pre-estimated 2D pose by the pre-trained CPN [7] and the real 2D pose (ground truth). For model training, we set each mini-batch as 128 sequences. The network parameters are optimized for 20 epochs by Adam optimizer with basic learning rate of 0.001 and decayed by 0.96 after each epoch. We consider the repeat time " + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": " of modules, the hidden embedding channel " + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": ", and the number of head " + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": " in attention block as free parameters that we tailor to the scale of network. The performances of the standard version STCFormaler with " + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "inline_equation", + "content": "\\{L = 6, C = 256, H = 8\\}" + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": " and the large version STCFormaler-L with " + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "inline_equation", + "content": "\\{L = 6, C = 512, H = 8\\}" + }, + { + "bbox": [ + 304, + 339, + 545, + 518 + ], + "type": "text", + "content": " are both reported." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 528, + 526, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 528, + 526, + 540 + ], + "spans": [ + { + "bbox": [ + 305, + 528, + 526, + 540 + ], + "type": "text", + "content": "4.3. Performance Comparison on Human3.6M" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": "We compare with several state-of-the-art techniques on Human3.6M dataset. Table 1 summarizes the performance comparisons in terms of P1 and P2 errors taking the pre-estimated 2D poses (CPN) as input, and the number of sampled frames T per video is also given for each method. In general, the longer input sequence leads to the lower regression error. 
Overall, STCFormer-L with " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "T = 243" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": " input frames achieves the new state-of-the-art performances with P1 error of " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "40.5\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": " and P2 error of " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "31.8\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": ". Benefiting from the proposed STC attention module, STCFormer-L outperforms StridedFormer [22], PATA [48] and MixSTE [52] with " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "T = 243" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": " frames, which are also based on transformer architecture, by the P1 error drop of " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "3.2\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "2.6\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "inline_equation", + "content": "0.4\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": ", respectively. Comparing to the best competitor" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4794" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 106, + 544, + 381 + ], + "blocks": [ + { + "bbox": [ + 47, + 71, + 545, + 104 + ], + "lines": [ + { + "bbox": [ + 47, + 71, + 545, + 104 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 545, + 104 + ], + "type": "text", + "content": "Table 1. Performance comparisons in terms of P1 error (mm) and P2 error (mm) with the state-of-the-art methods on Human3.6M dataset. The 2D pose input is estimated by CPN [7]. The best result and runner-up result in each column are marked in red and blue, respectively. \\* denotes the post-processing module proposed in [4]. " + }, + { + "bbox": [ + 47, + 71, + 545, + 104 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 71, + 545, + 104 + ], + "type": "text", + "content": " is the number of sampled frames from each video." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 106, + 544, + 381 + ], + "lines": [ + { + "bbox": [ + 49, + 106, + 544, + 381 + ], + "spans": [ + { + "bbox": [ + 49, + 106, + 544, + 381 + ], + "type": "table", + "html": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2041.844.841.144.947.454.143.442.256.263.645.343.545.331.332.245.1
UGCN [46] (T=96)*ECCV'2040.242.542.641.146.756.741.442.356.260.446.342.246.231.731.044.5
PoseFormer [54] (T=81)ICCV'2141.544.839.842.546.551.642.142.053.360.745.543.346.131.832.244.3
Shan et al. [40] (T=243)ACM MM'2140.844.541.442.746.355.641.841.953.760.845.041.544.830.831.944.3
Anatomy3D [6] (T=243)TCSVT'2141.443.540.142.946.651.941.742.353.960.245.441.746.031.532.744.1
Einfalt et al. [9] (T=351)*arXiv'2239.643.840.242.446.553.942.342.555.762.345.143.044.730.130.844.2
StridedFormer [22] (T=243)*TMM'2240.343.340.242.345.652.341.840.555.960.644.243.044.230.030.243.7
CrossFormer [13] (T=81)arXiv'2240.744.140.841.545.852.841.240.855.361.944.941.844.629.231.143.7
PATA [48] (T=243)TIP'2239.942.740.342.345.052.840.439.356.961.244.141.342.828.429.343.1
MHFormer [23] (T=351)CVPR'2239.243.140.140.944.951.240.641.353.560.343.741.143.829.830.643.0
P-STMO [39] (T=243)ECCV'2238.942.740.441.145.649.740.939.955.559.444.942.242.729.429.442.8
MixSTE [52] (T=81)CVPR'2239.843.038.640.143.450.640.641.452.256.743.840.843.929.430.342.4
MixSTE [52] (T=243)CVPR'2237.640.937.339.742.349.940.139.851.755.042.139.841.027.927.940.9
STCFormer (T=81)40.643.038.340.243.552.640.340.151.857.742.839.842.328.029.542.0
STCFormer (T=243)39.641.637.438.843.151.139.139.751.457.441.838.540.727.128.641.0
STCFormer-L (T=243)38.441.236.838.042.750.538.738.252.556.841.838.440.226.227.740.5
P2PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2032.335.233.335.835.941.533.232.744.650.937.032.437.025.227.235.6
UGCN [46] (T=96)*ECCV'2031.834.335.433.535.441.731.131.644.449.036.432.235.024.923.034.5
PoseFormer [54] (T=81)ICCV'2134.136.134.437.236.442.234.433.645.052.537.433.837.825.627.336.5
Shan et al. [40] (T=243)ACM MM'2132.536.233.235.335.642.132.631.942.647.936.632.134.824.225.835.0
Anatomy3D [6] (T=243)TCSVT'2132.635.132.835.436.340.432.432.342.749.036.832.436.024.926.535.0
Einfalt et al. [9] (T=351)*arXiv'2232.736.133.436.036.142.033.333.145.450.737.034.135.924.425.435.7
StridedFormer [22] (T=243)*TMM'2232.735.532.535.435.941.633.031.945.150.136.333.535.123.925.035.2
MHFormer [23] (T=351)CVPR'2231.534.932.833.635.339.632.032.243.548.736.432.634.323.925.134.4
P-STMO [39] (T=243)ECCV'2231.335.232.933.935.439.332.531.544.648.236.332.934.423.823.934.4
CrossFormer [13] (T=81)arXiv'2231.434.632.633.734.339.731.631.044.349.335.931.334.423.425.534.3
PATA [48] (T=243)TIP'2231.234.131.933.833.939.531.630.045.448.135.031.133.522.423.633.7
MixSTE [52] (T=81)CVPR'2232.034.231.733.734.439.232.031.842.946.935.532.034.423.625.233.9
MixSTE [52] (T=243)CVPR'2230.833.130.331.833.139.131.130.542.544.534.030.832.722.122.932.6
STCFormer (T=81)30.433.831.131.733.539.530.830.041.845.834.330.132.821.923.432.7
STCFormer (T=243)29.533.230.631.033.038.030.429.441.845.233.629.531.621.322.632.0
STCFormer-L (T=243)29.333.030.730.632.738.229.728.842.245.033.329.431.520.922.331.8
", + "image_path": "33307fa95cef6d717bdc4c1797e8eb56d94d3357502e3cd621f382f44899c6dc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 422, + 545, + 545 + ], + "blocks": [ + { + "bbox": [ + 47, + 387, + 545, + 419 + ], + "lines": [ + { + "bbox": [ + 47, + 387, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 47, + 387, + 545, + 419 + ], + "type": "text", + "content": "Table 2. Performance comparisons in terms of P1 error (mm) with the state-of-the-art methods on Human3.6M dataset. The models take the ground-truth 2D pose as input. The best result and runner-up result in each column are marked in red and blue, respectively. “*” denotes the post-processing module proposed in [4]. " + }, + { + "bbox": [ + 47, + 387, + 545, + 419 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 387, + 545, + 419 + ], + "type": "text", + "content": " is the number of sampled frames from each video." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 422, + 545, + 545 + ], + "lines": [ + { + "bbox": [ + 49, + 422, + 545, + 545 + ], + "spans": [ + { + "bbox": [ + 49, + 422, + 545, + 545 + ], + "type": "table", + "html": "
P1PublicationDir.Dis.Eat.Gre.PhonePhotoPosePurch.Sit.SitD.SmokeWaitWalkD.WalkWalkT.Avg.
Liu et al. [26] (T=243)CVPR'2034.537.133.634.232.937.139.635.840.741.433.033.833.026.626.934.7
PoseFormer [54] (T=81)ICCV'2130.033.629.931.030.233.334.831.437.838.631.731.529.023.323.131.3
Shan et al. [40] (T=243)ACM MM'2129.530.828.829.130.735.231.727.834.536.030.329.428.924.124.730.1
MHFormer [23] (T=351)CVPR'2227.732.129.128.930.033.933.031.237.039.330.031.029.422.223.030.5
P-STMO [39] (T=243)ECCV'2228.530.128.627.929.833.231.327.836.037.429.729.528.121.021.029.3
StridedFormer [22] (T=243) *TMM'2227.129.426.527.128.633.030.726.838.234.729.129.826.819.119.828.5
CrossFormer [13] (T=81)arXiv'2226.030.026.826.228.031.030.429.635.437.128.427.326.720.519.928.3
PATA [48] (T=243)TIP'2225.825.223.323.524.027.427.924.429.330.124.924.123.318.619.724.7
MixSTE [52] (T=81)CVPR'2225.627.824.525.724.929.928.627.429.929.026.125.025.218.719.925.9
MixSTE [52] (T=243)CVPR'2221.622.020.421.020.824.324.721.926.924.921.221.520.814.715.721.6
STCFormer (T=81)26.226.523.424.625.028.628.324.630.933.725.725.324.618.619.725.7
STCFormer (T=81) *25.925.922.724.024.627.527.623.130.131.525.124.723.818.419.625.0
STCFormer (T=243)21.422.621.021.323.826.024.220.028.928.022.321.420.114.215.022.0
STCFormer (T=243) *20.821.820.020.623.425.023.619.327.826.121.620.619.514.315.121.3
", + "image_path": "b7c5f83f398442bf281b2c20706d24a78e7238d2d686f3b48dc36dd8f68df70d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 553, + 287, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 553, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 553, + 287, + 649 + ], + "type": "text", + "content": "MixSTE [52], our STCFormer consistently obtains better precision across different numbers of input frames, and only demands around half of the parameters (18.9M v.s. 33.6M). The results verify the advantages of STC attention as an economic and effective way to decompose the full spatiotemporal attention. More importantly, the series of STC-Former reaches to-date the best reported performances in 10 out of 15 categories." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Table 2 further details the comparisons between STC-Former and the state-of-the-art models with the groundtruth 2D pose as input. This setting excludes the noise from 2D pose estimation, and measures the upper bound of 2D-to-3D lifting models. Accordingly, the P1 errors are obvi" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "text", + "content": "ously decreased across different methods by replacing the CPN-estimated 2D pose with the ground-truth 2D pose, but the performance trends are still similar. STCFoermer with post-processing attains the best P1 error of " + }, + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "inline_equation", + "content": "21.3\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "text", + "content": ", which is " + }, + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 553, + 545, + 625 + ], + "type": "text", + "content": " lower than the best competitor MixSTE, validating the impact of STCFoermer with different types of input." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 546, + 713 + ], + "type": "text", + "content": "In addition to the mean error, we also compare the error distribution of STCFormer and baseline methods in Figure 4. In this experiment, the methods take the estimated 2D poses by CPN of 27 frames as input. 
Compared to the recent transformer-based approaches including Strided-Former [22], P-STMO [39], and MHFormer [23], our STC-Former leads to the highest number of samples with error" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4795" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 72, + 278, + 168 + ], + "blocks": [ + { + "bbox": [ + 53, + 72, + 278, + 168 + ], + "lines": [ + { + "bbox": [ + 53, + 72, + 278, + 168 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 278, + 168 + ], + "type": "image", + "image_path": "6da9641e3ca76d3e20113352b13c63f274d9ec12df88c637d044af5559dfd990.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 170, + 287, + 204 + ], + "lines": [ + { + "bbox": [ + 46, + 170, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 170, + 287, + 204 + ], + "type": "text", + "content": "Figure 4. Error distribution of the estimated 3D poses on Human3.6M. The horizontal axis represents the error interval, and the vertical axis is the proportion of poses with error in the interval." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "text", + "content": "less than " + }, + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "inline_equation", + "content": "35\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "text", + "content": ", and the lowest number of those with error larger than " + }, + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "inline_equation", + "content": "45\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 209, + 288, + 258 + ], + "type": "text", + "content": ". This again confirm the advances of STCFoermer for not only obtaining the lowest average error but also better distribution across different ranges of error." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 262, + 285, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 285, + 276 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 285, + 276 + ], + "type": "text", + "content": "4.4. Performance Comparison on MPI-INF-3DHP" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": "To verify the generalization of 3D pose estimation models, we then test the performance on MPI-INF-3DHP dataset, which contains more complex backgrounds. Following previous works [23, 39, 52], the ground-truth 2D poses are taken as input. In view of the shorter video sequence, we set the number of input frames as 9, 27 or 81. Table 3 lists the performance comparisons. 
Similar to the observations on Human3.6M, our STCFformer with " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "T = 81" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " reaches the to-date best reported performance with PCK of " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "98.7\\%" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": ", AUC of " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "83.9\\%" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " and P1 error of " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "23.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": ", outperforming the current state-of-the-art models with a large margin of " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "0.8\\%" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " in PCK, " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "8.1\\%" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " in AUC and " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "9.1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " in P1 error. In particular, STCFformer shows better generalization ability and surpasses MixSTE [52] by a much larger P1 error drop (31.8mm) against " + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "inline_equation", + "content": "0.3\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 281, + 287, + 473 + ], + "type": "text", + "content": " on Human3.6M. This highlights the efficacy of our method on the more complicated data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 479, + 141, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 141, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 141, + 493 + ], + "type": "text", + "content": "4.5. Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 498, + 287, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 534 + ], + "type": "text", + "content": "For a more in-depth analysis of our STCFormer, we further conduct a series of ablation studies on Human3.6M dataset using the CPN-estimated 2D poses as input." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "text", + "content": "The first group of experiments is to verify how well our STCFoer works with different number of input frames. Table 4 shows the detailed comparisons in terms of P1 error. A general performance tendency is observed that increasing " + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "text", + "content": " leads to monotonic performance improvement. 
Among the competitive methods, our STCFoer constantly exhibits the best results across 27-frame, 81-frame and 243-frame settings. The leading performances demonstrate the ability of STCFoer to deal with different length of video sequence. More remarkably, STCFoer-L has " + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "inline_equation", + "content": "43.7\\%" + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "text", + "content": " fewer parameters and spends " + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "inline_equation", + "content": "43.6\\%" + }, + { + "bbox": [ + 46, + 534, + 287, + 677 + ], + "type": "text", + "content": " fewer FLOPs than the runner-up MixSTE." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "The second ablation study assesses the performance impact of different design components. In this experiment, the models take the estimated 2D poses by CPN of 27 frames" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 308, + 128, + 538, + 277 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "type": "text", + "content": "Table 3. Performance comparisons in terms of PCK, AUC and P1 with the state-of-the-art methods on MPI-INF-3DHP dataset. Here, the higher PCK, the higher AUC and the lower P1 indicate the better regressions. The best result in each column is marked in red. " + }, + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 305, + 70, + 545, + 125 + ], + "type": "text", + "content": " is the number of sampled frames from each video." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 128, + 538, + 277 + ], + "lines": [ + { + "bbox": [ + 308, + 128, + 538, + 277 + ], + "spans": [ + { + "bbox": [ + 308, + 128, + 538, + 277 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>Publication</td><td>PCK ↑</td><td>AUC ↑</td><td>P1(mm) ↓</td></tr>
<tr><td>UGCN [46] (T=96)</td><td>ECCV'20</td><td>86.9</td><td>62.1</td><td>68.1</td></tr>
<tr><td>Anatomy3D [6] (T=81)</td><td>TCSVT'21</td><td>87.8</td><td>53.8</td><td>79.1</td></tr>
<tr><td>PoseFormer [54] (T=9)</td><td>ICCV'21</td><td>88.6</td><td>56.4</td><td>77.1</td></tr>
<tr><td>Hu et al. [16] (T=96)</td><td>ACM MM'21</td><td>97.9</td><td>69.5</td><td>42.5</td></tr>
<tr><td>CrossFormer [13] (T=9)</td><td>arXiv'22</td><td>89.1</td><td>57.5</td><td>76.3</td></tr>
<tr><td>PATA [48] (T=243)</td><td>TIP'22</td><td>90.3</td><td>57.8</td><td>69.4</td></tr>
<tr><td>MHFormer [23] (T=9)</td><td>CVPR'22</td><td>93.8</td><td>63.3</td><td>58.0</td></tr>
<tr><td>MixSTE [52] (T=27)</td><td>CVPR'22</td><td>94.4</td><td>66.5</td><td>54.9</td></tr>
<tr><td>Einfalt et al. [9] (T=81)</td><td>arXiv'22</td><td>95.4</td><td>67.6</td><td>46.9</td></tr>
<tr><td>P-STMO [39] (T=81)</td><td>ECCV'22</td><td>97.9</td><td>75.8</td><td>32.2</td></tr>
<tr><td>STCFormer (T=9)</td><td></td><td>98.2</td><td>81.5</td><td>28.2</td></tr>
<tr><td>STCFormer (T=27)</td><td></td><td>98.4</td><td>83.4</td><td>24.2</td></tr>
<tr><td>STCFormer (T=81)</td><td></td><td>98.7</td><td>83.9</td><td>23.1</td></tr>
", + "image_path": "74490c012557a14f19781086b79d44cbb034b1ea214e96e0df0e7a13cdabd603.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 309, + 319, + 545, + 485 + ], + "blocks": [ + { + "bbox": [ + 305, + 284, + 545, + 317 + ], + "lines": [ + { + "bbox": [ + 305, + 284, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 305, + 284, + 545, + 317 + ], + "type": "text", + "content": "Table 4. The P1 error comparisons with different number of sampled frame " + }, + { + "bbox": [ + 305, + 284, + 545, + 317 + ], + "type": "inline_equation", + "content": "(T)" + }, + { + "bbox": [ + 305, + 284, + 545, + 317 + ], + "type": "text", + "content": " on Human3.6M dataset. The best result in each column is marked in red." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 319, + 545, + 485 + ], + "lines": [ + { + "bbox": [ + 309, + 319, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 309, + 319, + 545, + 485 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>Frames T</td><td>Parameters</td><td>FLOPs (M)</td><td>P1(mm)</td></tr>
<tr><td>StridedFormer [22]</td><td>27</td><td>4.01M</td><td>163</td><td>46.9</td></tr>
<tr><td>P-STMO [39]</td><td>27</td><td>4.6M</td><td>164</td><td>46.1</td></tr>
<tr><td>MHFormer [23]</td><td>27</td><td>18.92M</td><td>1000</td><td>45.9</td></tr>
<tr><td>MixSTE [52]</td><td>27</td><td>33.61M</td><td>15402</td><td>45.1</td></tr>
<tr><td>STCFormer</td><td>27</td><td>4.75M</td><td>2173</td><td>44.1</td></tr>
<tr><td>StridedFormer [22]</td><td>81</td><td>4.06M</td><td>392</td><td>45.4</td></tr>
<tr><td>P-STMO [39]</td><td>81</td><td>5.4M</td><td>493</td><td>44.1</td></tr>
<tr><td>MHFormer [23]</td><td>81</td><td>19.67M</td><td>1561</td><td>44.5</td></tr>
<tr><td>MixSTE [52]</td><td>81</td><td>33.61M</td><td>46208</td><td>42.7</td></tr>
<tr><td>STCFormer</td><td>81</td><td>4.75M</td><td>6520</td><td>42.0</td></tr>
<tr><td>StridedFormer [22]</td><td>243</td><td>4.23M</td><td>1372</td><td>44.0</td></tr>
<tr><td>P-STMO [39]</td><td>243</td><td>6.7M</td><td>1737</td><td>42.8</td></tr>
<tr><td>MHFormer [23]</td><td>243</td><td>24.72M</td><td>4812</td><td>43.2</td></tr>
<tr><td>MixSTE [52]</td><td>243</td><td>33.61M</td><td>138623</td><td>40.9</td></tr>
<tr><td>STCFormer</td><td>243</td><td>4.75M</td><td>19561</td><td>41.0</td></tr>
<tr><td>STCFormer-L</td><td>243</td><td>18.91M</td><td>78107</td><td>40.5</td></tr>
", + "image_path": "37196c3bf7a0d2abaa3bd5e0f23a8f35b450c33eb70fa6b817d80c9f658d9ae3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": "as input. Spatial Attention and Temporal Attention solely exploit the spatial pathway and temporal pathway, respectively. STC only contains both pathways but without the positional embedding. " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}_1" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}_2" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " represent the two SPE positional embeddings and their combination, respectively. Table 5 details the contribution of each component towards the overall performance. STC only by considering both spatial and temporal correlations leads to the error drop over solely utilizing spatial attention and temporal attention by " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "218.5\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "10.6\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": ", respectively. The result indicates the importance of modeling the correlations along two axes in parallel. The three positional embedding strategies, i.e., " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}_1" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}_2" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "\\mathbf{SPE}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": ", further contribute " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "0.6\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "12.1\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "inline_equation", + "content": "12.9\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 496, + 545, + 675 + ], + "type": "text", + "content": " of error drop, respectively, proving the advances of involving the structure information." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "In addition to the proposed " + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{SPE}_1" + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": ", we explore three other positional embedding functions, i.e., Absolute Positional Embedding (APE), Centrality Positional Embedding" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4796" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 95, + 269, + 171 + ], + "blocks": [ + { + "bbox": [ + 47, + 71, + 287, + 92 + ], + "lines": [ + { + "bbox": [ + 47, + 71, + 287, + 92 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 287, + 92 + ], + "type": "text", + "content": "Table 5. Performance contribution of each component in the proposed STCFormer on Human3.6M dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 65, + 95, + 269, + 171 + ], + "lines": [ + { + "bbox": [ + 65, + 95, + 269, + 171 + ], + "spans": [ + { + "bbox": [ + 65, + 95, + 269, + 171 + ], + "type": "table", + "html": "
STCSPE1SPE2P1 (mm)
Spatial Attention#1275.5
Temporal Attention#267.6
STC only#357.0
+SPE1#456.4
+SPE2#544.9
+SPE#644.1
", + "image_path": "c8388f26c987fe17b472250cc0ac60a04c69e256e8954b65912738e1aa4a6f38.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 178, + 288, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 178, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 46, + 178, + 288, + 274 + ], + "type": "text", + "content": "(CPE), and Symmetric Positional Embedding (SyPE). We refer the readers to read the supplementary materials for more details. In Table 6, we assess the performance impact of different positional embedding functions. In this experiment, the models take the estimated 2D poses by CPN of 9 frames as input. And the comparisons empirically show the superiority of the used " + }, + { + "bbox": [ + 46, + 178, + 288, + 274 + ], + "type": "inline_equation", + "content": "\\mathrm{SPE}_1" + }, + { + "bbox": [ + 46, + 178, + 288, + 274 + ], + "type": "text", + "content": " (48.3mm vs. 48.7mm, 49.9mm, and 49.2mm)." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 121, + 331, + 214, + 393 + ], + "blocks": [ + { + "bbox": [ + 46, + 285, + 287, + 327 + ], + "lines": [ + { + "bbox": [ + 46, + 285, + 287, + 327 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 287, + 327 + ], + "type": "text", + "content": "Table 6. The P1 error comparisons with different positional embedding functions on Human3.6M dataset. The \"Baseline\" denotes the STCFoermer without " + }, + { + "bbox": [ + 46, + 285, + 287, + 327 + ], + "type": "inline_equation", + "content": "\\mathrm{SPE}_1" + }, + { + "bbox": [ + 46, + 285, + 287, + 327 + ], + "type": "text", + "content": ". The best result in each column is marked in red." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 121, + 331, + 214, + 393 + ], + "lines": [ + { + "bbox": [ + 121, + 331, + 214, + 393 + ], + "spans": [ + { + "bbox": [ + 121, + 331, + 214, + 393 + ], + "type": "table", + "html": "
P1 (mm)
Baseline#148.7
+SPE1#248.3
+APE#348.9
+CPE#449.6
+SyPE#549.2
", + "image_path": "f2038d7d3e118f5fb82eadf1851cbb146fe40382a7b4083066dd06acdc0c803a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 407, + 164, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 164, + 420 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 164, + 420 + ], + "type": "text", + "content": "4.6. Qualitative Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 426, + 287, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 287, + 472 + ], + "type": "text", + "content": "In this section, we validate our STCFoermer through attention visualization and 3D human pose estimation visualization. The examples are randomly selected from the evaluation set of Human3.6M." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 474, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 568 + ], + "type": "text", + "content": "Attention visualization. We visualize the spatial attention map and temporal attention map from the last STC block of STCFormaler in Figure 5. As expected, the spatial attention map (Figure 5(a)) shows that our model learns different patterns between joints from the videos of different actions. Moreover, the temporal attention map in Figure 5(b) illustrates strong correlation across adjacent frames owing to the continuity of human actions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 713 + ], + "type": "text", + "content": "Result visualization on Human3.6M. Figure 6 showcases 3D human pose estimation results by STCFormaler and the recent transformer-based approaches including Strided-Former [22], MHFormer [23] and P-STMO [39]. The three examples are randomly selected from the walking, posing and sitting actions in Human3.6M dataset. For each method, we draw the estimated 3D human pose and the ground-truth 3D coordinates in one figure, and calculate the average error. Overall, our STCFormaler shows better reconstruction results across all three samples than the other three methods. Particularly, for the challenging action with complicated pose articulation like \"sitting\" (the third row), STC" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 423, + 185 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 423, + 185 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 423, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 423, + 185 + ], + "type": "image", + "image_path": "f4d7222c2cb3396df890ce6c3df23e8f1495e8fcaa74ec225a68a90150916539.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 192, + 546, + 225 + ], + "lines": [ + { + "bbox": [ + 304, + 192, + 546, + 225 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 546, + 225 + ], + "type": "text", + "content": "Figure 5. Visualizations of attention maps from the spatial and temporal attention modules in STCFormer. The x-axis and y-axis correspond to the queries and the predicted outputs, respectively." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 439, + 71, + 547, + 186 + ], + "blocks": [ + { + "bbox": [ + 439, + 71, + 547, + 186 + ], + "lines": [ + { + "bbox": [ + 439, + 71, + 547, + 186 + ], + "spans": [ + { + "bbox": [ + 439, + 71, + 547, + 186 + ], + "type": "image", + "image_path": "172d75ea0c43e4f480276dfa409191e3d6e591614cc92ad58b8def51ea5e8a16.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 310, + 239, + 545, + 383 + ], + "blocks": [ + { + "bbox": [ + 310, + 239, + 545, + 383 + ], + "lines": [ + { + "bbox": [ + 310, + 239, + 545, + 383 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 545, + 383 + ], + "type": "image", + "image_path": "02eecdb6fe905f7c695b7edd35b58239fff5340110c2acebbde9b879fe152920.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 389, + 545, + 445 + ], + "lines": [ + { + "bbox": [ + 304, + 389, + 545, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 545, + 445 + ], + "type": "text", + "content": "Figure 6. Examples of 3D pose estimation by StridedFormer [22], MHFormer [23], P-STMO [39] and our STCFormer. The gray skeleton is the ground-truth 3D pose. Blue, orange and green skeletons represent the left part, right part and torso of the estimated human body, respectively." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 460, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 485 + ], + "type": "text", + "content": "Former still estimates the 3D coordinates accurately and reconstructs the structurally plausible 3D pose." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 510, + 378, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 510, + 378, + 521 + ], + "spans": [ + { + "bbox": [ + 306, + 510, + 378, + 521 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 546, + 713 + ], + "type": "text", + "content": "We have presented Spatio-Temporal Criss-cross Transformer (STCFormer), which explores spatial correlation and temporal correlation in a two-pathway manner for 3D human pose estimation in videos. Particularly, STCFormer is built by stacking several STC blocks, each of which separates the joint features into two groups along the channel dimension, and models the spatial and temporal interactions on each group, respectively. By doing so, the receptive field of STC block is like a criss cross of spatial and temporal axes. Moreover, the STCFormer exploits the dynamic chain structure of human body to model local context, resulting in a new positional embedding function. The experiments conducted on two benchmarks demonstrate the effectiveness of STCFormer and good generalization ability compared to the state-of-the-art techniques." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4797" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Ankur Agarwal and Bill Triggs. Recovering 3d human pose from monocular images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 28(1):44-58, 2005. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 287, + 169 + ], + "type": "text", + "content": "[2] Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele. Pictorial structures revisited: People detection and articulated pose estimation. In CVPR, 2009. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 171, + 287, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 171, + 287, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 171, + 287, + 203 + ], + "type": "text", + "content": "[3] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 287, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 287, + 249 + ], + "type": "text", + "content": "[4] Yujun Cai, Liuhao Ge, Jun Liu, Jianfei Cai, Tat-Jen Cham, Junsong Yuan, and Nadia Magnenat Thalmann. Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In ICCV, 2019. 1, 2, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 251, + 287, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 287, + 281 + ], + "type": "text", + "content": "[5] Ching-Hang Chen and Deva Ramanan. 3d human pose estimation= 2d pose estimation+ matching. In CVPR, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 285, + 287, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 287, + 338 + ], + "type": "text", + "content": "[6] Tianlang Chen, Chen Fang, Xiaohui Shen, Yiheng Zhu, Zhili Chen, and Jiebo Luo. Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology, 32(1):198-209, 2021. 
1, 2, 5, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 341, + 287, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 341, + 287, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 341, + 287, + 373 + ], + "type": "text", + "content": "[7] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In CVPR, 2018. 1, 2, 5, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 375, + 287, + 430 + ], + "type": "text", + "content": "[8] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 431, + 287, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 431, + 287, + 474 + ], + "spans": [ + { + "bbox": [ + 53, + 431, + 287, + 474 + ], + "type": "text", + "content": "[9] Moritz Einfalt, Katja Ludwig, and Rainer Lienhart. Uplift and upsample: Efficient 3d human pose estimation with up-lifting transformers. arXiv preprint arXiv:2210.06110, 2022. 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 476, + 287, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 476, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 48, + 476, + 287, + 518 + ], + "type": "text", + "content": "[10] Hao-Shu Fang, Yuanlu Xu, Wenguan Wang, Xiaobai Liu, and Song-Chun Zhu. Learning pose grammar to encode human body configuration for 3d pose estimation. In AAAI, 2018. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 521, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 521, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 48, + 521, + 287, + 565 + ], + "type": "text", + "content": "[11] Nate Hagbi, Oriel Bergig, Jihad El-Sana, and Mark Billinghurst. Shape recognition and pose estimation for mobile augmented reality. IEEE Transactions on Visualization and Computer Graphics, 17(10):1369-1379, 2010. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 567, + 287, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 599 + ], + "type": "text", + "content": "[12] Yanbin Hao, Hao Zhang, Chong-Wah Ngo, and Xiangnan He. Group contextualization for video recognition. arXiv preprint arXiv:2203.09694, 2022. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 601, + 287, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 654 + ], + "type": "text", + "content": "[13] Mohammed Hassanin, Abdelwahed Khamiss, Mohammed Bennamoun, Farid Boussaid, and Ibrahim Radwan. Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387, 2022. 
1, 2, 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 657, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 657, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 657, + 287, + 678 + ], + "type": "text", + "content": "[14] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[15] Mir Rayat Imtiaz Hossain and James J Little. Exploiting temporal information for 3d human pose estimation. In ECCV, 2018. 1" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[16] Wenbo Hu, Changgong Zhang, Fangneng Zhan, Lei Zhang, and Tien-Tsin Wong. Conditional directed graph convolution for 3d human pose estimation. In ACM MM, 2021. 2, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 107, + 545, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 137 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 137 + ], + "type": "text", + "content": "[17] Catalin Ionescu, Fuxin Li, and Cristian Sminchisescu. Latent structured models for human pose estimation. In ICCV, 2011. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 140, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 194 + ], + "type": "text", + "content": "[18] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, 2013. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 195, + 545, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 545, + 226 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 545, + 226 + ], + "type": "text", + "content": "[19] Angjoo Kanazawa, Michael J Black, David W Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In CVPR, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 259 + ], + "type": "text", + "content": "[20] Branislav Kisacanin, Vladimir Pavlovic, and Thomas S Huang. Real-time vision for human-computer interaction. Springer Science & Business Media, 2005. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 262, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 262, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 262, + 545, + 293 + ], + "type": "text", + "content": "[21] Kyoungoh Lee, Inwooong Lee, and Sanghoon Lee. 
Propagating lstm: 3d pose estimation based on joint interdependency. In ECCV, 2018. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 294, + 545, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 337 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 337 + ], + "type": "text", + "content": "[22] Wenhao Li, Hong Liu, Runwei Ding, Mengyuan Liu, Pichao Wang, and Wenming Yang. Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia, 2022. 1, 2, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 338, + 545, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 370 + ], + "type": "text", + "content": "[23] Wenhao Li, Hong Liu, Hao Tang, Pichao Wang, and Luc Van Gool. Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In CVPR, 2022. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 372, + 545, + 403 + ], + "type": "text", + "content": "[24] Yehao Li, Ting Yao, Yingwei Pan, and Tao Mei. Contextual transformer networks for visual recognition. IEEE Trans. on PAMI, 2022. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 405, + 545, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 545, + 436 + ], + "type": "text", + "content": "[25] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In CVPR, 2021. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 308, + 438, + 545, + 480 + ], + "type": "text", + "content": "[26] Ruixu Liu, Ju Shen, He Wang, Chen Chen, Sen-ching Cheung, and Vijayan Asari. Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In CVPR, 2020. 1, 2, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 482, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 545, + 514 + ], + "type": "text", + "content": "[27] Zhenguang Liu, Pengxiang Su, Shuang Wu, Xuanjing Shen, Haipeng Chen, Yanbin Hao, and Meng Wang. Motion prediction using trajectory cues. In ICCV, 2021. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 515, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 515, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 308, + 515, + 545, + 568 + ], + "type": "text", + "content": "[28] Zhenguang Liu, Shuang Wu, Shuyuan Jin, Qi Liu, Shouling Ji, Shijian Lu, and Li Cheng. Investigating pose representations and motion contexts modeling for 3d motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 
1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 570, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 570, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 308, + 570, + 545, + 601 + ], + "type": "text", + "content": "[29] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Jiebo Luo, and Tao Mei. Stand-alone inter-frame attention in video models. In CVPR, 2022. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 604, + 545, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 604, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 308, + 604, + 545, + 634 + ], + "type": "text", + "content": "[30] Fuchen Long, Zhaofan Qiu, Yingwei Pan, Ting Yao, Chong-Wah Ngo, and Tao Mei. Dynamic temporal filtering in video models. In ECCV, 2022. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 636, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 667 + ], + "type": "text", + "content": "[31] Julieta Martinez, Rayat Hossain, Javier Romero, and James J Little. A simple yet effective baseline for 3d human pose estimation. In ICCV, 2017. 1, 2" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 712 + ], + "type": "text", + "content": "[32] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3d human pose estimation in the wild using improved cnn supervision. In 3DV, 2017. 5" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4798" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[33] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In ECCV, 2016. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 140 + ], + "type": "text", + "content": "[34] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3d human pose. In CVPR, 2017. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "spans": [ + { + "bbox": [ + 49, + 141, + 287, + 184 + ], + "type": "text", + "content": "[35] Dario Pavllo, Christoph Feichtenhofer, David Grangier, and Michael Auli. 
3d human pose estimation in video with temporal convolutions and semi-supervised training. In CVPR, 2019. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 186, + 287, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 186, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 49, + 186, + 287, + 217 + ], + "type": "text", + "content": "[36] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In ICCV, 2017. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 219, + 287, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 219, + 287, + 251 + ], + "spans": [ + { + "bbox": [ + 49, + 219, + 287, + 251 + ], + "type": "text", + "content": "[37] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, and Tao Mei. Mlp-3d: A mlp-like 3d architecture with grouped time mixing. In CVPR, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 253, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 253, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 49, + 253, + 287, + 285 + ], + "type": "text", + "content": "[38] Zhaofan Qiu, Ting Yao, Chong-Wah Ngo, Xinmei Tian, and Tao Mei. Learning spatio-temporal representation with local and global diffusion. In CVPR, 2019. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 287, + 287, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 287, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 49, + 287, + 287, + 330 + ], + "type": "text", + "content": "[39] Wenkang Shan, Zhenhua Liu, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. arXiv preprint arXiv:2203.07628, 2022. 5, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 332, + 287, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 332, + 287, + 374 + ], + "spans": [ + { + "bbox": [ + 49, + 332, + 287, + 374 + ], + "type": "text", + "content": "[40] Wenkang Shan, Haopeng Lu, Shanshe Wang, Xinfeng Zhang, and Wen Gao. Improving robustness and accuracy via relative information encoding in 3d human pose estimation. In ACM MM, 2021. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 376, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 376, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 49, + 376, + 287, + 408 + ], + "type": "text", + "content": "[41] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In CVPR, 2019. 1, 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 410, + 287, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 410, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 49, + 410, + 287, + 432 + ], + "type": "text", + "content": "[42] Xiao Sun, Bin Xiao, Fangyin Wei, Shuang Liang, and Yichen Wei. Integral human pose regression. In ECCV, 2018. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 434, + 287, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 434, + 287, + 465 + ], + "spans": [ + { + "bbox": [ + 49, + 434, + 287, + 465 + ], + "type": "text", + "content": "[43] Mikael Svenstrup, Soren Tranberg, Hans Jorgen Andersen, and Thomas Bak. Pose estimation and adaptive robot behaviour for human-robot interaction. In ICRA, 2009. 
1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 467, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 467, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 49, + 467, + 287, + 510 + ], + "type": "text", + "content": "[44] Zhenhua Tang, Jia Li, Yanbin Hao, and Richang Hong. Mlp-jcg: Multi-layer perceptron with joint-coordinate gating for efficient 3d human pose estimation. IEEE Transactions on Multimedia, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 512, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 512, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 49, + 512, + 287, + 544 + ], + "type": "text", + "content": "[45] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NIPS, 2017. 1, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 545, + 287, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 545, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 49, + 545, + 287, + 578 + ], + "type": "text", + "content": "[46] Jingbo Wang, Sijie Yan, Yuanjun Xiong, and Dahua Lin. Motion guided 3d pose estimation from videos. In ECCV, 2020. 2, 6, 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 580, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 580, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 49, + 580, + 287, + 601 + ], + "type": "text", + "content": "[47] Tianhan Xu and Wataru Takano. Graph stacked hourglass networks for 3d human pose estimation. In CVPR, 2021. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 49, + 602, + 287, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 602, + 287, + 645 + ], + "spans": [ + { + "bbox": [ + 49, + 602, + 287, + 645 + ], + "type": "text", + "content": "[48] Youze Xue, Jiansheng Chen, Xiangming Gu, Huimin Ma, and Hongbing Ma. Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing, 31, 2022. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 49, + 647, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 647, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 49, + 647, + 287, + 679 + ], + "type": "text", + "content": "[49] Ting Yao, Yingwei Pan, Yehao Li, Chong-Wah Ngo, and Tao Mei. Wave-vit: Unifying wavelet and transformers for visual representation learning. In ECCV, 2022. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 49, + 681, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 681, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 681, + 287, + 712 + ], + "type": "text", + "content": "[50] Hao Zhang, Lechao Cheng, Yanbin Hao, and Chong-wah Ngo. Long-term leap attention, short-term periodic shift for video classification. In ACM MM, 2022. 2" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 217 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 94 + ], + "type": "text", + "content": "[51] Hao Zhang, Yanbin Hao, and Chong-Wah Ngo. 
Token shift transformer for video classification. In ACM MM, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 139 + ], + "type": "text", + "content": "[52] Jinlu Zhang, Zhigang Tu, Jianyu Yang, Yujin Chen, and Junsong Yuan. Mixste: Seq2seq mixed spatio-temporal encoder for 3d human pose estimation in video. arXiv preprint arXiv:2203.00859, 2022. 1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "text", + "content": "[53] Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris N Metaxas. Semantic graph convolutional networks for 3d human pose regression. In CVPR, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[54] Ce Zheng, Sijie Zhu, Matias Mendieta, Taojiannan Yang, Chen Chen, and Zhengming Ding. 3d human pose estimation with spatial and temporal transformers. In ICCV, 2021. 1, 2, 5, 6, 7" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4799" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_content_list.json b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..30e57a5e21aa7b8517a07b9be0e4892f522727f4 --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_content_list.json @@ -0,0 +1,1706 @@ +[ + { + "type": "text", + "text": "3D Human Pose Estimation via Intuitive Physics", + "text_level": 1, + "bbox": [ + 238, + 121, + 728, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shashank Tripathi1 Lea Müller1 Chun-Hao P. Huang1 Omid Taheri1 Michael J. Black1 Dimitrios Tzionas2*", + "bbox": [ + 186, + 161, + 779, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Max Planck Institute for Intelligent Systems, Tübingen, Germany $^{2}$ University of Amsterdam, the Netherlands {stripathi, lmueller2, chuang2, otaheri, black}@tue.mpg.de d.tzionas@uva.nl", + "bbox": [ + 145, + 199, + 821, + 233 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/df44fdeea43adfe7d6345099c566049078468d7fd6b71b85fce44c44ceee9761.jpg", + "image_caption": [ + "Figure 1. Estimating a 3D body from an image is ill-posed. A recent, representative, optimization method [59] produces bodies that are in unstable poses, penetrate the floor, or hover above it. In contrast, IPMAN estimates a 3D body that is physically plausible. 
To achieve this, IPMAN uses novel intuitive-physics (IP) terms that exploit inferred pressure heatmaps on the body, the Center of Pressure (CoP), and the body's Center of Mass (CoM). Body heatmap colors encode per-vertex pressure." + ], + "image_footnote": [], + "bbox": [ + 76, + 255, + 893, + 407 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 497, + 313, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Estimating 3D humans from images often produces implausible bodies that lean, float, or penetrate the floor. Such methods ignore the fact that bodies are typically supported by the scene. A physics engine can be used to enforce physical plausibility, but these are not differentiable, rely on unrealistic proxy bodies, and are difficult to integrate into existing optimization and learning frameworks. In contrast, we exploit novel intuitive-physics (IP) terms that can be inferred from a 3D SMPL body interacting with the scene. Inspired by biomechanics, we infer the pressure heatmap on the body, the Center of Pressure (CoP) from the heatmap, and the SMPL body's Center of Mass (CoM). With these, we develop IPMAN, to estimate a 3D body from a color image in a \"stable\" configuration by encouraging plausible floor contact and overlapping CoP and CoM. Our IP terms are intuitive, easy to implement, fast to compute, differentiable, and can be integrated into existing optimization and regression methods. We evaluate IPMAN on standard datasets and MoYo, a new dataset with synchronized multi-view images, ground-truth 3D bodies with complex poses, body-floor contact, CoM and pressure. IPMAN produces more plausible results than the state of the art, improving accuracy for static poses, while not hurting dynamic ones. Code and data are available for research at https://ipman.is.tue.mpg.de.", + "bbox": [ + 73, + 527, + 473, + 891 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 497, + 630, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To understand humans and their actions, computers need automatic methods to reconstruct the body in 3D. Typically, the problem entails estimating the 3D human pose and shape (HPS) from one or more color images. State-of-the-art (SOTA) methods [46, 51, 75, 102] have made rapid progress, estimating 3D humans that align well with image features in the camera view. Unfortunately, the camera view can be deceiving. When viewed from other directions, or when placed in a 3D scene, the estimated bodies are often physically implausible: they lean, hover, or penetrate the ground (see Fig. 1 top). This is because most SOTA methods reason about humans in isolation; they ignore that people move in a scene, interact with it, and receive physical support by contacting it. This is a deal-breaker for inherently 3D applications, such as biomechanics, augmented/virtual reality (AR/VR) and the \"metaverse\"; these need humans to be reconstructed faithfully and physically plausibly with respect to the scene. For this, we need a method that estimates the 3D human on a ground plane from a color image in a configuration that is physically \"stable\".", + "bbox": [ + 496, + 522, + 895, + 824 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This is naturally related to reasoning about physics and support. 
There exist many physics simulators [10, 30, 60] for games, movies, or industrial simulations, and using these for plausible HPS estimation is increasingly popular [66, 74, 96]. However, existing simulators come with two significant", + "bbox": [ + 496, + 824, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* This work was mostly performed at MPI-IS.", + "bbox": [ + 76, + 911, + 326, + 925 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4713", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "problems: (1) They are typically non-differentiable black boxes, making them incompatible with existing optimization and learning frameworks. Consequently, most methods [64, 95, 96] use them with reinforcement learning to evaluate whether a certain input has the desired outcome, but with no ability to reason about how changing inputs affects the outputs. (2) They rely on an unrealistic proxy body model for computational efficiency; bodies are represented as groups of rigid 3D shape primitives. Such proxy models are crude approximations of human bodies, which, in reality, are much more complex and deform non-rigidly when they move and interact. Moreover, proxies need a priori known body dimensions that are kept fixed during simulation. Also, these proxies differ significantly from the 3D body models [41, 54, 92] used by SOTA HPS methods. Thus, current physics simulators are too limited for use in HPS.", + "bbox": [ + 75, + 90, + 472, + 332 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "What we need, instead, is a solution that is fully differentiable, uses a realistic body model, and seamlessly integrates physical reasoning into HPS methods (both optimization- and regression-based). To this end, instead of using full physics simulation, we introduce novel intuitive-physics (IP) terms that are simple, differentiable, and compatible with a body model like SMPL [54]. Specifically, we define terms that exploit an inferred pressure heatmap of the body on the ground plane, the Center of Pressure (CoP) that arises from the heatmap, and the SMPL body's Center of Mass (CoM) projected on the floor; see Fig. 2 for a visualization. Intuitively, bodies whose CoM lie close to their CoP are more stable than ones with a CoP that is further away (see Fig. 5); the former suggests a static pose, e.g. standing or holding a yoga pose, while the latter a dynamic pose, e.g., walking.", + "bbox": [ + 75, + 335, + 472, + 564 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We use these intuitive-physics terms in two ways. First, we incorporate them in an objective function that extends SMPLify-XMC [59] to optimize for body poses that are stable. We also incorporate the same terms in the training loss for an HPS regressor, called IPMAN (Intuitive-Physics-based huMAN). 
In both formulations, the intuitive-physics terms encourage estimates of body shape and pose that have sufficient ground contact, while penalizing interpenetration and encouraging an overlap of the CoP and CoM.", + "bbox": [ + 75, + 565, + 472, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our intuitive-physics formulation is inspired by work in biomechanics [32, 33, 61], which characterizes the stability of humans in terms of relative positions between the CoP, the CoM, and the Base of Support (BoS). The BoS is defined as the convex hull of all contact regions on the floor (Fig. 2). Following past work [6, 71, 74], we use the \"inverted pendulum\" model [85, 86] for body balance; this considers poses as stable if the gravity-projected CoM onto the floor lies inside the BoS. Similar ideas are explored by Scott et al. [71] but they focus on predicting a foot pressure heatmap from 2D or 3D body joints. We go significantly further to exploit stability in training an HPS regressor. This requires two technical novelties.", + "bbox": [ + 75, + 704, + 472, + 900 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e48583f23ac2984857a497de7716a6d94a45d0c15d116bf9a57e7f8d01a5ad2f.jpg", + "image_caption": [ + "Figure 2. (1) A SMPL mesh sitting. (2) The inferred pressure map on the ground (color-coded heatmap), CoP (green), CoM (pink), and Base of Support (BoS, yellow polygon). (3) Segmentation of SMPL into $N_P = 10$ parts, used for computing CoM; see Sec. 3.2." + ], + "image_footnote": [], + "bbox": [ + 504, + 82, + 893, + 176 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The first involves computing CoM. To this end, we uniformly sample points on SMPL's surface, and calculate each body part's volume. Then, we compute CoM as the average of all uniformly sampled points weighted by the corresponding part volumes. We denote this as pCoM, standing for \"part-weighted CoM\". Importantly, pCoM takes into account SMPL's shape, pose, and all blend shapes, while it is also computationally efficient and differentiable.", + "bbox": [ + 496, + 250, + 893, + 369 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The second involves estimating CoP directly from the image, without access to a pressure sensor. Our key insight is that the soft tissues of human bodies deform under pressure, e.g., the buttocks deform when sitting. However, SMPL does not model this deformation; it penetrates the ground instead of deforming. We use the penetration depth as a proxy for pressure [68]; deeper penetration means higher pressure. With this, we estimate a pressure field on SMPL's mesh and compute the CoP as the pressure-weighted average of the surface points. Again this is differentiable.", + "bbox": [ + 496, + 371, + 895, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For evaluation, we use a standard HPS benchmark (Human3.6M [37]), but also the RICH [35] dataset. However, these datasets have limited interactions with the floor. We thus capture a novel dataset, MoYo, of challenging yoga poses, with synchronized multi-view video, ground-truth SMPL-X [63] meshes, pressure sensor measurements, and body CoM. IPMAN, in both of its forms, and across all datasets, produces more accurate and stable 3D bodies than the state of the art. Importantly, we find that IPMAN improves accuracy for static poses, while not hurting dynamic ones. 
This makes IPMAN applicable to everyday motions.", + "bbox": [ + 496, + 523, + 895, + 690 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize: (1) We develop IPMAN, the first HPS method that integrates intuitive physics. (2) We infer biomechanical properties such as CoM, CoP and body pressure. (3) We define novel intuitive-physics terms that can be easily integrated into HPS methods. (4) We create MoYo, a dataset that uniquely has complex poses, multi-view video, and ground-truth bodies, pressure, and CoM. (5) We show that our IP terms improve HPS accuracy and physical plausibility. (6) Data and code are available for research.", + "bbox": [ + 496, + 690, + 895, + 828 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 844, + 640, + 859 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Human Pose and Shape (HPS) from images. Existing methods fall into two major categories: (1) non-", + "bbox": [ + 500, + 869, + 895, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4714", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "parametric methods that reconstruct a free-form body representation, e.g., joints [1, 56, 57] or vertices [52, 58, 100], and (2) parametric methods that use statistical body models [5, 25, 41, 54, 63, 92, 97]. The latter methods focus on various aspects, such as expressiveness [13, 18, 63, 69, 87], clothed bodies [15, 88, 91], videos [24, 45, 78, 99], and multiperson scenarios [38, 75, 103], to name a few.", + "bbox": [ + 75, + 90, + 470, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inference is done by either optimization or regression. Optimization-based methods [7, 16, 63, 87, 88] fit a body model to image evidence, such as joints [11], dense vertex correspondences [2] or 2D segmentation masks [23]. Regression-based methods [42, 44, 48, 51, 76, 102, 106, 109] use a loss similar to the objective function of optimization methods to train a network to infer body model parameters. Several methods combine optimization and regression in a training loop [47, 50, 59]. Recent methods [24, 40] fine-tune pre-trained networks at test time w.r.t. an image or a sequence, retaining flexibility (optimization) while being less sensitive to initialization (regression).", + "bbox": [ + 75, + 198, + 470, + 378 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Despite their success, these methods reason about the human in \"isolation\", without taking the surrounding scene into account; see [77, 107] for a comprehensive review.", + "bbox": [ + 76, + 381, + 470, + 425 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contact-only scene constraints. A common way of using scene information is to consider body-scene contact [12, 17, 27, 28, 65, 84, 90, 94, 98, 104, 105, 110]. Yamamoto et al. [93] and others [19, 27, 70, 98, 104] ensure that estimated bodies have plausible scene contact. For videos, encouraging foot-ground contact reduces foot skating [36, 65, 72, 105, 110]. Weng et al. [84] use contact in estimating the pose and scale of scene objects, while Villegas et al. [80] preserve self- and ground contact for motion retargeting.", + "bbox": [ + 75, + 428, + 470, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These methods typically take two steps: (1) detecting contact areas on the body and/or scene and (2) minimizing the distance between these. 
Surfaces are typically assumed to be in contact if their distance is below a threshold and their relative motion is small [27, 35, 98, 104].", + "bbox": [ + 75, + 565, + 470, + 641 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Many methods only consider contact between the ground and the foot joints [66, 110] or other end-effectors [65]. In contrast, IPMAN uses the full 3D body surface and exploits this to compute the pressure, CoP and CoM. Unlike binary contact, this is differentiable, making the IP terms useful for training HPS regressors.", + "bbox": [ + 75, + 642, + 470, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Physics-based scene constraints. Early work uses physics to estimate walking [8, 9] or full body motion [82]. Recent methods [21, 22, 66, 73, 74, 89, 96] regress 3D humans and then refine them through physics-based optimization. Physics is used for two primary reasons: (1) to regularise dynamics, reducing jitter [49, 66, 74, 96], and (2) to discourage interpenetration and encourage contact. Since contact events are discontinuous, the pipeline is either not end-to-end trainable or trained with reinforcement learning [64, 96]. Xie et al. [89] propose differentiable physics-inspired objectives based on a soft contact penalty, while DiffPhy [21] uses a", + "bbox": [ + 75, + 734, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "differentiable physics simulator [31] during inference. Both methods apply the objectives in an optimization scheme, while IPMAN is applied to both optimization and regression. PhysCap [74] considers a pose as balanced, when the CoM is projected within the BoS. Rempe et al. [66] impose PD control on the pelvis, which they treat as a CoM. Scott et al. [71] regress foot pressure from 2D and 3D joints for stability analysis but do not use it to improve HPS.", + "bbox": [ + 496, + 90, + 893, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "All these methods use unrealistic bodies based on shape primitives. Some require known body dimensions [66, 74, 96] while others estimate body scale [49, 89]. In contrast, IPMAN computes CoM, CoP and BoS directly from the SMPL mesh. Clever et al. [14] and Luo et al. [55] estimate 3D body pose but from pressure measurements, not from images. Their task is fundamentally different from ours.", + "bbox": [ + 496, + 212, + 893, + 319 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 333, + 591, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Preliminaries", + "text_level": 1, + "bbox": [ + 500, + 359, + 640, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a color image, I, we estimate the parameters of the camera and the SMPL body model [54].", + "bbox": [ + 496, + 383, + 890, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Body model. SMPL maps pose, $\\theta$ , and shape, $\\beta$ , parameters to a 3D mesh, $M(\\theta, \\beta)$ . The pose parameters, $\\theta \\in \\mathbb{R}^{24 \\times 6}$ , are rotations of SMPL's 24 joints in a 6D representation [108]. The shape parameters, $\\beta \\in \\mathbb{R}^{10}$ , are the first 10 PCA coefficients of SMPL's shape space. 
The generated mesh $M(\\theta, \\beta)$ consists of $N_V = 6890$ vertices, $V \\in \\mathbb{R}^{N_V \\times 3}$ , and $N_F = 13776$ faces, $F \\in \\mathbb{R}^{N_F \\times 3 \\times 3}$ .", + "bbox": [ + 496, + 414, + 893, + 520 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Note that our regression method (IPMAN-R, Sec. 3.4.1) uses SMPL, while our optimization method (IPMAN-O, Sec. 3.4.2) uses SMPL-X [63], to match the models used by the baselines. For simplicity of exposition, we refer to both models as SMPL when the distinction is not important.", + "bbox": [ + 496, + 520, + 893, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Camera. For the regression-based IPMAN-R, we follow the standard convention [42, 43, 47] and use a weak perspective camera with a 2D scale, $s$ , translation, $\\mathbf{t}^c = (t_x^c,t_y^c)$ , fixed camera rotation, $\\mathbf{R}^c = \\mathbf{I}_3$ , and a fixed focal length $(f_{x},f_{y})$ . The root-relative body orientation $\\mathbf{R}^b$ is predicted by the neural network, but body translation stays fixed at $\\mathbf{t}^b = \\mathbf{0}$ as it is absorbed into the camera's translation.", + "bbox": [ + 496, + 597, + 893, + 702 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For the optimization-based IPMAN-O, we follow Müller et al. [59] to use the full-perspective camera model and optimize the focal lengths $(f_x, f_y)$ , camera rotation $\\mathbf{R}^c$ and camera translation $\\mathbf{t}^c$ . The principal point $(o_x, o_y)$ is the center of the input image. $\\mathbf{K}$ is the intrinsic matrix storing focal lengths and the principal point. We assume that the body rotation $\\mathbf{R}^b$ and translation $\\mathbf{t}^b$ are absorbed into the camera parameters, thus, they stay fixed as $\\mathbf{R}^b = \\mathbf{I}_3$ and $\\mathbf{t}^b = \\mathbf{0}$ . Using the camera, we project a 3D point $\\mathbf{X} \\in \\mathbb{R}^3$ to an image point $\\mathbf{x} \\in \\mathbb{R}^2$ through $\\mathbf{x} = \\mathbf{K}(\\mathbf{R}^c\\mathbf{X} + \\mathbf{t}^c)$ .", + "bbox": [ + 496, + 703, + 893, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Ground plane and gravity-projection. We assume that the gravity direction is perpendicular to the ground plane in the world coordinate system. Thus, for any arbitrary point in", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4715", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D space, $\\pmb{u} \\in \\mathbb{R}^3$ , its gravity-projected point, $\\pmb{u}' = g(\\pmb{u}) \\in \\mathbb{R}^3$ , is the projection of $\\pmb{u}$ along the plane normal $\\pmb{n}$ onto the ground plane, and $g(.)$ is the projection operator. The function $h(\\pmb{u})$ returns the signed \"height\" of a point $\\pmb{u}$ with respect to the ground; i.e., the signed distance from $\\pmb{u}$ to the ground plane along the gravity direction, where $h(\\pmb{u}) < 0$ if $\\pmb{u}$ is below the ground and $h(\\pmb{u}) > 0$ if $\\pmb{u}$ is above it.", + "bbox": [ + 76, + 90, + 472, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Stability Analysis", + "text_level": 1, + "bbox": [ + 76, + 214, + 250, + 229 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We follow the biomechanics literature [32, 33, 61] and Scott et al. 
[71] to define three fundamental elements for stability analysis: We use the Newtonian definition for the \"Center of Mass\" (CoM); i.e., the mass-weighted average of particle positions. The \"Center of Pressure\" (CoP) is the ground-reaction force's point of application. The \"Base of Support\" (BoS) is the convex hull of all body-ground contacts. Below, we define intuitive-physics (IP) terms using the inferred CoM and CoP. BoS is only used for evaluation.", + "bbox": [ + 73, + 239, + 470, + 376 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Body Center of Mass (CoM). We introduce a novel CoM formulation that is fully differentiable and considers the per-part mass contributions, dubbed as pCoM; see Sup. Mat. for alternative CoM definitions. To compute this, we first segment the template mesh into $N_P = 10$ parts $P_i \\in \\mathcal{P}$ ; see Fig. 2. We do this once offline, and keep the segmentation fixed during training and optimization. Assuming a shaped and posed SMPL body, the per-part volumes $\\mathcal{V}^{P_i}$ are calculated by splitting the SMPL mesh into parts.", + "bbox": [ + 75, + 378, + 470, + 516 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "However, mesh splitting is a non-differentiable operation. Thus, it cannot be used for either training a regressor (IPMAN-R) or for optimization (IPMAN-O). Instead, we work with the full SMPL mesh and use differentiable \"close-translate-fill\" operations for each body part on the fly. First, for each part $P$ , we extract boundary vertices $\\mathcal{B}_P$ and add in the middle a virtual vertex $\\boldsymbol{v}_g$ , where $\\boldsymbol{v}_g = \\sum_{j \\in \\mathcal{B}_P} \\boldsymbol{v}_j / |\\mathcal{B}_P|$ . Then, for the $\\mathcal{B}_P$ and $\\boldsymbol{v}_g$ vertices, we add virtual faces to \"close\" $P$ and make it watertight. Next, we \"translate\" $P$ such that the part centroid $\\mathbf{c}_P = \\sum_{j \\in P} \\boldsymbol{v}_j / |P|$ is at the origin. Finally, we \"fill\" the centered $P$ with tetrahedrons by connecting the origin with each face vertex. Then, the part volume, $\\mathcal{V}^{\\mathcal{P}}$ , is the sum of all tetrahedron volumes [101].", + "bbox": [ + 73, + 517, + 470, + 714 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To create a uniform distribution of surface vertices, we uniformly sample $N_U = 20000$ surface points $V_U \\in \\mathbb{R}^{N_U \\times 3}$ on the template SMPL mesh using the Triangle Point Picking method [83]. Given $V_U$ and the template SMPL mesh vertices $V_T$ , we follow [59], and analytically compute a sparse linear regressor $\\mathbf{W} \\in \\mathbb{R}^{N_U \\times N_V}$ such that $V_U = \\mathbf{W}V_T$ . During training and optimization, given an arbitrary shaped and posed mesh with vertices $V$ , we obtain uniformly-sampled mesh surface points as $V_U = \\mathbf{W}V$ . 
Each surface point, $v_i$ , is assigned to the body part, $P_{v_i}$ , corresponding to the face, $F_{v_i}$ , it was sampled from.", + "bbox": [ + 75, + 715, + 470, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, the part-weighted pCoM is computed as a", + "bbox": [ + 96, + 885, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "volume-weighted mean of the mesh surface points:", + "bbox": [ + 500, + 90, + 836, + 107 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\mathbf {m}} = \\frac {\\sum_ {i = 1} ^ {N _ {U}} \\mathcal {V} ^ {P _ {v _ {i}}} v _ {i}}{\\sum_ {i = 1} ^ {N _ {U}} \\mathcal {V} ^ {P _ {v _ {i}}}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 624, + 114, + 890, + 156 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{V}^{P_{v_i}}$ is the volume of the part $P_{v_i}\\in \\mathcal{P}$ to which $v_{i}$ is assigned. This formulation is fully differentiable and can be employed with any existing 3D HPS estimation method.", + "bbox": [ + 496, + 164, + 890, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that computing CoM (or volume) from uniformly sampled surface points does not work (see Sup. Mat.) because it assumes that mass, $M$ , is proportional to surface area, $S$ . Instead, our pCoM computes mass from volume, $\\mathcal{V}$ , via the standard density equation, $M = \\rho \\mathcal{V}$ , while our close-translate-fill operation computes the volume of deformable bodies in an efficient and differentiable manner.", + "bbox": [ + 496, + 210, + 893, + 316 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Center of Pressure (CoP). Recovering a pressure heatmap from an image without using hardware, such as pressure sensors, is a highly ill-posed problem. However, stability analysis requires knowledge of the pressure exerted on the human body by the supporting surfaces, like the ground. Going beyond binary contact, Rogez et al. [68] estimate 3D forces by detecting intersecting vertices between hand and object meshes. Clever et al. [14] recover pressure maps by allowing articulated body models to deform a soft pressure-sensing virtual mattress in a physics simulation.", + "bbox": [ + 496, + 316, + 893, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast, we observe that, while real bodies interacting with rigid objects (e.g., the floor) deform under contact, SMPL does not model such soft-tissue deformations. Thus, the body mesh penetrates the contacting object surface and the amount of penetration can be a proxy for pressure; a deeper penetration implies higher pressure. With the height $h(v_{i})$ (see Sec. 3.1) of a mesh surface point $v_{i}$ with respect to the ground plane $\\Pi$ , we define a pressure field to compute the per-point pressure $\\rho_{i}$ as:", + "bbox": [ + 496, + 468, + 893, + 604 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {i} = \\left\\{ \\begin{array}{l l} 1 - \\alpha h (v _ {i}) & \\text {i f} h (v _ {i}) < 0, \\\\ e ^ {- \\gamma h (v _ {i})} & \\text {i f} h (v _ {i}) \\geq 0, \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 613, + 890, + 654 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\alpha$ and $\\gamma$ are scalar hyperparameters set empirically. We approximate soft tissue via a \"spring\" model and \"penetrating\" pressure field using Hooke's Law. 
Some pressure is also assigned to points above the ground to allow tolerance for footwear, but this decays quickly. Finally, we compute the CoP, $\\overline{\\mathbf{s}}$ , as", + "bbox": [ + 496, + 662, + 893, + 753 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\overline {{\\mathbf {s}}} = \\frac {\\sum_ {i = 1} ^ {N _ {U}} \\rho_ {i} v _ {i}}{\\sum_ {i = 1} ^ {N _ {U}} \\rho_ {i}}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 625, + 762, + 890, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Again, note that this term is fully differentiable.", + "bbox": [ + 500, + 810, + 815, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Base of Support (BoS). In biomechanics [34, 85], BoS is defined as the \"supporting area\" or the possible range of the CoP on the supporting surface. Here, we define BoS as the convex hull [67] of all gravity-projected body-ground contact points. In detail, we first determine all such contacts", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4716", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "by selecting the set of mesh surface points $v_{i}$ close to the ground, and then gravity-project them onto the ground to obtain $C = \\{g(v_{i}) \\mid |h(v_{i})| < \\tau\\}$ . The BoS is then defined as the convex hull $\\mathcal{C}$ of $C$ .", + "bbox": [ + 75, + 90, + 468, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Intuitive-Physics Losses", + "text_level": 1, + "bbox": [ + 76, + 159, + 295, + 174 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Stability loss. The \"inverted pendulum\" model of human balance [85, 86] considers the relationship between the CoM and BoS to determine stability. Simply put, for a given shape and pose, if the body CoM, projected on the gravity-aligned ground plane, lies within the BoS, the pose is considered stable. While this definition of stability is useful for evaluation, using it in a loss or energy function for 3D HPS estimation results in sparse gradients (see Sup. Mat.). Instead, we define the stability criterion as:", + "bbox": [ + 75, + 181, + 470, + 316 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {s t a b i l i t y}} = \\| g (\\bar {\\mathbf {m}}) - g (\\bar {\\mathbf {s}}) \\| _ {2}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 325, + 468, + 340 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $g(\\bar{\\mathbf{m}})$ and $g(\\bar{\\mathbf{s}})$ are the gravity-projected CoM and CoP, respectively.", + "bbox": [ + 75, + 347, + 468, + 378 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Ground contact loss. As shown in Fig. 1, 3D HPS methods minimize the 2D joint reprojection error and do not consider the plausibility of body-ground contact. Ignoring this can result in interpenetrating or hovering meshes. Inspired by self-contact losses [19,59] and hand-object contact losses [26,29], we define two ground losses, namely pushing, $\\mathcal{L}_{\\mathrm{push}}$ , and pulling, $\\mathcal{L}_{\\mathrm{pull}}$ , that take into account the height, $h(v_{i})$ , of a vertex, $v_{i}$ , with respect to the ground plane. For $h(v_{i}) < 0$ , i.e., for vertices under the ground plane, $\\mathcal{L}_{\\mathrm{push}}$ discourages body-ground penetrations. 
For $h(v_{i}) \\geq 0$ , i.e., for hovering meshes, $\\mathcal{L}_{\\mathrm{pull}}$ encourages the vertices that lie close to the ground to \"snap\" into contact with it. Note that the losses are non-conflicting as they act on disjoint sets of vertices. Then, the ground contact loss is:", + "bbox": [ + 75, + 378, + 470, + 589 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {g r o u n d}} = \\mathcal {L} _ {\\text {p u l l}} + \\mathcal {L} _ {\\text {p u s h}}, \\text {w i t h} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 595, + 468, + 613 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {p u l l}} = \\alpha_ {1} \\tanh \\left(\\frac {h \\left(v _ {i}\\right)}{\\alpha_ {2}}\\right) ^ {2} \\quad \\text {i f} h \\left(v _ {i}\\right) \\geq 0, \\text {a n d} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 614, + 468, + 646 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {p u s h}} = \\beta_ {1} \\tanh \\left(\\frac {h \\left(v _ {i}\\right)}{\\beta_ {2}}\\right) ^ {2} \\quad \\text {i f} h \\left(v _ {i}\\right) < 0. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 648, + 468, + 680 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. IPMAN", + "text_level": 1, + "bbox": [ + 76, + 685, + 174, + 699 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use our new IP losses for two tasks: (1) We extend HMR [42] to develop IPMAN-R, a regression-based HPS method. (2) We extend SMPLify-XMC [59] to develop IPMAN-O, an optimization-based method. Note that IPMAN-O uses a reference ground plane, while IPMAN-R uses the ground plane only for training but not at test time. It leverages the known ground in 3D datasets, and thus, does not require additional data beyond past HPS methods.", + "bbox": [ + 75, + 708, + 468, + 829 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4.1 IPMAN-R", + "text_level": 1, + "bbox": [ + 76, + 845, + 204, + 859 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Most HPS methods are trained with a mix of direct supervision using 3D datasets [37,56,81] and 2D reprojection losses", + "bbox": [ + 75, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a2ec3ec21e7a632c477fa14a5e119000616e9b378ad23a5088e74f081f69c9ca.jpg", + "image_caption": [ + "Figure 3. IPMAN-R architecture. First, the HMR regressor estimates camera translation and SMPL parameters for an input image. These parameters are used to generate the SMPL mesh in the camera frame, $M_{c}$ . To transform the mesh from camera into world coordinates $(M_{c} \\rightarrow M_{w})$ , IPMAN-R uses the ground-truth camera rotation, $R_{w}^{c}$ , and translation, $t_{w}^{c}$ . The IP losses, $\\mathcal{L}_{\\mathrm{ground}}$ and $\\mathcal{L}_{\\mathrm{stability}}$ , are applied on the mesh in the world coordinate system." + ], + "image_footnote": [], + "bbox": [ + 501, + 80, + 890, + 218 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "using image datasets [4, 39, 53]. The 3D losses, however, are calculated in the camera frame, ignoring scene information and physics. IPMAN-R extends HMR [42] with our intuitive-physics terms; see Fig. 3 for the architecture. 
For training, we use the known camera coordinates and the world ground plane in 3D datasets.", + "bbox": [ + 496, + 349, + 893, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As described in Sec. 3.1 (paragraph \"Camera\"), HMR infers the camera translation, $\\mathbf{t}^c$ , and SMPL parameters, $\\theta$ and $\\beta$ , in the camera coordinates assuming $\\mathbf{R}^c = \\mathbf{I}_3$ and $\\mathbf{t}^b = \\mathbf{0}$ . Ground truth 3D joints and SMPL parameters are used to supervise the inferred mesh $M_c$ in the camera frame. However, 3D datasets also provide the ground, albeit in the world frame. To leverage the known ground, we transform the predicted body orientation, $\\mathbf{R}^b$ , to world coordinates using the ground-truth camera rotation, $\\mathbf{R}_w^c$ , as $\\mathbf{R}_w^b = \\mathbf{R}_w^{c\\top}\\mathbf{R}^b$ . Then, we compute the body translation in world coordinates as $\\mathbf{t}_w^b = -\\mathbf{t}^c + \\mathbf{t}_w^c$ . With the predicted mesh and ground plane in world coordinates, we add the IP terms, $\\mathcal{L}_{\\mathrm{stability}}$ and $\\mathcal{L}_{\\mathrm{ground}}$ , for HPS training as follows:", + "bbox": [ + 496, + 443, + 893, + 641 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {I P M A N - R}} \\left(\\boldsymbol {\\theta}, \\boldsymbol {\\beta}, \\mathbf {t} ^ {c}\\right) = \\lambda_ {2 D} \\mathcal {L} _ {2 D} + \\lambda_ {3 D} \\mathcal {L} _ {3 D} + \\lambda_ {\\mathrm {S M P L}} \\mathcal {L} _ {\\mathrm {S M P L}} +\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 656, + 890, + 672 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda_ {\\mathrm {s}} \\mathcal {L} _ {\\text {s t a b i l i t y}} + \\lambda_ {\\mathrm {g}} \\mathcal {L} _ {\\text {g r o u n d}}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 675, + 890, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{\\mathrm{s}}$ and $\\lambda_{\\mathrm{g}}$ are the weights for the respective IP terms. For training (data augmentation, hyperparameters, etc), we follow Kolotouros et al. [47]; for more details see Sup. Mat.", + "bbox": [ + 500, + 707, + 893, + 752 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4.2 IPMAN-O", + "text_level": 1, + "bbox": [ + 500, + 782, + 627, + 796 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To fit SMPL-X to 2D image keypoints, SMPLify-XMC [59] initializes the fitting process by exploiting the self-contact and global-orientation of a known/presented 3D mesh. We posit that the presented pose contains further information, such as stability, pressure and contact with the ground-plane. IPMAN-O uses this insight to apply stability and ground", + "bbox": [ + 496, + 809, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4717", + "bbox": [ + 480, + 955, + 514, + 967 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "contact losses. 
The IPMAN-O objective is:", + "bbox": [ + 76, + 90, + 362, + 106 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} E _ {\\text {I P M A N - O}} (\\boldsymbol {\\beta}, \\boldsymbol {\\theta}, \\boldsymbol {\\Phi}) = E _ {J 2 D} + \\lambda_ {\\beta} E _ {\\beta} + \\lambda_ {\\theta_ {h}} E _ {\\theta_ {h}} + \\\\ \\lambda_ {\\tilde {\\theta} _ {b}} E _ {\\tilde {\\theta} _ {b}} + \\lambda_ {\\tilde {C}} E _ {\\tilde {C}} + \\\\ \\lambda_ {s} E _ {\\text {s t a b i l i t y}} + \\lambda_ {g} E _ {\\text {g r o u n d}}. \\tag {9} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 109, + 114, + 468, + 172 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\Phi$ denotes the camera parameters: rotation $\\mathbf{R}^c$ , translation $\\mathbf{t}^c$ , and focal length, $(f_x, f_y)$ . $E_{J2D}$ is a 2D joint loss, $E_\\beta$ and $E_{\\theta_h}$ are $L_2$ body shape and hand pose priors. $E_{\\tilde{\\theta}_b}$ and $E_{\\tilde{C}}$ are pose and contact terms w.r.t. the presented 3D pose and contact (see [59] for details). $E_S$ and $E_G$ are the stability and ground contact losses from Sec. 3.3. Since the estimated mesh is in the same coordinate system as the presented mesh and the ground-plane, we directly apply IP losses without any transformations. For details see Sup. Mat.", + "bbox": [ + 76, + 179, + 470, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 327, + 209, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Training and Evaluation Datasets", + "text_level": 1, + "bbox": [ + 76, + 352, + 372, + 368 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Human3.6M [37]. A dataset of 3D human keypoints and RGB images. The poses are limited in terms of challenging physics, focusing on common activities like walking, discussing, smoking, or taking photos.", + "bbox": [ + 76, + 375, + 470, + 435 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "RICH [35]. A dataset of videos with accurate marker-less motion-captured 3D bodies and 3D scans of scenes. The images are more natural than Human3.6M and Fit3D [20]. We consider sequences with meaningful body-ground interaction. For the list of sequences, see Sup. Mat.", + "bbox": [ + 76, + 436, + 470, + 511 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Other datasets. Similar to [47], for training we use 3D keypoints from MPI-INF-3DHP [56] and 2D keypoints from image datasets such as COCO [53], MPII [4] and LSP [39].", + "bbox": [ + 76, + 511, + 470, + 556 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.1 MoCap Yoga (MoYo) Dataset", + "text_level": 1, + "bbox": [ + 76, + 574, + 334, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We capture a trained Yoga professional in 200 highly complex poses (see Fig. 4) using a synchronized MoCap system, pressure mat, and a multi-view RGB video system with 8 static, calibrated cameras; for details see Sup. Mat. The dataset contains $\\sim$ 1.75M RGB frames in 4K resolution with ground-truth SMPL-X [63], pressure and CoM. Compared to the Fit3D [20] and PosePrior [1] datasets, MoYo is more challenging; it has extreme poses, strong self-occlusion, and significant body-ground and self-contact.", + "bbox": [ + 75, + 598, + 468, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. 
Evaluation Metrics", + "text_level": 1, + "bbox": [ + 76, + 742, + 261, + 757 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use standard 3D HPS metrics: The Mean Per-Joint Position Error (MPJPE), its Procrustes Aligned version (PA-MPJPE), and the Per-Vertex Error (PVE) [62].", + "bbox": [ + 76, + 765, + 468, + 810 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "BoS Error (BoSE). To evaluate stability, we propose a new metric called BoS Error (BoSE). Following the definition of stability (Sec. 3.3) we define:", + "bbox": [ + 76, + 811, + 470, + 856 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {B o S E} = \\left\\{ \\begin{array}{l l} 1 & g (\\bar {\\mathbf {m}}) \\in \\mathcal {C} (C) \\\\ 0 & g (\\bar {\\mathbf {m}}) \\notin \\mathcal {C} (C) \\end{array} \\right. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 864, + 468, + 904 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathcal{C}(C)$ is the convex hull of the gravity-projected contact vertices for $\\tau = 10\\mathrm{cm}$ . For efficiency reasons, we formulate this computation as the solution of a convex system via interior point linear programming [3]; see Sup. Mat.", + "bbox": [ + 496, + 90, + 893, + 152 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. IPMAN Evaluation", + "text_level": 1, + "bbox": [ + 498, + 162, + 687, + 176 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "IPMAN-R. We evaluate our regressor, IPMAN-R, on RICH and H3.6M and summarize our results in Tab. 1. We refer to our regression baseline as $\\mathrm{HMR}^*$ which is HMR trained on the same datasets as IPMAN-R. Since we train with paired 3D datasets, we do not use HMR's discriminator during training. Both IP terms individually improve upon the baseline method. Their joint use, however, shows the largest improvement. For example, on RICH the MPJPE improves by $3.5\\mathrm{mm}$ and the PVE by $2.5\\mathrm{mm}$ . It is particularly interesting that IPMAN-R improves upon the baseline on H3.6M, a dataset with largely dynamic poses and little body-ground contact. We also significantly outperform ( $\\sim 12\\%$ ) the MPJPE of optimization approaches that use the ground plane, Zou et al. [110] (69.9 mm) and Zanfir et al. [98] (69.0 mm), on H3.6M. Some video-based methods [49, 96] achieve better MPJPE (56.7 and 52.5 resp.) on H3.6M. However, they initialize with a stronger kinematic predictor [45, 50] and require video frames as input. Further, they use heuristics to estimate body weight and non-physical residual forces to correct for contact estimation errors. In contrast, IPMAN is a single-frame method, models complex full-body pressure and does not rely on approximate body weight to compute CoM. Qualitatively, Fig. 5 (top) shows that IPMAN-R's reconstructions are more stable and contain physically-plausible body-ground contact. While HMR is not SOTA, it is simple, isolating the benefits of our new IP formulation. These terms can also be added to methods with more modern backbones and architectures.", + "bbox": [ + 496, + 185, + 893, + 607 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "IPMAN-O. Our optimization method, IPMAN-O, also improves upon the baseline optimization method, SMPLify-XMC, on all evaluation metrics (see Tab. 2). We note that adding $L_{\\mathrm{stability}}$ independently improves the PVE, but not joint metrics (PA-MPJPE, MPJPE) and BoSE. 
This can be explained by the dependence of our IP terms on the relative position of the mesh surface to the ground-plane. Since joint metrics do not capture surfaces, they may get worse. Similar trends on joint metrics have been reported in the context of hand-object contact [29, 79] and body-scene contact [27]. We show qualitative results in Fig. 5 (bottom). While both SMPLify-XMC [59] and IPMAN-O achieve similar image projections, another view reveals that our results are more stable and physically plausible w.r.t. the ground.", + "bbox": [ + 496, + 609, + 893, + 821 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Pressure, CoP and CoM Evaluation", + "text_level": 1, + "bbox": [ + 500, + 830, + 808, + 847 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate our estimated pressure, CoP and CoM against the MoYo ground truth. For pressure evaluation, we measure Intersection-over-Union (IoU) between our esti", + "bbox": [ + 496, + 854, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4718", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6c402716af08545cafa03b79efa58ac2d10b90eb8ef6f3cef9e1f58a8a2267c9.jpg", + "image_caption": [ + "Figure 4. Representative examples illustrating the variation and complexity of 3D pose and body-ground contact in our new MoYo dataset." + ], + "image_footnote": [], + "bbox": [ + 80, + 90, + 890, + 236 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/58d1086d4c8155d3fa91b263de42edfa3f0380e5e87f0fe162e963398bd979e4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 273, + 890, + 578 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2fb8fad4c94ff37dcf3b4325caeadb041e9b131f9aaa53f972e421f697b8d6c0.jpg", + "image_caption": [ + "Figure 5. Qualitative evaluation of IPMAN-R and IPMAN-O on the RICH and MoYo datasets. The first column shows the input images of a subject doing various sports poses. The second and third block of columns show the baseline's and our results, respectively. In each block, the first image shows the estimated mesh overlayed on the image (camera view), the second image shows the estimated mesh in the world frame (side view), and the last image shows the estimated pressure map with the CoM (in pink) and the CoP (in green)." + ], + "image_footnote": [], + "bbox": [ + 81, + 579, + 890, + 813 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4719", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/32f3fd9c60bb57399448f54ab7a112c103fef459dc3507084c860bd49363468b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | RICH: MPJPE ↓ | RICH: PA-MPJPE ↓ | RICH: PVE ↓ | RICH: BoSE (%) ↑ | Human3.6M: MPJPE ↓ | Human3.6M: PA-MPJPE ↓
PhysCap [74] | - | - | - | - | 113.0 | 68.9
DiffPhy [21] | - | - | - | - | 81.7 | 55.6
Zou et al. [110] | - | - | - | - | 69.9 | -
Xie et al. [89] | - | - | - | - | 68.1 | -
VIBE [45] | - | - | - | - | 61.3 | 43.1
SimPoE [96] | - | - | - | - | 56.7 | 41.6
D&D [49] | - | - | - | - | 52.5 | 35.5
HMR [42] | - | - | - | - | 88.0 | 56.8
Zanfir et al. [98] | - | - | - | - | 69.0 | -
SPIN [47] | 112.2 | 71.5 | 129.5 | 54.7 | 62.3 | 41.9
PARE [46] | 107.0 | 73.1 | 125.0 | 74.4 | - | -
CLIFF [51] | 107.0 | 67.2 | 122.3 | 67.6 | 81.4 | 52.1
Finetuning on Human3.6M
HMR* [42] | - | - | - | - | 62.1 | 41.6
IPMAN-R (Ours) | - | - | - | - | 60.7 (-1.4) | 41.1 (-0.5)
Finetuning on all datasets
HMR* [42] | 82.5 | 48.3 | 92.4 | 62.0 | 61.6 | 41.9
HMR* [42] + Lground | 80.9 | 47.8 | 89.9 | 66.5 | 61.9 | 41.8
HMR* [42] + Lstability | 81.0 | 47.5 (-0.8) | 90.8 | 69.6 | 61.2 | 41.9
IPMAN-R (Ours) | 79.0 (-3.5) | 47.6 | 89.9 (-2.5) | 71.2 (+9.2) | 60.6 (-1.0) | 41.8 (-0.1)
", + "bbox": [ + 76, + 88, + 470, + 316 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d0ab51e0e15b51c62cea3b840ffb57c2533d3623eb0052d2a7c56f8191cec6da.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison of estimated vs the ground-truth pressure. The ground-truth CoP is shown in green and the estimated CoP is shown in yellow. Pressure heatmap colors as per Fig. 2." + ], + "image_footnote": [], + "bbox": [ + 81, + 425, + 472, + 556 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "mated and ground-truth pressure heatmaps. We also compute the CoP error as the Euclidean distance between estimated and ground-truth CoP. We obtain an IoU of 0.32 and a CoP error of $57.3\\mathrm{mm}$ . Figure 6 shows a qualitative visualization of the estimated pressure compared to the ground truth. For CoM evaluation, we find a $53.3\\mathrm{mm}$ difference between our pCoM and the CoM computed by the commercial software, Vicon Plug-in Gait. Unlike Vicon's estimate, our pCoM does not require anthropometric measurements and takes into account the full 3D body shape. For details about the evaluation protocol and comparisons with alternative CoM formulations, see Sup. Mat.", + "bbox": [ + 75, + 638, + 468, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Physics Simulation. To evaluate stability, we run a post-hoc physics simulation in \"Bullet\" [10] and measure the displacement of the estimated meshes; a small displacement denotes a stable pose. IPMAN-O produces $14.8\\%$ more stable bodies than the baseline [59]; for details see Sup. Mat.", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8b27ff559438762f7cb859dfee21f9a5e1ddb0f0101064d751104a35e085b493.jpg", + "table_caption": [ + "Table 1. Top to Bottom: Comparisons with video-based and single-frame regression methods. IPMAN-R outperforms the single-frame baselines across all benchmarks. * indicates training hyperparameters and datasets are identical to IPMAN-R. All units are in mm except BoSE. Bold denotes best results (per category), and parentheses show improvement over the baseline. Q Zoom in" + ], + "table_footnote": [], + "table_body": "
Method | MoYo: MPJPE ↓ | MoYo: PA-MPJPE ↓ | MoYo: PVE ↓ | MoYo: BoSE (%) ↑
SMPLify-XMC [59] | 75.3 | 36.5 | 16.8 | 98.0
SMPLify-XMC [59] + Lground | 73.3 | 36.2 | 14.5 | 98.2
SMPLify-XMC [59] + Lstability | 88.5 | 38.6 | 15.3 | 97.8
IPMAN-O (Ours) | 71.9 (-3.4) | 34.3 (-2.2) | 11.4 (-5.4) | 98.6 (+0.5)
", + "bbox": [ + 500, + 88, + 893, + 167 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2. Evaluation of IPMAN-O and SMPLify-XMC [59] (optimization-based) on MoYo. Bold shows the best performance, and parentheses show the improvement over SMPLify-XMC.", + "bbox": [ + 498, + 172, + 893, + 215 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 237, + 617, + 253 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Existing 3D HPS estimation methods recover SMPL meshes that align well with the input image, but are often physically implausible. To address this, we propose IPMAN, which incorporates intuitive-physics in 3D HPS estimation. Our IP terms encourage stable poses, promote realistic floor support, and reduce body-floor penetration. The IP terms exploit the interaction between the body CoM, CoP, and BoS - key elements used in stability analysis. To calculate the CoM of SMPL meshes, IPMAN uses on a novel formulation that takes part-specific mass contributions into account. Additionally, IPMAN estimates proxy pressure maps directly from images, which is useful in computing CoP. IPMAN is simple, differentiable, and compatible with both regression and optimization methods. IPMAN goes beyond previous physics-based methods to reason about arbitrary full-body contact with the ground. We show that IPMAN improves both regression and optimization baselines across all metrics on existing datasets and MoYo. MoYo uniquely comprises synchronized multi-view video, SMPL-X bodies in complex poses, and measurements for pressure maps and body CoM. Qualitative results show the effectiveness of IPMAN in recovering physically plausible meshes.", + "bbox": [ + 496, + 263, + 893, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While IPMAN addresses body-floor contact, future work should incorporate general body-scene contact and diverse supporting surfaces by integrating 3D scene reconstruction. In this work, the proposed IP terms are designed to help static poses and we show that they do not hurt dynamic poses. However, the large body of biomechanical literature analyzing dynamic poses could be leveraged for activities like walking, jogging, running, etc. It would be interesting to extend IPMAN beyond single-person scenarios by exploiting the various physical constraints offered by multiple subjects.", + "bbox": [ + 496, + 595, + 893, + 750 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. We thank T. Alexiadis, T. McConnell, C. Gallatz, M. Höschle, S. Polikovsky, C. Mendoza, Y. Fincan, L. Sanchez and M. Safroshkin for data collection, G. Becherini for MoSh++, Z. Fang, V. Choutas and all of Perceiving Systems for fruitful discussions. This work was funded by the International Max Planck Research School for Intelligent Systems (IMPRS-IS) and in part by the German Federal Ministry of Education and Research (BMBF), Tübingen AI Center, FKZ: 01IS18039B.", + "bbox": [ + 496, + 763, + 893, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Disclosure. 
https://files.is.tue.mpg.de/black/CoI_CVPR_2023.txt", + "bbox": [ + 500, + 886, + 883, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4720", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Ijaz Akhter and Michael J. Black. Pose-conditioned joint angle limits for 3D human pose reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 1446-1455, 2015. 3, 6", + "[2] Riza Alp Güler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 7297-7306, 2018. 3", + "[3] Erling D. Andersen and Knud D. Andersen. The Mosek interior point optimizer for linear programming: An implementation of the homogeneous algorithm. In High Performance Optimization, 2000. 6", + "[4] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In Computer Vision and Pattern Recognition (CVPR), pages 3686-3693, 2014. 5, 6", + "[5] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. SCAPE: Shape completion and animation of people. Transactions on Graphics (TOG), 24:408-416, 2005. 3", + "[6] Michael Barnett-Cowan, Roland W. Fleming, Manish Singh, and Heinrich H. Bulthoff. Perceived object stability depends on multisensory estimates of gravity. PLOS ONE, 6(4):1-5, 2011. 2", + "[7] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J. Black. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In European Conference on Computer Vision (ECCV), volume 9909, pages 561-578, 2016. 3", + "[8] Marcus A. Brubaker, David J. Fleet, and Aaron Hertzmann. Physics-based person tracking using the anthropomorphic walker. International Journal of Computer Vision (IJCV), 87(1-2):140-155, 2010. 3", + "[9] Marcus A. Brubaker, Leonid Sigal, and David J. Fleet. Estimating contact dynamics. In Computer Vision and Pattern Recognition (CVPR), pages 2389-2396, 2009. 3", + "[10] Bullet real-time physics simulation. https://pybullet.org.1,8", + "[11] Zhe Cao, Gines Hidalgo, Tomas Simon, Shih-En Wei, and Yaser Sheikh. OpenPose: Realtime multi-person 2D pose estimation using part affinity fields. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(1):172–186, 2021. 3", + "[12] Yixin Chen, Sai Kumar Dwivedi, Michael J. Black, and Dimitrios Tzionas. Detecting human-object contact in images. June 2023. 3", + "[13] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Monocular expressive body regression through body-driven attention. In European Conference on Computer Vision (ECCV), volume 12355, pages 20-40, 2020. 3", + "[14] Henry M. Clever, Zackory M. Erickson, Ariel Kapusta, Greg Turk, C. Karen Liu, and Charles C. Kemp. Bodies at rest: 3D human pose and shape estimation from a pressure image using synthetic data. In Computer Vision and Pattern Recognition (CVPR), pages 6214-6223, 2020. 3, 4" + ], + "bbox": [ + 86, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. 
SMPLicit: Topology-aware generative model for clothed people. In Computer Vision and Pattern Recognition (CVPR), pages 11875-11885, 2021. 3", + "[16] Taosha Fan, Kalyan Vasudev Alwala, Donglai Xiang, Weipeng Xu, Todd Murphey, and Mustafa Mukadam. Revitalizing optimization for 3D human pose and shape estimation: A sparse constrained formulation. In International Conference on Computer Vision (ICCV), pages 11437-11446, 2021. 3", + "[17] Zicong Fan, Omid Taheri, Dimitrios Tzionas, Muhammed Kocabas, Manuel Kaufmann, Michael J. Black, and Otmar Hilliges. ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3", + "[18] Yao Feng, Vasileios Choutas, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Collaborative regression of expressive bodies using moderation. In International Conference on 3D Vision (3DV), pages 792-804, 2021. 3", + "[19] Mihai Fieraru, Mihai Zanfir, Teodor Alexandru Szente, Eduard Gabriel Bazavan, Vlad Olaru, and Cristian Sminchisescu. REMIPS: Physically consistent 3D reconstruction of multiple interacting people under weak supervision. In Conference on Neural Information Processing Systems (NeurIPS), volume 34, 2021. 3, 5", + "[20] Mihai Fieraru, Mihai Zanfir, Silviu-Cristian Pirlea, Vlad Olaru, and Cristian Sminchisescu. AIfit: Automatic 3D human-interpretable feedback models for fitness training. In Computer Vision and Pattern Recognition (CVPR), pages 9919–9928, 2021. 6", + "[21] Erik Gartner, Mykhaylo Andriluka, Erwin Coumans, and Cristian Sminchisescu. Differentiable dynamics for articulated 3D human motion reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 13180-13190, 2022. 3, 8", + "[22] Erik Gartner, Mykhaylo Andriluka, Hongyi Xu, and Cristian Sminchisescu. Trajectory optimization for physics-based reconstruction of 3D human pose from monocular video. In Computer Vision and Pattern Recognition (CVPR), pages 13096-13105, 2022. 3", + "[23] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Computer Vision and Pattern Recognition (CVPR), pages 7450-7459, 2019. 3", + "[24] Shanyan Guan, Jingwei Xu, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Bilevel online adaptation for out-of-domain human mesh reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 10472-10481, 2021. 3", + "[25] Riza Alp Güler and Iasonas Kokkinos. HoloPose: Holistic 3D human reconstruction in-the-wild. In Computer Vision and Pattern Recognition (CVPR), pages 10876-10886, 2019. 3", + "[26] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In Computer Vision and Pattern Recognition (CVPR), pages 3193-3203, 2020. 5" + ], + "bbox": [ + 509, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4721", + "bbox": [ + 482, + 955, + 513, + 967 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Mohamed Hassan, Vasileios Choutas, Dimitrios Tzionas, and Michael J. Black. Resolving 3D human pose ambiguities with 3D scene constraints. In International Conference on Computer Vision (ICCV), pages 2282-2292, 2019. 3, 6", + "[28] Mohamed Hassan, Partha Ghosh, Joachim Tesch, Dimitrios Tzionas, and Michael J. Black. Populating 3D scenes by learning human-scene interaction. In Computer Vision and Pattern Recognition (CVPR), pages 14708-14718, 2021. 
3", + "[29] Yana Hasson, Gül Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J. Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In Computer Vision and Pattern Recognition (CVPR), pages 11807-11816, 2019. 5, 6", + "[30] Havok: Customizable, fully multithreaded, and highly optimized physics simulation. http://www.havok.com. 1", + "[31] Eric Heiden, David Millard, Erwin Coumans, Yizhou Sheng, and Gaurav S. Sukhatme. NeuralSim: Augmenting differentiable simulators with neural networks. In International Conference on Robotics and Automation (ICRA), pages 9474-9481, 2021. 3", + "[32] At L. Hof. The equations of motion for a standing human reveal three mechanisms for balance. Journal of Biomechanics, 40(2):451-457, 2007. 2, 4", + "[33] At L. Hof. The \"extrapolated center of mass\" concept suggests a simple control of balance in walking. Human movement science, 27(1):112-125, 2008. 2, 4", + "[34] At L. Hof, M. G. J. Gazendam, and Sinke W. E. The condition for dynamic stability. Journal of Biomechanics, 38(1):1-8, 2005. 4", + "[35] Chun-Hao Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael Black. Capturing and inferring dense full-body human-scene contact. In Computer Vision and Pattern Recognition (CVPR), pages 13264-13275, 2022. 2, 3, 6", + "[36] Leslie Ikemoto, Okan Arikan, and David Forsyth. Knowing when to put your foot down. In Symposium on Interactive 3D Graphics (SI3D), page 49-53, 2006. 3", + "[37] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3D human sensing in natural environments. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 36(7):1325-1339, 2014. 2, 5, 6", + "[38] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 5578-5587, 2020. 3", + "[39] Sam Johnson and Mark Everingham. Clustered pose and nonlinear appearance models for human pose estimation. In British Machine Vision Conference (BMVC), pages 1-11, 2010. 5, 6", + "[40] Hanbyul Joo, Natalia Neverova, and Andrea Vedaldi. Exemplar fine-tuning for 3D human pose fitting towards in-the-wild 3D human pose estimation. In International Conference on 3D Vision (3DV), pages 42-52, 2021. 3" + ], + "bbox": [ + 86, + 92, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Hanbyul Joo, Tomas Simon, and Yaser Sheikh. Total capture: A 3D deformation model for tracking faces, hands, and bodies. In Computer Vision and Pattern Recognition (CVPR), pages 8320-8329, 2018. 2, 3", + "[42] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Computer Vision and Pattern Recognition (CVPR), pages 7122-7131, 2018. 3, 5, 8", + "[43] Angjoo Kanazawa, Jason Y. Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. Computer Vision and Pattern Recognition (CVPR), pages 5607-5616, 2019. 3", + "[44] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In Computer Vision and Pattern Recognition (CVPR), pages 1705-1715, 2022. 3", + "[45] Muhammed Kocabas, Nikos Athanasiou, and Michael J. Black. VIBE: Video inference for human body pose and shape estimation. 
In Computer Vision and Pattern Recognition (CVPR), pages 5252-5262, 2020. 3, 6, 8", + "[46] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. PARE: Part attention regressor for 3D human body estimation. In International Conference on Computer Vision (ICCV), pages 11127-11137, 2021. 1, 8", + "[47] Nikos Kolotouros, Georgios Pavlakos, Michael J. Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In International Conference on Computer Vision (ICCV), pages 2252-2261, 2019. 3, 5, 6, 8", + "[48] Nikos Kolotouros, Georgios Pavlakos, and Kostas Dani-ilidis. Convolutional mesh regression for single-image human shape reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 4496–4505, 2019. 3", + "[49] Jiefeng Li, Siyuan Bian, Chao Xu, Gang Liu, Gang Yu, and Cewu Lu. D&D: Learning human dynamics from dynamic camera. In European Conference on Computer Vision (ECCV), 2022. 3, 6, 8", + "[50] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. HybrIK: A hybrid analytical-neural inverse kinematics solution for 3D human pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 3383-3393, 2021. 3, 6", + "[51] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information in full frames into human pose and shape estimation. In ECCV, volume 13665, pages 590-606, 2022. 1, 3, 8", + "[52] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), pages 1954-1963, 2021. 3", + "[53] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C. Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), volume 8693, pages 740-755, 2014. 5, 6", + "[54] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multi-person linear model. Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 2, 3" + ], + "bbox": [ + 509, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4722", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[55] Yiyue Luo, Yunzhu Li, Michael Foshey, Wan Shou, Pratyusha Sharma, Tomás Palacios, Antonio Torralba, and Wojciech Matusik. Intelligent carpet: Inferring 3D human pose from tactile signals. In Computer Vision and Pattern Recognition (CVPR), pages 11255-11265, 2021. 3", + "[56] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal V. Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. International Conference on 3D Vision (3DV), pages 506-516, 2017. 3, 5, 6", + "[57] Dushyant Mehta, Srinath Sridhar, Oleksandr Sotnychenko, Helge Rhodin, Mohammad Shafiei, Hans-Peter Seidel, Weipeng Xu, Dan Casas, and Christian Theobalt. VNect: Real-time 3D human pose estimation with a single RGB camera. Transactions on Graphics (TOG), 36(4):44:1-44:14, 2017. 3", + "[58] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision (ECCV), volume 12352, pages 752-768, 2020. 3", + "[59] Lea Müller, Ahmed A. A. Osman, Siyu Tang, Chun-Hao P. 
Huang, and Michael J. Black. On self-contact and human pose. In Computer Vision and Pattern Recognition (CVPR), pages 9990-9999, 2021. 1, 2, 3, 4, 5, 6, 8", + "[60] NVIDIA PhysX: A scalable multi-platform physics simulation solution. https://developer.nvidia.com/physx-sdk.1", + "[61] Yi-Chung Pai. Movement termination and stability in standing. Exercise and sport sciences reviews, 31(1):19-25, 2003. 2,4", + "[62] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. AGORA: Avatars in geography optimized for regression analysis. In Computer Vision and Pattern Recognition (CVPR), pages 13468-13478, 2021. 6", + "[63] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 10975-10985, 2019. 2, 3, 6", + "[64] Xue Bin Peng, Pieter Abbeel, Sergey Levine, and Michiel van de Panne. DeepMimic: Example-guided deep reinforcement learning of physics-based character skills. Transactions on Graphics (TOG), 37(4):1-14, 2018. 2, 3", + "[65] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J. Guibas. HuMoR: 3D human motion model for robust pose estimation. In International Conference on Computer Vision (ICCV), pages 11468-11479, 2021. 3", + "[66] Davis Rempe, Leonidas J. Guibas, Aaron Hertzmann, Bryan Russell, Ruben Villegas, and Jamei Yang. Contact and human dynamics from monocular video. In European Conference on Computer Vision (ECCV), volume 12350, pages 71-87, 2020. 1, 3", + "[67] Ralph Tyrell Rockafellar. Convex analysis. Princeton university press, 2015. 4" + ], + "bbox": [ + 86, + 90, + 470, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[68] Grégory Rogez, James Steven Supancic, and Deva Ramanan. Understanding everyday hands in action from RGB-D images. In International Conference on Computer Vision (ICCV), pages 3889-3897, 2015. 2, 4", + "[69] Yu Rong, Takaaki Shiratori, and Hanbyul Joo. FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In International Conference on Computer Vision Workshops (ICCVw), pages 1749-1759, 2021. 3", + "[70] Nadine Rueegg, Shashank Tripathi, Konrad Schindler, Michael J. Black, and Silvia Zuffi. BITE: Beyond priors for improved three-D dog pose estimation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3", + "[71] Jesse Scott, Bharadwaj Ravichandran, Christopher Funk, Robert T Collins, and Yanxi Liu. From image to stability: Learning dynamics from human pose. In European Conference on Computer Vision (ECCV), volume 12368, pages 536-554, 2020. 2, 3, 4", + "[72] Mingyi Shi, Kfir Aberman, Andreas Aristidou, Taku Komura, Dani Lischinski, Daniel Cohen-Or, and Baoquan Chen. MotioNet: 3D human motion reconstruction from monocular video with skeleton consistency. Transactions on Graphics (TOG), 40(1):1:1-1:15, 2021. 3", + "[73] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3D human motion capture with physical awareness. Transactions on Graphics (TOG), 40(4), 2021. 3", + "[74] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. PhysCap: Physically plausible monocular 3D motion capture in real time. Transactions on Graphics (TOG), 39(6):235:1-235:16, 2020. 
1, 2, 3, 8", + "[75] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J. Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In International Conference on Computer Vision (ICCV), pages 11179-11188, 2021. 1, 3", + "[76] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In International Conference on Computer Vision (ICCV), pages 5348-5357, 2019. 3", + "[77] Yating Tian, Hongwen Zhang, Yebin Liu, and limin Wang. Recovering 3D human mesh from monocular images: A survey. arXiv:2203.01923, 2022. 3", + "[78] Shashank Tripathi, Siddhant Ranade, Ambrish Tyagi, and Amit K. Agrawal. PoseNet3D: Learning temporally consistent 3D human pose via knowledge distillation. In International Conference on 3D Vision (3DV), pages 311-321, 2020. 3", + "[79] Dimitrios Tzionas, Luca Ballan, Abhilash Srikantha, Pablo Aponte, Marc Pollefeys, and Juergen Gall. Capturing hands in action using discriminative salient points and physics simulation. International Journal of Computer Vision (IJCV), 118:172-193, 2016. 6", + "[80] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In International Conference on Computer Vision (ICCV), pages 9720-9729, 2021. 3" + ], + "bbox": [ + 509, + 90, + 893, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "4723", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[81] Timo von Marcard, Roberto Henschel, Michael J. Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3D human pose in the wild using IMUs and a moving camera. In European Conference on Computer Vision (ECCV), volume 11214, pages 614-631, 2018. 5", + "[82] Marek Vondrak, Leonid Sigal, and Odest Chadwicke Jenkins. Physical simulation for probabilistic motion tracking. In Computer Vision and Pattern Recognition (CVPR), pages 1-8, 2008. 3", + "[83] Eric W. Weisstein. Triangle point picking. https://mathworld.wolfram.com/TrianglePointPicking.html, 2014. From MathWorld - A Wolfram Web Resource. 4", + "[84] Zhenzhen Weng and Serena Yeung. Holistic 3D human and scene mesh estimation from single view images. In Computer Vision and Pattern Recognition (CVPR), pages 334-343, 2020. 3", + "[85] David A. Winter. A.B.C. (Anatomy, Biomechanics and Control) of balance during standing and walking. Waterloo Biomechanics, 1995. 2, 4, 5", + "[86] David A. Winter. Human balance and posture control during standing and walking. Gait & Posture, 3(4):193-214, 1995. 2, 5", + "[87] Donglai Xiang, Hanbyul Joo, and Yaser Sheikh. Monocular total capture: Posing face, body, and hands in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 10957-10966, 2019. 3", + "[88] Donglai Xiang, Fabian Prada, Chenglei Wu, and Jessica Hodgins. MonoClothCap: Towards temporally coherent clothing capture from monocular RGB video. In International Conference on 3D Vision (3DV), pages 322-332, 2020. 3", + "[89] Kevin Xie, Tingwu Wang, Umar Iqbal, Yunrong Guo, Sanja Fidler, and Florian Shkurti. Physics-based human motion estimation and synthesis from videos. In International Conference on Computer Vision (ICCV), pages 11532-11541, 2021. 3, 8", + "[90] Xianghui Xie, Bharat Lal Bhatnagar, and Gerard Pons-Moll. CHORE: Contact, human and object reconstruction from a single RGB image. In European Conference on Computer Vision (ECCV), 2022. 
3", + "[91] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal Integration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 3", + "[92] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. GHUM & GHUML: Generative 3D human shape and articulated pose models. In Computer Vision and Pattern Recognition (CVPR), pages 6183-6192, 2020. 2, 3", + "[93] Masanobu Yamamoto and Katsutoshi Yagishita. Scene constraints-aided tracking of human body. In Computer Vision and Pattern Recognition (CVPR), pages 151–156, 2000. 3", + "[94] Hongwei Yi, Chun-Hao P. Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J. Black. MIME: Human-" + ], + "bbox": [ + 86, + 90, + 470, + 900 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "aware 3D scene generation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3", + "[95] Ye Yuan and Kris Kitani. 3D ego-pose estimation via imitation learning. In European Conference on Computer Vision (ECCV), volume 11220, pages 735–750, 2018. 2", + "[96] Ye Yuan, Shih-En Wei, Tomas Simon, Kris Kitani, and Jason Saragih. SimPoE: Simulated character control for 3D human pose estimation. In Computer Vision and Pattern Recognition (CVPR), pages 7159–7169, 2021. 1, 2, 3, 6, 8", + "[97] Andrei Zanfir, Eduard Gabriel Bazavan, Hongyi Xu, William T Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Weakly supervised 3D human pose and shape reconstruction with normalizing flows. In European Conference on Computer Vision (ECCV), pages 465-481, 2020. 3", + "[98] Andrei Zanfir, Elisabella Maroiniu, and Cristian Sminchisescu. Monocular 3D pose and shape estimation of multiple people in natural scenes – the importance of multiple scene constraints. In Computer Vision and Pattern Recognition (CVPR), pages 2148–2157, 2018. 3, 6, 8", + "[99] Ailing Zeng, Lei Yang, Xuan Ju, Jiefeng Li, Jianyi Wang, and Qiang Xu. SmoothNet: A plug-and-play network for refining human poses in videos. In European Conference on Computer Vision (ECCV), volume 13665, pages 625-642, 2022. 3", + "[100] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3D human mesh regression with dense correspondence. In Computer Vision and Pattern Recognition (CVPR), 2020. 3", + "[101] Cha Zhang and Tsuhan Chen. Efficient feature extraction for 2d/3d objects in mesh representation. In Proceedings 2001 International Conference on Image Processing (Cat. No. 01CH37205), volume 3, pages 935-938. IEEE, 2001. 4", + "[102] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In International Conference on Computer Vision (ICCV), pages 11426-11436, 2021. 1, 3", + "[103] Jianfeng Zhang, Dongdong Yu, Jun Hao Liew, Xuecheng Nie, and Jiashi Feng. Body meshes as points. In Computer Vision and Pattern Recognition (CVPR), pages 546-556, 2021. 3", + "[104] Jason Y. Zhang, Sam Pepose, Hanbyul Joo, Deva Ramanan, Jitendra Malik, and Angjoo Kanazawa. Perceiving 3D human-object spatial arrangements from a single image in the wild. In European Conference on Computer Vision (ECCV), volume 12357, pages 34-51, 2020. 3", + "[105] Siwei Zhang, Yan Zhang, Federica Bogo, Marc Pollefeys, and Siyu Tang. Learning motion priors for 4D human body capture in 3D scenes. 
In International Conference on Computer Vision (ICCV), pages 11343-11353, 2021. 3", + "[106] Tianshu Zhang, Buzhen Huang, and Yangang Wang. Object-occluded human shape and pose estimation from a single color image. In Computer Vision and Pattern Recognition (CVPR), pages 7374–7383, 2020. 3", + "[107] Ce Zheng, Wenhan Wu, Chen Chen, Taojiannan Yang, Sijie Zhu, Ju Shen, Nasser Kehtarnavaz, and Mubarak Shah." + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "4724", + "bbox": [ + 482, + 955, + 514, + 967 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Deep learning-based human pose estimation: A survey. arXiv:2012.13392, 2022.3", + "[108] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Computer Vision and Pattern Recognition (CVPR), pages 5745-5753, 2019. 3", + "[109] Yuxiao Zhou, Marc Habermann, Ikhsanul Habibie, Ayush Tewari, Christian Theobalt, and Feng Xu. Monocular real" + ], + "bbox": [ + 78, + 90, + 468, + 204 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "time full body capture with inter-part correlations. In Computer Vision and Pattern Recognition (CVPR), pages 4811-4822, 2021. 3", + "[110] Yuliang Zou, Jimei Yang, Duygu Ceylan, Jianming Zhang, Federico Perazzi, and Jia-Bin Huang. Reducing footskate in human motion reconstruction with ground contact constraints. In Winter Conference on Applications of Computer Vision (WACV), pages 459-468, 2020. 3, 6, 8" + ], + "bbox": [ + 503, + 92, + 893, + 204 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "4725", + "bbox": [ + 482, + 955, + 513, + 967 + ], + "page_idx": 12 + } +] \ No newline at end of file diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_model.json b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b9772b04be8c5cf40374ca7c36fa86983ffe1d98 --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_model.json @@ -0,0 +1,2855 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.24, + 0.122, + 0.73, + 0.143 + ], + "angle": 0, + "content": "3D Human Pose Estimation via Intuitive Physics" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.162, + 0.78, + 0.197 + ], + "angle": 0, + "content": "Shashank Tripathi1 Lea Müller1 Chun-Hao P. Huang1 Omid Taheri1 Michael J. 
Black1 Dimitrios Tzionas2*" + }, + { + "type": "text", + "bbox": [ + 0.147, + 0.2, + 0.823, + 0.234 + ], + "angle": 0, + "content": "\\(^{1}\\)Max Planck Institute for Intelligent Systems, Tübingen, Germany \\(^{2}\\)University of Amsterdam, the Netherlands {stripathi, lmueller2, chuang2, otaheri, black}@tue.mpg.de d.tzionas@uva.nl" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.256, + 0.895, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.413, + 0.895, + 0.47 + ], + "angle": 0, + "content": "Figure 1. Estimating a 3D body from an image is ill-posed. A recent, representative, optimization method [59] produces bodies that are in unstable poses, penetrate the floor, or hover above it. In contrast, IPMAN estimates a 3D body that is physically plausible. To achieve this, IPMAN uses novel intuitive-physics (IP) terms that exploit inferred pressure heatmaps on the body, the Center of Pressure (CoP), and the body's Center of Mass (CoM). Body heatmap colors encode per-vertex pressure." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.498, + 0.314, + 0.513 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.528, + 0.474, + 0.892 + ], + "angle": 0, + "content": "Estimating 3D humans from images often produces implausible bodies that lean, float, or penetrate the floor. Such methods ignore the fact that bodies are typically supported by the scene. A physics engine can be used to enforce physical plausibility, but these are not differentiable, rely on unrealistic proxy bodies, and are difficult to integrate into existing optimization and learning frameworks. In contrast, we exploit novel intuitive-physics (IP) terms that can be inferred from a 3D SMPL body interacting with the scene. Inspired by biomechanics, we infer the pressure heatmap on the body, the Center of Pressure (CoP) from the heatmap, and the SMPL body's Center of Mass (CoM). With these, we develop IPMAN, to estimate a 3D body from a color image in a \"stable\" configuration by encouraging plausible floor contact and overlapping CoP and CoM. Our IP terms are intuitive, easy to implement, fast to compute, differentiable, and can be integrated into existing optimization and regression methods. We evaluate IPMAN on standard datasets and MoYo, a new dataset with synchronized multi-view images, ground-truth 3D bodies with complex poses, body-floor contact, CoM and pressure. IPMAN produces more plausible results than the state of the art, improving accuracy for static poses, while not hurting dynamic ones. Code and data are available for research at https://ipman.is.tue.mpg.de." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.498, + 0.631, + 0.513 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.523, + 0.896, + 0.825 + ], + "angle": 0, + "content": "To understand humans and their actions, computers need automatic methods to reconstruct the body in 3D. Typically, the problem entails estimating the 3D human pose and shape (HPS) from one or more color images. State-of-the-art (SOTA) methods [46, 51, 75, 102] have made rapid progress, estimating 3D humans that align well with image features in the camera view. Unfortunately, the camera view can be deceiving. When viewed from other directions, or when placed in a 3D scene, the estimated bodies are often physically implausible: they lean, hover, or penetrate the ground (see Fig. 1 top). 
This is because most SOTA methods reason about humans in isolation; they ignore that people move in a scene, interact with it, and receive physical support by contacting it. This is a deal-breaker for inherently 3D applications, such as biomechanics, augmented/virtual reality (AR/VR) and the \"metaverse\"; these need humans to be reconstructed faithfully and physically plausibly with respect to the scene. For this, we need a method that estimates the 3D human on a ground plane from a color image in a configuration that is physically \"stable\"." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.825, + 0.895, + 0.901 + ], + "angle": 0, + "content": "This is naturally related to reasoning about physics and support. There exist many physics simulators [10, 30, 60] for games, movies, or industrial simulations, and using these for plausible HPS estimation is increasingly popular [66, 74, 96]. However, existing simulators come with two significant" + }, + { + "type": "page_footnote", + "bbox": [ + 0.078, + 0.912, + 0.327, + 0.926 + ], + "angle": 0, + "content": "* This work was mostly performed at MPI-IS." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4713" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.333 + ], + "angle": 0, + "content": "problems: (1) They are typically non-differentiable black boxes, making them incompatible with existing optimization and learning frameworks. Consequently, most methods [64, 95, 96] use them with reinforcement learning to evaluate whether a certain input has the desired outcome, but with no ability to reason about how changing inputs affects the outputs. (2) They rely on an unrealistic proxy body model for computational efficiency; bodies are represented as groups of rigid 3D shape primitives. Such proxy models are crude approximations of human bodies, which, in reality, are much more complex and deform non-rigidly when they move and interact. Moreover, proxies need a priori known body dimensions that are kept fixed during simulation. Also, these proxies differ significantly from the 3D body models [41, 54, 92] used by SOTA HPS methods. Thus, current physics simulators are too limited for use in HPS." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.473, + 0.565 + ], + "angle": 0, + "content": "What we need, instead, is a solution that is fully differentiable, uses a realistic body model, and seamlessly integrates physical reasoning into HPS methods (both optimization- and regression-based). To this end, instead of using full physics simulation, we introduce novel intuitive-physics (IP) terms that are simple, differentiable, and compatible with a body model like SMPL [54]. Specifically, we define terms that exploit an inferred pressure heatmap of the body on the ground plane, the Center of Pressure (CoP) that arises from the heatmap, and the SMPL body's Center of Mass (CoM) projected on the floor; see Fig. 2 for a visualization. Intuitively, bodies whose CoM lie close to their CoP are more stable than ones with a CoP that is further away (see Fig. 5); the former suggests a static pose, e.g. standing or holding a yoga pose, while the latter a dynamic pose, e.g., walking." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.473, + 0.702 + ], + "angle": 0, + "content": "We use these intuitive-physics terms in two ways. First, we incorporate them in an objective function that extends SMPLify-XMC [59] to optimize for body poses that are stable. 
We also incorporate the same terms in the training loss for an HPS regressor, called IPMAN (Intuitive-Physics-based huMAN). In both formulations, the intuitive-physics terms encourage estimates of body shape and pose that have sufficient ground contact, while penalizing interpenetration and encouraging an overlap of the CoP and CoM." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.473, + 0.901 + ], + "angle": 0, + "content": "Our intuitive-physics formulation is inspired by work in biomechanics [32, 33, 61], which characterizes the stability of humans in terms of relative positions between the CoP, the CoM, and the Base of Support (BoS). The BoS is defined as the convex hull of all contact regions on the floor (Fig. 2). Following past work [6, 71, 74], we use the \"inverted pendulum\" model [85, 86] for body balance; this considers poses as stable if the gravity-projected CoM onto the floor lies inside the BoS. Similar ideas are explored by Scott et al. [71] but they focus on predicting a foot pressure heatmap from 2D or 3D body joints. We go significantly further to exploit stability in training an HPS regressor. This requires two technical novelties." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.083, + 0.895, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.179, + 0.895, + 0.234 + ], + "angle": 0, + "content": "Figure 2. (1) A SMPL mesh sitting. (2) The inferred pressure map on the ground (color-coded heatmap), CoP (green), CoM (pink), and Base of Support (BoS, yellow polygon). (3) Segmentation of SMPL into \\( N_P = 10 \\) parts, used for computing CoM; see Sec. 3.2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.25, + 0.895, + 0.37 + ], + "angle": 0, + "content": "The first involves computing CoM. To this end, we uniformly sample points on SMPL's surface, and calculate each body part's volume. Then, we compute CoM as the average of all uniformly sampled points weighted by the corresponding part volumes. We denote this as pCoM, standing for \"part-weighted CoM\". Importantly, pCoM takes into account SMPL's shape, pose, and all blend shapes, while it is also computationally efficient and differentiable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.372, + 0.896, + 0.524 + ], + "angle": 0, + "content": "The second involves estimating CoP directly from the image, without access to a pressure sensor. Our key insight is that the soft tissues of human bodies deform under pressure, e.g., the buttocks deform when sitting. However, SMPL does not model this deformation; it penetrates the ground instead of deforming. We use the penetration depth as a proxy for pressure [68]; deeper penetration means higher pressure. With this, we estimate a pressure field on SMPL's mesh and compute the CoP as the pressure-weighted average of the surface points. Again this is differentiable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.897, + 0.691 + ], + "angle": 0, + "content": "For evaluation, we use a standard HPS benchmark (Human3.6M [37]), but also the RICH [35] dataset. However, these datasets have limited interactions with the floor. We thus capture a novel dataset, MoYo, of challenging yoga poses, with synchronized multi-view video, ground-truth SMPL-X [63] meshes, pressure sensor measurements, and body CoM. IPMAN, in both of its forms, and across all datasets, produces more accurate and stable 3D bodies than the state of the art. 
Importantly, we find that IPMAN improves accuracy for static poses, while not hurting dynamic ones. This makes IPMAN applicable to everyday motions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.691, + 0.897, + 0.829 + ], + "angle": 0, + "content": "To summarize: (1) We develop IPMAN, the first HPS method that integrates intuitive physics. (2) We infer biomechanical properties such as CoM, CoP and body pressure. (3) We define novel intuitive-physics terms that can be easily integrated into HPS methods. (4) We create MoYo, a dataset that uniquely has complex poses, multi-view video, and ground-truth bodies, pressure, and CoM. (5) We show that our IP terms improve HPS accuracy and physical plausibility. (6) Data and code are available for research." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.845, + 0.642, + 0.86 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.897, + 0.903 + ], + "angle": 0, + "content": "3D Human Pose and Shape (HPS) from images. Existing methods fall into two major categories: (1) non-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4714" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.198 + ], + "angle": 0, + "content": "parametric methods that reconstruct a free-form body representation, e.g., joints [1, 56, 57] or vertices [52, 58, 100], and (2) parametric methods that use statistical body models [5, 25, 41, 54, 63, 92, 97]. The latter methods focus on various aspects, such as expressiveness [13, 18, 63, 69, 87], clothed bodies [15, 88, 91], videos [24, 45, 78, 99], and multiperson scenarios [38, 75, 103], to name a few." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.199, + 0.472, + 0.38 + ], + "angle": 0, + "content": "Inference is done by either optimization or regression. Optimization-based methods [7, 16, 63, 87, 88] fit a body model to image evidence, such as joints [11], dense vertex correspondences [2] or 2D segmentation masks [23]. Regression-based methods [42, 44, 48, 51, 76, 102, 106, 109] use a loss similar to the objective function of optimization methods to train a network to infer body model parameters. Several methods combine optimization and regression in a training loop [47, 50, 59]. Recent methods [24, 40] fine-tune pre-trained networks at test time w.r.t. an image or a sequence, retaining flexibility (optimization) while being less sensitive to initialization (regression)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.382, + 0.471, + 0.426 + ], + "angle": 0, + "content": "Despite their success, these methods reason about the human in \"isolation\", without taking the surrounding scene into account; see [77, 107] for a comprehensive review." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.429, + 0.472, + 0.565 + ], + "angle": 0, + "content": "Contact-only scene constraints. A common way of using scene information is to consider body-scene contact [12, 17, 27, 28, 65, 84, 90, 94, 98, 104, 105, 110]. Yamamoto et al. [93] and others [19, 27, 70, 98, 104] ensure that estimated bodies have plausible scene contact. For videos, encouraging foot-ground contact reduces foot skating [36, 65, 72, 105, 110]. Weng et al. [84] use contact in estimating the pose and scale of scene objects, while Villegas et al. [80] preserve self- and ground contact for motion retargeting." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.471, + 0.642 + ], + "angle": 0, + "content": "These methods typically take two steps: (1) detecting contact areas on the body and/or scene and (2) minimizing the distance between these. Surfaces are typically assumed to be in contact if their distance is below a threshold and their relative motion is small [27, 35, 98, 104]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.643, + 0.471, + 0.734 + ], + "angle": 0, + "content": "Many methods only consider contact between the ground and the foot joints [66, 110] or other end-effectors [65]. In contrast, IPMAN uses the full 3D body surface and exploits this to compute the pressure, CoP and CoM. Unlike binary contact, this is differentiable, making the IP terms useful for training HPS regressors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Physics-based scene constraints. Early work uses physics to estimate walking [8, 9] or full body motion [82]. Recent methods [21, 22, 66, 73, 74, 89, 96] regress 3D humans and then refine them through physics-based optimization. Physics is used for two primary reasons: (1) to regularise dynamics, reducing jitter [49, 66, 74, 96], and (2) to discourage interpenetration and encourage contact. Since contact events are discontinuous, the pipeline is either not end-to-end trainable or trained with reinforcement learning [64, 96]. Xie et al. [89] propose differentiable physics-inspired objectives based on a soft contact penalty, while DiffPhy [21] uses a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.212 + ], + "angle": 0, + "content": "differentiable physics simulator [31] during inference. Both methods apply the objectives in an optimization scheme, while IPMAN is applied to both optimization and regression. PhysCap [74] considers a pose as balanced, when the CoM is projected within the BoS. Rempe et al. [66] impose PD control on the pelvis, which they treat as a CoM. Scott et al. [71] regress foot pressure from 2D and 3D joints for stability analysis but do not use it to improve HPS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.213, + 0.895, + 0.32 + ], + "angle": 0, + "content": "All these methods use unrealistic bodies based on shape primitives. Some require known body dimensions [66, 74, 96] while others estimate body scale [49, 89]. In contrast, IPMAN computes CoM, CoP and BoS directly from the SMPL mesh. Clever et al. [14] and Luo et al. [55] estimate 3D body pose but from pressure measurements, not from images. Their task is fundamentally different from ours." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.334, + 0.593, + 0.35 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.36, + 0.642, + 0.375 + ], + "angle": 0, + "content": "3.1. Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.384, + 0.892, + 0.414 + ], + "angle": 0, + "content": "Given a color image, I, we estimate the parameters of the camera and the SMPL body model [54]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.415, + 0.895, + 0.521 + ], + "angle": 0, + "content": "Body model. SMPL maps pose, \\(\\theta\\), and shape, \\(\\beta\\), parameters to a 3D mesh, \\(M(\\theta, \\beta)\\). The pose parameters, \\(\\theta \\in \\mathbb{R}^{24 \\times 6}\\), are rotations of SMPL's 24 joints in a 6D representation [108]. 
The shape parameters, \\(\\beta \\in \\mathbb{R}^{10}\\), are the first 10 PCA coefficients of SMPL's shape space. The generated mesh \\(M(\\theta, \\beta)\\) consists of \\(N_V = 6890\\) vertices, \\(V \\in \\mathbb{R}^{N_V \\times 3}\\), and \\(N_F = 13776\\) faces, \\(F \\in \\mathbb{R}^{N_F \\times 3 \\times 3}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.521, + 0.895, + 0.597 + ], + "angle": 0, + "content": "Note that our regression method (IPMAN-R, Sec. 3.4.1) uses SMPL, while our optimization method (IPMAN-O, Sec. 3.4.2) uses SMPL-X [63], to match the models used by the baselines. For simplicity of exposition, we refer to both models as SMPL when the distinction is not important." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.895, + 0.703 + ], + "angle": 0, + "content": "Camera. For the regression-based IPMAN-R, we follow the standard convention [42, 43, 47] and use a weak perspective camera with a 2D scale, \\(s\\), translation, \\(\\mathbf{t}^c = (t_x^c,t_y^c)\\), fixed camera rotation, \\(\\mathbf{R}^c = \\mathbf{I}_3\\), and a fixed focal length \\((f_{x},f_{y})\\). The root-relative body orientation \\(\\mathbf{R}^b\\) is predicted by the neural network, but body translation stays fixed at \\(\\mathbf{t}^b = \\mathbf{0}\\) as it is absorbed into the camera's translation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.895, + 0.856 + ], + "angle": 0, + "content": "For the optimization-based IPMAN-O, we follow Müller et al. [59] to use the full-perspective camera model and optimize the focal lengths \\((f_x, f_y)\\), camera rotation \\(\\mathbf{R}^c\\) and camera translation \\(\\mathbf{t}^c\\). The principal point \\((o_x, o_y)\\) is the center of the input image. \\(\\mathbf{K}\\) is the intrinsic matrix storing focal lengths and the principal point. We assume that the body rotation \\(\\mathbf{R}^b\\) and translation \\(\\mathbf{t}^b\\) are absorbed into the camera parameters, thus, they stay fixed as \\(\\mathbf{R}^b = \\mathbf{I}_3\\) and \\(\\mathbf{t}^b = \\mathbf{0}\\). Using the camera, we project a 3D point \\(\\mathbf{X} \\in \\mathbb{R}^3\\) to an image point \\(\\mathbf{x} \\in \\mathbb{R}^2\\) through \\(\\mathbf{x} = \\mathbf{K}(\\mathbf{R}^c\\mathbf{X} + \\mathbf{t}^c)\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Ground plane and gravity-projection. We assume that the gravity direction is perpendicular to the ground plane in the world coordinate system. Thus, for any arbitrary point in" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4715" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.473, + 0.198 + ], + "angle": 0, + "content": "3D space, \\(\\pmb{u} \\in \\mathbb{R}^3\\), its gravity-projected point, \\(\\pmb{u}' = g(\\pmb{u}) \\in \\mathbb{R}^3\\), is the projection of \\(\\pmb{u}\\) along the plane normal \\(\\pmb{n}\\) onto the ground plane, and \\(g(.)\\) is the projection operator. The function \\(h(\\pmb{u})\\) returns the signed \"height\" of a point \\(\\pmb{u}\\) with respect to the ground; i.e., the signed distance from \\(\\pmb{u}\\) to the ground plane along the gravity direction, where \\(h(\\pmb{u}) < 0\\) if \\(\\pmb{u}\\) is below the ground and \\(h(\\pmb{u}) > 0\\) if \\(\\pmb{u}\\) is above it." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.215, + 0.25, + 0.231 + ], + "angle": 0, + "content": "3.2. 
Stability Analysis" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.241, + 0.471, + 0.377 + ], + "angle": 0, + "content": "We follow the biomechanics literature [32, 33, 61] and Scott et al. [71] to define three fundamental elements for stability analysis: We use the Newtonian definition for the \"Center of Mass\" (CoM); i.e., the mass-weighted average of particle positions. The \"Center of Pressure\" (CoP) is the ground-reaction force's point of application. The \"Base of Support\" (BoS) is the convex hull of all body-ground contacts. Below, we define intuitive-physics (IP) terms using the inferred CoM and CoP. BoS is only used for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.379, + 0.471, + 0.517 + ], + "angle": 0, + "content": "Body Center of Mass (CoM). We introduce a novel CoM formulation that is fully differentiable and considers the per-part mass contributions, dubbed as pCoM; see Sup. Mat. for alternative CoM definitions. To compute this, we first segment the template mesh into \\( N_P = 10 \\) parts \\( P_i \\in \\mathcal{P} \\); see Fig. 2. We do this once offline, and keep the segmentation fixed during training and optimization. Assuming a shaped and posed SMPL body, the per-part volumes \\( \\mathcal{V}^{P_i} \\) are calculated by splitting the SMPL mesh into parts." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.518, + 0.471, + 0.715 + ], + "angle": 0, + "content": "However, mesh splitting is a non-differentiable operation. Thus, it cannot be used for either training a regressor (IPMAN-R) or for optimization (IPMAN-O). Instead, we work with the full SMPL mesh and use differentiable \"close-translate-fill\" operations for each body part on the fly. First, for each part \\( P \\), we extract boundary vertices \\( \\mathcal{B}_P \\) and add in the middle a virtual vertex \\( \\boldsymbol{v}_g \\), where \\( \\boldsymbol{v}_g = \\sum_{j \\in \\mathcal{B}_P} \\boldsymbol{v}_j / |\\mathcal{B}_P| \\). Then, for the \\( \\mathcal{B}_P \\) and \\( \\boldsymbol{v}_g \\) vertices, we add virtual faces to \"close\" \\( P \\) and make it watertight. Next, we \"translate\" \\( P \\) such that the part centroid \\( \\mathbf{c}_P = \\sum_{j \\in P} \\boldsymbol{v}_j / |P| \\) is at the origin. Finally, we \"fill\" the centered \\( P \\) with tetrahedrons by connecting the origin with each face vertex. Then, the part volume, \\( \\mathcal{V}^{\\mathcal{P}} \\), is the sum of all tetrahedron volumes [101]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.717, + 0.472, + 0.884 + ], + "angle": 0, + "content": "To create a uniform distribution of surface vertices, we uniformly sample \\( N_U = 20000 \\) surface points \\( V_U \\in \\mathbb{R}^{N_U \\times 3} \\) on the template SMPL mesh using the Triangle Point Picking method [83]. Given \\( V_U \\) and the template SMPL mesh vertices \\( V_T \\), we follow [59], and analytically compute a sparse linear regressor \\( \\mathbf{W} \\in \\mathbb{R}^{N_U \\times N_V} \\) such that \\( V_U = \\mathbf{W}V_T \\). During training and optimization, given an arbitrary shaped and posed mesh with vertices \\( V \\), we obtain uniformly-sampled mesh surface points as \\( V_U = \\mathbf{W}V \\). Each surface point, \\( v_i \\), is assigned to the body part, \\( P_{v_i} \\), corresponding to the face, \\( F_{v_i} \\), it was sampled from." 
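The surface-sampling step described above can be made concrete with a small sketch. Below is a minimal, illustrative NumPy example (not the authors' released code; function and variable names are assumptions) of Triangle Point Picking on the template mesh and of the fixed linear regressor \(\mathbf{W}\) with \(V_U = \mathbf{W}V_T\); the returned face indices can then be mapped to body-part labels for the part-weighted pCoM defined next.

```python
# Illustrative sketch (NumPy) of uniform surface sampling and the regressor W.
# Names and defaults are assumptions; this is not the authors' released code.
import numpy as np

def build_surface_sampler(verts_T, faces, n_samples=20000, seed=0):
    """Triangle Point Picking on the template mesh (verts_T: (N_V,3), faces: (N_F,3)).
    Returns W with V_U = W @ V for any shaped/posed mesh V, plus the face index
    of each sample (used to look up its body-part label)."""
    rng = np.random.default_rng(seed)
    tri = verts_T[faces]                                            # (N_F, 3, 3)
    # Pick faces with probability proportional to their area.
    area = 0.5 * np.linalg.norm(np.cross(tri[:, 1] - tri[:, 0],
                                         tri[:, 2] - tri[:, 0]), axis=1)
    face_idx = rng.choice(len(faces), size=n_samples, p=area / area.sum())
    # Uniform barycentric coordinates inside each triangle (reflect if outside).
    u, v = rng.random(n_samples), rng.random(n_samples)
    flip = (u + v) > 1.0
    u[flip], v[flip] = 1.0 - u[flip], 1.0 - v[flip]
    bary = np.stack([1.0 - u - v, u, v], axis=1)                    # (N_U, 3)
    # Each sample is a fixed barycentric mix of its face's three vertices,
    # so W has three non-zeros per row (stored densely here for brevity).
    W = np.zeros((n_samples, len(verts_T)))
    W[np.arange(n_samples)[:, None], faces[face_idx]] = bary
    return W, face_idx

# Usage: V_U = W @ V gives uniformly distributed surface points on a posed mesh V.
```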
+ }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Finally, the part-weighted pCoM is computed as a" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.838, + 0.108 + ], + "angle": 0, + "content": "volume-weighted mean of the mesh surface points:" + }, + { + "type": "equation", + "bbox": [ + 0.625, + 0.116, + 0.892, + 0.157 + ], + "angle": 0, + "content": "\\[\n\\bar{\\mathbf{m}} = \\frac{\\sum_{i=1}^{N_U} \\mathcal{V}^{P_{v_i}} v_i}{\\sum_{i=1}^{N_U} \\mathcal{V}^{P_{v_i}}}, \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.165, + 0.892, + 0.212 + ], + "angle": 0, + "content": "where \\(\\mathcal{V}^{P_{v_i}}\\) is the volume of the part \\(P_{v_i}\\in \\mathcal{P}\\) to which \\(v_{i}\\) is assigned. This formulation is fully differentiable and can be employed with any existing 3D HPS estimation method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.212, + 0.895, + 0.317 + ], + "angle": 0, + "content": "Note that computing CoM (or volume) from uniformly sampled surface points does not work (see Sup. Mat.) because it assumes that mass, \\( M \\), is proportional to surface area, \\( S \\). Instead, our pCoM computes mass from volume, \\( \\mathcal{V} \\), via the standard density equation, \\( M = \\rho \\mathcal{V} \\), while our close-translate-fill operation computes the volume of deformable bodies in an efficient and differentiable manner." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.318, + 0.895, + 0.469 + ], + "angle": 0, + "content": "Center of Pressure (CoP). Recovering a pressure heatmap from an image without using hardware, such as pressure sensors, is a highly ill-posed problem. However, stability analysis requires knowledge of the pressure exerted on the human body by the supporting surfaces, like the ground. Going beyond binary contact, Rogez et al. [68] estimate 3D forces by detecting intersecting vertices between hand and object meshes. Clever et al. [14] recover pressure maps by allowing articulated body models to deform a soft pressure-sensing virtual mattress in a physics simulation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.469, + 0.894, + 0.606 + ], + "angle": 0, + "content": "In contrast, we observe that, while real bodies interacting with rigid objects (e.g., the floor) deform under contact, SMPL does not model such soft-tissue deformations. Thus, the body mesh penetrates the contacting object surface and the amount of penetration can be a proxy for pressure; a deeper penetration implies higher pressure. With the height \\( h(v_{i}) \\) (see Sec. 3.1) of a mesh surface point \\( v_{i} \\) with respect to the ground plane \\( \\Pi \\), we define a pressure field to compute the per-point pressure \\( \\rho_{i} \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.614, + 0.892, + 0.655 + ], + "angle": 0, + "content": "\\[\n\\rho_i = \\left\\{ \\begin{array}{ll} 1 - \\alpha h(v_i) & \\text{if } h(v_i) < 0, \\\\ e^{-\\gamma h(v_i)} & \\text{if } h(v_i) \\geq 0, \\end{array} \\right. \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.663, + 0.895, + 0.755 + ], + "angle": 0, + "content": "where \\(\\alpha\\) and \\(\\gamma\\) are scalar hyperparameters set empirically. For penetrating points, we approximate soft tissue via a \"spring\" model, so the pressure follows Hooke's law and grows linearly with penetration depth. 
Some pressure is also assigned to points above the ground to allow tolerance for footwear, but this decays quickly. Finally, we compute the CoP, \\(\\overline{\\mathbf{s}}\\), as" + }, + { + "type": "equation", + "bbox": [ + 0.627, + 0.763, + 0.892, + 0.803 + ], + "angle": 0, + "content": "\\[\n\\overline{\\mathbf{s}} = \\frac{\\sum_{i=1}^{N_U} \\rho_i v_i}{\\sum_{i=1}^{N_U} \\rho_i}. \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.811, + 0.816, + 0.825 + ], + "angle": 0, + "content": "Again, note that this term is fully differentiable." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Base of Support (BoS). In biomechanics [34, 85], BoS is defined as the \"supporting area\" or the possible range of the CoP on the supporting surface. Here, we define BoS as the convex hull [67] of all gravity-projected body-ground contact points. In detail, we first determine all such contacts" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4716" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.152 + ], + "angle": 0, + "content": "by selecting the set of mesh surface points \\( v_{i} \\) close to the ground, and then gravity-project them onto the ground to obtain \\( C = \\{g(v_{i}) \\mid |h(v_{i})| < \\tau\\} \\). The BoS is then defined as the convex hull \\( \\mathcal{C} \\) of \\( C \\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.16, + 0.296, + 0.175 + ], + "angle": 0, + "content": "3.3. Intuitive-Physics Losses" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.182, + 0.471, + 0.318 + ], + "angle": 0, + "content": "Stability loss. The \"inverted pendulum\" model of human balance [85, 86] considers the relationship between the CoM and BoS to determine stability. Simply put, for a given shape and pose, if the body CoM, projected on the gravity-aligned ground plane, lies within the BoS, the pose is considered stable. While this definition of stability is useful for evaluation, using it in a loss or energy function for 3D HPS estimation results in sparse gradients (see Sup. Mat.). Instead, we define the stability criterion as:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.326, + 0.469, + 0.342 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{stability}} = \\| g(\\bar{\\mathbf{m}}) - g(\\bar{\\mathbf{s}}) \\|_2, \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.348, + 0.47, + 0.379 + ], + "angle": 0, + "content": "where \\(g(\\bar{\\mathbf{m}})\\) and \\(g(\\bar{\\mathbf{s}})\\) are the gravity-projected CoM and CoP, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.379, + 0.471, + 0.59 + ], + "angle": 0, + "content": "Ground contact loss. As shown in Fig. 1, 3D HPS methods minimize the 2D joint reprojection error and do not consider the plausibility of body-ground contact. Ignoring this can result in interpenetrating or hovering meshes. Inspired by self-contact losses [19,59] and hand-object contact losses [26,29], we define two ground losses, namely pushing, \\(\\mathcal{L}_{\\mathrm{push}}\\), and pulling, \\(\\mathcal{L}_{\\mathrm{pull}}\\), that take into account the height, \\(h(v_{i})\\), of a vertex, \\(v_{i}\\), with respect to the ground plane. For \\(h(v_{i}) < 0\\), i.e., for vertices under the ground plane, \\(\\mathcal{L}_{\\mathrm{push}}\\) discourages body-ground penetrations. 
For \\(h(v_{i}) \\geq 0\\), i.e., for hovering meshes, \\(\\mathcal{L}_{\\mathrm{pull}}\\) encourages the vertices that lie close to the ground to \"snap\" into contact with it. Note that the losses are non-conflicting as they act on disjoint sets of vertices. Then, the ground contact loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.597, + 0.469, + 0.614 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{ground}} = \\mathcal{L}_{\\text{pull}} + \\mathcal{L}_{\\text{push}}, \\text{ with} \\tag{5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.616, + 0.469, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{pull}} = \\alpha_1 \\tanh\\left(\\frac{h(v_i)}{\\alpha_2}\\right)^2 \\quad \\text{if } h(v_i) \\geq 0, \\text{ and} \\tag{6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.114, + 0.649, + 0.469, + 0.681 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{push}} = \\beta_1 \\tanh\\left(\\frac{h(v_i)}{\\beta_2}\\right)^2 \\quad \\text{if } h(v_i) < 0. \\tag{7}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.686, + 0.175, + 0.7 + ], + "angle": 0, + "content": "3.4. IPMAN" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.709, + 0.47, + 0.83 + ], + "angle": 0, + "content": "We use our new IP losses for two tasks: (1) We extend HMR [42] to develop IPMAN-R, a regression-based HPS method. (2) We extend SMPLify-XMC [59] to develop IPMAN-O, an optimization-based method. Note that IPMAN-O uses a reference ground plane, while IPMAN-R uses the ground plane only for training but not at test time. It leverages the known ground in 3D datasets, and thus, does not require additional data beyond past HPS methods." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.847, + 0.205, + 0.86 + ], + "angle": 0, + "content": "3.4.1 IPMAN-R" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Most HPS methods are trained with a mix of direct supervision using 3D datasets [37,56,81] and 2D reprojection losses" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.081, + 0.891, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.223, + 0.894, + 0.321 + ], + "angle": 0, + "content": "Figure 3. IPMAN-R architecture. First, the HMR regressor estimates camera translation and SMPL parameters for an input image. These parameters are used to generate the SMPL mesh in the camera frame, \\( M_{c} \\). To transform the mesh from camera into world coordinates \\( (M_{c} \\rightarrow M_{w}) \\), IPMAN-R uses the ground-truth camera rotation, \\( R_{w}^{c} \\), and translation, \\( t_{w}^{c} \\). The IP losses, \\( \\mathcal{L}_{\\mathrm{ground}} \\) and \\( \\mathcal{L}_{\\mathrm{stability}} \\), are applied on the mesh in the world coordinate system." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.351, + 0.894, + 0.442 + ], + "angle": 0, + "content": "using image datasets [4, 39, 53]. The 3D losses, however, are calculated in the camera frame, ignoring scene information and physics. IPMAN-R extends HMR [42] with our intuitive-physics terms; see Fig. 3 for the architecture. For training, we use the known camera coordinates and the world ground plane in 3D datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.444, + 0.894, + 0.642 + ], + "angle": 0, + "content": "As described in Sec. 
3.1 (paragraph \"Camera\"), HMR infers the camera translation, \\(\\mathbf{t}^c\\), and SMPL parameters, \\(\\theta\\) and \\(\\beta\\), in the camera coordinates assuming \\(\\mathbf{R}^c = \\mathbf{I}_3\\) and \\(\\mathbf{t}^b = \\mathbf{0}\\). Ground-truth 3D joints and SMPL parameters are used to supervise the inferred mesh \\(M_c\\) in the camera frame. However, 3D datasets also provide the ground, albeit in the world frame. To leverage the known ground, we transform the predicted body orientation, \\(\\mathbf{R}^b\\), to world coordinates using the ground-truth camera rotation, \\(\\mathbf{R}_w^c\\), as \\(\\mathbf{R}_w^b = \\mathbf{R}_w^{c\\top}\\mathbf{R}^b\\). Then, we compute the body translation in world coordinates as \\(\\mathbf{t}_w^b = -\\mathbf{t}^c + \\mathbf{t}_w^c\\). With the predicted mesh and ground plane in world coordinates, we add the IP terms, \\(\\mathcal{L}_{\\mathrm{stability}}\\) and \\(\\mathcal{L}_{\\mathrm{ground}}\\), for HPS training as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.501, + 0.657, + 0.892, + 0.673 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{IPMAN-R}}\\left(\\boldsymbol{\\theta}, \\boldsymbol{\\beta}, \\mathbf{t}^{c}\\right) = \\lambda_{2D} \\mathcal{L}_{2D} + \\lambda_{3D} \\mathcal{L}_{3D} + \\lambda_{\\mathrm{SMPL}} \\mathcal{L}_{\\mathrm{SMPL}} +\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.676, + 0.891, + 0.692 + ], + "angle": 0, + "content": "\\[\n\\lambda_{\\mathrm{s}} \\mathcal{L}_{\\text{stability}} + \\lambda_{\\mathrm{g}} \\mathcal{L}_{\\text{ground}}, \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.708, + 0.894, + 0.753 + ], + "angle": 0, + "content": "where \\(\\lambda_{\\mathrm{s}}\\) and \\(\\lambda_{\\mathrm{g}}\\) are the weights for the respective IP terms. For training (data augmentation, hyperparameters, etc.), we follow Kolotouros et al. [47]; for more details see Sup. Mat." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.783, + 0.628, + 0.797 + ], + "angle": 0, + "content": "3.4.2 IPMAN-O" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.894, + 0.902 + ], + "angle": 0, + "content": "To fit SMPL-X to 2D image keypoints, SMPLify-XMC [59] initializes the fitting process by exploiting the self-contact and global-orientation of a known/presented 3D mesh. We posit that the presented pose contains further information, such as stability, pressure and contact with the ground-plane. IPMAN-O uses this insight to apply stability and ground" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.957, + 0.515, + 0.968 + ], + "angle": 0, + "content": "4717" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.364, + 0.107 + ], + "angle": 0, + "content": "contact losses. The IPMAN-O objective is:" + }, + { + "type": "equation", + "bbox": [ + 0.11, + 0.115, + 0.469, + 0.173 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} E_{\\text{IPMAN-O}}(\\boldsymbol{\\beta}, \\boldsymbol{\\theta}, \\boldsymbol{\\Phi}) = E_{J2D} + \\lambda_{\\beta} E_{\\beta} + \\lambda_{\\theta_h} E_{\\theta_h} + \\\\ \\lambda_{\\tilde{\\theta}_b} E_{\\tilde{\\theta}_b} + \\lambda_{\\tilde{C}} E_{\\tilde{C}} + \\\\ \\lambda_{s} E_{\\text{stability}} + \\lambda_{g} E_{\\text{ground}}. 
\\tag {9} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.18, + 0.471, + 0.317 + ], + "angle": 0, + "content": "\\(\\Phi\\) denotes the camera parameters: rotation \\(\\mathbf{R}^c\\), translation \\(\\mathbf{t}^c\\), and focal length, \\((f_x, f_y)\\). \\(E_{J2D}\\) is a 2D joint loss, \\(E_\\beta\\) and \\(E_{\\theta_h}\\) are \\(L_2\\) body shape and hand pose priors. \\(E_{\\tilde{\\theta}_b}\\) and \\(E_{\\tilde{C}}\\) are pose and contact terms w.r.t. the presented 3D pose and contact (see [59] for details). \\(E_S\\) and \\(E_G\\) are the stability and ground contact losses from Sec. 3.3. Since the estimated mesh is in the same coordinate system as the presented mesh and the ground-plane, we directly apply IP losses without any transformations. For details see Sup. Mat." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.328, + 0.21, + 0.345 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.353, + 0.373, + 0.369 + ], + "angle": 0, + "content": "4.1. Training and Evaluation Datasets" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.376, + 0.471, + 0.436 + ], + "angle": 0, + "content": "Human3.6M [37]. A dataset of 3D human keypoints and RGB images. The poses are limited in terms of challenging physics, focusing on common activities like walking, discussing, smoking, or taking photos." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.437, + 0.472, + 0.512 + ], + "angle": 0, + "content": "RICH [35]. A dataset of videos with accurate marker-less motion-captured 3D bodies and 3D scans of scenes. The images are more natural than Human3.6M and Fit3D [20]. We consider sequences with meaningful body-ground interaction. For the list of sequences, see Sup. Mat." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.512, + 0.471, + 0.558 + ], + "angle": 0, + "content": "Other datasets. Similar to [47], for training we use 3D keypoints from MPI-INF-3DHP [56] and 2D keypoints from image datasets such as COCO [53], MPII [4] and LSP [39]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.575, + 0.336, + 0.59 + ], + "angle": 0, + "content": "4.1.1 MoCap Yoga (MoYo) Dataset" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.735 + ], + "angle": 0, + "content": "We capture a trained Yoga professional in 200 highly complex poses (see Fig. 4) using a synchronized MoCap system, pressure mat, and a multi-view RGB video system with 8 static, calibrated cameras; for details see Sup. Mat. The dataset contains \\(\\sim\\) 1.75M RGB frames in 4K resolution with ground-truth SMPL-X [63], pressure and CoM. Compared to the Fit3D [20] and PosePrior [1] datasets, MoYo is more challenging; it has extreme poses, strong self-occlusion, and significant body-ground and self-contact." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.743, + 0.262, + 0.758 + ], + "angle": 0, + "content": "4.2. Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.766, + 0.469, + 0.811 + ], + "angle": 0, + "content": "We use standard 3D HPS metrics: The Mean Per-Joint Position Error (MPJPE), its Procrustes Aligned version (PA-MPJPE), and the Per-Vertex Error (PVE) [62]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.812, + 0.471, + 0.857 + ], + "angle": 0, + "content": "BoS Error (BoSE). To evaluate stability, we propose a new metric called BoS Error (BoSE). Following the definition of stability (Sec. 
3.3) we define:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.865, + 0.469, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\operatorname{BoSE} = \\left\\{ \\begin{array}{ll} 1 & g(\\bar{\\mathbf{m}}) \\in \\mathcal{C}(C) \\\\ 0 & g(\\bar{\\mathbf{m}}) \\notin \\mathcal{C}(C) \\end{array} \\right. \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.153 + ], + "angle": 0, + "content": "where \\(\\mathcal{C}(C)\\) is the convex hull of the gravity-projected contact vertices for \\(\\tau = 10\\mathrm{cm}\\). For efficiency reasons, we formulate this computation as the solution of a convex system via interior point linear programming [3]; see Sup. Mat." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.163, + 0.688, + 0.178 + ], + "angle": 0, + "content": "4.3. IPMAN Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.186, + 0.895, + 0.608 + ], + "angle": 0, + "content": "IPMAN-R. We evaluate our regressor, IPMAN-R, on RICH and H3.6M and summarize our results in Tab. 1. We refer to our regression baseline as \\(\\mathrm{HMR}^*\\), which is HMR trained on the same datasets as IPMAN-R. Since we train with paired 3D datasets, we do not use HMR's discriminator during training. Both IP terms individually improve upon the baseline method. Their joint use, however, shows the largest improvement. For example, on RICH the MPJPE improves by \\(3.5\\mathrm{mm}\\) and the PVE by \\(2.5\\mathrm{mm}\\). It is particularly interesting that IPMAN-R improves upon the baseline on H3.6M, a dataset with largely dynamic poses and little body-ground contact. We also significantly outperform (\\(\\sim 12\\%\\)) the MPJPE of optimization approaches that use the ground plane, Zou et al. [110] (69.9 mm) and Zanfir et al. [98] (69.0 mm), on H3.6M. Some video-based methods [49, 96] achieve better MPJPE (52.5 and 56.7 mm, respectively) on H3.6M. However, they initialize with a stronger kinematic predictor [45, 50] and require video frames as input. Further, they use heuristics to estimate body weight and non-physical residual forces to correct for contact estimation errors. In contrast, IPMAN is a single-frame method, models complex full-body pressure and does not rely on approximate body weight to compute CoM. Qualitatively, Fig. 5 (top) shows that IPMAN-R's reconstructions are more stable and contain physically-plausible body-ground contact. While HMR is not SOTA, it is simple, isolating the benefits of our new IP formulation. These terms can also be added to methods with more modern backbones and architectures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.895, + 0.822 + ], + "angle": 0, + "content": "IPMAN-O. Our optimization method, IPMAN-O, also improves upon the baseline optimization method, SMPLify-XMC, on all evaluation metrics (see Tab. 2). We note that adding \\( \\mathcal{L}_{\\mathrm{stability}} \\) independently improves the PVE, but not the joint metrics (PA-MPJPE, MPJPE) and BoSE. This can be explained by the dependence of our IP terms on the relative position of the mesh surface to the ground-plane. Since joint metrics do not capture surfaces, they may get worse. Similar trends on joint metrics have been reported in the context of hand-object contact [29, 79] and body-scene contact [27]. We show qualitative results in Fig. 5 (bottom). While both SMPLify-XMC [59] and IPMAN-O achieve similar image projections, another view reveals that our results are more stable and physically plausible w.r.t. the ground." 
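For concreteness, the sketch below shows one way to evaluate the BoSE check of Eq. (10) that underlies the stability numbers reported in Tab. 1 and Tab. 2; it is an illustrative approximation, not the authors' evaluation code, which solves the inclusion test as a linear program [3]. It assumes the world ground plane is z = 0 (so the gravity projection simply drops the z coordinate) and uses a SciPy Delaunay triangulation for the point-in-convex-hull test; the function name bose and its arguments are hypothetical.

```python
# Illustrative BoSE check (Eq. 10), assuming the ground plane is z = 0 in world coordinates.
# Not the paper's LP-based formulation; a Delaunay point-in-hull test is used here instead.
import numpy as np
from scipy.spatial import Delaunay

def bose(surface_points, com, tau=0.10):
    """surface_points: (N_U, 3) world-frame surface points; com: (3,) pCoM.
    Returns 1 if the gravity-projected CoM lies inside the BoS, else 0."""
    heights = surface_points[:, 2]                            # h(v_i) w.r.t. the z = 0 ground plane
    contacts = surface_points[np.abs(heights) < tau][:, :2]   # gravity-projected contacts g(v_i)
    if contacts.shape[0] < 3:
        return 0                                              # too few contacts for a 2D hull
    try:
        hull = Delaunay(contacts)                             # triangulates the convex hull of contacts
    except Exception:
        return 0                                              # degenerate (e.g., collinear) contact set
    return int(hull.find_simplex(com[None, :2])[0] >= 0)      # inside some simplex => inside the hull
```

Averaged over a dataset, this 0/1 indicator yields the BoSE percentages reported in the tables above.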
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.81, + 0.848 + ], + "angle": 0, + "content": "4.4. Pressure, CoP and CoM Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We evaluate our estimated pressure, CoP and CoM against the MoYo ground truth. For pressure evaluation, we measure Intersection-over-Union (IoU) between our esti" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4718" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.091, + 0.891, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.243, + 0.891, + 0.258 + ], + "angle": 0, + "content": "Figure 4. Representative examples illustrating the variation and complexity of 3D pose and body-ground contact in our new MoYo dataset." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.274, + 0.891, + 0.579 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.58, + 0.891, + 0.814 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.819, + 0.893, + 0.875 + ], + "angle": 0, + "content": "Figure 5. Qualitative evaluation of IPMAN-R and IPMAN-O on the RICH and MoYo datasets. The first column shows the input images of a subject doing various sports poses. The second and third block of columns show the baseline's and our results, respectively. In each block, the first image shows the estimated mesh overlayed on the image (camera view), the second image shows the estimated mesh in the world frame (side view), and the last image shows the estimated pressure map with the CoM (in pink) and the CoP (in green)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.515, + 0.968 + ], + "angle": 0, + "content": "4719" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.078, + 0.089, + 0.472, + 0.317 + ], + "angle": 0, + "content": "
| Method | RICH |  |  |  | Human3.6M |  |
|  | MPJPE ↓ | PA-MPJPE ↓ | PVE ↓ | BoSE (%) ↑ | MPJPE ↓ | PA-MPJPE ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| PhysCap [74] | - | - | - | - | 113.0 | 68.9 |
| DiffPhy [21] | - | - | - | - | 81.7 | 55.6 |
| Zou et al. [110] | - | - | - | - | 69.9 | - |
| Xie et al. [89] | - | - | - | - | 68.1 | - |
| VIBE [45] | - | - | - | - | 61.3 | 43.1 |
| SimPoE [96] | - | - | - | - | 56.7 | 41.6 |
| D&D [49] | - | - | - | - | 52.5 | 35.5 |
| HMR [42] | - | - | - | - | 88.0 | 56.8 |
| Zanfir et al. [98] | - | - | - | - | 69.0 | - |
| SPIN [47] | 112.2 | 71.5 | 129.5 | 54.7 | 62.3 | 41.9 |
| PARE [46] | 107.0 | 73.1 | 125.0 | 74.4 | - | - |
| CLIFF [51] | 107.0 | 67.2 | 122.3 | 67.6 | 81.4 | 52.1 |
| Finetuning on Human3.6M |  |  |  |  |  |  |
| HMR* [42] | - | - | - | - | 62.1 | 41.6 |
| IPMAN-R (Ours) | - | - | - | - | 60.7 (-1.4) | 41.1 (-0.5) |
| Finetuning on all datasets |  |  |  |  |  |  |
| HMR* [42] | 82.5 | 48.3 | 92.4 | 62.0 | 61.6 | 41.9 |
| HMR* [42] + Lground | 80.9 | 47.8 | 89.9 | 66.5 | 61.9 | 41.8 |
| HMR* [42] + Lstability | 81.0 | 47.5 (-0.8) | 90.8 | 69.6 | 61.2 | 41.9 |
| IPMAN-R (Ours) | 79.0 (-3.5) | 47.6 | 89.9 (-2.5) | 71.2 (+9.2) | 60.6 (-1.0) | 41.8 (-0.1) |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.323, + 0.472, + 0.407 + ], + "angle": 0, + "content": "Table 1. Top to Bottom: Comparisons with video-based and single-frame regression methods. IPMAN-R outperforms the single-frame baselines across all benchmarks. * indicates training hyperparameters and datasets are identical to IPMAN-R. All units are in mm except BoSE. Bold denotes best results (per category), and parentheses show improvement over the baseline. Q Zoom in" + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.426, + 0.473, + 0.557 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.559, + 0.47, + 0.602 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison of estimated vs the ground-truth pressure. The ground-truth CoP is shown in green and the estimated CoP is shown in yellow. Pressure heatmap colors as per Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.47, + 0.82 + ], + "angle": 0, + "content": "mated and ground-truth pressure heatmaps. We also compute the CoP error as the Euclidean distance between estimated and ground-truth CoP. We obtain an IoU of 0.32 and a CoP error of \\(57.3\\mathrm{mm}\\). Figure 6 shows a qualitative visualization of the estimated pressure compared to the ground truth. For CoM evaluation, we find a \\(53.3\\mathrm{mm}\\) difference between our pCoM and the CoM computed by the commercial software, Vicon Plug-in Gait. Unlike Vicon's estimate, our pCoM does not require anthropometric measurements and takes into account the full 3D body shape. For details about the evaluation protocol and comparisons with alternative CoM formulations, see Sup. Mat." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Physics Simulation. To evaluate stability, we run a post-hoc physics simulation in \"Bullet\" [10] and measure the displacement of the estimated meshes; a small displacement denotes a stable pose. IPMAN-O produces \\(14.8\\%\\) more stable bodies than the baseline [59]; for details see Sup. Mat." + }, + { + "type": "table", + "bbox": [ + 0.5, + 0.089, + 0.895, + 0.169 + ], + "angle": 0, + "content": "
| Method | MoYo |  |  |  |
|  | MPJPE ↓ | PA-MPJPE ↓ | PVE ↓ | BoSE (%) ↑ |
| --- | --- | --- | --- | --- |
| SMPLify-XMC [59] | 75.3 | 36.5 | 16.8 | 98.0 |
| SMPLify-XMC [59] + Lground | 73.3 | 36.2 | 14.5 | 98.2 |
| SMPLify-XMC [59] + Lstability | 88.5 | 38.6 | 15.3 | 97.8 |
| IPMAN-O (Ours) | 71.9 (-3.4) | 34.3 (-2.2) | 11.4 (-5.4) | 98.6 (+0.5) |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.174, + 0.894, + 0.217 + ], + "angle": 0, + "content": "Table 2. Evaluation of IPMAN-O and SMPLify-XMC [59] (optimization-based) on MoYo. Bold shows the best performance, and parentheses show the improvement over SMPLify-XMC." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.238, + 0.618, + 0.254 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.264, + 0.895, + 0.596 + ], + "angle": 0, + "content": "Existing 3D HPS estimation methods recover SMPL meshes that align well with the input image, but are often physically implausible. To address this, we propose IPMAN, which incorporates intuitive-physics in 3D HPS estimation. Our IP terms encourage stable poses, promote realistic floor support, and reduce body-floor penetration. The IP terms exploit the interaction between the body CoM, CoP, and BoS - key elements used in stability analysis. To calculate the CoM of SMPL meshes, IPMAN uses on a novel formulation that takes part-specific mass contributions into account. Additionally, IPMAN estimates proxy pressure maps directly from images, which is useful in computing CoP. IPMAN is simple, differentiable, and compatible with both regression and optimization methods. IPMAN goes beyond previous physics-based methods to reason about arbitrary full-body contact with the ground. We show that IPMAN improves both regression and optimization baselines across all metrics on existing datasets and MoYo. MoYo uniquely comprises synchronized multi-view video, SMPL-X bodies in complex poses, and measurements for pressure maps and body CoM. Qualitative results show the effectiveness of IPMAN in recovering physically plausible meshes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.895, + 0.75 + ], + "angle": 0, + "content": "While IPMAN addresses body-floor contact, future work should incorporate general body-scene contact and diverse supporting surfaces by integrating 3D scene reconstruction. In this work, the proposed IP terms are designed to help static poses and we show that they do not hurt dynamic poses. However, the large body of biomechanical literature analyzing dynamic poses could be leveraged for activities like walking, jogging, running, etc. It would be interesting to extend IPMAN beyond single-person scenarios by exploiting the various physical constraints offered by multiple subjects." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.894, + 0.884 + ], + "angle": 0, + "content": "Acknowledgements. We thank T. Alexiadis, T. McConnell, C. Gallatz, M. Höschle, S. Polikovsky, C. Mendoza, Y. Fincan, L. Sanchez and M. Safroshkin for data collection, G. Becherini for MoSh++, Z. Fang, V. Choutas and all of Perceiving Systems for fruitful discussions. This work was funded by the International Max Planck Research School for Intelligent Systems (IMPRS-IS) and in part by the German Federal Ministry of Education and Research (BMBF), Tübingen AI Center, FKZ: 01IS18039B." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.887, + 0.885, + 0.901 + ], + "angle": 0, + "content": "Disclosure. 
https://files.is.tue.mpg.de/black/CoI_CVPR_2023.txt" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4720" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.116, + 0.47, + 0.169 + ], + "angle": 0, + "content": "[1] Ijaz Akhter and Michael J. Black. Pose-conditioned joint angle limits for 3D human pose reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 1446-1455, 2015. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.172, + 0.471, + 0.225 + ], + "angle": 0, + "content": "[2] Riza Alp Güler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 7297-7306, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.228, + 0.471, + 0.282 + ], + "angle": 0, + "content": "[3] Erling D. Andersen and Knud D. Andersen. The Mosek interior point optimizer for linear programming: An implementation of the homogeneous algorithm. In High Performance Optimization, 2000. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.284, + 0.469, + 0.338 + ], + "angle": 0, + "content": "[4] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In Computer Vision and Pattern Recognition (CVPR), pages 3686-3693, 2014. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.341, + 0.469, + 0.394 + ], + "angle": 0, + "content": "[5] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. SCAPE: Shape completion and animation of people. Transactions on Graphics (TOG), 24:408-416, 2005. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.397, + 0.469, + 0.45 + ], + "angle": 0, + "content": "[6] Michael Barnett-Cowan, Roland W. Fleming, Manish Singh, and Heinrich H. Bulthoff. Perceived object stability depends on multisensory estimates of gravity. PLOS ONE, 6(4):1-5, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.453, + 0.469, + 0.521 + ], + "angle": 0, + "content": "[7] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J. Black. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In European Conference on Computer Vision (ECCV), volume 9909, pages 561-578, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.524, + 0.469, + 0.576 + ], + "angle": 0, + "content": "[8] Marcus A. Brubaker, David J. Fleet, and Aaron Hertzmann. Physics-based person tracking using the anthropomorphic walker. International Journal of Computer Vision (IJCV), 87(1-2):140-155, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.096, + 0.579, + 0.469, + 0.619 + ], + "angle": 0, + "content": "[9] Marcus A. Brubaker, Leonid Sigal, and David J. Fleet. Estimating contact dynamics. In Computer Vision and Pattern Recognition (CVPR), pages 2389-2396, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.622, + 0.469, + 0.647 + ], + "angle": 0, + "content": "[10] Bullet real-time physics simulation. https://pybullet.org.1,8" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.65, + 0.469, + 0.717 + ], + "angle": 0, + "content": "[11] Zhe Cao, Gines Hidalgo, Tomas Simon, Shih-En Wei, and Yaser Sheikh. 
OpenPose: Realtime multi-person 2D pose estimation using part affinity fields. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(1):172–186, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.72, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[12] Yixin Chen, Sai Kumar Dwivedi, Michael J. Black, and Dimitrios Tzionas. Detecting human-object contact in images. June 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.762, + 0.469, + 0.83 + ], + "angle": 0, + "content": "[13] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Monocular expressive body regression through body-driven attention. In European Conference on Computer Vision (ECCV), volume 12355, pages 20-40, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.832, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[14] Henry M. Clever, Zackory M. Erickson, Ariel Kapusta, Greg Turk, C. Karen Liu, and Charles C. Kemp. Bodies at rest: 3D human pose and shape estimation from a pressure image using synthetic data. In Computer Vision and Pattern Recognition (CVPR), pages 6214-6223, 2020. 3, 4" + }, + { + "type": "list", + "bbox": [ + 0.088, + 0.116, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.093, + 0.892, + 0.16 + ], + "angle": 0, + "content": "[15] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. SMPLicit: Topology-aware generative model for clothed people. In Computer Vision and Pattern Recognition (CVPR), pages 11875-11885, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.165, + 0.892, + 0.244 + ], + "angle": 0, + "content": "[16] Taosha Fan, Kalyan Vasudev Alwala, Donglai Xiang, Weipeng Xu, Todd Murphey, and Mustafa Mukadam. Revitalizing optimization for 3D human pose and shape estimation: A sparse constrained formulation. In International Conference on Computer Vision (ICCV), pages 11437-11446, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.249, + 0.892, + 0.316 + ], + "angle": 0, + "content": "[17] Zicong Fan, Omid Taheri, Dimitrios Tzionas, Muhammed Kocabas, Manuel Kaufmann, Michael J. Black, and Otmar Hilliges. ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.32, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[18] Yao Feng, Vasileios Choutas, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Collaborative regression of expressive bodies using moderation. In International Conference on 3D Vision (3DV), pages 792-804, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.377, + 0.892, + 0.458 + ], + "angle": 0, + "content": "[19] Mihai Fieraru, Mihai Zanfir, Teodor Alexandru Szente, Eduard Gabriel Bazavan, Vlad Olaru, and Cristian Sminchisescu. REMIPS: Physically consistent 3D reconstruction of multiple interacting people under weak supervision. In Conference on Neural Information Processing Systems (NeurIPS), volume 34, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.462, + 0.892, + 0.528 + ], + "angle": 0, + "content": "[20] Mihai Fieraru, Mihai Zanfir, Silviu-Cristian Pirlea, Vlad Olaru, and Cristian Sminchisescu. AIfit: Automatic 3D human-interpretable feedback models for fitness training. In Computer Vision and Pattern Recognition (CVPR), pages 9919–9928, 2021. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.533, + 0.892, + 0.599 + ], + "angle": 0, + "content": "[21] Erik Gartner, Mykhaylo Andriluka, Erwin Coumans, and Cristian Sminchisescu. Differentiable dynamics for articulated 3D human motion reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 13180-13190, 2022. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.604, + 0.892, + 0.67 + ], + "angle": 0, + "content": "[22] Erik Gartner, Mykhaylo Andriluka, Hongyi Xu, and Cristian Sminchisescu. Trajectory optimization for physics-based reconstruction of 3D human pose from monocular video. In Computer Vision and Pattern Recognition (CVPR), pages 13096-13105, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.675, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[23] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Computer Vision and Pattern Recognition (CVPR), pages 7450-7459, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.732, + 0.892, + 0.786 + ], + "angle": 0, + "content": "[24] Shanyan Guan, Jingwei Xu, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Bilevel online adaptation for out-of-domain human mesh reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 10472-10481, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.789, + 0.892, + 0.841 + ], + "angle": 0, + "content": "[25] Riza Alp Güler and Iasonas Kokkinos. HoloPose: Holistic 3D human reconstruction in-the-wild. In Computer Vision and Pattern Recognition (CVPR), pages 10876-10886, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.846, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[26] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In Computer Vision and Pattern Recognition (CVPR), pages 3193-3203, 2020. 5" + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.514, + 0.968 + ], + "angle": 0, + "content": "4721" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.093, + 0.47, + 0.148 + ], + "angle": 0, + "content": "[27] Mohamed Hassan, Vasileios Choutas, Dimitrios Tzionas, and Michael J. Black. Resolving 3D human pose ambiguities with 3D scene constraints. In International Conference on Computer Vision (ICCV), pages 2282-2292, 2019. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.15, + 0.472, + 0.207 + ], + "angle": 0, + "content": "[28] Mohamed Hassan, Partha Ghosh, Joachim Tesch, Dimitrios Tzionas, and Michael J. Black. Populating 3D scenes by learning human-scene interaction. In Computer Vision and Pattern Recognition (CVPR), pages 14708-14718, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.208, + 0.472, + 0.276 + ], + "angle": 0, + "content": "[29] Yana Hasson, Gül Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J. Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In Computer Vision and Pattern Recognition (CVPR), pages 11807-11816, 2019. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.278, + 0.472, + 0.318 + ], + "angle": 0, + "content": "[30] Havok: Customizable, fully multithreaded, and highly optimized physics simulation. http://www.havok.com. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.321, + 0.472, + 0.389 + ], + "angle": 0, + "content": "[31] Eric Heiden, David Millard, Erwin Coumans, Yizhou Sheng, and Gaurav S. Sukhatme. NeuralSim: Augmenting differentiable simulators with neural networks. In International Conference on Robotics and Automation (ICRA), pages 9474-9481, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.391, + 0.472, + 0.432 + ], + "angle": 0, + "content": "[32] At L. Hof. The equations of motion for a standing human reveal three mechanisms for balance. Journal of Biomechanics, 40(2):451-457, 2007. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.434, + 0.472, + 0.475 + ], + "angle": 0, + "content": "[33] At L. Hof. The \"extrapolated center of mass\" concept suggests a simple control of balance in walking. Human movement science, 27(1):112-125, 2008. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.477, + 0.472, + 0.517 + ], + "angle": 0, + "content": "[34] At L. Hof, M. G. J. Gazendam, and Sinke W. E. The condition for dynamic stability. Journal of Biomechanics, 38(1):1-8, 2005. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.52, + 0.472, + 0.602 + ], + "angle": 0, + "content": "[35] Chun-Hao Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael Black. Capturing and inferring dense full-body human-scene contact. In Computer Vision and Pattern Recognition (CVPR), pages 13264-13275, 2022. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.604, + 0.472, + 0.646 + ], + "angle": 0, + "content": "[36] Leslie Ikemoto, Okan Arikan, and David Forsyth. Knowing when to put your foot down. In Symposium on Interactive 3D Graphics (SI3D), page 49-53, 2006. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.648, + 0.472, + 0.716 + ], + "angle": 0, + "content": "[37] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3D human sensing in natural environments. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 36(7):1325-1339, 2014. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.718, + 0.472, + 0.786 + ], + "angle": 0, + "content": "[38] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 5578-5587, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.789, + 0.472, + 0.843 + ], + "angle": 0, + "content": "[39] Sam Johnson and Mark Everingham. Clustered pose and nonlinear appearance models for human pose estimation. In British Machine Vision Conference (BMVC), pages 1-11, 2010. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.846, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[40] Hanbyul Joo, Natalia Neverova, and Andrea Vedaldi. Exemplar fine-tuning for 3D human pose fitting towards in-the-wild 3D human pose estimation. In International Conference on 3D Vision (3DV), pages 42-52, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.093, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.093, + 0.894, + 0.148 + ], + "angle": 0, + "content": "[41] Hanbyul Joo, Tomas Simon, and Yaser Sheikh. Total capture: A 3D deformation model for tracking faces, hands, and bodies. 
In Computer Vision and Pattern Recognition (CVPR), pages 8320-8329, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.149, + 0.894, + 0.204 + ], + "angle": 0, + "content": "[42] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Computer Vision and Pattern Recognition (CVPR), pages 7122-7131, 2018. 3, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.205, + 0.894, + 0.259 + ], + "angle": 0, + "content": "[43] Angjoo Kanazawa, Jason Y. Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. Computer Vision and Pattern Recognition (CVPR), pages 5607-5616, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.261, + 0.894, + 0.302 + ], + "angle": 0, + "content": "[44] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In Computer Vision and Pattern Recognition (CVPR), pages 1705-1715, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.303, + 0.894, + 0.357 + ], + "angle": 0, + "content": "[45] Muhammed Kocabas, Nikos Athanasiou, and Michael J. Black. VIBE: Video inference for human body pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 5252-5262, 2020. 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.359, + 0.894, + 0.413 + ], + "angle": 0, + "content": "[46] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. PARE: Part attention regressor for 3D human body estimation. In International Conference on Computer Vision (ICCV), pages 11127-11137, 2021. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.414, + 0.894, + 0.482 + ], + "angle": 0, + "content": "[47] Nikos Kolotouros, Georgios Pavlakos, Michael J. Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In International Conference on Computer Vision (ICCV), pages 2252-2261, 2019. 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.484, + 0.894, + 0.538 + ], + "angle": 0, + "content": "[48] Nikos Kolotouros, Georgios Pavlakos, and Kostas Dani-ilidis. Convolutional mesh regression for single-image human shape reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 4496–4505, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.539, + 0.894, + 0.594 + ], + "angle": 0, + "content": "[49] Jiefeng Li, Siyuan Bian, Chao Xu, Gang Liu, Gang Yu, and Cewu Lu. D&D: Learning human dynamics from dynamic camera. In European Conference on Computer Vision (ECCV), 2022. 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.596, + 0.894, + 0.663 + ], + "angle": 0, + "content": "[50] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. HybrIK: A hybrid analytical-neural inverse kinematics solution for 3D human pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 3383-3393, 2021. 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.665, + 0.894, + 0.719 + ], + "angle": 0, + "content": "[51] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information in full frames into human pose and shape estimation. In ECCV, volume 13665, pages 590-606, 2022. 1, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.721, + 0.894, + 0.774 + ], + "angle": 0, + "content": "[52] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. 
In Computer Vision and Pattern Recognition (CVPR), pages 1954-1963, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.776, + 0.894, + 0.845 + ], + "angle": 0, + "content": "[53] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C. Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), volume 8693, pages 740-755, 2014. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.846, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[54] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multi-person linear model. Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4722" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[55] Yiyue Luo, Yunzhu Li, Michael Foshey, Wan Shou, Pratyusha Sharma, Tomás Palacios, Antonio Torralba, and Wojciech Matusik. Intelligent carpet: Inferring 3D human pose from tactile signals. In Computer Vision and Pattern Recognition (CVPR), pages 11255-11265, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.162, + 0.472, + 0.232 + ], + "angle": 0, + "content": "[56] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal V. Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. International Conference on 3D Vision (3DV), pages 506-516, 2017. 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.233, + 0.472, + 0.313 + ], + "angle": 0, + "content": "[57] Dushyant Mehta, Srinath Sridhar, Oleksandr Sotnychenko, Helge Rhodin, Mohammad Shafiei, Hans-Peter Seidel, Weipeng Xu, Dan Casas, and Christian Theobalt. VNect: Real-time 3D human pose estimation with a single RGB camera. Transactions on Graphics (TOG), 36(4):44:1-44:14, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.315, + 0.472, + 0.384 + ], + "angle": 0, + "content": "[58] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision (ECCV), volume 12352, pages 752-768, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.385, + 0.472, + 0.44 + ], + "angle": 0, + "content": "[59] Lea Müller, Ahmed A. A. Osman, Siyu Tang, Chun-Hao P. Huang, and Michael J. Black. On self-contact and human pose. In Computer Vision and Pattern Recognition (CVPR), pages 9990-9999, 2021. 1, 2, 3, 4, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.441, + 0.472, + 0.482 + ], + "angle": 0, + "content": "[60] NVIDIA PhysX: A scalable multi-platform physics simulation solution. https://developer.nvidia.com/physx-sdk.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.483, + 0.472, + 0.523 + ], + "angle": 0, + "content": "[61] Yi-Chung Pai. Movement termination and stability in standing. Exercise and sport sciences reviews, 31(1):19-25, 2003. 2,4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.525, + 0.472, + 0.594 + ], + "angle": 0, + "content": "[62] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. 
AGORA: Avatars in geography optimized for regression analysis. In Computer Vision and Pattern Recognition (CVPR), pages 13468-13478, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.595, + 0.472, + 0.676 + ], + "angle": 0, + "content": "[63] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 10975-10985, 2019. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.678, + 0.472, + 0.734 + ], + "angle": 0, + "content": "[64] Xue Bin Peng, Pieter Abbeel, Sergey Levine, and Michiel van de Panne. DeepMimic: Example-guided deep reinforcement learning of physics-based character skills. Transactions on Graphics (TOG), 37(4):1-14, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.735, + 0.472, + 0.803 + ], + "angle": 0, + "content": "[65] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J. Guibas. HuMoR: 3D human motion model for robust pose estimation. In International Conference on Computer Vision (ICCV), pages 11468-11479, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.804, + 0.472, + 0.872 + ], + "angle": 0, + "content": "[66] Davis Rempe, Leonidas J. Guibas, Aaron Hertzmann, Bryan Russell, Ruben Villegas, and Jamei Yang. Contact and human dynamics from monocular video. In European Conference on Computer Vision (ECCV), volume 12350, pages 71-87, 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.873, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[67] Ralph Tyrell Rockafellar. Convex analysis. Princeton university press, 2015. 4" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.092, + 0.894, + 0.148 + ], + "angle": 0, + "content": "[68] Grégory Rogez, James Steven Supancic, and Deva Ramanan. Understanding everyday hands in action from RGB-D images. In International Conference on Computer Vision (ICCV), pages 3889-3897, 2015. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.149, + 0.894, + 0.218 + ], + "angle": 0, + "content": "[69] Yu Rong, Takaaki Shiratori, and Hanbyul Joo. FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In International Conference on Computer Vision Workshops (ICCVw), pages 1749-1759, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.22, + 0.894, + 0.275 + ], + "angle": 0, + "content": "[70] Nadine Rueegg, Shashank Tripathi, Konrad Schindler, Michael J. Black, and Silvia Zuffi. BITE: Beyond priors for improved three-D dog pose estimation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.277, + 0.894, + 0.346 + ], + "angle": 0, + "content": "[71] Jesse Scott, Bharadwaj Ravichandran, Christopher Funk, Robert T Collins, and Yanxi Liu. From image to stability: Learning dynamics from human pose. In European Conference on Computer Vision (ECCV), volume 12368, pages 536-554, 2020. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.348, + 0.894, + 0.417 + ], + "angle": 0, + "content": "[72] Mingyi Shi, Kfir Aberman, Andreas Aristidou, Taku Komura, Dani Lischinski, Daniel Cohen-Or, and Baoquan Chen. MotioNet: 3D human motion reconstruction from monocular video with skeleton consistency. 
Transactions on Graphics (TOG), 40(1):1:1-1:15, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.419, + 0.893, + 0.474 + ], + "angle": 0, + "content": "[73] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3D human motion capture with physical awareness. Transactions on Graphics (TOG), 40(4), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.476, + 0.894, + 0.532 + ], + "angle": 0, + "content": "[74] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. PhysCap: Physically plausible monocular 3D motion capture in real time. Transactions on Graphics (TOG), 39(6):235:1-235:16, 2020. 1, 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.533, + 0.893, + 0.588 + ], + "angle": 0, + "content": "[75] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J. Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In International Conference on Computer Vision (ICCV), pages 11179-11188, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.59, + 0.894, + 0.657 + ], + "angle": 0, + "content": "[76] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In International Conference on Computer Vision (ICCV), pages 5348-5357, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.66, + 0.894, + 0.702 + ], + "angle": 0, + "content": "[77] Yating Tian, Hongwen Zhang, Yebin Liu, and limin Wang. Recovering 3D human mesh from monocular images: A survey. arXiv:2203.01923, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.704, + 0.894, + 0.771 + ], + "angle": 0, + "content": "[78] Shashank Tripathi, Siddhant Ranade, Ambrish Tyagi, and Amit K. Agrawal. PoseNet3D: Learning temporally consistent 3D human pose via knowledge distillation. In International Conference on 3D Vision (3DV), pages 311-321, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.774, + 0.894, + 0.843 + ], + "angle": 0, + "content": "[79] Dimitrios Tzionas, Luca Ballan, Abhilash Srikantha, Pablo Aponte, Marc Pollefeys, and Juergen Gall. Capturing hands in action using discriminative salient points and physics simulation. International Journal of Computer Vision (IJCV), 118:172-193, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.845, + 0.893, + 0.901 + ], + "angle": 0, + "content": "[80] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In International Conference on Computer Vision (ICCV), pages 9720-9729, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.092, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.515, + 0.968 + ], + "angle": 0, + "content": "4723" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.092, + 0.472, + 0.161 + ], + "angle": 0, + "content": "[81] Timo von Marcard, Roberto Henschel, Michael J. Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3D human pose in the wild using IMUs and a moving camera. In European Conference on Computer Vision (ECCV), volume 11214, pages 614-631, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.163, + 0.472, + 0.218 + ], + "angle": 0, + "content": "[82] Marek Vondrak, Leonid Sigal, and Odest Chadwicke Jenkins. Physical simulation for probabilistic motion tracking. 
In Computer Vision and Pattern Recognition (CVPR), pages 1-8, 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.22, + 0.472, + 0.275 + ], + "angle": 0, + "content": "[83] Eric W. Weisstein. Triangle point picking. https://mathworld.wolfram.com/TrianglePointPicking.html, 2014. From MathWorld - A Wolfram Web Resource. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.277, + 0.472, + 0.332 + ], + "angle": 0, + "content": "[84] Zhenzhen Weng and Serena Yeung. Holistic 3D human and scene mesh estimation from single view images. In Computer Vision and Pattern Recognition (CVPR), pages 334-343, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.334, + 0.472, + 0.375 + ], + "angle": 0, + "content": "[85] David A. Winter. A.B.C. (Anatomy, Biomechanics and Control) of balance during standing and walking. Waterloo Biomechanics, 1995. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.377, + 0.472, + 0.417 + ], + "angle": 0, + "content": "[86] David A. Winter. Human balance and posture control during standing and walking. Gait & Posture, 3(4):193-214, 1995. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.42, + 0.472, + 0.475 + ], + "angle": 0, + "content": "[87] Donglai Xiang, Hanbyul Joo, and Yaser Sheikh. Monocular total capture: Posing face, body, and hands in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 10957-10966, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.477, + 0.472, + 0.545 + ], + "angle": 0, + "content": "[88] Donglai Xiang, Fabian Prada, Chenglei Wu, and Jessica Hodgins. MonoClothCap: Towards temporally coherent clothing capture from monocular RGB video. In International Conference on 3D Vision (3DV), pages 322-332, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.548, + 0.472, + 0.616 + ], + "angle": 0, + "content": "[89] Kevin Xie, Tingwu Wang, Umar Iqbal, Yunrong Guo, Sanja Fidler, and Florian Shkurti. Physics-based human motion estimation and synthesis from videos. In International Conference on Computer Vision (ICCV), pages 11532-11541, 2021. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.618, + 0.472, + 0.673 + ], + "angle": 0, + "content": "[90] Xianghui Xie, Bharat Lal Bhatnagar, and Gerard Pons-Moll. CHORE: Contact, human and object reconstruction from a single RGB image. In European Conference on Computer Vision (ECCV), 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.675, + 0.472, + 0.743 + ], + "angle": 0, + "content": "[91] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal Integration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.746, + 0.472, + 0.815 + ], + "angle": 0, + "content": "[92] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. GHUM & GHUML: Generative 3D human shape and articulated pose models. In Computer Vision and Pattern Recognition (CVPR), pages 6183-6192, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.817, + 0.472, + 0.87 + ], + "angle": 0, + "content": "[93] Masanobu Yamamoto and Katsutoshi Yagishita. Scene constraints-aided tracking of human body. In Computer Vision and Pattern Recognition (CVPR), pages 151–156, 2000. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.873, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[94] Hongwei Yi, Chun-Hao P. Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J. Black. MIME: Human-" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "aware 3D scene generation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.122, + 0.894, + 0.164 + ], + "angle": 0, + "content": "[95] Ye Yuan and Kris Kitani. 3D ego-pose estimation via imitation learning. In European Conference on Computer Vision (ECCV), volume 11220, pages 735–750, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.165, + 0.894, + 0.22 + ], + "angle": 0, + "content": "[96] Ye Yuan, Shih-En Wei, Tomas Simon, Kris Kitani, and Jason Saragih. SimPoE: Simulated character control for 3D human pose estimation. In Computer Vision and Pattern Recognition (CVPR), pages 7159–7169, 2021. 1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.222, + 0.894, + 0.303 + ], + "angle": 0, + "content": "[97] Andrei Zanfir, Eduard Gabriel Bazavan, Hongyi Xu, William T Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Weakly supervised 3D human pose and shape reconstruction with normalizing flows. In European Conference on Computer Vision (ECCV), pages 465-481, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.307, + 0.894, + 0.375 + ], + "angle": 0, + "content": "[98] Andrei Zanfir, Elisabella Maroiniu, and Cristian Sminchisescu. Monocular 3D pose and shape estimation of multiple people in natural scenes – the importance of multiple scene constraints. In Computer Vision and Pattern Recognition (CVPR), pages 2148–2157, 2018. 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.377, + 0.894, + 0.445 + ], + "angle": 0, + "content": "[99] Ailing Zeng, Lei Yang, Xuan Ju, Jiefeng Li, Jianyi Wang, and Qiang Xu. SmoothNet: A plug-and-play network for refining human poses in videos. In European Conference on Computer Vision (ECCV), volume 13665, pages 625-642, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.894, + 0.503 + ], + "angle": 0, + "content": "[100] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3D human mesh regression with dense correspondence. In Computer Vision and Pattern Recognition (CVPR), 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.894, + 0.56 + ], + "angle": 0, + "content": "[101] Cha Zhang and Tsuhan Chen. Efficient feature extraction for 2d/3d objects in mesh representation. In Proceedings 2001 International Conference on Image Processing (Cat. No. 01CH37205), volume 3, pages 935-938. IEEE, 2001. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.894, + 0.631 + ], + "angle": 0, + "content": "[102] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In International Conference on Computer Vision (ICCV), pages 11426-11436, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.632, + 0.894, + 0.687 + ], + "angle": 0, + "content": "[103] Jianfeng Zhang, Dongdong Yu, Jun Hao Liew, Xuecheng Nie, and Jiashi Feng. Body meshes as points. 
In Computer Vision and Pattern Recognition (CVPR), pages 546-556, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.894, + 0.758 + ], + "angle": 0, + "content": "[104] Jason Y. Zhang, Sam Pepose, Hanbyul Joo, Deva Ramanan, Jitendra Malik, and Angjoo Kanazawa. Perceiving 3D human-object spatial arrangements from a single image in the wild. In European Conference on Computer Vision (ECCV), volume 12357, pages 34-51, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.76, + 0.894, + 0.815 + ], + "angle": 0, + "content": "[105] Siwei Zhang, Yan Zhang, Federica Bogo, Marc Pollefeys, and Siyu Tang. Learning motion priors for 4D human body capture in 3D scenes. In International Conference on Computer Vision (ICCV), pages 11343-11353, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.894, + 0.871 + ], + "angle": 0, + "content": "[106] Tianshu Zhang, Buzhen Huang, and Yangang Wang. Object-occluded human shape and pose estimation from a single color image. In Computer Vision and Pattern Recognition (CVPR), pages 7374–7383, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[107] Ce Zheng, Wenhan Wu, Chen Chen, Taojiannan Yang, Sijie Zhu, Ju Shen, Nasser Kehtarnavaz, and Mubarak Shah." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.516, + 0.968 + ], + "angle": 0, + "content": "4724" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.092, + 0.47, + 0.119 + ], + "angle": 0, + "content": "Deep learning-based human pose estimation: A survey. arXiv:2012.13392, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[108] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Computer Vision and Pattern Recognition (CVPR), pages 5745-5753, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.205 + ], + "angle": 0, + "content": "[109] Yuxiao Zhou, Marc Habermann, Ikhsanul Habibie, Ayush Tewari, Christian Theobalt, and Feng Xu. Monocular real" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.093, + 0.894, + 0.133 + ], + "angle": 0, + "content": "time full body capture with inter-part correlations. In Computer Vision and Pattern Recognition (CVPR), pages 4811-4822, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[110] Yuliang Zou, Jimei Yang, Duygu Ceylan, Jianming Zhang, Federico Perazzi, and Jia-Bin Huang. Reducing footskate in human motion reconstruction with ground contact constraints. In Winter Conference on Applications of Computer Vision (WACV), pages 459-468, 2020. 
3, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.957, + 0.514, + 0.968 + ], + "angle": 0, + "content": "4725" + } + ] +] \ No newline at end of file diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_origin.pdf b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..df5784f31e709120d81d2aaf3b2ebd6bf2b1dd13 --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/23a54e7d-fed1-435b-b507-df1bdee18df4_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6fb61a3cf89b5974899426568b6b017c1dd3237f97964520ea932a2288ce062 +size 6701092 diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/full.md b/2023/3D Human Pose Estimation via Intuitive Physics/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6fa155b69bf66d1a84f6e12f49a50ce4b34f8564 --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/full.md @@ -0,0 +1,374 @@ +# 3D Human Pose Estimation via Intuitive Physics + +Shashank Tripathi1 Lea Müller1 Chun-Hao P. Huang1 Omid Taheri1 Michael J. Black1 Dimitrios Tzionas2* + +$^{1}$ Max Planck Institute for Intelligent Systems, Tübingen, Germany $^{2}$ University of Amsterdam, the Netherlands {stripathi, lmueller2, chuang2, otaheri, black}@tue.mpg.de d.tzionas@uva.nl + +![](images/df44fdeea43adfe7d6345099c566049078468d7fd6b71b85fce44c44ceee9761.jpg) +Figure 1. Estimating a 3D body from an image is ill-posed. A recent, representative, optimization method [59] produces bodies that are in unstable poses, penetrate the floor, or hover above it. In contrast, IPMAN estimates a 3D body that is physically plausible. To achieve this, IPMAN uses novel intuitive-physics (IP) terms that exploit inferred pressure heatmaps on the body, the Center of Pressure (CoP), and the body's Center of Mass (CoM). Body heatmap colors encode per-vertex pressure. + +# Abstract + +Estimating 3D humans from images often produces implausible bodies that lean, float, or penetrate the floor. Such methods ignore the fact that bodies are typically supported by the scene. A physics engine can be used to enforce physical plausibility, but these are not differentiable, rely on unrealistic proxy bodies, and are difficult to integrate into existing optimization and learning frameworks. In contrast, we exploit novel intuitive-physics (IP) terms that can be inferred from a 3D SMPL body interacting with the scene. Inspired by biomechanics, we infer the pressure heatmap on the body, the Center of Pressure (CoP) from the heatmap, and the SMPL body's Center of Mass (CoM). With these, we develop IPMAN, to estimate a 3D body from a color image in a "stable" configuration by encouraging plausible floor contact and overlapping CoP and CoM. Our IP terms are intuitive, easy to implement, fast to compute, differentiable, and can be integrated into existing optimization and regression methods. We evaluate IPMAN on standard datasets and MoYo, a new dataset with synchronized multi-view images, ground-truth 3D bodies with complex poses, body-floor contact, CoM and pressure. IPMAN produces more plausible results than the state of the art, improving accuracy for static poses, while not hurting dynamic ones. 
Code and data are available for research at https://ipman.is.tue.mpg.de. + +# 1. Introduction + +To understand humans and their actions, computers need automatic methods to reconstruct the body in 3D. Typically, the problem entails estimating the 3D human pose and shape (HPS) from one or more color images. State-of-the-art (SOTA) methods [46, 51, 75, 102] have made rapid progress, estimating 3D humans that align well with image features in the camera view. Unfortunately, the camera view can be deceiving. When viewed from other directions, or when placed in a 3D scene, the estimated bodies are often physically implausible: they lean, hover, or penetrate the ground (see Fig. 1 top). This is because most SOTA methods reason about humans in isolation; they ignore that people move in a scene, interact with it, and receive physical support by contacting it. This is a deal-breaker for inherently 3D applications, such as biomechanics, augmented/virtual reality (AR/VR) and the "metaverse"; these need humans to be reconstructed faithfully and physically plausibly with respect to the scene. For this, we need a method that estimates the 3D human on a ground plane from a color image in a configuration that is physically "stable". + +This is naturally related to reasoning about physics and support. There exist many physics simulators [10, 30, 60] for games, movies, or industrial simulations, and using these for plausible HPS estimation is increasingly popular [66, 74, 96]. However, existing simulators come with two significant + +problems: (1) They are typically non-differentiable black boxes, making them incompatible with existing optimization and learning frameworks. Consequently, most methods [64, 95, 96] use them with reinforcement learning to evaluate whether a certain input has the desired outcome, but with no ability to reason about how changing inputs affects the outputs. (2) They rely on an unrealistic proxy body model for computational efficiency; bodies are represented as groups of rigid 3D shape primitives. Such proxy models are crude approximations of human bodies, which, in reality, are much more complex and deform non-rigidly when they move and interact. Moreover, proxies need a priori known body dimensions that are kept fixed during simulation. Also, these proxies differ significantly from the 3D body models [41, 54, 92] used by SOTA HPS methods. Thus, current physics simulators are too limited for use in HPS. + +What we need, instead, is a solution that is fully differentiable, uses a realistic body model, and seamlessly integrates physical reasoning into HPS methods (both optimization- and regression-based). To this end, instead of using full physics simulation, we introduce novel intuitive-physics (IP) terms that are simple, differentiable, and compatible with a body model like SMPL [54]. Specifically, we define terms that exploit an inferred pressure heatmap of the body on the ground plane, the Center of Pressure (CoP) that arises from the heatmap, and the SMPL body's Center of Mass (CoM) projected on the floor; see Fig. 2 for a visualization. Intuitively, bodies whose CoM lie close to their CoP are more stable than ones with a CoP that is further away (see Fig. 5); the former suggests a static pose, e.g. standing or holding a yoga pose, while the latter a dynamic pose, e.g., walking. + +We use these intuitive-physics terms in two ways. First, we incorporate them in an objective function that extends SMPLify-XMC [59] to optimize for body poses that are stable. 
We also incorporate the same terms in the training loss for an HPS regressor, called IPMAN (Intuitive-Physics-based huMAN). In both formulations, the intuitive-physics terms encourage estimates of body shape and pose that have sufficient ground contact, while penalizing interpenetration and encouraging an overlap of the CoP and CoM. + +Our intuitive-physics formulation is inspired by work in biomechanics [32, 33, 61], which characterizes the stability of humans in terms of relative positions between the CoP, the CoM, and the Base of Support (BoS). The BoS is defined as the convex hull of all contact regions on the floor (Fig. 2). Following past work [6, 71, 74], we use the "inverted pendulum" model [85, 86] for body balance; this considers poses as stable if the gravity-projected CoM onto the floor lies inside the BoS. Similar ideas are explored by Scott et al. [71] but they focus on predicting a foot pressure heatmap from 2D or 3D body joints. We go significantly further to exploit stability in training an HPS regressor. This requires two technical novelties. + +![](images/e48583f23ac2984857a497de7716a6d94a45d0c15d116bf9a57e7f8d01a5ad2f.jpg) +Figure 2. (1) A SMPL mesh sitting. (2) The inferred pressure map on the ground (color-coded heatmap), CoP (green), CoM (pink), and Base of Support (BoS, yellow polygon). (3) Segmentation of SMPL into $N_P = 10$ parts, used for computing CoM; see Sec. 3.2. + +The first involves computing CoM. To this end, we uniformly sample points on SMPL's surface, and calculate each body part's volume. Then, we compute CoM as the average of all uniformly sampled points weighted by the corresponding part volumes. We denote this as pCoM, standing for "part-weighted CoM". Importantly, pCoM takes into account SMPL's shape, pose, and all blend shapes, while it is also computationally efficient and differentiable. + +The second involves estimating CoP directly from the image, without access to a pressure sensor. Our key insight is that the soft tissues of human bodies deform under pressure, e.g., the buttocks deform when sitting. However, SMPL does not model this deformation; it penetrates the ground instead of deforming. We use the penetration depth as a proxy for pressure [68]; deeper penetration means higher pressure. With this, we estimate a pressure field on SMPL's mesh and compute the CoP as the pressure-weighted average of the surface points. Again this is differentiable. + +For evaluation, we use a standard HPS benchmark (Human3.6M [37]), but also the RICH [35] dataset. However, these datasets have limited interactions with the floor. We thus capture a novel dataset, MoYo, of challenging yoga poses, with synchronized multi-view video, ground-truth SMPL-X [63] meshes, pressure sensor measurements, and body CoM. IPMAN, in both of its forms, and across all datasets, produces more accurate and stable 3D bodies than the state of the art. Importantly, we find that IPMAN improves accuracy for static poses, while not hurting dynamic ones. This makes IPMAN applicable to everyday motions. + +To summarize: (1) We develop IPMAN, the first HPS method that integrates intuitive physics. (2) We infer biomechanical properties such as CoM, CoP and body pressure. (3) We define novel intuitive-physics terms that can be easily integrated into HPS methods. (4) We create MoYo, a dataset that uniquely has complex poses, multi-view video, and ground-truth bodies, pressure, and CoM. (5) We show that our IP terms improve HPS accuracy and physical plausibility. 
(6) Data and code are available for research. + +# 2. Related Work + +3D Human Pose and Shape (HPS) from images. Existing methods fall into two major categories: (1) non- + +parametric methods that reconstruct a free-form body representation, e.g., joints [1, 56, 57] or vertices [52, 58, 100], and (2) parametric methods that use statistical body models [5, 25, 41, 54, 63, 92, 97]. The latter methods focus on various aspects, such as expressiveness [13, 18, 63, 69, 87], clothed bodies [15, 88, 91], videos [24, 45, 78, 99], and multiperson scenarios [38, 75, 103], to name a few. + +Inference is done by either optimization or regression. Optimization-based methods [7, 16, 63, 87, 88] fit a body model to image evidence, such as joints [11], dense vertex correspondences [2] or 2D segmentation masks [23]. Regression-based methods [42, 44, 48, 51, 76, 102, 106, 109] use a loss similar to the objective function of optimization methods to train a network to infer body model parameters. Several methods combine optimization and regression in a training loop [47, 50, 59]. Recent methods [24, 40] fine-tune pre-trained networks at test time w.r.t. an image or a sequence, retaining flexibility (optimization) while being less sensitive to initialization (regression). + +Despite their success, these methods reason about the human in "isolation", without taking the surrounding scene into account; see [77, 107] for a comprehensive review. + +Contact-only scene constraints. A common way of using scene information is to consider body-scene contact [12, 17, 27, 28, 65, 84, 90, 94, 98, 104, 105, 110]. Yamamoto et al. [93] and others [19, 27, 70, 98, 104] ensure that estimated bodies have plausible scene contact. For videos, encouraging foot-ground contact reduces foot skating [36, 65, 72, 105, 110]. Weng et al. [84] use contact in estimating the pose and scale of scene objects, while Villegas et al. [80] preserve self- and ground contact for motion retargeting. + +These methods typically take two steps: (1) detecting contact areas on the body and/or scene and (2) minimizing the distance between these. Surfaces are typically assumed to be in contact if their distance is below a threshold and their relative motion is small [27, 35, 98, 104]. + +Many methods only consider contact between the ground and the foot joints [66, 110] or other end-effectors [65]. In contrast, IPMAN uses the full 3D body surface and exploits this to compute the pressure, CoP and CoM. Unlike binary contact, this is differentiable, making the IP terms useful for training HPS regressors. + +Physics-based scene constraints. Early work uses physics to estimate walking [8, 9] or full body motion [82]. Recent methods [21, 22, 66, 73, 74, 89, 96] regress 3D humans and then refine them through physics-based optimization. Physics is used for two primary reasons: (1) to regularise dynamics, reducing jitter [49, 66, 74, 96], and (2) to discourage interpenetration and encourage contact. Since contact events are discontinuous, the pipeline is either not end-to-end trainable or trained with reinforcement learning [64, 96]. Xie et al. [89] propose differentiable physics-inspired objectives based on a soft contact penalty, while DiffPhy [21] uses a + +differentiable physics simulator [31] during inference. Both methods apply the objectives in an optimization scheme, while IPMAN is applied to both optimization and regression. PhysCap [74] considers a pose as balanced, when the CoM is projected within the BoS. Rempe et al. 
[66] impose PD control on the pelvis, which they treat as a CoM. Scott et al. [71] regress foot pressure from 2D and 3D joints for stability analysis but do not use it to improve HPS. + +All these methods use unrealistic bodies based on shape primitives. Some require known body dimensions [66, 74, 96] while others estimate body scale [49, 89]. In contrast, IPMAN computes CoM, CoP and BoS directly from the SMPL mesh. Clever et al. [14] and Luo et al. [55] estimate 3D body pose but from pressure measurements, not from images. Their task is fundamentally different from ours. + +# 3. Method + +# 3.1. Preliminaries + +Given a color image, I, we estimate the parameters of the camera and the SMPL body model [54]. + +Body model. SMPL maps pose, $\theta$ , and shape, $\beta$ , parameters to a 3D mesh, $M(\theta, \beta)$ . The pose parameters, $\theta \in \mathbb{R}^{24 \times 6}$ , are rotations of SMPL's 24 joints in a 6D representation [108]. The shape parameters, $\beta \in \mathbb{R}^{10}$ , are the first 10 PCA coefficients of SMPL's shape space. The generated mesh $M(\theta, \beta)$ consists of $N_V = 6890$ vertices, $V \in \mathbb{R}^{N_V \times 3}$ , and $N_F = 13776$ faces, $F \in \mathbb{R}^{N_F \times 3 \times 3}$ . + +Note that our regression method (IPMAN-R, Sec. 3.4.1) uses SMPL, while our optimization method (IPMAN-O, Sec. 3.4.2) uses SMPL-X [63], to match the models used by the baselines. For simplicity of exposition, we refer to both models as SMPL when the distinction is not important. + +Camera. For the regression-based IPMAN-R, we follow the standard convention [42, 43, 47] and use a weak perspective camera with a 2D scale, $s$ , translation, $\mathbf{t}^c = (t_x^c,t_y^c)$ , fixed camera rotation, $\mathbf{R}^c = \mathbf{I}_3$ , and a fixed focal length $(f_{x},f_{y})$ . The root-relative body orientation $\mathbf{R}^b$ is predicted by the neural network, but body translation stays fixed at $\mathbf{t}^b = \mathbf{0}$ as it is absorbed into the camera's translation. + +For the optimization-based IPMAN-O, we follow Müller et al. [59] to use the full-perspective camera model and optimize the focal lengths $(f_x, f_y)$ , camera rotation $\mathbf{R}^c$ and camera translation $\mathbf{t}^c$ . The principal point $(o_x, o_y)$ is the center of the input image. $\mathbf{K}$ is the intrinsic matrix storing focal lengths and the principal point. We assume that the body rotation $\mathbf{R}^b$ and translation $\mathbf{t}^b$ are absorbed into the camera parameters, thus, they stay fixed as $\mathbf{R}^b = \mathbf{I}_3$ and $\mathbf{t}^b = \mathbf{0}$ . Using the camera, we project a 3D point $\mathbf{X} \in \mathbb{R}^3$ to an image point $\mathbf{x} \in \mathbb{R}^2$ through $\mathbf{x} = \mathbf{K}(\mathbf{R}^c\mathbf{X} + \mathbf{t}^c)$ . + +Ground plane and gravity-projection. We assume that the gravity direction is perpendicular to the ground plane in the world coordinate system. Thus, for any arbitrary point in + +3D space, $\pmb{u} \in \mathbb{R}^3$ , its gravity-projected point, $\pmb{u}' = g(\pmb{u}) \in \mathbb{R}^3$ , is the projection of $\pmb{u}$ along the plane normal $\pmb{n}$ onto the ground plane, and $g(.)$ is the projection operator. The function $h(\pmb{u})$ returns the signed "height" of a point $\pmb{u}$ with respect to the ground; i.e., the signed distance from $\pmb{u}$ to the ground plane along the gravity direction, where $h(\pmb{u}) < 0$ if $\pmb{u}$ is below the ground and $h(\pmb{u}) > 0$ if $\pmb{u}$ is above it. + +# 3.2. 
Stability Analysis + +We follow the biomechanics literature [32, 33, 61] and Scott et al. [71] to define three fundamental elements for stability analysis: We use the Newtonian definition for the "Center of Mass" (CoM); i.e., the mass-weighted average of particle positions. The "Center of Pressure" (CoP) is the ground-reaction force's point of application. The "Base of Support" (BoS) is the convex hull of all body-ground contacts. Below, we define intuitive-physics (IP) terms using the inferred CoM and CoP. BoS is only used for evaluation. + +Body Center of Mass (CoM). We introduce a novel CoM formulation that is fully differentiable and considers the per-part mass contributions, dubbed as pCoM; see Sup. Mat. for alternative CoM definitions. To compute this, we first segment the template mesh into $N_P = 10$ parts $P_i \in \mathcal{P}$ ; see Fig. 2. We do this once offline, and keep the segmentation fixed during training and optimization. Assuming a shaped and posed SMPL body, the per-part volumes $\mathcal{V}^{P_i}$ are calculated by splitting the SMPL mesh into parts. + +However, mesh splitting is a non-differentiable operation. Thus, it cannot be used for either training a regressor (IPMAN-R) or for optimization (IPMAN-O). Instead, we work with the full SMPL mesh and use differentiable "close-translate-fill" operations for each body part on the fly. First, for each part $P$ , we extract boundary vertices $\mathcal{B}_P$ and add in the middle a virtual vertex $\boldsymbol{v}_g$ , where $\boldsymbol{v}_g = \sum_{j \in \mathcal{B}_P} \boldsymbol{v}_j / |\mathcal{B}_P|$ . Then, for the $\mathcal{B}_P$ and $\boldsymbol{v}_g$ vertices, we add virtual faces to "close" $P$ and make it watertight. Next, we "translate" $P$ such that the part centroid $\mathbf{c}_P = \sum_{j \in P} \boldsymbol{v}_j / |P|$ is at the origin. Finally, we "fill" the centered $P$ with tetrahedrons by connecting the origin with each face vertex. Then, the part volume, $\mathcal{V}^{\mathcal{P}}$ , is the sum of all tetrahedron volumes [101]. + +To create a uniform distribution of surface vertices, we uniformly sample $N_U = 20000$ surface points $V_U \in \mathbb{R}^{N_U \times 3}$ on the template SMPL mesh using the Triangle Point Picking method [83]. Given $V_U$ and the template SMPL mesh vertices $V_T$ , we follow [59], and analytically compute a sparse linear regressor $\mathbf{W} \in \mathbb{R}^{N_U \times N_V}$ such that $V_U = \mathbf{W}V_T$ . During training and optimization, given an arbitrary shaped and posed mesh with vertices $V$ , we obtain uniformly-sampled mesh surface points as $V_U = \mathbf{W}V$ . Each surface point, $v_i$ , is assigned to the body part, $P_{v_i}$ , corresponding to the face, $F_{v_i}$ , it was sampled from. + +Finally, the part-weighted pCoM is computed as a + +volume-weighted mean of the mesh surface points: + +$$ +\bar {\mathbf {m}} = \frac {\sum_ {i = 1} ^ {N _ {U}} \mathcal {V} ^ {P _ {v _ {i}}} v _ {i}}{\sum_ {i = 1} ^ {N _ {U}} \mathcal {V} ^ {P _ {v _ {i}}}}, \tag {1} +$$ + +where $\mathcal{V}^{P_{v_i}}$ is the volume of the part $P_{v_i}\in \mathcal{P}$ to which $v_{i}$ is assigned. This formulation is fully differentiable and can be employed with any existing 3D HPS estimation method. + +Note that computing CoM (or volume) from uniformly sampled surface points does not work (see Sup. Mat.) because it assumes that mass, $M$ , is proportional to surface area, $S$ . 
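As a concrete illustration of the differentiable "fill" step described above, the short sketch below (in PyTorch, with hypothetical tensor names) sums signed tetrahedron volumes for a single closed part, assuming the "close" and "translate" steps have already been applied:

```python
import torch

def part_volume(verts, faces_part):
    """Sketch of the differentiable 'fill' step: volume of one closed,
    origin-centered body part as a sum of signed tetrahedron volumes.

    verts:      (V, 3) part vertices after the 'close' and 'translate'
                steps (part centroid at the origin).
    faces_part: (F, 3) long tensor of vertex indices of the part's
                triangles, including the virtual closing faces.
    """
    v0 = verts[faces_part[:, 0]]
    v1 = verts[faces_part[:, 1]]
    v2 = verts[faces_part[:, 2]]
    # Signed volume of the tetrahedron (origin, v0, v1, v2).
    signed = (v0 * torch.cross(v1, v2, dim=1)).sum(dim=1) / 6.0
    # For a watertight, consistently oriented part the signed volumes
    # sum to the enclosed volume; abs() guards against a globally
    # flipped face orientation.
    return signed.sum().abs()
```

Running this once per part yields the volumes $\mathcal{V}^{P_i}$ that serve as weights in Eq. (1).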
Instead, our pCoM computes mass from volume, $\mathcal{V}$ , via the standard density equation, $M = \rho \mathcal{V}$ , while our close-translate-fill operation computes the volume of deformable bodies in an efficient and differentiable manner. + +Center of Pressure (CoP). Recovering a pressure heatmap from an image without using hardware, such as pressure sensors, is a highly ill-posed problem. However, stability analysis requires knowledge of the pressure exerted on the human body by the supporting surfaces, like the ground. Going beyond binary contact, Rogez et al. [68] estimate 3D forces by detecting intersecting vertices between hand and object meshes. Clever et al. [14] recover pressure maps by allowing articulated body models to deform a soft pressure-sensing virtual mattress in a physics simulation. + +In contrast, we observe that, while real bodies interacting with rigid objects (e.g., the floor) deform under contact, SMPL does not model such soft-tissue deformations. Thus, the body mesh penetrates the contacting object surface and the amount of penetration can be a proxy for pressure; a deeper penetration implies higher pressure. With the height $h(v_{i})$ (see Sec. 3.1) of a mesh surface point $v_{i}$ with respect to the ground plane $\Pi$ , we define a pressure field to compute the per-point pressure $\rho_{i}$ as: + +$$ +\rho_ {i} = \left\{ \begin{array}{l l} 1 - \alpha h (v _ {i}) & \text {i f} h (v _ {i}) < 0, \\ e ^ {- \gamma h (v _ {i})} & \text {i f} h (v _ {i}) \geq 0, \end{array} \right. \tag {2} +$$ + +where $\alpha$ and $\gamma$ are scalar hyperparameters set empirically. We approximate soft tissue via a "spring" model and "penetrating" pressure field using Hooke's Law. Some pressure is also assigned to points above the ground to allow tolerance for footwear, but this decays quickly. Finally, we compute the CoP, $\overline{\mathbf{s}}$ , as + +$$ +\overline {{\mathbf {s}}} = \frac {\sum_ {i = 1} ^ {N _ {U}} \rho_ {i} v _ {i}}{\sum_ {i = 1} ^ {N _ {U}} \rho_ {i}}. \tag {3} +$$ + +Again, note that this term is fully differentiable. + +Base of Support (BoS). In biomechanics [34, 85], BoS is defined as the "supporting area" or the possible range of the CoP on the supporting surface. Here, we define BoS as the convex hull [67] of all gravity-projected body-ground contact points. In detail, we first determine all such contacts + +by selecting the set of mesh surface points $v_{i}$ close to the ground, and then gravity-project them onto the ground to obtain $C = \{g(v_{i}) \mid |h(v_{i})| < \tau\}$ . The BoS is then defined as the convex hull $\mathcal{C}$ of $C$ . + +# 3.3. Intuitive-Physics Losses + +Stability loss. The "inverted pendulum" model of human balance [85, 86] considers the relationship between the CoM and BoS to determine stability. Simply put, for a given shape and pose, if the body CoM, projected on the gravity-aligned ground plane, lies within the BoS, the pose is considered stable. While this definition of stability is useful for evaluation, using it in a loss or energy function for 3D HPS estimation results in sparse gradients (see Sup. Mat.). Instead, we define the stability criterion as: + +$$ +\mathcal {L} _ {\text {s t a b i l i t y}} = \| g (\bar {\mathbf {m}}) - g (\bar {\mathbf {s}}) \| _ {2}, \tag {4} +$$ + +where $g(\bar{\mathbf{m}})$ and $g(\bar{\mathbf{s}})$ are the gravity-projected CoM and CoP, respectively. + +Ground contact loss. As shown in Fig. 
1, 3D HPS methods minimize the 2D joint reprojection error and do not consider the plausibility of body-ground contact. Ignoring this can result in interpenetrating or hovering meshes. Inspired by self-contact losses [19,59] and hand-object contact losses [26,29], we define two ground losses, namely pushing, $\mathcal{L}_{\mathrm{push}}$ , and pulling, $\mathcal{L}_{\mathrm{pull}}$ , that take into account the height, $h(v_{i})$ , of a vertex, $v_{i}$ , with respect to the ground plane. For $h(v_{i}) < 0$ , i.e., for vertices under the ground plane, $\mathcal{L}_{\mathrm{push}}$ discourages body-ground penetrations. For $h(v_{i}) \geq 0$ , i.e., for hovering meshes, $\mathcal{L}_{\mathrm{pull}}$ encourages the vertices that lie close to the ground to "snap" into contact with it. Note that the losses are non-conflicting as they act on disjoint sets of vertices. Then, the ground contact loss is: + +$$ +\mathcal {L} _ {\text {g r o u n d}} = \mathcal {L} _ {\text {p u l l}} + \mathcal {L} _ {\text {p u s h}}, \text {w i t h} \tag {5} +$$ + +$$ +\mathcal {L} _ {\text {p u l l}} = \alpha_ {1} \tanh \left(\frac {h \left(v _ {i}\right)}{\alpha_ {2}}\right) ^ {2} \quad \text {i f} h \left(v _ {i}\right) \geq 0, \text {a n d} \tag {6} +$$ + +$$ +\mathcal {L} _ {\text {p u s h}} = \beta_ {1} \tanh \left(\frac {h \left(v _ {i}\right)}{\beta_ {2}}\right) ^ {2} \quad \text {i f} h \left(v _ {i}\right) < 0. \tag {7} +$$ + +# 3.4. IPMAN + +We use our new IP losses for two tasks: (1) We extend HMR [42] to develop IPMAN-R, a regression-based HPS method. (2) We extend SMPLify-XMC [59] to develop IPMAN-O, an optimization-based method. Note that IPMAN-O uses a reference ground plane, while IPMAN-R uses the ground plane only for training but not at test time. It leverages the known ground in 3D datasets, and thus, does not require additional data beyond past HPS methods. + +# 3.4.1 IPMAN-R + +Most HPS methods are trained with a mix of direct supervision using 3D datasets [37,56,81] and 2D reprojection losses + +![](images/a2ec3ec21e7a632c477fa14a5e119000616e9b378ad23a5088e74f081f69c9ca.jpg) +Figure 3. IPMAN-R architecture. First, the HMR regressor estimates camera translation and SMPL parameters for an input image. These parameters are used to generate the SMPL mesh in the camera frame, $M_{c}$ . To transform the mesh from camera into world coordinates $(M_{c} \rightarrow M_{w})$ , IPMAN-R uses the ground-truth camera rotation, $R_{w}^{c}$ , and translation, $t_{w}^{c}$ . The IP losses, $\mathcal{L}_{\mathrm{ground}}$ and $\mathcal{L}_{\mathrm{stability}}$ , are applied on the mesh in the world coordinate system. + +using image datasets [4, 39, 53]. The 3D losses, however, are calculated in the camera frame, ignoring scene information and physics. IPMAN-R extends HMR [42] with our intuitive-physics terms; see Fig. 3 for the architecture. For training, we use the known camera coordinates and the world ground plane in 3D datasets. + +As described in Sec. 3.1 (paragraph "Camera"), HMR infers the camera translation, $\mathbf{t}^c$ , and SMPL parameters, $\theta$ and $\beta$ , in the camera coordinates assuming $\mathbf{R}^c = \mathbf{I}_3$ and $\mathbf{t}^b = \mathbf{0}$ . Ground truth 3D joints and SMPL parameters are used to supervise the inferred mesh $M_c$ in the camera frame. However, 3D datasets also provide the ground, albeit in the world frame. 
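To make these terms concrete, the following is a minimal PyTorch-style sketch of Eqs. (1)-(7) for a single body, assuming the sampled surface points are already expressed in world coordinates with the ground plane at $z = 0$ and the up-axis along $+z$; the function name, tensor layout, aggregation by summation, and hyperparameter defaults are illustrative placeholders rather than the exact released implementation:

```python
import torch

def intuitive_physics_terms(points, part_volumes, point_part,
                            alpha=1.0, gamma=10.0,
                            a1=1.0, a2=0.15, b1=1.0, b2=0.15):
    """Sketch of the IP terms (Eqs. 1-7) for one mesh.

    points:       (N, 3) sampled surface points V_U = W V in world
                  coordinates, ground plane at z = 0, gravity along -z.
    part_volumes: (P,) differentiable per-part volumes from the
                  close-translate-fill step.
    point_part:   (N,) long tensor, body-part index of each point.
    All hyperparameter values here are illustrative placeholders.
    """
    # Part-weighted Center of Mass (Eq. 1): volume-weighted mean.
    w_vol = part_volumes[point_part]                      # (N,)
    com = (w_vol[:, None] * points).sum(0) / w_vol.sum()  # (3,)

    # Signed height h(v_i) with respect to the ground plane.
    h = points[:, 2]                                      # (N,)

    # Pressure field (Eq. 2): Hooke-like below the ground,
    # exponential decay above it.
    pressure = torch.where(h < 0, 1.0 - alpha * h, torch.exp(-gamma * h))

    # Center of Pressure (Eq. 3): pressure-weighted mean.
    cop = (pressure[:, None] * points).sum(0) / pressure.sum()

    # Stability loss (Eq. 4): distance between the gravity-projected
    # CoM and CoP (projection = drop the z component).
    loss_stability = torch.norm(com[:2] - cop[:2])

    # Ground-contact losses (Eqs. 5-7), acting on disjoint point sets
    # and summed over the respective points.
    above, below = h >= 0, h < 0
    loss_pull = (a1 * torch.tanh(h[above] / a2) ** 2).sum()
    loss_push = (b1 * torch.tanh(h[below] / b2) ** 2).sum()
    loss_ground = loss_pull + loss_push

    return loss_stability, loss_ground
```

Since every step is a composition of differentiable tensor operations, the two returned terms can simply be weighted and added to an existing optimization or training objective.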
To leverage the known ground, we transform the predicted body orientation, $\mathbf{R}^b$ , to world coordinates using the ground-truth camera rotation, $\mathbf{R}_w^c$ , as $\mathbf{R}_w^b = \mathbf{R}_w^{c\top}\mathbf{R}^b$ . Then, we compute the body translation in world coordinates as $\mathbf{t}_w^b = -\mathbf{t}^c + \mathbf{t}_w^c$ . With the predicted mesh and ground plane in world coordinates, we add the IP terms, $\mathcal{L}_{\mathrm{stability}}$ and $\mathcal{L}_{\mathrm{ground}}$ , for HPS training as follows: + +$$ +\mathcal {L} _ {\mathrm {I P M A N - R}} \left(\boldsymbol {\theta}, \boldsymbol {\beta}, \mathbf {t} ^ {c}\right) = \lambda_ {2 D} \mathcal {L} _ {2 D} + \lambda_ {3 D} \mathcal {L} _ {3 D} + \lambda_ {\mathrm {S M P L}} \mathcal {L} _ {\mathrm {S M P L}} + +$$ + +$$ +\lambda_ {\mathrm {s}} \mathcal {L} _ {\text {s t a b i l i t y}} + \lambda_ {\mathrm {g}} \mathcal {L} _ {\text {g r o u n d}}, \tag {8} +$$ + +where $\lambda_{\mathrm{s}}$ and $\lambda_{\mathrm{g}}$ are the weights for the respective IP terms. For training (data augmentation, hyperparameters, etc), we follow Kolotouros et al. [47]; for more details see Sup. Mat. + +# 3.4.2 IPMAN-O + +To fit SMPL-X to 2D image keypoints, SMPLify-XMC [59] initializes the fitting process by exploiting the self-contact and global-orientation of a known/presented 3D mesh. We posit that the presented pose contains further information, such as stability, pressure and contact with the ground-plane. IPMAN-O uses this insight to apply stability and ground + +contact losses. The IPMAN-O objective is: + +$$ +\begin{array}{l} E _ {\text {I P M A N - O}} (\boldsymbol {\beta}, \boldsymbol {\theta}, \boldsymbol {\Phi}) = E _ {J 2 D} + \lambda_ {\beta} E _ {\beta} + \lambda_ {\theta_ {h}} E _ {\theta_ {h}} + \\ \lambda_ {\tilde {\theta} _ {b}} E _ {\tilde {\theta} _ {b}} + \lambda_ {\tilde {C}} E _ {\tilde {C}} + \\ \lambda_ {s} E _ {\text {s t a b i l i t y}} + \lambda_ {g} E _ {\text {g r o u n d}}. \tag {9} \\ \end{array} +$$ + +$\Phi$ denotes the camera parameters: rotation $\mathbf{R}^c$ , translation $\mathbf{t}^c$ , and focal length, $(f_x, f_y)$ . $E_{J2D}$ is a 2D joint loss, $E_\beta$ and $E_{\theta_h}$ are $L_2$ body shape and hand pose priors. $E_{\tilde{\theta}_b}$ and $E_{\tilde{C}}$ are pose and contact terms w.r.t. the presented 3D pose and contact (see [59] for details). $E_S$ and $E_G$ are the stability and ground contact losses from Sec. 3.3. Since the estimated mesh is in the same coordinate system as the presented mesh and the ground-plane, we directly apply IP losses without any transformations. For details see Sup. Mat. + +# 4. Experiments + +# 4.1. Training and Evaluation Datasets + +Human3.6M [37]. A dataset of 3D human keypoints and RGB images. The poses are limited in terms of challenging physics, focusing on common activities like walking, discussing, smoking, or taking photos. + +RICH [35]. A dataset of videos with accurate marker-less motion-captured 3D bodies and 3D scans of scenes. The images are more natural than Human3.6M and Fit3D [20]. We consider sequences with meaningful body-ground interaction. For the list of sequences, see Sup. Mat. + +Other datasets. Similar to [47], for training we use 3D keypoints from MPI-INF-3DHP [56] and 2D keypoints from image datasets such as COCO [53], MPII [4] and LSP [39]. + +# 4.1.1 MoCap Yoga (MoYo) Dataset + +We capture a trained Yoga professional in 200 highly complex poses (see Fig. 
4) using a synchronized MoCap system, pressure mat, and a multi-view RGB video system with 8 static, calibrated cameras; for details see Sup. Mat. The dataset contains $\sim$ 1.75M RGB frames in 4K resolution with ground-truth SMPL-X [63], pressure and CoM. Compared to the Fit3D [20] and PosePrior [1] datasets, MoYo is more challenging; it has extreme poses, strong self-occlusion, and significant body-ground and self-contact. + +# 4.2. Evaluation Metrics + +We use standard 3D HPS metrics: The Mean Per-Joint Position Error (MPJPE), its Procrustes Aligned version (PA-MPJPE), and the Per-Vertex Error (PVE) [62]. + +BoS Error (BoSE). To evaluate stability, we propose a new metric called BoS Error (BoSE). Following the definition of stability (Sec. 3.3) we define: + +$$ +\operatorname {B o S E} = \left\{ \begin{array}{l l} 1 & g (\bar {\mathbf {m}}) \in \mathcal {C} (C) \\ 0 & g (\bar {\mathbf {m}}) \notin \mathcal {C} (C) \end{array} \right. \tag {10} +$$ + +where $\mathcal{C}(C)$ is the convex hull of the gravity-projected contact vertices for $\tau = 10\mathrm{cm}$ . For efficiency reasons, we formulate this computation as the solution of a convex system via interior point linear programming [3]; see Sup. Mat. + +# 4.3. IPMAN Evaluation + +IPMAN-R. We evaluate our regressor, IPMAN-R, on RICH and H3.6M and summarize our results in Tab. 1. We refer to our regression baseline as $\mathrm{HMR}^*$ which is HMR trained on the same datasets as IPMAN-R. Since we train with paired 3D datasets, we do not use HMR's discriminator during training. Both IP terms individually improve upon the baseline method. Their joint use, however, shows the largest improvement. For example, on RICH the MPJPE improves by $3.5\mathrm{mm}$ and the PVE by $2.5\mathrm{mm}$ . It is particularly interesting that IPMAN-R improves upon the baseline on H3.6M, a dataset with largely dynamic poses and little body-ground contact. We also significantly outperform ( $\sim 12\%$ ) the MPJPE of optimization approaches that use the ground plane, Zou et al. [110] (69.9 mm) and Zanfir et al. [98] (69.0 mm), on H3.6M. Some video-based methods [49, 96] achieve better MPJPE (56.7 and 52.5 resp.) on H3.6M. However, they initialize with a stronger kinematic predictor [45, 50] and require video frames as input. Further, they use heuristics to estimate body weight and non-physical residual forces to correct for contact estimation errors. In contrast, IPMAN is a single-frame method, models complex full-body pressure and does not rely on approximate body weight to compute CoM. Qualitatively, Fig. 5 (top) shows that IPMAN-R's reconstructions are more stable and contain physically-plausible body-ground contact. While HMR is not SOTA, it is simple, isolating the benefits of our new IP formulation. These terms can also be added to methods with more modern backbones and architectures. + +IPMAN-O. Our optimization method, IPMAN-O, also improves upon the baseline optimization method, SMPLify-XMC, on all evaluation metrics (see Tab. 2). We note that adding $L_{\mathrm{stability}}$ independently improves the PVE, but not joint metrics (PA-MPJPE, MPJPE) and BoSE. This can be explained by the dependence of our IP terms on the relative position of the mesh surface to the ground-plane. Since joint metrics do not capture surfaces, they may get worse. Similar trends on joint metrics have been reported in the context of hand-object contact [29, 79] and body-scene contact [27]. We show qualitative results in Fig. 5 (bottom). 
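For reference, the BoSE indicator of Eq. (10) reduces to a point-in-convex-hull test; the NumPy/SciPy sketch below uses a simple Delaunay-based membership check as a stand-in for the interior-point linear program we use for efficiency, and the function name, frame convention, and default threshold are illustrative:

```python
import numpy as np
from scipy.spatial import Delaunay

def bose_indicator(points_world, com_world, tau=0.10):
    """Sketch of BoSE (Eq. 10): 1 if the gravity-projected CoM lies
    inside the Base of Support, else 0.

    points_world: (N, 3) mesh surface points in world coordinates,
                  assuming the ground plane is z = 0.
    com_world:    (3,) body center of mass in the same frame.
    tau:          contact threshold in meters (10 cm as in Sec. 4.2).
    """
    # Contact points: within tau of the ground, gravity-projected
    # onto it (keep only x, y).
    contact = points_world[np.abs(points_world[:, 2]) < tau][:, :2]
    if contact.shape[0] < 3:
        # No well-defined support polygon; degenerate (e.g., collinear)
        # contact sets would also need special handling in practice.
        return 0
    # BoS = convex hull of the projected contacts; a point lies inside
    # the hull iff it falls in some simplex of the Delaunay
    # triangulation of the contact points.
    hull = Delaunay(contact)
    inside = hull.find_simplex(com_world[None, :2])[0] >= 0
    return int(inside)
```

Averaging this 0/1 indicator over all frames of a dataset gives the BoSE percentages reported in Tab. 1 and Tab. 2.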
While both SMPLify-XMC [59] and IPMAN-O achieve similar image projections, another view reveals that our results are more stable and physically plausible w.r.t. the ground. + +# 4.4. Pressure, CoP and CoM Evaluation + +We evaluate our estimated pressure, CoP and CoM against the MoYo ground truth. For pressure evaluation, we measure Intersection-over-Union (IoU) between our esti + +![](images/6c402716af08545cafa03b79efa58ac2d10b90eb8ef6f3cef9e1f58a8a2267c9.jpg) +Figure 4. Representative examples illustrating the variation and complexity of 3D pose and body-ground contact in our new MoYo dataset. + +![](images/58d1086d4c8155d3fa91b263de42edfa3f0380e5e87f0fe162e963398bd979e4.jpg) + +![](images/2fb8fad4c94ff37dcf3b4325caeadb041e9b131f9aaa53f972e421f697b8d6c0.jpg) +Figure 5. Qualitative evaluation of IPMAN-R and IPMAN-O on the RICH and MoYo datasets. The first column shows the input images of a subject doing various sports poses. The second and third block of columns show the baseline's and our results, respectively. In each block, the first image shows the estimated mesh overlayed on the image (camera view), the second image shows the estimated mesh in the world frame (side view), and the last image shows the estimated pressure map with the CoM (in pink) and the CoP (in green). + +
| Method | RICH MPJPE ↓ | RICH PA-MPJPE ↓ | RICH PVE ↓ | RICH BoSE (%) ↑ | Human3.6M MPJPE ↓ | Human3.6M PA-MPJPE ↓ |
|---|---|---|---|---|---|---|
| PhysCap [74] | - | - | - | - | 113.0 | 68.9 |
| DiffPhy [21] | - | - | - | - | 81.7 | 55.6 |
| Zou et al. [110] | - | - | - | - | 69.9 | - |
| Xie et al. [89] | - | - | - | - | 68.1 | - |
| VIBE [45] | - | - | - | - | 61.3 | 43.1 |
| SimPoE [96] | - | - | - | - | 56.7 | 41.6 |
| D&D [49] | - | - | - | - | 52.5 | 35.5 |
| HMR [42] | - | - | - | - | 88.0 | 56.8 |
| Zanfir et al. [98] | - | - | - | - | 69.0 | - |
| SPIN [47] | 112.2 | 71.5 | 129.5 | 54.7 | 62.3 | 41.9 |
| PARE [46] | 107.0 | 73.1 | 125.0 | 74.4 | - | - |
| CLIFF [51] | 107.0 | 67.2 | 122.3 | 67.6 | 81.4 | 52.1 |
| **Finetuning on Human3.6M** | | | | | | |
| HMR* [42] | - | - | - | - | 62.1 | 41.6 |
| IPMAN-R (Ours) | - | - | - | - | 60.7 (-1.4) | 41.1 (-0.5) |
| **Finetuning on all datasets** | | | | | | |
| HMR* [42] | 82.5 | 48.3 | 92.4 | 62.0 | 61.6 | 41.9 |
| HMR* [42] + L_ground | 80.9 | 47.8 | 89.9 | 66.5 | 61.9 | 41.8 |
| HMR* [42] + L_stability | 81.0 | 47.5 (-0.8) | 90.8 | 69.6 | 61.2 | 41.9 |
| IPMAN-R (Ours) | 79.0 (-3.5) | 47.6 | 89.9 (-2.5) | 71.2 (+9.2) | 60.6 (-1.0) | 41.8 (-0.1) |
+ +![](images/d0ab51e0e15b51c62cea3b840ffb57c2533d3623eb0052d2a7c56f8191cec6da.jpg) +Figure 6. Qualitative comparison of estimated vs the ground-truth pressure. The ground-truth CoP is shown in green and the estimated CoP is shown in yellow. Pressure heatmap colors as per Fig. 2. + +mated and ground-truth pressure heatmaps. We also compute the CoP error as the Euclidean distance between estimated and ground-truth CoP. We obtain an IoU of 0.32 and a CoP error of $57.3\mathrm{mm}$ . Figure 6 shows a qualitative visualization of the estimated pressure compared to the ground truth. For CoM evaluation, we find a $53.3\mathrm{mm}$ difference between our pCoM and the CoM computed by the commercial software, Vicon Plug-in Gait. Unlike Vicon's estimate, our pCoM does not require anthropometric measurements and takes into account the full 3D body shape. For details about the evaluation protocol and comparisons with alternative CoM formulations, see Sup. Mat. + +Physics Simulation. To evaluate stability, we run a post-hoc physics simulation in "Bullet" [10] and measure the displacement of the estimated meshes; a small displacement denotes a stable pose. IPMAN-O produces $14.8\%$ more stable bodies than the baseline [59]; for details see Sup. Mat. + +Table 1. Top to Bottom: Comparisons with video-based and single-frame regression methods. IPMAN-R outperforms the single-frame baselines across all benchmarks. * indicates training hyperparameters and datasets are identical to IPMAN-R. All units are in mm except BoSE. Bold denotes best results (per category), and parentheses show improvement over the baseline. Q Zoom in + +
| Method | MoYo MPJPE ↓ | MoYo PA-MPJPE ↓ | MoYo PVE ↓ | MoYo BoSE (%) ↑ |
|---|---|---|---|---|
| SMPLify-XMC [59] | 75.3 | 36.5 | 16.8 | 98.0 |
| SMPLify-XMC [59] + L_ground | 73.3 | 36.2 | 14.5 | 98.2 |
| SMPLify-XMC [59] + L_stability | 88.5 | 38.6 | 15.3 | 97.8 |
| IPMAN-O (Ours) | 71.9 (-3.4) | 34.3 (-2.2) | 11.4 (-5.4) | 98.6 (+0.5) |
+ +Table 2. Evaluation of IPMAN-O and SMPLify-XMC [59] (optimization-based) on MoYo. Bold shows the best performance, and parentheses show the improvement over SMPLify-XMC. + +# 5. Conclusion + +Existing 3D HPS estimation methods recover SMPL meshes that align well with the input image, but are often physically implausible. To address this, we propose IPMAN, which incorporates intuitive-physics in 3D HPS estimation. Our IP terms encourage stable poses, promote realistic floor support, and reduce body-floor penetration. The IP terms exploit the interaction between the body CoM, CoP, and BoS - key elements used in stability analysis. To calculate the CoM of SMPL meshes, IPMAN uses on a novel formulation that takes part-specific mass contributions into account. Additionally, IPMAN estimates proxy pressure maps directly from images, which is useful in computing CoP. IPMAN is simple, differentiable, and compatible with both regression and optimization methods. IPMAN goes beyond previous physics-based methods to reason about arbitrary full-body contact with the ground. We show that IPMAN improves both regression and optimization baselines across all metrics on existing datasets and MoYo. MoYo uniquely comprises synchronized multi-view video, SMPL-X bodies in complex poses, and measurements for pressure maps and body CoM. Qualitative results show the effectiveness of IPMAN in recovering physically plausible meshes. + +While IPMAN addresses body-floor contact, future work should incorporate general body-scene contact and diverse supporting surfaces by integrating 3D scene reconstruction. In this work, the proposed IP terms are designed to help static poses and we show that they do not hurt dynamic poses. However, the large body of biomechanical literature analyzing dynamic poses could be leveraged for activities like walking, jogging, running, etc. It would be interesting to extend IPMAN beyond single-person scenarios by exploiting the various physical constraints offered by multiple subjects. + +Acknowledgements. We thank T. Alexiadis, T. McConnell, C. Gallatz, M. Höschle, S. Polikovsky, C. Mendoza, Y. Fincan, L. Sanchez and M. Safroshkin for data collection, G. Becherini for MoSh++, Z. Fang, V. Choutas and all of Perceiving Systems for fruitful discussions. This work was funded by the International Max Planck Research School for Intelligent Systems (IMPRS-IS) and in part by the German Federal Ministry of Education and Research (BMBF), Tübingen AI Center, FKZ: 01IS18039B. + +Disclosure. https://files.is.tue.mpg.de/black/CoI_CVPR_2023.txt + +# References + +[1] Ijaz Akhter and Michael J. Black. Pose-conditioned joint angle limits for 3D human pose reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 1446-1455, 2015. 3, 6 +[2] Riza Alp Güler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 7297-7306, 2018. 3 +[3] Erling D. Andersen and Knud D. Andersen. The Mosek interior point optimizer for linear programming: An implementation of the homogeneous algorithm. In High Performance Optimization, 2000. 6 +[4] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In Computer Vision and Pattern Recognition (CVPR), pages 3686-3693, 2014. 5, 6 +[5] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. SCAPE: Shape completion and animation of people. 
Transactions on Graphics (TOG), 24:408-416, 2005. 3 +[6] Michael Barnett-Cowan, Roland W. Fleming, Manish Singh, and Heinrich H. Bulthoff. Perceived object stability depends on multisensory estimates of gravity. PLOS ONE, 6(4):1-5, 2011. 2 +[7] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J. Black. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In European Conference on Computer Vision (ECCV), volume 9909, pages 561-578, 2016. 3 +[8] Marcus A. Brubaker, David J. Fleet, and Aaron Hertzmann. Physics-based person tracking using the anthropomorphic walker. International Journal of Computer Vision (IJCV), 87(1-2):140-155, 2010. 3 +[9] Marcus A. Brubaker, Leonid Sigal, and David J. Fleet. Estimating contact dynamics. In Computer Vision and Pattern Recognition (CVPR), pages 2389-2396, 2009. 3 +[10] Bullet real-time physics simulation. https://pybullet.org.1,8 +[11] Zhe Cao, Gines Hidalgo, Tomas Simon, Shih-En Wei, and Yaser Sheikh. OpenPose: Realtime multi-person 2D pose estimation using part affinity fields. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(1):172–186, 2021. 3 +[12] Yixin Chen, Sai Kumar Dwivedi, Michael J. Black, and Dimitrios Tzionas. Detecting human-object contact in images. June 2023. 3 +[13] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Monocular expressive body regression through body-driven attention. In European Conference on Computer Vision (ECCV), volume 12355, pages 20-40, 2020. 3 +[14] Henry M. Clever, Zackory M. Erickson, Ariel Kapusta, Greg Turk, C. Karen Liu, and Charles C. Kemp. Bodies at rest: 3D human pose and shape estimation from a pressure image using synthetic data. In Computer Vision and Pattern Recognition (CVPR), pages 6214-6223, 2020. 3, 4 + +[15] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. SMPLicit: Topology-aware generative model for clothed people. In Computer Vision and Pattern Recognition (CVPR), pages 11875-11885, 2021. 3 +[16] Taosha Fan, Kalyan Vasudev Alwala, Donglai Xiang, Weipeng Xu, Todd Murphey, and Mustafa Mukadam. Revitalizing optimization for 3D human pose and shape estimation: A sparse constrained formulation. In International Conference on Computer Vision (ICCV), pages 11437-11446, 2021. 3 +[17] Zicong Fan, Omid Taheri, Dimitrios Tzionas, Muhammed Kocabas, Manuel Kaufmann, Michael J. Black, and Otmar Hilliges. ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3 +[18] Yao Feng, Vasileios Choutas, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Collaborative regression of expressive bodies using moderation. In International Conference on 3D Vision (3DV), pages 792-804, 2021. 3 +[19] Mihai Fieraru, Mihai Zanfir, Teodor Alexandru Szente, Eduard Gabriel Bazavan, Vlad Olaru, and Cristian Sminchisescu. REMIPS: Physically consistent 3D reconstruction of multiple interacting people under weak supervision. In Conference on Neural Information Processing Systems (NeurIPS), volume 34, 2021. 3, 5 +[20] Mihai Fieraru, Mihai Zanfir, Silviu-Cristian Pirlea, Vlad Olaru, and Cristian Sminchisescu. AIfit: Automatic 3D human-interpretable feedback models for fitness training. In Computer Vision and Pattern Recognition (CVPR), pages 9919–9928, 2021. 6 +[21] Erik Gartner, Mykhaylo Andriluka, Erwin Coumans, and Cristian Sminchisescu. 
Differentiable dynamics for articulated 3D human motion reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 13180-13190, 2022. 3, 8 +[22] Erik Gartner, Mykhaylo Andriluka, Hongyi Xu, and Cristian Sminchisescu. Trajectory optimization for physics-based reconstruction of 3D human pose from monocular video. In Computer Vision and Pattern Recognition (CVPR), pages 13096-13105, 2022. 3 +[23] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Computer Vision and Pattern Recognition (CVPR), pages 7450-7459, 2019. 3 +[24] Shanyan Guan, Jingwei Xu, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. Bilevel online adaptation for out-of-domain human mesh reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 10472-10481, 2021. 3 +[25] Riza Alp Güler and Iasonas Kokkinos. HoloPose: Holistic 3D human reconstruction in-the-wild. In Computer Vision and Pattern Recognition (CVPR), pages 10876-10886, 2019. 3 +[26] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In Computer Vision and Pattern Recognition (CVPR), pages 3193-3203, 2020. 5 + +[27] Mohamed Hassan, Vasileios Choutas, Dimitrios Tzionas, and Michael J. Black. Resolving 3D human pose ambiguities with 3D scene constraints. In International Conference on Computer Vision (ICCV), pages 2282-2292, 2019. 3, 6 +[28] Mohamed Hassan, Partha Ghosh, Joachim Tesch, Dimitrios Tzionas, and Michael J. Black. Populating 3D scenes by learning human-scene interaction. In Computer Vision and Pattern Recognition (CVPR), pages 14708-14718, 2021. 3 +[29] Yana Hasson, Gül Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J. Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In Computer Vision and Pattern Recognition (CVPR), pages 11807-11816, 2019. 5, 6 +[30] Havok: Customizable, fully multithreaded, and highly optimized physics simulation. http://www.havok.com. 1 +[31] Eric Heiden, David Millard, Erwin Coumans, Yizhou Sheng, and Gaurav S. Sukhatme. NeuralSim: Augmenting differentiable simulators with neural networks. In International Conference on Robotics and Automation (ICRA), pages 9474-9481, 2021. 3 +[32] At L. Hof. The equations of motion for a standing human reveal three mechanisms for balance. Journal of Biomechanics, 40(2):451-457, 2007. 2, 4 +[33] At L. Hof. The "extrapolated center of mass" concept suggests a simple control of balance in walking. Human movement science, 27(1):112-125, 2008. 2, 4 +[34] At L. Hof, M. G. J. Gazendam, and Sinke W. E. The condition for dynamic stability. Journal of Biomechanics, 38(1):1-8, 2005. 4 +[35] Chun-Hao Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael Black. Capturing and inferring dense full-body human-scene contact. In Computer Vision and Pattern Recognition (CVPR), pages 13264-13275, 2022. 2, 3, 6 +[36] Leslie Ikemoto, Okan Arikan, and David Forsyth. Knowing when to put your foot down. In Symposium on Interactive 3D Graphics (SI3D), page 49-53, 2006. 3 +[37] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3D human sensing in natural environments. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 36(7):1325-1339, 2014. 
2, 5, 6 +[38] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 5578-5587, 2020. 3 +[39] Sam Johnson and Mark Everingham. Clustered pose and nonlinear appearance models for human pose estimation. In British Machine Vision Conference (BMVC), pages 1-11, 2010. 5, 6 +[40] Hanbyul Joo, Natalia Neverova, and Andrea Vedaldi. Exemplar fine-tuning for 3D human pose fitting towards in-the-wild 3D human pose estimation. In International Conference on 3D Vision (3DV), pages 42-52, 2021. 3 + +[41] Hanbyul Joo, Tomas Simon, and Yaser Sheikh. Total capture: A 3D deformation model for tracking faces, hands, and bodies. In Computer Vision and Pattern Recognition (CVPR), pages 8320-8329, 2018. 2, 3 +[42] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Computer Vision and Pattern Recognition (CVPR), pages 7122-7131, 2018. 3, 5, 8 +[43] Angjoo Kanazawa, Jason Y. Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Computer Vision and Pattern Recognition (CVPR), pages 5607-5616, 2019. 3 +[44] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In Computer Vision and Pattern Recognition (CVPR), pages 1705-1715, 2022. 3 +[45] Muhammed Kocabas, Nikos Athanasiou, and Michael J. Black. VIBE: Video inference for human body pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 5252-5262, 2020. 3, 6, 8 +[46] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. PARE: Part attention regressor for 3D human body estimation. In International Conference on Computer Vision (ICCV), pages 11127-11137, 2021. 1, 8 +[47] Nikos Kolotouros, Georgios Pavlakos, Michael J. Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In International Conference on Computer Vision (ICCV), pages 2252-2261, 2019. 3, 5, 6, 8 +[48] Nikos Kolotouros, Georgios Pavlakos, and Kostas Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 4496-4505, 2019. 3 +[49] Jiefeng Li, Siyuan Bian, Chao Xu, Gang Liu, Gang Yu, and Cewu Lu. D&D: Learning human dynamics from dynamic camera. In European Conference on Computer Vision (ECCV), 2022. 3, 6, 8 +[50] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. HybrIK: A hybrid analytical-neural inverse kinematics solution for 3D human pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 3383-3393, 2021. 3, 6 +[51] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information in full frames into human pose and shape estimation. In European Conference on Computer Vision (ECCV), volume 13665, pages 590-606, 2022. 1, 3, 8 +[52] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), pages 1954-1963, 2021. 3 +[53] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), volume 8693, pages 740-755, 2014. 5, 6 +[54] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black.
SMPL: A skinned multi-person linear model. Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 2, 3 + +[55] Yiyue Luo, Yunzhu Li, Michael Foshey, Wan Shou, Pratyusha Sharma, Tomás Palacios, Antonio Torralba, and Wojciech Matusik. Intelligent carpet: Inferring 3D human pose from tactile signals. In Computer Vision and Pattern Recognition (CVPR), pages 11255-11265, 2021. 3 +[56] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal V. Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. In International Conference on 3D Vision (3DV), pages 506-516, 2017. 3, 5, 6 +[57] Dushyant Mehta, Srinath Sridhar, Oleksandr Sotnychenko, Helge Rhodin, Mohammad Shafiei, Hans-Peter Seidel, Weipeng Xu, Dan Casas, and Christian Theobalt. VNect: Real-time 3D human pose estimation with a single RGB camera. Transactions on Graphics (TOG), 36(4):44:1-44:14, 2017. 3 +[58] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision (ECCV), volume 12352, pages 752-768, 2020. 3 +[59] Lea Müller, Ahmed A. A. Osman, Siyu Tang, Chun-Hao P. Huang, and Michael J. Black. On self-contact and human pose. In Computer Vision and Pattern Recognition (CVPR), pages 9990-9999, 2021. 1, 2, 3, 4, 5, 6, 8 +[60] NVIDIA PhysX: A scalable multi-platform physics simulation solution. https://developer.nvidia.com/physx-sdk. 1 +[61] Yi-Chung Pai. Movement termination and stability in standing. Exercise and Sport Sciences Reviews, 31(1):19-25, 2003. 2, 4 +[62] Priyanka Patel, Chun-Hao P. Huang, Joachim Tesch, David T. Hoffmann, Shashank Tripathi, and Michael J. Black. AGORA: Avatars in geography optimized for regression analysis. In Computer Vision and Pattern Recognition (CVPR), pages 13468-13478, 2021. 6 +[63] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 10975-10985, 2019. 2, 3, 6 +[64] Xue Bin Peng, Pieter Abbeel, Sergey Levine, and Michiel van de Panne. DeepMimic: Example-guided deep reinforcement learning of physics-based character skills. Transactions on Graphics (TOG), 37(4):1-14, 2018. 2, 3 +[65] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J. Guibas. HuMoR: 3D human motion model for robust pose estimation. In International Conference on Computer Vision (ICCV), pages 11468-11479, 2021. 3 +[66] Davis Rempe, Leonidas J. Guibas, Aaron Hertzmann, Bryan Russell, Ruben Villegas, and Jimei Yang. Contact and human dynamics from monocular video. In European Conference on Computer Vision (ECCV), volume 12350, pages 71-87, 2020. 1, 3 +[67] Ralph Tyrrell Rockafellar. Convex analysis. Princeton University Press, 2015. 4 + +[68] Grégory Rogez, James Steven Supancic, and Deva Ramanan. Understanding everyday hands in action from RGB-D images. In International Conference on Computer Vision (ICCV), pages 3889-3897, 2015. 2, 4 +[69] Yu Rong, Takaaki Shiratori, and Hanbyul Joo. FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In International Conference on Computer Vision Workshops (ICCVW), pages 1749-1759, 2021. 3 +[70] Nadine Rueegg, Shashank Tripathi, Konrad Schindler, Michael J. Black, and Silvia Zuffi.
BITE: Beyond priors for improved three-D dog pose estimation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3 +[71] Jesse Scott, Bharadwaj Ravichandran, Christopher Funk, Robert T. Collins, and Yanxi Liu. From image to stability: Learning dynamics from human pose. In European Conference on Computer Vision (ECCV), volume 12368, pages 536-554, 2020. 2, 3, 4 +[72] Mingyi Shi, Kfir Aberman, Andreas Aristidou, Taku Komura, Dani Lischinski, Daniel Cohen-Or, and Baoquan Chen. MotioNet: 3D human motion reconstruction from monocular video with skeleton consistency. Transactions on Graphics (TOG), 40(1):1:1-1:15, 2021. 3 +[73] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3D human motion capture with physical awareness. Transactions on Graphics (TOG), 40(4), 2021. 3 +[74] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. PhysCap: Physically plausible monocular 3D motion capture in real time. Transactions on Graphics (TOG), 39(6):235:1-235:16, 2020. 1, 2, 3, 8 +[75] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J. Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In International Conference on Computer Vision (ICCV), pages 11179-11188, 2021. 1, 3 +[76] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In International Conference on Computer Vision (ICCV), pages 5348-5357, 2019. 3 +[77] Yating Tian, Hongwen Zhang, Yebin Liu, and Limin Wang. Recovering 3D human mesh from monocular images: A survey. arXiv:2203.01923, 2022. 3 +[78] Shashank Tripathi, Siddhant Ranade, Ambrish Tyagi, and Amit K. Agrawal. PoseNet3D: Learning temporally consistent 3D human pose via knowledge distillation. In International Conference on 3D Vision (3DV), pages 311-321, 2020. 3 +[79] Dimitrios Tzionas, Luca Ballan, Abhilash Srikantha, Pablo Aponte, Marc Pollefeys, and Juergen Gall. Capturing hands in action using discriminative salient points and physics simulation. International Journal of Computer Vision (IJCV), 118:172-193, 2016. 6 +[80] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In International Conference on Computer Vision (ICCV), pages 9720-9729, 2021. 3 + +[81] Timo von Marcard, Roberto Henschel, Michael J. Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3D human pose in the wild using IMUs and a moving camera. In European Conference on Computer Vision (ECCV), volume 11214, pages 614-631, 2018. 5 +[82] Marek Vondrak, Leonid Sigal, and Odest Chadwicke Jenkins. Physical simulation for probabilistic motion tracking. In Computer Vision and Pattern Recognition (CVPR), pages 1-8, 2008. 3 +[83] Eric W. Weisstein. Triangle point picking. https://mathworld.wolfram.com/TrianglePointPicking.html, 2014. From MathWorld - A Wolfram Web Resource. 4 +[84] Zhenzhen Weng and Serena Yeung. Holistic 3D human and scene mesh estimation from single view images. In Computer Vision and Pattern Recognition (CVPR), pages 334-343, 2020. 3 +[85] David A. Winter. A.B.C. (Anatomy, Biomechanics and Control) of balance during standing and walking. Waterloo Biomechanics, 1995. 2, 4, 5 +[86] David A. Winter. Human balance and posture control during standing and walking. Gait & Posture, 3(4):193-214, 1995. 2, 5 +[87] Donglai Xiang, Hanbyul Joo, and Yaser Sheikh. Monocular total capture: Posing face, body, and hands in the wild.
In Computer Vision and Pattern Recognition (CVPR), pages 10957-10966, 2019. 3 +[88] Donglai Xiang, Fabian Prada, Chenglei Wu, and Jessica Hodgins. MonoClothCap: Towards temporally coherent clothing capture from monocular RGB video. In International Conference on 3D Vision (3DV), pages 322-332, 2020. 3 +[89] Kevin Xie, Tingwu Wang, Umar Iqbal, Yunrong Guo, Sanja Fidler, and Florian Shkurti. Physics-based human motion estimation and synthesis from videos. In International Conference on Computer Vision (ICCV), pages 11532-11541, 2021. 3, 8 +[90] Xianghui Xie, Bharat Lal Bhatnagar, and Gerard Pons-Moll. CHORE: Contact, human and object reconstruction from a single RGB image. In European Conference on Computer Vision (ECCV), 2022. 3 +[91] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal Integration. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3 +[92] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. GHUM & GHUML: Generative 3D human shape and articulated pose models. In Computer Vision and Pattern Recognition (CVPR), pages 6183-6192, 2020. 2, 3 +[93] Masanobu Yamamoto and Katsutoshi Yagishita. Scene constraints-aided tracking of human body. In Computer Vision and Pattern Recognition (CVPR), pages 151-156, 2000. 3 +[94] Hongwei Yi, Chun-Hao P. Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J. Black. MIME: Human-aware 3D scene generation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3 +[95] Ye Yuan and Kris Kitani. 3D ego-pose estimation via imitation learning. In European Conference on Computer Vision (ECCV), volume 11220, pages 735-750, 2018. 2 +[96] Ye Yuan, Shih-En Wei, Tomas Simon, Kris Kitani, and Jason Saragih. SimPoE: Simulated character control for 3D human pose estimation. In Computer Vision and Pattern Recognition (CVPR), pages 7159-7169, 2021. 1, 2, 3, 6, 8 +[97] Andrei Zanfir, Eduard Gabriel Bazavan, Hongyi Xu, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Weakly supervised 3D human pose and shape reconstruction with normalizing flows. In European Conference on Computer Vision (ECCV), pages 465-481, 2020. 3 +[98] Andrei Zanfir, Elisabeta Marinoiu, and Cristian Sminchisescu. Monocular 3D pose and shape estimation of multiple people in natural scenes – the importance of multiple scene constraints. In Computer Vision and Pattern Recognition (CVPR), pages 2148-2157, 2018. 3, 6, 8 +[99] Ailing Zeng, Lei Yang, Xuan Ju, Jiefeng Li, Jianyi Wang, and Qiang Xu. SmoothNet: A plug-and-play network for refining human poses in videos. In European Conference on Computer Vision (ECCV), volume 13665, pages 625-642, 2022. 3 +[100] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3D human mesh regression with dense correspondence. In Computer Vision and Pattern Recognition (CVPR), 2020. 3 +[101] Cha Zhang and Tsuhan Chen. Efficient feature extraction for 2D/3D objects in mesh representation. In Proceedings 2001 International Conference on Image Processing (Cat. No. 01CH37205), volume 3, pages 935-938. IEEE, 2001. 4 +[102] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In International Conference on Computer Vision (ICCV), pages 11426-11436, 2021.
1, 3 +[103] Jianfeng Zhang, Dongdong Yu, Jun Hao Liew, Xuecheng Nie, and Jiashi Feng. Body meshes as points. In Computer Vision and Pattern Recognition (CVPR), pages 546-556, 2021. 3 +[104] Jason Y. Zhang, Sam Pepose, Hanbyul Joo, Deva Ramanan, Jitendra Malik, and Angjoo Kanazawa. Perceiving 3D human-object spatial arrangements from a single image in the wild. In European Conference on Computer Vision (ECCV), volume 12357, pages 34-51, 2020. 3 +[105] Siwei Zhang, Yan Zhang, Federica Bogo, Marc Pollefeys, and Siyu Tang. Learning motion priors for 4D human body capture in 3D scenes. In International Conference on Computer Vision (ICCV), pages 11343-11353, 2021. 3 +[106] Tianshu Zhang, Buzhen Huang, and Yangang Wang. Object-occluded human shape and pose estimation from a single color image. In Computer Vision and Pattern Recognition (CVPR), pages 7374-7383, 2020. 3 +[107] Ce Zheng, Wenhan Wu, Chen Chen, Taojiannan Yang, Sijie Zhu, Ju Shen, Nasser Kehtarnavaz, and Mubarak Shah. Deep learning-based human pose estimation: A survey. arXiv:2012.13392, 2022. 3 +[108] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Computer Vision and Pattern Recognition (CVPR), pages 5745-5753, 2019. 3 +[109] Yuxiao Zhou, Marc Habermann, Ikhsanul Habibie, Ayush Tewari, Christian Theobalt, and Feng Xu. Monocular real-time full body capture with inter-part correlations. In Computer Vision and Pattern Recognition (CVPR), pages 4811-4822, 2021. 3 +[110] Yuliang Zou, Jimei Yang, Duygu Ceylan, Jianming Zhang, Federico Perazzi, and Jia-Bin Huang. Reducing footskate in human motion reconstruction with ground contact constraints. In Winter Conference on Applications of Computer Vision (WACV), pages 459-468, 2020. 3, 6, 8 \ No newline at end of file diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/images.zip b/2023/3D Human Pose Estimation via Intuitive Physics/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..f3e506374db1aa023db3232c447ca32a49124e1e --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32a11aaf8316d81f59e2cbfca2bc870befbc1d41e40795da798efcbe6c0ba937 +size 503516 diff --git a/2023/3D Human Pose Estimation via Intuitive Physics/layout.json b/2023/3D Human Pose Estimation via Intuitive Physics/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..78e0fab88e6a5ebb1b7d00eb1a7cc454cf6ac9c9 --- /dev/null +++ b/2023/3D Human Pose Estimation via Intuitive Physics/layout.json @@ -0,0 +1,11321 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 146, + 96, + 446, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 96, + 446, + 113 + ], + "spans": [ + { + "bbox": [ + 146, + 96, + 446, + 113 + ], + "type": "text", + "content": "3D Human Pose Estimation via Intuitive Physics" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 114, + 128, + 477, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 128, + 477, + 156 + ], + "spans": [ + { + "bbox": [ + 114, + 128, + 477, + 156 + ], + "type": "text", + "content": "Shashank Tripathi1 Lea Müller1 Chun-Hao P. Huang1 Omid Taheri1 Michael J.
Black1 Dimitrios Tzionas2*" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "spans": [ + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "type": "text", + "content": "Max Planck Institute for Intelligent Systems, Tübingen, Germany " + }, + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 89, + 158, + 503, + 185 + ], + "type": "text", + "content": "University of Amsterdam, the Netherlands {stripathi, lmueller2, chuang2, otaheri, black}@tue.mpg.de d.tzionas@uva.nl" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 47, + 202, + 547, + 323 + ], + "blocks": [ + { + "bbox": [ + 47, + 202, + 547, + 323 + ], + "lines": [ + { + "bbox": [ + 47, + 202, + 547, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 202, + 547, + 323 + ], + "type": "image", + "image_path": "df44fdeea43adfe7d6345099c566049078468d7fd6b71b85fce44c44ceee9761.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 327, + 547, + 372 + ], + "lines": [ + { + "bbox": [ + 46, + 327, + 547, + 372 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 547, + 372 + ], + "type": "text", + "content": "Figure 1. Estimating a 3D body from an image is ill-posed. A recent, representative, optimization method [59] produces bodies that are in unstable poses, penetrate the floor, or hover above it. In contrast, IPMAN estimates a 3D body that is physically plausible. To achieve this, IPMAN uses novel intuitive-physics (IP) terms that exploit inferred pressure heatmaps on the body, the Center of Pressure (CoP), and the body's Center of Mass (CoM). Body heatmap colors encode per-vertex pressure." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 394, + 192, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 394, + 192, + 406 + ], + "spans": [ + { + "bbox": [ + 143, + 394, + 192, + 406 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 418, + 290, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 418, + 290, + 706 + ], + "spans": [ + { + "bbox": [ + 45, + 418, + 290, + 706 + ], + "type": "text", + "content": "Estimating 3D humans from images often produces implausible bodies that lean, float, or penetrate the floor. Such methods ignore the fact that bodies are typically supported by the scene. A physics engine can be used to enforce physical plausibility, but these are not differentiable, rely on unrealistic proxy bodies, and are difficult to integrate into existing optimization and learning frameworks. In contrast, we exploit novel intuitive-physics (IP) terms that can be inferred from a 3D SMPL body interacting with the scene. Inspired by biomechanics, we infer the pressure heatmap on the body, the Center of Pressure (CoP) from the heatmap, and the SMPL body's Center of Mass (CoM). With these, we develop IPMAN, to estimate a 3D body from a color image in a \"stable\" configuration by encouraging plausible floor contact and overlapping CoP and CoM. Our IP terms are intuitive, easy to implement, fast to compute, differentiable, and can be integrated into existing optimization and regression methods. 
We evaluate IPMAN on standard datasets and MoYo, a new dataset with synchronized multi-view images, ground-truth 3D bodies with complex poses, body-floor contact, CoM and pressure. IPMAN produces more plausible results than the state of the art, improving accuracy for static poses, while not hurting dynamic ones. Code and data are available for research at https://ipman.is.tue.mpg.de." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 394, + 386, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 394, + 386, + 406 + ], + "spans": [ + { + "bbox": [ + 307, + 394, + 386, + 406 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 414, + 548, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 548, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 548, + 653 + ], + "type": "text", + "content": "To understand humans and their actions, computers need automatic methods to reconstruct the body in 3D. Typically, the problem entails estimating the 3D human pose and shape (HPS) from one or more color images. State-of-the-art (SOTA) methods [46, 51, 75, 102] have made rapid progress, estimating 3D humans that align well with image features in the camera view. Unfortunately, the camera view can be deceiving. When viewed from other directions, or when placed in a 3D scene, the estimated bodies are often physically implausible: they lean, hover, or penetrate the ground (see Fig. 1 top). This is because most SOTA methods reason about humans in isolation; they ignore that people move in a scene, interact with it, and receive physical support by contacting it. This is a deal-breaker for inherently 3D applications, such as biomechanics, augmented/virtual reality (AR/VR) and the \"metaverse\"; these need humans to be reconstructed faithfully and physically plausibly with respect to the scene. For this, we need a method that estimates the 3D human on a ground plane from a color image in a configuration that is physically \"stable\"." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 653, + 547, + 713 + ], + "type": "text", + "content": "This is naturally related to reasoning about physics and support. There exist many physics simulators [10, 30, 60] for games, movies, or industrial simulations, and using these for plausible HPS estimation is increasingly popular [66, 74, 96]. However, existing simulators come with two significant" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 722, + 200, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 722, + 200, + 733 + ], + "spans": [ + { + "bbox": [ + 47, + 722, + 200, + 733 + ], + "type": "text", + "content": "* This work was mostly performed at MPI-IS." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4713" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 263 + ], + "type": "text", + "content": "problems: (1) They are typically non-differentiable black boxes, making them incompatible with existing optimization and learning frameworks. Consequently, most methods [64, 95, 96] use them with reinforcement learning to evaluate whether a certain input has the desired outcome, but with no ability to reason about how changing inputs affects the outputs. (2) They rely on an unrealistic proxy body model for computational efficiency; bodies are represented as groups of rigid 3D shape primitives. Such proxy models are crude approximations of human bodies, which, in reality, are much more complex and deform non-rigidly when they move and interact. Moreover, proxies need a priori known body dimensions that are kept fixed during simulation. Also, these proxies differ significantly from the 3D body models [41, 54, 92] used by SOTA HPS methods. Thus, current physics simulators are too limited for use in HPS." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 266, + 289, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 289, + 447 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 289, + 447 + ], + "type": "text", + "content": "What we need, instead, is a solution that is fully differentiable, uses a realistic body model, and seamlessly integrates physical reasoning into HPS methods (both optimization- and regression-based). To this end, instead of using full physics simulation, we introduce novel intuitive-physics (IP) terms that are simple, differentiable, and compatible with a body model like SMPL [54]. Specifically, we define terms that exploit an inferred pressure heatmap of the body on the ground plane, the Center of Pressure (CoP) that arises from the heatmap, and the SMPL body's Center of Mass (CoM) projected on the floor; see Fig. 2 for a visualization. Intuitively, bodies whose CoM lie close to their CoP are more stable than ones with a CoP that is further away (see Fig. 5); the former suggests a static pose, e.g. standing or holding a yoga pose, while the latter a dynamic pose, e.g., walking." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 448, + 289, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 289, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 289, + 555 + ], + "type": "text", + "content": "We use these intuitive-physics terms in two ways. First, we incorporate them in an objective function that extends SMPLify-XMC [59] to optimize for body poses that are stable. 
We also incorporate the same terms in the training loss for an HPS regressor, called IPMAN (Intuitive-Physics-based huMAN). In both formulations, the intuitive-physics terms encourage estimates of body shape and pose that have sufficient ground contact, while penalizing interpenetration and encouraging an overlap of the CoP and CoM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 558, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 289, + 713 + ], + "type": "text", + "content": "Our intuitive-physics formulation is inspired by work in biomechanics [32, 33, 61], which characterizes the stability of humans in terms of relative positions between the CoP, the CoM, and the Base of Support (BoS). The BoS is defined as the convex hull of all contact regions on the floor (Fig. 2). Following past work [6, 71, 74], we use the \"inverted pendulum\" model [85, 86] for body balance; this considers poses as stable if the gravity-projected CoM onto the floor lies inside the BoS. Similar ideas are explored by Scott et al. [71] but they focus on predicting a foot pressure heatmap from 2D or 3D body joints. We go significantly further to exploit stability in training an HPS regressor. This requires two technical novelties." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 65, + 547, + 140 + ], + "blocks": [ + { + "bbox": [ + 309, + 65, + 547, + 140 + ], + "lines": [ + { + "bbox": [ + 309, + 65, + 547, + 140 + ], + "spans": [ + { + "bbox": [ + 309, + 65, + 547, + 140 + ], + "type": "image", + "image_path": "e48583f23ac2984857a497de7716a6d94a45d0c15d116bf9a57e7f8d01a5ad2f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 141, + 547, + 185 + ], + "lines": [ + { + "bbox": [ + 304, + 141, + 547, + 185 + ], + "spans": [ + { + "bbox": [ + 304, + 141, + 547, + 185 + ], + "type": "text", + "content": "Figure 2. (1) A SMPL mesh sitting. (2) The inferred pressure map on the ground (color-coded heatmap), CoP (green), CoM (pink), and Base of Support (BoS, yellow polygon). (3) Segmentation of SMPL into " + }, + { + "bbox": [ + 304, + 141, + 547, + 185 + ], + "type": "inline_equation", + "content": "N_P = 10" + }, + { + "bbox": [ + 304, + 141, + 547, + 185 + ], + "type": "text", + "content": " parts, used for computing CoM; see Sec. 3.2." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 198, + 547, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 547, + 293 + ], + "type": "text", + "content": "The first involves computing CoM. To this end, we uniformly sample points on SMPL's surface, and calculate each body part's volume. Then, we compute CoM as the average of all uniformly sampled points weighted by the corresponding part volumes. We denote this as pCoM, standing for \"part-weighted CoM\". Importantly, pCoM takes into account SMPL's shape, pose, and all blend shapes, while it is also computationally efficient and differentiable." 
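The pCoM described in the preceding block is, concretely, a volume-weighted average of surface points sampled on the posed SMPL mesh. A minimal differentiable sketch of that computation follows; it is illustrative only, and the tensor names, the per-part volume input, and the use of PyTorch are assumptions rather than the released IPMAN code.

```python
import torch

def part_weighted_com(points, point_part_ids, part_volumes):
    """Illustrative pCoM: volume-weighted mean of sampled surface points.

    points:         (N_U, 3) points sampled uniformly on the posed SMPL surface
    point_part_ids: (N_U,)   body-part index assigned to each sampled point
    part_volumes:   (N_P,)   differentiable volume of each body part
    """
    weights = part_volumes[point_part_ids]         # per-point weight = volume of its part
    weights = weights / weights.sum()              # normalize weights to sum to 1
    return (weights[:, None] * points).sum(dim=0)  # (3,) part-weighted center of mass
```

Because the weighted mean is a plain tensor expression, gradients flow back to both the sampled points and the part volumes, which is what allows the term to be used in optimization and regression alike.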
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 294, + 548, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 294, + 548, + 415 + ], + "spans": [ + { + "bbox": [ + 304, + 294, + 548, + 415 + ], + "type": "text", + "content": "The second involves estimating CoP directly from the image, without access to a pressure sensor. Our key insight is that the soft tissues of human bodies deform under pressure, e.g., the buttocks deform when sitting. However, SMPL does not model this deformation; it penetrates the ground instead of deforming. We use the penetration depth as a proxy for pressure [68]; deeper penetration means higher pressure. With this, we estimate a pressure field on SMPL's mesh and compute the CoP as the pressure-weighted average of the surface points. Again this is differentiable." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 415, + 548, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 548, + 547 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 548, + 547 + ], + "type": "text", + "content": "For evaluation, we use a standard HPS benchmark (Human3.6M [37]), but also the RICH [35] dataset. However, these datasets have limited interactions with the floor. We thus capture a novel dataset, MoYo, of challenging yoga poses, with synchronized multi-view video, ground-truth SMPL-X [63] meshes, pressure sensor measurements, and body CoM. IPMAN, in both of its forms, and across all datasets, produces more accurate and stable 3D bodies than the state of the art. Importantly, we find that IPMAN improves accuracy for static poses, while not hurting dynamic ones. This makes IPMAN applicable to everyday motions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 547, + 548, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 547, + 548, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 547, + 548, + 656 + ], + "type": "text", + "content": "To summarize: (1) We develop IPMAN, the first HPS method that integrates intuitive physics. (2) We infer biomechanical properties such as CoM, CoP and body pressure. (3) We define novel intuitive-physics terms that can be easily integrated into HPS methods. (4) We create MoYo, a dataset that uniquely has complex poses, multi-view video, and ground-truth bodies, pressure, and CoM. (5) We show that our IP terms improve HPS accuracy and physical plausibility. (6) Data and code are available for research." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 669, + 392, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 669, + 392, + 681 + ], + "spans": [ + { + "bbox": [ + 306, + 669, + 392, + 681 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 689, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 548, + 715 + ], + "type": "text", + "content": "3D Human Pose and Shape (HPS) from images. 
Existing methods fall into two major categories: (1) non-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4714" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 156 + ], + "type": "text", + "content": "parametric methods that reconstruct a free-form body representation, e.g., joints [1, 56, 57] or vertices [52, 58, 100], and (2) parametric methods that use statistical body models [5, 25, 41, 54, 63, 92, 97]. The latter methods focus on various aspects, such as expressiveness [13, 18, 63, 69, 87], clothed bodies [15, 88, 91], videos [24, 45, 78, 99], and multiperson scenarios [38, 75, 103], to name a few." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 157, + 288, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 157, + 288, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 288, + 300 + ], + "type": "text", + "content": "Inference is done by either optimization or regression. Optimization-based methods [7, 16, 63, 87, 88] fit a body model to image evidence, such as joints [11], dense vertex correspondences [2] or 2D segmentation masks [23]. Regression-based methods [42, 44, 48, 51, 76, 102, 106, 109] use a loss similar to the objective function of optimization methods to train a network to infer body model parameters. Several methods combine optimization and regression in a training loop [47, 50, 59]. Recent methods [24, 40] fine-tune pre-trained networks at test time w.r.t. an image or a sequence, retaining flexibility (optimization) while being less sensitive to initialization (regression)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 302, + 288, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 302, + 288, + 337 + ], + "spans": [ + { + "bbox": [ + 47, + 302, + 288, + 337 + ], + "type": "text", + "content": "Despite their success, these methods reason about the human in \"isolation\", without taking the surrounding scene into account; see [77, 107] for a comprehensive review." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 339, + 288, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 339, + 288, + 447 + ], + "spans": [ + { + "bbox": [ + 46, + 339, + 288, + 447 + ], + "type": "text", + "content": "Contact-only scene constraints. A common way of using scene information is to consider body-scene contact [12, 17, 27, 28, 65, 84, 90, 94, 98, 104, 105, 110]. Yamamoto et al. [93] and others [19, 27, 70, 98, 104] ensure that estimated bodies have plausible scene contact. For videos, encouraging foot-ground contact reduces foot skating [36, 65, 72, 105, 110]. Weng et al. [84] use contact in estimating the pose and scale of scene objects, while Villegas et al. [80] preserve self- and ground contact for motion retargeting." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 448, + 288, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 288, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 288, + 508 + ], + "type": "text", + "content": "These methods typically take two steps: (1) detecting contact areas on the body and/or scene and (2) minimizing the distance between these. Surfaces are typically assumed to be in contact if their distance is below a threshold and their relative motion is small [27, 35, 98, 104]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 509, + 288, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 509, + 288, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 509, + 288, + 581 + ], + "type": "text", + "content": "Many methods only consider contact between the ground and the foot joints [66, 110] or other end-effectors [65]. In contrast, IPMAN uses the full 3D body surface and exploits this to compute the pressure, CoP and CoM. Unlike binary contact, this is differentiable, making the IP terms useful for training HPS regressors." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 715 + ], + "type": "text", + "content": "Physics-based scene constraints. Early work uses physics to estimate walking [8, 9] or full body motion [82]. Recent methods [21, 22, 66, 73, 74, 89, 96] regress 3D humans and then refine them through physics-based optimization. Physics is used for two primary reasons: (1) to regularise dynamics, reducing jitter [49, 66, 74, 96], and (2) to discourage interpenetration and encourage contact. Since contact events are discontinuous, the pipeline is either not end-to-end trainable or trained with reinforcement learning [64, 96]. Xie et al. [89] propose differentiable physics-inspired objectives based on a soft contact penalty, while DiffPhy [21] uses a" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 167 + ], + "type": "text", + "content": "differentiable physics simulator [31] during inference. Both methods apply the objectives in an optimization scheme, while IPMAN is applied to both optimization and regression. PhysCap [74] considers a pose as balanced, when the CoM is projected within the BoS. Rempe et al. [66] impose PD control on the pelvis, which they treat as a CoM. Scott et al. [71] regress foot pressure from 2D and 3D joints for stability analysis but do not use it to improve HPS." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "text", + "content": "All these methods use unrealistic bodies based on shape primitives. Some require known body dimensions [66, 74, 96] while others estimate body scale [49, 89]. In contrast, IPMAN computes CoM, CoP and BoS directly from the SMPL mesh. Clever et al. [14] and Luo et al. [55] estimate 3D body pose but from pressure measurements, not from images. Their task is fundamentally different from ours." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 264, + 362, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 264, + 362, + 277 + ], + "spans": [ + { + "bbox": [ + 306, + 264, + 362, + 277 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 285, + 392, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 285, + 392, + 297 + ], + "spans": [ + { + "bbox": [ + 306, + 285, + 392, + 297 + ], + "type": "text", + "content": "3.1. Preliminaries" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 304, + 545, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 304, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 304, + 304, + 545, + 327 + ], + "type": "text", + "content": "Given a color image, I, we estimate the parameters of the camera and the SMPL body model [54]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "spans": [ + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": "Body model. SMPL maps pose, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ", and shape, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ", parameters to a 3D mesh, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "M(\\theta, \\beta)" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ". The pose parameters, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "\\theta \\in \\mathbb{R}^{24 \\times 6}" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ", are rotations of SMPL's 24 joints in a 6D representation [108]. The shape parameters, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "\\beta \\in \\mathbb{R}^{10}" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ", are the first 10 PCA coefficients of SMPL's shape space. 
The generated mesh " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "M(\\theta, \\beta)" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": " consists of " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "N_V = 6890" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": " vertices, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "V \\in \\mathbb{R}^{N_V \\times 3}" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "N_F = 13776" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": " faces, " + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "inline_equation", + "content": "F \\in \\mathbb{R}^{N_F \\times 3 \\times 3}" + }, + { + "bbox": [ + 304, + 328, + 547, + 412 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 412, + 547, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 547, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 547, + 472 + ], + "type": "text", + "content": "Note that our regression method (IPMAN-R, Sec. 3.4.1) uses SMPL, while our optimization method (IPMAN-O, Sec. 3.4.2) uses SMPL-X [63], to match the models used by the baselines. For simplicity of exposition, we refer to both models as SMPL when the distinction is not important." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": "Camera. For the regression-based IPMAN-R, we follow the standard convention [42, 43, 47] and use a weak perspective camera with a 2D scale, " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": ", translation, " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^c = (t_x^c,t_y^c)" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": ", fixed camera rotation, " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^c = \\mathbf{I}_3" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": ", and a fixed focal length " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "(f_{x},f_{y})" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": ". The root-relative body orientation " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^b" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": " is predicted by the neural network, but body translation stays fixed at " + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^b = \\mathbf{0}" + }, + { + "bbox": [ + 304, + 473, + 547, + 556 + ], + "type": "text", + "content": " as it is absorbed into the camera's translation." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": "For the optimization-based IPMAN-O, we follow Müller et al. [59] to use the full-perspective camera model and optimize the focal lengths " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "(f_x, f_y)" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": ", camera rotation " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^c" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " and camera translation " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^c" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": ". The principal point " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "(o_x, o_y)" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " is the center of the input image. " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " is the intrinsic matrix storing focal lengths and the principal point. We assume that the body rotation " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^b" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " and translation " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^b" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " are absorbed into the camera parameters, thus, they stay fixed as " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^b = \\mathbf{I}_3" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^b = \\mathbf{0}" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": ". Using the camera, we project a 3D point " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " to an image point " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": " through " + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{x} = \\mathbf{K}(\\mathbf{R}^c\\mathbf{X} + \\mathbf{t}^c)" + }, + { + "bbox": [ + 304, + 557, + 547, + 677 + ], + "type": "text", + "content": "." 
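The projection stated in this block, x = K(R^c X + t^c), is the standard pinhole mapping. A small illustrative helper is sketched below, with the usual homogeneous divide added to obtain pixel coordinates; the function name and argument layout are assumptions for illustration, not the paper's implementation.

```python
import torch

def project_full_perspective(X, K, R_c, t_c):
    """Project 3D points X (N, 3) with intrinsics K (3, 3), camera rotation
    R_c (3, 3), and translation t_c (3,); returns 2D pixel coordinates (N, 2)."""
    X_cam = X @ R_c.T + t_c              # world -> camera frame
    x_hom = X_cam @ K.T                  # apply intrinsics (homogeneous image coords)
    return x_hom[:, :2] / x_hom[:, 2:3]  # perspective divide
```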
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "Ground plane and gravity-projection. We assume that the gravity direction is perpendicular to the ground plane in the world coordinate system. Thus, for any arbitrary point in" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4715" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": "3D space, " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": ", its gravity-projected point, " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}' = g(\\pmb{u}) \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": ", is the projection of " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " along the plane normal " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{n}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " onto the ground plane, and " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "g(.)" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " is the projection operator. 
The function " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "h(\\pmb{u})" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " returns the signed \"height\" of a point " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " with respect to the ground; i.e., the signed distance from " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " to the ground plane along the gravity direction, where " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "h(\\pmb{u}) < 0" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " is below the ground and " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "h(\\pmb{u}) > 0" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 47, + 72, + 289, + 156 + ], + "type": "text", + "content": " is above it." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 170, + 153, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 153, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 153, + 182 + ], + "type": "text", + "content": "3.2. Stability Analysis" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 190, + 288, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 190, + 288, + 298 + ], + "spans": [ + { + "bbox": [ + 45, + 190, + 288, + 298 + ], + "type": "text", + "content": "We follow the biomechanics literature [32, 33, 61] and Scott et al. [71] to define three fundamental elements for stability analysis: We use the Newtonian definition for the \"Center of Mass\" (CoM); i.e., the mass-weighted average of particle positions. The \"Center of Pressure\" (CoP) is the ground-reaction force's point of application. The \"Base of Support\" (BoS) is the convex hull of all body-ground contacts. Below, we define intuitive-physics (IP) terms using the inferred CoM and CoP. BoS is only used for evaluation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "text", + "content": "Body Center of Mass (CoM). We introduce a novel CoM formulation that is fully differentiable and considers the per-part mass contributions, dubbed as pCoM; see Sup. Mat. for alternative CoM definitions. 
To compute this, we first segment the template mesh into " + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "inline_equation", + "content": "N_P = 10" + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "text", + "content": " parts " + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "inline_equation", + "content": "P_i \\in \\mathcal{P}" + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "text", + "content": "; see Fig. 2. We do this once offline, and keep the segmentation fixed during training and optimization. Assuming a shaped and posed SMPL body, the per-part volumes " + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "inline_equation", + "content": "\\mathcal{V}^{P_i}" + }, + { + "bbox": [ + 46, + 300, + 288, + 409 + ], + "type": "text", + "content": " are calculated by splitting the SMPL mesh into parts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": "However, mesh splitting is a non-differentiable operation. Thus, it cannot be used for either training a regressor (IPMAN-R) or for optimization (IPMAN-O). Instead, we work with the full SMPL mesh and use differentiable \"close-translate-fill\" operations for each body part on the fly. First, for each part " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": ", we extract boundary vertices " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " and add in the middle a virtual vertex " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\boldsymbol{v}_g" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\boldsymbol{v}_g = \\sum_{j \\in \\mathcal{B}_P} \\boldsymbol{v}_j / |\\mathcal{B}_P|" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": ". Then, for the " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\boldsymbol{v}_g" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " vertices, we add virtual faces to \"close\" " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " and make it watertight. Next, we \"translate\" " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " such that the part centroid " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_P = \\sum_{j \\in P} \\boldsymbol{v}_j / |P|" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " is at the origin. 
Finally, we \"fill\" the centered " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": " with tetrahedrons by connecting the origin with each face vertex. Then, the part volume, " + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "inline_equation", + "content": "\\mathcal{V}^{\\mathcal{P}}" + }, + { + "bbox": [ + 45, + 410, + 288, + 566 + ], + "type": "text", + "content": ", is the sum of all tetrahedron volumes [101]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": "To create a uniform distribution of surface vertices, we uniformly sample " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "N_U = 20000" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": " surface points " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V_U \\in \\mathbb{R}^{N_U \\times 3}" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": " on the template SMPL mesh using the Triangle Point Picking method [83]. Given " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V_U" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": " and the template SMPL mesh vertices " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V_T" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ", we follow [59], and analytically compute a sparse linear regressor " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "\\mathbf{W} \\in \\mathbb{R}^{N_U \\times N_V}" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V_U = \\mathbf{W}V_T" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ". During training and optimization, given an arbitrary shaped and posed mesh with vertices " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ", we obtain uniformly-sampled mesh surface points as " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "V_U = \\mathbf{W}V" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ". Each surface point, " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ", is assigned to the body part, " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "P_{v_i}" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ", corresponding to the face, " + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "inline_equation", + "content": "F_{v_i}" + }, + { + "bbox": [ + 46, + 567, + 288, + 700 + ], + "type": "text", + "content": ", it was sampled from." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "content": "Finally, the part-weighted pCoM is computed as a" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 72, + 512, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 512, + 85 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 512, + 85 + ], + "type": "text", + "content": "volume-weighted mean of the mesh surface points:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 382, + 91, + 545, + 124 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 91, + 545, + 124 + ], + "spans": [ + { + "bbox": [ + 382, + 91, + 545, + 124 + ], + "type": "interline_equation", + "content": "\\bar {\\mathbf {m}} = \\frac {\\sum_ {i = 1} ^ {N _ {U}} \\mathcal {V} ^ {P _ {v _ {i}}} v _ {i}}{\\sum_ {i = 1} ^ {N _ {U}} \\mathcal {V} ^ {P _ {v _ {i}}}}, \\tag {1}", + "image_path": "82cae4c2ccaed476fda5ccdd7090a9413e07a2a7cf0b2bf6b8b453b3e47d66d2.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "inline_equation", + "content": "\\mathcal{V}^{P_{v_i}}" + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "text", + "content": " is the volume of the part " + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "inline_equation", + "content": "P_{v_i}\\in \\mathcal{P}" + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "text", + "content": " to which " + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 130, + 545, + 167 + ], + "type": "text", + "content": " is assigned. This formulation is fully differentiable and can be employed with any existing 3D HPS estimation method." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "content": "Note that computing CoM (or volume) from uniformly sampled surface points does not work (see Sup. Mat.) because it assumes that mass, " + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "content": ", is proportional to surface area, " + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "content": ". 
Instead, our pCoM computes mass from volume, " + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "content": ", via the standard density equation, " + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "inline_equation", + "content": "M = \\rho \\mathcal{V}" + }, + { + "bbox": [ + 304, + 167, + 547, + 251 + ], + "type": "text", + "content": ", while our close-translate-fill operation computes the volume of deformable bodies in an efficient and differentiable manner." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 251, + 547, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 251, + 547, + 371 + ], + "spans": [ + { + "bbox": [ + 304, + 251, + 547, + 371 + ], + "type": "text", + "content": "Center of Pressure (CoP). Recovering a pressure heatmap from an image without using hardware, such as pressure sensors, is a highly ill-posed problem. However, stability analysis requires knowledge of the pressure exerted on the human body by the supporting surfaces, like the ground. Going beyond binary contact, Rogez et al. [68] estimate 3D forces by detecting intersecting vertices between hand and object meshes. Clever et al. [14] recover pressure maps by allowing articulated body models to deform a soft pressure-sensing virtual mattress in a physics simulation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "content": "In contrast, we observe that, while real bodies interacting with rigid objects (e.g., the floor) deform under contact, SMPL does not model such soft-tissue deformations. Thus, the body mesh penetrates the contacting object surface and the amount of penetration can be a proxy for pressure; a deeper penetration implies higher pressure. With the height " + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "inline_equation", + "content": "h(v_{i})" + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "content": " (see Sec. 3.1) of a mesh surface point " + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "content": " with respect to the ground plane " + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "content": ", we define a pressure field to compute the per-point pressure " + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 304, + 371, + 547, + 479 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 347, + 486, + 545, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 486, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 347, + 486, + 545, + 518 + ], + "type": "interline_equation", + "content": "\\rho_ {i} = \\left\\{ \\begin{array}{l l} 1 - \\alpha h (v _ {i}) & \\text {i f} h (v _ {i}) < 0, \\\\ e ^ {- \\gamma h (v _ {i})} & \\text {i f} h (v _ {i}) \\geq 0, \\end{array} \\right. 
\\tag {2}", + "image_path": "231d224b8057f6ad89ab7ec29438981c65fb023586be40a9e333557c91c62b63.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "spans": [ + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "text", + "content": " are scalar hyperparameters set empirically. We approximate soft tissue via a \"spring\" model and \"penetrating\" pressure field using Hooke's Law. Some pressure is also assigned to points above the ground to allow tolerance for footwear, but this decays quickly. Finally, we compute the CoP, " + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "inline_equation", + "content": "\\overline{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 525, + 547, + 597 + ], + "type": "text", + "content": ", as" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 383, + 604, + 545, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 604, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 383, + 604, + 545, + 635 + ], + "type": "interline_equation", + "content": "\\overline {{\\mathbf {s}}} = \\frac {\\sum_ {i = 1} ^ {N _ {U}} \\rho_ {i} v _ {i}}{\\sum_ {i = 1} ^ {N _ {U}} \\rho_ {i}}. \\tag {3}", + "image_path": "fd234a9e9a8a0cfb20cab19cb7d703938ebe6d8fce06a8fe944553ae8b9c5f5d.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 642, + 499, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 642, + 499, + 653 + ], + "spans": [ + { + "bbox": [ + 306, + 642, + 499, + 653 + ], + "type": "text", + "content": "Again, note that this term is fully differentiable." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "Base of Support (BoS). In biomechanics [34, 85], BoS is defined as the \"supporting area\" or the possible range of the CoP on the supporting surface. Here, we define BoS as the convex hull [67] of all gravity-projected body-ground contact points. 
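The pressure field of Eq. (2) and the CoP of Eq. (3) map directly to code. A hedged PyTorch sketch, assuming the ground plane is given by a point on the plane and an upward unit normal so that h() is the signed height from Sec. 3.1; alpha and gamma are the scalar hyperparameters mentioned above and must be supplied by the caller (the paper's settings are not reproduced here):

import torch

def signed_height(points, plane_point, up_normal):
    # h(u): signed distance to the ground plane along the gravity direction,
    # negative below the ground and positive above it.
    return (points - plane_point) @ up_normal            # (N,)

def pressure_and_cop(points, plane_point, up_normal, alpha, gamma):
    h = signed_height(points, plane_point, up_normal)
    rho = torch.where(h < 0,
                      1.0 - alpha * h,                   # Hooke-like growth with penetration depth
                      torch.exp(-gamma * h))             # quick decay above the ground (footwear tolerance)
    cop = (rho[:, None] * points).sum(0) / rho.sum()     # Eq. (3): pressure-weighted mean
    return rho, cop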
In detail, we first determine all such contacts" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4716" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "by selecting the set of mesh surface points " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " close to the ground, and then gravity-project them onto the ground to obtain " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "C = \\{g(v_{i}) \\mid |h(v_{i})| < \\tau\\}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": ". The BoS is then defined as the convex hull " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 72, + 287, + 120 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 126, + 181, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 126, + 181, + 138 + ], + "spans": [ + { + "bbox": [ + 47, + 126, + 181, + 138 + ], + "type": "text", + "content": "3.3. Intuitive-Physics Losses" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 144, + 288, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 144, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 46, + 144, + 288, + 251 + ], + "type": "text", + "content": "Stability loss. The \"inverted pendulum\" model of human balance [85, 86] considers the relationship between the CoM and BoS to determine stability. Simply put, for a given shape and pose, if the body CoM, projected on the gravity-aligned ground plane, lies within the BoS, the pose is considered stable. While this definition of stability is useful for evaluation, using it in a loss or energy function for 3D HPS estimation results in sparse gradients (see Sup. Mat.). 
Instead, we define the stability criterion as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 258, + 287, + 270 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 258, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 99, + 258, + 287, + 270 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {s t a b i l i t y}} = \\| g (\\bar {\\mathbf {m}}) - g (\\bar {\\mathbf {s}}) \\| _ {2}, \\tag {4}", + "image_path": "64d07c76fdc3b0a4b865318fbf76c8fcc1be2d500c95a331a007649eb8e46932.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "inline_equation", + "content": "g(\\bar{\\mathbf{m}})" + }, + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "inline_equation", + "content": "g(\\bar{\\mathbf{s}})" + }, + { + "bbox": [ + 46, + 275, + 287, + 300 + ], + "type": "text", + "content": " are the gravity-projected CoM and CoP, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": "Ground contact loss. As shown in Fig. 1, 3D HPS methods minimize the 2D joint reprojection error and do not consider the plausibility of body-ground contact. Ignoring this can result in interpenetrating or hovering meshes. Inspired by self-contact losses [19,59] and hand-object contact losses [26,29], we define two ground losses, namely pushing, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{push}}" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", and pulling, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{pull}}" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", that take into account the height, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "h(v_{i})" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", of a vertex, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", with respect to the ground plane. For " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "h(v_{i}) < 0" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", i.e., for vertices under the ground plane, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{push}}" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": " discourages body-ground penetrations. 
For " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "h(v_{i}) \\geq 0" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": ", i.e., for hovering meshes, " + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{pull}}" + }, + { + "bbox": [ + 46, + 300, + 288, + 467 + ], + "type": "text", + "content": " encourages the vertices that lie close to the ground to \"snap\" into contact with it. Note that the losses are non-conflicting as they act on disjoint sets of vertices. Then, the ground contact loss is:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 472, + 287, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 472, + 287, + 486 + ], + "spans": [ + { + "bbox": [ + 63, + 472, + 287, + 486 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {g r o u n d}} = \\mathcal {L} _ {\\text {p u l l}} + \\mathcal {L} _ {\\text {p u s h}}, \\text {w i t h} \\tag {5}", + "image_path": "2709dbad10446d440c532a3cbd7ad8d01028b036ab7057442ff48103134a7927.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 71, + 487, + 287, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 487, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 71, + 487, + 287, + 512 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {p u l l}} = \\alpha_ {1} \\tanh \\left(\\frac {h \\left(v _ {i}\\right)}{\\alpha_ {2}}\\right) ^ {2} \\quad \\text {i f} h \\left(v _ {i}\\right) \\geq 0, \\text {a n d} \\tag {6}", + "image_path": "7a6e287567a4f8120f04af51be1896db8639e20bb2ef5558af153d27d992bb4e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 514, + 287, + 539 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 514, + 287, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 514, + 287, + 539 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {p u s h}} = \\beta_ {1} \\tanh \\left(\\frac {h \\left(v _ {i}\\right)}{\\beta_ {2}}\\right) ^ {2} \\quad \\text {i f} h \\left(v _ {i}\\right) < 0. \\tag {7}", + "image_path": "fbf889a0840aa25f521ca5fc6dce8f1829ffc835352cc47e12c0cfb101c6c02b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 543, + 107, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 543, + 107, + 554 + ], + "spans": [ + { + "bbox": [ + 47, + 543, + 107, + 554 + ], + "type": "text", + "content": "3.4. IPMAN" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 561, + 287, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 561, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 46, + 561, + 287, + 657 + ], + "type": "text", + "content": "We use our new IP losses for two tasks: (1) We extend HMR [42] to develop IPMAN-R, a regression-based HPS method. (2) We extend SMPLify-XMC [59] to develop IPMAN-O, an optimization-based method. Note that IPMAN-O uses a reference ground plane, while IPMAN-R uses the ground plane only for training but not at test time. It leverages the known ground in 3D datasets, and thus, does not require additional data beyond past HPS methods." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 670, + 125, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 125, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 125, + 681 + ], + "type": "text", + "content": "3.4.1 IPMAN-R" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 714 + ], + "type": "text", + "content": "Most HPS methods are trained with a mix of direct supervision using 3D datasets [37,56,81] and 2D reprojection losses" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 64, + 545, + 173 + ], + "blocks": [ + { + "bbox": [ + 307, + 64, + 545, + 173 + ], + "lines": [ + { + "bbox": [ + 307, + 64, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 64, + 545, + 173 + ], + "type": "image", + "image_path": "a2ec3ec21e7a632c477fa14a5e119000616e9b378ad23a5088e74f081f69c9ca.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "lines": [ + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "spans": [ + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": "Figure 3. IPMAN-R architecture. First, the HMR regressor estimates camera translation and SMPL parameters for an input image. These parameters are used to generate the SMPL mesh in the camera frame, " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "M_{c}" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": ". To transform the mesh from camera into world coordinates " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "(M_{c} \\rightarrow M_{w})" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": ", IPMAN-R uses the ground-truth camera rotation, " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "R_{w}^{c}" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": ", and translation, " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "t_{w}^{c}" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": ". The IP losses, " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{ground}}" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{stability}}" + }, + { + "bbox": [ + 305, + 176, + 547, + 254 + ], + "type": "text", + "content": ", are applied on the mesh in the world coordinate system." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 277, + 547, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 547, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 547, + 350 + ], + "type": "text", + "content": "using image datasets [4, 39, 53]. The 3D losses, however, are calculated in the camera frame, ignoring scene information and physics. IPMAN-R extends HMR [42] with our intuitive-physics terms; see Fig. 3 for the architecture. 
For training, we use the known camera coordinates and the world ground plane in 3D datasets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "spans": [ + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": "As described in Sec. 3.1 (paragraph \"Camera\"), HMR infers the camera translation, " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^c" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ", and SMPL parameters, " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ", in the camera coordinates assuming " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^c = \\mathbf{I}_3" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^b = \\mathbf{0}" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ". Ground truth 3D joints and SMPL parameters are used to supervise the inferred mesh " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "M_c" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": " in the camera frame. However, 3D datasets also provide the ground, albeit in the world frame. To leverage the known ground, we transform the predicted body orientation, " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^b" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ", to world coordinates using the ground-truth camera rotation, " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_w^c" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ", as " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_w^b = \\mathbf{R}_w^{c\\top}\\mathbf{R}^b" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ". Then, we compute the body translation in world coordinates as " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_w^b = -\\mathbf{t}^c + \\mathbf{t}_w^c" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ". 
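A tiny NumPy sketch of the camera-to-world transform just stated; the conventions (direction of R_w^c and the additive composition of the translations) simply mirror the text and may need adapting to a specific dataset's camera format:

import numpy as np

def body_to_world(R_b, t_c, R_w_c, t_w_c):
    # R_w^b = R_w^{c T} R^b : body orientation rotated into the world frame
    # t_w^b = -t^c + t_w^c  : body translation in world coordinates
    R_w_b = R_w_c.T @ R_b
    t_w_b = -t_c + t_w_c
    return R_w_b, t_w_b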
With the predicted mesh and ground plane in world coordinates, we add the IP terms, " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{stability}}" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{ground}}" + }, + { + "bbox": [ + 304, + 351, + 547, + 508 + ], + "type": "text", + "content": ", for HPS training as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 520, + 545, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 520, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 520, + 545, + 533 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {I P M A N - R}} \\left(\\boldsymbol {\\theta}, \\boldsymbol {\\beta}, \\mathbf {t} ^ {c}\\right) = \\lambda_ {2 D} \\mathcal {L} _ {2 D} + \\lambda_ {3 D} \\mathcal {L} _ {3 D} + \\lambda_ {\\mathrm {S M P L}} \\mathcal {L} _ {\\mathrm {S M P L}} +", + "image_path": "0b6d8cb8bac6d8abd11e85c411cc86a6a7cad66c4c10557de2ff5a6293671aaf.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 392, + 535, + 545, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 535, + 545, + 548 + ], + "spans": [ + { + "bbox": [ + 392, + 535, + 545, + 548 + ], + "type": "interline_equation", + "content": "\\lambda_ {\\mathrm {s}} \\mathcal {L} _ {\\text {s t a b i l i t y}} + \\lambda_ {\\mathrm {g}} \\mathcal {L} _ {\\text {g r o u n d}}, \\tag {8}", + "image_path": "01f969682857e25a574af03371059ec5131e27beb0be5476424f7a02b33b9e31.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "spans": [ + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{s}}" + }, + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{g}}" + }, + { + "bbox": [ + 306, + 560, + 547, + 596 + ], + "type": "text", + "content": " are the weights for the respective IP terms. For training (data augmentation, hyperparameters, etc), we follow Kolotouros et al. [47]; for more details see Sup. Mat." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 620, + 384, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 620, + 384, + 631 + ], + "spans": [ + { + "bbox": [ + 306, + 620, + 384, + 631 + ], + "type": "text", + "content": "3.4.2 IPMAN-O" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": "To fit SMPL-X to 2D image keypoints, SMPLify-XMC [59] initializes the fitting process by exploiting the self-contact and global-orientation of a known/presented 3D mesh. We posit that the presented pose contains further information, such as stability, pressure and contact with the ground-plane. 
IPMAN-O uses this insight to apply stability and ground" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 294, + 757, + 315, + 766 + ], + "type": "text", + "content": "4717" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 222, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 222, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 222, + 84 + ], + "type": "text", + "content": "contact losses. The IPMAN-O objective is:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 287, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 287, + 137 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} E _ {\\text {I P M A N - O}} (\\boldsymbol {\\beta}, \\boldsymbol {\\theta}, \\boldsymbol {\\Phi}) = E _ {J 2 D} + \\lambda_ {\\beta} E _ {\\beta} + \\lambda_ {\\theta_ {h}} E _ {\\theta_ {h}} + \\\\ \\lambda_ {\\tilde {\\theta} _ {b}} E _ {\\tilde {\\theta} _ {b}} + \\lambda_ {\\tilde {C}} E _ {\\tilde {C}} + \\\\ \\lambda_ {s} E _ {\\text {s t a b i l i t y}} + \\lambda_ {g} E _ {\\text {g r o u n d}}. \\tag {9} \\\\ \\end{array}", + "image_path": "c7e717377efb07051bae4fd76446b28a23230dbec1e871a86c98a43f98d39fa3.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " denotes the camera parameters: rotation " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^c" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": ", translation " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^c" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": ", and focal length, " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "(f_x, f_y)" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_{J2D}" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " is a 2D joint loss, " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_\\beta" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_{\\theta_h}" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " body shape and hand pose priors. 
" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_{\\tilde{\\theta}_b}" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_{\\tilde{C}}" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " are pose and contact terms w.r.t. the presented 3D pose and contact (see [59] for details). " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_S" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "inline_equation", + "content": "E_G" + }, + { + "bbox": [ + 47, + 142, + 288, + 251 + ], + "type": "text", + "content": " are the stability and ground contact losses from Sec. 3.3. Since the estimated mesh is in the same coordinate system as the presented mesh and the ground-plane, we directly apply IP losses without any transformations. For details see Sup. Mat." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 259, + 128, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 259, + 128, + 273 + ], + "spans": [ + { + "bbox": [ + 47, + 259, + 128, + 273 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 279, + 228, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 279, + 228, + 292 + ], + "spans": [ + { + "bbox": [ + 47, + 279, + 228, + 292 + ], + "type": "text", + "content": "4.1. Training and Evaluation Datasets" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 297, + 288, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 288, + 345 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 288, + 345 + ], + "type": "text", + "content": "Human3.6M [37]. A dataset of 3D human keypoints and RGB images. The poses are limited in terms of challenging physics, focusing on common activities like walking, discussing, smoking, or taking photos." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 346, + 288, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 346, + 288, + 405 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 288, + 405 + ], + "type": "text", + "content": "RICH [35]. A dataset of videos with accurate marker-less motion-captured 3D bodies and 3D scans of scenes. The images are more natural than Human3.6M and Fit3D [20]. We consider sequences with meaningful body-ground interaction. For the list of sequences, see Sup. Mat." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 405, + 288, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 405, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 405, + 288, + 441 + ], + "type": "text", + "content": "Other datasets. Similar to [47], for training we use 3D keypoints from MPI-INF-3DHP [56] and 2D keypoints from image datasets such as COCO [53], MPII [4] and LSP [39]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 455, + 205, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 455, + 205, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 455, + 205, + 467 + ], + "type": "text", + "content": "4.1.1 MoCap Yoga (MoYo) Dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 474, + 287, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 582 + ], + "type": "text", + "content": "We capture a trained Yoga professional in 200 highly complex poses (see Fig. 4) using a synchronized MoCap system, pressure mat, and a multi-view RGB video system with 8 static, calibrated cameras; for details see Sup. Mat. The dataset contains " + }, + { + "bbox": [ + 46, + 474, + 287, + 582 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 46, + 474, + 287, + 582 + ], + "type": "text", + "content": " 1.75M RGB frames in 4K resolution with ground-truth SMPL-X [63], pressure and CoM. Compared to the Fit3D [20] and PosePrior [1] datasets, MoYo is more challenging; it has extreme poses, strong self-occlusion, and significant body-ground and self-contact." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 588, + 160, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 588, + 160, + 600 + ], + "spans": [ + { + "bbox": [ + 47, + 588, + 160, + 600 + ], + "type": "text", + "content": "4.2. Evaluation Metrics" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 606, + 287, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 606, + 287, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 606, + 287, + 642 + ], + "type": "text", + "content": "We use standard 3D HPS metrics: The Mean Per-Joint Position Error (MPJPE), its Procrustes Aligned version (PA-MPJPE), and the Per-Vertex Error (PVE) [62]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 643, + 288, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 288, + 678 + ], + "type": "text", + "content": "BoS Error (BoSE). To evaluate stability, we propose a new metric called BoS Error (BoSE). Following the definition of stability (Sec. 3.3) we define:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 99, + 685, + 287, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 685, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 99, + 685, + 287, + 716 + ], + "type": "interline_equation", + "content": "\\operatorname {B o S E} = \\left\\{ \\begin{array}{l l} 1 & g (\\bar {\\mathbf {m}}) \\in \\mathcal {C} (C) \\\\ 0 & g (\\bar {\\mathbf {m}}) \\notin \\mathcal {C} (C) \\end{array} \\right. 
\\tag {10}", + "image_path": "79666d817312e6902c062e32118fe051184a6490d3d913e007de6861d61eedde.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\mathcal{C}(C)" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": " is the convex hull of the gravity-projected contact vertices for " + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "inline_equation", + "content": "\\tau = 10\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 72, + 547, + 121 + ], + "type": "text", + "content": ". For efficiency reasons, we formulate this computation as the solution of a convex system via interior point linear programming [3]; see Sup. Mat." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 129, + 421, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 129, + 421, + 140 + ], + "spans": [ + { + "bbox": [ + 305, + 129, + 421, + 140 + ], + "type": "text", + "content": "4.3. IPMAN Evaluation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "spans": [ + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "content": "IPMAN-R. We evaluate our regressor, IPMAN-R, on RICH and H3.6M and summarize our results in Tab. 1. We refer to our regression baseline as " + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "inline_equation", + "content": "\\mathrm{HMR}^*" + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "content": " which is HMR trained on the same datasets as IPMAN-R. Since we train with paired 3D datasets, we do not use HMR's discriminator during training. Both IP terms individually improve upon the baseline method. Their joint use, however, shows the largest improvement. For example, on RICH the MPJPE improves by " + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "inline_equation", + "content": "3.5\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "content": " and the PVE by " + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{mm}" + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "content": ". It is particularly interesting that IPMAN-R improves upon the baseline on H3.6M, a dataset with largely dynamic poses and little body-ground contact. We also significantly outperform (" + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "inline_equation", + "content": "\\sim 12\\%" + }, + { + "bbox": [ + 304, + 147, + 547, + 481 + ], + "type": "text", + "content": ") the MPJPE of optimization approaches that use the ground plane, Zou et al. [110] (69.9 mm) and Zanfir et al. [98] (69.0 mm), on H3.6M. Some video-based methods [49, 96] achieve better MPJPE (56.7 and 52.5 resp.) on H3.6M. However, they initialize with a stronger kinematic predictor [45, 50] and require video frames as input. Further, they use heuristics to estimate body weight and non-physical residual forces to correct for contact estimation errors. 
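Returning to the BoSE metric of Eq. (10): checking whether the gravity-projected CoM lies inside the convex hull of the projected contact points is a small feasibility LP, which the paper solves with an interior-point solver [3]. A sketch of the same test with scipy.optimize.linprog, assuming the CoM and contact vertices are already gravity-projected and expressed in 2D ground-plane coordinates:

import numpy as np
from scipy.optimize import linprog

def in_convex_hull(point, contacts):
    # point: (2,) projected CoM; contacts: (K, 2) projected contact vertices.
    # The point is inside the hull iff some lambda >= 0 with sum(lambda) = 1
    # satisfies contacts.T @ lambda = point (feasibility only, zero objective).
    K = contacts.shape[0]
    A_eq = np.vstack([contacts.T, np.ones((1, K))])      # (3, K)
    b_eq = np.concatenate([point, [1.0]])                # (3,)
    res = linprog(c=np.zeros(K), A_eq=A_eq, b_eq=b_eq, bounds=[(0, None)] * K)
    return res.success

def bose(com_proj, contact_proj):
    # Eq. (10): 1 if the projected CoM is inside the BoS, 0 otherwise.
    return 1 if in_convex_hull(com_proj, contact_proj) else 0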
In contrast, IPMAN is a single-frame method, models complex full-body pressure and does not rely on approximate body weight to compute CoM. Qualitatively, Fig. 5 (top) shows that IPMAN-R's reconstructions are more stable and contain physically-plausible body-ground contact. While HMR is not SOTA, it is simple, isolating the benefits of our new IP formulation. These terms can also be added to methods with more modern backbones and architectures." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 483, + 547, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 651 + ], + "type": "text", + "content": "IPMAN-O. Our optimization method, IPMAN-O, also improves upon the baseline optimization method, SMPLify-XMC, on all evaluation metrics (see Tab. 2). We note that adding " + }, + { + "bbox": [ + 304, + 483, + 547, + 651 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{stability}}" + }, + { + "bbox": [ + 304, + 483, + 547, + 651 + ], + "type": "text", + "content": " independently improves the PVE, but not joint metrics (PA-MPJPE, MPJPE) and BoSE. This can be explained by the dependence of our IP terms on the relative position of the mesh surface to the ground-plane. Since joint metrics do not capture surfaces, they may get worse. Similar trends on joint metrics have been reported in the context of hand-object contact [29, 79] and body-scene contact [27]. We show qualitative results in Fig. 5 (bottom). While both SMPLify-XMC [59] and IPMAN-O achieve similar image projections, another view reveals that our results are more stable and physically plausible w.r.t. the ground." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 658, + 495, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 495, + 671 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 495, + 671 + ], + "type": "text", + "content": "4.4. Pressure, CoP and CoM Evaluation" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 713 + ], + "type": "text", + "content": "We evaluate our estimated pressure, CoP and CoM against the MoYo ground truth. For pressure evaluation, we measure Intersection-over-Union (IoU) between our esti" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4718" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 72, + 545, + 187 + ], + "blocks": [ + { + "bbox": [ + 49, + 72, + 545, + 187 + ], + "lines": [ + { + "bbox": [ + 49, + 72, + 545, + 187 + ], + "spans": [ + { + "bbox": [ + 49, + 72, + 545, + 187 + ], + "type": "image", + "image_path": "6c402716af08545cafa03b79efa58ac2d10b90eb8ef6f3cef9e1f58a8a2267c9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 192, + 545, + 204 + ], + "lines": [ + { + "bbox": [ + 47, + 192, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 545, + 204 + ], + "type": "text", + "content": "Figure 4. 
Representative examples illustrating the variation and complexity of 3D pose and body-ground contact in our new MoYo dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 217, + 545, + 458 + ], + "blocks": [ + { + "bbox": [ + 50, + 217, + 545, + 458 + ], + "lines": [ + { + "bbox": [ + 50, + 217, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 50, + 217, + 545, + 458 + ], + "type": "image", + "image_path": "58d1086d4c8155d3fa91b263de42edfa3f0380e5e87f0fe162e963398bd979e4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 50, + 459, + 545, + 644 + ], + "blocks": [ + { + "bbox": [ + 50, + 459, + 545, + 644 + ], + "lines": [ + { + "bbox": [ + 50, + 459, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 50, + 459, + 545, + 644 + ], + "type": "image", + "image_path": "2fb8fad4c94ff37dcf3b4325caeadb041e9b131f9aaa53f972e421f697b8d6c0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 648, + 546, + 693 + ], + "lines": [ + { + "bbox": [ + 46, + 648, + 546, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 648, + 546, + 693 + ], + "type": "text", + "content": "Figure 5. Qualitative evaluation of IPMAN-R and IPMAN-O on the RICH and MoYo datasets. The first column shows the input images of a subject doing various sports poses. The second and third block of columns show the baseline's and our results, respectively. In each block, the first image shows the estimated mesh overlayed on the image (camera view), the second image shows the estimated mesh in the world frame (side view), and the last image shows the estimated pressure map with the CoM (in pink) and the CoP (in green)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4719" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 70, + 288, + 251 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 288, + 251 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 288, + 251 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Method</td><td colspan="4">RICH</td><td colspan="2">Human3.6M</td></tr>
<tr><td>MPJPE ↓</td><td>PA-MPJPE ↓</td><td>PVE ↓</td><td>BoSE (%) ↑</td><td>MPJPE ↓</td><td>PA-MPJPE ↓</td></tr>
<tr><td>PhysCap [74]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>113.0</td><td>68.9</td></tr>
<tr><td>DiffPhy [21]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>81.7</td><td>55.6</td></tr>
<tr><td>Zou et al. [110]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>69.9</td><td>-</td></tr>
<tr><td>Xie et al. [89]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>68.1</td><td>-</td></tr>
<tr><td>VIBE [45]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>61.3</td><td>43.1</td></tr>
<tr><td>Simpoe [96]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>56.7</td><td>41.6</td></tr>
<tr><td>D&D [49]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.5</td><td>35.5</td></tr>
<tr><td>HMR [42]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>88.0</td><td>56.8</td></tr>
<tr><td>Zanfir et al. [98]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>69.0</td><td>-</td></tr>
<tr><td>SPIN [47]</td><td>112.2</td><td>71.5</td><td>129.5</td><td>54.7</td><td>62.3</td><td>41.9</td></tr>
<tr><td>PARE [46]</td><td>107.0</td><td>73.1</td><td>125.0</td><td>74.4</td><td>-</td><td>-</td></tr>
<tr><td>CLIFF [51]</td><td>107.0</td><td>67.2</td><td>122.3</td><td>67.6</td><td>81.4</td><td>52.1</td></tr>
<tr><td colspan="7">Finetuning on Human3.6M</td></tr>
<tr><td>HMR* [42]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>62.1</td><td>41.6</td></tr>
<tr><td>IPMAN-R (Ours)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>60.7 (-1.4)</td><td>41.1 (-0.5)</td></tr>
<tr><td colspan="7">Finetuning on all datasets</td></tr>
<tr><td>HMR* [42]</td><td>82.5</td><td>48.3</td><td>92.4</td><td>62.0</td><td>61.6</td><td>41.9</td></tr>
<tr><td>HMR* [42]+Lground</td><td>80.9</td><td>47.8</td><td>89.9</td><td>66.5</td><td>61.9</td><td>41.8</td></tr>
<tr><td>HMR* [42]+Lstability</td><td>81.0</td><td>47.5 (-0.8)</td><td>90.8</td><td>69.6</td><td>61.2</td><td>41.9</td></tr>
<tr><td>IPMAN-R (Ours)</td><td>79.0 (-3.5)</td><td>47.6</td><td>89.9 (-2.5)</td><td>71.2 (+9.2)</td><td>60.6 (-1.0)</td><td>41.8 (-0.1)</td></tr>
</table>
", + "image_path": "32f3fd9c60bb57399448f54ab7a112c103fef459dc3507084c860bd49363468b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 337, + 289, + 441 + ], + "blocks": [ + { + "bbox": [ + 50, + 337, + 289, + 441 + ], + "lines": [ + { + "bbox": [ + 50, + 337, + 289, + 441 + ], + "spans": [ + { + "bbox": [ + 50, + 337, + 289, + 441 + ], + "type": "image", + "image_path": "d0ab51e0e15b51c62cea3b840ffb57c2533d3623eb0052d2a7c56f8191cec6da.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 442, + 287, + 476 + ], + "lines": [ + { + "bbox": [ + 46, + 442, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 287, + 476 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison of estimated vs the ground-truth pressure. The ground-truth CoP is shown in green and the estimated CoP is shown in yellow. Pressure heatmap colors as per Fig. 2." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "text", + "content": "mated and ground-truth pressure heatmaps. We also compute the CoP error as the Euclidean distance between estimated and ground-truth CoP. We obtain an IoU of 0.32 and a CoP error of " + }, + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "inline_equation", + "content": "57.3\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "text", + "content": ". Figure 6 shows a qualitative visualization of the estimated pressure compared to the ground truth. For CoM evaluation, we find a " + }, + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "inline_equation", + "content": "53.3\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 506, + 287, + 649 + ], + "type": "text", + "content": " difference between our pCoM and the CoM computed by the commercial software, Vicon Plug-in Gait. Unlike Vicon's estimate, our pCoM does not require anthropometric measurements and takes into account the full 3D body shape. For details about the evaluation protocol and comparisons with alternative CoM formulations, see Sup. Mat." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Physics Simulation. To evaluate stability, we run a post-hoc physics simulation in \"Bullet\" [10] and measure the displacement of the estimated meshes; a small displacement denotes a stable pose. IPMAN-O produces " + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "14.8\\%" + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": " more stable bodies than the baseline [59]; for details see Sup. Mat." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 306, + 70, + 547, + 133 + ], + "blocks": [ + { + "bbox": [ + 46, + 255, + 288, + 322 + ], + "lines": [ + { + "bbox": [ + 46, + 255, + 288, + 322 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 288, + 322 + ], + "type": "text", + "content": "Table 1. Top to Bottom: Comparisons with video-based and single-frame regression methods. 
IPMAN-R outperforms the single-frame baselines across all benchmarks. * indicates training hyperparameters and datasets are identical to IPMAN-R. All units are in mm except BoSE. Bold denotes best results (per category), and parentheses show improvement over the baseline." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 70, + 547, + 133 + ], + "lines": [ + { + "bbox": [ + 306, + 70, + 547, + 133 + ], + "spans": [ + { + "bbox": [ + 306, + 70, + 547, + 133 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Method</td><td colspan="4">MoYo</td></tr>
<tr><td>MPJPE ↓</td><td>PA-MPJPE ↓</td><td>PVE ↓</td><td>BoSE (%) ↑</td></tr>
<tr><td>SMPLify-XMC [59]</td><td>75.3</td><td>36.5</td><td>16.8</td><td>98.0</td></tr>
<tr><td>SMPLify-XMC [59] + Lground</td><td>73.3</td><td>36.2</td><td>14.5</td><td>98.2</td></tr>
<tr><td>SMPLify-XMC [59] + Lstability</td><td>88.5</td><td>38.6</td><td>15.3</td><td>97.8</td></tr>
<tr><td>IPMAN-O (Ours)</td><td>71.9 (-3.4)</td><td>34.3 (-2.2)</td><td>11.4 (-5.4)</td><td>98.6 (+0.5)</td></tr>
</table>
", + "image_path": "8b27ff559438762f7cb859dfee21f9a5e1ddb0f0101064d751104a35e085b493.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 137, + 547, + 171 + ], + "lines": [ + { + "bbox": [ + 305, + 137, + 547, + 171 + ], + "spans": [ + { + "bbox": [ + 305, + 137, + 547, + 171 + ], + "type": "text", + "content": "Table 2. Evaluation of IPMAN-O and SMPLify-XMC [59] (optimization-based) on MoYo. Bold shows the best performance, and parentheses show the improvement over SMPLify-XMC." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 188, + 378, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 188, + 378, + 201 + ], + "spans": [ + { + "bbox": [ + 306, + 188, + 378, + 201 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 209, + 547, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 209, + 547, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 209, + 547, + 472 + ], + "type": "text", + "content": "Existing 3D HPS estimation methods recover SMPL meshes that align well with the input image, but are often physically implausible. To address this, we propose IPMAN, which incorporates intuitive-physics in 3D HPS estimation. Our IP terms encourage stable poses, promote realistic floor support, and reduce body-floor penetration. The IP terms exploit the interaction between the body CoM, CoP, and BoS - key elements used in stability analysis. To calculate the CoM of SMPL meshes, IPMAN uses on a novel formulation that takes part-specific mass contributions into account. Additionally, IPMAN estimates proxy pressure maps directly from images, which is useful in computing CoP. IPMAN is simple, differentiable, and compatible with both regression and optimization methods. IPMAN goes beyond previous physics-based methods to reason about arbitrary full-body contact with the ground. We show that IPMAN improves both regression and optimization baselines across all metrics on existing datasets and MoYo. MoYo uniquely comprises synchronized multi-view video, SMPL-X bodies in complex poses, and measurements for pressure maps and body CoM. Qualitative results show the effectiveness of IPMAN in recovering physically plausible meshes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 472, + 547, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 547, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 547, + 594 + ], + "type": "text", + "content": "While IPMAN addresses body-floor contact, future work should incorporate general body-scene contact and diverse supporting surfaces by integrating 3D scene reconstruction. In this work, the proposed IP terms are designed to help static poses and we show that they do not hurt dynamic poses. However, the large body of biomechanical literature analyzing dynamic poses could be leveraged for activities like walking, jogging, running, etc. It would be interesting to extend IPMAN beyond single-person scenarios by exploiting the various physical constraints offered by multiple subjects." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 605, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 700 + ], + "type": "text", + "content": "Acknowledgements. We thank T. Alexiadis, T. 
McConnell, C. Gallatz, M. Höschle, S. Polikovsky, C. Mendoza, Y. Fincan, L. Sanchez and M. Safroshkin for data collection, G. Becherini for MoSh++, Z. Fang, V. Choutas and all of Perceiving Systems for fruitful discussions. This work was funded by the International Max Planck Research School for Intelligent Systems (IMPRS-IS) and in part by the German Federal Ministry of Education and Research (BMBF), Tübingen AI Center, FKZ: 01IS18039B." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 702, + 541, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 702, + 541, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 702, + 541, + 713 + ], + "type": "text", + "content": "Disclosure. https://files.is.tue.mpg.de/black/CoI_CVPR_2023.txt" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4720" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 58, + 91, + 287, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 91, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 58, + 91, + 287, + 133 + ], + "type": "text", + "content": "[1] Ijaz Akhter and Michael J. Black. Pose-conditioned joint angle limits for 3D human pose reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 1446-1455, 2015. 3, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 136, + 288, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 136, + 288, + 178 + ], + "spans": [ + { + "bbox": [ + 58, + 136, + 288, + 178 + ], + "type": "text", + "content": "[2] Riza Alp Güler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 7297-7306, 2018. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 180, + 288, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 180, + 288, + 223 + ], + "spans": [ + { + "bbox": [ + 58, + 180, + 288, + 223 + ], + "type": "text", + "content": "[3] Erling D. Andersen and Knud D. Andersen. The Mosek interior point optimizer for linear programming: An implementation of the homogeneous algorithm. In High Performance Optimization, 2000. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 224, + 287, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 224, + 287, + 267 + ], + "spans": [ + { + "bbox": [ + 58, + 224, + 287, + 267 + ], + "type": "text", + "content": "[4] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In Computer Vision and Pattern Recognition (CVPR), pages 3686-3693, 2014. 
5, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 270, + 287, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 270, + 287, + 312 + ], + "spans": [ + { + "bbox": [ + 58, + 270, + 287, + 312 + ], + "type": "text", + "content": "[5] Dragomir Anguelov, Praveen Srinivasan, Daphne Koller, Sebastian Thrun, Jim Rodgers, and James Davis. SCAPE: Shape completion and animation of people. Transactions on Graphics (TOG), 24:408-416, 2005. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 314, + 287, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 314, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 58, + 314, + 287, + 356 + ], + "type": "text", + "content": "[6] Michael Barnett-Cowan, Roland W. Fleming, Manish Singh, and Heinrich H. Bulthoff. Perceived object stability depends on multisensory estimates of gravity. PLOS ONE, 6(4):1-5, 2011. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 358, + 287, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 358, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 58, + 358, + 287, + 412 + ], + "type": "text", + "content": "[7] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J. Black. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In European Conference on Computer Vision (ECCV), volume 9909, pages 561-578, 2016. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 415, + 287, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 415, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 58, + 415, + 287, + 456 + ], + "type": "text", + "content": "[8] Marcus A. Brubaker, David J. Fleet, and Aaron Hertzmann. Physics-based person tracking using the anthropomorphic walker. International Journal of Computer Vision (IJCV), 87(1-2):140-155, 2010. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 58, + 458, + 287, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 458, + 287, + 490 + ], + "spans": [ + { + "bbox": [ + 58, + 458, + 287, + 490 + ], + "type": "text", + "content": "[9] Marcus A. Brubaker, Leonid Sigal, and David J. Fleet. Estimating contact dynamics. In Computer Vision and Pattern Recognition (CVPR), pages 2389-2396, 2009. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 492, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 492, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 53, + 492, + 287, + 512 + ], + "type": "text", + "content": "[10] Bullet real-time physics simulation. https://pybullet.org.1,8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 514, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 514, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 53, + 514, + 287, + 567 + ], + "type": "text", + "content": "[11] Zhe Cao, Gines Hidalgo, Tomas Simon, Shih-En Wei, and Yaser Sheikh. OpenPose: Realtime multi-person 2D pose estimation using part affinity fields. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 43(1):172–186, 2021. 
3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 570, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 570, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 53, + 570, + 287, + 601 + ], + "type": "text", + "content": "[12] Yixin Chen, Sai Kumar Dwivedi, Michael J. Black, and Dimitrios Tzionas. Detecting human-object contact in images. June 2023. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 603, + 287, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 603, + 287, + 657 + ], + "spans": [ + { + "bbox": [ + 53, + 603, + 287, + 657 + ], + "type": "text", + "content": "[13] Vasileios Choutas, Georgios Pavlakos, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Monocular expressive body regression through body-driven attention. In European Conference on Computer Vision (ECCV), volume 12355, pages 20-40, 2020. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 53, + 658, + 287, + 712 + ], + "type": "text", + "content": "[14] Henry M. Clever, Zackory M. Erickson, Ariel Kapusta, Greg Turk, C. Karen Liu, and Charles C. Kemp. Bodies at rest: 3D human pose and shape estimation from a pressure image using synthetic data. In Computer Vision and Pattern Recognition (CVPR), pages 6214-6223, 2020. 3, 4" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 312, + 73, + 545, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 73, + 545, + 126 + ], + "spans": [ + { + "bbox": [ + 312, + 73, + 545, + 126 + ], + "type": "text", + "content": "[15] Enric Corona, Albert Pumarola, Guillem Alenyà, Gerard Pons-Moll, and Francesc Moreno-Noguer. SMPLicit: Topology-aware generative model for clothed people. In Computer Vision and Pattern Recognition (CVPR), pages 11875-11885, 2021. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 130, + 545, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 130, + 545, + 193 + ], + "spans": [ + { + "bbox": [ + 312, + 130, + 545, + 193 + ], + "type": "text", + "content": "[16] Taosha Fan, Kalyan Vasudev Alwala, Donglai Xiang, Weipeng Xu, Todd Murphey, and Mustafa Mukadam. Revitalizing optimization for 3D human pose and shape estimation: A sparse constrained formulation. In International Conference on Computer Vision (ICCV), pages 11437-11446, 2021. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 197, + 545, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 197, + 545, + 250 + ], + "spans": [ + { + "bbox": [ + 312, + 197, + 545, + 250 + ], + "type": "text", + "content": "[17] Zicong Fan, Omid Taheri, Dimitrios Tzionas, Muhammed Kocabas, Manuel Kaufmann, Michael J. Black, and Otmar Hilliges. ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 253, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 253, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 312, + 253, + 545, + 296 + ], + "type": "text", + "content": "[18] Yao Feng, Vasileios Choutas, Timo Bolkart, Dimitrios Tzionas, and Michael J. Black. Collaborative regression of expressive bodies using moderation. In International Conference on 3D Vision (3DV), pages 792-804, 2021. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 298, + 545, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 298, + 545, + 362 + ], + "spans": [ + { + "bbox": [ + 312, + 298, + 545, + 362 + ], + "type": "text", + "content": "[19] Mihai Fieraru, Mihai Zanfir, Teodor Alexandru Szente, Eduard Gabriel Bazavan, Vlad Olaru, and Cristian Sminchisescu. REMIPS: Physically consistent 3D reconstruction of multiple interacting people under weak supervision. In Conference on Neural Information Processing Systems (NeurIPS), volume 34, 2021. 3, 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 365, + 545, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 365, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 312, + 365, + 545, + 418 + ], + "type": "text", + "content": "[20] Mihai Fieraru, Mihai Zanfir, Silviu-Cristian Pirlea, Vlad Olaru, and Cristian Sminchisescu. AIfit: Automatic 3D human-interpretable feedback models for fitness training. In Computer Vision and Pattern Recognition (CVPR), pages 9919–9928, 2021. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 422, + 545, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 422, + 545, + 474 + ], + "spans": [ + { + "bbox": [ + 312, + 422, + 545, + 474 + ], + "type": "text", + "content": "[21] Erik Gartner, Mykhaylo Andriluka, Erwin Coumans, and Cristian Sminchisescu. Differentiable dynamics for articulated 3D human motion reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 13180-13190, 2022. 3, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 478, + 545, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 478, + 545, + 530 + ], + "spans": [ + { + "bbox": [ + 312, + 478, + 545, + 530 + ], + "type": "text", + "content": "[22] Erik Gartner, Mykhaylo Andriluka, Hongyi Xu, and Cristian Sminchisescu. Trajectory optimization for physics-based reconstruction of 3D human pose from monocular video. In Computer Vision and Pattern Recognition (CVPR), pages 13096-13105, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 534, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 534, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 312, + 534, + 545, + 577 + ], + "type": "text", + "content": "[23] Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, and Liang Lin. Graphonomy: Universal human parsing via graph transfer learning. In Computer Vision and Pattern Recognition (CVPR), pages 7450-7459, 2019. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 579, + 545, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 579, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 312, + 579, + 545, + 622 + ], + "type": "text", + "content": "[24] Shanyan Guan, Jingwei Xu, Yunbo Wang, Bingbing Ni, and Xiaokang Yang. 
Bilevel online adaptation for out-of-domain human mesh reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 10472-10481, 2021. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 624, + 545, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 624, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 312, + 624, + 545, + 666 + ], + "type": "text", + "content": "[25] Riza Alp Güler and Iasonas Kokkinos. HoloPose: Holistic 3D human reconstruction in-the-wild. In Computer Vision and Pattern Recognition (CVPR), pages 10876-10886, 2019. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 312, + 670, + 545, + 712 + ], + "type": "text", + "content": "[26] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. HOnnotate: A method for 3D annotation of hand and object poses. In Computer Vision and Pattern Recognition (CVPR), pages 3193-3203, 2020. 5" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "type": "text", + "content": "4721" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 73, + 287, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 73, + 287, + 117 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 287, + 117 + ], + "type": "text", + "content": "[27] Mohamed Hassan, Vasileios Choutas, Dimitrios Tzionas, and Michael J. Black. Resolving 3D human pose ambiguities with 3D scene constraints. In International Conference on Computer Vision (ICCV), pages 2282-2292, 2019. 3, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 118, + 288, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 118, + 288, + 163 + ], + "spans": [ + { + "bbox": [ + 53, + 118, + 288, + 163 + ], + "type": "text", + "content": "[28] Mohamed Hassan, Partha Ghosh, Joachim Tesch, Dimitrios Tzionas, and Michael J. Black. Populating 3D scenes by learning human-scene interaction. In Computer Vision and Pattern Recognition (CVPR), pages 14708-14718, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 164, + 288, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 164, + 288, + 218 + ], + "spans": [ + { + "bbox": [ + 53, + 164, + 288, + 218 + ], + "type": "text", + "content": "[29] Yana Hasson, Gül Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J. Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In Computer Vision and Pattern Recognition (CVPR), pages 11807-11816, 2019. 5, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 220, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 220, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 53, + 220, + 288, + 251 + ], + "type": "text", + "content": "[30] Havok: Customizable, fully multithreaded, and highly optimized physics simulation. http://www.havok.com. 
1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 254, + 288, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 254, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 53, + 254, + 288, + 308 + ], + "type": "text", + "content": "[31] Eric Heiden, David Millard, Erwin Coumans, Yizhou Sheng, and Gaurav S. Sukhatme. NeuralSim: Augmenting differentiable simulators with neural networks. In International Conference on Robotics and Automation (ICRA), pages 9474-9481, 2021. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 309, + 288, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 309, + 288, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 309, + 288, + 342 + ], + "type": "text", + "content": "[32] At L. Hof. The equations of motion for a standing human reveal three mechanisms for balance. Journal of Biomechanics, 40(2):451-457, 2007. 2, 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 343, + 288, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 343, + 288, + 376 + ], + "spans": [ + { + "bbox": [ + 53, + 343, + 288, + 376 + ], + "type": "text", + "content": "[33] At L. Hof. The \"extrapolated center of mass\" concept suggests a simple control of balance in walking. Human movement science, 27(1):112-125, 2008. 2, 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 377, + 288, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 377, + 288, + 409 + ], + "spans": [ + { + "bbox": [ + 53, + 377, + 288, + 409 + ], + "type": "text", + "content": "[34] At L. Hof, M. G. J. Gazendam, and Sinke W. E. The condition for dynamic stability. Journal of Biomechanics, 38(1):1-8, 2005. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 411, + 288, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 411, + 288, + 476 + ], + "spans": [ + { + "bbox": [ + 53, + 411, + 288, + 476 + ], + "type": "text", + "content": "[35] Chun-Hao Huang, Hongwei Yi, Markus Höschle, Matvey Safroshkin, Tsvetelina Alexiadis, Senya Polikovsky, Daniel Scharstein, and Michael Black. Capturing and inferring dense full-body human-scene contact. In Computer Vision and Pattern Recognition (CVPR), pages 13264-13275, 2022. 2, 3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 478, + 288, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 478, + 288, + 511 + ], + "spans": [ + { + "bbox": [ + 53, + 478, + 288, + 511 + ], + "type": "text", + "content": "[36] Leslie Ikemoto, Okan Arikan, and David Forsyth. Knowing when to put your foot down. In Symposium on Interactive 3D Graphics (SI3D), page 49-53, 2006. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 513, + 288, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 513, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 53, + 513, + 288, + 567 + ], + "type": "text", + "content": "[37] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3D human sensing in natural environments. Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 36(7):1325-1339, 2014. 
2, 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 568, + 288, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 568, + 288, + 622 + ], + "spans": [ + { + "bbox": [ + 53, + 568, + 288, + 622 + ], + "type": "text", + "content": "[38] Wen Jiang, Nikos Kolotouros, Georgios Pavlakos, Xiaowei Zhou, and Kostas Daniilidis. Coherent reconstruction of multiple humans from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 5578-5587, 2020. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 624, + 288, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 624, + 288, + 667 + ], + "spans": [ + { + "bbox": [ + 53, + 624, + 288, + 667 + ], + "type": "text", + "content": "[39] Sam Johnson and Mark Everingham. Clustered pose and nonlinear appearance models for human pose estimation. In British Machine Vision Conference (BMVC), pages 1-11, 2010. 5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 670, + 288, + 713 + ], + "type": "text", + "content": "[40] Hanbyul Joo, Natalia Neverova, and Andrea Vedaldi. Exemplar fine-tuning for 3D human pose fitting towards in-the-wild 3D human pose estimation. In International Conference on 3D Vision (3DV), pages 42-52, 2021. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 312, + 73, + 547, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 73, + 547, + 117 + ], + "spans": [ + { + "bbox": [ + 312, + 73, + 547, + 117 + ], + "type": "text", + "content": "[41] Hanbyul Joo, Tomas Simon, and Yaser Sheikh. Total capture: A 3D deformation model for tracking faces, hands, and bodies. In Computer Vision and Pattern Recognition (CVPR), pages 8320-8329, 2018. 2, 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 118, + 547, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 118, + 547, + 161 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 547, + 161 + ], + "type": "text", + "content": "[42] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In Computer Vision and Pattern Recognition (CVPR), pages 7122-7131, 2018. 3, 5, 8" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 162, + 547, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 162, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 312, + 162, + 547, + 205 + ], + "type": "text", + "content": "[43] Angjoo Kanazawa, Jason Y. Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. Computer Vision and Pattern Recognition (CVPR), pages 5607-5616, 2019. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 206, + 547, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 206, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 312, + 206, + 547, + 239 + ], + "type": "text", + "content": "[44] Rawal Khirodkar, Shashank Tripathi, and Kris Kitani. Occluded human mesh recovery. In Computer Vision and Pattern Recognition (CVPR), pages 1705-1715, 2022. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 239, + 547, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 239, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 312, + 239, + 547, + 282 + ], + "type": "text", + "content": "[45] Muhammed Kocabas, Nikos Athanasiou, and Michael J. Black. VIBE: Video inference for human body pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 5252-5262, 2020. 3, 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 284, + 547, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 284, + 547, + 327 + ], + "spans": [ + { + "bbox": [ + 312, + 284, + 547, + 327 + ], + "type": "text", + "content": "[46] Muhammed Kocabas, Chun-Hao P. Huang, Otmar Hilliges, and Michael J. Black. PARE: Part attention regressor for 3D human body estimation. In International Conference on Computer Vision (ICCV), pages 11127-11137, 2021. 1, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 327, + 547, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 327, + 547, + 381 + ], + "spans": [ + { + "bbox": [ + 312, + 327, + 547, + 381 + ], + "type": "text", + "content": "[47] Nikos Kolotouros, Georgios Pavlakos, Michael J. Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In International Conference on Computer Vision (ICCV), pages 2252-2261, 2019. 3, 5, 6, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 383, + 547, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 383, + 547, + 426 + ], + "spans": [ + { + "bbox": [ + 312, + 383, + 547, + 426 + ], + "type": "text", + "content": "[48] Nikos Kolotouros, Georgios Pavlakos, and Kostas Dani-ilidis. Convolutional mesh regression for single-image human shape reconstruction. In Computer Vision and Pattern Recognition (CVPR), pages 4496–4505, 2019. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 426, + 547, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 426, + 547, + 470 + ], + "spans": [ + { + "bbox": [ + 312, + 426, + 547, + 470 + ], + "type": "text", + "content": "[49] Jiefeng Li, Siyuan Bian, Chao Xu, Gang Liu, Gang Yu, and Cewu Lu. D&D: Learning human dynamics from dynamic camera. In European Conference on Computer Vision (ECCV), 2022. 3, 6, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 472, + 547, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 472, + 547, + 525 + ], + "spans": [ + { + "bbox": [ + 312, + 472, + 547, + 525 + ], + "type": "text", + "content": "[50] Jiefeng Li, Chao Xu, Zhicun Chen, Siyuan Bian, Lixin Yang, and Cewu Lu. HybrIK: A hybrid analytical-neural inverse kinematics solution for 3D human pose and shape estimation. In Computer Vision and Pattern Recognition (CVPR), pages 3383-3393, 2021. 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 526, + 547, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 526, + 547, + 569 + ], + "spans": [ + { + "bbox": [ + 312, + 526, + 547, + 569 + ], + "type": "text", + "content": "[51] Zhihao Li, Jianzhuang Liu, Zhensong Zhang, Songcen Xu, and Youliang Yan. CLIFF: Carrying location information in full frames into human pose and shape estimation. In ECCV, volume 13665, pages 590-606, 2022. 
1, 3, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 571, + 547, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 571, + 547, + 613 + ], + "spans": [ + { + "bbox": [ + 312, + 571, + 547, + 613 + ], + "type": "text", + "content": "[52] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Computer Vision and Pattern Recognition (CVPR), pages 1954-1963, 2021. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 312, + 614, + 547, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 614, + 547, + 669 + ], + "spans": [ + { + "bbox": [ + 312, + 614, + 547, + 669 + ], + "type": "text", + "content": "[53] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C. Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), volume 8693, pages 740-755, 2014. 5, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 312, + 670, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 670, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 670, + 547, + 713 + ], + "type": "text", + "content": "[54] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multi-person linear model. Transactions on Graphics (TOG), 34(6):248:1-248:16, 2015. 2, 3" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4722" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 287, + 127 + ], + "type": "text", + "content": "[55] Yiyue Luo, Yunzhu Li, Michael Foshey, Wan Shou, Pratyusha Sharma, Tomás Palacios, Antonio Torralba, and Wojciech Matusik. Intelligent carpet: Inferring 3D human pose from tactile signals. In Computer Vision and Pattern Recognition (CVPR), pages 11255-11265, 2021. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 128, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 128, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 53, + 128, + 288, + 183 + ], + "type": "text", + "content": "[56] Dushyant Mehta, Helge Rhodin, Dan Casas, Pascal V. Fua, Oleksandr Sotnychenko, Weipeng Xu, and Christian Theobalt. Monocular 3D human pose estimation in the wild using improved CNN supervision. International Conference on 3D Vision (3DV), pages 506-516, 2017. 3, 5, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 184, + 288, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 184, + 288, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 184, + 288, + 247 + ], + "type": "text", + "content": "[57] Dushyant Mehta, Srinath Sridhar, Oleksandr Sotnychenko, Helge Rhodin, Mohammad Shafiei, Hans-Peter Seidel, Weipeng Xu, Dan Casas, and Christian Theobalt. 
VNect: Real-time 3D human pose estimation with a single RGB camera. Transactions on Graphics (TOG), 36(4):44:1-44:14, 2017. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 249, + 288, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 249, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 53, + 249, + 288, + 304 + ], + "type": "text", + "content": "[58] Gyeongsik Moon and Kyoung Mu Lee. I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In European Conference on Computer Vision (ECCV), volume 12352, pages 752-768, 2020. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 304, + 288, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 304, + 288, + 348 + ], + "spans": [ + { + "bbox": [ + 53, + 304, + 288, + 348 + ], + "type": "text", + "content": "[59] Lea Müller, Ahmed A. A. Osman, Siyu Tang, Chun-Hao P. Huang, and Michael J. Black. On self-contact and human pose. In Computer Vision and Pattern Recognition (CVPR), pages 9990-9999, 2021. 1, 2, 3, 4, 5, 6, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 349, + 288, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 349, + 288, + 381 + ], + "spans": [ + { + "bbox": [ + 53, + 349, + 288, + 381 + ], + "type": "text", + "content": "[60] NVIDIA PhysX: A scalable multi-platform physics simulation solution. https://developer.nvidia.com/physx-sdk.1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 382, + 288, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 382, + 288, + 414 + ], + "spans": [ + { + "bbox": [ + 53, + 382, + 288, + 414 + ], + "type": "text", + "content": "[61] Yi-Chung Pai. Movement termination and stability in standing. Exercise and sport sciences reviews, 31(1):19-25, 2003. 2,4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 415, + 288, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 415, + 288, + 470 + ], + "spans": [ + { + "bbox": [ + 53, + 415, + 288, + 470 + ], + "type": "text", + "content": "[62] Priyanka Patel, Chun-Hao P Huang, Joachim Tesch, David T Hoffmann, Shashank Tripathi, and Michael J Black. AGORA: Avatars in geography optimized for regression analysis. In Computer Vision and Pattern Recognition (CVPR), pages 13468-13478, 2021. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 471, + 288, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 471, + 288, + 535 + ], + "spans": [ + { + "bbox": [ + 53, + 471, + 288, + 535 + ], + "type": "text", + "content": "[63] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In Computer Vision and Pattern Recognition (CVPR), pages 10975-10985, 2019. 2, 3, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 536, + 288, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 536, + 288, + 581 + ], + "spans": [ + { + "bbox": [ + 53, + 536, + 288, + 581 + ], + "type": "text", + "content": "[64] Xue Bin Peng, Pieter Abbeel, Sergey Levine, and Michiel van de Panne. DeepMimic: Example-guided deep reinforcement learning of physics-based character skills. Transactions on Graphics (TOG), 37(4):1-14, 2018. 
2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 582, + 288, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 582, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 53, + 582, + 288, + 635 + ], + "type": "text", + "content": "[65] Davis Rempe, Tolga Birdal, Aaron Hertzmann, Jimei Yang, Srinath Sridhar, and Leonidas J. Guibas. HuMoR: 3D human motion model for robust pose estimation. In International Conference on Computer Vision (ICCV), pages 11468-11479, 2021. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 636, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 636, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 53, + 636, + 288, + 690 + ], + "type": "text", + "content": "[66] Davis Rempe, Leonidas J. Guibas, Aaron Hertzmann, Bryan Russell, Ruben Villegas, and Jamei Yang. Contact and human dynamics from monocular video. In European Conference on Computer Vision (ECCV), volume 12350, pages 71-87, 2020. 1, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "text", + "content": "[67] Ralph Tyrell Rockafellar. Convex analysis. Princeton university press, 2015. 4" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 72, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 312, + 72, + 547, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 72, + 547, + 117 + ], + "spans": [ + { + "bbox": [ + 312, + 72, + 547, + 117 + ], + "type": "text", + "content": "[68] Grégory Rogez, James Steven Supancic, and Deva Ramanan. Understanding everyday hands in action from RGB-D images. In International Conference on Computer Vision (ICCV), pages 3889-3897, 2015. 2, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 118, + 547, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 118, + 547, + 172 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 547, + 172 + ], + "type": "text", + "content": "[69] Yu Rong, Takaaki Shiratori, and Hanbyul Joo. FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In International Conference on Computer Vision Workshops (ICCVw), pages 1749-1759, 2021. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 174, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 174, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 312, + 174, + 547, + 217 + ], + "type": "text", + "content": "[70] Nadine Rueegg, Shashank Tripathi, Konrad Schindler, Michael J. Black, and Silvia Zuffi. BITE: Beyond priors for improved three-D dog pose estimation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 219, + 547, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 219, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 312, + 219, + 547, + 274 + ], + "type": "text", + "content": "[71] Jesse Scott, Bharadwaj Ravichandran, Christopher Funk, Robert T Collins, and Yanxi Liu. From image to stability: Learning dynamics from human pose. In European Conference on Computer Vision (ECCV), volume 12368, pages 536-554, 2020. 
2, 3, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 275, + 547, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 275, + 547, + 330 + ], + "spans": [ + { + "bbox": [ + 312, + 275, + 547, + 330 + ], + "type": "text", + "content": "[72] Mingyi Shi, Kfir Aberman, Andreas Aristidou, Taku Komura, Dani Lischinski, Daniel Cohen-Or, and Baoquan Chen. MotioNet: 3D human motion reconstruction from monocular video with skeleton consistency. Transactions on Graphics (TOG), 40(1):1:1-1:15, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 331, + 546, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 331, + 546, + 375 + ], + "spans": [ + { + "bbox": [ + 312, + 331, + 546, + 375 + ], + "type": "text", + "content": "[73] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, Patrick Pérez, and Christian Theobalt. Neural monocular 3D human motion capture with physical awareness. Transactions on Graphics (TOG), 40(4), 2021. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 376, + 547, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 376, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 312, + 376, + 547, + 421 + ], + "type": "text", + "content": "[74] Soshi Shimada, Vladislav Golyanik, Weipeng Xu, and Christian Theobalt. PhysCap: Physically plausible monocular 3D motion capture in real time. Transactions on Graphics (TOG), 39(6):235:1-235:16, 2020. 1, 2, 3, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 422, + 546, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 422, + 546, + 465 + ], + "spans": [ + { + "bbox": [ + 312, + 422, + 546, + 465 + ], + "type": "text", + "content": "[75] Yu Sun, Qian Bao, Wu Liu, Yili Fu, Michael J. Black, and Tao Mei. Monocular, one-stage, regression of multiple 3D people. In International Conference on Computer Vision (ICCV), pages 11179-11188, 2021. 1, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 467, + 547, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 467, + 547, + 520 + ], + "spans": [ + { + "bbox": [ + 312, + 467, + 547, + 520 + ], + "type": "text", + "content": "[76] Yu Sun, Yun Ye, Wu Liu, Wenpeng Gao, Yili Fu, and Tao Mei. Human mesh recovery from monocular images via a skeleton-disentangled representation. In International Conference on Computer Vision (ICCV), pages 5348-5357, 2019. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 522, + 547, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 522, + 547, + 555 + ], + "spans": [ + { + "bbox": [ + 312, + 522, + 547, + 555 + ], + "type": "text", + "content": "[77] Yating Tian, Hongwen Zhang, Yebin Liu, and limin Wang. Recovering 3D human mesh from monocular images: A survey. arXiv:2203.01923, 2022. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 312, + 557, + 547, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 557, + 547, + 610 + ], + "spans": [ + { + "bbox": [ + 312, + 557, + 547, + 610 + ], + "type": "text", + "content": "[78] Shashank Tripathi, Siddhant Ranade, Ambrish Tyagi, and Amit K. Agrawal. PoseNet3D: Learning temporally consistent 3D human pose via knowledge distillation. In International Conference on 3D Vision (3DV), pages 311-321, 2020. 
3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 312, + 613, + 547, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 613, + 547, + 667 + ], + "spans": [ + { + "bbox": [ + 312, + 613, + 547, + 667 + ], + "type": "text", + "content": "[79] Dimitrios Tzionas, Luca Ballan, Abhilash Srikantha, Pablo Aponte, Marc Pollefeys, and Juergen Gall. Capturing hands in action using discriminative salient points and physics simulation. International Journal of Computer Vision (IJCV), 118:172-193, 2016. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 312, + 669, + 546, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 669, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 669, + 546, + 713 + ], + "type": "text", + "content": "[80] Ruben Villegas, Duygu Ceylan, Aaron Hertzmann, Jimei Yang, and Jun Saito. Contact-aware retargeting of skinned motion. In International Conference on Computer Vision (ICCV), pages 9720-9729, 2021. 3" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4723" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 72, + 288, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 288, + 127 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 288, + 127 + ], + "type": "text", + "content": "[81] Timo von Marcard, Roberto Henschel, Michael J. Black, Bodo Rosenhahn, and Gerard Pons-Moll. Recovering accurate 3D human pose in the wild using IMUs and a moving camera. In European Conference on Computer Vision (ECCV), volume 11214, pages 614-631, 2018. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 129, + 288, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 129, + 288, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 129, + 288, + 172 + ], + "type": "text", + "content": "[82] Marek Vondrak, Leonid Sigal, and Odest Chadwicke Jenkins. Physical simulation for probabilistic motion tracking. In Computer Vision and Pattern Recognition (CVPR), pages 1-8, 2008. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 174, + 288, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 174, + 288, + 217 + ], + "spans": [ + { + "bbox": [ + 53, + 174, + 288, + 217 + ], + "type": "text", + "content": "[83] Eric W. Weisstein. Triangle point picking. https://mathworld.wolfram.com/TrianglePointPicking.html, 2014. From MathWorld - A Wolfram Web Resource. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 219, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 219, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 53, + 219, + 288, + 262 + ], + "type": "text", + "content": "[84] Zhenzhen Weng and Serena Yeung. Holistic 3D human and scene mesh estimation from single view images. In Computer Vision and Pattern Recognition (CVPR), pages 334-343, 2020. 
3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 264, + 288, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 264, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 53, + 264, + 288, + 297 + ], + "type": "text", + "content": "[85] David A. Winter. A.B.C. (Anatomy, Biomechanics and Control) of balance during standing and walking. Waterloo Biomechanics, 1995. 2, 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 298, + 288, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 298, + 288, + 330 + ], + "spans": [ + { + "bbox": [ + 53, + 298, + 288, + 330 + ], + "type": "text", + "content": "[86] David A. Winter. Human balance and posture control during standing and walking. Gait & Posture, 3(4):193-214, 1995. 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 332, + 288, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 332, + 288, + 376 + ], + "spans": [ + { + "bbox": [ + 53, + 332, + 288, + 376 + ], + "type": "text", + "content": "[87] Donglai Xiang, Hanbyul Joo, and Yaser Sheikh. Monocular total capture: Posing face, body, and hands in the wild. In Computer Vision and Pattern Recognition (CVPR), pages 10957-10966, 2019. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 377, + 288, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 377, + 288, + 431 + ], + "spans": [ + { + "bbox": [ + 53, + 377, + 288, + 431 + ], + "type": "text", + "content": "[88] Donglai Xiang, Fabian Prada, Chenglei Wu, and Jessica Hodgins. MonoClothCap: Towards temporally coherent clothing capture from monocular RGB video. In International Conference on 3D Vision (3DV), pages 322-332, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 434, + 288, + 487 + ], + "type": "text", + "content": "[89] Kevin Xie, Tingwu Wang, Umar Iqbal, Yunrong Guo, Sanja Fidler, and Florian Shkurti. Physics-based human motion estimation and synthesis from videos. In International Conference on Computer Vision (ICCV), pages 11532-11541, 2021. 3, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 53, + 489, + 288, + 533 + ], + "type": "text", + "content": "[90] Xianghui Xie, Bharat Lal Bhatnagar, and Gerard Pons-Moll. CHORE: Contact, human and object reconstruction from a single RGB image. In European Conference on Computer Vision (ECCV), 2022. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 534, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 534, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 53, + 534, + 288, + 588 + ], + "type": "text", + "content": "[91] Yuliang Xiu, Jinlong Yang, Xu Cao, Dimitrios Tzionas, and Michael J. Black. ECON: Explicit Clothed humans Optimized via Normal Integration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2023. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 590, + 288, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 590, + 288, + 645 + ], + "spans": [ + { + "bbox": [ + 53, + 590, + 288, + 645 + ], + "type": "text", + "content": "[92] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. GHUM & GHUML: Generative 3D human shape and articulated pose models. In Computer Vision and Pattern Recognition (CVPR), pages 6183-6192, 2020. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 647, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 647, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 53, + 647, + 288, + 689 + ], + "type": "text", + "content": "[93] Masanobu Yamamoto and Katsutoshi Yagishita. Scene constraints-aided tracking of human body. In Computer Vision and Pattern Recognition (CVPR), pages 151–156, 2000. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 53, + 691, + 288, + 713 + ], + "type": "text", + "content": "[94] Hongwei Yi, Chun-Hao P. Huang, Shashank Tripathi, Lea Hering, Justus Thies, and Michael J. Black. MIME: Human-" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 545, + 95 + ], + "type": "text", + "content": "aware 3D scene generation. In Computer Vision and Pattern Recognition (CVPR), June 2023. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 96, + 547, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 96, + 547, + 129 + ], + "spans": [ + { + "bbox": [ + 312, + 96, + 547, + 129 + ], + "type": "text", + "content": "[95] Ye Yuan and Kris Kitani. 3D ego-pose estimation via imitation learning. In European Conference on Computer Vision (ECCV), volume 11220, pages 735–750, 2018. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 130, + 547, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 130, + 547, + 174 + ], + "spans": [ + { + "bbox": [ + 312, + 130, + 547, + 174 + ], + "type": "text", + "content": "[96] Ye Yuan, Shih-En Wei, Tomas Simon, Kris Kitani, and Jason Saragih. SimPoE: Simulated character control for 3D human pose estimation. In Computer Vision and Pattern Recognition (CVPR), pages 7159–7169, 2021. 1, 2, 3, 6, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 175, + 547, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 175, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 312, + 175, + 547, + 239 + ], + "type": "text", + "content": "[97] Andrei Zanfir, Eduard Gabriel Bazavan, Hongyi Xu, William T Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Weakly supervised 3D human pose and shape reconstruction with normalizing flows. In European Conference on Computer Vision (ECCV), pages 465-481, 2020. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 243, + 547, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 243, + 547, + 297 + ], + "spans": [ + { + "bbox": [ + 312, + 243, + 547, + 297 + ], + "type": "text", + "content": "[98] Andrei Zanfir, Elisabella Maroiniu, and Cristian Sminchisescu. Monocular 3D pose and shape estimation of multiple people in natural scenes – the importance of multiple scene constraints. In Computer Vision and Pattern Recognition (CVPR), pages 2148–2157, 2018. 3, 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 298, + 547, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 298, + 547, + 352 + ], + "spans": [ + { + "bbox": [ + 312, + 298, + 547, + 352 + ], + "type": "text", + "content": "[99] Ailing Zeng, Lei Yang, Xuan Ju, Jiefeng Li, Jianyi Wang, and Qiang Xu. SmoothNet: A plug-and-play network for refining human poses in videos. In European Conference on Computer Vision (ECCV), volume 13665, pages 625-642, 2022. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 354, + 547, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 547, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 547, + 398 + ], + "type": "text", + "content": "[100] Wang Zeng, Wanli Ouyang, Ping Luo, Wentao Liu, and Xiaogang Wang. 3D human mesh regression with dense correspondence. In Computer Vision and Pattern Recognition (CVPR), 2020. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 399, + 547, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 547, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 547, + 443 + ], + "type": "text", + "content": "[101] Cha Zhang and Tsuhan Chen. Efficient feature extraction for 2d/3d objects in mesh representation. In Proceedings 2001 International Conference on Image Processing (Cat. No. 01CH37205), volume 3, pages 935-938. IEEE, 2001. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 444, + 547, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 547, + 499 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 547, + 499 + ], + "type": "text", + "content": "[102] Hongwen Zhang, Yating Tian, Xinchi Zhou, Wanli Ouyang, Yebin Liu, Limin Wang, and Zhenan Sun. PyMAF: 3D human pose and shape regression with pyramidal mesh alignment feedback loop. In International Conference on Computer Vision (ICCV), pages 11426-11436, 2021. 1, 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 500, + 547, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 547, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 547, + 544 + ], + "type": "text", + "content": "[103] Jianfeng Zhang, Dongdong Yu, Jun Hao Liew, Xuecheng Nie, and Jiashi Feng. Body meshes as points. In Computer Vision and Pattern Recognition (CVPR), pages 546-556, 2021. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 545, + 547, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 547, + 600 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 547, + 600 + ], + "type": "text", + "content": "[104] Jason Y. Zhang, Sam Pepose, Hanbyul Joo, Deva Ramanan, Jitendra Malik, and Angjoo Kanazawa. Perceiving 3D human-object spatial arrangements from a single image in the wild. 
In European Conference on Computer Vision (ECCV), volume 12357, pages 34-51, 2020. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 601, + 547, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 547, + 645 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 547, + 645 + ], + "type": "text", + "content": "[105] Siwei Zhang, Yan Zhang, Federica Bogo, Marc Pollefeys, and Siyu Tang. Learning motion priors for 4D human body capture in 3D scenes. In International Conference on Computer Vision (ICCV), pages 11343-11353, 2021. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 647, + 547, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 547, + 689 + ], + "type": "text", + "content": "[106] Tianshu Zhang, Buzhen Huang, and Yangang Wang. Object-occluded human shape and pose estimation from a single color image. In Computer Vision and Pattern Recognition (CVPR), pages 7374–7383, 2020. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "text", + "content": "[107] Ce Zheng, Wenhan Wu, Chen Chen, Taojiannan Yang, Sijie Zhu, Ju Shen, Nasser Kehtarnavaz, and Mubarak Shah." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 315, + 766 + ], + "type": "text", + "content": "4724" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 162 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 72, + 72, + 287, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 72, + 287, + 94 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 287, + 94 + ], + "type": "text", + "content": "Deep learning-based human pose estimation: A survey. arXiv:2012.13392, 2022.3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[108] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Computer Vision and Pattern Recognition (CVPR), pages 5745-5753, 2019. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 162 + ], + "type": "text", + "content": "[109] Yuxiao Zhou, Marc Habermann, Ikhsanul Habibie, Ayush Tewari, Christian Theobalt, and Feng Xu. 
Monocular real" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 162 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 331, + 73, + 547, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 73, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 331, + 73, + 547, + 105 + ], + "type": "text", + "content": "time full body capture with inter-part correlations. In Computer Vision and Pattern Recognition (CVPR), pages 4811-4822, 2021. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "text", + "content": "[110] Yuliang Zou, Jimei Yang, Duygu Ceylan, Jianming Zhang, Federico Perazzi, and Jia-Bin Huang. Reducing footskate in human motion reconstruction with ground contact constraints. In Winter Conference on Applications of Computer Vision (WACV), pages 459-468, 2020. 3, 6, 8" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "spans": [ + { + "bbox": [ + 295, + 757, + 314, + 766 + ], + "type": "text", + "content": "4725" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_content_list.json b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e726748f85d93c9ab1e20700d31857cdae4ceb6c --- /dev/null +++ b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_content_list.json @@ -0,0 +1,1833 @@ +[ + { + "type": "text", + "text": "3D Line Mapping Revisited", + "text_level": 1, + "bbox": [ + 344, + 130, + 627, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shaohui Liu $^{1}$ Yifan Yu $^{1}$ Rémi Pautrat $^{1}$ Marc Pollefeys $^{1,2}$ Viktor Larsson $^{3}$ \\\n $^{1}$ Department of Computer Science, ETH Zurich $^{2}$ Microsoft $^{3}$ Lund University", + "bbox": [ + 156, + 179, + 812, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 251, + 313, + 266 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In contrast to sparse keypoints, a handful of line segments can concisely encode the high-level scene layout, as they often delineate the main structural elements. In addition to offering strong geometric cues, they are also omnipresent in urban landscapes and indoor scenes. Despite their apparent advantages, current line-based reconstruction methods are far behind their point-based counterparts. In this paper we aim to close the gap by introducing LIMAP, a library for 3D line mapping that robustly and efficiently creates 3D line maps from multi-view imagery. This is achieved through revisiting the degeneracy problem of line triangulation, carefully crafted scoring and track building, and exploiting structural priors such as line coincidence, parallelism, and orthogonality. 
Our code integrates seamlessly with existing point-based Structure-from-Motion methods and can leverage their 3D points to further improve the line reconstruction. Furthermore, as a byproduct, the method is able to recover 3D association graphs between lines and points / vanishing points (VPs). In thorough experiments, we show that LIMAP significantly outperforms existing approaches for 3D line mapping. Our robust 3D line maps also open up new research directions. We show two example applications: visual localization and bundle adjustment, where integrating lines alongside points yields the best results. Code is available at https://github.com/cvg/limap.", + "bbox": [ + 75, + 282, + 473, + 661 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 691, + 209, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The ability to estimate 3D geometry and build sparse maps via Structure-from-Motion (SfM) has become ubiquitous in 3D computer vision. These frameworks enable important tasks such as building maps for localization [60], providing initial estimates for dense reconstruction and refinement [65], and novel view synthesis [45, 48]. Currently, the field is dominated by point-based methods in which 2D keypoints are detected, matched, and triangulated into 3D maps [20, 64]. These sparse maps offer a compact scene representation, only reconstructing the most distinctive points.", + "bbox": [ + 75, + 718, + 470, + 868 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While there have been tremendous progress in point-based reconstruction methods, they still struggle in scenes", + "bbox": [ + 76, + 869, + 470, + 900 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b7c6006e8d87ae2a4d4b06e0b0a0163695abd45b888839868b1d0e9dc9ff0164.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 247, + 678, + 337 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/42c688333ec5f636caf7e8f17d09de082462e00986e8a477cbffcb053d7a618a.jpg", + "image_caption": [ + "(a) Point mapping [13,64]", + "(c) Line-point association", + "Figure 1. In this paper, we propose a robust pipeline for mapping 3D lines (b), which offers stronger geometric clues about the scene layout compared to the widely used point mapping (a). Part of the success of our pipeline attributes to the modeling of structural priors such as coincidence (c), and parallelism / orthogonality (d). The corresponding 3D association graphs between lines and points / vanishing points (VPs) are also recovered from our system as a byproduct. The degree-1 point and degree-2 junctions are colored in blue and red respectively in (c), while parallel lines associated with the same VP are colored the same in (d)." 
+ ], + "image_footnote": [], + "bbox": [ + 537, + 359, + 679, + 448 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8a261521be3287e665917dbe7fcad46e562a47fcdcbe60356f850c172e6956ce.jpg", + "image_caption": [ + "(b) Line mapping" + ], + "image_footnote": [], + "bbox": [ + 720, + 247, + 861, + 340 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/063cf27913e9fb84ea7d5b41014e9b218b4e790037689a747a42081cdb737421.jpg", + "image_caption": [ + "(d) Line-VP association" + ], + "image_footnote": [], + "bbox": [ + 718, + 359, + 859, + 448 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "where it is difficult to detect and match sufficiently many stable keypoints, such as in indoor areas. On the contrary, these man-made scenes contain abundant lines, e.g. in walls, windows, doors, or ceilings. Furthermore, lines exhibit higher localization accuracy with less uncertainty in pixels [16]. Last but not least, lines appear in highly structured patterns, often satisfying scene-wide geometric constraints such as co-planarity, coincidence (line intersections), parallelism, and orthogonality. In practice, lines suffer from different issues, such as poor endpoint localization and partial occlusion. However, recent line detectors and matchers are bridging the gap of performance between points and lines [25, 46, 84], making it timely to revisit the line reconstruction problem.", + "bbox": [ + 496, + 627, + 893, + 823 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite their rich geometric properties and abundance in the real world, there exist very few line-based reconstruction methods in the literature [22,23,44,77]. In practical applications, they have also not achieved the same level of success as their point-based counterparts. We believe this is due to", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21445", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "several intrinsic challenges specific to line mapping:", + "bbox": [ + 76, + 90, + 421, + 106 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Inconsistent endpoints. Due to partial occlusion, lines often have inconsistent endpoints across images.", + "- Line fragmentation. In each image there might be multiple line segments that belong to the same line in 3D. This makes the process of creating track associations more complex compared to building 3D point tracks.", + "- No two-view geometric verification. While point matches can be verified in two views via epipolar geometry, lines require at least three views to filter.", + "- Degenerate configurations. In practice line triangulation is more prone to unstable configurations (see Fig. 8), e.g. becoming degenerate whenever the line is parallel with the camera motion (i.e. to epipolar lines).", + "- Weaker descriptor-based matching. 
State-of-the-art descriptors for line segments are far behind their point-based counterparts, putting more emphasis on geometric verification and filtering during reconstruction." + ], + "bbox": [ + 76, + 107, + 467, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper we aim to reduce the gap between point-based and line-based mapping solutions. We propose a new robust mapping method, LIMAP, that integrates seamlessly into existing open-source point-based SfM frameworks [64, 67, 80]. By sharing the code with the research community we hope to enable more research related to lines; both for low-level tasks (such as improving line segment detection and description) and for integrating lines into higher-level tasks (such as visual localization or dense reconstruction). In particular, we make the following contributions in the paper:", + "bbox": [ + 75, + 363, + 468, + 513 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We build a new line mapping system that reliably reconstructs 3D line segments from multi-view RGB images. Compared to previous approaches, our line maps are significantly more complete and accurate, while having more robust 2D-3D track associations.", + "- We achieve this by automatically identifying and exploiting structural priors such as coincidence (junctions) and parallelism. Our technical contribution spans all stages of line mapping including triangulating proposals, scoring, track building, and joint optimization, with 3D line-point / VP association graphs output as a byproduct.", + "- The framework is flexible such that researchers can easily change components (e.g. detectors, matchers, vanishing point estimators, etc.) or integrate additional sensor data (e.g. depth maps or other 3D information).", + "- We are the first to go beyond small test sets by quantitatively evaluating on both synthetic and real datasets to benchmark the performance, with hundreds of images for each scene, in which LIMAP consistently and significantly outperforms existing approaches.", + "- Finally, we demonstrate the usefulness of having robust line maps by showing improvement over purely point-based methods in tasks such as visual localization and bundle adjustment in Structure-from-Motion." + ], + "bbox": [ + 76, + 520, + 468, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 89, + 638, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Line Detection and Matching. Detecting 2D line segments conventionally relies on grouping image gradients [5, 75]. To improve the robustness and repeatability, learning-based line detectors were later proposed to tackle the problem of wireframe parsing [25, 43, 82, 83, 88, 90]. Recent deep detectors [26, 46, 81] manage to achieve impressive results for detecting general line segments. Matching of the detected line segments is often based on comparing either handcrafted [8, 74, 76, 85] or learning-based [1, 34, 46, 73, 84] descriptors. Some recent methods also exploit point-line [14, 15] and line-junction-line structures [38, 39] to improve matching results, yet still not reaching the reliability level of advanced point matchers [58, 70]. Our method can leverage any line detector and matcher, and is robust to outliers.", + "bbox": [ + 496, + 114, + 890, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Line Reconstruction. 
As a seminal work, Bartoli and Sturm [6, 7] proposed a full SfM pipeline for line segments, later improved by Schindler [63] with Manhattan-world assumption [12]. Jain et al. [27] proposed to impose global topological constraints between neighboring lines, which were further explored in [51, 53, 54] to build wireframe models. Some learning-based methods [42, 90] were introduced as well to predict 3D wireframes. Hofer et al. [21-23] proposed checking weak epipolar constraints over exhaustive matches and graph clustering, and introduced the Line3D++ software (referred as L3D++ in this paper), which remains the top choice [17, 42] for acquiring 3D line maps so far. Recently, ELSR [77] employed planes and points to guide the matching. However, all prior work mainly shows qualitative results and provides quantitative evaluation only on relatively small image sets [27, 69]. In this paper, we set up a quantitative evaluation on benchmarks with hundreds of images, where our proposed system significantly surpasses prior work by improving all stages in the mapping pipeline.", + "bbox": [ + 496, + 340, + 890, + 628 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Line-based Applications. The resulting 3D line maps can be used for many downstream applications. [23] advocates the complementary nature of line reconstruction for structure visualization. Some incremental line-based SfM systems are introduced in [24,44,86]. To improve quality and robustness, recent methods [18,19,40,41,49,78,91] jointly employ point and line features in SLAM. While their line maps are often noisy and incomplete, noticeable improvement has been achieved in the accuracy of the recovered camera motion. There has also been development on VP estimation [9,37,50, 87] and solvers for joint point-line pose estimation [4,52,72, 89]. Recently, promising performance in visual localization has been achieved by combining point and line features in a refinement step [17]. In this paper, we show that our line maps can benefit multiple applications such as localization, SfM, and MVS (Sec. J in supp.). In particular, we present very competitive results on point-line visual localization.", + "bbox": [ + 496, + 643, + 890, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21446", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e2035426f0870c1a56ee692ad1a0dd00604fc1258dd5014566dea5d66113f6f4.jpg", + "image_caption": [ + "Figure 2. Overview. Given a set of posed images and optional 3D points, we associate nearby points to lines, match the lines, triangulate them with 4 different strategies, score 3D line proposals, build line tracks, jointly optimize all features, before obtaining our final reconstruction." + ], + "image_footnote": [], + "bbox": [ + 88, + 87, + 885, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. The Proposed 3D Line Mapping Pipeline", + "text_level": 1, + "bbox": [ + 76, + 265, + 442, + 282 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We now present our proposed pipeline for 3D line mapping. Our method takes as input a set of images with 2D line segments from any existing line detectors. We assume the camera pose for each image is available (e.g. from SfM/SLAM), and optionally we can also leverage a 3D point cloud (e.g. obtained from point-based SfM). 
The pipeline consists of three main steps:", + "bbox": [ + 75, + 290, + 470, + 397 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Proposal Generation (Sec. 3.1): For each 2D line segment, we generate a set of 3D line segment proposals.", + "- Scoring and Track Association (Sec. 3.2): Considering multi-view consistency, we score each proposal, select the best candidate for each 2D line, and associate them into a set of 3D line tracks.", + "- Joint Refinement (Sec. 3.3): Finally, we jointly perform non-linear refinement over the 3D line tracks along with 3D points and VP directions, integrating additional structural priors as soft constraints." + ], + "bbox": [ + 76, + 402, + 470, + 563 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 2 shows an overview of the overall pipeline. In the following sections, we detail each of the three main steps.", + "bbox": [ + 75, + 571, + 468, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "By design our pipeline is robust to scale changes and we use the same hyper-parameters for all experiments across datasets, which are provided in Sec. F.2 in the supp.", + "bbox": [ + 75, + 602, + 470, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Generating 3D Line Segment Proposals", + "text_level": 1, + "bbox": [ + 76, + 656, + 415, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The first step is to generate a set of 3D line proposals for each 2D line segment. Given a segment in an image, we use any existing line finder to retrieve the top $K$ line matches in each of the $n_v$ closest images. Using the top $K$ line matches instead of a single match increases the chance of getting a correct match, while wrong matches will be filtered out in subsequent steps.", + "bbox": [ + 75, + 679, + 468, + 784 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $(\\pmb{x}_1^r,\\pmb{x}_2^r)\\in \\mathbb{R}^3\\times \\mathbb{R}^3$ be the two endpoints (in homogeneous coordinates normalized by the intrinsics) for the reference line segment that we wish to generate proposals for. For ease of notation, we let the world-coordinate system align with the reference view. The endpoints of the 3D line proposals that we generate can all be written as", + "bbox": [ + 75, + 784, + 470, + 875 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {X} _ {1} = \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}, \\quad \\boldsymbol {X} _ {2} = \\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 174, + 885, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for some values of $\\lambda_1, \\lambda_2 \\in \\mathbb{R}$ . Having the 3D endpoints of all proposals lie on the camera rays of the 2D endpoints simplifies the scoring procedure in the second step (Sec. 3.2).", + "bbox": [ + 498, + 266, + 893, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 Line Triangulation", + "text_level": 1, + "bbox": [ + 500, + 329, + 686, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For each matched 2D line segment $(\\pmb{x}_1^m, \\pmb{x}_2^m)$ we generate one proposal via algebraic line triangulation. Let $(R^m, t^m)$ be the camera pose of the matched view. 
We can then solve linearly for the endpoint ray depths $\\lambda_i$ as", + "bbox": [ + 498, + 353, + 893, + 414 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\boldsymbol {x} _ {1} ^ {m} \\times \\boldsymbol {x} _ {2} ^ {m}\\right) ^ {T} \\left(R ^ {m} \\left(\\lambda_ {i} \\boldsymbol {x} _ {i} ^ {r}\\right) + \\boldsymbol {t} ^ {m}\\right) = 0, \\quad i = 1, 2. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 422, + 892, + 441 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proposals are then filtered with cheirality checks (positive $\\lambda$ ) and degeneracy check via the angle between ray $x_{i}^{r}$ and $\\ell_{m} = x_{1}^{m} \\times x_{2}^{m}$ . Note that line triangulation becomes inherently unstable close to degenerate configurations when $\\ell_{m}^{T} R^{m} x_{i}^{r} = 0$ , where we get zero or infinite solutions from (2). Geometrically, this happens when the line is parallel with the epipolar plane: If $\\ell_{m}^{T} t^{m} \\neq 0$ they have no intersection, otherwise they intersect fully and we get infinite solutions $\\ell_{m} \\sim t^{m} \\times R^{m} x_{i}^{r} = E x_{i}^{r}$ , i.e. the line segment coincides with the epipolar line from $x_{i}^{r}$ . This issue is further illustrated in Figure 8. Since we solve for each $\\lambda_{i}$ independently, the triangulation problem can have zero, one, or two degenerate endpoints. We term the case with one degenerate endpoint as a weakly degenerate one, and the case with two degenerate endpoints as fully degenerate. In contrast to the point case, two-view line triangulation is minimal such that any solution fits the measurements exactly with zero error, preventing filtering with 2D reprojection error at this stage.", + "bbox": [ + 496, + 450, + 893, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 Point-Line Association", + "text_level": 1, + "bbox": [ + 500, + 739, + 710, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To obtain meaningful proposals in degenerate cases, we leverage additional geometric information coming from either points or associated vanishing points (VPs). 2D-3D point correspondences can either come from a point-based SfM model or be triangulated from matched endpoints/junctions. For each 2D line segment, we associate all 2D points within a fixed pixel threshold and thereby associate with their corresponding 3D points. For each image, we also estimate a set of VPs and their association to 2D lines using JLinkage [71].", + "bbox": [ + 496, + 763, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21447", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.3 Point-guided Line Triangulation", + "text_level": 1, + "bbox": [ + 76, + 90, + 357, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We now generate a second set of proposals for each 2D line segment with the assistance of the associated 2D-3D point correspondences and vanishing points. In the following parts we present three different methods. M1 employs multiple associated 3D points so it is stable for all cases including the fully degenerate ones, while M2 and M3 with one known point / VP can help generate stable proposals in weakly degenerate cases, which are more common in practice. Cheirality tests are applied to all proposals with respect to both views.", + "bbox": [ + 75, + 114, + 472, + 263 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "M1. 
Multiple Points. For each matched line segment we generate one proposal by collecting all of the associated 3D points that are common between the reference and the match. On top of those common points, we fit a 3D line that is then projected onto two camera rays corresponding to $\\boldsymbol{x}_1^r$ and $\\boldsymbol{x}_2^r$ . M2. Line + Point. For each matched line segment we also generate one proposal for each shared 3D point. We first project the 3D point onto the plane spanned by $\\boldsymbol{x}_1^r$ and $\\boldsymbol{x}_2^r$ . We then aim to find a line that passes through the projection and minimizes the residuals in (2) to the matched line. This can be formulated as a quadratic optimization problem in the two endpoint depths $\\lambda = (\\lambda_1, \\lambda_2)$ with a single constraint:", + "bbox": [ + 75, + 266, + 472, + 446 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\boldsymbol {\\lambda} \\in \\mathbb {R} ^ {2}} \\boldsymbol {\\lambda} ^ {T} A \\boldsymbol {\\lambda} + \\boldsymbol {b} ^ {T} \\boldsymbol {\\lambda}, \\quad \\text {s . t .} \\quad \\boldsymbol {\\lambda} ^ {T} Q \\boldsymbol {\\lambda} + \\boldsymbol {q} ^ {T} \\boldsymbol {\\lambda} = 0. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 453, + 470, + 479 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Due to the low-dimensionality of the problem, a closed-form solution can be derived by reducing it to a univariate quartic polynomial. We show the full derivation in Sec. B in supp. M3. Line + VP. Each VP corresponds to a 3D direction. For each associated VP, we generate one proposal based on its direction (again projected onto the plane spanned by $\\boldsymbol{x}_1^r$ and $\\boldsymbol{x}_2^r$ ). This gives a single linear constraint on the ray depths,", + "bbox": [ + 75, + 486, + 468, + 593 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\boldsymbol {v} \\times \\left(\\boldsymbol {x} _ {1} ^ {r} \\times \\boldsymbol {x} _ {2} ^ {r}\\right)\\right) ^ {T} \\left(\\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r} - \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}\\right) = 0. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 599, + 470, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pmb{v} \\in \\mathbb{R}^3$ is the VP. Using the constraint, we then solve for $\\lambda = (\\lambda_1, \\lambda_2)$ by minimizing the two residuals of (2) in a least squares sense. Note that $\\pmb{v}$ can either come from the reference image, or from a matched line in another image.", + "bbox": [ + 75, + 627, + 468, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Extension: Line Mapping Given Depth Maps. The proposal generation step can be improved when each image has a corresponding depth map (e.g. from an RGB-D sensor), which can be leveraged with robust line fitting to generate the 3D line proposals. Refer to Sec. E in our supplementary material for more details and results.", + "bbox": [ + 75, + 686, + 470, + 777 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Proposal Scoring and Track Association", + "text_level": 1, + "bbox": [ + 76, + 786, + 419, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At this point, each 2D line segment $l$ in image $I$ is associated with a set $\\mathcal{K}$ of 3D line segment proposals (stemming from the top $K$ line matches and various triangulations) for each neighboring image $J$ . 
We describe in the following how we select the best 3D line proposal for each 2D line segment, and associate these lines into tracks. For each of these", + "bbox": [ + 75, + 809, + 472, + 898 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9ab2fc94295adffedb7ce2f593bfb4279a84e9e444b7a516c1ed6d8fb933c756.jpg", + "image_caption": [ + "(a) Perspective distance" + ], + "image_footnote": [], + "bbox": [ + 506, + 87, + 617, + 152 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/7ad7199b9cb8013334d75749a5e64dc34611bc011aeb77d11ba26b0d2abc2a08.jpg", + "image_caption": [ + "(b) Overlap score", + "Figure 3. Scoring methods. We propose three novel line scoring measures that are scale-invariant and handle different line lengths." + ], + "image_footnote": [], + "bbox": [ + 622, + 87, + 754, + 152 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8aaf83de13b438ccf8867e72a0c85829d85a7b58a298e6c58675ff6c85fc2c0e.jpg", + "image_caption": [ + "(c) InnerSeg distance" + ], + "image_footnote": [], + "bbox": [ + 759, + 87, + 890, + 152 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "steps, we leverage different scoring methods quantifying the distance between two 3D line segments $(L_1, L_2)$ . These distances are usually computed symmetrically and averaged, and can be obtained both in 3D and in 2D by projecting each 3D line into the other view. We start by presenting two classic ones, and then define our three novel line distances (one for 3D proposal selection and two for track building).", + "bbox": [ + 496, + 212, + 893, + 319 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Angular distance: angle between $L_{1}$ and $L_{2}$ .", + "- Perpendicular distance: maximum orthogonal distance of the endpoints of $L_{1}$ to the infinite line spanned by $L_{2}$ ." + ], + "bbox": [ + 500, + 330, + 893, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D Proposal Selection. To select best 3D candidate for each 2D line, we score each proposal $L_{i}$ by measuring its consistency with the others. Here we introduce a new distance:", + "bbox": [ + 496, + 392, + 893, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Perspective distance: assuming the endpoints of $L_{1}$ and $L_{2}$ are on the same rays as in Fig. 3(a), the distance is defined as the endpoint distances, divided by the ray depths $d_{s}, d_{e}$ of the endpoints of $L_{1}$ in image 1. This score can filter out ill-posed triangulations (refer to Sec. F.3 in supp. for detailed discussions), while remaining scale-invariant.", + "bbox": [ + 500, + 446, + 893, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This new distance, together with the angular distance in 2D and 3D, and the perpendicular distance in 2D, have different scales. In order to aggregate them together, we associate a scaling factor $\\tau_r$ to each distance $r$ and get a normalized score $s_n = e^{-(r / \\tau_r)^2} \\in (0,1]$ . Denoting by $\\mathcal{S}$ the set of all the corresponding normalized scores and $\\mathbb{I}$ the indicator function, the score between $L_1$ and $L_2$ becomes", + "bbox": [ + 496, + 547, + 893, + 655 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ns \\left(L _ {1}, L _ {2}\\right) = \\min _ {s _ {n} \\in S} \\left(s _ {n} \\cdot \\mathbb {1} _ {s _ {n} \\geq 0. 5}\\right) \\in \\{0 \\} \\cup [ 0. 5, 1 ]. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 666, + 890, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now equipped with unique score per line pair, we can consider all the neighboring 3D line candidates $L_{j}^{k}$ coming from the neighboring image $J$ and proposal $k$ . The consistency score is defined by summing the best score from each image:", + "bbox": [ + 496, + 700, + 893, + 761 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ns _ {c} \\left(L _ {i}\\right) = \\sum_ {J \\in \\mathcal {N} _ {I}} \\max _ {k \\in \\mathcal {K}} s \\left(L _ {i}, L _ {J} ^ {k}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 771, + 890, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{N}_I$ is the set of neighboring images of $I$ . The best 3D line candidate for each 2D line segment $l$ is then selected as the proposal with the highest score: $L = \\operatorname{argmax}_{L_i} s_c(L_i)$ . If the score is less than 1.0, i.e. the best candidate has less than two supports from neighboring views, we ignore this 2D line segment in the subsequent track building process.", + "bbox": [ + 496, + 809, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21448", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Track Building. At this point, each 2D segment has been assigned a unique 3D line (its best 3D line candidate). The goal of this step is to gather these 2D segments into line tracks. For this, we form a graph where the 2D segments are nodes and all initial line matches are edges. We aim to prune edges in the graph such that the connected 2D segments share similar 3D assignments. We propose two new line scoring measures that can cope with different endpoint configurations and variable scales across images.", + "bbox": [ + 75, + 90, + 472, + 227 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Overlap score: we project $L_{1}$ orthogonally onto $L_{2}$ , clip the projected endpoints to the endpoints of $L_{2}$ if they fall outside of $L_{2}$ to get segment $\\Pi(L_{1})$ , and compare the ratio of lengths to a threshold $\\tau_{o}$ : $\\mathbb{1}_{\\frac{|\\Pi(L_1)|}{|L_2|} \\geq \\tau_o}$ (see Fig. 3(b)).", + "- InnerSeg distance: the endpoints of $L_{1}$ are perpendicularly unprojected to $L_{2}$ . If they fall outside of $L_{2}$ , we clip them to the closest endpoint of $L_{2}$ . By doing this in both directions, we can define two inner segments (see Fig. 3(c)), and the InnerSeg distance as the maximum distance between their endpoints. To make this measure scale-invariant, we additionally divide it by a scale factor $\\sigma = \\frac{\\min(d_{1}, d_{2})}{f}$ , where $d_{j}$ is the depth of the mid-point of $L_{j}$ in image $J$ and $f$ is the focal length. This encodes how far the mid-point can move in 3D before reaching 1 pixel error in the image (detailed in Sec. F.3 in supp.)." + ], + "bbox": [ + 76, + 233, + 470, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We then convert the InnerSeg distance computed in 3D to a normalized score as in the previous paragraph, and combine it with the overlap score in 2D and 3D and previous scores using (5). Given these pairwise scores of 3D lines, we can now prune edges whose score is below a threshold $t_f = 0.5$ . 
The connected components of the resulting graph yield the line tracks, ignoring components with less than 3 nodes.", + "bbox": [ + 75, + 477, + 468, + 582 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For each track, we then re-estimate a single 3D line segment. Using the set of endpoints from the 3D assignments of all nodes in the track, we apply Principal Component Analysis (PCA) and use the principal eigenvector and mean 3D point to estimate the infinite 3D line. We then project all endpoints on this infinite line to get the new 3D endpoints.", + "bbox": [ + 75, + 583, + 470, + 674 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Joint Optimization of Lines and Structures", + "text_level": 1, + "bbox": [ + 76, + 681, + 442, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we perform non-linear refinement on the acquired 3D lines with their track information. The straightforward approach is to perform geometric refinement on the reprojection error. With the 2D point-line association available, we can formulate a joint optimization problem by including additional structural information. The energy to minimize can be written as follows:", + "bbox": [ + 75, + 704, + 470, + 809 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nE = \\sum_ {p} E _ {P} (p) + \\sum_ {l} E _ {L} (l) + \\sum_ {(p, l)} E _ {P L} (p, l), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 114, + 829, + 470, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $E_{P}$ and $E_{L}$ are the data terms, and $E_{PL}$ encodes the 3D association between lines and points / VPs. In particular,", + "bbox": [ + 76, + 869, + 472, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$E_{P}$ is the 2D point reprojection error as in regular bundle adjustment [64]. The association energy is softly weighted (as discussed later) and optimized with robust Huber loss [3]. Each line is converted into a 4-DoF infinite line with Plücker coordinate [7] for optimization and converted back to line segments by unprojecting its 2D supports. Each vanishing point is parameterized with a 3-dimensional homogeneous vector. Refer to Sec. A in supp. for details on efficient computation with minimal parameterization.", + "bbox": [ + 496, + 90, + 893, + 226 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Geometric Refinement. The data term of each line track is also defined on its 2D reprojections. In particular, we measure the 2D perpendicular distance weighted by the angle consistency, which we robustly equip with Cauchy loss [3]:", + "bbox": [ + 496, + 227, + 893, + 287 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nE _ {L} (l) = \\sum_ {k} w _ {\\angle} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right) \\cdot e _ {\\text {p e r p}} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 294, + 892, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $e_{\\mathrm{perp}}$ is the perpendicular distance, $L_{k}$ is the 2D projection of the 3D segment, $\\ell_{k}$ are the 2D line segments, and $w_{\\angle}$ is the exponential of one minus the cosine of the 2D angle between the projected and the observed line.", + "bbox": [ + 496, + 333, + 893, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Soft Association between Lines and Points. 
For each pair of 3D line and 3D point with their track information, we can estimate how likely they are spatially associated by traversing the 2D association graph (described in Sec. 3.1.2) of their supports. Specifically, we count the number of associations among the 2D supports of the line track and point track, and keep pairs with at least three 2D associations. The 3D association energy $E_{PL}$ , defined on the surviving pairs, is formulated as the 3D point-line distance weighted by the number of 2D associations on their supports.", + "bbox": [ + 496, + 393, + 893, + 545 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Soft Association between Lines and VPs. Same as the point case, we can also build a soft association problem between lines and VPs. First, we acquire 3D VP tracks by transitively propagating line correspondences from the 3D line tracks. Then, we count the number of associations among the 2D supports for each pair of 3D line and VP track. The 3D line-VP association energy is defined as the sine of the direction angle between the 3D line and the VP, implicitly enforcing parallelism. Furthermore, we add regularizations to the nearly orthogonal VP pairs to enforce orthogonality of different line groups. Refer to Sec. C in supp. for details.", + "bbox": [ + 496, + 545, + 895, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 723, + 633, + 739 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. Our whole library is implemented in C++ with Python bindings [28]. The triangulation and scoring can be run in parallel for each node, enabling scalability to large datasets. We use $n_v = 20$ visual neighbors and keep the top $K = 10$ line matches. We provide all the values of thresholds and scaling factors in Sec. F.2 in supp.", + "bbox": [ + 496, + 748, + 893, + 840 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Line Mapping", + "text_level": 1, + "bbox": [ + 500, + 847, + 648, + 863 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To validate the effectiveness of our system, we set up an evaluation benchmark to quantify the quality of the recon", + "bbox": [ + 500, + 869, + 893, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21449", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7053f5a94953ca193118ab9388432590b5fef4d36220be03888482e502ae6c06.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Line typeMethodR1R5R10P1P5P10# supports
LSD [75]L3D++ [23]37.0153.1218.853.180.890.6(14.8 / 16.8)
ELSR [77]13.959.796.555.472.682.2(N/A / N/A)
Ours48.6185.2251.360.182.490.0(16.4 / 20.5)
SOLD2 [46]L3D++ [23]36.9107.5132.867.286.893.2(13.2 / 20.4)
Ours54.3151.1191.269.884.690.0(16.5 / 38.7)
", + "bbox": [ + 83, + 88, + 464, + 172 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/aea1ae949101420f4bac694bb61506ec315a362011dc0e5fe62eb421075699ec.jpg", + "table_caption": [ + "Table 1. Line reconstruction on Hypersim [55] with LSD [75] and SOLD2 [46] lines. $R\\tau$ and $P\\tau$ are reported at $1\\mathrm{\\;{mm}},5\\mathrm{\\;{mm}},{10}$ mm along with the average number of supporting images/lines." + ], + "table_footnote": [], + "table_body": "
MethodR5R10R50P5P10P50# supports
L3D++ [23]373.7831.62783.640.654.585.9(8.8 / 9.3)
ELSR [77]139.2322.51308.038.548.074.5(N/A / N/A)
Ours (line-only)472.11058.83720.746.858.486.1(10.3 / 11.8)
Ours508.31154.54179.546.056.983.7(10.4 / 12.0)
", + "bbox": [ + 93, + 239, + 455, + 308 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Line reconstruction on train split of Tanks and Temples [32] with LSD [75] lines. $R\\tau$ and $P\\tau$ are reported at $5\\mathrm{\\;{mm}},{10}\\mathrm{\\;{mm}}$ , ${50}\\mathrm{\\;{mm}}$ along with the average number of supporting images/lines.", + "bbox": [ + 76, + 318, + 470, + 361 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "structed 3D line maps. As there are no ground truth (GT) 3D lines, we evaluate the 3D line mapping with either GT mesh models or point clouds. We use the following metrics:", + "bbox": [ + 76, + 378, + 468, + 424 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Length recall (in meters) at $\\tau (R\\tau)$ : sum of the lengths of the line portions within $\\tau$ mm from the GT model.", + "- Inlier percentage at $\\tau (P\\tau)$ : the percentage of tracks that are within $\\tau$ mm from the GT model.", + "- Average supports: average number of image supports and 2D line supports across all line tracks." + ], + "bbox": [ + 76, + 425, + 470, + 515 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the following, we compare our system with two state-of-the-art methods as baselines: L3D++ [23] and ELSR [77], using two line detectors: the traditional LSD detector [75] and the learning-based SOLD2 [46]. For ELSR [77], we convert the input into VisualSfM [80] format and use code from the authors (only supporting LSD [75]).", + "bbox": [ + 75, + 516, + 468, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our first evaluation is run on the first eight scenes of the Hypersim dataset [55], composed of 100 images each, and is reported in Tab. 1. For both detectors, we reconstruct much more complete line maps with better or comparable precision than the competitors, while also exhibiting significantly higher quality of track information. This abundant track association is beneficial particularly for line-based applications such as visual localization [17]. After discussing with the authors of ELSR, it seems that their method does not achieve satisfactory results due to a lack of point and plane features.", + "bbox": [ + 75, + 606, + 468, + 756 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We further evaluate all three methods on the train split of the Tanks and Temples dataset [32] without Ignatius as it has no line structures. As SOLD2 [46] is trained for indoor images, we only use LSD [75]. Since the provided point cloud was cleaned to focus only on the main subject, we compute its bounding box, extend it by one meter, and only evaluate lines inside this region. This prevents incorrectly penalizing correct lines that are far away from the main scene,", + "bbox": [ + 75, + 757, + 470, + 878 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ccb021e2a27d19fa77bda20fabb5aee20a3900e76a021bda5915e472624bd5af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 87, + 887, + 247 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/da17233417b9be6bbb3c5e74e999c89b1a874c9cfcf2e8246a1b0b0d33ace9e1.jpg", + "image_caption": [ + "Figure 4. Top row: L3D++ [23]. Bottom row: Ours. Both systems are run on Horse and Family from [32]. We show two different views on the main scene of Horse." 
+ ], + "image_footnote": [], + "bbox": [ + 503, + 301, + 890, + 398 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7bb06070b83fe3941bfbdfac8de8bce99f064bfde89ecc59a067f3427b81cc33.jpg", + "image_caption": [ + "Figure 5. Qualitative results on Hypersim [55] and Tanks and Temples [32]. On Barn we jointly visualize our results and the aligned ground truth point cloud.", + "Figure 6. Qualitative results of the recovered line-point and line-VP association graphs (visualized similarly as in Fig. 1)." + ], + "image_footnote": [], + "bbox": [ + 506, + 453, + 887, + 522 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "which our method is particularly good at thanks to our scale-invariant design (refer to Sec. G in supp.). Tab. 2 shows the results, where our methods significantly improve the mapping quality across the board. Fig. 4 shows qualitative comparison between our method and L3D++ [23]. Our results exhibit better completeness, have less noisy lines that are flying around, and achieve significantly more robust reconstructions of subtle details (e.g. on the ground). More examples of our produced line maps are shown in Fig. 5.", + "bbox": [ + 496, + 568, + 890, + 703 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As an additional output of our system, junction structures and line-line relations such as parallelism and orthogonality are discovered, as shown in Fig. 6. This directly comes from the line-point and line-VP soft associations of Sec. 3.3. From the recovered structures, we can clearly perceive the scene and easily recognize the main Manhattan directions [12].", + "bbox": [ + 496, + 704, + 890, + 794 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To demonstrate the scalability of the proposed system, we also run our method on two large-scale datasets: Aachen (6,697 images) [61, 62] and Rome city (16,179 images) [2, 67, 68]. Fig. 7 shows that our method produces reliable line maps with clear structures. Note that the camera poses from Bundler [67] on Rome city are far from perfect, while our mapping still works reasonably well. The efficiency", + "bbox": [ + 496, + 795, + 892, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "Line typeTriangulationR1R5R10P1P5P10# supportsLSDEndpoints27.6101.4138.058.283.592.1(13.0 / 13.2)[75]Line48.3187.0257.459.281.989.8(15.8 / 19.1)SOLD2Endpoints27.382.8106.568.284.590.9(12.3 / 19.9)[46]Line50.8143.5180.874.486.991.2(15.1 / 32.2)", + "bbox": [ + 81, + 414, + 467, + 488 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3dbb284bc5bade4aacf7ed7b71237c9bec067f5d13d91686975790ed75df2eda.jpg", + "table_caption": [ + "Table 3. Comparison between endpoint and line triangulation on Hypersim [55]. While being more stable at triangulation, the endpoints are often unmatched between line pairs." + ], + "table_footnote": [], + "table_body": "
LineM1M2M3R1R5R10P1P5P10# supports
50.8143.5180.874.486.991.2(15.1 / 32.2)
24.972.595.865.981.288.5(11.3 / 15.7)
37.7116.8152.671.084.289.7(13.8 / 25.8)
51.5146.9185.471.785.490.1(14.9 / 31.2)
51.3146.4186.473.485.790.5(15.8 / 35.6)
51.4145.4184.974.186.190.6(16.5 / 38.7)
", + "bbox": [ + 89, + 556, + 459, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "bottleneck is in line detection and matching (we use SOLD2 [46] descriptors), while the rest of the mapping takes only $\\sim 10$ minutes on Aachen [61, 62]. The time complexity of our system is nearly linear with the number of images.", + "bbox": [ + 75, + 700, + 470, + 763 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. More Insights and Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 771, + 387, + 787 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Line Triangulation. To study the stability of the triangulation, we perform a small test on a stereo pair from AdelaideRMF [79] on the uncertainty (measured by the largest singular value of the covariance) of the triangulated 3D segments. We further run a synthetic experiment by generating random lines on a plane orthogonal to the stereo pair, and plot the uncertainty of point and line triangulations with", + "bbox": [ + 75, + 794, + 472, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f48cd0902ca8f45d89dd5dc393ff534a107a181646ae8ab8a62ab9ada13359b3.jpg", + "table_caption": [ + "Table 4. Ablation study on different types of triangulation proposals (defined in Sec. 3.1.3) on Hypersim [55] with SOLD2 [46]." + ], + "table_footnote": [], + "table_body": "
Line typeMethodR1R5P1P5# supports
LSD [75]L3D++ [23]37.0153.153.180.8(14.8 / 16.8)
Ours (line) w/ [23] scoring48.6186.056.580.6(14.4 / 16.8)
Ours (line) w/ [23] merging41.2158.259.682.5(15.6 / 16.7)
Ours (line) w/ exhaustive46.7177.257.680.9(16.8 / 20.8)
Ours (line)48.3187.059.281.9(15.8 / 19.1)
SOLD2 [46]L3D++ [23]36.9107.567.286.8(13.2 / 20.4)
Ours (line) w/ [23] scoring45.8133.272.685.9(15.0 / 31.1)
Ours (line) w/ [23] merging37.7113.470.584.5(13.3 / 23.9)
Ours (line) w/ exhaustive48.9139.772.985.7(16.2 / 36.9)
Ours (line)50.8143.574.486.9(15.1 / 32.2)
", + "bbox": [ + 503, + 214, + 890, + 348 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Studies on different components of our method with only line-line proposals against L3D++ [23].", + "bbox": [ + 498, + 358, + 892, + 387 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "respect to the angle of the lines with the baseline (refer to Sec. D in supp. for details). The results in Fig. 8 show that when the matched line is nearly parallel to the epipolar line, the line triangulation becomes degenerate with exploding uncertainty, while triangulating the endpoints is significantly more stable. Thus, combining points and VPs from the 2D association is beneficial to improve the stability of the proposals. However, the endpoints are generally not consistent across line matches in practice and need to be complemented with line-line triangulation. This can be verified in Tab. 3 where the performance significantly drops when we change line triangulation into endpoint triangulation.", + "bbox": [ + 496, + 400, + 893, + 582 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further ablate our four types of triangulation for generating proposals. Results in Tab. 4 show that integrating points and VPs enhance the 3D line maps, in particular significantly improving the track quality. Another surprising fact is that the third line in the table, relying only on points and line + point triangulation, already achieves better results than the prior baselines in Tab. 1. Employing all four types of proposals obtains the best trade-off.", + "bbox": [ + 496, + 582, + 893, + 703 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Scoring and Track Building. We first study the effects of using exhaustive line matching as in L3D++ [23]. To enable direct comparison we only use line triangulation proposals. Results are shown in Tab. 5. While there are more proposals generated from the exhaustive matches, both the recall and precision decrease by a noticeable margin. This is probably due to the large number of wrong proposals misleading the scoring process. Nevertheless, our method with exhaustive matches still works significantly better than L3D++ [23]. To further study the effects of the proposed distance measurements at scoring and track building (merging), we re-implement the ones proposed in L3D++ [23] and perform direct comparison. Both our scoring and track", + "bbox": [ + 496, + 703, + 895, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21451", + "bbox": [ + 478, + 944, + 517, + 957 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9eacc632f14b0ca475d1276cbd1ef5a00b7b55230b60446006bc9f706028ff3e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodR1R5R10P1P5P10# supports
Line-only w/o refine43.5135.8180.175.187.292.2(15.1 / 32.2)
Line-only w/ geom alone50.8143.5180.874.486.991.2(15.1 / 32.2)
w/o refine46.5146.0189.776.888.993.3(16.5 / 38.7)
w/ geom alone51.4145.4184.974.186.190.6(16.5 / 38.7)
w/ joint optimization54.3151.1191.269.884.690.0(16.5 / 38.7)
", + "bbox": [ + 81, + 88, + 467, + 178 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/85770fc6ad5dacb8a7a2ef730b78b0f7903a9d5f89fc1736e0ea363e9babcb4a.jpg", + "table_caption": [ + "Table 6. Line refinement on Hypersim [55] with SOLD2 [46]." + ], + "table_footnote": [], + "table_body": "
DatasetHLoc2[56,57]PtLine [17]Ours
Cambridge [30]7.0 / 0.13 / 44.07.4 / 0.13 / 43.56.7 / 0.12 / 46.1
7Scenes [66]3.3 / 1.08 / 73.03.3 / 1.09 / 72.73.0 / 1.00 / 78.0
", + "bbox": [ + 94, + 220, + 452, + 268 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cf577f24e36a9d81671111e9d544f20d7c20934b3bbab435b3ecd9fd36a4098a.jpg", + "image_caption": [ + "HLoc [56, 57]", + "Figure 9. Line-assisted Visual localization on Stairs from 7Scenes [66]. Blue: 2D points/lines; Green/Red: Projected 3D points/lines." + ], + "image_footnote": [], + "bbox": [ + 81, + 339, + 166, + 412 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/90947af990730977988c4c5147b295f1fe3c7d01b3235d38c4427cb9488e05da.jpg", + "image_caption": [ + "Ours w/ LIMAP" + ], + "image_footnote": [], + "bbox": [ + 171, + 339, + 256, + 412 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1677ad2107fef78001d3ed5afed02d80544ff5d999294ca948356c38707cb0de.jpg", + "table_caption": [ + "Table 7. Visual localization on Cambridge [31] and 7Scenes [66]. We report the median translation and rotation errors in cm and degrees, and the pose accuracy $(\\%)$ at $5\\mathrm{cm} / 5$ deg threshold. All metrics are averaged across all scenes of each dataset." + ], + "table_footnote": [], + "table_body": "
(T / R) err. ↓Acc. ↑
HLoc [57]5.2 / 1.4646.8
HLoc [57] w/ depth4.7 / 1.2553.4
PtLine [17]4.8 / 1.3351.9
Ours w/L3D++ [23]4.1 / 1.1460.8
Ours w/LIMAP3.7 / 1.0271.1
", + "bbox": [ + 259, + 339, + 468, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "building are significantly better, especially when equipped with SOLD2 [46] which produces more structured lines.", + "bbox": [ + 75, + 470, + 468, + 501 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Joint Optimization. Finally, we ablate the proposed joint optimization in our pipeline. First, we remove the point-line association and only apply the geometric residuals (reprojection error). Results in Tab. 6 show that the geometric refinement improves significantly when the proposals solely come from line triangulation. However, when adding additional proposals from points and VPs, it contributes marginally and even misleads some lines that are generated from points and VPs but poorly conditioned for lines (R10 decreases). When integrated with joint optimization with soft association, the recall is further improved noticeably, while sacrificing a bit on the precision. It is worth pointing out that the joint optimization also enables the byproduct of junction structures and line-line relations (e.g. in Fig. 6).", + "bbox": [ + 75, + 501, + 468, + 713 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Applications", + "text_level": 1, + "bbox": [ + 76, + 723, + 210, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Line-Assisted Visual Localization. We build a hybrid visual localization with both points and lines on top of the acquired 3D line maps. Specifically, we first build point maps as in HLoc [56, 57] and line maps with our proposed method. Then, we match points and lines respectively and get 2D-3D correspondences from the track information in the 3D maps. Given these correspondences, we combine", + "bbox": [ + 75, + 746, + 468, + 853 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1c52a762c89341a3e49e1093c4eac1343247e9aa87de7347e9b8f87537a266bd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Med. error ↓AUC @ (1° / 3° / 5°) ↑
COLMAP [64]0.18877.3 / 89.0 / 91.6
COLMAP [64] + LIMAP refinement0.14682.9 / 91.2 / 93.0
", + "bbox": [ + 501, + 88, + 890, + 135 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 8. Joint bundle adjustment of points and lines on Hypersim [55]. Relative pose errors are measured on all image pairs.", + "bbox": [ + 498, + 145, + 890, + 174 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "four minimal solvers [33, 47, 89]: P3P, P2P1LL, P1P2LL, P3LL from PoseLib [35], together in a hybrid RANSAC framework [10, 59] with local optimization [11, 36] to get the final 6-DoF pose (refer to Sec. H in supp. for details). This also enables direct comparison since only using P3P [47] corresponds to the point-alone baseline similar to HLoc [56, 57]. We also compare with the post-refinement of PtLine [17] that optimizes over the initial point-alone predictions.", + "bbox": [ + 496, + 180, + 893, + 301 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Results in Tab. 7 show that our localization system achieves consistently better results than the point-alone baseline both indoors [66] and outdoors [30], validating the effectiveness of employing 3D line maps for visual localization. In Fig. 9 we show more detailed results from the Stairs scene from 7Scenes [66] as it is one of the most challenging ones. Integrating lines significantly benefits the alignment of the reprojected structures, improving the pose accuracy from 46.8 to 71.1. Also, with our localization pipeline, using the map built from our proposed method is better than from L3D++ [23] by a noticeable margin, again demonstrating the advantages of our proposed line mapping system.", + "bbox": [ + 496, + 301, + 893, + 482 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Refining Structure-from-Motion. With the acquired 3D line maps built from a roughly correct point-based structure-from-motion model, e.g., COLMAP [64], we can use the 3D lines with their track information to refine the input camera poses with joint optimization of points and lines. To verify this, we run COLMAP [64] with SuperPoint [13] on the first eight scenes of Hypersim [55], run the proposed line mapping on top of it, and perform joint bundle adjustment to refine poses and intrinsics. We report the relative pose evaluation of all image pairs [29]. Tab. 8 shows that the joint point-line refinement consistently benefits the accuracy of the camera poses, in particular improving AUC@ $1^{\\circ}$ by 5.6.", + "bbox": [ + 496, + 482, + 893, + 664 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 676, + 617, + 691 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduce LIMAP: a library for robust 3D line mapping from multi-view imagery. Extensive experiments show that our method, by improving all stages of the reconstruction pipeline, produces significantly more complete 3D lines, with much higher quality of track association. As a byproduct, the method can also recover 3D association graphs between lines and points / VPs. We further show the usefulness of 3D line maps on visual localization and bundle adjustment. Future directions include incremental / real-time structure mapping, distinguishing structural lines from textural lines for wireframe modeling, and exploiting higher-level structures and relations for downstream applications.", + "bbox": [ + 496, + 702, + 893, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. V. 
Larsson was supported by ELLIIT.", + "bbox": [ + 500, + 883, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "2Up to the date of submission, the COLMAP model [64] used by HLoc [56, 57] does not consider radial distortion from the VisualSfM [80] model. So our results are better than the original ones.", + "bbox": [ + 75, + 862, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21452", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In 3DV, 2021. 2", + "[2] Sameer Agarwal, Yasutaka Furukawa, Noah Snavely, Ian Simon, Brian Curless, Steven M Seitz, and Richard Szeliski. Building rome in a day. Communications of the ACM, 54(10):105-112, 2011. 6, 7", + "[3] Sameer Agarwal and Keir Mierle. Ceres solver. http://ceres-solver.org.5", + "[4] Sérgio Agostinho, João Gomes, and Alessio Del Bue. Cvxpl: A unified convex solution to the absolute pose estimation problem from point and line correspondences. arXiv preprint arXiv:1907.10545, 2019. 2", + "[5] Cuneyt Akinlar and Cihan Topal. Edlines: Real-time line segment detection by edge drawing (ed). In IEEE International Conference on Image Processing, 2011. 2", + "[6] Adrien Bartoli, Mathieu Coquerelle, and Peter Sturm. A framework for pencil-of-points structure-from-motion. In ECCV, 2004. 2", + "[7] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 100(3):416-441, 2005. 2, 5", + "[8] Herbert Bay, Vittorio Ferraris, and Luc Van Gool. Wide-baseline stereo matching with line segments. In CVPR, 2005. 2", + "[9] Jean-Charles Bazin, Yongduek Seo, Cédric Demonceaux, Pascal Vasseur, Katsushi Ikeuchi, Inso Kweon, and Marc Pollefeys. Globally optimal line clustering and vanishing point estimation in manhattan world. In CVPR, 2012. 2", + "[10] Federico Camposeco, Andrea Cohen, Marc Pollefeys, and Torsten Sattler. Hybrid camera pose estimation. In CVPR, 2018. 8", + "[11] Ondrej Chum, Jiri Matas, and Josef Kittler. Locally optimized ransac. In Joint Pattern Recognition Symposium, pages 236-243, 2003. 8", + "[12] James Coughlan and Alan L Yuille. The manhattan world assumption: Regularities in scene statistics which enable bayesian inference. In NeurIPS, 2000. 2, 6", + "[13] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Computer Vision and Pattern Recognition Workshops (CVPRW), 2018. 1, 8", + "[14] Bin Fan, Fuchao Wu, and Zhanyi Hu. Line matching leveraged by point correspondences. In CVPR, 2010. 2", + "[15] Bin Fan, Fuchao Wu, and Zhanyi Hu. Robust line matching through line-point invariants. Pattern Recognition, 45(2):794-805, 2012. 2", + "[16] Wolfgang Förstner and Bernhard P Wrobel. Photogrammetric computer vision. Springer, 2016. 1", + "[17] Shuang Gao, Jixiang Wan, Yishan Ping, Xudong Zhang, Shuzhou Dong, Yuchen Yang, Haikuan Ning, Jijunnan Li, and Yandong Guo. Pose refinement with joint optimization of visual points and lines. In IROS, 2022. 
2, 6, 8" + ], + "bbox": [ + 78, + 116, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[18] Ruben Gomez-Ojeda, Francisco-Angel Moreno, David Zuniga-Noël, Davide Scaramuzza, and Javier Gonzalez-Jimenez. Pl-slam: A stereo slam system through the combination of points and line segments. IEEE Transactions on Robotics, 35(3):734-746, 2019. 2", + "[19] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Pl-vio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2", + "[20] Jared Heinly, Johannes L. Schonberger, Enrique Dunn, and Jan-Michael Frahm. Reconstructing the world in six days. In CVPR, 2015. 1", + "[21] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In 3DV, 2014. 2", + "[22] Manuel Hofer, Michael Maurer, and Horst Bischof. Line3d: Efficient 3d scene abstraction for the built environment. In German Conference on Pattern Recognition, 2015. 1, 2", + "[23] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Computer Vision and Image Understanding (CVIU), 157:167-178, 2017. 1, 2, 6, 7, 8", + "[24] Aleksander Holynski, David Geraghty, Jan-Michael Frahm, Chris Sweeney, and Richard Szeliski. Reducing drift in structure from motion using extended features. In 3DV, 2020. 2", + "[25] Kun Huang, Yifan Wang, Zihan Zhou, Tianjiao Ding, Shenghua Gao, and Yi Ma. Learning to parse wireframes in images of man-made environments. In CVPR, 2018. 1, 2", + "[26] Siyu Huang, Fangbo Qin, Pengfei Xiong, Ning Ding, Yijia He, and Xiao Liu. Tp-lsd: Tri-points based line segment detector. In ECCV, 2020. 2", + "[27] Arjun Jain, Christian Kurz, Thorsten Thormahlen, and Hans-Peter Seidel. Exploiting global connectivity constraints for reconstruction of 3d line segments from images. In CVPR, 2010. 2", + "[28] Wenzel Jakob, Jason Rhinelander, and Dean Moldovan. pybind11 - seamless operability between c++11 and python. https://github.com/pybind/pybind11.5", + "[29] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. IJCV, 129(2):517-547, 2021. 8", + "[30] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In CVPR, 2017. 8", + "[31] Alex Kendall, Matthew Grimes, and Roberto Cipolla. PoseNet: A convolutional network for real-time 6-DoF camera relocalization. In ICCV, 2015. 8", + "[32] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017. 6", + "[33] Zuzana Kukelova, Jan Heller, and Andrew Fitzgibbon. Efficient intersection of three quadrics and applications in computer vision. In CVPR, 2016. 8", + "[34] Manuel Lange, Fabian Schweinfurth, and Andreas Schilling. Dld: A deep learning based line descriptor for line feature matching. In IROS, 2019. 2" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21453", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[35] Viktor Larsson. PoseLib - Minimal Solvers for Camera Pose Estimation. https://github.com/vlarsson/ PoseLib.8", + "[36] Karel Lebeda, Jiri Matas, and Ondrej Chum. 
Fixing the Locally Optimized RANSAC. In BMVC, 2012. 8", + "[37] Haoang Li, Ji Zhao, Jean-Charles Bazin, Wen Chen, Zhe Liu, and Yun-Hui Liu. Quasi-globally optimal and efficient vanishing point estimation in Manhattan world. In ICCV, 2019. 2", + "[38] Kai Li, Jian Yao, and Xiaohu Lu. Robust line matching based on ray-point-ray structure descriptor. In ACCV, 2014. 2", + "[39] Kai Li, Jian Yao, Xiaohu Lu, Li Li, and Zhichao Zhang. Hierarchical line matching based on line-junction-line structure descriptor and local homography estimation. Neurocomputing, 184:207-220, 2016. 2", + "[40] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters, 7(2):1518-1525, 2022. 2", + "[41] Hyunjun Lim, Yeeun Kim, Kwangik Jung, Sumin Hu, and Hyun Myung. Avoiding degeneracy for monocular visual slam with point and line features. In ICRA, 2021. 2", + "[42] Yicheng Luo, Jing Ren, Xuefei Zhe, Di Kang, Yajing Xu, Peter Wonka, and Linchao Bao. Lc2wf: learning to construct 3d building wireframes from 3d line clouds. In BMVC, 2022. 2", + "[43] Quan Meng, Jiakai Zhang, Qiang Hu, Xuming He, and Jingyi Yu. Lgnn: A context-aware line segment detector. In ACM International Conference on Multimedia, 2020. 2", + "[44] Branislav Micusik and Horst Wildenauer. Structure from motion with line segments under relaxed endpoint constraints. IJCV, 124(1):65-79, 2017. 1, 2", + "[45] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1", + "[46] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In CVPR, 2021. 1, 2, 6, 7, 8", + "[47] Mikael Persson and Klas Nordberg. Lambda twist: An accurate fast robust perspective three point (p3p) solver. In ECCV, 2018. 8", + "[48] Francesco Pittaluga, Sanjeev J Koppal, Sing Bing Kang, and Sudipta N Sinha. Revealing scenes by inverting structure from motion reconstructions. In CVPR, 2019. 1", + "[49] Albert Pumarola, Alexander Vakhitov, Antonio Agudo, Alberto Sanfeliu, and Francese Moreno-Noguer. Pl-slam: Realtime monocular visual slam with points and lines. In ICRA, 2017. 2", + "[50] Yiming Qian and James H. Elder. A reliable online method for joint estimation of focal length and camera rotation. In ECCV, 2022. 2", + "[51] Srikumar Ramalingam, Michel Antunes, Dan Snow, Gim Hee Lee, and Sudeep Pillai. Line-sweep: Cross-ratio for wide-baseline matching and 3d reconstruction. In CVPR, 2015. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Srikumar Ramalingam, Sofien Bouaziz, and Peter Sturm. Pose estimation using both points and lines for geolocation. In ICRA, 2011. 2", + "[53] Siddhant Ranade and Srikumar Ramalingam. Novel single view constraints for Manhattan 3d line reconstruction. In 3DV, 2018. 2", + "[54] Jing Ren, Biao Zhang, Bojian Wu, Jianqiang Huang, Lubin Fan, Maks Ovsjanikov, and Peter Wonka. Intuitive and efficient roof modeling for reconstruction and synthesis. In ACM SIGGRAPH Asia, 2021. 2", + "[55] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M. Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 
6, 7, 8", + "[56] Paul-Edouard Sarlin. Visual localization made easy with hloc. https://github.com/cvg/Hierarchical-Localization/.8", + "[57] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In CVPR, 2019. 8", + "[58] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 2", + "[59] Torsten Sattler et al. RansacLib - A Template-based *SAC Implementation. https://github.com/tsattler/RansacLib.8", + "[60] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2d-to-3d matching. In ICCV, 2011. 1", + "[61] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenberg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In CVPR, 2018. 6, 7", + "[62] Torsten Sattler, Tobias Weyand, Bastian Leibe, and Leif Kobbelt. Image retrieval for image-based localization revisited. In BMVC, 2012. 6, 7", + "[63] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2", + "[64] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 1, 2, 5, 8", + "[65] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 1", + "[66] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in RGB-D images. In CVPR, 2013. 8", + "[67] Noah Snavely, Steven M Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM SIGGRAPH, 2006. 2, 6, 7", + "[68] Noah Snavely, Steven M Seitz, and Richard Szeliski. Modeling the world from internet photo collections. *IJCV*, 80(2):189-210, 2008. 6, 7" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21454", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Christoph Strecha, Wolfgang Von Hansen, Luc Van Gool, Pascal Fua, and Ulrich Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In CVPR, 2008. 2", + "[70] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 2", + "[71] Roberto Toldo and Andrea Fusiello. Robust multiple structures estimation with j-linkage. In ECCV, 2008. 3", + "[72] Alexander Vakhitov, Jan Funke, and Francesc Moreno-Noguer. Accurate and linear time pose estimation from points and lines. In ECCV, 2016. 2", + "[73] Alexander Vakhitov and Victor Lempitsky. Learnable line segment descriptor for visual slam. IEEE Access, 7:39923-39934, 2019. 2", + "[74] Bart Verhagen, Radu Timofte, and Luc Van Gool. Scale-invariant line descriptors for wide baseline matching. In WACV, 2014. 2", + "[75] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. TPAMI, 32(4):722-732, 2008. 
2, 6, 7", + "[76] Zhiheng Wang, Fuchao Wu, and Zhanyi Hu. Msld: A robust descriptor for line matching. Pattern Recognition, 42(5):941-953, 2009. 2", + "[77] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In CVPR, 2022. 1, 2, 6", + "[78] Xinyu Wei, Jun Huang, and Xiaoyuan Ma. Real-time monocular visual slam by combining points and lines. In IEEE International Conference on Multimedia and Expo (ICME), 2019. 2", + "[79] Hoi Sim Wong, Tat-Jun Chin, Jin Yu, and David Suter. Dynamic and hierarchical multi-structure geometric model fitting. In ICCV, 2011. 7", + "[80] Changchang Wu. Visualsfm: A visual structure from motion system. http://www.cs.washington.edu/homes/ccwu/vsfm, 2011. 2, 6, 8", + "[81] Yifan Xu, Weijian Xu, David Cheung, and Zhuowen Tu. Line segment detection using transformers without edges. In CVPR, 2021. 2", + "[82] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In CVPR, 2019. 2", + "[83] Nan Xue, Tianfu Wu, Song Bai, Fudong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing. In CVPR, 2020. 2", + "[84] Sungho Yoon and Ayoung Kim. Line as a visual sentence: Context-aware line descriptor for visual localization. IEEE Robotics and Automation Letters, 6(4):8726-8733, 2021. 1, 2", + "[85] Lilian Zhang and Reinhard Koch. An efficient and robust line segment matching approach based on lbd descriptor and pairwise geometric consistency. Journal of Visual Communication and Image Representation, 24(7):794-805, 2013. 2", + "[86] Lilian Zhang and Reinhard Koch. Structure and motion from line correspondences: Representation, projection, initializa-" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tion and sparse bundle adjustment. Journal of Visual Communication and Image Representation, 25(5):904-915, 2014. 2", + "[87] Lilian Zhang, Huimin Lu, Xiaoping Hu, and Reinhard Koch. Vanishing point estimation and line classification in a Manhattan world with a unifying camera model. *IJCV*, 117, 2015. 2", + "[88] Ziheng Zhang, Zhengxin Li, Ning Bi, Jia Zheng, Jinlei Wang, Kun Huang, Weixin Luo, Yanyu Xu, and Shenghua Gao. Ppgnet: Learning point-pair graph for line segment detection. In CVPR, 2019. 2", + "[89] Lipu Zhou, Jiamin Ye, and Michael Kaess. A stable algebraic camera pose estimation for minimal configurations of 2d/3d point and line correspondences. In ACCV, 2018. 2, 8", + "[90] Yichao Zhou, Haozhi Qi, Yuexiang Zhai, Qi Sun, Zhili Chen, Li-Yi Wei, and Yi Ma. Learning to reconstruct 3d Manhattan wireframes from a single image. In ICCV, 2019. 2", + "[91] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In IROS, 2017. 
2" + ], + "bbox": [ + 503, + 92, + 893, + 373 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21455", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_model.json b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5cea61d7ec5e6b27875506ae237a99bb717c8ed3 --- /dev/null +++ b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_model.json @@ -0,0 +1,3082 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.345, + 0.131, + 0.628, + 0.154 + ], + "angle": 0, + "content": "3D Line Mapping Revisited" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.18, + 0.813, + 0.219 + ], + "angle": 0, + "content": "Shaohui Liu\\(^{1}\\) Yifan Yu\\(^{1}\\) Rémi Pautrat\\(^{1}\\) Marc Pollefeys\\(^{1,2}\\) Viktor Larsson\\(^{3}\\)\\\n\\(^{1}\\)Department of Computer Science, ETH Zurich \\(^{2}\\)Microsoft \\(^{3}\\)Lund University" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.252, + 0.314, + 0.267 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.284, + 0.474, + 0.662 + ], + "angle": 0, + "content": "In contrast to sparse keypoints, a handful of line segments can concisely encode the high-level scene layout, as they often delineate the main structural elements. In addition to offering strong geometric cues, they are also omnipresent in urban landscapes and indoor scenes. Despite their apparent advantages, current line-based reconstruction methods are far behind their point-based counterparts. In this paper we aim to close the gap by introducing LIMAP, a library for 3D line mapping that robustly and efficiently creates 3D line maps from multi-view imagery. This is achieved through revisiting the degeneracy problem of line triangulation, carefully crafted scoring and track building, and exploiting structural priors such as line coincidence, parallelism, and orthogonality. Our code integrates seamlessly with existing point-based Structure-from-Motion methods and can leverage their 3D points to further improve the line reconstruction. Furthermore, as a byproduct, the method is able to recover 3D association graphs between lines and points / vanishing points (VPs). In thorough experiments, we show that LIMAP significantly outperforms existing approaches for 3D line mapping. Our robust 3D line maps also open up new research directions. We show two example applications: visual localization and bundle adjustment, where integrating lines alongside points yields the best results. Code is available at https://github.com/cvg/limap." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.693, + 0.21, + 0.708 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.472, + 0.869 + ], + "angle": 0, + "content": "The ability to estimate 3D geometry and build sparse maps via Structure-from-Motion (SfM) has become ubiquitous in 3D computer vision. These frameworks enable important tasks such as building maps for localization [60], providing initial estimates for dense reconstruction and refinement [65], and novel view synthesis [45, 48]. Currently, the field is dominated by point-based methods in which 2D keypoints are detected, matched, and triangulated into 3D maps [20, 64]. These sparse maps offer a compact scene representation, only reconstructing the most distinctive points." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.901 + ], + "angle": 0, + "content": "While there have been tremendous progress in point-based reconstruction methods, they still struggle in scenes" + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.248, + 0.679, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.522, + 0.344, + 0.697, + 0.359 + ], + "angle": 0, + "content": "(a) Point mapping [13,64]" + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.36, + 0.68, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.525, + 0.452, + 0.694, + 0.467 + ], + "angle": 0, + "content": "(c) Line-point association" + }, + { + "type": "image", + "bbox": [ + 0.721, + 0.248, + 0.862, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.734, + 0.344, + 0.851, + 0.359 + ], + "angle": 0, + "content": "(b) Line mapping" + }, + { + "type": "image", + "bbox": [ + 0.72, + 0.361, + 0.861, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.452, + 0.871, + 0.466 + ], + "angle": 0, + "content": "(d) Line-VP association" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.479, + 0.895, + 0.619 + ], + "angle": 0, + "content": "Figure 1. In this paper, we propose a robust pipeline for mapping 3D lines (b), which offers stronger geometric clues about the scene layout compared to the widely used point mapping (a). Part of the success of our pipeline attributes to the modeling of structural priors such as coincidence (c), and parallelism / orthogonality (d). The corresponding 3D association graphs between lines and points / vanishing points (VPs) are also recovered from our system as a byproduct. The degree-1 point and degree-2 junctions are colored in blue and red respectively in (c), while parallel lines associated with the same VP are colored the same in (d)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.895, + 0.824 + ], + "angle": 0, + "content": "where it is difficult to detect and match sufficiently many stable keypoints, such as in indoor areas. On the contrary, these man-made scenes contain abundant lines, e.g. in walls, windows, doors, or ceilings. Furthermore, lines exhibit higher localization accuracy with less uncertainty in pixels [16]. Last but not least, lines appear in highly structured patterns, often satisfying scene-wide geometric constraints such as co-planarity, coincidence (line intersections), parallelism, and orthogonality. In practice, lines suffer from different issues, such as poor endpoint localization and partial occlusion. 
However, recent line detectors and matchers are bridging the gap of performance between points and lines [25, 46, 84], making it timely to revisit the line reconstruction problem." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Despite their rich geometric properties and abundance in the real world, there exist very few line-based reconstruction methods in the literature [22,23,44,77]. In practical applications, they have also not achieved the same level of success as their point-based counterparts. We believe this is due to" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21445" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.423, + 0.107 + ], + "angle": 0, + "content": "several intrinsic challenges specific to line mapping:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.108, + 0.468, + 0.136 + ], + "angle": 0, + "content": "- Inconsistent endpoints. Due to partial occlusion, lines often have inconsistent endpoints across images." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.137, + 0.468, + 0.195 + ], + "angle": 0, + "content": "- Line fragmentation. In each image there might be multiple line segments that belong to the same line in 3D. This makes the process of creating track associations more complex compared to building 3D point tracks." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.198, + 0.468, + 0.24 + ], + "angle": 0, + "content": "- No two-view geometric verification. While point matches can be verified in two views via epipolar geometry, lines require at least three views to filter." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.243, + 0.468, + 0.3 + ], + "angle": 0, + "content": "- Degenerate configurations. In practice line triangulation is more prone to unstable configurations (see Fig. 8), e.g. becoming degenerate whenever the line is parallel with the camera motion (i.e. to epipolar lines)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.303, + 0.468, + 0.36 + ], + "angle": 0, + "content": "- Weaker descriptor-based matching. State-of-the-art descriptors for line segments are far behind their point-based counterparts, putting more emphasis on geometric verification and filtering during reconstruction." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.108, + 0.468, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.364, + 0.47, + 0.514 + ], + "angle": 0, + "content": "In this paper we aim to reduce the gap between point-based and line-based mapping solutions. We propose a new robust mapping method, LIMAP, that integrates seamlessly into existing open-source point-based SfM frameworks [64, 67, 80]. By sharing the code with the research community we hope to enable more research related to lines; both for low-level tasks (such as improving line segment detection and description) and for integrating lines into higher-level tasks (such as visual localization or dense reconstruction). In particular, we make the following contributions in the paper:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.521, + 0.47, + 0.595 + ], + "angle": 0, + "content": "- We build a new line mapping system that reliably reconstructs 3D line segments from multi-view RGB images. Compared to previous approaches, our line maps are significantly more complete and accurate, while having more robust 2D-3D track associations." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.601, + 0.468, + 0.691 + ], + "angle": 0, + "content": "- We achieve this by automatically identifying and exploiting structural priors such as coincidence (junctions) and parallelism. Our technical contribution spans all stages of line mapping including triangulating proposals, scoring, track building, and joint optimization, with 3D line-point / VP association graphs output as a byproduct." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.697, + 0.468, + 0.755 + ], + "angle": 0, + "content": "- The framework is flexible such that researchers can easily change components (e.g. detectors, matchers, vanishing point estimators, etc.) or integrate additional sensor data (e.g. depth maps or other 3D information)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.761, + 0.468, + 0.835 + ], + "angle": 0, + "content": "- We are the first to go beyond small test sets by quantitatively evaluating on both synthetic and real datasets to benchmark the performance, with hundreds of images for each scene, in which LIMAP consistently and significantly outperforms existing approaches." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.84, + 0.468, + 0.899 + ], + "angle": 0, + "content": "- Finally, we demonstrate the usefulness of having robust line maps by showing improvement over purely point-based methods in tasks such as visual localization and bundle adjustment in Structure-from-Motion." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.521, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.64, + 0.105 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.325 + ], + "angle": 0, + "content": "Line Detection and Matching. Detecting 2D line segments conventionally relies on grouping image gradients [5, 75]. To improve the robustness and repeatability, learning-based line detectors were later proposed to tackle the problem of wireframe parsing [25, 43, 82, 83, 88, 90]. Recent deep detectors [26, 46, 81] manage to achieve impressive results for detecting general line segments. Matching of the detected line segments is often based on comparing either handcrafted [8, 74, 76, 85] or learning-based [1, 34, 46, 73, 84] descriptors. Some recent methods also exploit point-line [14, 15] and line-junction-line structures [38, 39] to improve matching results, yet still not reaching the reliability level of advanced point matchers [58, 70]. Our method can leverage any line detector and matcher, and is robust to outliers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.342, + 0.892, + 0.629 + ], + "angle": 0, + "content": "Line Reconstruction. As a seminal work, Bartoli and Sturm [6, 7] proposed a full SfM pipeline for line segments, later improved by Schindler [63] with Manhattan-world assumption [12]. Jain et al. [27] proposed to impose global topological constraints between neighboring lines, which were further explored in [51, 53, 54] to build wireframe models. Some learning-based methods [42, 90] were introduced as well to predict 3D wireframes. Hofer et al. [21-23] proposed checking weak epipolar constraints over exhaustive matches and graph clustering, and introduced the Line3D++ software (referred as L3D++ in this paper), which remains the top choice [17, 42] for acquiring 3D line maps so far. Recently, ELSR [77] employed planes and points to guide the matching. 
However, all prior work mainly shows qualitative results and provides quantitative evaluation only on relatively small image sets [27, 69]. In this paper, we set up a quantitative evaluation on benchmarks with hundreds of images, where our proposed system significantly surpasses prior work by improving all stages in the mapping pipeline." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Line-based Applications. The resulting 3D line maps can be used for many downstream applications. [23] advocates the complementary nature of line reconstruction for structure visualization. Some incremental line-based SfM systems are introduced in [24,44,86]. To improve quality and robustness, recent methods [18,19,40,41,49,78,91] jointly employ point and line features in SLAM. While their line maps are often noisy and incomplete, noticeable improvement has been achieved in the accuracy of the recovered camera motion. There has also been development on VP estimation [9,37,50, 87] and solvers for joint point-line pose estimation [4,52,72, 89]. Recently, promising performance in visual localization has been achieved by combining point and line features in a refinement step [17]. In this paper, we show that our line maps can benefit multiple applications such as localization, SfM, and MVS (Sec. J in supp.). In particular, we present very competitive results on point-line visual localization." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "21446" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.089, + 0.088, + 0.887, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.232, + 0.895, + 0.262 + ], + "angle": 0, + "content": "Figure 2. Overview. Given a set of posed images and optional 3D points, we associate nearby points to lines, match the lines, triangulate them with 4 different strategies, score 3D line proposals, build line tracks, jointly optimize all features, before obtaining our final reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.266, + 0.444, + 0.284 + ], + "angle": 0, + "content": "3. The Proposed 3D Line Mapping Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.291, + 0.471, + 0.398 + ], + "angle": 0, + "content": "We now present our proposed pipeline for 3D line mapping. Our method takes as input a set of images with 2D line segments from any existing line detectors. We assume the camera pose for each image is available (e.g. from SfM/SLAM), and optionally we can also leverage a 3D point cloud (e.g. obtained from point-based SfM). The pipeline consists of three main steps:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.404, + 0.472, + 0.435 + ], + "angle": 0, + "content": "- Proposal Generation (Sec. 3.1): For each 2D line segment, we generate a set of 3D line segment proposals." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.44, + 0.47, + 0.498 + ], + "angle": 0, + "content": "- Scoring and Track Association (Sec. 3.2): Considering multi-view consistency, we score each proposal, select the best candidate for each 2D line, and associate them into a set of 3D line tracks." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.504, + 0.471, + 0.564 + ], + "angle": 0, + "content": "- Joint Refinement (Sec. 
3.3): Finally, we jointly perform non-linear refinement over the 3D line tracks along with 3D points and VP directions, integrating additional structural priors as soft constraints." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.404, + 0.472, + 0.564 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.573, + 0.47, + 0.603 + ], + "angle": 0, + "content": "Figure 2 shows an overview of the overall pipeline. In the following sections, we detail each of the three main steps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.603, + 0.471, + 0.649 + ], + "angle": 0, + "content": "By design our pipeline is robust to scale changes and we use the same hyper-parameters for all experiments across datasets, which are provided in Sec. F.2 in the supp." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.657, + 0.416, + 0.673 + ], + "angle": 0, + "content": "3.1. Generating 3D Line Segment Proposals" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.68, + 0.47, + 0.785 + ], + "angle": 0, + "content": "The first step is to generate a set of 3D line proposals for each 2D line segment. Given a segment in an image, we use any existing line finder to retrieve the top \\( K \\) line matches in each of the \\( n_v \\) closest images. Using the top \\( K \\) line matches instead of a single match increases the chance of getting a correct match, while wrong matches will be filtered out in subsequent steps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.785, + 0.471, + 0.876 + ], + "angle": 0, + "content": "Let \\((\\pmb{x}_1^r,\\pmb{x}_2^r)\\in \\mathbb{R}^3\\times \\mathbb{R}^3\\) be the two endpoints (in homogeneous coordinates normalized by the intrinsics) for the reference line segment that we wish to generate proposals for. For ease of notation, we let the world-coordinate system align with the reference view. The endpoints of the 3D line proposals that we generate can all be written as" + }, + { + "type": "equation", + "bbox": [ + 0.176, + 0.886, + 0.47, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {X} _ {1} = \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}, \\quad \\boldsymbol {X} _ {2} = \\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.267, + 0.895, + 0.314 + ], + "angle": 0, + "content": "for some values of \\(\\lambda_1, \\lambda_2 \\in \\mathbb{R}\\). Having the 3D endpoints of all proposals lie on the camera rays of the 2D endpoints simplifies the scoring procedure in the second step (Sec. 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.33, + 0.687, + 0.345 + ], + "angle": 0, + "content": "3.1.1 Line Triangulation" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.354, + 0.894, + 0.415 + ], + "angle": 0, + "content": "For each matched 2D line segment \\((\\pmb{x}_1^m, \\pmb{x}_2^m)\\) we generate one proposal via algebraic line triangulation. Let \\((R^m, t^m)\\) be the camera pose of the matched view. We can then solve linearly for the endpoint ray depths \\(\\lambda_i\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.424, + 0.893, + 0.442 + ], + "angle": 0, + "content": "\\[\n\\left(\\boldsymbol {x} _ {1} ^ {m} \\times \\boldsymbol {x} _ {2} ^ {m}\\right) ^ {T} \\left(R ^ {m} \\left(\\lambda_ {i} \\boldsymbol {x} _ {i} ^ {r}\\right) + \\boldsymbol {t} ^ {m}\\right) = 0, \\quad i = 1, 2. 
\\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.452, + 0.895, + 0.725 + ], + "angle": 0, + "content": "The proposals are then filtered with cheirality checks (positive \\(\\lambda\\)) and degeneracy check via the angle between ray \\(x_{i}^{r}\\) and \\(\\ell_{m} = x_{1}^{m} \\times x_{2}^{m}\\). Note that line triangulation becomes inherently unstable close to degenerate configurations when \\(\\ell_{m}^{T} R^{m} x_{i}^{r} = 0\\), where we get zero or infinite solutions from (2). Geometrically, this happens when the line is parallel with the epipolar plane: If \\(\\ell_{m}^{T} t^{m} \\neq 0\\) they have no intersection, otherwise they intersect fully and we get infinite solutions \\(\\ell_{m} \\sim t^{m} \\times R^{m} x_{i}^{r} = E x_{i}^{r}\\), i.e. the line segment coincides with the epipolar line from \\(x_{i}^{r}\\). This issue is further illustrated in Figure 8. Since we solve for each \\(\\lambda_{i}\\) independently, the triangulation problem can have zero, one, or two degenerate endpoints. We term the case with one degenerate endpoint as a weakly degenerate one, and the case with two degenerate endpoints as fully degenerate. In contrast to the point case, two-view line triangulation is minimal such that any solution fits the measurements exactly with zero error, preventing filtering with 2D reprojection error at this stage." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.741, + 0.712, + 0.755 + ], + "angle": 0, + "content": "3.1.2 Point-Line Association" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.901 + ], + "angle": 0, + "content": "To obtain meaningful proposals in degenerate cases, we leverage additional geometric information coming from either points or associated vanishing points (VPs). 2D-3D point correspondences can either come from a point-based SfM model or be triangulated from matched endpoints/junctions. For each 2D line segment, we associate all 2D points within a fixed pixel threshold and thereby associate with their corresponding 3D points. For each image, we also estimate a set of VPs and their association to 2D lines using JLinkage [71]." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21447" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.092, + 0.358, + 0.108 + ], + "angle": 0, + "content": "3.1.3 Point-guided Line Triangulation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.473, + 0.265 + ], + "angle": 0, + "content": "We now generate a second set of proposals for each 2D line segment with the assistance of the associated 2D-3D point correspondences and vanishing points. In the following parts we present three different methods. M1 employs multiple associated 3D points so it is stable for all cases including the fully degenerate ones, while M2 and M3 with one known point / VP can help generate stable proposals in weakly degenerate cases, which are more common in practice. Cheirality tests are applied to all proposals with respect to both views." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.267, + 0.473, + 0.448 + ], + "angle": 0, + "content": "M1. Multiple Points. For each matched line segment we generate one proposal by collecting all of the associated 3D points that are common between the reference and the match. On top of those common points, we fit a 3D line that is then projected onto two camera rays corresponding to \\( \\boldsymbol{x}_1^r \\) and \\( \\boldsymbol{x}_2^r \\). M2. Line + Point. 
For each matched line segment we also generate one proposal for each shared 3D point. We first project the 3D point onto the plane spanned by \\( \\boldsymbol{x}_1^r \\) and \\( \\boldsymbol{x}_2^r \\). We then aim to find a line that passes through the projection and minimizes the residuals in (2) to the matched line. This can be formulated as a quadratic optimization problem in the two endpoint depths \\( \\lambda = (\\lambda_1, \\lambda_2) \\) with a single constraint:" + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.454, + 0.471, + 0.48 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\boldsymbol {\\lambda} \\in \\mathbb {R} ^ {2}} \\boldsymbol {\\lambda} ^ {T} A \\boldsymbol {\\lambda} + \\boldsymbol {b} ^ {T} \\boldsymbol {\\lambda}, \\quad \\text {s . t .} \\quad \\boldsymbol {\\lambda} ^ {T} Q \\boldsymbol {\\lambda} + \\boldsymbol {q} ^ {T} \\boldsymbol {\\lambda} = 0. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.487, + 0.47, + 0.594 + ], + "angle": 0, + "content": "Due to the low-dimensionality of the problem, a closed-form solution can be derived by reducing it to a univariate quartic polynomial. We show the full derivation in Sec. B in supp. M3. Line + VP. Each VP corresponds to a 3D direction. For each associated VP, we generate one proposal based on its direction (again projected onto the plane spanned by \\( \\boldsymbol{x}_1^r \\) and \\( \\boldsymbol{x}_2^r \\)). This gives a single linear constraint on the ray depths," + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.601, + 0.471, + 0.621 + ], + "angle": 0, + "content": "\\[\n\\left(\\boldsymbol {v} \\times \\left(\\boldsymbol {x} _ {1} ^ {r} \\times \\boldsymbol {x} _ {2} ^ {r}\\right)\\right) ^ {T} \\left(\\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r} - \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}\\right) = 0. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.47, + 0.688 + ], + "angle": 0, + "content": "where \\(\\pmb{v} \\in \\mathbb{R}^3\\) is the VP. Using the constraint, we then solve for \\(\\lambda = (\\lambda_1, \\lambda_2)\\) by minimizing the two residuals of (2) in a least squares sense. Note that \\(\\pmb{v}\\) can either come from the reference image, or from a matched line in another image." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.688, + 0.471, + 0.779 + ], + "angle": 0, + "content": "Extension: Line Mapping Given Depth Maps. The proposal generation step can be improved when each image has a corresponding depth map (e.g. from an RGB-D sensor), which can be leveraged with robust line fitting to generate the 3D line proposals. Refer to Sec. E in our supplementary material for more details and results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.421, + 0.803 + ], + "angle": 0, + "content": "3.2. Proposal Scoring and Track Association" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.473, + 0.9 + ], + "angle": 0, + "content": "At this point, each 2D line segment \\(l\\) in image \\(I\\) is associated with a set \\(\\mathcal{K}\\) of 3D line segment proposals (stemming from the top \\(K\\) line matches and various triangulations) for each neighboring image \\(J\\). We describe in the following how we select the best 3D line proposal for each 2D line segment, and associate these lines into tracks. 
For each of these" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.088, + 0.619, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.155, + 0.619, + 0.167 + ], + "angle": 0, + "content": "(a) Perspective distance" + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.088, + 0.755, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.648, + 0.155, + 0.732, + 0.167 + ], + "angle": 0, + "content": "(b) Overlap score" + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.088, + 0.892, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.777, + 0.155, + 0.877, + 0.167 + ], + "angle": 0, + "content": "(c) InnerSeg distance" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.178, + 0.894, + 0.207 + ], + "angle": 0, + "content": "Figure 3. Scoring methods. We propose three novel line scoring measures that are scale-invariant and handle different line lengths." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.213, + 0.894, + 0.32 + ], + "angle": 0, + "content": "steps, we leverage different scoring methods quantifying the distance between two 3D line segments \\((L_1, L_2)\\). These distances are usually computed symmetrically and averaged, and can be obtained both in 3D and in 2D by projecting each 3D line into the other view. We start by presenting two classic ones, and then define our three novel line distances (one for 3D proposal selection and two for track building)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.331, + 0.81, + 0.346 + ], + "angle": 0, + "content": "- Angular distance: angle between \\( L_{1} \\) and \\( L_{2} \\)." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.351, + 0.894, + 0.381 + ], + "angle": 0, + "content": "- Perpendicular distance: maximum orthogonal distance of the endpoints of \\( L_{1} \\) to the infinite line spanned by \\( L_{2} \\)." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.331, + 0.894, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.393, + 0.894, + 0.438 + ], + "angle": 0, + "content": "3D Proposal Selection. To select best 3D candidate for each 2D line, we score each proposal \\( L_{i} \\) by measuring its consistency with the others. Here we introduce a new distance:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.448, + 0.894, + 0.54 + ], + "angle": 0, + "content": "- Perspective distance: assuming the endpoints of \\( L_{1} \\) and \\( L_{2} \\) are on the same rays as in Fig. 3(a), the distance is defined as the endpoint distances, divided by the ray depths \\( d_{s}, d_{e} \\) of the endpoints of \\( L_{1} \\) in image 1. This score can filter out ill-posed triangulations (refer to Sec. F.3 in supp. for detailed discussions), while remaining scale-invariant." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.549, + 0.894, + 0.656 + ], + "angle": 0, + "content": "This new distance, together with the angular distance in 2D and 3D, and the perpendicular distance in 2D, have different scales. In order to aggregate them together, we associate a scaling factor \\(\\tau_r\\) to each distance \\(r\\) and get a normalized score \\(s_n = e^{-(r / \\tau_r)^2} \\in (0,1]\\). 
Denoting by \\(\\mathcal{S}\\) the set of all the corresponding normalized scores and \\(\\mathbb{I}\\) the indicator function, the score between \\(L_1\\) and \\(L_2\\) becomes" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.667, + 0.892, + 0.69 + ], + "angle": 0, + "content": "\\[\ns \\left(L _ {1}, L _ {2}\\right) = \\min _ {s _ {n} \\in S} \\left(s _ {n} \\cdot \\mathbb {1} _ {s _ {n} \\geq 0. 5}\\right) \\in \\{0 \\} \\cup [ 0. 5, 1 ]. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.701, + 0.894, + 0.762 + ], + "angle": 0, + "content": "Now equipped with unique score per line pair, we can consider all the neighboring 3D line candidates \\( L_{j}^{k} \\) coming from the neighboring image \\( J \\) and proposal \\( k \\). The consistency score is defined by summing the best score from each image:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.772, + 0.892, + 0.806 + ], + "angle": 0, + "content": "\\[\ns _ {c} \\left(L _ {i}\\right) = \\sum_ {J \\in \\mathcal {N} _ {I}} \\max _ {k \\in \\mathcal {K}} s \\left(L _ {i}, L _ {J} ^ {k}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.894, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}_I\\) is the set of neighboring images of \\(I\\). The best 3D line candidate for each 2D line segment \\(l\\) is then selected as the proposal with the highest score: \\(L = \\operatorname{argmax}_{L_i} s_c(L_i)\\). If the score is less than 1.0, i.e. the best candidate has less than two supports from neighboring views, we ignore this 2D line segment in the subsequent track building process." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21448" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.228 + ], + "angle": 0, + "content": "Track Building. At this point, each 2D segment has been assigned a unique 3D line (its best 3D line candidate). The goal of this step is to gather these 2D segments into line tracks. For this, we form a graph where the 2D segments are nodes and all initial line matches are edges. We aim to prune edges in the graph such that the connected 2D segments share similar 3D assignments. We propose two new line scoring measures that can cope with different endpoint configurations and variable scales across images." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.234, + 0.47, + 0.302 + ], + "angle": 0, + "content": "- Overlap score: we project \\( L_{1} \\) orthogonally onto \\( L_{2} \\), clip the projected endpoints to the endpoints of \\( L_{2} \\) if they fall outside of \\( L_{2} \\) to get segment \\( \\Pi(L_{1}) \\), and compare the ratio of lengths to a threshold \\( \\tau_{o} \\): \\( \\mathbb{1}_{\\frac{|\\Pi(L_1)|}{|L_2|} \\geq \\tau_o} \\) (see Fig. 3(b))." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.304, + 0.472, + 0.472 + ], + "angle": 0, + "content": "- InnerSeg distance: the endpoints of \\( L_{1} \\) are perpendicularly unprojected to \\( L_{2} \\). If they fall outside of \\( L_{2} \\), we clip them to the closest endpoint of \\( L_{2} \\). By doing this in both directions, we can define two inner segments (see Fig. 3(c)), and the InnerSeg distance as the maximum distance between their endpoints. 
To make this measure scale-invariant, we additionally divide it by a scale factor \\( \\sigma = \\frac{\\min(d_{1}, d_{2})}{f} \\), where \\( d_{j} \\) is the depth of the mid-point of \\( L_{j} \\) in image \\( J \\) and \\( f \\) is the focal length. This encodes how far the mid-point can move in 3D before reaching 1 pixel error in the image (detailed in Sec. F.3 in supp.)." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.234, + 0.472, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.478, + 0.47, + 0.583 + ], + "angle": 0, + "content": "We then convert the InnerSeg distance computed in 3D to a normalized score as in the previous paragraph, and combine it with the overlap score in 2D and 3D and previous scores using (5). Given these pairwise scores of 3D lines, we can now prune edges whose score is below a threshold \\( t_f = 0.5 \\). The connected components of the resulting graph yield the line tracks, ignoring components with less than 3 nodes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.584, + 0.472, + 0.675 + ], + "angle": 0, + "content": "For each track, we then re-estimate a single 3D line segment. Using the set of endpoints from the 3D assignments of all nodes in the track, we apply Principal Component Analysis (PCA) and use the principal eigenvector and mean 3D point to estimate the infinite 3D line. We then project all endpoints on this infinite line to get the new 3D endpoints." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.682, + 0.444, + 0.698 + ], + "angle": 0, + "content": "3.3. Joint Optimization of Lines and Structures" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.472, + 0.81 + ], + "angle": 0, + "content": "Finally, we perform non-linear refinement on the acquired 3D lines with their track information. The straightforward approach is to perform geometric refinement on the reprojection error. With the 2D point-line association available, we can formulate a joint optimization problem by including additional structural information. The energy to minimize can be written as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.116, + 0.83, + 0.472, + 0.865 + ], + "angle": 0, + "content": "\\[\nE = \\sum_ {p} E _ {P} (p) + \\sum_ {l} E _ {L} (l) + \\sum_ {(p, l)} E _ {P L} (p, l), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.473, + 0.902 + ], + "angle": 0, + "content": "where \\(E_{P}\\) and \\(E_{L}\\) are the data terms, and \\(E_{PL}\\) encodes the 3D association between lines and points / VPs. In particular," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.227 + ], + "angle": 0, + "content": "\\(E_{P}\\) is the 2D point reprojection error as in regular bundle adjustment [64]. The association energy is softly weighted (as discussed later) and optimized with robust Huber loss [3]. Each line is converted into a 4-DoF infinite line with Plücker coordinate [7] for optimization and converted back to line segments by unprojecting its 2D supports. Each vanishing point is parameterized with a 3-dimensional homogeneous vector. Refer to Sec. A in supp. for details on efficient computation with minimal parameterization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.228, + 0.895, + 0.289 + ], + "angle": 0, + "content": "Geometric Refinement. The data term of each line track is also defined on its 2D reprojections. 
In particular, we measure the 2D perpendicular distance weighted by the angle consistency, which we robustly equip with Cauchy loss [3]:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.295, + 0.893, + 0.327 + ], + "angle": 0, + "content": "\\[\nE _ {L} (l) = \\sum_ {k} w _ {\\angle} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right) \\cdot e _ {\\text {p e r p}} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.334, + 0.895, + 0.394 + ], + "angle": 0, + "content": "where \\( e_{\\mathrm{perp}} \\) is the perpendicular distance, \\( L_{k} \\) is the 2D projection of the 3D segment, \\( \\ell_{k} \\) are the 2D line segments, and \\( w_{\\angle} \\) is the exponential of one minus the cosine of the 2D angle between the projected and the observed line." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.395, + 0.895, + 0.546 + ], + "angle": 0, + "content": "Soft Association between Lines and Points. For each pair of 3D line and 3D point with their track information, we can estimate how likely they are spatially associated by traversing the 2D association graph (described in Sec. 3.1.2) of their supports. Specifically, we count the number of associations among the 2D supports of the line track and point track, and keep pairs with at least three 2D associations. The 3D association energy \\( E_{PL} \\), defined on the surviving pairs, is formulated as the 3D point-line distance weighted by the number of 2D associations on their supports." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.546, + 0.896, + 0.713 + ], + "angle": 0, + "content": "Soft Association between Lines and VPs. Same as the point case, we can also build a soft association problem between lines and VPs. First, we acquire 3D VP tracks by transitively propagating line correspondences from the 3D line tracks. Then, we count the number of associations among the 2D supports for each pair of 3D line and VP track. The 3D line-VP association energy is defined as the sine of the direction angle between the 3D line and the VP, implicitly enforcing parallelism. Furthermore, we add regularizations to the nearly orthogonal VP pairs to enforce orthogonality of different line groups. Refer to Sec. C in supp. for details." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.724, + 0.634, + 0.74 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.749, + 0.895, + 0.841 + ], + "angle": 0, + "content": "Implementation Details. Our whole library is implemented in C++ with Python bindings [28]. The triangulation and scoring can be run in parallel for each node, enabling scalability to large datasets. We use \\( n_v = 20 \\) visual neighbors and keep the top \\( K = 10 \\) line matches. We provide all the values of thresholds and scaling factors in Sec. F.2 in supp." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.848, + 0.649, + 0.864 + ], + "angle": 0, + "content": "4.1. Line Mapping" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "To validate the effectiveness of our system, we set up an evaluation benchmark to quantify the quality of the recon" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21449" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.084, + 0.089, + 0.465, + 0.173 + ], + "angle": 0, + "content": "
Line type | Method | R1 | R5 | R10 | P1 | P5 | P10 | # supports
LSD [75] | L3D++ [23] | 37.0 | 153.1 | 218.8 | 53.1 | 80.8 | 90.6 | (14.8 / 16.8)
LSD [75] | ELSR [77] | 13.9 | 59.7 | 96.5 | 55.4 | 72.6 | 82.2 | (N/A / N/A)
LSD [75] | Ours | 48.6 | 185.2 | 251.3 | 60.1 | 82.4 | 90.0 | (16.4 / 20.5)
SOLD2 [46] | L3D++ [23] | 36.9 | 107.5 | 132.8 | 67.2 | 86.8 | 93.2 | (13.2 / 20.4)
SOLD2 [46] | Ours | 54.3 | 151.1 | 191.2 | 69.8 | 84.6 | 90.0 | (16.5 / 38.7)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.183, + 0.471, + 0.225 + ], + "angle": 0, + "content": "Table 1. Line reconstruction on Hypersim [55] with LSD [75] and SOLD2 [46] lines. \\( R\\tau \\) and \\( P\\tau \\) are reported at \\( 1\\mathrm{\\;{mm}},5\\mathrm{\\;{mm}},{10} \\) mm along with the average number of supporting images/lines." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.241, + 0.456, + 0.309 + ], + "angle": 0, + "content": "
Method | R5 | R10 | R50 | P5 | P10 | P50 | # supports
L3D++ [23] | 373.7 | 831.6 | 2783.6 | 40.6 | 54.5 | 85.9 | (8.8 / 9.3)
ELSR [77] | 139.2 | 322.5 | 1308.0 | 38.5 | 48.0 | 74.5 | (N/A / N/A)
Ours (line-only) | 472.1 | 1058.8 | 3720.7 | 46.8 | 58.4 | 86.1 | (10.3 / 11.8)
Ours | 508.3 | 1154.5 | 4179.5 | 46.0 | 56.9 | 83.7 | (10.4 / 12.0)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.319, + 0.472, + 0.362 + ], + "angle": 0, + "content": "Table 2. Line reconstruction on train split of Tanks and Temples [32] with LSD [75] lines. \\( R\\tau \\) and \\( P\\tau \\) are reported at \\( 5\\mathrm{\\;{mm}},{10}\\mathrm{\\;{mm}} \\) , \\( {50}\\mathrm{\\;{mm}} \\) along with the average number of supporting images/lines." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.38, + 0.47, + 0.425 + ], + "angle": 0, + "content": "structed 3D line maps. As there are no ground truth (GT) 3D lines, we evaluate the 3D line mapping with either GT mesh models or point clouds. We use the following metrics:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.426, + 0.472, + 0.455 + ], + "angle": 0, + "content": "- Length recall (in meters) at \\(\\tau (R\\tau)\\): sum of the lengths of the line portions within \\(\\tau\\) mm from the GT model." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.456, + 0.471, + 0.485 + ], + "angle": 0, + "content": "- Inlier percentage at \\(\\tau (P\\tau)\\): the percentage of tracks that are within \\(\\tau\\) mm from the GT model." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.486, + 0.471, + 0.516 + ], + "angle": 0, + "content": "- Average supports: average number of image supports and 2D line supports across all line tracks." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.426, + 0.472, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.517, + 0.47, + 0.606 + ], + "angle": 0, + "content": "In the following, we compare our system with two state-of-the-art methods as baselines: L3D++ [23] and ELSR [77], using two line detectors: the traditional LSD detector [75] and the learning-based SOLD2 [46]. For ELSR [77], we convert the input into VisualSfM [80] format and use code from the authors (only supporting LSD [75])." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.607, + 0.47, + 0.757 + ], + "angle": 0, + "content": "Our first evaluation is run on the first eight scenes of the Hypersim dataset [55], composed of 100 images each, and is reported in Tab. 1. For both detectors, we reconstruct much more complete line maps with better or comparable precision than the competitors, while also exhibiting significantly higher quality of track information. This abundant track association is beneficial particularly for line-based applications such as visual localization [17]. After discussing with the authors of ELSR, it seems that their method does not achieve satisfactory results due to a lack of point and plane features." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.758, + 0.471, + 0.879 + ], + "angle": 0, + "content": "We further evaluate all three methods on the train split of the Tanks and Temples dataset [32] without Ignatius as it has no line structures. As SOLD2 [46] is trained for indoor images, we only use LSD [75]. Since the provided point cloud was cleaned to focus only on the main subject, we compute its bounding box, extend it by one meter, and only evaluate lines inside this region. This prevents incorrectly penalizing correct lines that are far away from the main scene," + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.088, + 0.888, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.258, + 0.892, + 0.3 + ], + "angle": 0, + "content": "Figure 4. Top row: L3D++ [23]. Bottom row: Ours. Both systems are run on Horse and Family from [32]. 
We show two different views on the main scene of Horse." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.303, + 0.891, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.409, + 0.892, + 0.451 + ], + "angle": 0, + "content": "Figure 5. Qualitative results on Hypersim [55] and Tanks and Temples [32]. On Barn we jointly visualize our results and the aligned ground truth point cloud." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.454, + 0.888, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.534, + 0.894, + 0.563 + ], + "angle": 0, + "content": "Figure 6. Qualitative results of the recovered line-point and line-VP association graphs (visualized similarly as in Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.704 + ], + "angle": 0, + "content": "which our method is particularly good at thanks to our scale-invariant design (refer to Sec. G in supp.). Tab. 2 shows the results, where our methods significantly improve the mapping quality across the board. Fig. 4 shows qualitative comparison between our method and L3D++ [23]. Our results exhibit better completeness, have less noisy lines that are flying around, and achieve significantly more robust reconstructions of subtle details (e.g. on the ground). More examples of our produced line maps are shown in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.795 + ], + "angle": 0, + "content": "As an additional output of our system, junction structures and line-line relations such as parallelism and orthogonality are discovered, as shown in Fig. 6. This directly comes from the line-point and line-VP soft associations of Sec. 3.3. From the recovered structures, we can clearly perceive the scene and easily recognize the main Manhattan directions [12]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.893, + 0.902 + ], + "angle": 0, + "content": "To demonstrate the scalability of the proposed system, we also run our method on two large-scale datasets: Aachen (6,697 images) [61, 62] and Rome city (16,179 images) [2, 67, 68]. Fig. 7 shows that our method produces reliable line maps with clear structures. Note that the camera poses from Bundler [67] on Rome city are far from perfect, while our mapping still works reasonably well. The efficiency" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.465, + 0.901 + ], + "angle": 0, + "content": "Line typeTriangulationR1R5R10P1P5P10# supportsLSDEndpoints27.6101.4138.058.283.592.1(13.0 / 13.2)[75]Line48.3187.0257.459.281.989.8(15.8 / 19.1)SOLD2Endpoints27.382.8106.568.284.590.9(12.3 / 19.9)[46]Line50.8143.5180.874.486.991.2(15.1 / 32.2)" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.499, + 0.47, + 0.542 + ], + "angle": 0, + "content": "Table 3. Comparison between endpoint and line triangulation on Hypersim [55]. While being more stable at triangulation, the endpoints are often unmatched between line pairs." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.557, + 0.46, + 0.645 + ], + "angle": 0, + "content": "
Line | M1 | M2 | M3 | R1 | R5 | R10 | P1 | P5 | P10 | # supports
50.8 | 143.5 | 180.8 | 74.4 | 86.9 | 91.2 | (15.1 / 32.2)
24.9 | 72.5 | 95.8 | 65.9 | 81.2 | 88.5 | (11.3 / 15.7)
37.7 | 116.8 | 152.6 | 71.0 | 84.2 | 89.7 | (13.8 / 25.8)
51.5 | 146.9 | 185.4 | 71.7 | 85.4 | 90.1 | (14.9 / 31.2)
51.3 | 146.4 | 186.4 | 73.4 | 85.7 | 90.5 | (15.8 / 35.6)
51.4 | 145.4 | 184.9 | 74.1 | 86.1 | 90.6 | (16.5 / 38.7)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.655, + 0.473, + 0.684 + ], + "angle": 0, + "content": "Table 4. Ablation study on different types of triangulation proposals (defined in Sec. 3.1.3) on Hypersim [55] with SOLD2 [46]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.702, + 0.472, + 0.765 + ], + "angle": 0, + "content": "bottleneck is in line detection and matching (we use SOLD2 [46] descriptors), while the rest of the mapping takes only \\(\\sim 10\\) minutes on Aachen [61, 62]. The time complexity of our system is nearly linear with the number of images." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.772, + 0.388, + 0.789 + ], + "angle": 0, + "content": "4.2. More Insights and Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Line Triangulation. To study the stability of the triangulation, we perform a small test on a stereo pair from AdelaideRMF [79] on the uncertainty (measured by the largest singular value of the covariance) of the triangulated 3D segments. We further run a synthetic experiment by generating random lines on a plane orthogonal to the stereo pair, and plot the uncertainty of point and line triangulations with" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.215, + 0.892, + 0.349 + ], + "angle": 0, + "content": "
Line type | Method | R1 | R5 | P1 | P5 | # supports
LSD [75] | L3D++ [23] | 37.0 | 153.1 | 53.1 | 80.8 | (14.8 / 16.8)
LSD [75] | Ours (line) w/ [23] scoring | 48.6 | 186.0 | 56.5 | 80.6 | (14.4 / 16.8)
LSD [75] | Ours (line) w/ [23] merging | 41.2 | 158.2 | 59.6 | 82.5 | (15.6 / 16.7)
LSD [75] | Ours (line) w/ exhaustive | 46.7 | 177.2 | 57.6 | 80.9 | (16.8 / 20.8)
LSD [75] | Ours (line) | 48.3 | 187.0 | 59.2 | 81.9 | (15.8 / 19.1)
SOLD2 [46] | L3D++ [23] | 36.9 | 107.5 | 67.2 | 86.8 | (13.2 / 20.4)
SOLD2 [46] | Ours (line) w/ [23] scoring | 45.8 | 133.2 | 72.6 | 85.9 | (15.0 / 31.1)
SOLD2 [46] | Ours (line) w/ [23] merging | 37.7 | 113.4 | 70.5 | 84.5 | (13.3 / 23.9)
SOLD2 [46] | Ours (line) w/ exhaustive | 48.9 | 139.7 | 72.9 | 85.7 | (16.2 / 36.9)
SOLD2 [46] | Ours (line) | 50.8 | 143.5 | 74.4 | 86.9 | (15.1 / 32.2)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.359, + 0.893, + 0.388 + ], + "angle": 0, + "content": "Table 5. Studies on different components of our method with only line-line proposals against L3D++ [23]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.401, + 0.895, + 0.583 + ], + "angle": 0, + "content": "respect to the angle of the lines with the baseline (refer to Sec. D in supp. for details). The results in Fig. 8 show that when the matched line is nearly parallel to the epipolar line, the line triangulation becomes degenerate with exploding uncertainty, while triangulating the endpoints is significantly more stable. Thus, combining points and VPs from the 2D association is beneficial to improve the stability of the proposals. However, the endpoints are generally not consistent across line matches in practice and need to be complemented with line-line triangulation. This can be verified in Tab. 3 where the performance significantly drops when we change line triangulation into endpoint triangulation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.583, + 0.895, + 0.704 + ], + "angle": 0, + "content": "We further ablate our four types of triangulation for generating proposals. Results in Tab. 4 show that integrating points and VPs enhance the 3D line maps, in particular significantly improving the track quality. Another surprising fact is that the third line in the table, relying only on points and line + point triangulation, already achieves better results than the prior baselines in Tab. 1. Employing all four types of proposals obtains the best trade-off." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.897, + 0.902 + ], + "angle": 0, + "content": "Scoring and Track Building. We first study the effects of using exhaustive line matching as in L3D++ [23]. To enable direct comparison we only use line triangulation proposals. Results are shown in Tab. 5. While there are more proposals generated from the exhaustive matches, both the recall and precision decrease by a noticeable margin. This is probably due to the large number of wrong proposals misleading the scoring process. Nevertheless, our method with exhaustive matches still works significantly better than L3D++ [23]. To further study the effects of the proposed distance measurements at scoring and track building (merging), we re-implement the ones proposed in L3D++ [23] and perform direct comparison. Both our scoring and track" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.958 + ], + "angle": 0, + "content": "21451" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.468, + 0.179 + ], + "angle": 0, + "content": "
Method | R1 | R5 | R10 | P1 | P5 | P10 | # supports
Line-only w/o refine | 43.5 | 135.8 | 180.1 | 75.1 | 87.2 | 92.2 | (15.1 / 32.2)
Line-only w/ geom alone | 50.8 | 143.5 | 180.8 | 74.4 | 86.9 | 91.2 | (15.1 / 32.2)
w/o refine | 46.5 | 146.0 | 189.7 | 76.8 | 88.9 | 93.3 | (16.5 / 38.7)
w/ geom alone | 51.4 | 145.4 | 184.9 | 74.1 | 86.1 | 90.6 | (16.5 / 38.7)
w/ joint optimization | 54.3 | 151.1 | 191.2 | 69.8 | 84.6 | 90.0 | (16.5 / 38.7)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.189, + 0.46, + 0.204 + ], + "angle": 0, + "content": "Table 6. Line refinement on Hypersim [55] with SOLD2 [46]." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.221, + 0.454, + 0.269 + ], + "angle": 0, + "content": "
Dataset | HLoc² [56,57] | PtLine [17] | Ours
Cambridge [30] | 7.0 / 0.13 / 44.0 | 7.4 / 0.13 / 43.5 | 6.7 / 0.12 / 46.1
7Scenes [66] | 3.3 / 1.08 / 73.0 | 3.3 / 1.09 / 72.7 | 3.0 / 1.00 / 78.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.279, + 0.47, + 0.335 + ], + "angle": 0, + "content": "Table 7. Visual localization on Cambridge [31] and 7Scenes [66]. We report the median translation and rotation errors in cm and degrees, and the pose accuracy \\((\\%)\\) at \\(5\\mathrm{cm} / 5\\) deg threshold. All metrics are averaged across all scenes of each dataset." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.34, + 0.168, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.414, + 0.159, + 0.424 + ], + "angle": 0, + "content": "HLoc [56, 57]" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.34, + 0.257, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.414, + 0.253, + 0.423 + ], + "angle": 0, + "content": "Ours w/ LIMAP" + }, + { + "type": "table", + "bbox": [ + 0.261, + 0.34, + 0.47, + 0.424 + ], + "angle": 0, + "content": "
(T / R) err. ↓ | Acc. ↑
HLoc [57] | 5.2 / 1.46 | 46.8
HLoc [57] w/ depth | 4.7 / 1.25 | 53.4
PtLine [17] | 4.8 / 1.33 | 51.9
Ours w/ L3D++ [23] | 4.1 / 1.14 | 60.8
Ours w/ LIMAP | 3.7 / 1.02 | 71.1
" + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.437, + 0.469, + 0.465 + ], + "angle": 0, + "content": "Figure 9. Line-assisted Visual localization on Stairs from 7Scenes [66]. Blue: 2D points/lines; Green/Red: Projected 3D points/lines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.472, + 0.469, + 0.502 + ], + "angle": 0, + "content": "building are significantly better, especially when equipped with SOLD2 [46] which produces more structured lines." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.502, + 0.47, + 0.714 + ], + "angle": 0, + "content": "Joint Optimization. Finally, we ablate the proposed joint optimization in our pipeline. First, we remove the point-line association and only apply the geometric residuals (reprojection error). Results in Tab. 6 show that the geometric refinement improves significantly when the proposals solely come from line triangulation. However, when adding additional proposals from points and VPs, it contributes marginally and even misleads some lines that are generated from points and VPs but poorly conditioned for lines (R10 decreases). When integrated with joint optimization with soft association, the recall is further improved noticeably, while sacrificing a bit on the precision. It is worth pointing out that the joint optimization also enables the byproduct of junction structures and line-line relations (e.g. in Fig. 6)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.724, + 0.212, + 0.74 + ], + "angle": 0, + "content": "4.3. Applications" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.747, + 0.47, + 0.854 + ], + "angle": 0, + "content": "Line-Assisted Visual Localization. We build a hybrid visual localization with both points and lines on top of the acquired 3D line maps. Specifically, we first build point maps as in HLoc [56, 57] and line maps with our proposed method. Then, we match points and lines respectively and get 2D-3D correspondences from the track information in the 3D maps. Given these correspondences, we combine" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.863, + 0.47, + 0.901 + ], + "angle": 0, + "content": "2Up to the date of submission, the COLMAP model [64] used by HLoc [56, 57] does not consider radial distortion from the VisualSfM [80] model. So our results are better than the original ones." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.136 + ], + "angle": 0, + "content": "
Med. error ↓ | AUC @ (1° / 3° / 5°) ↑
COLMAP [64] | 0.188 | 77.3 / 89.0 / 91.6
COLMAP [64] + LIMAP refinement | 0.146 | 82.9 / 91.2 / 93.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.146, + 0.892, + 0.175 + ], + "angle": 0, + "content": "Table 8. Joint bundle adjustment of points and lines on Hypersim [55]. Relative pose errors are measured on all image pairs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.181, + 0.895, + 0.302 + ], + "angle": 0, + "content": "four minimal solvers [33, 47, 89]: P3P, P2P1LL, P1P2LL, P3LL from PoseLib [35], together in a hybrid RANSAC framework [10, 59] with local optimization [11, 36] to get the final 6-DoF pose (refer to Sec. H in supp. for details). This also enables direct comparison since only using P3P [47] corresponds to the point-alone baseline similar to HLoc [56, 57]. We also compare with the post-refinement of PtLine [17] that optimizes over the initial point-alone predictions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.302, + 0.895, + 0.483 + ], + "angle": 0, + "content": "Results in Tab. 7 show that our localization system achieves consistently better results than the point-alone baseline both indoors [66] and outdoors [30], validating the effectiveness of employing 3D line maps for visual localization. In Fig. 9 we show more detailed results from the Stairs scene from 7Scenes [66] as it is one of the most challenging ones. Integrating lines significantly benefits the alignment of the reprojected structures, improving the pose accuracy from 46.8 to 71.1. Also, with our localization pipeline, using the map built from our proposed method is better than from L3D++ [23] by a noticeable margin, again demonstrating the advantages of our proposed line mapping system." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.483, + 0.895, + 0.665 + ], + "angle": 0, + "content": "Refining Structure-from-Motion. With the acquired 3D line maps built from a roughly correct point-based structure-from-motion model, e.g., COLMAP [64], we can use the 3D lines with their track information to refine the input camera poses with joint optimization of points and lines. To verify this, we run COLMAP [64] with SuperPoint [13] on the first eight scenes of Hypersim [55], run the proposed line mapping on top of it, and perform joint bundle adjustment to refine poses and intrinsics. We report the relative pose evaluation of all image pairs [29]. Tab. 8 shows that the joint point-line refinement consistently benefits the accuracy of the camera poses, in particular improving AUC@ \\(1^{\\circ}\\) by 5.6." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.678, + 0.619, + 0.693 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.895, + 0.884 + ], + "angle": 0, + "content": "In this paper, we introduce LIMAP: a library for robust 3D line mapping from multi-view imagery. Extensive experiments show that our method, by improving all stages of the reconstruction pipeline, produces significantly more complete 3D lines, with much higher quality of track association. As a byproduct, the method can also recover 3D association graphs between lines and points / VPs. We further show the usefulness of 3D line maps on visual localization and bundle adjustment. Future directions include incremental / real-time structure mapping, distinguishing structural lines from textural lines for wireframe modeling, and exploiting higher-level structures and relations for downstream applications." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.885, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements. V. Larsson was supported by ELLIIT." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "21452" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In 3DV, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.16, + 0.472, + 0.215 + ], + "angle": 0, + "content": "[2] Sameer Agarwal, Yasutaka Furukawa, Noah Snavely, Ian Simon, Brian Curless, Steven M Seitz, and Richard Szeliski. Building rome in a day. Communications of the ACM, 54(10):105-112, 2011. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.218, + 0.472, + 0.246 + ], + "angle": 0, + "content": "[3] Sameer Agarwal and Keir Mierle. Ceres solver. http://ceres-solver.org.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.248, + 0.472, + 0.303 + ], + "angle": 0, + "content": "[4] Sérgio Agostinho, João Gomes, and Alessio Del Bue. Cvxpl: A unified convex solution to the absolute pose estimation problem from point and line correspondences. arXiv preprint arXiv:1907.10545, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.305, + 0.472, + 0.348 + ], + "angle": 0, + "content": "[5] Cuneyt Akinlar and Cihan Topal. Edlines: Real-time line segment detection by edge drawing (ed). In IEEE International Conference on Image Processing, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.35, + 0.472, + 0.39 + ], + "angle": 0, + "content": "[6] Adrien Bartoli, Mathieu Coquerelle, and Peter Sturm. A framework for pencil-of-points structure-from-motion. In ECCV, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.393, + 0.472, + 0.448 + ], + "angle": 0, + "content": "[7] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 100(3):416-441, 2005. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.451, + 0.472, + 0.491 + ], + "angle": 0, + "content": "[8] Herbert Bay, Vittorio Ferraris, and Luc Van Gool. Wide-baseline stereo matching with line segments. In CVPR, 2005. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.472, + 0.551 + ], + "angle": 0, + "content": "[9] Jean-Charles Bazin, Yongduek Seo, Cédric Demonceaux, Pascal Vasseur, Katsushi Ikeuchi, Inso Kweon, and Marc Pollefeys. Globally optimal line clustering and vanishing point estimation in manhattan world. In CVPR, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.472, + 0.593 + ], + "angle": 0, + "content": "[10] Federico Camposeco, Andrea Cohen, Marc Pollefeys, and Torsten Sattler. Hybrid camera pose estimation. In CVPR, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.596, + 0.472, + 0.637 + ], + "angle": 0, + "content": "[11] Ondrej Chum, Jiri Matas, and Josef Kittler. Locally optimized ransac. In Joint Pattern Recognition Symposium, pages 236-243, 2003. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.64, + 0.472, + 0.682 + ], + "angle": 0, + "content": "[12] James Coughlan and Alan L Yuille. The manhattan world assumption: Regularities in scene statistics which enable bayesian inference. In NeurIPS, 2000. 
2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.684, + 0.472, + 0.739 + ], + "angle": 0, + "content": "[13] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Computer Vision and Pattern Recognition Workshops (CVPRW), 2018. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.741, + 0.472, + 0.77 + ], + "angle": 0, + "content": "[14] Bin Fan, Fuchao Wu, and Zhanyi Hu. Line matching leveraged by point correspondences. In CVPR, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.772, + 0.472, + 0.812 + ], + "angle": 0, + "content": "[15] Bin Fan, Fuchao Wu, and Zhanyi Hu. Robust line matching through line-point invariants. Pattern Recognition, 45(2):794-805, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.814, + 0.472, + 0.844 + ], + "angle": 0, + "content": "[16] Wolfgang Förstner and Bernhard P Wrobel. Photogrammetric computer vision. Springer, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[17] Shuang Gao, Jixiang Wan, Yishan Ping, Xudong Zhang, Shuzhou Dong, Yuchen Yang, Haikuan Ning, Jijunnan Li, and Yandong Guo. Pose refinement with joint optimization of visual points and lines. In IROS, 2022. 2, 6, 8" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.161 + ], + "angle": 0, + "content": "[18] Ruben Gomez-Ojeda, Francisco-Angel Moreno, David Zuniga-Noël, Davide Scaramuzza, and Javier Gonzalez-Jimenez. Pl-slam: A stereo slam system through the combination of points and line segments. IEEE Transactions on Robotics, 35(3):734-746, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.894, + 0.204 + ], + "angle": 0, + "content": "[19] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Pl-vio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.894, + 0.245 + ], + "angle": 0, + "content": "[20] Jared Heinly, Johannes L. Schonberger, Enrique Dunn, and Jan-Michael Frahm. Reconstructing the world in six days. In CVPR, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.894, + 0.287 + ], + "angle": 0, + "content": "[21] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In 3DV, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.288, + 0.894, + 0.329 + ], + "angle": 0, + "content": "[22] Manuel Hofer, Michael Maurer, and Horst Bischof. Line3d: Efficient 3d scene abstraction for the built environment. In German Conference on Pattern Recognition, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.33, + 0.894, + 0.383 + ], + "angle": 0, + "content": "[23] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Computer Vision and Image Understanding (CVIU), 157:167-178, 2017. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.385, + 0.894, + 0.438 + ], + "angle": 0, + "content": "[24] Aleksander Holynski, David Geraghty, Jan-Michael Frahm, Chris Sweeney, and Richard Szeliski. Reducing drift in structure from motion using extended features. In 3DV, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.441, + 0.894, + 0.482 + ], + "angle": 0, + "content": "[25] Kun Huang, Yifan Wang, Zihan Zhou, Tianjiao Ding, Shenghua Gao, and Yi Ma. Learning to parse wireframes in images of man-made environments. In CVPR, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.894, + 0.523 + ], + "angle": 0, + "content": "[26] Siyu Huang, Fangbo Qin, Pengfei Xiong, Ning Ding, Yijia He, and Xiao Liu. Tp-lsd: Tri-points based line segment detector. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.525, + 0.894, + 0.579 + ], + "angle": 0, + "content": "[27] Arjun Jain, Christian Kurz, Thorsten Thormahlen, and Hans-Peter Seidel. Exploiting global connectivity constraints for reconstruction of 3d line segments from images. In CVPR, 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.581, + 0.894, + 0.622 + ], + "angle": 0, + "content": "[28] Wenzel Jakob, Jason Rhinelander, and Dean Moldovan. pybind11 - seamless operability between c++11 and python. https://github.com/pybind/pybind11.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.894, + 0.677 + ], + "angle": 0, + "content": "[29] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. IJCV, 129(2):517-547, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.894, + 0.718 + ], + "angle": 0, + "content": "[30] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In CVPR, 2017. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.72, + 0.894, + 0.761 + ], + "angle": 0, + "content": "[31] Alex Kendall, Matthew Grimes, and Roberto Cipolla. PoseNet: A convolutional network for real-time 6-DoF camera relocalization. In ICCV, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.762, + 0.894, + 0.815 + ], + "angle": 0, + "content": "[32] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.894, + 0.859 + ], + "angle": 0, + "content": "[33] Zuzana Kukelova, Jan Heller, and Andrew Fitzgibbon. Efficient intersection of three quadrics and applications in computer vision. In CVPR, 2016. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[34] Manuel Lange, Fabian Schweinfurth, and Andreas Schilling. Dld: A deep learning based line descriptor for line feature matching. In IROS, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21453" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "[35] Viktor Larsson. PoseLib - Minimal Solvers for Camera Pose Estimation. https://github.com/vlarsson/ PoseLib.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.469, + 0.163 + ], + "angle": 0, + "content": "[36] Karel Lebeda, Jiri Matas, and Ondrej Chum. Fixing the Locally Optimized RANSAC. In BMVC, 2012. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.165, + 0.47, + 0.219 + ], + "angle": 0, + "content": "[37] Haoang Li, Ji Zhao, Jean-Charles Bazin, Wen Chen, Zhe Liu, and Yun-Hui Liu. Quasi-globally optimal and efficient vanishing point estimation in Manhattan world. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.221, + 0.469, + 0.248 + ], + "angle": 0, + "content": "[38] Kai Li, Jian Yao, and Xiaohu Lu. Robust line matching based on ray-point-ray structure descriptor. In ACCV, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.25, + 0.47, + 0.305 + ], + "angle": 0, + "content": "[39] Kai Li, Jian Yao, Xiaohu Lu, Li Li, and Zhichao Zhang. Hierarchical line matching based on line-junction-line structure descriptor and local homography estimation. Neurocomputing, 184:207-220, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.307, + 0.47, + 0.361 + ], + "angle": 0, + "content": "[40] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters, 7(2):1518-1525, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.363, + 0.469, + 0.404 + ], + "angle": 0, + "content": "[41] Hyunjun Lim, Yeeun Kim, Kwangik Jung, Sumin Hu, and Hyun Myung. Avoiding degeneracy for monocular visual slam with point and line features. In ICRA, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.406, + 0.47, + 0.459 + ], + "angle": 0, + "content": "[42] Yicheng Luo, Jing Ren, Xuefei Zhe, Di Kang, Yajing Xu, Peter Wonka, and Linchao Bao. Lc2wf: learning to construct 3d building wireframes from 3d line clouds. In BMVC, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.462, + 0.469, + 0.503 + ], + "angle": 0, + "content": "[43] Quan Meng, Jiakai Zhang, Qiang Hu, Xuming He, and Jingyi Yu. Lgnn: A context-aware line segment detector. In ACM International Conference on Multimedia, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.505, + 0.469, + 0.545 + ], + "angle": 0, + "content": "[44] Branislav Micusik and Horst Wildenauer. Structure from motion with line segments under relaxed endpoint constraints. IJCV, 124(1):65-79, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.548, + 0.47, + 0.602 + ], + "angle": 0, + "content": "[45] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.604, + 0.47, + 0.658 + ], + "angle": 0, + "content": "[46] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In CVPR, 2021. 1, 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.661, + 0.47, + 0.701 + ], + "angle": 0, + "content": "[47] Mikael Persson and Klas Nordberg. Lambda twist: An accurate fast robust perspective three point (p3p) solver. In ECCV, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.704, + 0.469, + 0.744 + ], + "angle": 0, + "content": "[48] Francesco Pittaluga, Sanjeev J Koppal, Sing Bing Kang, and Sudipta N Sinha. Revealing scenes by inverting structure from motion reconstructions. In CVPR, 2019. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.747, + 0.47, + 0.801 + ], + "angle": 0, + "content": "[49] Albert Pumarola, Alexander Vakhitov, Antonio Agudo, Alberto Sanfeliu, and Francese Moreno-Noguer. Pl-slam: Realtime monocular visual slam with points and lines. In ICRA, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.804, + 0.469, + 0.843 + ], + "angle": 0, + "content": "[50] Yiming Qian and James H. Elder. A reliable online method for joint estimation of focal length and camera rotation. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.846, + 0.47, + 0.899 + ], + "angle": 0, + "content": "[51] Srikumar Ramalingam, Michel Antunes, Dan Snow, Gim Hee Lee, and Sudeep Pillai. Line-sweep: Cross-ratio for wide-baseline matching and 3d reconstruction. In CVPR, 2015. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.134 + ], + "angle": 0, + "content": "[52] Srikumar Ramalingam, Sofien Bouaziz, and Peter Sturm. Pose estimation using both points and lines for geolocation. In ICRA, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.894, + 0.175 + ], + "angle": 0, + "content": "[53] Siddhant Ranade and Srikumar Ramalingam. Novel single view constraints for Manhattan 3d line reconstruction. In 3DV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.894, + 0.233 + ], + "angle": 0, + "content": "[54] Jing Ren, Biao Zhang, Bojian Wu, Jianqiang Huang, Lubin Fan, Maks Ovsjanikov, and Peter Wonka. Intuitive and efficient roof modeling for reconstruction and synthesis. In ACM SIGGRAPH Asia, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.894, + 0.303 + ], + "angle": 0, + "content": "[55] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M. Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.305, + 0.894, + 0.346 + ], + "angle": 0, + "content": "[56] Paul-Edouard Sarlin. Visual localization made easy with hloc. https://github.com/cvg/Hierarchical-Localization/.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.893, + 0.389 + ], + "angle": 0, + "content": "[57] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In CVPR, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.391, + 0.894, + 0.432 + ], + "angle": 0, + "content": "[58] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.434, + 0.893, + 0.473 + ], + "angle": 0, + "content": "[59] Torsten Sattler et al. RansacLib - A Template-based *SAC Implementation. https://github.com/tsattler/RansacLib.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.476, + 0.894, + 0.516 + ], + "angle": 0, + "content": "[60] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2d-to-3d matching. In ICCV, 2011. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.519, + 0.894, + 0.586 + ], + "angle": 0, + "content": "[61] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenberg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In CVPR, 2018. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.589, + 0.894, + 0.63 + ], + "angle": 0, + "content": "[62] Torsten Sattler, Tobias Weyand, Bastian Leibe, and Leif Kobbelt. Image retrieval for image-based localization revisited. In BMVC, 2012. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.633, + 0.894, + 0.687 + ], + "angle": 0, + "content": "[63] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.689, + 0.894, + 0.716 + ], + "angle": 0, + "content": "[64] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 1, 2, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.894, + 0.758 + ], + "angle": 0, + "content": "[65] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.761, + 0.894, + 0.814 + ], + "angle": 0, + "content": "[66] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in RGB-D images. In CVPR, 2013. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.894, + 0.857 + ], + "angle": 0, + "content": "[67] Noah Snavely, Steven M Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM SIGGRAPH, 2006. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.894, + 0.9 + ], + "angle": 0, + "content": "[68] Noah Snavely, Steven M Seitz, and Richard Szeliski. Modeling the world from internet photo collections. *IJCV*, 80(2):189-210, 2008. 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "21454" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.147 + ], + "angle": 0, + "content": "[69] Christoph Strecha, Wolfgang Von Hansen, Luc Van Gool, Pascal Fua, and Ulrich Thoennessen. On benchmarking camera calibration and multi-view stereo for high resolution imagery. In CVPR, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.47, + 0.19 + ], + "angle": 0, + "content": "[70] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.472, + 0.22 + ], + "angle": 0, + "content": "[71] Roberto Toldo and Andrea Fusiello. Robust multiple structures estimation with j-linkage. In ECCV, 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.472, + 0.262 + ], + "angle": 0, + "content": "[72] Alexander Vakhitov, Jan Funke, and Francesc Moreno-Noguer. Accurate and linear time pose estimation from points and lines. 
In ECCV, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.472, + 0.303 + ], + "angle": 0, + "content": "[73] Alexander Vakhitov and Victor Lempitsky. Learnable line segment descriptor for visual slam. IEEE Access, 7:39923-39934, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.472, + 0.347 + ], + "angle": 0, + "content": "[74] Bart Verhagen, Radu Timofte, and Luc Van Gool. Scale-invariant line descriptors for wide baseline matching. In WACV, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.472, + 0.403 + ], + "angle": 0, + "content": "[75] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. TPAMI, 32(4):722-732, 2008. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.472, + 0.446 + ], + "angle": 0, + "content": "[76] Zhiheng Wang, Fuchao Wu, and Zhanyi Hu. Msld: A robust descriptor for line matching. Pattern Recognition, 42(5):941-953, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.448, + 0.472, + 0.49 + ], + "angle": 0, + "content": "[77] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In CVPR, 2022. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.472, + 0.544 + ], + "angle": 0, + "content": "[78] Xinyu Wei, Jun Huang, and Xiaoyuan Ma. Real-time monocular visual slam by combining points and lines. In IEEE International Conference on Multimedia and Expo (ICME), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.547, + 0.472, + 0.588 + ], + "angle": 0, + "content": "[79] Hoi Sim Wong, Tat-Jun Chin, Jin Yu, and David Suter. Dynamic and hierarchical multi-structure geometric model fitting. In ICCV, 2011. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.59, + 0.472, + 0.63 + ], + "angle": 0, + "content": "[80] Changchang Wu. Visualsfm: A visual structure from motion system. http://www.cs.washington.edu/homes/ccwu/vsfm, 2011. 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.632, + 0.472, + 0.673 + ], + "angle": 0, + "content": "[81] Yifan Xu, Weijian Xu, David Cheung, and Zhuowen Tu. Line segment detection using transformers without edges. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.472, + 0.716 + ], + "angle": 0, + "content": "[82] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.472, + 0.759 + ], + "angle": 0, + "content": "[83] Nan Xue, Tianfu Wu, Song Bai, Fudong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.472, + 0.802 + ], + "angle": 0, + "content": "[84] Sungho Yoon and Ayoung Kim. Line as a visual sentence: Context-aware line descriptor for visual localization. IEEE Robotics and Automation Letters, 6(4):8726-8733, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.472, + 0.87 + ], + "angle": 0, + "content": "[85] Lilian Zhang and Reinhard Koch. An efficient and robust line segment matching approach based on lbd descriptor and pairwise geometric consistency. Journal of Visual Communication and Image Representation, 24(7):794-805, 2013. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[86] Lilian Zhang and Reinhard Koch. Structure and motion from line correspondences: Representation, projection, initializa-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.894, + 0.133 + ], + "angle": 0, + "content": "tion and sparse bundle adjustment. Journal of Visual Communication and Image Representation, 25(5):904-915, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[87] Lilian Zhang, Huimin Lu, Xiaoping Hu, and Reinhard Koch. Vanishing point estimation and line classification in a Manhattan world with a unifying camera model. *IJCV*, 117, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.192, + 0.894, + 0.246 + ], + "angle": 0, + "content": "[88] Ziheng Zhang, Zhengxin Li, Ning Bi, Jia Zheng, Jinlei Wang, Kun Huang, Weixin Luo, Yanyu Xu, and Shenghua Gao. Ppgnet: Learning point-pair graph for line segment detection. In CVPR, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.249, + 0.893, + 0.29 + ], + "angle": 0, + "content": "[89] Lipu Zhou, Jiamin Ye, and Michael Kaess. A stable algebraic camera pose estimation for minimal configurations of 2d/3d point and line correspondences. In ACCV, 2018. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.292, + 0.893, + 0.333 + ], + "angle": 0, + "content": "[90] Yichao Zhou, Haozhi Qi, Yuexiang Zhai, Qi Sun, Zhili Chen, Li-Yi Wei, and Yi Ma. Learning to reconstruct 3d Manhattan wireframes from a single image. In ICCV, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.335, + 0.894, + 0.374 + ], + "angle": 0, + "content": "[91] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In IROS, 2017. 2" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "21455" + } + ] +] \ No newline at end of file diff --git a/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_origin.pdf b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aaa0d77f0067b5d187504d27d2d1f635b228abf8 --- /dev/null +++ b/2023/3D Line Mapping Revisited/6d931762-d036-45d2-bafa-8ad88d81ad10_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f06990ed8615f8020d269edd70a84dfcac8b971255221f60375ff327fd6f8f4b +size 7222974 diff --git a/2023/3D Line Mapping Revisited/full.md b/2023/3D Line Mapping Revisited/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8c89a0e7b1a0ec8843d950c8f3b271d0aea1ddf5 --- /dev/null +++ b/2023/3D Line Mapping Revisited/full.md @@ -0,0 +1,400 @@ +# 3D Line Mapping Revisited + +Shaohui Liu $^{1}$ Yifan Yu $^{1}$ Rémi Pautrat $^{1}$ Marc Pollefeys $^{1,2}$ Viktor Larsson $^{3}$ \ + $^{1}$ Department of Computer Science, ETH Zurich $^{2}$ Microsoft $^{3}$ Lund University + +# Abstract + +In contrast to sparse keypoints, a handful of line segments can concisely encode the high-level scene layout, as they often delineate the main structural elements. 
In addition to offering strong geometric cues, they are also omnipresent in urban landscapes and indoor scenes. Despite their apparent advantages, current line-based reconstruction methods are far behind their point-based counterparts. In this paper we aim to close the gap by introducing LIMAP, a library for 3D line mapping that robustly and efficiently creates 3D line maps from multi-view imagery. This is achieved through revisiting the degeneracy problem of line triangulation, carefully crafted scoring and track building, and exploiting structural priors such as line coincidence, parallelism, and orthogonality. Our code integrates seamlessly with existing point-based Structure-from-Motion methods and can leverage their 3D points to further improve the line reconstruction. Furthermore, as a byproduct, the method is able to recover 3D association graphs between lines and points / vanishing points (VPs). In thorough experiments, we show that LIMAP significantly outperforms existing approaches for 3D line mapping. Our robust 3D line maps also open up new research directions. We show two example applications: visual localization and bundle adjustment, where integrating lines alongside points yields the best results. Code is available at https://github.com/cvg/limap. + +# 1. Introduction + +The ability to estimate 3D geometry and build sparse maps via Structure-from-Motion (SfM) has become ubiquitous in 3D computer vision. These frameworks enable important tasks such as building maps for localization [60], providing initial estimates for dense reconstruction and refinement [65], and novel view synthesis [45, 48]. Currently, the field is dominated by point-based methods in which 2D keypoints are detected, matched, and triangulated into 3D maps [20, 64]. These sparse maps offer a compact scene representation, only reconstructing the most distinctive points. + +While there have been tremendous progress in point-based reconstruction methods, they still struggle in scenes + +![](images/b7c6006e8d87ae2a4d4b06e0b0a0163695abd45b888839868b1d0e9dc9ff0164.jpg) + +![](images/42c688333ec5f636caf7e8f17d09de082462e00986e8a477cbffcb053d7a618a.jpg) +(a) Point mapping [13,64] +(c) Line-point association +Figure 1. In this paper, we propose a robust pipeline for mapping 3D lines (b), which offers stronger geometric clues about the scene layout compared to the widely used point mapping (a). Part of the success of our pipeline attributes to the modeling of structural priors such as coincidence (c), and parallelism / orthogonality (d). The corresponding 3D association graphs between lines and points / vanishing points (VPs) are also recovered from our system as a byproduct. The degree-1 point and degree-2 junctions are colored in blue and red respectively in (c), while parallel lines associated with the same VP are colored the same in (d). + +![](images/8a261521be3287e665917dbe7fcad46e562a47fcdcbe60356f850c172e6956ce.jpg) +(b) Line mapping + +![](images/063cf27913e9fb84ea7d5b41014e9b218b4e790037689a747a42081cdb737421.jpg) +(d) Line-VP association + +where it is difficult to detect and match sufficiently many stable keypoints, such as in indoor areas. On the contrary, these man-made scenes contain abundant lines, e.g. in walls, windows, doors, or ceilings. Furthermore, lines exhibit higher localization accuracy with less uncertainty in pixels [16]. 
Last but not least, lines appear in highly structured patterns, often satisfying scene-wide geometric constraints such as co-planarity, coincidence (line intersections), parallelism, and orthogonality. In practice, lines suffer from different issues, such as poor endpoint localization and partial occlusion. However, recent line detectors and matchers are bridging the gap of performance between points and lines [25, 46, 84], making it timely to revisit the line reconstruction problem. + +Despite their rich geometric properties and abundance in the real world, there exist very few line-based reconstruction methods in the literature [22,23,44,77]. In practical applications, they have also not achieved the same level of success as their point-based counterparts. We believe this is due to + +several intrinsic challenges specific to line mapping: + +- Inconsistent endpoints. Due to partial occlusion, lines often have inconsistent endpoints across images. +- Line fragmentation. In each image there might be multiple line segments that belong to the same line in 3D. This makes the process of creating track associations more complex compared to building 3D point tracks. +- No two-view geometric verification. While point matches can be verified in two views via epipolar geometry, lines require at least three views to filter. +- Degenerate configurations. In practice line triangulation is more prone to unstable configurations (see Fig. 8), e.g. becoming degenerate whenever the line is parallel with the camera motion (i.e. to epipolar lines). +- Weaker descriptor-based matching. State-of-the-art descriptors for line segments are far behind their point-based counterparts, putting more emphasis on geometric verification and filtering during reconstruction. + +In this paper we aim to reduce the gap between point-based and line-based mapping solutions. We propose a new robust mapping method, LIMAP, that integrates seamlessly into existing open-source point-based SfM frameworks [64, 67, 80]. By sharing the code with the research community we hope to enable more research related to lines; both for low-level tasks (such as improving line segment detection and description) and for integrating lines into higher-level tasks (such as visual localization or dense reconstruction). In particular, we make the following contributions in the paper: + +- We build a new line mapping system that reliably reconstructs 3D line segments from multi-view RGB images. Compared to previous approaches, our line maps are significantly more complete and accurate, while having more robust 2D-3D track associations. +- We achieve this by automatically identifying and exploiting structural priors such as coincidence (junctions) and parallelism. Our technical contribution spans all stages of line mapping including triangulating proposals, scoring, track building, and joint optimization, with 3D line-point / VP association graphs output as a byproduct. +- The framework is flexible such that researchers can easily change components (e.g. detectors, matchers, vanishing point estimators, etc.) or integrate additional sensor data (e.g. depth maps or other 3D information). +- We are the first to go beyond small test sets by quantitatively evaluating on both synthetic and real datasets to benchmark the performance, with hundreds of images for each scene, in which LIMAP consistently and significantly outperforms existing approaches. 
+- Finally, we demonstrate the usefulness of having robust line maps by showing improvement over purely point-based methods in tasks such as visual localization and bundle adjustment in Structure-from-Motion. + +# 2. Related Work + +Line Detection and Matching. Detecting 2D line segments conventionally relies on grouping image gradients [5, 75]. To improve the robustness and repeatability, learning-based line detectors were later proposed to tackle the problem of wireframe parsing [25, 43, 82, 83, 88, 90]. Recent deep detectors [26, 46, 81] manage to achieve impressive results for detecting general line segments. Matching of the detected line segments is often based on comparing either handcrafted [8, 74, 76, 85] or learning-based [1, 34, 46, 73, 84] descriptors. Some recent methods also exploit point-line [14, 15] and line-junction-line structures [38, 39] to improve matching results, yet still not reaching the reliability level of advanced point matchers [58, 70]. Our method can leverage any line detector and matcher, and is robust to outliers. + +Line Reconstruction. As a seminal work, Bartoli and Sturm [6, 7] proposed a full SfM pipeline for line segments, later improved by Schindler [63] with Manhattan-world assumption [12]. Jain et al. [27] proposed to impose global topological constraints between neighboring lines, which were further explored in [51, 53, 54] to build wireframe models. Some learning-based methods [42, 90] were introduced as well to predict 3D wireframes. Hofer et al. [21-23] proposed checking weak epipolar constraints over exhaustive matches and graph clustering, and introduced the Line3D++ software (referred as L3D++ in this paper), which remains the top choice [17, 42] for acquiring 3D line maps so far. Recently, ELSR [77] employed planes and points to guide the matching. However, all prior work mainly shows qualitative results and provides quantitative evaluation only on relatively small image sets [27, 69]. In this paper, we set up a quantitative evaluation on benchmarks with hundreds of images, where our proposed system significantly surpasses prior work by improving all stages in the mapping pipeline. + +Line-based Applications. The resulting 3D line maps can be used for many downstream applications. [23] advocates the complementary nature of line reconstruction for structure visualization. Some incremental line-based SfM systems are introduced in [24,44,86]. To improve quality and robustness, recent methods [18,19,40,41,49,78,91] jointly employ point and line features in SLAM. While their line maps are often noisy and incomplete, noticeable improvement has been achieved in the accuracy of the recovered camera motion. There has also been development on VP estimation [9,37,50, 87] and solvers for joint point-line pose estimation [4,52,72, 89]. Recently, promising performance in visual localization has been achieved by combining point and line features in a refinement step [17]. In this paper, we show that our line maps can benefit multiple applications such as localization, SfM, and MVS (Sec. J in supp.). In particular, we present very competitive results on point-line visual localization. + +![](images/e2035426f0870c1a56ee692ad1a0dd00604fc1258dd5014566dea5d66113f6f4.jpg) +Figure 2. Overview. Given a set of posed images and optional 3D points, we associate nearby points to lines, match the lines, triangulate them with 4 different strategies, score 3D line proposals, build line tracks, jointly optimize all features, before obtaining our final reconstruction. 
+ +# 3. The Proposed 3D Line Mapping Pipeline + +We now present our proposed pipeline for 3D line mapping. Our method takes as input a set of images with 2D line segments from any existing line detectors. We assume the camera pose for each image is available (e.g. from SfM/SLAM), and optionally we can also leverage a 3D point cloud (e.g. obtained from point-based SfM). The pipeline consists of three main steps: + +- Proposal Generation (Sec. 3.1): For each 2D line segment, we generate a set of 3D line segment proposals. +- Scoring and Track Association (Sec. 3.2): Considering multi-view consistency, we score each proposal, select the best candidate for each 2D line, and associate them into a set of 3D line tracks. +- Joint Refinement (Sec. 3.3): Finally, we jointly perform non-linear refinement over the 3D line tracks along with 3D points and VP directions, integrating additional structural priors as soft constraints. + +Figure 2 shows an overview of the overall pipeline. In the following sections, we detail each of the three main steps. + +By design our pipeline is robust to scale changes and we use the same hyper-parameters for all experiments across datasets, which are provided in Sec. F.2 in the supp. + +# 3.1. Generating 3D Line Segment Proposals + +The first step is to generate a set of 3D line proposals for each 2D line segment. Given a segment in an image, we use any existing line finder to retrieve the top $K$ line matches in each of the $n_v$ closest images. Using the top $K$ line matches instead of a single match increases the chance of getting a correct match, while wrong matches will be filtered out in subsequent steps. + +Let $(\pmb{x}_1^r,\pmb{x}_2^r)\in \mathbb{R}^3\times \mathbb{R}^3$ be the two endpoints (in homogeneous coordinates normalized by the intrinsics) for the reference line segment that we wish to generate proposals for. For ease of notation, we let the world-coordinate system align with the reference view. The endpoints of the 3D line proposals that we generate can all be written as + +$$ +\boldsymbol {X} _ {1} = \lambda_ {1} \boldsymbol {x} _ {1} ^ {r}, \quad \boldsymbol {X} _ {2} = \lambda_ {2} \boldsymbol {x} _ {2} ^ {r}, \tag {1} +$$ + +for some values of $\lambda_1, \lambda_2 \in \mathbb{R}$ . Having the 3D endpoints of all proposals lie on the camera rays of the 2D endpoints simplifies the scoring procedure in the second step (Sec. 3.2). + +# 3.1.1 Line Triangulation + +For each matched 2D line segment $(\pmb{x}_1^m, \pmb{x}_2^m)$ we generate one proposal via algebraic line triangulation. Let $(R^m, t^m)$ be the camera pose of the matched view. We can then solve linearly for the endpoint ray depths $\lambda_i$ as + +$$ +\left(\boldsymbol {x} _ {1} ^ {m} \times \boldsymbol {x} _ {2} ^ {m}\right) ^ {T} \left(R ^ {m} \left(\lambda_ {i} \boldsymbol {x} _ {i} ^ {r}\right) + \boldsymbol {t} ^ {m}\right) = 0, \quad i = 1, 2. \tag {2} +$$ + +The proposals are then filtered with cheirality checks (positive $\lambda$ ) and degeneracy check via the angle between ray $x_{i}^{r}$ and $\ell_{m} = x_{1}^{m} \times x_{2}^{m}$ . Note that line triangulation becomes inherently unstable close to degenerate configurations when $\ell_{m}^{T} R^{m} x_{i}^{r} = 0$ , where we get zero or infinite solutions from (2). Geometrically, this happens when the line is parallel with the epipolar plane: If $\ell_{m}^{T} t^{m} \neq 0$ they have no intersection, otherwise they intersect fully and we get infinite solutions $\ell_{m} \sim t^{m} \times R^{m} x_{i}^{r} = E x_{i}^{r}$ , i.e. 
the line segment coincides with the epipolar line from $x_{i}^{r}$ . This issue is further illustrated in Figure 8. Since we solve for each $\lambda_{i}$ independently, the triangulation problem can have zero, one, or two degenerate endpoints. We term the case with one degenerate endpoint as a weakly degenerate one, and the case with two degenerate endpoints as fully degenerate. In contrast to the point case, two-view line triangulation is minimal such that any solution fits the measurements exactly with zero error, preventing filtering with 2D reprojection error at this stage. + +# 3.1.2 Point-Line Association + +To obtain meaningful proposals in degenerate cases, we leverage additional geometric information coming from either points or associated vanishing points (VPs). 2D-3D point correspondences can either come from a point-based SfM model or be triangulated from matched endpoints/junctions. For each 2D line segment, we associate all 2D points within a fixed pixel threshold and thereby associate with their corresponding 3D points. For each image, we also estimate a set of VPs and their association to 2D lines using JLinkage [71]. + +# 3.1.3 Point-guided Line Triangulation + +We now generate a second set of proposals for each 2D line segment with the assistance of the associated 2D-3D point correspondences and vanishing points. In the following parts we present three different methods. M1 employs multiple associated 3D points so it is stable for all cases including the fully degenerate ones, while M2 and M3 with one known point / VP can help generate stable proposals in weakly degenerate cases, which are more common in practice. Cheirality tests are applied to all proposals with respect to both views. + +M1. Multiple Points. For each matched line segment we generate one proposal by collecting all of the associated 3D points that are common between the reference and the match. On top of those common points, we fit a 3D line that is then projected onto two camera rays corresponding to $\boldsymbol{x}_1^r$ and $\boldsymbol{x}_2^r$ . M2. Line + Point. For each matched line segment we also generate one proposal for each shared 3D point. We first project the 3D point onto the plane spanned by $\boldsymbol{x}_1^r$ and $\boldsymbol{x}_2^r$ . We then aim to find a line that passes through the projection and minimizes the residuals in (2) to the matched line. This can be formulated as a quadratic optimization problem in the two endpoint depths $\lambda = (\lambda_1, \lambda_2)$ with a single constraint: + +$$ +\min _ {\boldsymbol {\lambda} \in \mathbb {R} ^ {2}} \boldsymbol {\lambda} ^ {T} A \boldsymbol {\lambda} + \boldsymbol {b} ^ {T} \boldsymbol {\lambda}, \quad \text {s . t .} \quad \boldsymbol {\lambda} ^ {T} Q \boldsymbol {\lambda} + \boldsymbol {q} ^ {T} \boldsymbol {\lambda} = 0. \tag {3} +$$ + +Due to the low-dimensionality of the problem, a closed-form solution can be derived by reducing it to a univariate quartic polynomial. We show the full derivation in Sec. B in supp. M3. Line + VP. Each VP corresponds to a 3D direction. For each associated VP, we generate one proposal based on its direction (again projected onto the plane spanned by $\boldsymbol{x}_1^r$ and $\boldsymbol{x}_2^r$ ). This gives a single linear constraint on the ray depths, + +$$ +\left(\boldsymbol {v} \times \left(\boldsymbol {x} _ {1} ^ {r} \times \boldsymbol {x} _ {2} ^ {r}\right)\right) ^ {T} \left(\lambda_ {2} \boldsymbol {x} _ {2} ^ {r} - \lambda_ {1} \boldsymbol {x} _ {1} ^ {r}\right) = 0. 
\tag {4} +$$ + +where $\pmb{v} \in \mathbb{R}^3$ is the VP. Using the constraint, we then solve for $\lambda = (\lambda_1, \lambda_2)$ by minimizing the two residuals of (2) in a least squares sense. Note that $\pmb{v}$ can either come from the reference image, or from a matched line in another image. + +Extension: Line Mapping Given Depth Maps. The proposal generation step can be improved when each image has a corresponding depth map (e.g. from an RGB-D sensor), which can be leveraged with robust line fitting to generate the 3D line proposals. Refer to Sec. E in our supplementary material for more details and results. + +# 3.2. Proposal Scoring and Track Association + +At this point, each 2D line segment $l$ in image $I$ is associated with a set $\mathcal{K}$ of 3D line segment proposals (stemming from the top $K$ line matches and various triangulations) for each neighboring image $J$ . We describe in the following how we select the best 3D line proposal for each 2D line segment, and associate these lines into tracks. For each of these + +![](images/9ab2fc94295adffedb7ce2f593bfb4279a84e9e444b7a516c1ed6d8fb933c756.jpg) +(a) Perspective distance + +![](images/7ad7199b9cb8013334d75749a5e64dc34611bc011aeb77d11ba26b0d2abc2a08.jpg) +(b) Overlap score +Figure 3. Scoring methods. We propose three novel line scoring measures that are scale-invariant and handle different line lengths. + +![](images/8aaf83de13b438ccf8867e72a0c85829d85a7b58a298e6c58675ff6c85fc2c0e.jpg) +(c) InnerSeg distance + +steps, we leverage different scoring methods quantifying the distance between two 3D line segments $(L_1, L_2)$ . These distances are usually computed symmetrically and averaged, and can be obtained both in 3D and in 2D by projecting each 3D line into the other view. We start by presenting two classic ones, and then define our three novel line distances (one for 3D proposal selection and two for track building). + +- Angular distance: angle between $L_{1}$ and $L_{2}$ . +- Perpendicular distance: maximum orthogonal distance of the endpoints of $L_{1}$ to the infinite line spanned by $L_{2}$ . + +3D Proposal Selection. To select best 3D candidate for each 2D line, we score each proposal $L_{i}$ by measuring its consistency with the others. Here we introduce a new distance: + +- Perspective distance: assuming the endpoints of $L_{1}$ and $L_{2}$ are on the same rays as in Fig. 3(a), the distance is defined as the endpoint distances, divided by the ray depths $d_{s}, d_{e}$ of the endpoints of $L_{1}$ in image 1. This score can filter out ill-posed triangulations (refer to Sec. F.3 in supp. for detailed discussions), while remaining scale-invariant. + +This new distance, together with the angular distance in 2D and 3D, and the perpendicular distance in 2D, have different scales. In order to aggregate them together, we associate a scaling factor $\tau_r$ to each distance $r$ and get a normalized score $s_n = e^{-(r / \tau_r)^2} \in (0,1]$ . Denoting by $\mathcal{S}$ the set of all the corresponding normalized scores and $\mathbb{I}$ the indicator function, the score between $L_1$ and $L_2$ becomes + +$$ +s \left(L _ {1}, L _ {2}\right) = \min _ {s _ {n} \in S} \left(s _ {n} \cdot \mathbb {1} _ {s _ {n} \geq 0. 5}\right) \in \{0 \} \cup [ 0. 5, 1 ]. \tag {5} +$$ + +Now equipped with unique score per line pair, we can consider all the neighboring 3D line candidates $L_{j}^{k}$ coming from the neighboring image $J$ and proposal $k$ . 
The consistency score is defined by summing the best score from each image: + +$$ +s _ {c} \left(L _ {i}\right) = \sum_ {J \in \mathcal {N} _ {I}} \max _ {k \in \mathcal {K}} s \left(L _ {i}, L _ {J} ^ {k}\right), \tag {6} +$$ + +where $\mathcal{N}_I$ is the set of neighboring images of $I$ . The best 3D line candidate for each 2D line segment $l$ is then selected as the proposal with the highest score: $L = \operatorname{argmax}_{L_i} s_c(L_i)$ . If the score is less than 1.0, i.e. the best candidate has less than two supports from neighboring views, we ignore this 2D line segment in the subsequent track building process. + +Track Building. At this point, each 2D segment has been assigned a unique 3D line (its best 3D line candidate). The goal of this step is to gather these 2D segments into line tracks. For this, we form a graph where the 2D segments are nodes and all initial line matches are edges. We aim to prune edges in the graph such that the connected 2D segments share similar 3D assignments. We propose two new line scoring measures that can cope with different endpoint configurations and variable scales across images. + +- Overlap score: we project $L_{1}$ orthogonally onto $L_{2}$ , clip the projected endpoints to the endpoints of $L_{2}$ if they fall outside of $L_{2}$ to get segment $\Pi(L_{1})$ , and compare the ratio of lengths to a threshold $\tau_{o}$ : $\mathbb{1}_{\frac{|\Pi(L_1)|}{|L_2|} \geq \tau_o}$ (see Fig. 3(b)). +- InnerSeg distance: the endpoints of $L_{1}$ are perpendicularly unprojected to $L_{2}$ . If they fall outside of $L_{2}$ , we clip them to the closest endpoint of $L_{2}$ . By doing this in both directions, we can define two inner segments (see Fig. 3(c)), and the InnerSeg distance as the maximum distance between their endpoints. To make this measure scale-invariant, we additionally divide it by a scale factor $\sigma = \frac{\min(d_{1}, d_{2})}{f}$ , where $d_{j}$ is the depth of the mid-point of $L_{j}$ in image $J$ and $f$ is the focal length. This encodes how far the mid-point can move in 3D before reaching 1 pixel error in the image (detailed in Sec. F.3 in supp.). + +We then convert the InnerSeg distance computed in 3D to a normalized score as in the previous paragraph, and combine it with the overlap score in 2D and 3D and previous scores using (5). Given these pairwise scores of 3D lines, we can now prune edges whose score is below a threshold $t_f = 0.5$ . The connected components of the resulting graph yield the line tracks, ignoring components with less than 3 nodes. + +For each track, we then re-estimate a single 3D line segment. Using the set of endpoints from the 3D assignments of all nodes in the track, we apply Principal Component Analysis (PCA) and use the principal eigenvector and mean 3D point to estimate the infinite 3D line. We then project all endpoints on this infinite line to get the new 3D endpoints. + +# 3.3. Joint Optimization of Lines and Structures + +Finally, we perform non-linear refinement on the acquired 3D lines with their track information. The straightforward approach is to perform geometric refinement on the reprojection error. With the 2D point-line association available, we can formulate a joint optimization problem by including additional structural information. 
The energy to minimize can be written as follows: + +$$ +E = \sum_ {p} E _ {P} (p) + \sum_ {l} E _ {L} (l) + \sum_ {(p, l)} E _ {P L} (p, l), \tag {7} +$$ + +where $E_{P}$ and $E_{L}$ are the data terms, and $E_{PL}$ encodes the 3D association between lines and points / VPs. In particular, + +$E_{P}$ is the 2D point reprojection error as in regular bundle adjustment [64]. The association energy is softly weighted (as discussed later) and optimized with robust Huber loss [3]. Each line is converted into a 4-DoF infinite line with Plücker coordinate [7] for optimization and converted back to line segments by unprojecting its 2D supports. Each vanishing point is parameterized with a 3-dimensional homogeneous vector. Refer to Sec. A in supp. for details on efficient computation with minimal parameterization. + +Geometric Refinement. The data term of each line track is also defined on its 2D reprojections. In particular, we measure the 2D perpendicular distance weighted by the angle consistency, which we robustly equip with Cauchy loss [3]: + +$$ +E _ {L} (l) = \sum_ {k} w _ {\angle} ^ {2} \left(L _ {k}, \ell_ {k}\right) \cdot e _ {\text {p e r p}} ^ {2} \left(L _ {k}, \ell_ {k}\right), \tag {8} +$$ + +where $e_{\mathrm{perp}}$ is the perpendicular distance, $L_{k}$ is the 2D projection of the 3D segment, $\ell_{k}$ are the 2D line segments, and $w_{\angle}$ is the exponential of one minus the cosine of the 2D angle between the projected and the observed line. + +Soft Association between Lines and Points. For each pair of 3D line and 3D point with their track information, we can estimate how likely they are spatially associated by traversing the 2D association graph (described in Sec. 3.1.2) of their supports. Specifically, we count the number of associations among the 2D supports of the line track and point track, and keep pairs with at least three 2D associations. The 3D association energy $E_{PL}$ , defined on the surviving pairs, is formulated as the 3D point-line distance weighted by the number of 2D associations on their supports. + +Soft Association between Lines and VPs. Same as the point case, we can also build a soft association problem between lines and VPs. First, we acquire 3D VP tracks by transitively propagating line correspondences from the 3D line tracks. Then, we count the number of associations among the 2D supports for each pair of 3D line and VP track. The 3D line-VP association energy is defined as the sine of the direction angle between the 3D line and the VP, implicitly enforcing parallelism. Furthermore, we add regularizations to the nearly orthogonal VP pairs to enforce orthogonality of different line groups. Refer to Sec. C in supp. for details. + +# 4. Experiments + +Implementation Details. Our whole library is implemented in C++ with Python bindings [28]. The triangulation and scoring can be run in parallel for each node, enabling scalability to large datasets. We use $n_v = 20$ visual neighbors and keep the top $K = 10$ line matches. We provide all the values of thresholds and scaling factors in Sec. F.2 in supp. + +# 4.1. Line Mapping + +To validate the effectiveness of our system, we set up an evaluation benchmark to quantify the quality of the recon + +
| Line type | Method | R1 | R5 | R10 | P1 | P5 | P10 | # supports |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| LSD [75] | L3D++ [23] | 37.0 | 153.1 | 218.8 | 53.1 | 80.8 | 90.6 | (14.8 / 16.8) |
| LSD [75] | ELSR [77] | 13.9 | 59.7 | 96.5 | 55.4 | 72.6 | 82.2 | (N/A / N/A) |
| LSD [75] | Ours | 48.6 | 185.2 | 251.3 | 60.1 | 82.4 | 90.0 | (16.4 / 20.5) |
| SOLD2 [46] | L3D++ [23] | 36.9 | 107.5 | 132.8 | 67.2 | 86.8 | 93.2 | (13.2 / 20.4) |
| SOLD2 [46] | Ours | 54.3 | 151.1 | 191.2 | 69.8 | 84.6 | 90.0 | (16.5 / 38.7) |

Table 1. Line reconstruction on Hypersim [55] with LSD [75] and SOLD2 [46] lines. $R\tau$ and $P\tau$ are reported at 1 mm, 5 mm, and 10 mm, along with the average number of supporting images / lines.
| Method | R5 | R10 | R50 | P5 | P10 | P50 | # supports |
| --- | --- | --- | --- | --- | --- | --- | --- |
| L3D++ [23] | 373.7 | 831.6 | 2783.6 | 40.6 | 54.5 | 85.9 | (8.8 / 9.3) |
| ELSR [77] | 139.2 | 322.5 | 1308.0 | 38.5 | 48.0 | 74.5 | (N/A / N/A) |
| Ours (line-only) | 472.1 | 1058.8 | 3720.7 | 46.8 | 58.4 | 86.1 | (10.3 / 11.8) |
| Ours | 508.3 | 1154.5 | 4179.5 | 46.0 | 56.9 | 83.7 | (10.4 / 12.0) |
+ +Table 2. Line reconstruction on train split of Tanks and Temples [32] with LSD [75] lines. $R\tau$ and $P\tau$ are reported at $5\mathrm{\;{mm}},{10}\mathrm{\;{mm}}$ , ${50}\mathrm{\;{mm}}$ along with the average number of supporting images/lines. + +structed 3D line maps. As there are no ground truth (GT) 3D lines, we evaluate the 3D line mapping with either GT mesh models or point clouds. We use the following metrics: + +- Length recall (in meters) at $\tau (R\tau)$ : sum of the lengths of the line portions within $\tau$ mm from the GT model. +- Inlier percentage at $\tau (P\tau)$ : the percentage of tracks that are within $\tau$ mm from the GT model. +- Average supports: average number of image supports and 2D line supports across all line tracks. + +In the following, we compare our system with two state-of-the-art methods as baselines: L3D++ [23] and ELSR [77], using two line detectors: the traditional LSD detector [75] and the learning-based SOLD2 [46]. For ELSR [77], we convert the input into VisualSfM [80] format and use code from the authors (only supporting LSD [75]). + +Our first evaluation is run on the first eight scenes of the Hypersim dataset [55], composed of 100 images each, and is reported in Tab. 1. For both detectors, we reconstruct much more complete line maps with better or comparable precision than the competitors, while also exhibiting significantly higher quality of track information. This abundant track association is beneficial particularly for line-based applications such as visual localization [17]. After discussing with the authors of ELSR, it seems that their method does not achieve satisfactory results due to a lack of point and plane features. + +We further evaluate all three methods on the train split of the Tanks and Temples dataset [32] without Ignatius as it has no line structures. As SOLD2 [46] is trained for indoor images, we only use LSD [75]. Since the provided point cloud was cleaned to focus only on the main subject, we compute its bounding box, extend it by one meter, and only evaluate lines inside this region. This prevents incorrectly penalizing correct lines that are far away from the main scene, + +![](images/ccb021e2a27d19fa77bda20fabb5aee20a3900e76a021bda5915e472624bd5af.jpg) + +![](images/da17233417b9be6bbb3c5e74e999c89b1a874c9cfcf2e8246a1b0b0d33ace9e1.jpg) +Figure 4. Top row: L3D++ [23]. Bottom row: Ours. Both systems are run on Horse and Family from [32]. We show two different views on the main scene of Horse. + +![](images/7bb06070b83fe3941bfbdfac8de8bce99f064bfde89ecc59a067f3427b81cc33.jpg) +Figure 5. Qualitative results on Hypersim [55] and Tanks and Temples [32]. On Barn we jointly visualize our results and the aligned ground truth point cloud. +Figure 6. Qualitative results of the recovered line-point and line-VP association graphs (visualized similarly as in Fig. 1). + +which our method is particularly good at thanks to our scale-invariant design (refer to Sec. G in supp.). Tab. 2 shows the results, where our methods significantly improve the mapping quality across the board. Fig. 4 shows qualitative comparison between our method and L3D++ [23]. Our results exhibit better completeness, have less noisy lines that are flying around, and achieve significantly more robust reconstructions of subtle details (e.g. on the ground). More examples of our produced line maps are shown in Fig. 5. 
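The length recall $R\tau$ and inlier percentage $P\tau$ reported above can be approximated with a simple point-sampling scheme. The sketch below (Python/NumPy, with SciPy for nearest-neighbour queries) is illustrative only: it samples each reconstructed segment, measures distances to a point sampling of the GT model, and aggregates them; the sampling density and the per-track inlier rule (`inlier_ratio`) are assumptions rather than the exact protocol of the paper.

```python
import numpy as np
from scipy.spatial import cKDTree

def line_map_metrics(segments, gt_points, tau, samples_per_seg=100, inlier_ratio=0.5):
    """Approximate the length recall (R_tau) and inlier percentage (P_tau).

    segments:  (N, 2, 3) array of reconstructed 3D endpoints.
    gt_points: (M, 3) point sampling of the GT mesh / point cloud.
    tau:       distance threshold, in the same units as the reconstruction.
    """
    tree = cKDTree(gt_points)
    recall, inliers = 0.0, 0
    ts = np.linspace(0.0, 1.0, samples_per_seg)
    for p1, p2 in segments:
        samples = p1[None] + ts[:, None] * (p2 - p1)[None]  # points along the segment
        dists, _ = tree.query(samples)                       # distance to the GT model
        frac = float(np.mean(dists <= tau))                  # portion of the segment within tau
        recall += frac * np.linalg.norm(p2 - p1)             # R_tau: recovered length
        inliers += frac >= inlier_ratio                      # P_tau: track counted as inlier (assumed rule)
    return recall, 100.0 * inliers / max(len(segments), 1)
```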
+ +As an additional output of our system, junction structures and line-line relations such as parallelism and orthogonality are discovered, as shown in Fig. 6. This directly comes from the line-point and line-VP soft associations of Sec. 3.3. From the recovered structures, we can clearly perceive the scene and easily recognize the main Manhattan directions [12]. + +To demonstrate the scalability of the proposed system, we also run our method on two large-scale datasets: Aachen (6,697 images) [61, 62] and Rome city (16,179 images) [2, 67, 68]. Fig. 7 shows that our method produces reliable line maps with clear structures. Note that the camera poses from Bundler [67] on Rome city are far from perfect, while our mapping still works reasonably well. The efficiency + +![](images/e5995e201c77b441b824e621bf37722d768366e26d3c2fbc5d81645ca514369b.jpg) + +![](images/a79e6bcb6166a484df303977cb993a10647310aeb7490252f726ab3ec6773294.jpg) +Figure 7. Scalability to large-scale datasets: Aachen (6,697 images) [61] and Rome (16,179 images) [2,67,68]. For Aachen [61], parallel lines from the line-VP association graph are colored the same. For Rome [2,67,68], we visualize 10 representative components individually. +Figure 8. Uncertainty in line triangulation measured by the largest eigenvalue of the covariance (Sec. D in supp.). Left: Each segment is colored by the uncertainty in the triangulation. Lines that align with the epipolar lines (shown in blue) exhibit higher (red) uncertainty. Right: We perform a small synthetic experiment to illustrate this. The graph shows the uncertainty for line triangulation as the lines approach the degenerate state. We compare with point-based triangulation assuming that endpoints are consistent. + +
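To make the two-view solve of Eq. (2) and the degeneracy illustrated in Fig. 8 concrete, the sketch below recovers the two ray depths of a reference segment from one matched segment. It is a minimal NumPy illustration; the angle threshold used to reject near-degenerate endpoints is an arbitrary illustrative value, not the one used by LIMAP.

```python
import numpy as np

def triangulate_line_two_view(x1_ref, x2_ref, x1_m, x2_m, R_m, t_m, min_angle_deg=1.0):
    """Algebraic two-view line triangulation (Eq. (2)), world frame = reference camera.

    All 2D endpoints are intrinsics-normalized homogeneous vectors. Returns the two
    3D endpoints on the reference rays, or None on degeneracy / cheirality failure.
    """
    l_m = np.cross(x1_m, x2_m)          # 2D line of the matched segment
    endpoints = []
    for x_ref in (x1_ref, x2_ref):
        d = R_m @ x_ref                 # reference ray expressed in the matched view
        denom = l_m @ d
        # Degenerate when the ray (nearly) lies in the plane back-projected from the
        # matched line, i.e. l_m^T R^m x -> 0 (the unstable case of Fig. 8).
        if abs(denom) < np.sin(np.deg2rad(min_angle_deg)) * np.linalg.norm(l_m) * np.linalg.norm(d):
            return None
        lam = -(l_m @ t_m) / denom      # ray depth solving Eq. (2)
        if lam <= 0:                    # cheirality: endpoint must lie in front of the camera
            return None
        endpoints.append(lam * x_ref)   # 3D endpoint as in Eq. (1)
    return np.stack(endpoints)
```

When an endpoint is rejected here, meaningful proposals can still come from the point- and VP-guided triangulations of Sec. 3.1.3.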
| Line type | Triangulation | R1 | R5 | R10 | P1 | P5 | P10 | # supports |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| LSD [75] | Endpoints | 27.6 | 101.4 | 138.0 | 58.2 | 83.5 | 92.1 | (13.0 / 13.2) |
| LSD [75] | Line | 48.3 | 187.0 | 257.4 | 59.2 | 81.9 | 89.8 | (15.8 / 19.1) |
| SOLD2 [46] | Endpoints | 27.3 | 82.8 | 106.5 | 68.2 | 84.5 | 90.9 | (12.3 / 19.9) |
| SOLD2 [46] | Line | 50.8 | 143.5 | 180.8 | 74.4 | 86.9 | 91.2 | (15.1 / 32.2) |

Table 3. Comparison between endpoint and line triangulation on Hypersim [55]. While being more stable at triangulation, the endpoints are often unmatched between line pairs.
| Line | M1 | M2 | M3 | R1 | R5 | R10 | P1 | P5 | P10 | # supports |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|  |  |  |  | 50.8 | 143.5 | 180.8 | 74.4 | 86.9 | 91.2 | (15.1 / 32.2) |
|  |  |  |  | 24.9 | 72.5 | 95.8 | 65.9 | 81.2 | 88.5 | (11.3 / 15.7) |
|  |  |  |  | 37.7 | 116.8 | 152.6 | 71.0 | 84.2 | 89.7 | (13.8 / 25.8) |
|  |  |  |  | 51.5 | 146.9 | 185.4 | 71.7 | 85.4 | 90.1 | (14.9 / 31.2) |
|  |  |  |  | 51.3 | 146.4 | 186.4 | 73.4 | 85.7 | 90.5 | (15.8 / 35.6) |
|  |  |  |  | 51.4 | 145.4 | 184.9 | 74.1 | 86.1 | 90.6 | (16.5 / 38.7) |
+ +bottleneck is in line detection and matching (we use SOLD2 [46] descriptors), while the rest of the mapping takes only $\sim 10$ minutes on Aachen [61, 62]. The time complexity of our system is nearly linear with the number of images. + +# 4.2. More Insights and Ablation Studies + +Line Triangulation. To study the stability of the triangulation, we perform a small test on a stereo pair from AdelaideRMF [79] on the uncertainty (measured by the largest singular value of the covariance) of the triangulated 3D segments. We further run a synthetic experiment by generating random lines on a plane orthogonal to the stereo pair, and plot the uncertainty of point and line triangulations with + +Table 4. Ablation study on different types of triangulation proposals (defined in Sec. 3.1.3) on Hypersim [55] with SOLD2 [46]. + +
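Table 4 above ablates the point- and VP-guided proposal types of Sec. 3.1.3. As one concrete example, the multi-point proposal (M1), which fits a 3D line to the shared associated points and slides it onto the two reference rays, can be sketched as below; the fitting and ray-projection details are a plausible reading of the description, not the exact LIMAP implementation.

```python
import numpy as np

def proposal_from_points(points3d, x1_ref, x2_ref):
    """M1-style proposal: fit a 3D line to shared points, then intersect with the rays.

    points3d: (N, 3) 3D points associated to both the reference and matched segment
              (expressed in the reference camera frame, camera at the origin, N >= 2).
    x1_ref, x2_ref: normalized homogeneous rays of the reference 2D endpoints.
    """
    pts = np.asarray(points3d, dtype=float)
    c = pts.mean(axis=0)                      # centroid of the associated 3D points
    _, _, vt = np.linalg.svd(pts - c)         # principal direction = fitted line direction
    d = vt[0]

    endpoints = []
    for x in (x1_ref, x2_ref):
        r = np.asarray(x, dtype=float)
        # Closest point between the camera ray (lambda * r) and the fitted line
        # (c + t * d): solve the 2x2 normal equations for (lambda, t).
        # Assumes the ray is not parallel to the fitted line.
        A = np.array([[r @ r, -(r @ d)],
                      [r @ d, -(d @ d)]])
        b = np.array([r @ c, d @ c])
        lam, _ = np.linalg.solve(A, b)
        endpoints.append(lam * r)
    return np.stack(endpoints)                # 3D endpoints on the two reference rays
```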
| Line type | Method | R1 | R5 | P1 | P5 | # supports |
| --- | --- | --- | --- | --- | --- | --- |
| LSD [75] | L3D++ [23] | 37.0 | 153.1 | 53.1 | 80.8 | (14.8 / 16.8) |
| LSD [75] | Ours (line) w/ [23] scoring | 48.6 | 186.0 | 56.5 | 80.6 | (14.4 / 16.8) |
| LSD [75] | Ours (line) w/ [23] merging | 41.2 | 158.2 | 59.6 | 82.5 | (15.6 / 16.7) |
| LSD [75] | Ours (line) w/ exhaustive | 46.7 | 177.2 | 57.6 | 80.9 | (16.8 / 20.8) |
| LSD [75] | Ours (line) | 48.3 | 187.0 | 59.2 | 81.9 | (15.8 / 19.1) |
| SOLD2 [46] | L3D++ [23] | 36.9 | 107.5 | 67.2 | 86.8 | (13.2 / 20.4) |
| SOLD2 [46] | Ours (line) w/ [23] scoring | 45.8 | 133.2 | 72.6 | 85.9 | (15.0 / 31.1) |
| SOLD2 [46] | Ours (line) w/ [23] merging | 37.7 | 113.4 | 70.5 | 84.5 | (13.3 / 23.9) |
| SOLD2 [46] | Ours (line) w/ exhaustive | 48.9 | 139.7 | 72.9 | 85.7 | (16.2 / 36.9) |
| SOLD2 [46] | Ours (line) | 50.8 | 143.5 | 74.4 | 86.9 | (15.1 / 32.2) |
+ +Table 5. Studies on different components of our method with only line-line proposals against L3D++ [23]. + +respect to the angle of the lines with the baseline (refer to Sec. D in supp. for details). The results in Fig. 8 show that when the matched line is nearly parallel to the epipolar line, the line triangulation becomes degenerate with exploding uncertainty, while triangulating the endpoints is significantly more stable. Thus, combining points and VPs from the 2D association is beneficial to improve the stability of the proposals. However, the endpoints are generally not consistent across line matches in practice and need to be complemented with line-line triangulation. This can be verified in Tab. 3 where the performance significantly drops when we change line triangulation into endpoint triangulation. + +We further ablate our four types of triangulation for generating proposals. Results in Tab. 4 show that integrating points and VPs enhance the 3D line maps, in particular significantly improving the track quality. Another surprising fact is that the third line in the table, relying only on points and line + point triangulation, already achieves better results than the prior baselines in Tab. 1. Employing all four types of proposals obtains the best trade-off. + +Scoring and Track Building. We first study the effects of using exhaustive line matching as in L3D++ [23]. To enable direct comparison we only use line triangulation proposals. Results are shown in Tab. 5. While there are more proposals generated from the exhaustive matches, both the recall and precision decrease by a noticeable margin. This is probably due to the large number of wrong proposals misleading the scoring process. Nevertheless, our method with exhaustive matches still works significantly better than L3D++ [23]. To further study the effects of the proposed distance measurements at scoring and track building (merging), we re-implement the ones proposed in L3D++ [23] and perform direct comparison. Both our scoring and track + +
| Method | R1 | R5 | R10 | P1 | P5 | P10 | # supports |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Line-only w/o refine | 43.5 | 135.8 | 180.1 | 75.1 | 87.2 | 92.2 | (15.1 / 32.2) |
| Line-only w/ geom alone | 50.8 | 143.5 | 180.8 | 74.4 | 86.9 | 91.2 | (15.1 / 32.2) |
| w/o refine | 46.5 | 146.0 | 189.7 | 76.8 | 88.9 | 93.3 | (16.5 / 38.7) |
| w/ geom alone | 51.4 | 145.4 | 184.9 | 74.1 | 86.1 | 90.6 | (16.5 / 38.7) |
| w/ joint optimization | 54.3 | 151.1 | 191.2 | 69.8 | 84.6 | 90.0 | (16.5 / 38.7) |

Table 6. Line refinement on Hypersim [55] with SOLD2 [46].
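Table 6 above ablates the refinement stage of Sec. 3.3. To make the line data term of Eq. (8) concrete, the sketch below evaluates the angle-weighted perpendicular residual for a single observation, followed by the Cauchy robustification; the exact distance conventions and loss scales in LIMAP may differ, so this is an illustrative reading of the formula rather than the implementation.

```python
import numpy as np

def line_data_term(X1, X2, seg2d, R, t, K):
    """One summand of Eq. (8): w_angle^2 * e_perp^2 for a single 2D observation.

    X1, X2: 3D endpoints of the line track; seg2d: (2, 2) observed 2D endpoints;
    (R, t, K): pose and intrinsics of the observing view.
    """
    seg2d = np.asarray(seg2d, dtype=float)

    def project(X):                               # pinhole projection to homogeneous pixels
        x = K @ (R @ np.asarray(X, dtype=float) + t)
        return x / x[2]

    p1, p2 = project(X1), project(X2)
    l_proj = np.cross(p1, p2)                     # projected 2D line (homogeneous)
    l_proj = l_proj / np.linalg.norm(l_proj[:2])  # normalize so dot product = pixel distance

    obs_h = np.hstack([seg2d, np.ones((2, 1))])   # observed endpoints, homogeneous
    e_perp = np.max(np.abs(obs_h @ l_proj))       # perpendicular distance (max over endpoints)

    d_proj = (p2 - p1)[:2] / np.linalg.norm((p2 - p1)[:2])
    d_obs = (seg2d[1] - seg2d[0]) / np.linalg.norm(seg2d[1] - seg2d[0])
    w_angle = np.exp(1.0 - abs(float(d_proj @ d_obs)))  # exp(1 - cos), sign-invariant (assumption)

    return (w_angle * e_perp) ** 2

def cauchy_loss(sq_res, scale=1.0):
    """Cauchy robust loss applied to the squared residual, as provided e.g. by Ceres [3]."""
    return scale ** 2 * np.log1p(sq_res / scale ** 2)
```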
| Dataset | HLoc [56, 57] | PtLine [17] | Ours |
| --- | --- | --- | --- |
| Cambridge [30] | 7.0 / 0.13 / 44.0 | 7.4 / 0.13 / 43.5 | 6.7 / 0.12 / 46.1 |
| 7Scenes [66] | 3.3 / 1.08 / 73.0 | 3.3 / 1.09 / 72.7 | 3.0 / 1.00 / 78.0 |
Table 7. Visual localization on Cambridge [31] and 7Scenes [66]. We report the median translation and rotation errors in cm and degrees, and the pose accuracy (%) at the 5 cm / 5 deg threshold. All metrics are averaged across all scenes of each dataset.

![](images/cf577f24e36a9d81671111e9d544f20d7c20934b3bbab435b3ecd9fd36a4098a.jpg)
HLoc [56, 57]

![](images/90947af990730977988c4c5147b295f1fe3c7d01b3235d38c4427cb9488e05da.jpg)
Ours w/ LIMAP

Figure 9. Line-assisted visual localization on Stairs from 7Scenes [66]. Blue: 2D points/lines; Green/Red: projected 3D points/lines.
| Method | (T / R) err. ↓ | Acc. ↑ |
| --- | --- | --- |
| HLoc [57] | 5.2 / 1.46 | 46.8 |
| HLoc [57] w/ depth | 4.7 / 1.25 | 53.4 |
| PtLine [17] | 4.8 / 1.33 | 51.9 |
| Ours w/ L3D++ [23] | 4.1 / 1.14 | 60.8 |
| Ours w/ LIMAP | 3.7 / 1.02 | 71.1 |
+ +building are significantly better, especially when equipped with SOLD2 [46] which produces more structured lines. + +Joint Optimization. Finally, we ablate the proposed joint optimization in our pipeline. First, we remove the point-line association and only apply the geometric residuals (reprojection error). Results in Tab. 6 show that the geometric refinement improves significantly when the proposals solely come from line triangulation. However, when adding additional proposals from points and VPs, it contributes marginally and even misleads some lines that are generated from points and VPs but poorly conditioned for lines (R10 decreases). When integrated with joint optimization with soft association, the recall is further improved noticeably, while sacrificing a bit on the precision. It is worth pointing out that the joint optimization also enables the byproduct of junction structures and line-line relations (e.g. in Fig. 6). + +# 4.3. Applications + +Line-Assisted Visual Localization. We build a hybrid visual localization with both points and lines on top of the acquired 3D line maps. Specifically, we first build point maps as in HLoc [56, 57] and line maps with our proposed method. Then, we match points and lines respectively and get 2D-3D correspondences from the track information in the 3D maps. Given these correspondences, we combine + +
| Method | Med. error ↓ | AUC @ (1° / 3° / 5°) ↑ |
| --- | --- | --- |
| COLMAP [64] | 0.188 | 77.3 / 89.0 / 91.6 |
| COLMAP [64] + LIMAP refinement | 0.146 | 82.9 / 91.2 / 93.0 |
+ +Table 8. Joint bundle adjustment of points and lines on Hypersim [55]. Relative pose errors are measured on all image pairs. + +four minimal solvers [33, 47, 89]: P3P, P2P1LL, P1P2LL, P3LL from PoseLib [35], together in a hybrid RANSAC framework [10, 59] with local optimization [11, 36] to get the final 6-DoF pose (refer to Sec. H in supp. for details). This also enables direct comparison since only using P3P [47] corresponds to the point-alone baseline similar to HLoc [56, 57]. We also compare with the post-refinement of PtLine [17] that optimizes over the initial point-alone predictions. + +Results in Tab. 7 show that our localization system achieves consistently better results than the point-alone baseline both indoors [66] and outdoors [30], validating the effectiveness of employing 3D line maps for visual localization. In Fig. 9 we show more detailed results from the Stairs scene from 7Scenes [66] as it is one of the most challenging ones. Integrating lines significantly benefits the alignment of the reprojected structures, improving the pose accuracy from 46.8 to 71.1. Also, with our localization pipeline, using the map built from our proposed method is better than from L3D++ [23] by a noticeable margin, again demonstrating the advantages of our proposed line mapping system. + +Refining Structure-from-Motion. With the acquired 3D line maps built from a roughly correct point-based structure-from-motion model, e.g., COLMAP [64], we can use the 3D lines with their track information to refine the input camera poses with joint optimization of points and lines. To verify this, we run COLMAP [64] with SuperPoint [13] on the first eight scenes of Hypersim [55], run the proposed line mapping on top of it, and perform joint bundle adjustment to refine poses and intrinsics. We report the relative pose evaluation of all image pairs [29]. Tab. 8 shows that the joint point-line refinement consistently benefits the accuracy of the camera poses, in particular improving AUC@ $1^{\circ}$ by 5.6. + +# 5. Conclusion + +In this paper, we introduce LIMAP: a library for robust 3D line mapping from multi-view imagery. Extensive experiments show that our method, by improving all stages of the reconstruction pipeline, produces significantly more complete 3D lines, with much higher quality of track association. As a byproduct, the method can also recover 3D association graphs between lines and points / VPs. We further show the usefulness of 3D line maps on visual localization and bundle adjustment. Future directions include incremental / real-time structure mapping, distinguishing structural lines from textural lines for wireframe modeling, and exploiting higher-level structures and relations for downstream applications. + +Acknowledgements. V. Larsson was supported by ELLIIT. + +# References + +[1] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In 3DV, 2021. 2 +[2] Sameer Agarwal, Yasutaka Furukawa, Noah Snavely, Ian Simon, Brian Curless, Steven M Seitz, and Richard Szeliski. Building rome in a day. Communications of the ACM, 54(10):105-112, 2011. 6, 7 +[3] Sameer Agarwal and Keir Mierle. Ceres solver. http://ceres-solver.org.5 +[4] Sérgio Agostinho, João Gomes, and Alessio Del Bue. Cvxpl: A unified convex solution to the absolute pose estimation problem from point and line correspondences. arXiv preprint arXiv:1907.10545, 2019. 2 +[5] Cuneyt Akinlar and Cihan Topal. Edlines: Real-time line segment detection by edge drawing (ed). 
In IEEE International Conference on Image Processing, 2011. 2 +[6] Adrien Bartoli, Mathieu Coquerelle, and Peter Sturm. A framework for pencil-of-points structure-from-motion. In ECCV, 2004. 2 +[7] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 100(3):416-441, 2005. 2, 5 +[8] Herbert Bay, Vittorio Ferraris, and Luc Van Gool. Wide-baseline stereo matching with line segments. In CVPR, 2005. 2 +[9] Jean-Charles Bazin, Yongduek Seo, Cédric Demonceaux, Pascal Vasseur, Katsushi Ikeuchi, Inso Kweon, and Marc Pollefeys. Globally optimal line clustering and vanishing point estimation in manhattan world. In CVPR, 2012. 2 +[10] Federico Camposeco, Andrea Cohen, Marc Pollefeys, and Torsten Sattler. Hybrid camera pose estimation. In CVPR, 2018. 8 +[11] Ondrej Chum, Jiri Matas, and Josef Kittler. Locally optimized ransac. In Joint Pattern Recognition Symposium, pages 236-243, 2003. 8 +[12] James Coughlan and Alan L Yuille. The manhattan world assumption: Regularities in scene statistics which enable bayesian inference. In NeurIPS, 2000. 2, 6 +[13] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Computer Vision and Pattern Recognition Workshops (CVPRW), 2018. 1, 8 +[14] Bin Fan, Fuchao Wu, and Zhanyi Hu. Line matching leveraged by point correspondences. In CVPR, 2010. 2 +[15] Bin Fan, Fuchao Wu, and Zhanyi Hu. Robust line matching through line-point invariants. Pattern Recognition, 45(2):794-805, 2012. 2 +[16] Wolfgang Förstner and Bernhard P Wrobel. Photogrammetric computer vision. Springer, 2016. 1 +[17] Shuang Gao, Jixiang Wan, Yishan Ping, Xudong Zhang, Shuzhou Dong, Yuchen Yang, Haikuan Ning, Jijunnan Li, and Yandong Guo. Pose refinement with joint optimization of visual points and lines. In IROS, 2022. 2, 6, 8 + +[18] Ruben Gomez-Ojeda, Francisco-Angel Moreno, David Zuniga-Noël, Davide Scaramuzza, and Javier Gonzalez-Jimenez. Pl-slam: A stereo slam system through the combination of points and line segments. IEEE Transactions on Robotics, 35(3):734-746, 2019. 2 +[19] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Pl-vio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2 +[20] Jared Heinly, Johannes L. Schonberger, Enrique Dunn, and Jan-Michael Frahm. Reconstructing the world in six days. In CVPR, 2015. 1 +[21] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In 3DV, 2014. 2 +[22] Manuel Hofer, Michael Maurer, and Horst Bischof. Line3d: Efficient 3d scene abstraction for the built environment. In German Conference on Pattern Recognition, 2015. 1, 2 +[23] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Computer Vision and Image Understanding (CVIU), 157:167-178, 2017. 1, 2, 6, 7, 8 +[24] Aleksander Holynski, David Geraghty, Jan-Michael Frahm, Chris Sweeney, and Richard Szeliski. Reducing drift in structure from motion using extended features. In 3DV, 2020. 2 +[25] Kun Huang, Yifan Wang, Zihan Zhou, Tianjiao Ding, Shenghua Gao, and Yi Ma. Learning to parse wireframes in images of man-made environments. In CVPR, 2018. 1, 2 +[26] Siyu Huang, Fangbo Qin, Pengfei Xiong, Ning Ding, Yijia He, and Xiao Liu. Tp-lsd: Tri-points based line segment detector. In ECCV, 2020. 
2 +[27] Arjun Jain, Christian Kurz, Thorsten Thormahlen, and Hans-Peter Seidel. Exploiting global connectivity constraints for reconstruction of 3d line segments from images. In CVPR, 2010. 2 +[28] Wenzel Jakob, Jason Rhinelander, and Dean Moldovan. pybind11 - seamless operability between c++11 and python. https://github.com/pybind/pybind11.5 +[29] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. IJCV, 129(2):517-547, 2021. 8 +[30] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In CVPR, 2017. 8 +[31] Alex Kendall, Matthew Grimes, and Roberto Cipolla. PoseNet: A convolutional network for real-time 6-DoF camera relocalization. In ICCV, 2015. 8 +[32] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017. 6 +[33] Zuzana Kukelova, Jan Heller, and Andrew Fitzgibbon. Efficient intersection of three quadrics and applications in computer vision. In CVPR, 2016. 8 +[34] Manuel Lange, Fabian Schweinfurth, and Andreas Schilling. Dld: A deep learning based line descriptor for line feature matching. In IROS, 2019. 2 + +[35] Viktor Larsson. PoseLib - Minimal Solvers for Camera Pose Estimation. https://github.com/vlarsson/ PoseLib.8 +[36] Karel Lebeda, Jiri Matas, and Ondrej Chum. Fixing the Locally Optimized RANSAC. In BMVC, 2012. 8 +[37] Haoang Li, Ji Zhao, Jean-Charles Bazin, Wen Chen, Zhe Liu, and Yun-Hui Liu. Quasi-globally optimal and efficient vanishing point estimation in Manhattan world. In ICCV, 2019. 2 +[38] Kai Li, Jian Yao, and Xiaohu Lu. Robust line matching based on ray-point-ray structure descriptor. In ACCV, 2014. 2 +[39] Kai Li, Jian Yao, Xiaohu Lu, Li Li, and Zhichao Zhang. Hierarchical line matching based on line-junction-line structure descriptor and local homography estimation. Neurocomputing, 184:207-220, 2016. 2 +[40] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters, 7(2):1518-1525, 2022. 2 +[41] Hyunjun Lim, Yeeun Kim, Kwangik Jung, Sumin Hu, and Hyun Myung. Avoiding degeneracy for monocular visual slam with point and line features. In ICRA, 2021. 2 +[42] Yicheng Luo, Jing Ren, Xuefei Zhe, Di Kang, Yajing Xu, Peter Wonka, and Linchao Bao. Lc2wf: learning to construct 3d building wireframes from 3d line clouds. In BMVC, 2022. 2 +[43] Quan Meng, Jiakai Zhang, Qiang Hu, Xuming He, and Jingyi Yu. Lgnn: A context-aware line segment detector. In ACM International Conference on Multimedia, 2020. 2 +[44] Branislav Micusik and Horst Wildenauer. Structure from motion with line segments under relaxed endpoint constraints. IJCV, 124(1):65-79, 2017. 1, 2 +[45] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1 +[46] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In CVPR, 2021. 1, 2, 6, 7, 8 +[47] Mikael Persson and Klas Nordberg. Lambda twist: An accurate fast robust perspective three point (p3p) solver. In ECCV, 2018. 8 +[48] Francesco Pittaluga, Sanjeev J Koppal, Sing Bing Kang, and Sudipta N Sinha. 
Revealing scenes by inverting structure from motion reconstructions. In CVPR, 2019. 1 +[49] Albert Pumarola, Alexander Vakhitov, Antonio Agudo, Alberto Sanfeliu, and Francese Moreno-Noguer. Pl-slam: Realtime monocular visual slam with points and lines. In ICRA, 2017. 2 +[50] Yiming Qian and James H. Elder. A reliable online method for joint estimation of focal length and camera rotation. In ECCV, 2022. 2 +[51] Srikumar Ramalingam, Michel Antunes, Dan Snow, Gim Hee Lee, and Sudeep Pillai. Line-sweep: Cross-ratio for wide-baseline matching and 3d reconstruction. In CVPR, 2015. 2 + +[52] Srikumar Ramalingam, Sofien Bouaziz, and Peter Sturm. Pose estimation using both points and lines for geolocation. In ICRA, 2011. 2 +[53] Siddhant Ranade and Srikumar Ramalingam. Novel single view constraints for Manhattan 3d line reconstruction. In 3DV, 2018. 2 +[54] Jing Ren, Biao Zhang, Bojian Wu, Jianqiang Huang, Lubin Fan, Maks Ovsjanikov, and Peter Wonka. Intuitive and efficient roof modeling for reconstruction and synthesis. In ACM SIGGRAPH Asia, 2021. 2 +[55] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M. Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 7, 8 +[56] Paul-Edouard Sarlin. Visual localization made easy with hloc. https://github.com/cvg/Hierarchical-Localization/.8 +[57] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In CVPR, 2019. 8 +[58] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 2 +[59] Torsten Sattler et al. RansacLib - A Template-based *SAC Implementation. https://github.com/tsattler/RansacLib.8 +[60] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2d-to-3d matching. In ICCV, 2011. 1 +[61] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenberg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In CVPR, 2018. 6, 7 +[62] Torsten Sattler, Tobias Weyand, Bastian Leibe, and Leif Kobbelt. Image retrieval for image-based localization revisited. In BMVC, 2012. 6, 7 +[63] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2 +[64] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 1, 2, 5, 8 +[65] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 1 +[66] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in RGB-D images. In CVPR, 2013. 8 +[67] Noah Snavely, Steven M Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM SIGGRAPH, 2006. 2, 6, 7 +[68] Noah Snavely, Steven M Seitz, and Richard Szeliski. Modeling the world from internet photo collections. *IJCV*, 80(2):189-210, 2008. 6, 7 + +[69] Christoph Strecha, Wolfgang Von Hansen, Luc Van Gool, Pascal Fua, and Ulrich Thoennessen. 
On benchmarking camera calibration and multi-view stereo for high resolution imagery. In CVPR, 2008. 2 +[70] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 2 +[71] Roberto Toldo and Andrea Fusiello. Robust multiple structures estimation with j-linkage. In ECCV, 2008. 3 +[72] Alexander Vakhitov, Jan Funke, and Francesc Moreno-Noguer. Accurate and linear time pose estimation from points and lines. In ECCV, 2016. 2 +[73] Alexander Vakhitov and Victor Lempitsky. Learnable line segment descriptor for visual slam. IEEE Access, 7:39923-39934, 2019. 2 +[74] Bart Verhagen, Radu Timofte, and Luc Van Gool. Scale-invariant line descriptors for wide baseline matching. In WACV, 2014. 2 +[75] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. TPAMI, 32(4):722-732, 2008. 2, 6, 7 +[76] Zhiheng Wang, Fuchao Wu, and Zhanyi Hu. Msld: A robust descriptor for line matching. Pattern Recognition, 42(5):941-953, 2009. 2 +[77] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In CVPR, 2022. 1, 2, 6 +[78] Xinyu Wei, Jun Huang, and Xiaoyuan Ma. Real-time monocular visual slam by combining points and lines. In IEEE International Conference on Multimedia and Expo (ICME), 2019. 2 +[79] Hoi Sim Wong, Tat-Jun Chin, Jin Yu, and David Suter. Dynamic and hierarchical multi-structure geometric model fitting. In ICCV, 2011. 7 +[80] Changchang Wu. Visualsfm: A visual structure from motion system. http://www.cs.washington.edu/homes/ccwu/vsfm, 2011. 2, 6, 8 +[81] Yifan Xu, Weijian Xu, David Cheung, and Zhuowen Tu. Line segment detection using transformers without edges. In CVPR, 2021. 2 +[82] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In CVPR, 2019. 2 +[83] Nan Xue, Tianfu Wu, Song Bai, Fudong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing. In CVPR, 2020. 2 +[84] Sungho Yoon and Ayoung Kim. Line as a visual sentence: Context-aware line descriptor for visual localization. IEEE Robotics and Automation Letters, 6(4):8726-8733, 2021. 1, 2 +[85] Lilian Zhang and Reinhard Koch. An efficient and robust line segment matching approach based on lbd descriptor and pairwise geometric consistency. Journal of Visual Communication and Image Representation, 24(7):794-805, 2013. 2 +[86] Lilian Zhang and Reinhard Koch. Structure and motion from line correspondences: Representation, projection, initializa- + +tion and sparse bundle adjustment. Journal of Visual Communication and Image Representation, 25(5):904-915, 2014. 2 +[87] Lilian Zhang, Huimin Lu, Xiaoping Hu, and Reinhard Koch. Vanishing point estimation and line classification in a Manhattan world with a unifying camera model. *IJCV*, 117, 2015. 2 +[88] Ziheng Zhang, Zhengxin Li, Ning Bi, Jia Zheng, Jinlei Wang, Kun Huang, Weixin Luo, Yanyu Xu, and Shenghua Gao. Ppgnet: Learning point-pair graph for line segment detection. In CVPR, 2019. 2 +[89] Lipu Zhou, Jiamin Ye, and Michael Kaess. A stable algebraic camera pose estimation for minimal configurations of 2d/3d point and line correspondences. In ACCV, 2018. 2, 8 +[90] Yichao Zhou, Haozhi Qi, Yuexiang Zhai, Qi Sun, Zhili Chen, Li-Yi Wei, and Yi Ma. 
Learning to reconstruct 3d Manhattan wireframes from a single image. In ICCV, 2019. 2 +[91] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In IROS, 2017. 2 \ No newline at end of file diff --git a/2023/3D Line Mapping Revisited/images.zip b/2023/3D Line Mapping Revisited/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..854d6821bfe5999bf7a398f461ab3cacc1265af2 --- /dev/null +++ b/2023/3D Line Mapping Revisited/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8197ae4b5edd6145fd72d3b3d0f6035d3749b6a553d3e8ddc9469a1ee72ea75 +size 554829 diff --git a/2023/3D Line Mapping Revisited/layout.json b/2023/3D Line Mapping Revisited/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..cfea6a49394b27891eefcb52811e0f2926d9d91c --- /dev/null +++ b/2023/3D Line Mapping Revisited/layout.json @@ -0,0 +1,11488 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "spans": [ + { + "bbox": [ + 211, + 103, + 384, + 121 + ], + "type": "text", + "content": "3D Line Mapping Revisited" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "spans": [ + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": "Shaohui Liu" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": " Yifan Yu" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": " Rémi Pautrat" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": " Marc Pollefeys" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": " Viktor Larsson" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": "\\\n" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": "Department of Computer Science, ETH Zurich " + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": "Microsoft " + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 96, + 142, + 497, + 173 + ], + "type": "text", + "content": "Lund University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 199, + 192, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 199, + 192, + 211 + ], + "spans": [ + { + "bbox": [ + 143, + 199, + 192, + 211 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 224, + 290, + 524 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 46, + 224, + 290, + 524 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 290, + 524 + ], + "type": "text", + "content": "In contrast to sparse keypoints, a handful of line segments can concisely encode the high-level scene layout, as they often delineate the main structural elements. In addition to offering strong geometric cues, they are also omnipresent in urban landscapes and indoor scenes. Despite their apparent advantages, current line-based reconstruction methods are far behind their point-based counterparts. In this paper we aim to close the gap by introducing LIMAP, a library for 3D line mapping that robustly and efficiently creates 3D line maps from multi-view imagery. This is achieved through revisiting the degeneracy problem of line triangulation, carefully crafted scoring and track building, and exploiting structural priors such as line coincidence, parallelism, and orthogonality. Our code integrates seamlessly with existing point-based Structure-from-Motion methods and can leverage their 3D points to further improve the line reconstruction. Furthermore, as a byproduct, the method is able to recover 3D association graphs between lines and points / vanishing points (VPs). In thorough experiments, we show that LIMAP significantly outperforms existing approaches for 3D line mapping. Our robust 3D line maps also open up new research directions. We show two example applications: visual localization and bundle adjustment, where integrating lines alongside points yields the best results. Code is available at https://github.com/cvg/limap." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 128, + 560 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 569, + 288, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 569, + 288, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 288, + 688 + ], + "type": "text", + "content": "The ability to estimate 3D geometry and build sparse maps via Structure-from-Motion (SfM) has become ubiquitous in 3D computer vision. These frameworks enable important tasks such as building maps for localization [60], providing initial estimates for dense reconstruction and refinement [65], and novel view synthesis [45, 48]. Currently, the field is dominated by point-based methods in which 2D keypoints are detected, matched, and triangulated into 3D maps [20, 64]. These sparse maps offer a compact scene representation, only reconstructing the most distinctive points." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 713 + ], + "type": "text", + "content": "While there have been tremendous progress in point-based reconstruction methods, they still struggle in scenes" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 328, + 196, + 415, + 267 + ], + "blocks": [ + { + "bbox": [ + 328, + 196, + 415, + 267 + ], + "lines": [ + { + "bbox": [ + 328, + 196, + 415, + 267 + ], + "spans": [ + { + "bbox": [ + 328, + 196, + 415, + 267 + ], + "type": "image", + "image_path": "b7c6006e8d87ae2a4d4b06e0b0a0163695abd45b888839868b1d0e9dc9ff0164.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 329, + 285, + 416, + 355 + ], + "blocks": [ + { + "bbox": [ + 319, + 272, + 426, + 284 + ], + "lines": [ + { + "bbox": [ + 319, + 272, + 426, + 284 + ], + "spans": [ + { + "bbox": [ + 319, + 272, + 426, + 284 + ], + "type": "text", + "content": "(a) Point mapping [13,64]" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 329, + 285, + 416, + 355 + ], + "lines": [ + { + "bbox": [ + 329, + 285, + 416, + 355 + ], + "spans": [ + { + "bbox": [ + 329, + 285, + 416, + 355 + ], + "type": "image", + "image_path": "42c688333ec5f636caf7e8f17d09de082462e00986e8a477cbffcb053d7a618a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 357, + 424, + 369 + ], + "lines": [ + { + "bbox": [ + 321, + 357, + 424, + 369 + ], + "spans": [ + { + "bbox": [ + 321, + 357, + 424, + 369 + ], + "type": "text", + "content": "(c) Line-point association" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 379, + 547, + 490 + ], + "lines": [ + { + "bbox": [ + 304, + 379, + 547, + 490 + ], + "spans": [ + { + "bbox": [ + 304, + 379, + 547, + 490 + ], + "type": "text", + "content": "Figure 1. In this paper, we propose a robust pipeline for mapping 3D lines (b), which offers stronger geometric clues about the scene layout compared to the widely used point mapping (a). Part of the success of our pipeline attributes to the modeling of structural priors such as coincidence (c), and parallelism / orthogonality (d). The corresponding 3D association graphs between lines and points / vanishing points (VPs) are also recovered from our system as a byproduct. The degree-1 point and degree-2 junctions are colored in blue and red respectively in (c), while parallel lines associated with the same VP are colored the same in (d)." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 441, + 196, + 527, + 270 + ], + "blocks": [ + { + "bbox": [ + 441, + 196, + 527, + 270 + ], + "lines": [ + { + "bbox": [ + 441, + 196, + 527, + 270 + ], + "spans": [ + { + "bbox": [ + 441, + 196, + 527, + 270 + ], + "type": "image", + "image_path": "8a261521be3287e665917dbe7fcad46e562a47fcdcbe60356f850c172e6956ce.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 272, + 520, + 284 + ], + "lines": [ + { + "bbox": [ + 449, + 272, + 520, + 284 + ], + "spans": [ + { + "bbox": [ + 449, + 272, + 520, + 284 + ], + "type": "text", + "content": "(b) Line mapping" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 440, + 285, + 526, + 355 + ], + "blocks": [ + { + "bbox": [ + 440, + 285, + 526, + 355 + ], + "lines": [ + { + "bbox": [ + 440, + 285, + 526, + 355 + ], + "spans": [ + { + "bbox": [ + 440, + 285, + 526, + 355 + ], + "type": "image", + "image_path": "063cf27913e9fb84ea7d5b41014e9b218b4e790037689a747a42081cdb737421.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 357, + 533, + 369 + ], + "lines": [ + { + "bbox": [ + 436, + 357, + 533, + 369 + ], + "spans": [ + { + "bbox": [ + 436, + 357, + 533, + 369 + ], + "type": "text", + "content": "(d) Line-VP association" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 497, + 547, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 547, + 652 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 547, + 652 + ], + "type": "text", + "content": "where it is difficult to detect and match sufficiently many stable keypoints, such as in indoor areas. On the contrary, these man-made scenes contain abundant lines, e.g. in walls, windows, doors, or ceilings. Furthermore, lines exhibit higher localization accuracy with less uncertainty in pixels [16]. Last but not least, lines appear in highly structured patterns, often satisfying scene-wide geometric constraints such as co-planarity, coincidence (line intersections), parallelism, and orthogonality. In practice, lines suffer from different issues, such as poor endpoint localization and partial occlusion. However, recent line detectors and matchers are bridging the gap of performance between points and lines [25, 46, 84], making it timely to revisit the line reconstruction problem." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "Despite their rich geometric properties and abundance in the real world, there exist very few line-based reconstruction methods in the literature [22,23,44,77]. In practical applications, they have also not achieved the same level of success as their point-based counterparts. 
We believe this is due to" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21445" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 258, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 258, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 258, + 84 + ], + "type": "text", + "content": "several intrinsic challenges specific to line mapping:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 85, + 286, + 285 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 47, + 85, + 286, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 85, + 286, + 107 + ], + "spans": [ + { + "bbox": [ + 47, + 85, + 286, + 107 + ], + "type": "text", + "content": "- Inconsistent endpoints. Due to partial occlusion, lines often have inconsistent endpoints across images." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 108, + 286, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 108, + 286, + 154 + ], + "spans": [ + { + "bbox": [ + 47, + 108, + 286, + 154 + ], + "type": "text", + "content": "- Line fragmentation. In each image there might be multiple line segments that belong to the same line in 3D. This makes the process of creating track associations more complex compared to building 3D point tracks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 156, + 286, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 156, + 286, + 190 + ], + "spans": [ + { + "bbox": [ + 47, + 156, + 286, + 190 + ], + "type": "text", + "content": "- No two-view geometric verification. While point matches can be verified in two views via epipolar geometry, lines require at least three views to filter." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 192, + 286, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 286, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 286, + 237 + ], + "type": "text", + "content": "- Degenerate configurations. In practice line triangulation is more prone to unstable configurations (see Fig. 8), e.g. becoming degenerate whenever the line is parallel with the camera motion (i.e. to epipolar lines)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 239, + 286, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 286, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 286, + 285 + ], + "type": "text", + "content": "- Weaker descriptor-based matching. State-of-the-art descriptors for line segments are far behind their point-based counterparts, putting more emphasis on geometric verification and filtering during reconstruction." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 288, + 287, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 287, + 407 + ], + "type": "text", + "content": "In this paper we aim to reduce the gap between point-based and line-based mapping solutions. We propose a new robust mapping method, LIMAP, that integrates seamlessly into existing open-source point-based SfM frameworks [64, 67, 80]. By sharing the code with the research community we hope to enable more research related to lines; both for low-level tasks (such as improving line segment detection and description) and for integrating lines into higher-level tasks (such as visual localization or dense reconstruction). In particular, we make the following contributions in the paper:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 412, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 47, + 412, + 287, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 287, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 287, + 471 + ], + "type": "text", + "content": "- We build a new line mapping system that reliably reconstructs 3D line segments from multi-view RGB images. Compared to previous approaches, our line maps are significantly more complete and accurate, while having more robust 2D-3D track associations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 475, + 286, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 475, + 286, + 547 + ], + "spans": [ + { + "bbox": [ + 47, + 475, + 286, + 547 + ], + "type": "text", + "content": "- We achieve this by automatically identifying and exploiting structural priors such as coincidence (junctions) and parallelism. Our technical contribution spans all stages of line mapping including triangulating proposals, scoring, track building, and joint optimization, with 3D line-point / VP association graphs output as a byproduct." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 552, + 286, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 552, + 286, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 552, + 286, + 597 + ], + "type": "text", + "content": "- The framework is flexible such that researchers can easily change components (e.g. detectors, matchers, vanishing point estimators, etc.) or integrate additional sensor data (e.g. depth maps or other 3D information)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 602, + 286, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 602, + 286, + 661 + ], + "spans": [ + { + "bbox": [ + 47, + 602, + 286, + 661 + ], + "type": "text", + "content": "- We are the first to go beyond small test sets by quantitatively evaluating on both synthetic and real datasets to benchmark the performance, with hundreds of images for each scene, in which LIMAP consistently and significantly outperforms existing approaches." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 286, + 712 + ], + "type": "text", + "content": "- Finally, we demonstrate the usefulness of having robust line maps by showing improvement over purely point-based methods in tasks such as visual localization and bundle adjustment in Structure-from-Motion." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 71, + 391, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 391, + 83 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 391, + 83 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 91, + 545, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 257 + ], + "type": "text", + "content": "Line Detection and Matching. Detecting 2D line segments conventionally relies on grouping image gradients [5, 75]. To improve the robustness and repeatability, learning-based line detectors were later proposed to tackle the problem of wireframe parsing [25, 43, 82, 83, 88, 90]. Recent deep detectors [26, 46, 81] manage to achieve impressive results for detecting general line segments. Matching of the detected line segments is often based on comparing either handcrafted [8, 74, 76, 85] or learning-based [1, 34, 46, 73, 84] descriptors. Some recent methods also exploit point-line [14, 15] and line-junction-line structures [38, 39] to improve matching results, yet still not reaching the reliability level of advanced point matchers [58, 70]. Our method can leverage any line detector and matcher, and is robust to outliers." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 270, + 545, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 270, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 270, + 545, + 498 + ], + "type": "text", + "content": "Line Reconstruction. As a seminal work, Bartoli and Sturm [6, 7] proposed a full SfM pipeline for line segments, later improved by Schindler [63] with Manhattan-world assumption [12]. Jain et al. [27] proposed to impose global topological constraints between neighboring lines, which were further explored in [51, 53, 54] to build wireframe models. Some learning-based methods [42, 90] were introduced as well to predict 3D wireframes. Hofer et al. [21-23] proposed checking weak epipolar constraints over exhaustive matches and graph clustering, and introduced the Line3D++ software (referred as L3D++ in this paper), which remains the top choice [17, 42] for acquiring 3D line maps so far. Recently, ELSR [77] employed planes and points to guide the matching. 
However, all prior work mainly shows qualitative results and provides quantitative evaluation only on relatively small image sets [27, 69]. In this paper, we set up a quantitative evaluation on benchmarks with hundreds of images, where our proposed system significantly surpasses prior work by improving all stages in the mapping pipeline." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 712 + ], + "type": "text", + "content": "Line-based Applications. The resulting 3D line maps can be used for many downstream applications. [23] advocates the complementary nature of line reconstruction for structure visualization. Some incremental line-based SfM systems are introduced in [24,44,86]. To improve quality and robustness, recent methods [18,19,40,41,49,78,91] jointly employ point and line features in SLAM. While their line maps are often noisy and incomplete, noticeable improvement has been achieved in the accuracy of the recovered camera motion. There has also been development on VP estimation [9,37,50, 87] and solvers for joint point-line pose estimation [4,52,72, 89]. Recently, promising performance in visual localization has been achieved by combining point and line features in a refinement step [17]. In this paper, we show that our line maps can benefit multiple applications such as localization, SfM, and MVS (Sec. J in supp.). In particular, we present very competitive results on point-line visual localization." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21446" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 54, + 69, + 542, + 177 + ], + "blocks": [ + { + "bbox": [ + 54, + 69, + 542, + 177 + ], + "lines": [ + { + "bbox": [ + 54, + 69, + 542, + 177 + ], + "spans": [ + { + "bbox": [ + 54, + 69, + 542, + 177 + ], + "type": "image", + "image_path": "e2035426f0870c1a56ee692ad1a0dd00604fc1258dd5014566dea5d66113f6f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 183, + 547, + 207 + ], + "lines": [ + { + "bbox": [ + 46, + 183, + 547, + 207 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 547, + 207 + ], + "type": "text", + "content": "Figure 2. Overview. Given a set of posed images and optional 3D points, we associate nearby points to lines, match the lines, triangulate them with 4 different strategies, score 3D line proposals, build line tracks, jointly optimize all features, before obtaining our final reconstruction." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 210, + 271, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 210, + 271, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 210, + 271, + 224 + ], + "type": "text", + "content": "3. 
The Proposed 3D Line Mapping Pipeline" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 230, + 288, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 230, + 288, + 315 + ], + "spans": [ + { + "bbox": [ + 46, + 230, + 288, + 315 + ], + "type": "text", + "content": "We now present our proposed pipeline for 3D line mapping. Our method takes as input a set of images with 2D line segments from any existing line detectors. We assume the camera pose for each image is available (e.g. from SfM/SLAM), and optionally we can also leverage a 3D point cloud (e.g. obtained from point-based SfM). The pipeline consists of three main steps:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 319, + 288, + 446 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 319, + 288, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 288, + 344 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 288, + 344 + ], + "type": "text", + "content": "- Proposal Generation (Sec. 3.1): For each 2D line segment, we generate a set of 3D line segment proposals." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 348, + 287, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 348, + 287, + 394 + ], + "spans": [ + { + "bbox": [ + 47, + 348, + 287, + 394 + ], + "type": "text", + "content": "- Scoring and Track Association (Sec. 3.2): Considering multi-view consistency, we score each proposal, select the best candidate for each 2D line, and associate them into a set of 3D line tracks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 399, + 288, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 288, + 446 + ], + "type": "text", + "content": "- Joint Refinement (Sec. 3.3): Finally, we jointly perform non-linear refinement over the 3D line tracks along with 3D points and VP directions, integrating additional structural priors as soft constraints." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 453, + 287, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 453, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 453, + 287, + 477 + ], + "type": "text", + "content": "Figure 2 shows an overview of the overall pipeline. In the following sections, we detail each of the three main steps." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 477, + 288, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 477, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 477, + 288, + 514 + ], + "type": "text", + "content": "By design our pipeline is robust to scale changes and we use the same hyper-parameters for all experiments across datasets, which are provided in Sec. F.2 in the supp." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 520, + 254, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 520, + 254, + 533 + ], + "spans": [ + { + "bbox": [ + 47, + 520, + 254, + 533 + ], + "type": "text", + "content": "3.1. 
Generating 3D Line Segment Proposals" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "text", + "content": "The first step is to generate a set of 3D line proposals for each 2D line segment. Given a segment in an image, we use any existing line finder to retrieve the top " + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "text", + "content": " line matches in each of the " + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "inline_equation", + "content": "n_v" + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "text", + "content": " closest images. Using the top " + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 538, + 287, + 621 + ], + "type": "text", + "content": " line matches instead of a single match increases the chance of getting a correct match, while wrong matches will be filtered out in subsequent steps." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 621, + 288, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 621, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 621, + 288, + 693 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 46, + 621, + 288, + 693 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_1^r,\\pmb{x}_2^r)\\in \\mathbb{R}^3\\times \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 621, + 288, + 693 + ], + "type": "text", + "content": " be the two endpoints (in homogeneous coordinates normalized by the intrinsics) for the reference line segment that we wish to generate proposals for. For ease of notation, we let the world-coordinate system align with the reference view. The endpoints of the 3D line proposals that we generate can all be written as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 701, + 287, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 701, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 107, + 701, + 287, + 714 + ], + "type": "interline_equation", + "content": "\\boldsymbol {X} _ {1} = \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}, \\quad \\boldsymbol {X} _ {2} = \\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r}, \\tag {1}", + "image_path": "20555513ec631d9dfbe336531dc17c023005f2add0a35e825b038efc1ce331c5.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 211, + 547, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 211, + 547, + 248 + ], + "spans": [ + { + "bbox": [ + 305, + 211, + 547, + 248 + ], + "type": "text", + "content": "for some values of " + }, + { + "bbox": [ + 305, + 211, + 547, + 248 + ], + "type": "inline_equation", + "content": "\\lambda_1, \\lambda_2 \\in \\mathbb{R}" + }, + { + "bbox": [ + 305, + 211, + 547, + 248 + ], + "type": "text", + "content": ". Having the 3D endpoints of all proposals lie on the camera rays of the 2D endpoints simplifies the scoring procedure in the second step (Sec. 3.2)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 261, + 420, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 261, + 420, + 273 + ], + "spans": [ + { + "bbox": [ + 306, + 261, + 420, + 273 + ], + "type": "text", + "content": "3.1.1 Line Triangulation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "spans": [ + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "text", + "content": "For each matched 2D line segment " + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "inline_equation", + "content": "(\\pmb{x}_1^m, \\pmb{x}_2^m)" + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "text", + "content": " we generate one proposal via algebraic line triangulation. Let " + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "inline_equation", + "content": "(R^m, t^m)" + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "text", + "content": " be the camera pose of the matched view. We can then solve linearly for the endpoint ray depths " + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "inline_equation", + "content": "\\lambda_i" + }, + { + "bbox": [ + 305, + 280, + 547, + 328 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 321, + 335, + 546, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 335, + 546, + 350 + ], + "spans": [ + { + "bbox": [ + 321, + 335, + 546, + 350 + ], + "type": "interline_equation", + "content": "\\left(\\boldsymbol {x} _ {1} ^ {m} \\times \\boldsymbol {x} _ {2} ^ {m}\\right) ^ {T} \\left(R ^ {m} \\left(\\lambda_ {i} \\boldsymbol {x} _ {i} ^ {r}\\right) + \\boldsymbol {t} ^ {m}\\right) = 0, \\quad i = 1, 2. \\tag {2}", + "image_path": "8d99ef45366d6c14ccb294fd69141e7e2670d164abe31c2785449a93261ee304.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": "The proposals are then filtered with cheirality checks (positive " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": ") and degeneracy check via the angle between ray " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "x_{i}^{r}" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\ell_{m} = x_{1}^{m} \\times x_{2}^{m}" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": ". Note that line triangulation becomes inherently unstable close to degenerate configurations when " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\ell_{m}^{T} R^{m} x_{i}^{r} = 0" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": ", where we get zero or infinite solutions from (2). 
Geometrically, this happens when the line is parallel with the epipolar plane: If " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\ell_{m}^{T} t^{m} \\neq 0" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": " they have no intersection, otherwise they intersect fully and we get infinite solutions " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\ell_{m} \\sim t^{m} \\times R^{m} x_{i}^{r} = E x_{i}^{r}" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": ", i.e. the line segment coincides with the epipolar line from " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "x_{i}^{r}" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": ". This issue is further illustrated in Figure 8. Since we solve for each " + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "inline_equation", + "content": "\\lambda_{i}" + }, + { + "bbox": [ + 304, + 357, + 547, + 574 + ], + "type": "text", + "content": " independently, the triangulation problem can have zero, one, or two degenerate endpoints. We term the case with one degenerate endpoint as a weakly degenerate one, and the case with two degenerate endpoints as fully degenerate. In contrast to the point case, two-view line triangulation is minimal such that any solution fits the measurements exactly with zero error, preventing filtering with 2D reprojection error at this stage." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 586, + 435, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 435, + 597 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 435, + 597 + ], + "type": "text", + "content": "3.1.2 Point-Line Association" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "type": "text", + "content": "To obtain meaningful proposals in degenerate cases, we leverage additional geometric information coming from either points or associated vanishing points (VPs). 2D-3D point correspondences can either come from a point-based SfM model or be triangulated from matched endpoints/junctions. For each 2D line segment, we associate all 2D points within a fixed pixel threshold and thereby associate with their corresponding 3D points. For each image, we also estimate a set of VPs and their association to 2D lines using JLinkage [71]." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21447" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 219, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 219, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 219, + 85 + ], + "type": "text", + "content": "3.1.3 Point-guided Line Triangulation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 289, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 289, + 209 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 289, + 209 + ], + "type": "text", + "content": "We now generate a second set of proposals for each 2D line segment with the assistance of the associated 2D-3D point correspondences and vanishing points. In the following parts we present three different methods. M1 employs multiple associated 3D points so it is stable for all cases including the fully degenerate ones, while M2 and M3 with one known point / VP can help generate stable proposals in weakly degenerate cases, which are more common in practice. Cheirality tests are applied to all proposals with respect to both views." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": "M1. Multiple Points. For each matched line segment we generate one proposal by collecting all of the associated 3D points that are common between the reference and the match. On top of those common points, we fit a 3D line that is then projected onto two camera rays corresponding to " + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_1^r" + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_2^r" + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": ". M2. Line + Point. For each matched line segment we also generate one proposal for each shared 3D point. We first project the 3D point onto the plane spanned by " + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_1^r" + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_2^r" + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": ". We then aim to find a line that passes through the projection and minimizes the residuals in (2) to the matched line. 
This can be formulated as a quadratic optimization problem in the two endpoint depths " + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "inline_equation", + "content": "\\lambda = (\\lambda_1, \\lambda_2)" + }, + { + "bbox": [ + 46, + 211, + 289, + 354 + ], + "type": "text", + "content": " with a single constraint:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 359, + 288, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 359, + 288, + 380 + ], + "spans": [ + { + "bbox": [ + 63, + 359, + 288, + 380 + ], + "type": "interline_equation", + "content": "\\min _ {\\boldsymbol {\\lambda} \\in \\mathbb {R} ^ {2}} \\boldsymbol {\\lambda} ^ {T} A \\boldsymbol {\\lambda} + \\boldsymbol {b} ^ {T} \\boldsymbol {\\lambda}, \\quad \\text {s . t .} \\quad \\boldsymbol {\\lambda} ^ {T} Q \\boldsymbol {\\lambda} + \\boldsymbol {q} ^ {T} \\boldsymbol {\\lambda} = 0. \\tag {3}", + "image_path": "6a70c22d1ce340fdc5c08702c76be3565c978ada052abe784117b308b20f960b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "text", + "content": "Due to the low-dimensionality of the problem, a closed-form solution can be derived by reducing it to a univariate quartic polynomial. We show the full derivation in Sec. B in supp. M3. Line + VP. Each VP corresponds to a 3D direction. For each associated VP, we generate one proposal based on its direction (again projected onto the plane spanned by " + }, + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_1^r" + }, + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x}_2^r" + }, + { + "bbox": [ + 46, + 385, + 287, + 470 + ], + "type": "text", + "content": "). This gives a single linear constraint on the ray depths," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 475, + 288, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 475, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 85, + 475, + 288, + 491 + ], + "type": "interline_equation", + "content": "\\left(\\boldsymbol {v} \\times \\left(\\boldsymbol {x} _ {1} ^ {r} \\times \\boldsymbol {x} _ {2} ^ {r}\\right)\\right) ^ {T} \\left(\\lambda_ {2} \\boldsymbol {x} _ {2} ^ {r} - \\lambda_ {1} \\boldsymbol {x} _ {1} ^ {r}\\right) = 0. \\tag {4}", + "image_path": "8140c9829bb370b88d883a6d307c2a2b082bba37b25db3e1c358c5e37afcf915.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{v} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "text", + "content": " is the VP. 
Using the constraint, we then solve for " + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "inline_equation", + "content": "\\lambda = (\\lambda_1, \\lambda_2)" + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "text", + "content": " by minimizing the two residuals of (2) in a least squares sense. Note that " + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + }, + { + "bbox": [ + 46, + 497, + 287, + 544 + ], + "type": "text", + "content": " can either come from the reference image, or from a matched line in another image." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 544, + 288, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 288, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 288, + 616 + ], + "type": "text", + "content": "Extension: Line Mapping Given Depth Maps. The proposal generation step can be improved when each image has a corresponding depth map (e.g. from an RGB-D sensor), which can be leveraged with robust line fitting to generate the 3D line proposals. Refer to Sec. E in our supplementary material for more details and results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 623, + 257, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 257, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 257, + 635 + ], + "type": "text", + "content": "3.2. Proposal Scoring and Track Association" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": "At this point, each 2D line segment " + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": " in image " + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": " is associated with a set " + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{K}" + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": " of 3D line segment proposals (stemming from the top " + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": " line matches and various triangulations) for each neighboring image " + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 641, + 289, + 712 + ], + "type": "text", + "content": ". We describe in the following how we select the best 3D line proposal for each 2D line segment, and associate these lines into tracks. 
For each of these" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 69, + 378, + 121 + ], + "blocks": [ + { + "bbox": [ + 310, + 69, + 378, + 121 + ], + "lines": [ + { + "bbox": [ + 310, + 69, + 378, + 121 + ], + "spans": [ + { + "bbox": [ + 310, + 69, + 378, + 121 + ], + "type": "image", + "image_path": "9ab2fc94295adffedb7ce2f593bfb4279a84e9e444b7a516c1ed6d8fb933c756.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 122, + 378, + 132 + ], + "lines": [ + { + "bbox": [ + 310, + 122, + 378, + 132 + ], + "spans": [ + { + "bbox": [ + 310, + 122, + 378, + 132 + ], + "type": "text", + "content": "(a) Perspective distance" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 381, + 69, + 462, + 121 + ], + "blocks": [ + { + "bbox": [ + 381, + 69, + 462, + 121 + ], + "lines": [ + { + "bbox": [ + 381, + 69, + 462, + 121 + ], + "spans": [ + { + "bbox": [ + 381, + 69, + 462, + 121 + ], + "type": "image", + "image_path": "7ad7199b9cb8013334d75749a5e64dc34611bc011aeb77d11ba26b0d2abc2a08.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 396, + 122, + 447, + 132 + ], + "lines": [ + { + "bbox": [ + 396, + 122, + 447, + 132 + ], + "spans": [ + { + "bbox": [ + 396, + 122, + 447, + 132 + ], + "type": "text", + "content": "(b) Overlap score" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 140, + 547, + 163 + ], + "lines": [ + { + "bbox": [ + 304, + 140, + 547, + 163 + ], + "spans": [ + { + "bbox": [ + 304, + 140, + 547, + 163 + ], + "type": "text", + "content": "Figure 3. Scoring methods. We propose three novel line scoring measures that are scale-invariant and handle different line lengths." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 465, + 69, + 545, + 121 + ], + "blocks": [ + { + "bbox": [ + 465, + 69, + 545, + 121 + ], + "lines": [ + { + "bbox": [ + 465, + 69, + 545, + 121 + ], + "spans": [ + { + "bbox": [ + 465, + 69, + 545, + 121 + ], + "type": "image", + "image_path": "8aaf83de13b438ccf8867e72a0c85829d85a7b58a298e6c58675ff6c85fc2c0e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 475, + 122, + 536, + 132 + ], + "lines": [ + { + "bbox": [ + 475, + 122, + 536, + 132 + ], + "spans": [ + { + "bbox": [ + 475, + 122, + 536, + 132 + ], + "type": "text", + "content": "(c) InnerSeg distance" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "text", + "content": "steps, we leverage different scoring methods quantifying the distance between two 3D line segments " + }, + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "inline_equation", + "content": "(L_1, L_2)" + }, + { + "bbox": [ + 304, + 168, + 547, + 253 + ], + "type": "text", + "content": ". These distances are usually computed symmetrically and averaged, and can be obtained both in 3D and in 2D by projecting each 3D line into the other view. We start by presenting two classic ones, and then define our three novel line distances (one for 3D proposal selection and two for track building)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 262, + 547, + 301 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "spans": [ + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "text", + "content": "- Angular distance: angle between " + }, + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 306, + 262, + 495, + 274 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "spans": [ + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "text", + "content": "- Perpendicular distance: maximum orthogonal distance of the endpoints of " + }, + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "text", + "content": " to the infinite line spanned by " + }, + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 306, + 277, + 547, + 301 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 311, + 547, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 311, + 547, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 547, + 346 + ], + "type": "text", + "content": "3D Proposal Selection. To select best 3D candidate for each 2D line, we score each proposal " + }, + { + "bbox": [ + 304, + 311, + 547, + 346 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 304, + 311, + 547, + 346 + ], + "type": "text", + "content": " by measuring its consistency with the others. Here we introduce a new distance:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "spans": [ + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "content": "- Perspective distance: assuming the endpoints of " + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "content": " are on the same rays as in Fig. 3(a), the distance is defined as the endpoint distances, divided by the ray depths " + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "d_{s}, d_{e}" + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "content": " of the endpoints of " + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 306, + 354, + 547, + 427 + ], + "type": "text", + "content": " in image 1. This score can filter out ill-posed triangulations (refer to Sec. F.3 in supp. 
for detailed discussions), while remaining scale-invariant." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": "This new distance, together with the angular distance in 2D and 3D, and the perpendicular distance in 2D, have different scales. In order to aggregate them together, we associate a scaling factor " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "\\tau_r" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " to each distance " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " and get a normalized score " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "s_n = e^{-(r / \\tau_r)^2} \\in (0,1]" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": ". Denoting by " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " the set of all the corresponding normalized scores and " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "\\mathbb{I}" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " the indicator function, the score between " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "L_1" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 304, + 434, + 547, + 519 + ], + "type": "text", + "content": " becomes" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 319, + 528, + 545, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 528, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 319, + 528, + 545, + 546 + ], + "type": "interline_equation", + "content": "s \\left(L _ {1}, L _ {2}\\right) = \\min _ {s _ {n} \\in S} \\left(s _ {n} \\cdot \\mathbb {1} _ {s _ {n} \\geq 0. 5}\\right) \\in \\{0 \\} \\cup [ 0. 5, 1 ]. 
\\tag {5}", + "image_path": "4b3fcd5471bae4e8041f6abd4dd287c038cd3221f2b5ef51f7cb91bebaf64491.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "spans": [ + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "text", + "content": "Now equipped with unique score per line pair, we can consider all the neighboring 3D line candidates " + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "inline_equation", + "content": "L_{j}^{k}" + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "text", + "content": " coming from the neighboring image " + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "text", + "content": " and proposal " + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 555, + 547, + 603 + ], + "type": "text", + "content": ". The consistency score is defined by summing the best score from each image:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 361, + 611, + 545, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 611, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 361, + 611, + 545, + 638 + ], + "type": "interline_equation", + "content": "s _ {c} \\left(L _ {i}\\right) = \\sum_ {J \\in \\mathcal {N} _ {I}} \\max _ {k \\in \\mathcal {K}} s \\left(L _ {i}, L _ {J} ^ {k}\\right), \\tag {6}", + "image_path": "f17b9efb8dba30c61903ae46e33431b25c02d9228cd40629c3ee02b7cbe14008.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_I" + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": " is the set of neighboring images of " + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": ". The best 3D line candidate for each 2D line segment " + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": " is then selected as the proposal with the highest score: " + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "inline_equation", + "content": "L = \\operatorname{argmax}_{L_i} s_c(L_i)" + }, + { + "bbox": [ + 304, + 641, + 547, + 714 + ], + "type": "text", + "content": ". If the score is less than 1.0, i.e. the best candidate has less than two supports from neighboring views, we ignore this 2D line segment in the subsequent track building process." 
+ } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21448" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 180 + ], + "type": "text", + "content": "Track Building. At this point, each 2D segment has been assigned a unique 3D line (its best 3D line candidate). The goal of this step is to gather these 2D segments into line tracks. For this, we form a graph where the 2D segments are nodes and all initial line matches are edges. We aim to prune edges in the graph such that the connected 2D segments share similar 3D assignments. We propose two new line scoring measures that can cope with different endpoint configurations and variable scales across images." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 185, + 288, + 373 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": "- Overlap score: we project " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": " orthogonally onto " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": ", clip the projected endpoints to the endpoints of " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": " if they fall outside of " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": " to get segment " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\Pi(L_{1})" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": ", and compare the ratio of lengths to a threshold " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\tau_{o}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "inline_equation", + "content": "\\mathbb{1}_{\\frac{|\\Pi(L_1)|}{|L_2|} \\geq \\tau_o}" + }, + { + "bbox": [ + 47, + 185, + 287, + 239 + ], + "type": "text", + "content": " (see Fig. 3(b))." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "spans": [ + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": "- InnerSeg distance: the endpoints of " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": " are perpendicularly unprojected to " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": ". If they fall outside of " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": ", we clip them to the closest endpoint of " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": ". By doing this in both directions, we can define two inner segments (see Fig. 3(c)), and the InnerSeg distance as the maximum distance between their endpoints. To make this measure scale-invariant, we additionally divide it by a scale factor " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "\\sigma = \\frac{\\min(d_{1}, d_{2})}{f}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": " is the depth of the mid-point of " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "L_{j}" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": " in image " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 47, + 240, + 288, + 373 + ], + "type": "text", + "content": " is the focal length. This encodes how far the mid-point can move in 3D before reaching 1 pixel error in the image (detailed in Sec. F.3 in supp.)." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 378, + 287, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 378, + 287, + 461 + ], + "spans": [ + { + "bbox": [ + 46, + 378, + 287, + 461 + ], + "type": "text", + "content": "We then convert the InnerSeg distance computed in 3D to a normalized score as in the previous paragraph, and combine it with the overlap score in 2D and 3D and previous scores using (5). Given these pairwise scores of 3D lines, we can now prune edges whose score is below a threshold " + }, + { + "bbox": [ + 46, + 378, + 287, + 461 + ], + "type": "inline_equation", + "content": "t_f = 0.5" + }, + { + "bbox": [ + 46, + 378, + 287, + 461 + ], + "type": "text", + "content": ". The connected components of the resulting graph yield the line tracks, ignoring components with less than 3 nodes." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 462, + 288, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 462, + 288, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 288, + 534 + ], + "type": "text", + "content": "For each track, we then re-estimate a single 3D line segment. Using the set of endpoints from the 3D assignments of all nodes in the track, we apply Principal Component Analysis (PCA) and use the principal eigenvector and mean 3D point to estimate the infinite 3D line. We then project all endpoints on this infinite line to get the new 3D endpoints." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 540, + 271, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 540, + 271, + 552 + ], + "spans": [ + { + "bbox": [ + 47, + 540, + 271, + 552 + ], + "type": "text", + "content": "3.3. Joint Optimization of Lines and Structures" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 558, + 288, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 288, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 288, + 641 + ], + "type": "text", + "content": "Finally, we perform non-linear refinement on the acquired 3D lines with their track information. The straightforward approach is to perform geometric refinement on the reprojection error. With the 2D point-line association available, we can formulate a joint optimization problem by including additional structural information. The energy to minimize can be written as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 657, + 288, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 657, + 288, + 685 + ], + "spans": [ + { + "bbox": [ + 70, + 657, + 288, + 685 + ], + "type": "interline_equation", + "content": "E = \\sum_ {p} E _ {P} (p) + \\sum_ {l} E _ {L} (l) + \\sum_ {(p, l)} E _ {P L} (p, l), \\tag {7}", + "image_path": "038ef35eaf6b4a8a87c2eaf3c74f33804d0e0a7618eebe182afdb6c8e19ae3ac.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "inline_equation", + "content": "E_{P}" + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "inline_equation", + "content": "E_{L}" + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "text", + "content": " are the data terms, and " + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "inline_equation", + "content": "E_{PL}" + }, + { + "bbox": [ + 47, + 689, + 289, + 714 + ], + "type": "text", + "content": " encodes the 3D association between lines and points / VPs. In particular," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 547, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 179 + ], + "type": "inline_equation", + "content": "E_{P}" + }, + { + "bbox": [ + 304, + 72, + 547, + 179 + ], + "type": "text", + "content": " is the 2D point reprojection error as in regular bundle adjustment [64]. The association energy is softly weighted (as discussed later) and optimized with robust Huber loss [3]. 
Each line is converted into a 4-DoF infinite line with Plücker coordinate [7] for optimization and converted back to line segments by unprojecting its 2D supports. Each vanishing point is parameterized with a 3-dimensional homogeneous vector. Refer to Sec. A in supp. for details on efficient computation with minimal parameterization." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 180, + 547, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 180, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 547, + 228 + ], + "type": "text", + "content": "Geometric Refinement. The data term of each line track is also defined on its 2D reprojections. In particular, we measure the 2D perpendicular distance weighted by the angle consistency, which we robustly equip with Cauchy loss [3]:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 341, + 233, + 546, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 233, + 546, + 258 + ], + "spans": [ + { + "bbox": [ + 341, + 233, + 546, + 258 + ], + "type": "interline_equation", + "content": "E _ {L} (l) = \\sum_ {k} w _ {\\angle} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right) \\cdot e _ {\\text {p e r p}} ^ {2} \\left(L _ {k}, \\ell_ {k}\\right), \\tag {8}", + "image_path": "9bf4722958638b2169cb59f9fa835b81f1c46a4de8a5dd1f2bb00a5909314edf.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "inline_equation", + "content": "e_{\\mathrm{perp}}" + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "content": " is the perpendicular distance, " + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "content": " is the 2D projection of the 3D segment, " + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "inline_equation", + "content": "\\ell_{k}" + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "content": " are the 2D line segments, and " + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "inline_equation", + "content": "w_{\\angle}" + }, + { + "bbox": [ + 304, + 264, + 547, + 312 + ], + "type": "text", + "content": " is the exponential of one minus the cosine of the 2D angle between the projected and the observed line." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 312, + 547, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 547, + 432 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 547, + 432 + ], + "type": "text", + "content": "Soft Association between Lines and Points. For each pair of 3D line and 3D point with their track information, we can estimate how likely they are spatially associated by traversing the 2D association graph (described in Sec. 3.1.2) of their supports. Specifically, we count the number of associations among the 2D supports of the line track and point track, and keep pairs with at least three 2D associations. 
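A minimal sketch of a single residual term of Eq. (8) is given below. It assumes e_perp is evaluated as the mean perpendicular distance of the observed segment's endpoints to the projected infinite line (the exact form is detailed in the supplementary), and uses the stated weight w_∠ = exp(1 − cos θ); this is an illustrative reading, not the paper's code.

```python
import numpy as np

def line_residual_2d(proj_seg, obs_seg):
    """One term of Eq. (8): w_angle(L_k, l_k)^2 * e_perp(L_k, l_k)^2.

    proj_seg, obs_seg: (2, 2) arrays with the 2D endpoints of the projected
    3D segment L_k and of the observed segment l_k.
    """
    proj_seg = np.asarray(proj_seg, dtype=float)
    obs_seg = np.asarray(obs_seg, dtype=float)

    d_proj = proj_seg[1] - proj_seg[0]
    d_obs = obs_seg[1] - obs_seg[0]
    cos_angle = abs(d_proj @ d_obs) / (np.linalg.norm(d_proj) * np.linalg.norm(d_obs))
    w_angle = np.exp(1.0 - cos_angle)  # exponential of one minus the cosine of the 2D angle

    # Homogeneous line through the projected segment, normalized so that the
    # point-line dot product equals the perpendicular distance in pixels.
    p0 = np.append(proj_seg[0], 1.0)
    p1 = np.append(proj_seg[1], 1.0)
    line = np.cross(p0, p1)
    line /= np.linalg.norm(line[:2])
    e_perp = np.mean([abs(line @ np.append(pt, 1.0)) for pt in obs_seg])

    return (w_angle ** 2) * (e_perp ** 2)
```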
The 3D association energy " + }, + { + "bbox": [ + 304, + 312, + 547, + 432 + ], + "type": "inline_equation", + "content": "E_{PL}" + }, + { + "bbox": [ + 304, + 312, + 547, + 432 + ], + "type": "text", + "content": ", defined on the surviving pairs, is formulated as the 3D point-line distance weighted by the number of 2D associations on their supports." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 432, + 548, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 432, + 548, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 432, + 548, + 564 + ], + "type": "text", + "content": "Soft Association between Lines and VPs. Same as the point case, we can also build a soft association problem between lines and VPs. First, we acquire 3D VP tracks by transitively propagating line correspondences from the 3D line tracks. Then, we count the number of associations among the 2D supports for each pair of 3D line and VP track. The 3D line-VP association energy is defined as the sine of the direction angle between the 3D line and the VP, implicitly enforcing parallelism. Furthermore, we add regularizations to the nearly orthogonal VP pairs to enforce orthogonality of different line groups. Refer to Sec. C in supp. for details." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 573, + 388, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 573, + 388, + 586 + ], + "spans": [ + { + "bbox": [ + 306, + 573, + 388, + 586 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "text", + "content": "Implementation Details. Our whole library is implemented in C++ with Python bindings [28]. The triangulation and scoring can be run in parallel for each node, enabling scalability to large datasets. We use " + }, + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "inline_equation", + "content": "n_v = 20" + }, + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "text", + "content": " visual neighbors and keep the top " + }, + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "inline_equation", + "content": "K = 10" + }, + { + "bbox": [ + 304, + 593, + 547, + 666 + ], + "type": "text", + "content": " line matches. We provide all the values of thresholds and scaling factors in Sec. F.2 in supp." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 671, + 397, + 684 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 397, + 684 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 397, + 684 + ], + "type": "text", + "content": "4.1. 
Line Mapping" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 547, + 713 + ], + "type": "text", + "content": "To validate the effectiveness of our system, we set up an evaluation benchmark to quantify the quality of the recon" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21449" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 70, + 284, + 137 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 284, + 137 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 284, + 137 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 284, + 137 + ], + "type": "table", + "html": "
Line typeMethodR1R5R10P1P5P10# supports
LSD [75]L3D++ [23]37.0153.1218.853.180.890.6(14.8 / 16.8)
ELSR [77]13.959.796.555.472.682.2(N/A / N/A)
Ours48.6185.2251.360.182.490.0(16.4 / 20.5)
SOLD2 [46]L3D++ [23]36.9107.5132.867.286.893.2(13.2 / 20.4)
Ours54.3151.1191.269.884.690.0(16.5 / 38.7)
", + "image_path": "7053f5a94953ca193118ab9388432590b5fef4d36220be03888482e502ae6c06.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 57, + 190, + 279, + 244 + ], + "blocks": [ + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "lines": [ + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "spans": [ + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "text", + "content": "Table 1. Line reconstruction on Hypersim [55] with LSD [75] and SOLD2 [46] lines. " + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "inline_equation", + "content": "R\\tau" + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "inline_equation", + "content": "P\\tau" + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "text", + "content": " are reported at " + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "inline_equation", + "content": "1\\mathrm{\\;{mm}},5\\mathrm{\\;{mm}},{10}" + }, + { + "bbox": [ + 47, + 144, + 288, + 178 + ], + "type": "text", + "content": " mm along with the average number of supporting images/lines." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 190, + 279, + 244 + ], + "lines": [ + { + "bbox": [ + 57, + 190, + 279, + 244 + ], + "spans": [ + { + "bbox": [ + 57, + 190, + 279, + 244 + ], + "type": "table", + "html": "
MethodR5R10R50P5P10P50# supports
L3D++ [23]373.7831.62783.640.654.585.9(8.8 / 9.3)
ELSR [77]139.2322.51308.038.548.074.5(N/A / N/A)
Ours (line-only)472.11058.83720.746.858.486.1(10.3 / 11.8)
Ours508.31154.54179.546.056.983.7(10.4 / 12.0)
", + "image_path": "aea1ae949101420f4bac694bb61506ec315a362011dc0e5fe62eb421075699ec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "lines": [ + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "spans": [ + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "text", + "content": "Table 2. Line reconstruction on train split of Tanks and Temples [32] with LSD [75] lines. " + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "inline_equation", + "content": "R\\tau" + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "inline_equation", + "content": "P\\tau" + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "text", + "content": " are reported at " + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "inline_equation", + "content": "5\\mathrm{\\;{mm}},{10}\\mathrm{\\;{mm}}" + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "text", + "content": " , " + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "inline_equation", + "content": "{50}\\mathrm{\\;{mm}}" + }, + { + "bbox": [ + 47, + 252, + 288, + 286 + ], + "type": "text", + "content": " along with the average number of supporting images/lines." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 300, + 287, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 300, + 287, + 336 + ], + "spans": [ + { + "bbox": [ + 47, + 300, + 287, + 336 + ], + "type": "text", + "content": "structed 3D line maps. As there are no ground truth (GT) 3D lines, we evaluate the 3D line mapping with either GT mesh models or point clouds. We use the following metrics:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 337, + 288, + 408 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "spans": [ + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "text", + "content": "- Length recall (in meters) at " + }, + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "inline_equation", + "content": "\\tau (R\\tau)" + }, + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "text", + "content": ": sum of the lengths of the line portions within " + }, + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 47, + 337, + 288, + 360 + ], + "type": "text", + "content": " mm from the GT model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "text", + "content": "- Inlier percentage at " + }, + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "inline_equation", + "content": "\\tau (P\\tau)" + }, + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "text", + "content": ": the percentage of tracks that are within " + }, + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 47, + 361, + 288, + 384 + ], + "type": "text", + "content": " mm from the GT model." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 384, + 288, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 384, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 47, + 384, + 288, + 408 + ], + "type": "text", + "content": "- Average supports: average number of image supports and 2D line supports across all line tracks." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 409, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 409, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 46, + 409, + 287, + 479 + ], + "type": "text", + "content": "In the following, we compare our system with two state-of-the-art methods as baselines: L3D++ [23] and ELSR [77], using two line detectors: the traditional LSD detector [75] and the learning-based SOLD2 [46]. For ELSR [77], we convert the input into VisualSfM [80] format and use code from the authors (only supporting LSD [75])." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 480, + 287, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 480, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 46, + 480, + 287, + 599 + ], + "type": "text", + "content": "Our first evaluation is run on the first eight scenes of the Hypersim dataset [55], composed of 100 images each, and is reported in Tab. 1. For both detectors, we reconstruct much more complete line maps with better or comparable precision than the competitors, while also exhibiting significantly higher quality of track information. This abundant track association is beneficial particularly for line-based applications such as visual localization [17]. After discussing with the authors of ELSR, it seems that their method does not achieve satisfactory results due to a lack of point and plane features." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 600, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 600, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 600, + 288, + 696 + ], + "type": "text", + "content": "We further evaluate all three methods on the train split of the Tanks and Temples dataset [32] without Ignatius as it has no line structures. As SOLD2 [46] is trained for indoor images, we only use LSD [75]. Since the provided point cloud was cleaned to focus only on the main subject, we compute its bounding box, extend it by one meter, and only evaluate lines inside this region. This prevents incorrectly penalizing correct lines that are far away from the main scene," + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 69, + 543, + 196 + ], + "blocks": [ + { + "bbox": [ + 309, + 69, + 543, + 196 + ], + "lines": [ + { + "bbox": [ + 309, + 69, + 543, + 196 + ], + "spans": [ + { + "bbox": [ + 309, + 69, + 543, + 196 + ], + "type": "image", + "image_path": "ccb021e2a27d19fa77bda20fabb5aee20a3900e76a021bda5915e472624bd5af.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 308, + 239, + 545, + 316 + ], + "blocks": [ + { + "bbox": [ + 305, + 204, + 545, + 237 + ], + "lines": [ + { + "bbox": [ + 305, + 204, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 305, + 204, + 545, + 237 + ], + "type": "text", + "content": "Figure 4. Top row: L3D++ [23]. Bottom row: Ours. Both systems are run on Horse and Family from [32]. We show two different views on the main scene of Horse." 
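One possible implementation of the length recall Rτ from the metric list in Sec. 4.1 approximates the GT model by a point cloud and samples uniformly along each reconstructed segment; the thresholds in Tabs. 1 and 2 are given in millimeters, so τ and the GT coordinates must share the same unit. This is an assumed evaluation sketch, not the benchmark code.

```python
import numpy as np
from scipy.spatial import cKDTree

def length_recall(segments, gt_points, tau, samples_per_seg=100):
    """Approximate R_tau: total length of reconstructed line portions lying
    within tau of the GT model (here a GT point cloud queried via a KD-tree)."""
    tree = cKDTree(gt_points)
    recall = 0.0
    for p0, p1 in segments:
        p0, p1 = np.asarray(p0, dtype=float), np.asarray(p1, dtype=float)
        ts = np.linspace(0.0, 1.0, samples_per_seg)
        pts = p0[None] + ts[:, None] * (p1 - p0)[None]
        dists, _ = tree.query(pts)
        inlier_frac = np.mean(dists <= tau)   # fraction of the segment within tau
        recall += inlier_frac * np.linalg.norm(p1 - p0)
    return recall
```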
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 308, + 239, + 545, + 316 + ], + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 316 + ], + "type": "image", + "image_path": "da17233417b9be6bbb3c5e74e999c89b1a874c9cfcf2e8246a1b0b0d33ace9e1.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 310, + 359, + 543, + 414 + ], + "blocks": [ + { + "bbox": [ + 305, + 323, + 545, + 357 + ], + "lines": [ + { + "bbox": [ + 305, + 323, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 305, + 323, + 545, + 357 + ], + "type": "text", + "content": "Figure 5. Qualitative results on Hypersim [55] and Tanks and Temples [32]. On Barn we jointly visualize our results and the aligned ground truth point cloud." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 359, + 543, + 414 + ], + "lines": [ + { + "bbox": [ + 310, + 359, + 543, + 414 + ], + "spans": [ + { + "bbox": [ + 310, + 359, + 543, + 414 + ], + "type": "image", + "image_path": "7bb06070b83fe3941bfbdfac8de8bce99f064bfde89ecc59a067f3427b81cc33.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 422, + 547, + 445 + ], + "lines": [ + { + "bbox": [ + 305, + 422, + 547, + 445 + ], + "spans": [ + { + "bbox": [ + 305, + 422, + 547, + 445 + ], + "type": "text", + "content": "Figure 6. Qualitative results of the recovered line-point and line-VP association graphs (visualized similarly as in Fig. 1)." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 450, + 545, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 557 + ], + "type": "text", + "content": "which our method is particularly good at thanks to our scale-invariant design (refer to Sec. G in supp.). Tab. 2 shows the results, where our methods significantly improve the mapping quality across the board. Fig. 4 shows qualitative comparison between our method and L3D++ [23]. Our results exhibit better completeness, have less noisy lines that are flying around, and achieve significantly more robust reconstructions of subtle details (e.g. on the ground). More examples of our produced line maps are shown in Fig. 5." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 558, + 545, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 629 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 629 + ], + "type": "text", + "content": "As an additional output of our system, junction structures and line-line relations such as parallelism and orthogonality are discovered, as shown in Fig. 6. This directly comes from the line-point and line-VP soft associations of Sec. 3.3. From the recovered structures, we can clearly perceive the scene and easily recognize the main Manhattan directions [12]." 
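The line-VP association term of Sec. 3.3 that drives this parallelism grouping is the sine of the direction angle between a 3D line and its associated VP; for unit direction vectors it reduces to the norm of their cross product, as in the small sketch below (illustrative only, not the library's parameterization).

```python
import numpy as np

def line_vp_residual(line_dir, vp_dir):
    """Sine of the angle between a 3D line direction and a VP direction;
    zero when the line is parallel to the vanishing direction."""
    line_dir = np.asarray(line_dir, dtype=float)
    vp_dir = np.asarray(vp_dir, dtype=float)
    line_dir = line_dir / np.linalg.norm(line_dir)
    vp_dir = vp_dir / np.linalg.norm(vp_dir)
    return np.linalg.norm(np.cross(line_dir, vp_dir))
```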
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 630, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 546, + 714 + ], + "type": "text", + "content": "To demonstrate the scalability of the proposed system, we also run our method on two large-scale datasets: Aachen (6,697 images) [61, 62] and Rome city (16,179 images) [2, 67, 68]. Fig. 7 shows that our method produces reliable line maps with clear structures. Note that the camera poses from Bundler [67] on Rome city are far from perfect, while our mapping still works reasonably well. The efficiency" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 284, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 284, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 284, + 713 + ], + "type": "text", + "content": "Line typeTriangulationR1R5R10P1P5P10# supportsLSDEndpoints27.6101.4138.058.283.592.1(13.0 / 13.2)[75]Line48.3187.0257.459.281.989.8(15.8 / 19.1)SOLD2Endpoints27.382.8106.568.284.590.9(12.3 / 19.9)[46]Line50.8143.5180.874.486.991.2(15.1 / 32.2)", + "image_path": "4fd6160525e3614a22987dddccc5c424dc78f27b20c7da5b2aaf2a2e7d3ceea4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 55, + 441, + 281, + 510 + ], + "blocks": [ + { + "bbox": [ + 46, + 395, + 287, + 429 + ], + "lines": [ + { + "bbox": [ + 46, + 395, + 287, + 429 + ], + "spans": [ + { + "bbox": [ + 46, + 395, + 287, + 429 + ], + "type": "text", + "content": "Table 3. Comparison between endpoint and line triangulation on Hypersim [55]. While being more stable at triangulation, the endpoints are often unmatched between line pairs." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 441, + 281, + 510 + ], + "lines": [ + { + "bbox": [ + 55, + 441, + 281, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 441, + 281, + 510 + ], + "type": "table", + "html": "
LineM1M2M3R1R5R10P1P5P10# supports
50.8143.5180.874.486.991.2(15.1 / 32.2)
24.972.595.865.981.288.5(11.3 / 15.7)
37.7116.8152.671.084.289.7(13.8 / 25.8)
51.5146.9185.471.785.490.1(14.9 / 31.2)
51.3146.4186.473.485.790.5(15.8 / 35.6)
51.4145.4184.974.186.190.6(16.5 / 38.7)
", + "image_path": "3dbb284bc5bade4aacf7ed7b71237c9bec067f5d13d91686975790ed75df2eda.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 555, + 288, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 288, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 288, + 605 + ], + "type": "text", + "content": "bottleneck is in line detection and matching (we use SOLD2 [46] descriptors), while the rest of the mapping takes only " + }, + { + "bbox": [ + 46, + 555, + 288, + 605 + ], + "type": "inline_equation", + "content": "\\sim 10" + }, + { + "bbox": [ + 46, + 555, + 288, + 605 + ], + "type": "text", + "content": " minutes on Aachen [61, 62]. The time complexity of our system is nearly linear with the number of images." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 611, + 237, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 611, + 237, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 611, + 237, + 624 + ], + "type": "text", + "content": "4.2. More Insights and Ablation Studies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 629, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 289, + 715 + ], + "type": "text", + "content": "Line Triangulation. To study the stability of the triangulation, we perform a small test on a stereo pair from AdelaideRMF [79] on the uncertainty (measured by the largest singular value of the covariance) of the triangulated 3D segments. We further run a synthetic experiment by generating random lines on a plane orthogonal to the stereo pair, and plot the uncertainty of point and line triangulations with" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 308, + 170, + 545, + 276 + ], + "blocks": [ + { + "bbox": [ + 46, + 518, + 289, + 541 + ], + "lines": [ + { + "bbox": [ + 46, + 518, + 289, + 541 + ], + "spans": [ + { + "bbox": [ + 46, + 518, + 289, + 541 + ], + "type": "text", + "content": "Table 4. Ablation study on different types of triangulation proposals (defined in Sec. 3.1.3) on Hypersim [55] with SOLD2 [46]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 170, + 545, + 276 + ], + "lines": [ + { + "bbox": [ + 308, + 170, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 308, + 170, + 545, + 276 + ], + "type": "table", + "html": "
Line typeMethodR1R5P1P5# supports
LSD [75]L3D++ [23]37.0153.153.180.8(14.8 / 16.8)
Ours (line) w/ [23] scoring48.6186.056.580.6(14.4 / 16.8)
Ours (line) w/ [23] merging41.2158.259.682.5(15.6 / 16.7)
Ours (line) w/ exhaustive46.7177.257.680.9(16.8 / 20.8)
Ours (line)48.3187.059.281.9(15.8 / 19.1)
SOLD2 [46]L3D++ [23]36.9107.567.286.8(13.2 / 20.4)
Ours (line) w/ [23] scoring45.8133.272.685.9(15.0 / 31.1)
Ours (line) w/ [23] merging37.7113.470.584.5(13.3 / 23.9)
Ours (line) w/ exhaustive48.9139.772.985.7(16.2 / 36.9)
Ours (line)50.8143.574.486.9(15.1 / 32.2)
", + "image_path": "f48cd0902ca8f45d89dd5dc393ff534a107a181646ae8ab8a62ab9ada13359b3.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 284, + 546, + 307 + ], + "lines": [ + { + "bbox": [ + 305, + 284, + 546, + 307 + ], + "spans": [ + { + "bbox": [ + 305, + 284, + 546, + 307 + ], + "type": "text", + "content": "Table 5. Studies on different components of our method with only line-line proposals against L3D++ [23]." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 317, + 547, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 317, + 547, + 461 + ], + "spans": [ + { + "bbox": [ + 304, + 317, + 547, + 461 + ], + "type": "text", + "content": "respect to the angle of the lines with the baseline (refer to Sec. D in supp. for details). The results in Fig. 8 show that when the matched line is nearly parallel to the epipolar line, the line triangulation becomes degenerate with exploding uncertainty, while triangulating the endpoints is significantly more stable. Thus, combining points and VPs from the 2D association is beneficial to improve the stability of the proposals. However, the endpoints are generally not consistent across line matches in practice and need to be complemented with line-line triangulation. This can be verified in Tab. 3 where the performance significantly drops when we change line triangulation into endpoint triangulation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 461, + 547, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 461, + 547, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 461, + 547, + 557 + ], + "type": "text", + "content": "We further ablate our four types of triangulation for generating proposals. Results in Tab. 4 show that integrating points and VPs enhance the 3D line maps, in particular significantly improving the track quality. Another surprising fact is that the third line in the table, relying only on points and line + point triangulation, already achieves better results than the prior baselines in Tab. 1. Employing all four types of proposals obtains the best trade-off." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 557, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 548, + 714 + ], + "type": "text", + "content": "Scoring and Track Building. We first study the effects of using exhaustive line matching as in L3D++ [23]. To enable direct comparison we only use line triangulation proposals. Results are shown in Tab. 5. While there are more proposals generated from the exhaustive matches, both the recall and precision decrease by a noticeable margin. This is probably due to the large number of wrong proposals misleading the scoring process. Nevertheless, our method with exhaustive matches still works significantly better than L3D++ [23]. To further study the effects of the proposed distance measurements at scoring and track building (merging), we re-implement the ones proposed in L3D++ [23] and perform direct comparison. 
Both our scoring and track" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 758 + ], + "type": "text", + "content": "21451" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 286, + 141 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 286, + 141 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 286, + 141 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 286, + 141 + ], + "type": "table", + "html": "
MethodR1R5R10P1P5P10# supports
Line-only w/o refine43.5135.8180.175.187.292.2(15.1 / 32.2)
Line-only w/ geom alone50.8143.5180.874.486.991.2(15.1 / 32.2)
w/o refine46.5146.0189.776.888.993.3(16.5 / 38.7)
w/ geom alone51.4145.4184.974.186.190.6(16.5 / 38.7)
w/ joint optimization54.3151.1191.269.884.690.0(16.5 / 38.7)
", + "image_path": "9eacc632f14b0ca475d1276cbd1ef5a00b7b55230b60446006bc9f706028ff3e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 58, + 175, + 277, + 213 + ], + "blocks": [ + { + "bbox": [ + 52, + 149, + 281, + 161 + ], + "lines": [ + { + "bbox": [ + 52, + 149, + 281, + 161 + ], + "spans": [ + { + "bbox": [ + 52, + 149, + 281, + 161 + ], + "type": "text", + "content": "Table 6. Line refinement on Hypersim [55] with SOLD2 [46]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 175, + 277, + 213 + ], + "lines": [ + { + "bbox": [ + 58, + 175, + 277, + 213 + ], + "spans": [ + { + "bbox": [ + 58, + 175, + 277, + 213 + ], + "type": "table", + "html": "
DatasetHLoc2[56,57]PtLine [17]Ours
Cambridge [30]7.0 / 0.13 / 44.07.4 / 0.13 / 43.56.7 / 0.12 / 46.1
7Scenes [66]3.3 / 1.08 / 73.03.3 / 1.09 / 72.73.0 / 1.00 / 78.0
", + "image_path": "85770fc6ad5dacb8a7a2ef730b78b0f7903a9d5f89fc1736e0ea363e9babcb4a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 50, + 269, + 102, + 327 + ], + "blocks": [ + { + "bbox": [ + 50, + 269, + 102, + 327 + ], + "lines": [ + { + "bbox": [ + 50, + 269, + 102, + 327 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 102, + 327 + ], + "type": "image", + "image_path": "cf577f24e36a9d81671111e9d544f20d7c20934b3bbab435b3ecd9fd36a4098a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 327, + 97, + 335 + ], + "lines": [ + { + "bbox": [ + 56, + 327, + 97, + 335 + ], + "spans": [ + { + "bbox": [ + 56, + 327, + 97, + 335 + ], + "type": "text", + "content": "HLoc [56, 57]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "lines": [ + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "type": "text", + "content": "Figure 9. Line-assisted Visual localization on Stairs from 7Scenes [66]. Blue: 2D points/lines; Green/Red: Projected 3D points/lines." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 105, + 269, + 157, + 327 + ], + "blocks": [ + { + "bbox": [ + 105, + 269, + 157, + 327 + ], + "lines": [ + { + "bbox": [ + 105, + 269, + 157, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 157, + 327 + ], + "type": "image", + "image_path": "90947af990730977988c4c5147b295f1fe3c7d01b3235d38c4427cb9488e05da.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 327, + 154, + 335 + ], + "lines": [ + { + "bbox": [ + 107, + 327, + 154, + 335 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 154, + 335 + ], + "type": "text", + "content": "Ours w/ LIMAP" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 159, + 269, + 287, + 335 + ], + "blocks": [ + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "lines": [ + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "spans": [ + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "type": "text", + "content": "Table 7. Visual localization on Cambridge [31] and 7Scenes [66]. We report the median translation and rotation errors in cm and degrees, and the pose accuracy " + }, + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "type": "inline_equation", + "content": "5\\mathrm{cm} / 5" + }, + { + "bbox": [ + 47, + 220, + 287, + 265 + ], + "type": "text", + "content": " deg threshold. All metrics are averaged across all scenes of each dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 159, + 269, + 287, + 335 + ], + "lines": [ + { + "bbox": [ + 159, + 269, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 159, + 269, + 287, + 335 + ], + "type": "table", + "html": "
(T / R) err. ↓Acc. ↑
HLoc [57]5.2 / 1.4646.8
HLoc [57] w/ depth4.7 / 1.2553.4
PtLine [17]4.8 / 1.3351.9
Ours w/L3D++ [23]4.1 / 1.1460.8
Ours w/LIMAP3.7 / 1.0271.1
", + "image_path": "1677ad2107fef78001d3ed5afed02d80544ff5d999294ca948356c38707cb0de.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 373, + 287, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 373, + 287, + 397 + ], + "spans": [ + { + "bbox": [ + 46, + 373, + 287, + 397 + ], + "type": "text", + "content": "building are significantly better, especially when equipped with SOLD2 [46] which produces more structured lines." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 397, + 287, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 397, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 46, + 397, + 287, + 565 + ], + "type": "text", + "content": "Joint Optimization. Finally, we ablate the proposed joint optimization in our pipeline. First, we remove the point-line association and only apply the geometric residuals (reprojection error). Results in Tab. 6 show that the geometric refinement improves significantly when the proposals solely come from line triangulation. However, when adding additional proposals from points and VPs, it contributes marginally and even misleads some lines that are generated from points and VPs but poorly conditioned for lines (R10 decreases). When integrated with joint optimization with soft association, the recall is further improved noticeably, while sacrificing a bit on the precision. It is worth pointing out that the joint optimization also enables the byproduct of junction structures and line-line relations (e.g. in Fig. 6)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 573, + 129, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 129, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 129, + 586 + ], + "type": "text", + "content": "4.3. Applications" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 591, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 591, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 591, + 287, + 676 + ], + "type": "text", + "content": "Line-Assisted Visual Localization. We build a hybrid visual localization with both points and lines on top of the acquired 3D line maps. Specifically, we first build point maps as in HLoc [56, 57] and line maps with our proposed method. Then, we match points and lines respectively and get 2D-3D correspondences from the track information in the 3D maps. Given these correspondences, we combine" + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 545, + 107 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 545, + 107 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 107 + ], + "type": "table", + "html": "
Med. error ↓AUC @ (1° / 3° / 5°) ↑
COLMAP [64]0.18877.3 / 89.0 / 91.6
COLMAP [64] + LIMAP refinement0.14682.9 / 91.2 / 93.0
", + "image_path": "1c52a762c89341a3e49e1093c4eac1343247e9aa87de7347e9b8f87537a266bd.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 115, + 545, + 138 + ], + "lines": [ + { + "bbox": [ + 305, + 115, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 305, + 115, + 545, + 138 + ], + "type": "text", + "content": "Table 8. Joint bundle adjustment of points and lines on Hypersim [55]. Relative pose errors are measured on all image pairs." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 143, + 547, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 143, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 143, + 547, + 239 + ], + "type": "text", + "content": "four minimal solvers [33, 47, 89]: P3P, P2P1LL, P1P2LL, P3LL from PoseLib [35], together in a hybrid RANSAC framework [10, 59] with local optimization [11, 36] to get the final 6-DoF pose (refer to Sec. H in supp. for details). This also enables direct comparison since only using P3P [47] corresponds to the point-alone baseline similar to HLoc [56, 57]. We also compare with the post-refinement of PtLine [17] that optimizes over the initial point-alone predictions." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 239, + 547, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 239, + 547, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 239, + 547, + 382 + ], + "type": "text", + "content": "Results in Tab. 7 show that our localization system achieves consistently better results than the point-alone baseline both indoors [66] and outdoors [30], validating the effectiveness of employing 3D line maps for visual localization. In Fig. 9 we show more detailed results from the Stairs scene from 7Scenes [66] as it is one of the most challenging ones. Integrating lines significantly benefits the alignment of the reprojected structures, improving the pose accuracy from 46.8 to 71.1. Also, with our localization pipeline, using the map built from our proposed method is better than from L3D++ [23] by a noticeable margin, again demonstrating the advantages of our proposed line mapping system." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 382, + 547, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 382, + 547, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 382, + 547, + 526 + ], + "type": "text", + "content": "Refining Structure-from-Motion. With the acquired 3D line maps built from a roughly correct point-based structure-from-motion model, e.g., COLMAP [64], we can use the 3D lines with their track information to refine the input camera poses with joint optimization of points and lines. To verify this, we run COLMAP [64] with SuperPoint [13] on the first eight scenes of Hypersim [55], run the proposed line mapping on top of it, and perform joint bundle adjustment to refine poses and intrinsics. We report the relative pose evaluation of all image pairs [29]. Tab. 8 shows that the joint point-line refinement consistently benefits the accuracy of the camera poses, in particular improving AUC@ " + }, + { + "bbox": [ + 304, + 382, + 547, + 526 + ], + "type": "inline_equation", + "content": "1^{\\circ}" + }, + { + "bbox": [ + 304, + 382, + 547, + 526 + ], + "type": "text", + "content": " by 5.6." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 536, + 378, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 378, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 378, + 548 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 556, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 547, + 700 + ], + "type": "text", + "content": "In this paper, we introduce LIMAP: a library for robust 3D line mapping from multi-view imagery. Extensive experiments show that our method, by improving all stages of the reconstruction pipeline, produces significantly more complete 3D lines, with much higher quality of track association. As a byproduct, the method can also recover 3D association graphs between lines and points / VPs. We further show the usefulness of 3D line maps on visual localization and bundle adjustment. Future directions include incremental / real-time structure mapping, distinguishing structural lines from textural lines for wireframe modeling, and exploiting higher-level structures and relations for downstream applications." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 700, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 700, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 700, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgements. V. Larsson was supported by ELLIIT." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 683, + 287, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 683, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 683, + 287, + 713 + ], + "type": "text", + "content": "2Up to the date of submission, the COLMAP model [64] used by HLoc [56, 57] does not consider radial distortion from the VisualSfM [80] model. So our results are better than the original ones." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "21452" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 124 + ], + "type": "text", + "content": "[1] Hichem Abdellali, Robert Frohlich, Viktor Vilagos, and Zoltan Kato. L2d2: Learnable line detector and descriptor. In 3DV, 2021. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "spans": [ + { + "bbox": [ + 53, + 126, + 288, + 170 + ], + "type": "text", + "content": "[2] Sameer Agarwal, Yasutaka Furukawa, Noah Snavely, Ian Simon, Brian Curless, Steven M Seitz, and Richard Szeliski. Building rome in a day. Communications of the ACM, 54(10):105-112, 2011. 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 172, + 288, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 172, + 288, + 194 + ], + "spans": [ + { + "bbox": [ + 53, + 172, + 288, + 194 + ], + "type": "text", + "content": "[3] Sameer Agarwal and Keir Mierle. Ceres solver. http://ceres-solver.org.5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 196, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 196, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 53, + 196, + 288, + 239 + ], + "type": "text", + "content": "[4] Sérgio Agostinho, João Gomes, and Alessio Del Bue. Cvxpl: A unified convex solution to the absolute pose estimation problem from point and line correspondences. arXiv preprint arXiv:1907.10545, 2019. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 241, + 288, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 241, + 288, + 275 + ], + "spans": [ + { + "bbox": [ + 53, + 241, + 288, + 275 + ], + "type": "text", + "content": "[5] Cuneyt Akinlar and Cihan Topal. Edlines: Real-time line segment detection by edge drawing (ed). In IEEE International Conference on Image Processing, 2011. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 277, + 288, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 277, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 53, + 277, + 288, + 308 + ], + "type": "text", + "content": "[6] Adrien Bartoli, Mathieu Coquerelle, and Peter Sturm. A framework for pencil-of-points structure-from-motion. In ECCV, 2004. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 311, + 288, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 311, + 288, + 354 + ], + "spans": [ + { + "bbox": [ + 53, + 311, + 288, + 354 + ], + "type": "text", + "content": "[7] Adrien Bartoli and Peter Sturm. Structure-from-motion using lines: Representation, triangulation, and bundle adjustment. Computer Vision and Image Understanding (CVIU), 100(3):416-441, 2005. 2, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 357, + 288, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 288, + 388 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 288, + 388 + ], + "type": "text", + "content": "[8] Herbert Bay, Vittorio Ferraris, and Luc Van Gool. Wide-baseline stereo matching with line segments. In CVPR, 2005. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 391, + 288, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 288, + 436 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 288, + 436 + ], + "type": "text", + "content": "[9] Jean-Charles Bazin, Yongduek Seo, Cédric Demonceaux, Pascal Vasseur, Katsushi Ikeuchi, Inso Kweon, and Marc Pollefeys. Globally optimal line clustering and vanishing point estimation in manhattan world. In CVPR, 2012. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 437, + 288, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 288, + 469 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 288, + 469 + ], + "type": "text", + "content": "[10] Federico Camposeco, Andrea Cohen, Marc Pollefeys, and Torsten Sattler. Hybrid camera pose estimation. In CVPR, 2018. 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "spans": [ + { + "bbox": [ + 48, + 472, + 288, + 504 + ], + "type": "text", + "content": "[11] Ondrej Chum, Jiri Matas, and Josef Kittler. Locally optimized ransac. In Joint Pattern Recognition Symposium, pages 236-243, 2003. 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 506, + 288, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 506, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 48, + 506, + 288, + 540 + ], + "type": "text", + "content": "[12] James Coughlan and Alan L Yuille. The manhattan world assumption: Regularities in scene statistics which enable bayesian inference. In NeurIPS, 2000. 2, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 541, + 288, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 541, + 288, + 585 + ], + "spans": [ + { + "bbox": [ + 48, + 541, + 288, + 585 + ], + "type": "text", + "content": "[13] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Computer Vision and Pattern Recognition Workshops (CVPRW), 2018. 1, 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 586, + 288, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 586, + 288, + 609 + ], + "spans": [ + { + "bbox": [ + 48, + 586, + 288, + 609 + ], + "type": "text", + "content": "[14] Bin Fan, Fuchao Wu, and Zhanyi Hu. Line matching leveraged by point correspondences. In CVPR, 2010. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 611, + 288, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 611, + 288, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 611, + 288, + 643 + ], + "type": "text", + "content": "[15] Bin Fan, Fuchao Wu, and Zhanyi Hu. Robust line matching through line-point invariants. Pattern Recognition, 45(2):794-805, 2012. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 644, + 288, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 644, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 644, + 288, + 668 + ], + "type": "text", + "content": "[16] Wolfgang Förstner and Bernhard P Wrobel. Photogrammetric computer vision. Springer, 2016. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 713 + ], + "type": "text", + "content": "[17] Shuang Gao, Jixiang Wan, Yishan Ping, Xudong Zhang, Shuzhou Dong, Yuchen Yang, Haikuan Ning, Jijunnan Li, and Yandong Guo. Pose refinement with joint optimization of visual points and lines. In IROS, 2022. 
2, 6, 8" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "type": "text", + "content": "[18] Ruben Gomez-Ojeda, Francisco-Angel Moreno, David Zuniga-Noël, Davide Scaramuzza, and Javier Gonzalez-Jimenez. Pl-slam: A stereo slam system through the combination of points and line segments. IEEE Transactions on Robotics, 35(3):734-746, 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "type": "text", + "content": "[19] Yijia He, Ji Zhao, Yue Guo, Wenhao He, and Kui Yuan. Pl-vio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors, 18(4):1159, 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 162, + 547, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 547, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 547, + 194 + ], + "type": "text", + "content": "[20] Jared Heinly, Johannes L. Schonberger, Enrique Dunn, and Jan-Michael Frahm. Reconstructing the world in six days. In CVPR, 2015. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 195, + 547, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 547, + 227 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 547, + 227 + ], + "type": "text", + "content": "[21] Manuel Hofer, Michael Maurer, and Horst Bischof. Improving sparse 3d models for man-made environments using line-based 3d reconstruction. In 3DV, 2014. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 228, + 547, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 228, + 547, + 260 + ], + "spans": [ + { + "bbox": [ + 307, + 228, + 547, + 260 + ], + "type": "text", + "content": "[22] Manuel Hofer, Michael Maurer, and Horst Bischof. Line3d: Efficient 3d scene abstraction for the built environment. In German Conference on Pattern Recognition, 2015. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 261, + 547, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 261, + 547, + 303 + ], + "spans": [ + { + "bbox": [ + 307, + 261, + 547, + 303 + ], + "type": "text", + "content": "[23] Manuel Hofer, Michael Maurer, and Horst Bischof. Efficient 3d scene abstraction using line segments. Computer Vision and Image Understanding (CVIU), 157:167-178, 2017. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 304, + 547, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 304, + 547, + 346 + ], + "spans": [ + { + "bbox": [ + 307, + 304, + 547, + 346 + ], + "type": "text", + "content": "[24] Aleksander Holynski, David Geraghty, Jan-Michael Frahm, Chris Sweeney, and Richard Szeliski. Reducing drift in structure from motion using extended features. In 3DV, 2020. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 349, + 547, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 349, + 547, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 349, + 547, + 381 + ], + "type": "text", + "content": "[25] Kun Huang, Yifan Wang, Zihan Zhou, Tianjiao Ding, Shenghua Gao, and Yi Ma. Learning to parse wireframes in images of man-made environments. In CVPR, 2018. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 382, + 547, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 547, + 414 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 547, + 414 + ], + "type": "text", + "content": "[26] Siyu Huang, Fangbo Qin, Pengfei Xiong, Ning Ding, Yijia He, and Xiao Liu. Tp-lsd: Tri-points based line segment detector. In ECCV, 2020. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 415, + 547, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 415, + 547, + 458 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 547, + 458 + ], + "type": "text", + "content": "[27] Arjun Jain, Christian Kurz, Thorsten Thormahlen, and Hans-Peter Seidel. Exploiting global connectivity constraints for reconstruction of 3d line segments from images. In CVPR, 2010. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 460, + 547, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 460, + 547, + 492 + ], + "spans": [ + { + "bbox": [ + 307, + 460, + 547, + 492 + ], + "type": "text", + "content": "[28] Wenzel Jakob, Jason Rhinelander, and Dean Moldovan. pybind11 - seamless operability between c++11 and python. https://github.com/pybind/pybind11.5" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 493, + 547, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 547, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 547, + 536 + ], + "type": "text", + "content": "[29] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls. Image matching across wide baselines: From paper to practice. IJCV, 129(2):517-547, 2021. 8" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 537, + 547, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 547, + 568 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 547, + 568 + ], + "type": "text", + "content": "[30] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. In CVPR, 2017. 8" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 570, + 547, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 570, + 547, + 602 + ], + "spans": [ + { + "bbox": [ + 307, + 570, + 547, + 602 + ], + "type": "text", + "content": "[31] Alex Kendall, Matthew Grimes, and Roberto Cipolla. PoseNet: A convolutional network for real-time 6-DoF camera relocalization. In ICCV, 2015. 8" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 603, + 547, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 603, + 547, + 645 + ], + "spans": [ + { + "bbox": [ + 307, + 603, + 547, + 645 + ], + "type": "text", + "content": "[32] Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics, 36(4), 2017. 
6" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 647, + 547, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 547, + 680 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 547, + 680 + ], + "type": "text", + "content": "[33] Zuzana Kukelova, Jan Heller, and Andrew Fitzgibbon. Efficient intersection of three quadrics and applications in computer vision. In CVPR, 2016. 8" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 547, + 713 + ], + "type": "text", + "content": "[34] Manuel Lange, Fabian Schweinfurth, and Andreas Schilling. Dld: A deep learning based line descriptor for line feature matching. In IROS, 2019. 2" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "21453" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[35] Viktor Larsson. PoseLib - Minimal Solvers for Camera Pose Estimation. https://github.com/vlarsson/ PoseLib.8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 129 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 129 + ], + "type": "text", + "content": "[36] Karel Lebeda, Jiri Matas, and Ondrej Chum. Fixing the Locally Optimized RANSAC. In BMVC, 2012. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 130, + 287, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 130, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 49, + 130, + 287, + 173 + ], + "type": "text", + "content": "[37] Haoang Li, Ji Zhao, Jean-Charles Bazin, Wen Chen, Zhe Liu, and Yun-Hui Liu. Quasi-globally optimal and efficient vanishing point estimation in Manhattan world. In ICCV, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 175, + 287, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 175, + 287, + 196 + ], + "spans": [ + { + "bbox": [ + 49, + 175, + 287, + 196 + ], + "type": "text", + "content": "[38] Kai Li, Jian Yao, and Xiaohu Lu. Robust line matching based on ray-point-ray structure descriptor. In ACCV, 2014. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 198, + 287, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 198, + 287, + 241 + ], + "spans": [ + { + "bbox": [ + 49, + 198, + 287, + 241 + ], + "type": "text", + "content": "[39] Kai Li, Jian Yao, Xiaohu Lu, Li Li, and Zhichao Zhang. Hierarchical line matching based on line-junction-line structure descriptor and local homography estimation. Neurocomputing, 184:207-220, 2016. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 49, + 243, + 287, + 285 + ], + "type": "text", + "content": "[40] Hyunjun Lim, Jinwoo Jeon, and Hyun Myung. Uv-slam: Unconstrained line-based slam using vanishing points for structural mapping. IEEE Robotics and Automation Letters, 7(2):1518-1525, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 287, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 287, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 287, + 287, + 319 + ], + "type": "text", + "content": "[41] Hyunjun Lim, Yeeun Kim, Kwangik Jung, Sumin Hu, and Hyun Myung. Avoiding degeneracy for monocular visual slam with point and line features. In ICRA, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 321, + 287, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 321, + 287, + 363 + ], + "spans": [ + { + "bbox": [ + 49, + 321, + 287, + 363 + ], + "type": "text", + "content": "[42] Yicheng Luo, Jing Ren, Xuefei Zhe, Di Kang, Yajing Xu, Peter Wonka, and Linchao Bao. Lc2wf: learning to construct 3d building wireframes from 3d line clouds. In BMVC, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 365, + 287, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 365, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 49, + 365, + 287, + 398 + ], + "type": "text", + "content": "[43] Quan Meng, Jiakai Zhang, Qiang Hu, Xuming He, and Jingyi Yu. Lgnn: A context-aware line segment detector. In ACM International Conference on Multimedia, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 399, + 287, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 399, + 287, + 431 + ], + "spans": [ + { + "bbox": [ + 49, + 399, + 287, + 431 + ], + "type": "text", + "content": "[44] Branislav Micusik and Horst Wildenauer. Structure from motion with line segments under relaxed endpoint constraints. IJCV, 124(1):65-79, 2017. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 434, + 287, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 434, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 49, + 434, + 287, + 476 + ], + "type": "text", + "content": "[45] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 478, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 478, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 49, + 478, + 287, + 521 + ], + "type": "text", + "content": "[46] Rémi Pautrat, Juan-Ting Lin, Viktor Larsson, Martin R Oswald, and Marc Pollefeys. Sold2: Self-supervised occlusion-aware line description and detection. In CVPR, 2021. 1, 2, 6, 7, 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 523, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 523, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 49, + 523, + 287, + 555 + ], + "type": "text", + "content": "[47] Mikael Persson and Klas Nordberg. Lambda twist: An accurate fast robust perspective three point (p3p) solver. In ECCV, 2018. 
8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 557, + 287, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 557, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 49, + 557, + 287, + 589 + ], + "type": "text", + "content": "[48] Francesco Pittaluga, Sanjeev J Koppal, Sing Bing Kang, and Sudipta N Sinha. Revealing scenes by inverting structure from motion reconstructions. In CVPR, 2019. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 591, + 287, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 591, + 287, + 634 + ], + "spans": [ + { + "bbox": [ + 49, + 591, + 287, + 634 + ], + "type": "text", + "content": "[49] Albert Pumarola, Alexander Vakhitov, Antonio Agudo, Alberto Sanfeliu, and Francese Moreno-Noguer. Pl-slam: Realtime monocular visual slam with points and lines. In ICRA, 2017. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 49, + 636, + 287, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 636, + 287, + 667 + ], + "spans": [ + { + "bbox": [ + 49, + 636, + 287, + 667 + ], + "type": "text", + "content": "[50] Yiming Qian and James H. Elder. A reliable online method for joint estimation of focal length and camera rotation. In ECCV, 2022. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 49, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 670, + 287, + 712 + ], + "type": "text", + "content": "[51] Srikumar Ramalingam, Michel Antunes, Dan Snow, Gim Hee Lee, and Sudeep Pillai. Line-sweep: Cross-ratio for wide-baseline matching and 3d reconstruction. In CVPR, 2015. 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 106 + ], + "type": "text", + "content": "[52] Srikumar Ramalingam, Sofien Bouaziz, and Peter Sturm. Pose estimation using both points and lines for geolocation. In ICRA, 2011. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 108, + 547, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 547, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 547, + 138 + ], + "type": "text", + "content": "[53] Siddhant Ranade and Srikumar Ramalingam. Novel single view constraints for Manhattan 3d line reconstruction. In 3DV, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 141, + 547, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 547, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 547, + 184 + ], + "type": "text", + "content": "[54] Jing Ren, Biao Zhang, Bojian Wu, Jianqiang Huang, Lubin Fan, Maks Ovsjanikov, and Peter Wonka. Intuitive and efficient roof modeling for reconstruction and synthesis. In ACM SIGGRAPH Asia, 2021. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 186, + 547, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 547, + 239 + ], + "type": "text", + "content": "[55] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M. Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In ICCV, 2021. 6, 7, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 241, + 547, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 241, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 241, + 547, + 274 + ], + "type": "text", + "content": "[56] Paul-Edouard Sarlin. Visual localization made easy with hloc. https://github.com/cvg/Hierarchical-Localization/.8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 276, + 546, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 546, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 546, + 308 + ], + "type": "text", + "content": "[57] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In CVPR, 2019. 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 309, + 547, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 309, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 308, + 309, + 547, + 342 + ], + "type": "text", + "content": "[58] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 343, + 546, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 343, + 546, + 374 + ], + "spans": [ + { + "bbox": [ + 308, + 343, + 546, + 374 + ], + "type": "text", + "content": "[59] Torsten Sattler et al. RansacLib - A Template-based *SAC Implementation. https://github.com/tsattler/RansacLib.8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 376, + 547, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 376, + 547, + 408 + ], + "spans": [ + { + "bbox": [ + 308, + 376, + 547, + 408 + ], + "type": "text", + "content": "[60] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2d-to-3d matching. In ICCV, 2011. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 411, + 547, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 547, + 464 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 547, + 464 + ], + "type": "text", + "content": "[61] Torsten Sattler, Will Maddern, Carl Toft, Akihiko Torii, Lars Hammarstrand, Erik Stenberg, Daniel Safari, Masatoshi Okutomi, Marc Pollefeys, Josef Sivic, et al. Benchmarking 6dof outdoor visual localization in changing conditions. In CVPR, 2018. 6, 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 466, + 547, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 466, + 547, + 498 + ], + "spans": [ + { + "bbox": [ + 308, + 466, + 547, + 498 + ], + "type": "text", + "content": "[62] Torsten Sattler, Tobias Weyand, Bastian Leibe, and Leif Kobbelt. Image retrieval for image-based localization revisited. In BMVC, 2012. 
6, 7" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 501, + 547, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 501, + 547, + 544 + ], + "spans": [ + { + "bbox": [ + 308, + 501, + 547, + 544 + ], + "type": "text", + "content": "[63] Grant Schindler, Panchapagesan Krishnamurthy, and Frank Dellaert. Line-based structure from motion for urban environments. In International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT), 2006. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 545, + 547, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 545, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 545, + 547, + 567 + ], + "type": "text", + "content": "[64] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 1, 2, 5, 8" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 568, + 547, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 547, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 547, + 600 + ], + "type": "text", + "content": "[65] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 602, + 547, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 602, + 547, + 644 + ], + "spans": [ + { + "bbox": [ + 308, + 602, + 547, + 644 + ], + "type": "text", + "content": "[66] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in RGB-D images. In CVPR, 2013. 8" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 308, + 647, + 547, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 547, + 678 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 547, + 678 + ], + "type": "text", + "content": "[67] Noah Snavely, Steven M Seitz, and Richard Szeliski. Photo tourism: exploring photo collections in 3d. In ACM SIGGRAPH, 2006. 2, 6, 7" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 308, + 680, + 547, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 547, + 712 + ], + "type": "text", + "content": "[68] Noah Snavely, Steven M Seitz, and Richard Szeliski. Modeling the world from internet photo collections. *IJCV*, 80(2):189-210, 2008. 6, 7" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "21454" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 288, + 116 + ], + "type": "text", + "content": "[69] Christoph Strecha, Wolfgang Von Hansen, Luc Van Gool, Pascal Fua, and Ulrich Thoennessen. 
On benchmarking camera calibration and multi-view stereo for high resolution imagery. In CVPR, 2008. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 287, + 150 + ], + "type": "text", + "content": "[70] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 174 + ], + "type": "text", + "content": "[71] Roberto Toldo and Andrea Fusiello. Robust multiple structures estimation with j-linkage. In ECCV, 2008. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 175, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 288, + 207 + ], + "type": "text", + "content": "[72] Alexander Vakhitov, Jan Funke, and Francesc Moreno-Noguer. Accurate and linear time pose estimation from points and lines. In ECCV, 2016. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 208, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 239 + ], + "type": "text", + "content": "[73] Alexander Vakhitov and Victor Lempitsky. Learnable line segment descriptor for visual slam. IEEE Access, 7:39923-39934, 2019. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 241, + 288, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 274 + ], + "type": "text", + "content": "[74] Bart Verhagen, Radu Timofte, and Luc Van Gool. Scale-invariant line descriptors for wide baseline matching. In WACV, 2014. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 288, + 319 + ], + "type": "text", + "content": "[75] Rafael Grompone Von Gioi, Jeremie Jakubowicz, Jean-Michel Morel, and Gregory Randall. Lsd: A fast line segment detector with a false detection control. TPAMI, 32(4):722-732, 2008. 2, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 320, + 288, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 288, + 353 + ], + "type": "text", + "content": "[76] Zhiheng Wang, Fuchao Wu, and Zhanyi Hu. Msld: A robust descriptor for line matching. Pattern Recognition, 42(5):941-953, 2009. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 354, + 288, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 288, + 388 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 288, + 388 + ], + "type": "text", + "content": "[77] Dong Wei, Yi Wan, Yongjun Zhang, Xinyi Liu, Bin Zhang, and Xiqi Wang. Elsr: Efficient line segment reconstruction with planes and points guidance. In CVPR, 2022. 
1, 2, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 388, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 288, + 430 + ], + "type": "text", + "content": "[78] Xinyu Wei, Jun Huang, and Xiaoyuan Ma. Real-time monocular visual slam by combining points and lines. In IEEE International Conference on Multimedia and Expo (ICME), 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 433, + 288, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 433, + 288, + 465 + ], + "spans": [ + { + "bbox": [ + 48, + 433, + 288, + 465 + ], + "type": "text", + "content": "[79] Hoi Sim Wong, Tat-Jun Chin, Jin Yu, and David Suter. Dynamic and hierarchical multi-structure geometric model fitting. In ICCV, 2011. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 467, + 288, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 467, + 288, + 498 + ], + "spans": [ + { + "bbox": [ + 48, + 467, + 288, + 498 + ], + "type": "text", + "content": "[80] Changchang Wu. Visualsfm: A visual structure from motion system. http://www.cs.washington.edu/homes/ccwu/vsfm, 2011. 2, 6, 8" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 500, + 288, + 533 + ], + "type": "text", + "content": "[81] Yifan Xu, Weijian Xu, David Cheung, and Zhuowen Tu. Line segment detection using transformers without edges. In CVPR, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 534, + 288, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 288, + 567 + ], + "type": "text", + "content": "[82] Nan Xue, Song Bai, Fudong Wang, Gui-Song Xia, Tianfu Wu, and Liangpei Zhang. Learning attraction field representation for robust line segment detection. In CVPR, 2019. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 568, + 288, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 288, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 288, + 601 + ], + "type": "text", + "content": "[83] Nan Xue, Tianfu Wu, Song Bai, Fudong Wang, Gui-Song Xia, Liangpei Zhang, and Philip HS Torr. Holistically-attracted wireframe parsing. In CVPR, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 602, + 288, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 635 + ], + "type": "text", + "content": "[84] Sungho Yoon and Ayoung Kim. Line as a visual sentence: Context-aware line descriptor for visual localization. IEEE Robotics and Automation Letters, 6(4):8726-8733, 2021. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "type": "text", + "content": "[85] Lilian Zhang and Reinhard Koch. An efficient and robust line segment matching approach based on lbd descriptor and pairwise geometric consistency. Journal of Visual Communication and Image Representation, 24(7):794-805, 2013. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 714 + ], + "type": "text", + "content": "[86] Lilian Zhang and Reinhard Koch. Structure and motion from line correspondences: Representation, projection, initializa-" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 296 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 327, + 73, + 547, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 547, + 105 + ], + "type": "text", + "content": "tion and sparse bundle adjustment. Journal of Visual Communication and Image Representation, 25(5):904-915, 2014. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "text", + "content": "[87] Lilian Zhang, Huimin Lu, Xiaoping Hu, and Reinhard Koch. Vanishing point estimation and line classification in a Manhattan world with a unifying camera model. *IJCV*, 117, 2015. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 152, + 547, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 547, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 547, + 194 + ], + "type": "text", + "content": "[88] Ziheng Zhang, Zhengxin Li, Ning Bi, Jia Zheng, Jinlei Wang, Kun Huang, Weixin Luo, Yanyu Xu, and Shenghua Gao. Ppgnet: Learning point-pair graph for line segment detection. In CVPR, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 197, + 546, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 197, + 546, + 229 + ], + "spans": [ + { + "bbox": [ + 308, + 197, + 546, + 229 + ], + "type": "text", + "content": "[89] Lipu Zhou, Jiamin Ye, and Michael Kaess. A stable algebraic camera pose estimation for minimal configurations of 2d/3d point and line correspondences. In ACCV, 2018. 2, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 231, + 546, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 231, + 546, + 263 + ], + "spans": [ + { + "bbox": [ + 308, + 231, + 546, + 263 + ], + "type": "text", + "content": "[90] Yichao Zhou, Haozhi Qi, Yuexiang Zhai, Qi Sun, Zhili Chen, Li-Yi Wei, and Yi Ma. Learning to reconstruct 3d Manhattan wireframes from a single image. In ICCV, 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 265, + 547, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 265, + 547, + 296 + ], + "spans": [ + { + "bbox": [ + 308, + 265, + 547, + 296 + ], + "type": "text", + "content": "[91] Xingxing Zuo, Xiaojia Xie, Yong Liu, and Guoquan Huang. Robust visual slam with point and line features. In IROS, 2017. 
2" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "21455" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_content_list.json b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..581ec30698ad52ae73cc2e8ff1599558f6864d2a --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_content_list.json @@ -0,0 +1,1562 @@ +[ + { + "type": "text", + "text": "3D Neural Field Generation using Triplane Diffusion", + "text_level": 1, + "bbox": [ + 220, + 130, + 750, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "J. Ryan Shue\\* Eric Ryan Chan\\*2 Ryan Po\\*2 Zachary Ankner\\*3,4 Jiajun Wu\\*2 Gordon Wetzstein\\*2 Milton Academy 2Stanford University 3Massachusetts Institute of Technology 4MosaicML", + "bbox": [ + 102, + 191, + 866, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 286, + 313, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion models have emerged as the state-of-the-art for image generation, among other tasks. Here, we present an efficient diffusion-based model for 3D-aware generation of neural fields. Our approach pre-processes training data, such as ShapeNet meshes, by converting them to continuous occupancy fields and factoring them into a set of axis-aligned triplane feature representations. Thus, our 3D training scenes are all represented by 2D feature planes, and we can directly train existing 2D diffusion models on these representations to generate 3D neural fields with high quality and diversity, outperforming alternative approaches to 3D-aware generation. Our approach requires essential modifications to existing triplane factorization pipelines to make the resulting features easy to learn for the diffusion model. We demonstrate state-of-the-art results on 3D generation on several object classes from ShapeNet.", + "bbox": [ + 73, + 318, + 473, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 603, + 207, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Diffusion models have seen rapid progress, setting state-of-the-art (SOTA) performance across a variety of image generation tasks. While most diffusion methods model 2D images, recent work [2, 14, 42, 86] has attempted to develop denoising methods for 3D shape generation. These 3D diffusion methods operate on discrete point clouds and, while successful, exhibit limited quality and resolution.", + "bbox": [ + 75, + 628, + 470, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In contrast to 2D diffusion, which directly leverages the image as the target for the diffusion process, it is not directly obvious how to construct such 2D targets in the case of 3D diffusion. 
Interestingly, recent work on 3D-aware generative adversarial networks (GANs) (see Sec. 2 for an overview) has demonstrated impressive results for 3D shape generation using 2D generators. We build upon this idea of learning to generate triplane representations [6] that encode 3D scenes or", + "bbox": [ + 75, + 734, + 470, + 854 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/450fe3152bfc1f768b556951bf1fcd42a550acbbd9424e433875c2618a8dea15.jpg", + "image_caption": [ + "Figure 1. Our method leverages existing 2D diffusion models for 3D shape generation using hybrid explicit-implicit neural representations. Top: triplane-based 3D shape diffusion process using our framework. Bottom: Interpolation between generated shapes." + ], + "image_footnote": [], + "bbox": [ + 506, + 287, + 890, + 496 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "radiance fields as a set of axis-aligned 2D feature planes. The structure of a triplane is analogous to that of a 2D image and can be used as part of a 3D generative method that leverages conventional 2D generator architectures.", + "bbox": [ + 496, + 595, + 892, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inspired by recent efforts in designing efficient 3D GAN architectures, we introduce a neural field-based diffusion framework for 3D representation learning. Our approach follows a two-step process. In the first step, a training set of 3D scenes is factored into a set of per-scene triplane features and a single, shared feature decoder. In the second step, a 2D diffusion model is trained on these triplanes. The trained diffusion model can then be used at inference time to generate novel and diverse 3D scenes. By interpreting triplanes as multi-channel 2D images and thus decoupling generation from rendering, we can leverage current (and likely future) SOTA 2D diffusion model backbones nearly out of the box. Fig. 1 illustrates how a single object is generated with our framework (top), and how two generated objects—even with different topologies—can be interpolated (bottom).", + "bbox": [ + 496, + 657, + 893, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our core contributions are as follows:", + "bbox": [ + 519, + 885, + 767, + 898 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution. \nPart of the work was done during an internship at Stanford. \nProject page: https://jryanshue.com/nfd", + "bbox": [ + 94, + 862, + 408, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "20875", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/24404e688258727d62d4c8a74cb692aa929c6ec91ff1e58989e7466b255ce1e8.jpg", + "image_caption": [ + "Figure 2. Visualization of the denoising process. Here, we show examples of triplanes as they are iteratively denoised at inference, as well as the shapes we obtain by \"decoding\" the noisy triplanes with our jointly-learned MLP. 
By interpreting triplane features simply as multi-channel feature images, we build our framework around 2D diffusion models." + ], + "image_footnote": [], + "bbox": [ + 101, + 88, + 869, + 401 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a generative framework for diffusion on 3D scenes that utilizes 2D diffusion model backbones and has a built-in 3D inductive bias.", + "- We show that our approach is capable of generating both high-fidelity and diverse 3D scenes that outperform state-of-the-art 3D GANs." + ], + "bbox": [ + 96, + 479, + 468, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 601, + 217, + 617 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural fields. Implicit neural representations, or neural fields, hold the SOTA for 3D scene representation [74, 80]. They either solely learn geometry [1, 4, 5, 10, 11, 15, 21, 23, 45, 47, 48, 57, 65, 73] or use posed images to jointly optimize geometry and appearance [6, 7, 18, 25, 29, 33, 36–39, 44, 49, 50, 54, 55, 59, 67, 72, 82–84]. Neural fields represent scenes as continuous functions, allowing them to scale well with scene complexity compared to their discrete counterparts [40, 66]. Initial methods used a single, large multilayer perceptron (MLP) to represent entire scenes [10, 47, 49, 57, 65], but reconstruction with this approach can be computationally inefficient because training such a representation requires thousands of forward passes through the large model per scene. Recent years have shown a trend towards locally conditioned representations, which either learn local functions [5, 9, 28, 62] or locally modulate a shared function with a hybrid explicit-implicit representation [4, 6, 12, 19–21, 37, 43, 45, 58]. These methods use small MLPs, which are efficient during inference and significantly", + "bbox": [ + 75, + 628, + 473, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "better at capturing local scene details. We adopt the expressive hybrid triplane representation introduced by Chan et al. [6]. Triplanes are efficient, scaling with the surface area rather than volume, and naturally integrate with expressive, fine-tuned 2D generator architectures. We modify the triplane representation for compatibility with our denoising framework.", + "bbox": [ + 496, + 479, + 893, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Generative synthesis in 2D and 3D. Some of the most popular generative models include GANs [22, 31, 32], autoregressive models [16, 60, 77, 78], score matching models [68, 70, 71], and denoising diffusion probabilistic models (DDPMs) [13, 26, 52, 76]. DDPMs are arguably the SOTA approach for synthesizing high-quality and diverse 2D images [13]. Moreover, GANs can be difficult to train and suffer from issues like mode collapse [75] whereas diffusion models train stably and have been shown to better capture the full training distribution.", + "bbox": [ + 496, + 597, + 893, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In 3D, however, GANs still outperform alternative generative approaches [6,7,17,24,34,35,37,46,51,53,56, 61,64,79,85,87]. Some of the most successful 3D GANs use an expressive 2D generator backbone (e.g., StyleGAN2 [32]) to synthesize triplane representations which are then decoded with a small, efficient MLP [6]. 
Because the decoder is small and must generalize across many local latents, these methods assign most of their expressiveness to the powerful backbone. In addition, these methods treat the triplane as a multi-channel image, allowing the generator backbone to be used almost out of the box.", + "bbox": [ + 496, + 734, + 895, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "20876", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Current 3D diffusion models [2, 14, 42, 81, 86] are still very limited. They either denoise a single latent or do not utilize neural fields at all, opting for a discrete point-cloud-based approach. For example, concurrently developed single-latent approaches [2, 14] generate a global latent for conditioning the neural field, relying on a 3D decoder to transform the scene representation from 1D to 3D without directly performing 3D diffusion. As a result, the diffusion model does not actually operate in 3D, losing this important inductive bias and generating blurry results. Point-cloud-based approaches [42, 86], on the other hand, give the diffusion model explicit 3D control over the shape, but limit its resolution and scalability due to the coarse discrete representation. While showing promise, both 1D-to-3D and point cloud diffusion approaches require specific architectures that cannot easily leverage recent advances in 2D diffusion models.", + "bbox": [ + 75, + 90, + 472, + 330 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our work, we propose to directly generate triplanes with out-of-the-box SOTA 2D diffusion models, granting the diffusion model near-complete control over the generated neural field. Key to our approach is our treatment of well-fit triplanes in a shared latent space as ground truth data for training our diffusion model. We show that the latent space of these triplanes is grounded spatially in local detail, giving the diffusion model a critical inductive bias for 3D generation. Our approach gives rise to an expressive 3D diffusion model.", + "bbox": [ + 75, + 332, + 473, + 470 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Triplane Diffusion Framework", + "text_level": 1, + "bbox": [ + 76, + 481, + 357, + 498 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, we explain the architecture of our neural field diffusion (NFD) model for 3D shapes. In Section 3.1, we explain how we can represent the occupancy field of a single object using a triplane. In Section 3.2, we describe how we can extend this framework to represent an entire dataset of 3D objects. In Section 3.3, we describe the regularization techniques that we found necessary to achieve optimal results. Finally, Sections 3.4 and 3.5 illustrate training and sampling from our model. For an overview of the pipeline at inference, see Figure 3.", + "bbox": [ + 75, + 506, + 470, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Representing a 3D Scene using a Triplane", + "text_level": 1, + "bbox": [ + 76, + 665, + 431, + 681 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Neural fields have been introduced as continuous and expressive 3D scene representations. In this context, a neural field $\\mathrm{NF}:\\mathbb{R}^3\\to \\mathbb{R}^M$ is a neural network-parameterized mapping function that takes as input a three-dimensional coordinate $\\mathbf{x}$ and that outputs an $M$ -dimensional vector representing the neural field. 
Neural fields have been demonstrated for occupancy fields [47], signed distance functions [57], radiance fields [49], among many other types of signals [65]. For the remainder of this work, we focus on 3D scene representations using occupancy fields such that the output of the neural field is a binary value, indicating whether a coordinate is inside or outside an object and $M = 1$ .", + "bbox": [ + 75, + 688, + 468, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The triplane representation is a hybrid explicit-implicit network architecture for neural fields that is particularly effi", + "bbox": [ + 76, + 869, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "cient to evaluate [6]. This representation uses three 2D feature planes $\\mathbf{f}_{xy},\\mathbf{f}_{xz},\\mathbf{f}_{yz}\\in \\mathbb{R}^{N\\times N\\times C}$ with a spatial resolution of $N\\times N$ and $C$ feature channels each, and a multilayer perceptron (MLP) \"decoder\" tasked with interpreting features sampled from the planes. A 3D coordinate is queried by projecting it onto each of the axis-aligned planes (i.e., the $x - y,x - z,$ and $y - z$ planes), querying and aggregating the respective features, and decoding the resulting feature using a lightweight $\\mathrm{MLP}_{\\phi}$ with parameters $\\phi$ . Similar to Chan et al. [6], we found the sum to be an efficient feature aggregation function, resulting in the following formulation for the triplane architecture:", + "bbox": [ + 496, + 90, + 893, + 258 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {N F} (\\mathbf {x}) = \\operatorname {M L P} _ {\\phi} \\left(\\mathbf {f} _ {x y} (\\mathbf {x}) + \\mathbf {f} _ {y z} (\\mathbf {x}) + \\mathbf {f} _ {x z} (\\mathbf {x})\\right). \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 272, + 893, + 290 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The feature planes and MLP can be jointly optimized to represent the occupancy field of a shape.", + "bbox": [ + 496, + 304, + 890, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Representing a Class of Objects with Triplanes", + "text_level": 1, + "bbox": [ + 498, + 349, + 890, + 366 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to convert our dataset of shapes into a dataset of triplanes so that we can train a diffusion model on these learned feature planes. However, because the MLP and feature planes are typically jointly learned, we cannot simply train a triplane for each object of the dataset individually. If we did, the MLP's corresponding to each object in our dataset would fail to generalize to triplanes generated by our diffusion model. Therefore, instead of training triplanes for each object in isolation, we jointly optimize the feature planes for many objects simultaneously, along with a decoder that is shared across all objects. This joint optimization results in a dataset of optimized feature planes and an MLP capable of interpreting any triplane from the dataset distribution. Thus, at inference, we can use this MLP to decode feature planes generated by our model.", + "bbox": [ + 496, + 375, + 893, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In practice, during training, we are given a dataset of $I$ objects, and we preprocess the coordinates and ground-truth occupancy values of $J$ points per object. 
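As a parenthetical illustration of the triplane query in Eq. (1) above, the following is a minimal, hypothetical PyTorch-style sketch (not the authors' released code); the names `query_triplane`, `f_xy`, `f_xz`, `f_yz`, and `mlp` are placeholders chosen only for illustration.

```python
# Hypothetical sketch of the triplane query in Eq. (1): project a 3D point onto
# the xy/xz/yz planes, bilinearly sample each feature plane, sum the features,
# and decode the result with a small shared MLP.
import torch
import torch.nn.functional as F

def query_triplane(coords, f_xy, f_xz, f_yz, mlp):
    """coords: (B, 3) in [-1, 1]^3; f_*: (C, N, N) feature planes; mlp: decoder."""
    def sample(plane, uv):
        grid = uv.view(1, 1, -1, 2)                                # (1, 1, B, 2)
        feat = F.grid_sample(plane.unsqueeze(0), grid,
                             mode="bilinear", align_corners=True)  # (1, C, 1, B)
        return feat.view(plane.shape[0], -1).t()                   # (B, C)

    x, y, z = coords[:, 0:1], coords[:, 1:2], coords[:, 2:3]
    feats = (sample(f_xy, torch.cat([x, y], dim=-1)) +
             sample(f_xz, torch.cat([x, z], dim=-1)) +
             sample(f_yz, torch.cat([y, z], dim=-1)))
    return mlp(feats)                                              # (B, 1) occupancy
```

Summation is used here as the aggregation function, matching the formulation above; `mlp` stands for the lightweight shared decoder described in Sec. 3.2.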
Typically, $J = 10\\mathrm{M}$ where 5M points are sampled uniformly throughout the volume and 5M points are sampled near the object surface. Our naive training objective is a simple $L2$ loss between predicted occupancy values $\\mathrm{NF}^{(i)}(\\mathbf{x}_j^{(i)})$ and ground-truth occupancy values $\\mathrm{O}_j^{(i)}$ for each point, where $\\mathbf{x}_j^{(i)}$ denotes the $j^{\\mathrm{th}}$ point from the $i^{\\mathrm{th}}$ scene:", + "bbox": [ + 496, + 588, + 893, + 733 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {N A I V E}} = \\sum_ {i} ^ {I} \\sum_ {j} ^ {J} \\left\\| \\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right) - \\mathrm {O} _ {j} ^ {(i)} \\right\\| _ {2} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 750, + 893, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During training, we optimize Equation 2 for a shared MLP parameterized by $\\phi$ , as well as the feature planes corresponding to every object in our dataset:", + "bbox": [ + 496, + 805, + 890, + 851 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\} = \\underset {\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\}} {\\operatorname {a r g m i n}} \\mathcal {L} _ {\\mathrm {N A I V E}} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 864, + 893, + 904 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "20877", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/089437b1d30d05c909262096994b21a0a3a962f57b354646c1483ff477ef08e7.jpg", + "image_caption": [ + "Figure 3. Pipeline. Sampling a 3D neural field from our model consists of two decoupled processes: 1) using a trained DDPM to iteratively denoise latent noise into feature maps and 2) using a locally conditioned Occupancy Network to decode the resulting triplane into the final neural field. This architecture allows the DDPM to generate samples with a 3D inductive bias while utilizing existing 2D DDPM backbones and a continuous output representation." + ], + "image_footnote": [], + "bbox": [ + 194, + 99, + 883, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Regularizing Triplanes for Effective Generalization", + "text_level": 1, + "bbox": [ + 76, + 351, + 470, + 382 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following the procedure outlined in the previous section, we can learn a dataset of triplane features and a shared triplane decoder; we can then train a diffusion model on these triplane features and sample novel shapes at inference. Unfortunately, the result of this naive training procedure is a generative model for triplanes that produces shapes with significant artifacts.", + "bbox": [ + 75, + 393, + 468, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We find it necessary to regularize the triplane features during optimization to simplify the data manifold that the diffusion model must learn. Therefore, we include total variation (TV) regularization terms with weight $\\lambda_{1}$ in the loss function to ensure that the feature planes of each training scene do not contain spurious high-frequency information. 
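For concreteness, a total-variation penalty on a single feature plane can be sketched as follows; this is a minimal PyTorch-style illustration under assumed tensor shapes, not the authors' implementation (an L2 variant of the finite differences would work analogously).

```python
# Hypothetical sketch of a TV penalty on one (C, N, N) triplane feature plane:
# penalize differences between neighboring entries to suppress spurious
# high-frequency content in the learned features.
import torch

def tv_loss(plane):
    dh = (plane[:, 1:, :] - plane[:, :-1, :]).abs().mean()  # vertical differences
    dw = (plane[:, :, 1:] - plane[:, :, :-1]).abs().mean()  # horizontal differences
    return dh + dw
```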
This strategy makes the distribution of triplane features more similar to the manifold of natural images (see supplement), which we found necessary to robustly train a diffusion model on them (see Sec. 4).", + "bbox": [ + 75, + 486, + 470, + 637 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the trained feature values are unbounded, our DDPM backbone requires training inputs with values in the range [-1,1]. We address this by normalizing the feature planes before training, but this process is sensitive to outliers. As a result, we include an L2 regularization term on the triplane features with weight $\\lambda_{2}$ to discourage outlying values.", + "bbox": [ + 75, + 641, + 468, + 732 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We also include an explicit density regularization (EDR) term. Due to our ground-truth occupancy data being concentrated on the surface of the shapes, there is often insufficient data to learn a smooth outside-of-shape volume. Our EDR term combats this issue by sampling a set of random points from the volume, offsetting the points by a random vector $\\omega$ , feeding both sets through the MLP, and calculating the mean squared error. Notationally, this term can be represented as $\\mathrm{EDR}(\\mathrm{NF}(\\mathbf{x}), \\omega) = \\| \\mathrm{NF}(\\mathbf{x}) - \\mathrm{NF}(\\mathbf{x} + \\omega) \\|_2^2$ . We find this term necessary to remove floating artifacts in the volume (see Sec. 4)", + "bbox": [ + 75, + 734, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our training objective, with added regularization terms, is as follows:", + "bbox": [ + 498, + 352, + 893, + 381 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} = \\sum_ {i} ^ {N} \\sum_ {j} ^ {M} \\operatorname {B C E} \\left(\\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right) - \\mathrm {O} _ {j} ^ {(i)}\\right) \\\\ + \\lambda_ {1} \\left(\\operatorname {T V} \\left(\\mathbf {f} _ {x y} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {x z} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {y z} ^ {(i)}\\right)\\right) \\\\ + \\lambda_ {2} \\left(\\left| \\left| \\mathbf {f} _ {x y} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {y z} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {x z} ^ {(i)} \\right| \\right| _ {2}\\right) \\\\ + \\operatorname {E D R} \\left(\\mathrm {N F} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right), \\boldsymbol {\\omega}\\right) \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 391, + 892, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Training a Diffusion Model for Triplane Features", + "text_level": 1, + "bbox": [ + 500, + 527, + 890, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For unconditional generation, a diffusion model takes Gaussian noise as input and gradually denoises it in $T$ steps. In our framework, the diffusion model operates on triplane features $\\mathbf{f}_{0\\dots T}\\in \\mathbb{R}^{N\\times N\\times 3C}$ that stack the feature channels of all three triplane axes into a single image. In this notation, $\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{f}_T;0,\\mathbf{I})$ is the triplane feature image consisting of purely Gaussian noise, and $\\mathbf{f}_0\\sim q(\\mathbf{f}_0)$ is a random sample drawn from the data distribution. 
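The explicit density regularization (EDR) term described in Sec. 3.3 above admits an equally small sketch; this is hypothetical PyTorch-style code, not the authors' implementation, and the number of sampled points and the offset scale are assumed values.

```python
# Hypothetical sketch of the EDR term: perturb random query points by a small
# random offset omega and penalize the change in predicted occupancy.
import torch

def edr_loss(neural_field, num_points=5000, offset_scale=0.01, device="cpu"):
    x = torch.rand(num_points, 3, device=device) * 2.0 - 1.0  # points in [-1, 1]^3
    omega = torch.randn_like(x) * offset_scale                 # small random offset
    return ((neural_field(x) - neural_field(x + omega)) ** 2).mean()
```

Here `neural_field` stands for any callable mapping a batch of 3D points to occupancy predictions, e.g., a closure over the triplane query sketched earlier.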
The data distribution in our framework includes the pre-factored triplanes of the training set, normalized by the mean and variance of the entire dataset such that each channel has a zero mean and a standard deviation of 0.5.", + "bbox": [ + 496, + 566, + 893, + 732 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The forward or diffusion processes is a Markov chain that gradually adds Gaussian noise to the triplane features, according to a variance schedule $\\beta_{1},\\beta_{2},\\dots,\\beta_{T}$", + "bbox": [ + 496, + 733, + 893, + 779 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nq \\left(\\mathbf {f} _ {t} \\mid \\mathbf {f} _ {t - 1}\\right) = \\mathcal {N} \\left(\\mathbf {f} _ {t}; \\sqrt {1 - \\beta_ {t}} \\mathbf {f} _ {t - 1}, \\beta_ {t} \\mathbf {I}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 789, + 892, + 815 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This forward process can be directly sampled at step $t$ using the closed-form solution $q(\\mathbf{f}_t|\\mathbf{f}_0) = \\mathcal{N}(\\mathbf{f}_t;\\sqrt{\\bar{\\alpha}_t}\\mathbf{f}_0,(1 - \\bar{\\alpha}_t)\\mathbf{I})$ , where $\\bar{\\alpha}_{t} = \\prod_{s = 1}^{t}\\alpha_{s}$ with $\\alpha_{t} = 1 - \\beta_{t}$ .", + "bbox": [ + 496, + 825, + 890, + 871 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The goal of training a diffusion model is to learn the reverse process. For this purpose, a function approximator", + "bbox": [ + 498, + 871, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "20878", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\epsilon_{\\theta}$ is needed that predicts the noise $\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ from its noisy input. Typically, this function approximator is implemented as a variant of a convolutional neural network defined by its parameters $\\theta$ . Following [26], we train our triplane diffusion model by optimizing the simplified variant of the variational bound on negative log-likelihood:", + "bbox": [ + 75, + 90, + 472, + 183 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {D D P M}} = \\mathbb {E} _ {t, \\mathbf {f} _ {0}, \\epsilon} \\left[ \\left\\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\sqrt {\\bar {\\alpha} _ {t}} \\mathbf {f} _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\boldsymbol {\\epsilon}, t\\right) \\right\\| ^ {2} \\right], \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 191, + 470, + 219 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $t$ is sampled uniformly between 1 and $T$ .", + "bbox": [ + 76, + 228, + 385, + 244 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Sampling Novel 3D Shapes", + "text_level": 1, + "bbox": [ + 76, + 252, + 320, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The unconditional generation of shapes at inference is a two-stage process that involves sampling a triplane from the trained diffusion model and then querying the neural field.", + "bbox": [ + 76, + 276, + 468, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Sampling a triplane from the diffusion model is identical to sampling an image from a diffusion model. 
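Before turning to sampling, the simplified training objective in Eq. (6) can be sketched as a single training step; this is a hypothetical PyTorch-style illustration, not the authors' training code, and the denoiser call signature `eps_theta(f_t, t)` is an assumption.

```python
# Hypothetical sketch of one DDPM training step on stacked triplane features
# (Eq. (6)): noise f0 at a random timestep via the closed-form forward process
# and regress the injected noise with the denoiser eps_theta.
import torch
import torch.nn.functional as F

def ddpm_training_step(eps_theta, f0, alpha_bar):
    """f0: (B, 3C, N, N) normalized triplanes; alpha_bar: (T,) cumulative products of alpha_t."""
    t = torch.randint(0, alpha_bar.shape[0], (f0.shape[0],), device=f0.device)
    a = alpha_bar[t].view(-1, 1, 1, 1)
    eps = torch.randn_like(f0)
    f_t = a.sqrt() * f0 + (1.0 - a).sqrt() * eps  # q(f_t | f_0) in closed form
    return F.mse_loss(eps_theta(f_t, t), eps)     # simplified objective, Eq. (6)
```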
Beginning with a random Gaussian noise $\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ , we iteratively denoise the sample in $T$ steps as", + "bbox": [ + 76, + 321, + 468, + 382 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {t - 1} = \\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\mathbf {f} _ {t} - \\frac {1 - \\alpha_ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {f} _ {t}, t)\\right) + \\sigma_ {t} \\boldsymbol {\\epsilon}, \\qquad (7)\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 392, + 470, + 426 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})$ for all but the very last step (i.e., $t = 1$ ), at which $\\epsilon = 0$ and $\\sigma_t^2 = \\beta_t$ .", + "bbox": [ + 75, + 436, + 468, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The result of the denoising process, $\\mathbf{f}_0$ , is a sample from the normalized triplane feature image distribution. Denormalizing it using the dataset normalization statistics and splitting the generated features into the axis aligned planes $\\mathbf{f}_{xy}, \\mathbf{f}_{yz}, \\mathbf{f}_{xz}$ yields a set of triplane features which, when combined with the pre-trained MLP, are used to query the neural field.", + "bbox": [ + 75, + 467, + 468, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use the marching cubes algorithm [41] to extract meshes from the resulting neural fields. Note that our framework is largely agnostic to the diffusion backbone used; we choose to use ADM [52], a 2D state-of-the-art diffusion model.", + "bbox": [ + 75, + 558, + 468, + 632 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Source code and pre-trained models will be made available.", + "bbox": [ + 76, + 633, + 468, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 676, + 207, + 694 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. To compare NFD against existing 3D generative methods, we train our model on three object categories from the ShapeNet dataset individually. Consistent with previous work [85, 86], we choose the categories: cars, chairs and airplanes. Each mesh is normalized to lie within $[-1,1]^3$ and then passed through watertighting. The generation of ground truth triplanes then works as follows: we precompute the occupancies of 10M points per object, where 5M points are distributed uniformly at random in the volume, and 5M points are sampled within a 0.01 distance from the mesh surface.", + "bbox": [ + 75, + 702, + 470, + 854 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/0ee6fdf78f27a9541f5facc73e5d45685bbbc5df0792a0230a6fe677281658c9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DataMethodFID ↓Precision ↑Recall ↑
CarsPVD¹335.80.10.2
SDF-StyleGAN98.035.936.2
NFD (Ours)83.649.550.5
ChairsPVD305.80.21.7
SDF-StyleGAN36.590.987.4
NFD (Ours)26.492.494.8
PlanesPVD244.42.73.8
SDF-StyleGAN65.864.572.8
NFD (Ours)32.470.581.1
", + "bbox": [ + 506, + 88, + 887, + 256 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Render quality metrics on ShapeNet. We achieve state-of-the-art FID, which measures overall quality, as well as state-of-the-art precision and recall, which measure fidelity and diversity independently. Metrics calculated on shaded renderings of generated and ground-truth shapes.", + "bbox": [ + 496, + 266, + 893, + 337 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We then train an MLP jointly with as many triplanes as we can fit in the GPU memory of a single A6000 GPU. In our case, we initially train on the first 500 objects in the dataset. After this initial joint optimization, we freeze the shared MLP and use it to optimize the triplanes of the remaining objects in the dataset. All triplanes beyond the first 500 are optimized individually with the same shared MLP; thus, the training of these triplanes can be effectively parallelized.", + "bbox": [ + 496, + 363, + 893, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metrics. As in [85], we choose to evaluate our model using an adapted version of Fréchet inception distance (FID) that utilizes rendered shading images of our generated meshes. Shading-image FID [85] overcomes limitations of other mesh-based evaluation metrics such as the lightfield-descriptor (LFD) [8] by taking human perception into consideration. Zheng et al. [85] provide a detailed discussion of the various evaluation metrics for 3D generative models. Following [85], shading images of each shape are rendered from 20 distinct views; FID is then computed for each view and averaged to obtain a final score:", + "bbox": [ + 496, + 503, + 893, + 669 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\mathrm {F I D} = \frac {1}{2 0} \left[ \sum_ {i = 1} ^ {2 0} \| \mu_ {g} ^ {i} - \mu_ {r} ^ {i} \| ^ {2} + \operatorname {T r} \left(\Sigma_ {g} ^ {i} + \Sigma_ {r} ^ {i} - 2 \left(\Sigma_ {r} ^ {i} \Sigma_ {g} ^ {i}\right) ^ {\frac {1}{2}}\right) \right], \tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 680, + 890, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $g$ and $r$ represent the generated and training datasets, while $\mu^i,\Sigma^i$ represent the mean and covariance matrices for shading images rendered from the $i^{\mathrm{th}}$ view, respectively.", + "bbox": [ + 496, + 734, + 893, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Along with FID, we also report precision and recall scores using the method proposed by Sajjadi et al. [63]. While FID correlates well with perceived image quality, the one-dimensional nature of the metric prevents it from identifying different failure modes. Sajjadi et al. [63] aim to disentangle FID into separate metrics known as precision and recall, where the former correlates to the quality of the generated images and the latter represents the diversity of", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1As PVD outputs point clouds, we apply the ball pivoting algorithm (BPA) to PVD outputs before calculating FID. 
BPA was selected as it achieved a good balance between speed and quality.", + "bbox": [ + 75, + 862, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "footer", + "text": "20879", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/773685b9d6e947ccd79535897d9e9843a1d1e959a7ce373df923481ebb168c93.jpg", + "image_caption": [ + "Figure 4. We compare 3D shapes generated by our model against generations of state-of-the-art baselines for ShapeNet Cars, Chairs, and Planes. Our model synthesizes shapes with noticeably sharper details than the previous state-of-the-art, while also capturing the broad diversity in each category." + ], + "image_footnote": [], + "bbox": [ + 83, + 92, + 890, + 474 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the generative model.", + "bbox": [ + 76, + 554, + 220, + 569 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We compare our method against state-of-the-art point-based and neural-field-based 3D generative models, namely PVD [86] and SDF-StyleGAN [85]. For evaluation, we use the pre-trained models for both methods on the three ShapeNet categories listed above. Note that PVD is inherently a point-based generative method and therefore does not output a triangle mesh needed for shading image rendering. To circumvent this, we choose to convert generated point clouds to triangle meshes using the ball-pivoting algorithm [3].", + "bbox": [ + 75, + 590, + 470, + 728 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. We provide qualitative results, comparing samples generated by our method to samples generated by baselines, in Figure 4. Our method generates a diverse and finely detailed collection of objects. Objects produced by our method contain sharp edges and features that we would expect to be difficult to accurately reconstruct—note that delicate features, such as the suspension of cars, the slats in chairs, and armaments of planes, are faithfully generated. Perhaps more importantly, samples generated by our model are diverse—our model successfully synthesizes many different types of cars, chairs,", + "bbox": [ + 75, + 750, + 470, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and planes, including reproductions of several varieties that we would expect to be rare in the training dataset.", + "bbox": [ + 498, + 554, + 890, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In comparison, while $PVD$ also produces a wide variety of shapes, it is limited by its nature to generating only coarse object shapes. Furthermore, because $PVD$ produces a fixed-size point cloud with only 2048 points, it cannot synthesize fine elements.", + "bbox": [ + 496, + 588, + 890, + 662 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SDF-StyleGAN creates high-fidelity shapes, accurately reproducing many details, such as airplane engines and chair legs. However, our method is more capable of capturing very fine features. Note that while SDF-StyleGAN smooths over the division between tire and wheel well when generating cars, our method faithfully portrays this gap. Similarly, our method synthesizes the tails and engines of airplanes, and the legs and planks of chairs, with noticeably better definition. Our method also apparently generates a greater diversity of objects than SDF-StyleGAN. While SDF-StyleGAN capably generates varieties of each ShapeNet class, our method reproduces the same classes with greater variation. 
This is expected, as a noted advantage of diffusion models over GANs is better mode coverage.", + "bbox": [ + 496, + 669, + 892, + 880 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We provide quantitative results in Table 1. The metrics", + "bbox": [ + 519, + 885, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "20880", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e662840be1dd297b5a1cc3012ca032bbe21da126e88633ab1d6a53ae896a00ed.jpg", + "image_caption": [ + "Figure 5. Interpolation. Our model learns a continuous latent space of triplanes. We can smoothly interpolate between two noise triplanes, resulting in semantically meaningful shape interpolation." + ], + "image_footnote": [], + "bbox": [ + 84, + 90, + 885, + 237 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tell a similar story to the qualitative results. Quantitatively, NFD outperforms all baselines in FID, precision, and recall for each ShapeNet category. FID is a standard one-number metric for evaluating generative models, and our performance under this evaluation indicates the generally better quality of object renderings. Precision evaluates the renderings' fidelity, and recall evaluates their diversity. Outperforming baselines in both precision and recall suggest that our model produces higher fidelity of shapes and a more diverse distribution of shapes. This is consistent with the qualitative results in Figure 4, where our method produced sharper and more complex objects while also covering more modes.", + "bbox": [ + 75, + 306, + 472, + 488 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Semantically meaningful interpolation. Figure 5 shows latent space interpolation between pairs of generated neural fields. As shown in prior work [69], smooth interpolation in the latent space of diffusion models can be achieved by interpolation between noise tensors before they are iteratively denoised by the model. As in their method, we sample from our trained model using a deterministic DDIM, and we use spherical interpolation so that the intermediate latent noise retains the same distribution. Our method is capable of smooth latent space interpolation in the generated triplanes and their corresponding neural fields.", + "bbox": [ + 75, + 510, + 468, + 676 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1. Ablation Studies", + "text_level": 1, + "bbox": [ + 76, + 688, + 240, + 702 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We validate the design of our framework by ablating components of our regularization strategies using the cars dataset.", + "bbox": [ + 76, + 712, + 470, + 742 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Explicit density regularization. As discussed by Park et al. [57], the precision of the ground truth decoded meshes is limited by the finite number of point samples guiding the training of the decision boundaries. Because we rely on a limited number of pre-computed coordinate-occupancy pairs to train our triplanes, it is easy to overfit to this limited training set. Even when optimizing a single triplane in isolation (i.e., without learning a generative model), this overfitting manifests in \"floater\" artifacts in the optimized", + "bbox": [ + 75, + 763, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7ccde9f7d3c9b05386492caf1e4072c91ff08a6f9e1056d274d93a0cf11dd2c4.jpg", + "image_caption": [ + "Figure 6. 
Ablation over density regularization. Clear artifacts are visible in the resulting occupancy field without explicit density regularization. In this example, we optimize a single triplane on a single shape." + ], + "image_footnote": [], + "bbox": [ + 504, + 300, + 694, + 450 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/64114b937693119bc137c461458451c0c9d23a9807c5a4fa980cd5a7db6858a0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 314, + 885, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "neural field. Figure 6 shows an example where we fit a single triplane with and without density regularization. Without density regularization, the learned occupancy field contains significant artifacts; with density regularization, the learned occupancy field captures a clean object.", + "bbox": [ + 498, + 549, + 890, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Triplane regularization. Regularization of the triplanes is essential for training a well-behaved diffusion model. Figure 7 compares generated samples produced by our entire framework, with and without regularization terms. If we train only with Equation 2, i.e., without regularization terms, we can optimize a dataset of triplane features and train a diffusion model to generate samples. However, while the surfaces of the optimized shapes will appear real, the triplane features themselves will have many high-frequency artifacts, and these convoluted feature images are a difficult manifold for even a powerful diffusion model to learn. Consequently, generated triplane features produced by a trained diffusion model decode into shapes with significant artifacts. We note that these artifacts are present only in generated samples; shapes directly factored from the ground-truth shapes are artifact-free, even without regularization.", + "bbox": [ + 496, + 643, + 893, + 883 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training with Equation 4 introduces TV, L2, and", + "bbox": [ + 517, + 885, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "20881", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/455982f9569880c9f471fd0d558e2c44254bb6a6d11764e49832bbaab8954a0b.jpg", + "image_caption": [ + "Figure 7. Ablation over regularized triplanes. A generative model trained on unregularized triplanes produces samples with significant artifacts. Effective regularization of triplane features enables training of a generative model that produces shapes without artifacts. Top left: triplane features learned only with Equation 2 contain many high frequency artifacts. Bottom left: a diffusion model trained on these unregularized triplanes fails to produce convincing samples. Top right: triplane features learned with Equation 4 are noticeably smoother. Bottom right: A diffusion model trained on these regularized triplanes produces high-quality shapes." + ], + "image_footnote": [], + "bbox": [ + 83, + 89, + 263, + 265 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c60f75281497649c58e8bb222ebb292080fa3a3368c595a1f50b4347c956e5cf.jpg", + "image_caption": [ + "Figure 8. Failure cases. We observe that our model at times generate axis-aligned artifacts and struggles to account for thin structures, likely caused by the use of a triplane representation." 
+ ], + "image_footnote": [], + "bbox": [ + 285, + 90, + 464, + 263 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/ac7b069d0e3d7e90371fd868e440cfe025e2c0fa43c8e6342c4f1842b31debd4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodFID ↓Precision ↑Recall ↑
No regularization285.81.60.6
Density + TV + L2 Reg.83.649.550.5
", + "bbox": [ + 99, + 431, + 444, + 477 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2. Quantitative results for the ablation on triplane regularization. Our model performs poorly without explicit regularization on the triplanes.", + "bbox": [ + 75, + 482, + 470, + 525 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "density regularizing factors. Triplanes learned with these regularization terms are noticeably smoother, with frequency distributions that more closely align with those found in natural images (see supplement). As we would expect, a diffusion model more readily learns the manifold of regularized triplane features. Samples produced by a diffusion model trained on these regularized shapes decode into convincing and artifact-free shapes.", + "bbox": [ + 75, + 549, + 468, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion", + "text_level": 1, + "bbox": [ + 76, + 686, + 189, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In summary, we introduce a 3D-aware diffusion model that uses a 2D diffusion backbone to generate triplane feature maps, which are assembled into 3D neural fields. Our approach improves the quality and diversity of generated objects over existing 3D-aware generative models by a large margin.", + "bbox": [ + 75, + 712, + 470, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Similarly to other generative methods, training a diffusion model is slow and computationally demanding. Diffusion models, including ours, are also slow to evaluate, whereas GANs, for example, can be evaluated in real-time once trained. Luckily, our method will benefit from improvements to 2D diffusion models in this research area.", + "bbox": [ + 75, + 809, + 472, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6f305f6e728c27e4d5d4c9fd900359c6a2f0b9df74a00411a38f6a4aab854ad2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 92, + 890, + 250 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Slow sampling at inference could be addressed by more efficient samplers [30] and potentially enable real-time synthesis. While a step forward in quality, some of the samples generated by our method suffer from artifacts, as depicted by Fig. 8. Strategies like guidance [13, 27], which trade off diversity for fidelity, may reduce the prevalence of these outliers.", + "bbox": [ + 498, + 330, + 893, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Future Work. We have demonstrated an effective way to generate occupancy fields, but in principle, our approach can be extended to generating any type of neural field that can be represented by a triplane. In particular, triplanes have already been shown to be excellent representations for radiance fields, so it seems natural to extend our diffusion approach to generating NeRFs. While we demonstrate successful results for unconditional generation, conditioning our generative model on text, images, or other input would be an exciting avenue for future work.", + "bbox": [ + 498, + 441, + 890, + 592 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ethical Considerations. Generative models, including ours, could be extended to generate DeepFakes. 
These pose a societal threat, and we do not condone using our work to generate fake images or videos of any person intending to spread misinformation or tarnish their reputation.", + "bbox": [ + 498, + 613, + 890, + 689 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conclusion. 3D-aware object synthesis has many exciting applications in vision and graphics. With our work, which is among the first to connect powerful 2D diffusion models and 3D object synthesis, we take a significant step towards utilizing emerging diffusion models for this goal.", + "bbox": [ + 498, + 709, + 890, + 785 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 799, + 668, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thank Vincent Sitzmann for valuable discussions. This project was in part supported by Samsung, the Stanford Institute for Human-Centered AI (HAI), the Stanford Center for Integrated Facility Engineering (CIFE), NSF RI #2211258, Autodesk, and a PECASE from the ARO.", + "bbox": [ + 498, + 825, + 893, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "20882", + "bbox": [ + 478, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Matan Atzmon and Yaron Lipman. SAL: Sign agnostic learning of shapes from raw data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[2] Miguel Ángel Bautista, Pengsheng Guo, Samira Abnar, Walter Talbott, Alexander Toshev, Zhuoyuan Chen, Laurent Dinh, Shuangfei Zhai, Hanlin Goh, Daniel Ulbricht, Afshin Dehghan, and Josh M. Susskind. GAUDI: A neural architect for immersive 3d scene generation. CoRR, abs/2207.13751, 2022. 1, 3", + "[3] Fausto Bernardini, Joshua Mittleman, Holly E. Rushmeier, Cláudio T. Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Transactions on Visualization and Computer Graphics, 5:349-359, 1999. 6", + "[4] Alexandre Boulch and Renaud Marlet. POCO: point convolution for surface reconstruction. CoRR, abs/2201.01831, 2022. 2", + "[5] Rohan Chabra, Jan Eric Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local SDF priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 2", + "[6] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16123-16133, June 2022. 1, 2, 3", + "[7] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-GAN: Periodic implicit generative adversarial networks for 3D-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[8] Ding-Yun Chen, Xiao-Pei Tian, Edward Yu-Te Shen, and Ming Ouhyoung. On visual similarity based 3d model retrieval. Computer Graphics Forum, 22, 2003. 5", + "[9] Yinbo Chen, Sifei Liu, and Xiaolong Wang. Learning continuous image representation with local implicit image function. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2", + "[10] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[11] Thomas Davies, Derek Nowrouzehrai, and Alec Jacobson. Overfit neural networks as a compact shape representation. arXiv preprint arXiv:2009.09808, 2020. 2", + "[12] Terrance DeVries, Miguel Angel Bautista, Nitish Srivastava, Graham W. Taylor, and Joshua M. Susskind. Unconstrained scene generation with locally conditioned radiance fields. arXiv preprint arXiv:2104.00670, 2021. 2", + "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 8780-8794. Curran Associates, Inc., 2021. 2, 8", + "[14] Emilien Dupont, Hyunjik Kim, S. M. Ali Eslami, Danilo J. Rezende, and Dan Rosenbaum. From data to functa: Your" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "data point is a function and you should treat it like one. CoRR, abs/2201.12204, 2022. 1, 3", + "[15] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 2018. 2", + "[16] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis, 2020. 2", + "[17] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411, 2017. 2", + "[18] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. FastNeRF: High-fidelity neural rendering at 200fps. arXiv preprint arXiv:2103.10380, 2021. 2", + "[19] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3D shape. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[20] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[21] Simon Giebenhain and Bastian Goldlücke. Air-nets: An attention-based framework for locally conditioned implicit representations. In 3DV, pages 1054-1064. IEEE, 2021. 2", + "[22] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems (NeurIPS), 2014. 2", + "[23] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In International Conference on Machine Learning (ICML), 2020. 2", + "[24] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. StyleNeRF: A style-based 3D-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2", + "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[26] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. 
In Advances in Neural Information Processing Systems, volume 33, pages 6840-6851, 2020. 2, 5", + "[27] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 8", + "[28] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, and Thomas Funkhouser. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[29] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. SDFDiff: Differentiable rendering of signed distance fields for 3D shape optimization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[30] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. ArXiv, abs/2206.00364, 2022. 8" + ], + "bbox": [ + 503, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "20883", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[32] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[33] Petr Kellnhofer, Lars Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[34] Adam R Kosiorek, Heiko Strathmann, Daniel Zoran, Pol Moreno, Rosalia Schneider, Sona Mokra, and Danilo Jimenez Rezende. Nerf-vae: A geometry aware 3d scene generative model. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 5742–5752. PMLR, 18–24 Jul 2021. 2", + "[35] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2", + "[36] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[37] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[38] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3D supervision. arXiv preprint arXiv:1911.00767, 2019. 2", + "[39] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. DIST: Rendering deep implicit signed distance function with differentiable sphere tracing. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[40] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Transactions on Graphics (SIGGRAPH), 2019. 2", + "[41] William E Lorensen and Harvey E Cline. 
Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (ToG), 1987. 5", + "[42] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2837-2845, June 2021. 1, 3", + "[43] Julien N.P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. ACORN: Adaptive coordinate networks for neural representation. ACM Transactions on Graphics (SIGGRAPH), 2021. 2", + "[44] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the wild: Neural radiance fields for" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "unconstrained photo collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[45] Ishit Mehta, Michael Gharbi, Connelly Barnes, Eli Shechtman, Ravi Ramamoorthi, and Manmohan Chandraker. Modulated periodic activations for generalizable local functional representations. In ICCV, pages 14194-14203. IEEE, 2021. 2", + "[46] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6351-6361, October 2021. 2", + "[47] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3", + "[48] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[49] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3", + "[50] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards Real-Time Rendering of Compact Neural Radiance Fields using Depth Oracle Networks. Computer Graphics Forum, 40(4), 2021. 2", + "[51] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019. 2", + "[52] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8162-8171. PMLR, 18-24 Jul 2021. 2, 5", + "[53] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11453-11464, June 2021. 2", + "[54] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. 
In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[55] Michael Oechsle, Songyou Peng, and Andreas Geiger. UNISURF: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[56] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf:" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "20884", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13503-13513, June 2022. 2", + "[57] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3, 7", + "[58] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision (ECCV), 2020. 2", + "[59] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. arXiv preprint arXiv:2011.13961, 2020. 2", + "[60] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2", + "[61] Daniel Rebain, Mark Matthews, Kwang Moo Yi, Dmitry Lagun, and Andrea Tagliasacchi. Lolnerf: Learn from one look. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1558-1567, June 2022. 2", + "[62] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[63] Mehdi S. M. Sajjadi, Olivier Bachem, Mario Lucic, Olivier Bousquet, and Sylvain Gelly. Assessing generative models via precision and recall. In NeurIPS, 2018. 5", + "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: Generative radiance fields for 3D-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[65] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 3", + "[66] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deep Voxels: Learning persistent 3D feature embeddings. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[67] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3D-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2", + "[68] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. 
In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 2256-2265, Lille, France, 07-09 Jul 2015. PMLR. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[69] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 7", + "[70] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2", + "[71] Yang Song and Stefano Ermon. Improved techniques for training score-based generative models. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 12438-12448. Curran Associates, Inc., 2020. 2", + "[72] Pratul P. Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T. Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[73] Towaki Takikawa, Joey Litalien, Kangxue Yin, Karsten Kreis, Charles Loop, Derek Nowrouzezahrai, Alec Jacobson, Morgan McGuire, and Sanja Fidler. Neural geometric level of detail: Real-time rendering with implicit 3D shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[74] Ayush Tewari, Justus Thies, Ben Mildenhall, Pratul Srinivasan, Edgar Tretschk, W Yifan, Christoph Lassner, Vincent Sitzmann, Ricardo Martin-Brualla, Stephen Lombardi, et al. Advances in neural rendering. In Computer Graphics Forum, volume 41, pages 703-735. Wiley Online Library, 2022. 2", + "[75] Hoang Thanh-Tung and Truyen Tran. Catastrophic forgetting and mode collapse in gans. In IJCNN, pages 1-10. IEEE, 2020. 2", + "[76] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 11287-11302. Curran Associates, Inc., 2021. 2", + "[77] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In Maria Florina Balcan and Kilian Q. Weinberger, editors, Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pages 1747-1756, New York, New York, USA, 20-22 Jun 2016. PMLR. 2", + "[78] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 2", + "[79] Jiajun Wu, Chengkai Zhang, Tianfan Xue, Bill Freeman, and Josh Tenenbaum. Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 
2", + "[80] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin," + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "20885", + "bbox": [ + 478, + 945, + 517, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Comput. Graph. Forum, 41(2):641-676, 2022. 2", + "[81] Guangming Yao, Hongzhi Wu, Yi Yuan, and Kun Zhou. Dd-nerf: Double-diffusion neural radiance field as a generalizable implicit body representation. arXiv preprint arXiv:2112.12390, 2021. 3", + "[82] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[83] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[84] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[85] Xin-Yang Zheng, Yang Liu, Peng-Shuai Wang, and Xin Tong. Sdf-stylegan: Implicit sdf-based stylegan for 3d shape generation. CoRR, abs/2206.12055, 2022. 2, 5, 6", + "[86] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5826-5835, October 2021. 1, 3, 5, 6", + "[87] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. CIPS-3D: A 3D-Aware Generator of GANs Based on Conditionally-Independent Pixel Synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + ], + "bbox": [ + 78, + 90, + 470, + 515 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "20886", + "bbox": [ + 478, + 945, + 519, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_model.json b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0e8e5e9700a7e0e208db14d57b74e931011a5c38 --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_model.json @@ -0,0 +1,2523 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.221, + 0.131, + 0.75, + 0.154 + ], + "angle": 0, + "content": "3D Neural Field Generation using Triplane Diffusion" + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.193, + 0.867, + 0.245 + ], + "angle": 0, + "content": "J. 
Ryan Shue\\* Eric Ryan Chan\\*2 Ryan Po\\*2 Zachary Ankner\\*3,4 Jiajun Wu\\*2 Gordon Wetzstein\\*2 Milton Academy 2Stanford University 3Massachusetts Institute of Technology 4MosaicML" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.287, + 0.314, + 0.304 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.319, + 0.474, + 0.562 + ], + "angle": 0, + "content": "Diffusion models have emerged as the state-of-the-art for image generation, among other tasks. Here, we present an efficient diffusion-based model for 3D-aware generation of neural fields. Our approach pre-processes training data, such as ShapeNet meshes, by converting them to continuous occupancy fields and factoring them into a set of axis-aligned triplane feature representations. Thus, our 3D training scenes are all represented by 2D feature planes, and we can directly train existing 2D diffusion models on these representations to generate 3D neural fields with high quality and diversity, outperforming alternative approaches to 3D-aware generation. Our approach requires essential modifications to existing triplane factorization pipelines to make the resulting features easy to learn for the diffusion model. We demonstrate state-of-the-art results on 3D generation on several object classes from ShapeNet." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.604, + 0.208, + 0.619 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.471, + 0.734 + ], + "angle": 0, + "content": "Diffusion models have seen rapid progress, setting state-of-the-art (SOTA) performance across a variety of image generation tasks. While most diffusion methods model 2D images, recent work [2, 14, 42, 86] has attempted to develop denoising methods for 3D shape generation. These 3D diffusion methods operate on discrete point clouds and, while successful, exhibit limited quality and resolution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.856 + ], + "angle": 0, + "content": "In contrast to 2D diffusion, which directly leverages the image as the target for the diffusion process, it is not directly obvious how to construct such 2D targets in the case of 3D diffusion. Interestingly, recent work on 3D-aware generative adversarial networks (GANs) (see Sec. 2 for an overview) has demonstrated impressive results for 3D shape generation using 2D generators. We build upon this idea of learning to generate triplane representations [6] that encode 3D scenes or" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.288, + 0.891, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.51, + 0.896, + 0.568 + ], + "angle": 0, + "content": "Figure 1. Our method leverages existing 2D diffusion models for 3D shape generation using hybrid explicit-implicit neural representations. Top: triplane-based 3D shape diffusion process using our framework. Bottom: Interpolation between generated shapes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.596, + 0.893, + 0.656 + ], + "angle": 0, + "content": "radiance fields as a set of axis-aligned 2D feature planes. The structure of a triplane is analogous to that of a 2D image and can be used as part of a 3D generative method that leverages conventional 2D generator architectures." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.658, + 0.895, + 0.884 + ], + "angle": 0, + "content": "Inspired by recent efforts in designing efficient 3D GAN architectures, we introduce a neural field-based diffusion framework for 3D representation learning. Our approach follows a two-step process. In the first step, a training set of 3D scenes is factored into a set of per-scene triplane features and a single, shared feature decoder. In the second step, a 2D diffusion model is trained on these triplanes. The trained diffusion model can then be used at inference time to generate novel and diverse 3D scenes. By interpreting triplanes as multi-channel 2D images and thus decoupling generation from rendering, we can leverage current (and likely future) SOTA 2D diffusion model backbones nearly out of the box. Fig. 1 illustrates how a single object is generated with our framework (top), and how two generated objects—even with different topologies—can be interpolated (bottom)." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.768, + 0.9 + ], + "angle": 0, + "content": "Our core contributions are as follows:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.863, + 0.409, + 0.901 + ], + "angle": 0, + "content": "*Equal contribution. \nPart of the work was done during an internship at Stanford. \nProject page: https://jryanshue.com/nfd" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20875" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.089, + 0.87, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.412, + 0.895, + 0.456 + ], + "angle": 0, + "content": "Figure 2. Visualization of the denoising process. Here, we show examples of triplanes as they are iteratively denoised at inference, as well as the shapes we obtain by \"decoding\" the noisy triplanes with our jointly-learned MLP. By interpreting triplane features simply as multi-channel feature images, we build our framework around 2D diffusion models." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.481, + 0.47, + 0.525 + ], + "angle": 0, + "content": "- We introduce a generative framework for diffusion on 3D scenes that utilizes 2D diffusion model backbones and has a built-in 3D inductive bias." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.541, + 0.47, + 0.586 + ], + "angle": 0, + "content": "- We show that our approach is capable of generating both high-fidelity and diverse 3D scenes that outperform state-of-the-art 3D GANs." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.481, + 0.47, + 0.586 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.602, + 0.218, + 0.618 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.474, + 0.903 + ], + "angle": 0, + "content": "Neural fields. Implicit neural representations, or neural fields, hold the SOTA for 3D scene representation [74, 80]. They either solely learn geometry [1, 4, 5, 10, 11, 15, 21, 23, 45, 47, 48, 57, 65, 73] or use posed images to jointly optimize geometry and appearance [6, 7, 18, 25, 29, 33, 36–39, 44, 49, 50, 54, 55, 59, 67, 72, 82–84]. Neural fields represent scenes as continuous functions, allowing them to scale well with scene complexity compared to their discrete counterparts [40, 66]. 
Initial methods used a single, large multilayer perceptron (MLP) to represent entire scenes [10, 47, 49, 57, 65], but reconstruction with this approach can be computationally inefficient because training such a representation requires thousands of forward passes through the large model per scene. Recent years have shown a trend towards locally conditioned representations, which either learn local functions [5, 9, 28, 62] or locally modulate a shared function with a hybrid explicit-implicit representation [4, 6, 12, 19–21, 37, 43, 45, 58]. These methods use small MLPs, which are efficient during inference and significantly" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.48, + 0.895, + 0.572 + ], + "angle": 0, + "content": "better at capturing local scene details. We adopt the expressive hybrid triplane representation introduced by Chan et al. [6]. Triplanes are efficient, scaling with the surface area rather than volume, and naturally integrate with expressive, fine-tuned 2D generator architectures. We modify the triplane representation for compatibility with our denoising framework." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.895, + 0.734 + ], + "angle": 0, + "content": "Generative synthesis in 2D and 3D. Some of the most popular generative models include GANs [22, 31, 32], autoregressive models [16, 60, 77, 78], score matching models [68, 70, 71], and denoising diffusion probabilistic models (DDPMs) [13, 26, 52, 76]. DDPMs are arguably the SOTA approach for synthesizing high-quality and diverse 2D images [13]. Moreover, GANs can be difficult to train and suffer from issues like mode collapse [75] whereas diffusion models train stably and have been shown to better capture the full training distribution." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.896, + 0.901 + ], + "angle": 0, + "content": "In 3D, however, GANs still outperform alternative generative approaches [6,7,17,24,34,35,37,46,51,53,56, 61,64,79,85,87]. Some of the most successful 3D GANs use an expressive 2D generator backbone (e.g., StyleGAN2 [32]) to synthesize triplane representations which are then decoded with a small, efficient MLP [6]. Because the decoder is small and must generalize across many local latents, these methods assign most of their expressiveness to the powerful backbone. In addition, these methods treat the triplane as a multi-channel image, allowing the generator backbone to be used almost out of the box." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20876" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.332 + ], + "angle": 0, + "content": "Current 3D diffusion models [2, 14, 42, 81, 86] are still very limited. They either denoise a single latent or do not utilize neural fields at all, opting for a discrete point-cloud-based approach. For example, concurrently developed single-latent approaches [2, 14] generate a global latent for conditioning the neural field, relying on a 3D decoder to transform the scene representation from 1D to 3D without directly performing 3D diffusion. As a result, the diffusion model does not actually operate in 3D, losing this important inductive bias and generating blurry results. Point-cloud-based approaches [42, 86], on the other hand, give the diffusion model explicit 3D control over the shape, but limit its resolution and scalability due to the coarse discrete representation. 
While showing promise, both 1D-to-3D and point cloud diffusion approaches require specific architectures that cannot easily leverage recent advances in 2D diffusion models." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.333, + 0.474, + 0.471 + ], + "angle": 0, + "content": "In our work, we propose to directly generate triplanes with out-of-the-box SOTA 2D diffusion models, granting the diffusion model near-complete control over the generated neural field. Key to our approach is our treatment of well-fit triplanes in a shared latent space as ground truth data for training our diffusion model. We show that the latent space of these triplanes is grounded spatially in local detail, giving the diffusion model a critical inductive bias for 3D generation. Our approach gives rise to an expressive 3D diffusion model." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.482, + 0.358, + 0.499 + ], + "angle": 0, + "content": "3. Triplane Diffusion Framework" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.507, + 0.472, + 0.658 + ], + "angle": 0, + "content": "Here, we explain the architecture of our neural field diffusion (NFD) model for 3D shapes. In Section 3.1, we explain how we can represent the occupancy field of a single object using a triplane. In Section 3.2, we describe how we can extend this framework to represent an entire dataset of 3D objects. In Section 3.3, we describe the regularization techniques that we found necessary to achieve optimal results. Finally, Sections 3.4 and 3.5 illustrate training and sampling from our model. For an overview of the pipeline at inference, see Figure 3." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.666, + 0.433, + 0.683 + ], + "angle": 0, + "content": "3.1. Representing a 3D Scene using a Triplane" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.47, + 0.87 + ], + "angle": 0, + "content": "Neural fields have been introduced as continuous and expressive 3D scene representations. In this context, a neural field \\(\\mathrm{NF}:\\mathbb{R}^3\\to \\mathbb{R}^M\\) is a neural network-parameterized mapping function that takes as input a three-dimensional coordinate \\(\\mathbf{x}\\) and that outputs an \\(M\\)-dimensional vector representing the neural field. Neural fields have been demonstrated for occupancy fields [47], signed distance functions [57], radiance fields [49], among many other types of signals [65]. For the remainder of this work, we focus on 3D scene representations using occupancy fields such that the output of the neural field is a binary value, indicating whether a coordinate is inside or outside an object and \\(M = 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.473, + 0.903 + ], + "angle": 0, + "content": "The triplane representation is a hybrid explicit-implicit network architecture for neural fields that is particularly effi" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.259 + ], + "angle": 0, + "content": "cient to evaluate [6]. This representation uses three 2D feature planes \\(\\mathbf{f}_{xy},\\mathbf{f}_{xz},\\mathbf{f}_{yz}\\in \\mathbb{R}^{N\\times N\\times C}\\) with a spatial resolution of \\(N\\times N\\) and \\(C\\) feature channels each, and a multilayer perceptron (MLP) \"decoder\" tasked with interpreting features sampled from the planes. 
A 3D coordinate is queried by projecting it onto each of the axis-aligned planes (i.e., the \\(x - y,x - z,\\) and \\(y - z\\) planes), querying and aggregating the respective features, and decoding the resulting feature using a lightweight \\(\\mathrm{MLP}_{\\phi}\\) with parameters \\(\\phi\\). Similar to Chan et al. [6], we found the sum to be an efficient feature aggregation function, resulting in the following formulation for the triplane architecture:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.273, + 0.895, + 0.291 + ], + "angle": 0, + "content": "\\[\n\\mathrm {N F} (\\mathbf {x}) = \\operatorname {M L P} _ {\\phi} \\left(\\mathbf {f} _ {x y} (\\mathbf {x}) + \\mathbf {f} _ {y z} (\\mathbf {x}) + \\mathbf {f} _ {x z} (\\mathbf {x})\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.305, + 0.892, + 0.336 + ], + "angle": 0, + "content": "The feature planes and MLP can be jointly optimized to represent the occupancy field of a shape." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.35, + 0.892, + 0.367 + ], + "angle": 0, + "content": "3.2. Representing a Class of Objects with Triplanes" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.375, + 0.895, + 0.587 + ], + "angle": 0, + "content": "We aim to convert our dataset of shapes into a dataset of triplanes so that we can train a diffusion model on these learned feature planes. However, because the MLP and feature planes are typically jointly learned, we cannot simply train a triplane for each object of the dataset individually. If we did, the MLP's corresponding to each object in our dataset would fail to generalize to triplanes generated by our diffusion model. Therefore, instead of training triplanes for each object in isolation, we jointly optimize the feature planes for many objects simultaneously, along with a decoder that is shared across all objects. This joint optimization results in a dataset of optimized feature planes and an MLP capable of interpreting any triplane from the dataset distribution. Thus, at inference, we can use this MLP to decode feature planes generated by our model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.589, + 0.894, + 0.734 + ], + "angle": 0, + "content": "In practice, during training, we are given a dataset of \\(I\\) objects, and we preprocess the coordinates and ground-truth occupancy values of \\(J\\) points per object. Typically, \\(J = 10\\mathrm{M}\\) where 5M points are sampled uniformly throughout the volume and 5M points are sampled near the object surface. 
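Before turning to the training objective, a minimal, illustrative PyTorch sketch of the triplane lookup in Equation 1 may be useful: each 3D point is projected onto the three axis-aligned planes, the bilinearly sampled features are summed, and a small shared MLP decodes the result into an occupancy value. The plane resolution, channel count, and MLP width below are assumptions for the example, not the paper's hyperparameters.

```python
# Illustrative sketch, not the authors' implementation: querying occupancy from a triplane (Eq. 1).
import torch
import torch.nn.functional as F

N, C = 128, 32
planes = {k: torch.randn(1, C, N, N) for k in ("xy", "xz", "yz")}  # f_xy, f_xz, f_yz
mlp = torch.nn.Sequential(torch.nn.Linear(C, 64), torch.nn.ReLU(), torch.nn.Linear(64, 1))

def sample_plane(plane, coords2d):
    # coords2d in [-1, 1]^2 with shape (P, 2); grid_sample expects a (1, H, W, 2) grid
    grid = coords2d.view(1, -1, 1, 2)
    feat = F.grid_sample(plane, grid, mode="bilinear", align_corners=True)  # (1, C, P, 1)
    return feat.view(C, -1).t()  # (P, C)

def query_occupancy(x):
    # x: (P, 3) coordinates in [-1, 1]^3, projected onto the xy, xz, and yz planes
    f = (sample_plane(planes["xy"], x[:, [0, 1]])
         + sample_plane(planes["xz"], x[:, [0, 2]])
         + sample_plane(planes["yz"], x[:, [1, 2]]))
    return torch.sigmoid(mlp(f))  # per-point occupancy in (0, 1)

print(query_occupancy(torch.rand(5, 3) * 2 - 1).shape)  # torch.Size([5, 1])
```

Summation is used as the aggregation here to mirror Equation 1; the sampled features could instead be concatenated, but that would change the decoder's input width.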
Our naive training objective is a simple \\(L2\\) loss between predicted occupancy values \\(\\mathrm{NF}^{(i)}(\\mathbf{x}_j^{(i)})\\) and ground-truth occupancy values \\(\\mathrm{O}_j^{(i)}\\) for each point, where \\(\\mathbf{x}_j^{(i)}\\) denotes the \\(j^{\\mathrm{th}}\\) point from the \\(i^{\\mathrm{th}}\\) scene:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.75, + 0.895, + 0.793 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {N A I V E}} = \\sum_ {i} ^ {I} \\sum_ {j} ^ {J} \\left\\| \\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right) - \\mathrm {O} _ {j} ^ {(i)} \\right\\| _ {2} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.806, + 0.892, + 0.852 + ], + "angle": 0, + "content": "During training, we optimize Equation 2 for a shared MLP parameterized by \\(\\phi\\), as well as the feature planes corresponding to every object in our dataset:" + }, + { + "type": "equation", + "bbox": [ + 0.559, + 0.866, + 0.895, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\} = \\underset {\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\}} {\\operatorname {a r g m i n}} \\mathcal {L} _ {\\mathrm {N A I V E}} \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20877" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.195, + 0.1, + 0.885, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.27, + 0.895, + 0.327 + ], + "angle": 0, + "content": "Figure 3. Pipeline. Sampling a 3D neural field from our model consists of two decoupled processes: 1) using a trained DDPM to iteratively denoise latent noise into feature maps and 2) using a locally conditioned Occupancy Network to decode the resulting triplane into the final neural field. This architecture allows the DDPM to generate samples with a 3D inductive bias while utilizing existing 2D DDPM backbones and a continuous output representation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.352, + 0.471, + 0.383 + ], + "angle": 0, + "content": "3.3. Regularizing Triplanes for Effective Generalization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.394, + 0.47, + 0.485 + ], + "angle": 0, + "content": "Following the procedure outlined in the previous section, we can learn a dataset of triplane features and a shared triplane decoder; we can then train a diffusion model on these triplane features and sample novel shapes at inference. Unfortunately, the result of this naive training procedure is a generative model for triplanes that produces shapes with significant artifacts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.487, + 0.471, + 0.638 + ], + "angle": 0, + "content": "We find it necessary to regularize the triplane features during optimization to simplify the data manifold that the diffusion model must learn. Therefore, we include total variation (TV) regularization terms with weight \\(\\lambda_{1}\\) in the loss function to ensure that the feature planes of each training scene do not contain spurious high-frequency information. This strategy makes the distribution of triplane features more similar to the manifold of natural images (see supplement), which we found necessary to robustly train a diffusion model on them (see Sec. 4)." 
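As an illustration of the TV term just described, the sketch below implements one common variant of total variation (summed squared differences between neighboring texels) over the three feature planes; the exact variant and weighting used in the paper may differ, and all names are assumptions.

```python
# Hypothetical total-variation penalty on triplane features; one common variant,
# not necessarily the exact form used in the paper.
import torch

def tv_loss(plane: torch.Tensor) -> torch.Tensor:
    # plane: (C, N, N) feature plane; penalize differences between adjacent texels.
    dh = plane[:, 1:, :] - plane[:, :-1, :]
    dw = plane[:, :, 1:] - plane[:, :, :-1]
    return (dh ** 2).mean() + (dw ** 2).mean()

def triplane_tv(planes: torch.Tensor) -> torch.Tensor:
    # planes: (3, C, N, N) holding f_xy, f_xz, f_yz; sum the per-plane penalties.
    return sum(tv_loss(p) for p in planes)
```

This penalty would be added to the occupancy reconstruction loss with a small weight (the λ1 of the text) during triplane fitting.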
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.47, + 0.733 + ], + "angle": 0, + "content": "While the trained feature values are unbounded, our DDPM backbone requires training inputs with values in the range [-1,1]. We address this by normalizing the feature planes before training, but this process is sensitive to outliers. As a result, we include an L2 regularization term on the triplane features with weight \\(\\lambda_{2}\\) to discourage outlying values." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.902 + ], + "angle": 0, + "content": "We also include an explicit density regularization (EDR) term. Due to our ground-truth occupancy data being concentrated on the surface of the shapes, there is often insufficient data to learn a smooth outside-of-shape volume. Our EDR term combats this issue by sampling a set of random points from the volume, offsetting the points by a random vector \\(\\omega\\), feeding both sets through the MLP, and calculating the mean squared error. Notationally, this term can be represented as \\( \\mathrm{EDR}(\\mathrm{NF}(\\mathbf{x}), \\omega) = \\| \\mathrm{NF}(\\mathbf{x}) - \\mathrm{NF}(\\mathbf{x} + \\omega) \\|_2^2 \\). We find this term necessary to remove floating artifacts in the volume (see Sec. 4)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.353, + 0.895, + 0.382 + ], + "angle": 0, + "content": "Our training objective, with added regularization terms, is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.392, + 0.893, + 0.52 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} = \\sum_ {i} ^ {I} \\sum_ {j} ^ {J} \\operatorname {B C E} \\left(\\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right), \\mathrm {O} _ {j} ^ {(i)}\\right) \\\\ + \\lambda_ {1} \\left(\\operatorname {T V} \\left(\\mathbf {f} _ {x y} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {x z} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {y z} ^ {(i)}\\right)\\right) \\\\ + \\lambda_ {2} \\left(\\left| \\left| \\mathbf {f} _ {x y} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {y z} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {x z} ^ {(i)} \\right| \\right| _ {2}\\right) \\\\ + \\operatorname {E D R} \\left(\\mathrm {N F} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right), \\boldsymbol {\\omega}\\right) \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.528, + 0.892, + 0.558 + ], + "angle": 0, + "content": "3.4. Training a Diffusion Model for Triplane Features" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.895, + 0.733 + ], + "angle": 0, + "content": "For unconditional generation, a diffusion model takes Gaussian noise as input and gradually denoises it in \\(T\\) steps. In our framework, the diffusion model operates on triplane features \\(\\mathbf{f}_{0\\dots T}\\in \\mathbb{R}^{N\\times N\\times 3C}\\) that stack the feature channels of all three triplane axes into a single image. In this notation, \\(\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{f}_T;0,\\mathbf{I})\\) is the triplane feature image consisting of purely Gaussian noise, and \\(\\mathbf{f}_0\\sim q(\\mathbf{f}_0)\\) is a random sample drawn from the data distribution. The data distribution in our framework includes the pre-factored triplanes of the training set, normalized by the mean and variance of the entire dataset such that each channel has a zero mean and a standard deviation of 0.5."
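A minimal sketch of this stacking and per-channel normalization step, under assumed tensor shapes; the statistics are retained so that generated samples can later be denormalized. Names are illustrative, not the released code.

```python
# Hypothetical preprocessing of fitted triplanes into DDPM training "images",
# following the normalization described above (zero mean, 0.5 standard deviation per channel).
import torch

def stack_and_normalize(triplanes: torch.Tensor):
    # triplanes: (num_objects, 3, C, N, N) feature planes fitted as in Sec. 3.2/3.3.
    B, _, C, N, _ = triplanes.shape
    images = triplanes.reshape(B, 3 * C, N, N)           # stack plane channels: (B, 3C, N, N)
    mean = images.mean(dim=(0, 2, 3), keepdim=True)      # dataset-wide per-channel statistics
    std = images.std(dim=(0, 2, 3), keepdim=True)
    normalized = 0.5 * (images - mean) / (std + 1e-8)    # zero mean, std 0.5 per channel
    return normalized, mean, std                         # keep stats for denormalization at inference
```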
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.734, + 0.895, + 0.78 + ], + "angle": 0, + "content": "The forward or diffusion process is a Markov chain that gradually adds Gaussian noise to the triplane features, according to a variance schedule \\(\\beta_{1},\\beta_{2},\\dots,\\beta_{T}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.79, + 0.893, + 0.816 + ], + "angle": 0, + "content": "\\[\nq \\left(\\mathbf {f} _ {t} \\mid \\mathbf {f} _ {t - 1}\\right) = \\mathcal {N} \\left(\\mathbf {f} _ {t}; \\sqrt {1 - \\beta_ {t}} \\mathbf {f} _ {t - 1}, \\beta_ {t} \\mathbf {I}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.892, + 0.872 + ], + "angle": 0, + "content": "This forward process can be directly sampled at step \\(t\\) using the closed-form solution \\(q(\\mathbf{f}_t|\\mathbf{f}_0) = \\mathcal{N}(\\mathbf{f}_t;\\sqrt{\\bar{\\alpha}_t}\\mathbf{f}_0,(1 - \\bar{\\alpha}_t)\\mathbf{I})\\), where \\(\\bar{\\alpha}_{t} = \\prod_{s = 1}^{t}\\alpha_{s}\\) with \\(\\alpha_{t} = 1 - \\beta_{t}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.872, + 0.892, + 0.902 + ], + "angle": 0, + "content": "The goal of training a diffusion model is to learn the reverse process. For this purpose, a function approximator" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20878" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.473, + 0.184 + ], + "angle": 0, + "content": "\\(\\epsilon_{\\theta}\\) is needed that predicts the noise \\(\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\) from its noisy input. Typically, this function approximator is implemented as a variant of a convolutional neural network defined by its parameters \\(\\theta\\). Following [26], we train our triplane diffusion model by optimizing the simplified variant of the variational bound on negative log-likelihood:" + }, + { + "type": "equation", + "bbox": [ + 0.098, + 0.193, + 0.472, + 0.22 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {D D P M}} = \\mathbb {E} _ {t, \\mathbf {f} _ {0}, \\epsilon} \\left[ \\left\\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\sqrt {\\bar {\\alpha} _ {t}} \\mathbf {f} _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\boldsymbol {\\epsilon}, t\\right) \\right\\| ^ {2} \\right], \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.229, + 0.387, + 0.245 + ], + "angle": 0, + "content": "where \\(t\\) is sampled uniformly between 1 and \\(T\\)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.253, + 0.321, + 0.27 + ], + "angle": 0, + "content": "3.5. Sampling Novel 3D Shapes" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.277, + 0.47, + 0.322 + ], + "angle": 0, + "content": "The unconditional generation of shapes at inference is a two-stage process that involves sampling a triplane from the trained diffusion model and then querying the neural field." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.322, + 0.47, + 0.383 + ], + "angle": 0, + "content": "Sampling a triplane from the diffusion model is identical to sampling an image from a diffusion model. 
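For concreteness, the following hedged sketch shows a standard DDPM ancestral-sampling loop of this kind; its update step corresponds to Eq. (7), given next. The noise predictor `eps_model` and the variance schedule `betas` are assumed inputs, and this is not the authors' released code.

```python
# Hypothetical sketch of stage one of inference: sampling a normalized triplane "image"
# with a trained noise predictor. The update below follows the standard DDPM rule (Eq. (7)).
import torch

@torch.no_grad()
def sample_triplane(eps_model, betas, shape):
    # betas: (T,) variance schedule; shape: e.g. (1, 3*C, N, N) for a stacked triplane image.
    alphas = 1.0 - betas
    alpha_bars = torch.cumprod(alphas, dim=0)
    f = torch.randn(shape)                                   # f_T ~ N(0, I)
    for t in reversed(range(len(betas))):
        eps = eps_model(f, torch.tensor([t]))                # predicted noise at step t
        coef = (1.0 - alphas[t]) / torch.sqrt(1.0 - alpha_bars[t])
        f = (f - coef * eps) / torch.sqrt(alphas[t])
        if t > 0:                                            # no noise is added at the final step
            f = f + torch.sqrt(betas[t]) * torch.randn_like(f)
    return f                                                  # normalized triplane image f_0
```

The resulting sample would then be denormalized with the dataset statistics, split into the three planes, and decoded by the shared MLP, as described below.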
Beginning with a random Gaussian noise \\(\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\), we iteratively denoise the sample in \\(T\\) steps as" + }, + { + "type": "equation", + "bbox": [ + 0.132, + 0.393, + 0.472, + 0.428 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {t - 1} = \\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\mathbf {f} _ {t} - \\frac {1 - \\alpha_ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {f} _ {t}, t)\\right) + \\sigma_ {t} \\boldsymbol {\\epsilon}, \\qquad (7)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.438, + 0.47, + 0.468 + ], + "angle": 0, + "content": "where \\(\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})\\) for all but the very last step (i.e., \\(t = 1\\)), at which \\(\\epsilon = 0\\) and \\(\\sigma_t^2 = \\beta_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.468, + 0.47, + 0.558 + ], + "angle": 0, + "content": "The result of the denoising process, \\(\\mathbf{f}_0\\), is a sample from the normalized triplane feature image distribution. Denormalizing it using the dataset normalization statistics and splitting the generated features into the axis aligned planes \\(\\mathbf{f}_{xy}, \\mathbf{f}_{yz}, \\mathbf{f}_{xz}\\) yields a set of triplane features which, when combined with the pre-trained MLP, are used to query the neural field." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.559, + 0.47, + 0.633 + ], + "angle": 0, + "content": "We use the marching cubes algorithm [41] to extract meshes from the resulting neural fields. Note that our framework is largely agnostic to the diffusion backbone used; we choose to use ADM [52], a 2D state-of-the-art diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.634, + 0.47, + 0.664 + ], + "angle": 0, + "content": "Source code and pre-trained models will be made available." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.678, + 0.208, + 0.695 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.703, + 0.471, + 0.855 + ], + "angle": 0, + "content": "Datasets. To compare NFD against existing 3D generative methods, we train our model on three object categories from the ShapeNet dataset individually. Consistent with previous work [85, 86], we choose the categories: cars, chairs and airplanes. Each mesh is normalized to lie within \\([-1,1]^3\\) and then passed through watertighting. The generation of ground truth triplanes then works as follows: we precompute the occupancies of 10M points per object, where 5M points are distributed uniformly at random in the volume, and 5M points are sampled within a 0.01 distance from the mesh surface." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.089, + 0.888, + 0.257 + ], + "angle": 0, + "content": "
Data | Method | FID ↓ | Precision ↑ | Recall ↑
Cars | PVD¹ | 335.8 | 0.1 | 0.2
Cars | SDF-StyleGAN | 98.0 | 35.9 | 36.2
Cars | NFD (Ours) | 83.6 | 49.5 | 50.5
Chairs | PVD | 305.8 | 0.2 | 1.7
Chairs | SDF-StyleGAN | 36.5 | 90.9 | 87.4
Chairs | NFD (Ours) | 26.4 | 92.4 | 94.8
Planes | PVD | 244.4 | 2.7 | 3.8
Planes | SDF-StyleGAN | 65.8 | 64.5 | 72.8
Planes | NFD (Ours) | 32.4 | 70.5 | 81.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.267, + 0.894, + 0.338 + ], + "angle": 0, + "content": "Table 1. Render quality metrics on ShapeNet. We achieve state-of-the-art FID, which measures overall quality, as well as state-of-the-art precision and recall, which measure fidelity and diversity independently. Metrics calculated on shaded renderings of generated and ground-truth shapes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.364, + 0.894, + 0.485 + ], + "angle": 0, + "content": "We then train an MLP jointly with as many triplanes as we can fit in the GPU memory of a single A6000 GPU. In our case, we initially train on the first 500 objects in the dataset. After this initial joint optimization, we freeze the shared MLP and use it to optimize the triplanes of the remaining objects in the dataset. All triplanes beyond the first 500 are optimized individually with the same shared MLP; thus, the training of these triplanes can be effectively parallelized." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.895, + 0.67 + ], + "angle": 0, + "content": "Evaluation metrics. As in [85], we choose to evaluate our model using an adapted version of Fréchet inception distance (FID) that utilizes rendered shading images of our generated meshes. Shading-image FID [85] overcomes limitations of other mesh-based evaluation metrics such as the lightfield-descriptor (LFD) [8] by taking human perception into consideration. Zheng et al. [85] provide a detailed discussion of the various evaluation metrics for 3D generative models. Following the method of [85], shading images of each shape are rendered from 20 distinct views; FID is then computed for each view and averaged to obtain a final score:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.681, + 0.892, + 0.734 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F I D} = \\frac {1}{2 0} \\sum_ {i = 1} ^ {2 0} \\left[ \\| \\mu_ {g} ^ {i} - \\mu_ {r} ^ {i} \\| ^ {2} + \\operatorname {T r} \\left(\\Sigma_ {g} ^ {i} + \\Sigma_ {r} ^ {i} - 2 \\left(\\Sigma_ {r} ^ {i} \\Sigma_ {g} ^ {i}\\right) ^ {\\frac {1}{2}}\\right) \\right], \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.894, + 0.78 + ], + "angle": 0, + "content": "where \\(g\\) and \\(r\\) represent the generated and training datasets, while \\(\\mu^i,\\Sigma^i\\) represent the mean and covariance matrices for shading images rendered from the \\(i^{\\mathrm{th}}\\) view, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Along with FID, we also report precision and recall scores using the method proposed by Sajjadi et al. [63]. While FID correlates well with perceived image quality, the one-dimensional nature of the metric prevents it from identifying different failure modes. Sajjadi et al. [63] aim to disentangle FID into separate metrics known as precision and recall, where the former correlates to the quality of the generated images and the latter represents the diversity of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.863, + 0.47, + 0.902 + ], + "angle": 0, + "content": "¹As PVD outputs point clouds, we apply the ball pivoting algorithm (BPA) to PVD outputs before calculating FID. BPA was selected as it achieved a good balance between speed and quality." 
+ }, + { + "type": "footer", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "20879" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.093, + 0.892, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.486, + 0.893, + 0.529 + ], + "angle": 0, + "content": "Figure 4. We compare 3D shapes generated by our model against generations of state-of-the-art baselines for ShapeNet Cars, Chairs, and Planes. Our model synthesizes shapes with noticeably sharper details than the previous state-of-the-art, while also capturing the broad diversity in each category." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.555, + 0.221, + 0.57 + ], + "angle": 0, + "content": "the generative model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.592, + 0.471, + 0.729 + ], + "angle": 0, + "content": "Baselines. We compare our method against state-of-the-art point-based and neural-field-based 3D generative models, namely PVD [86] and SDF-StyleGAN [85]. For evaluation, we use the pre-trained models for both methods on the three ShapeNet categories listed above. Note that PVD is inherently a point-based generative method and therefore does not output a triangle mesh needed for shading image rendering. To circumvent this, we choose to convert generated point clouds to triangle meshes using the ball-pivoting algorithm [3]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.472, + 0.902 + ], + "angle": 0, + "content": "Results. We provide qualitative results, comparing samples generated by our method to samples generated by baselines, in Figure 4. Our method generates a diverse and finely detailed collection of objects. Objects produced by our method contain sharp edges and features that we would expect to be difficult to accurately reconstruct—note that delicate features, such as the suspension of cars, the slats in chairs, and armaments of planes, are faithfully generated. Perhaps more importantly, samples generated by our model are diverse—our model successfully synthesizes many different types of cars, chairs," + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.555, + 0.892, + 0.586 + ], + "angle": 0, + "content": "and planes, including reproductions of several varieties that we would expect to be rare in the training dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.589, + 0.892, + 0.664 + ], + "angle": 0, + "content": "In comparison, while \\( PVD \\) also produces a wide variety of shapes, it is limited by its nature to generating only coarse object shapes. Furthermore, because \\( PVD \\) produces a fixed-size point cloud with only 2048 points, it cannot synthesize fine elements." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.67, + 0.893, + 0.881 + ], + "angle": 0, + "content": "SDF-StyleGAN creates high-fidelity shapes, accurately reproducing many details, such as airplane engines and chair legs. However, our method is more capable of capturing very fine features. Note that while SDF-StyleGAN smooths over the division between tire and wheel well when generating cars, our method faithfully portrays this gap. Similarly, our method synthesizes the tails and engines of airplanes, and the legs and planks of chairs, with noticeably better definition. Our method also apparently generates a greater diversity of objects than SDF-StyleGAN. While SDF-StyleGAN capably generates varieties of each ShapeNet class, our method reproduces the same classes with greater variation. 
This is expected, as a noted advantage of diffusion models over GANs is better mode coverage." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We provide quantitative results in Table 1. The metrics" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20880" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.092, + 0.887, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.252, + 0.895, + 0.281 + ], + "angle": 0, + "content": "Figure 5. Interpolation. Our model learns a continuous latent space of triplanes. We can smoothly interpolate between two noise triplanes, resulting in semantically meaningful shape interpolation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.307, + 0.473, + 0.489 + ], + "angle": 0, + "content": "tell a similar story to the qualitative results. Quantitatively, NFD outperforms all baselines in FID, precision, and recall for each ShapeNet category. FID is a standard one-number metric for evaluating generative models, and our performance under this evaluation indicates the generally better quality of object renderings. Precision evaluates the renderings' fidelity, and recall evaluates their diversity. Outperforming baselines in both precision and recall suggests that our model produces higher-fidelity shapes and a more diverse distribution of shapes. This is consistent with the qualitative results in Figure 4, where our method produced sharper and more complex objects while also covering more modes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.511, + 0.47, + 0.678 + ], + "angle": 0, + "content": "Semantically meaningful interpolation. Figure 5 shows latent space interpolation between pairs of generated neural fields. As shown in prior work [69], smooth interpolation in the latent space of diffusion models can be achieved by interpolation between noise tensors before they are iteratively denoised by the model. As in their method, we sample from our trained model using a deterministic DDIM, and we use spherical interpolation so that the intermediate latent noise retains the same distribution. Our method is capable of smooth latent space interpolation in the generated triplanes and their corresponding neural fields." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.689, + 0.241, + 0.703 + ], + "angle": 0, + "content": "4.1. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.713, + 0.472, + 0.743 + ], + "angle": 0, + "content": "We validate the design of our framework by ablating components of our regularization strategies using the cars dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Explicit density regularization. As discussed by Park et al. [57], the precision of the ground truth decoded meshes is limited by the finite number of point samples guiding the training of the decision boundaries. Because we rely on a limited number of pre-computed coordinate-occupancy pairs to train our triplanes, it is easy to overfit to this limited training set. 
Even when optimizing a single triplane in isolation (i.e., without learning a generative model), this overfitting manifests in \"floater\" artifacts in the optimized" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.301, + 0.695, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.315, + 0.886, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.467, + 0.892, + 0.523 + ], + "angle": 0, + "content": "Figure 6. Ablation over density regularization. Clear artifacts are visible in the resulting occupancy field without explicit density regularization. In this example, we optimize a single triplane on a single shape." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.55, + 0.892, + 0.626 + ], + "angle": 0, + "content": "neural field. Figure 6 shows an example where we fit a single triplane with and without density regularization. Without density regularization, the learned occupancy field contains significant artifacts; with density regularization, the learned occupancy field captures a clean object." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.885 + ], + "angle": 0, + "content": "Triplane regularization. Regularization of the triplanes is essential for training a well-behaved diffusion model. Figure 7 compares generated samples produced by our entire framework, with and without regularization terms. If we train only with Equation 2, i.e., without regularization terms, we can optimize a dataset of triplane features and train a diffusion model to generate samples. However, while the surfaces of the optimized shapes will appear real, the triplane features themselves will have many high-frequency artifacts, and these convoluted feature images are a difficult manifold for even a powerful diffusion model to learn. Consequently, generated triplane features produced by a trained diffusion model decode into shapes with significant artifacts. We note that these artifacts are present only in generated samples; shapes directly factored from the ground-truth shapes are artifact-free, even without regularization." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Training with Equation 4 introduces TV, L2, and" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "20881" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.09, + 0.264, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.091, + 0.465, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.278, + 0.473, + 0.419 + ], + "angle": 0, + "content": "Figure 7. Ablation over regularized triplanes. A generative model trained on unregularized triplanes produces samples with significant artifacts. Effective regularization of triplane features enables training of a generative model that produces shapes without artifacts. Top left: triplane features learned only with Equation 2 contain many high frequency artifacts. Bottom left: a diffusion model trained on these unregularized triplanes fails to produce convincing samples. Top right: triplane features learned with Equation 4 are noticeably smoother. Bottom right: A diffusion model trained on these regularized triplanes produces high-quality shapes." + }, + { + "type": "table", + "bbox": [ + 0.101, + 0.432, + 0.446, + 0.478 + ], + "angle": 0, + "content": "
Method | FID ↓ | Precision ↑ | Recall ↑
No regularization | 285.8 | 1.6 | 0.6
Density + TV + L2 Reg. | 83.6 | 49.5 | 50.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.483, + 0.472, + 0.526 + ], + "angle": 0, + "content": "Table 2. Quantitative results for the ablation on triplane regularization. Our model performs poorly without explicit regularization on the triplanes." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.55, + 0.47, + 0.673 + ], + "angle": 0, + "content": "density regularizing factors. Triplanes learned with these regularization terms are noticeably smoother, with frequency distributions that more closely align with those found in natural images (see supplement). As we would expect, a diffusion model more readily learns the manifold of regularized triplane features. Samples produced by a diffusion model trained on these regularized shapes decode into convincing and artifact-free shapes." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.687, + 0.191, + 0.703 + ], + "angle": 0, + "content": "5. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.713, + 0.471, + 0.789 + ], + "angle": 0, + "content": "In summary, we introduce a 3D-aware diffusion model that uses a 2D diffusion backbone to generate triplane feature maps, which are assembled into 3D neural fields. Our approach improves the quality and diversity of generated objects over existing 3D-aware generative models by a large margin." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.473, + 0.902 + ], + "angle": 0, + "content": "Limitations. As with other generative methods, training a diffusion model is slow and computationally demanding. Diffusion models, including ours, are also slow to evaluate, whereas GANs, for example, can be evaluated in real-time once trained. Fortunately, our method will benefit directly from future improvements to 2D diffusion models." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.093, + 0.891, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.263, + 0.895, + 0.307 + ], + "angle": 0, + "content": "Figure 8. Failure cases. We observe that our model at times generates axis-aligned artifacts and struggles to account for thin structures, likely caused by the use of a triplane representation." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.332, + 0.895, + 0.424 + ], + "angle": 0, + "content": "Slow sampling at inference could be addressed by more efficient samplers [30], potentially enabling real-time synthesis. While our method is a step forward in quality, some of its samples still suffer from artifacts, as depicted in Fig. 8. Strategies like guidance [13, 27], which trade off diversity for fidelity, may reduce the prevalence of these outliers." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.443, + 0.892, + 0.593 + ], + "angle": 0, + "content": "Future Work. We have demonstrated an effective way to generate occupancy fields, but in principle, our approach can be extended to generating any type of neural field that can be represented by a triplane. In particular, triplanes have already been shown to be excellent representations for radiance fields, so it seems natural to extend our diffusion approach to generating NeRFs. While we demonstrate successful results for unconditional generation, conditioning our generative model on text, images, or other input would be an exciting avenue for future work." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.614, + 0.892, + 0.69 + ], + "angle": 0, + "content": "Ethical Considerations. 
Generative models, including ours, could be extended to generate DeepFakes. These pose a societal threat, and we do not condone using our work to generate fake images or videos of any person intending to spread misinformation or tarnish their reputation." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.71, + 0.892, + 0.786 + ], + "angle": 0, + "content": "Conclusion. 3D-aware object synthesis has many exciting applications in vision and graphics. With our work, which is among the first to connect powerful 2D diffusion models and 3D object synthesis, we take a significant step towards utilizing emerging diffusion models for this goal." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.8, + 0.669, + 0.817 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.826, + 0.894, + 0.9 + ], + "angle": 0, + "content": "We thank Vincent Sitzmann for valuable discussions. This project was in part supported by Samsung, the Stanford Institute for Human-Centered AI (HAI), the Stanford Center for Integrated Facility Engineering (CIFE), NSF RI #2211258, Autodesk, and a PECASE from the ARO." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.945, + 0.521, + 0.957 + ], + "angle": 0, + "content": "20882" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Matan Atzmon and Yaron Lipman. SAL: Sign agnostic learning of shapes from raw data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.472, + 0.227 + ], + "angle": 0, + "content": "[2] Miguel Ángel Bautista, Pengsheng Guo, Samira Abnar, Walter Talbott, Alexander Toshev, Zhuoyuan Chen, Laurent Dinh, Shuangfei Zhai, Hanlin Goh, Daniel Ulbricht, Afshin Dehghan, and Josh M. Susskind. GAUDI: A neural architect for immersive 3d scene generation. CoRR, abs/2207.13751, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.227, + 0.471, + 0.284 + ], + "angle": 0, + "content": "[3] Fausto Bernardini, Joshua Mittleman, Holly E. Rushmeier, Cláudio T. Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Transactions on Visualization and Computer Graphics, 5:349-359, 1999. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.284, + 0.472, + 0.324 + ], + "angle": 0, + "content": "[4] Alexandre Boulch and Renaud Marlet. POCO: point convolution for surface reconstruction. CoRR, abs/2201.01831, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.471, + 0.395 + ], + "angle": 0, + "content": "[5] Rohan Chabra, Jan Eric Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local SDF priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.396, + 0.471, + 0.493 + ], + "angle": 0, + "content": "[6] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16123-16133, June 2022. 
1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.493, + 0.471, + 0.563 + ], + "angle": 0, + "content": "[7] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-GAN: Periodic implicit generative adversarial networks for 3D-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.564, + 0.471, + 0.605 + ], + "angle": 0, + "content": "[8] Ding-Yun Chen, Xiao-Pei Tian, Edward Yu-Te Shen, and Ming Ouhyoung. On visual similarity based 3d model retrieval. Computer Graphics Forum, 22, 2003. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.606, + 0.471, + 0.661 + ], + "angle": 0, + "content": "[9] Yinbo Chen, Sifei Liu, and Xiaolong Wang. Learning continuous image representation with local implicit image function. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.662, + 0.471, + 0.704 + ], + "angle": 0, + "content": "[10] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.471, + 0.746 + ], + "angle": 0, + "content": "[11] Thomas Davies, Derek Nowrouzehrai, and Alec Jacobson. Overfit neural networks as a compact shape representation. arXiv preprint arXiv:2009.09808, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.471, + 0.802 + ], + "angle": 0, + "content": "[12] Terrance DeVries, Miguel Angel Bautista, Nitish Srivastava, Graham W. Taylor, and Joshua M. Susskind. Unconstrained scene generation with locally conditioned radiance fields. arXiv preprint arXiv:2104.00670, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.471, + 0.872 + ], + "angle": 0, + "content": "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 8780-8794. Curran Associates, Inc., 2021. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[14] Emilien Dupont, Hyunjik Kim, S. M. Ali Eslami, Danilo J. Rezende, and Dan Rosenbaum. From data to functa: Your" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.894, + 0.12 + ], + "angle": 0, + "content": "data point is a function and you should treat it like one. CoRR, abs/2201.12204, 2022. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.121, + 0.894, + 0.178 + ], + "angle": 0, + "content": "[15] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.207 + ], + "angle": 0, + "content": "[16] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.894, + 0.249 + ], + "angle": 0, + "content": "[17] Matheus Gadelha, Subhransu Maji, and Rui Wang. 
3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.25, + 0.892, + 0.292 + ], + "angle": 0, + "content": "[18] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. FastNeRF: High-fidelity neural rendering at 200fps. arXiv preprint arXiv:2103.10380, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.292, + 0.892, + 0.348 + ], + "angle": 0, + "content": "[19] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3D shape. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.405 + ], + "angle": 0, + "content": "[20] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.892, + 0.448 + ], + "angle": 0, + "content": "[21] Simon Giebenhain and Bastian Goldlücke. Air-nets: An attention-based framework for locally conditioned implicit representations. In 3DV, pages 1054-1064. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.449, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[22] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems (NeurIPS), 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.505, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[23] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In International Conference on Machine Learning (ICML), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.894, + 0.616 + ], + "angle": 0, + "content": "[24] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. StyleNeRF: A style-based 3D-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.617, + 0.894, + 0.674 + ], + "angle": 0, + "content": "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.894, + 0.717 + ], + "angle": 0, + "content": "[26] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, volume 33, pages 6840-6851, 2020. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.892, + 0.746 + ], + "angle": 0, + "content": "[27] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.746, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[28] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, and Thomas Funkhouser. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.803, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[29] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. SDFDiff: Differentiable rendering of signed distance fields for 3D shape optimization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[30] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. ArXiv, abs/2206.00364, 2022. 8" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20883" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.15, + 0.471, + 0.205 + ], + "angle": 0, + "content": "[32] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.471, + 0.261 + ], + "angle": 0, + "content": "[33] Petr Kellnhofer, Lars Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.264, + 0.471, + 0.359 + ], + "angle": 0, + "content": "[34] Adam R Kosiorek, Heiko Strathmann, Daniel Zoran, Pol Moreno, Rosalia Schneider, Sona Mokra, and Danilo Jimenez Rezende. Nerf-vae: A geometry aware 3d scene generative model. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 5742–5752. PMLR, 18–24 Jul 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.362, + 0.47, + 0.43 + ], + "angle": 0, + "content": "[35] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.433, + 0.471, + 0.487 + ], + "angle": 0, + "content": "[36] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.49, + 0.471, + 0.531 + ], + "angle": 0, + "content": "[37] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.533, + 0.471, + 0.573 + ], + "angle": 0, + "content": "[38] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3D supervision. 
arXiv preprint arXiv:1911.00767, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.47, + 0.644 + ], + "angle": 0, + "content": "[39] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. DIST: Rendering deep implicit signed distance function with differentiable sphere tracing. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.47, + 0.701 + ], + "angle": 0, + "content": "[40] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Transactions on Graphics (SIGGRAPH), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.471, + 0.743 + ], + "angle": 0, + "content": "[41] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (ToG), 1987. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.47, + 0.8 + ], + "angle": 0, + "content": "[42] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2837-2845, June 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.471, + 0.857 + ], + "angle": 0, + "content": "[43] Julien N.P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. ACORN: Adaptive coordinate networks for neural representation. ACM Transactions on Graphics (SIGGRAPH), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.471, + 0.899 + ], + "angle": 0, + "content": "[44] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the wild: Neural radiance fields for" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "unconstrained photo collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[45] Ishit Mehta, Michael Gharbi, Connelly Barnes, Eli Shechtman, Ravi Ramamoorthi, and Manmohan Chandraker. Modulated periodic activations for generalizable local functional representations. In ICCV, pages 14194-14203. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.894, + 0.245 + ], + "angle": 0, + "content": "[46] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6351-6361, October 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.247, + 0.894, + 0.314 + ], + "angle": 0, + "content": "[47] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.894, + 0.371 + ], + "angle": 0, + "content": "[48] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.373, + 0.894, + 0.44 + ], + "angle": 0, + "content": "[49] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.443, + 0.894, + 0.524 + ], + "angle": 0, + "content": "[50] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards Real-Time Rendering of Compact Neural Radiance Fields using Depth Oracle Networks. Computer Graphics Forum, 40(4), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.526, + 0.894, + 0.593 + ], + "angle": 0, + "content": "[51] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.596, + 0.894, + 0.676 + ], + "angle": 0, + "content": "[52] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8162-8171. PMLR, 18-24 Jul 2021. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.746 + ], + "angle": 0, + "content": "[53] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11453-11464, June 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.749, + 0.894, + 0.815 + ], + "angle": 0, + "content": "[54] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.894, + 0.872 + ], + "angle": 0, + "content": "[55] Michael Oechsle, Songyou Peng, and Andreas Geiger. UNISURF: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[56] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. 
Stylesdf:" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20884" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.472, + 0.147 + ], + "angle": 0, + "content": "High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13503-13513, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.151, + 0.472, + 0.22 + ], + "angle": 0, + "content": "[57] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.223, + 0.471, + 0.278 + ], + "angle": 0, + "content": "[58] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.281, + 0.471, + 0.322 + ], + "angle": 0, + "content": "[59] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. arXiv preprint arXiv:2011.13961, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.325, + 0.471, + 0.394 + ], + "angle": 0, + "content": "[60] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.397, + 0.471, + 0.466 + ], + "angle": 0, + "content": "[61] Daniel Rebain, Mark Matthews, Kwang Moo Yi, Dmitry Lagun, and Andrea Tagliasacchi. Lolnerf: Learn from one look. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1558-1567, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.471, + 0.524 + ], + "angle": 0, + "content": "[62] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.527, + 0.471, + 0.568 + ], + "angle": 0, + "content": "[63] Mehdi S. M. Sajjadi, Olivier Bachem, Mario Lucic, Olivier Bousquet, and Sylvain Gelly. Assessing generative models via precision and recall. In NeurIPS, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.571, + 0.471, + 0.627 + ], + "angle": 0, + "content": "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: Generative radiance fields for 3D-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.63, + 0.471, + 0.685 + ], + "angle": 0, + "content": "[65] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.471, + 0.743 + ], + "angle": 0, + "content": "[66] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deep Voxels: Learning persistent 3D feature embeddings. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.746, + 0.471, + 0.801 + ], + "angle": 0, + "content": "[67] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3D-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[68] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 2256-2265, Lille, France, 07-09 Jul 2015. PMLR. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[69] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[70] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.208, + 0.894, + 0.276 + ], + "angle": 0, + "content": "[71] Yang Song and Stefano Ermon. Improved techniques for training score-based generative models. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 12438-12448. Curran Associates, Inc., 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.894, + 0.347 + ], + "angle": 0, + "content": "[72] Pratul P. Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T. Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.349, + 0.894, + 0.431 + ], + "angle": 0, + "content": "[73] Towaki Takikawa, Joey Litalien, Kangxue Yin, Karsten Kreis, Charles Loop, Derek Nowrouzezahrai, Alec Jacobson, Morgan McGuire, and Sanja Fidler. Neural geometric level of detail: Real-time rendering with implicit 3D shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.894, + 0.503 + ], + "angle": 0, + "content": "[74] Ayush Tewari, Justus Thies, Ben Mildenhall, Pratul Srinivasan, Edgar Tretschk, W Yifan, Christoph Lassner, Vincent Sitzmann, Ricardo Martin-Brualla, Stephen Lombardi, et al. Advances in neural rendering. In Computer Graphics Forum, volume 41, pages 703-735. 
Wiley Online Library, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.894, + 0.545 + ], + "angle": 0, + "content": "[75] Hoang Thanh-Tung and Truyen Tran. Catastrophic forgetting and mode collapse in gans. In IJCNN, pages 1-10. IEEE, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.548, + 0.894, + 0.617 + ], + "angle": 0, + "content": "[76] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 11287-11302. Curran Associates, Inc., 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.894, + 0.715 + ], + "angle": 0, + "content": "[77] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In Maria Florina Balcan and Kilian Q. Weinberger, editors, Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pages 1747-1756, New York, New York, USA, 20-22 Jun 2016. PMLR. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.894, + 0.786 + ], + "angle": 0, + "content": "[78] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.894, + 0.87 + ], + "angle": 0, + "content": "[79] Jiajun Wu, Chengkai Zhang, Tianfan Xue, Bill Freeman, and Josh Tenenbaum. Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[80] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin," + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "20885" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Comput. Graph. Forum, 41(2):641-676, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.19 + ], + "angle": 0, + "content": "[81] Guangming Yao, Hongzhi Wu, Yi Yuan, and Kun Zhou. Dd-nerf: Double-diffusion neural radiance field as a generalizable implicit body representation. arXiv preprint arXiv:2112.12390, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.471, + 0.261 + ], + "angle": 0, + "content": "[82] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.471, + 0.318 + ], + "angle": 0, + "content": "[83] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.32, + 0.471, + 0.361 + ], + "angle": 0, + "content": "[84] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.471, + 0.404 + ], + "angle": 0, + "content": "[85] Xin-Yang Zheng, Yang Liu, Peng-Shuai Wang, and Xin Tong. Sdf-stylegan: Implicit sdf-based stylegan for 3d shape generation. CoRR, abs/2206.12055, 2022. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.471, + 0.46 + ], + "angle": 0, + "content": "[86] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5826-5835, October 2021. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.471, + 0.516 + ], + "angle": 0, + "content": "[87] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. CIPS-3D: A 3D-Aware Generator of GANs Based on Conditionally-Independent Pixel Synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "20886" + } + ] +] \ No newline at end of file diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_origin.pdf b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..38299c6c4461cec6685ab728df7ff125918b5f6b --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/9d99632a-6c66-4f96-953f-d0f7ffc4caf8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:974ccfa136036b9cc0061e3a3c35ab4f58b318e5832dc5e8f88d6dec816a77c5 +size 9305526 diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/full.md b/2023/3D Neural Field Generation Using Triplane Diffusion/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5c4009a3a664d923c0df98da236d69a541ddd05d --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/full.md @@ -0,0 +1,328 @@ +# 3D Neural Field Generation using Triplane Diffusion + +J. Ryan Shue\* Eric Ryan Chan\*2 Ryan Po\*2 Zachary Ankner\*3,4 Jiajun Wu\*2 Gordon Wetzstein\*2 Milton Academy 2Stanford University 3Massachusetts Institute of Technology 4MosaicML + +# Abstract + +Diffusion models have emerged as the state-of-the-art for image generation, among other tasks. Here, we present an efficient diffusion-based model for 3D-aware generation of neural fields. Our approach pre-processes training data, such as ShapeNet meshes, by converting them to continuous occupancy fields and factoring them into a set of axis-aligned triplane feature representations. 
Thus, our 3D training scenes are all represented by 2D feature planes, and we can directly train existing 2D diffusion models on these representations to generate 3D neural fields with high quality and diversity, outperforming alternative approaches to 3D-aware generation. Our approach requires essential modifications to existing triplane factorization pipelines to make the resulting features easy to learn for the diffusion model. We demonstrate state-of-the-art results on 3D generation on several object classes from ShapeNet. + +# 1. Introduction + +Diffusion models have seen rapid progress, setting state-of-the-art (SOTA) performance across a variety of image generation tasks. While most diffusion methods model 2D images, recent work [2, 14, 42, 86] has attempted to develop denoising methods for 3D shape generation. These 3D diffusion methods operate on discrete point clouds and, while successful, exhibit limited quality and resolution. + +In contrast to 2D diffusion, which directly leverages the image as the target for the diffusion process, it is not directly obvious how to construct such 2D targets in the case of 3D diffusion. Interestingly, recent work on 3D-aware generative adversarial networks (GANs) (see Sec. 2 for an overview) has demonstrated impressive results for 3D shape generation using 2D generators. We build upon this idea of learning to generate triplane representations [6] that encode 3D scenes or + +![](images/450fe3152bfc1f768b556951bf1fcd42a550acbbd9424e433875c2618a8dea15.jpg) +Figure 1. Our method leverages existing 2D diffusion models for 3D shape generation using hybrid explicit-implicit neural representations. Top: triplane-based 3D shape diffusion process using our framework. Bottom: Interpolation between generated shapes. + +radiance fields as a set of axis-aligned 2D feature planes. The structure of a triplane is analogous to that of a 2D image and can be used as part of a 3D generative method that leverages conventional 2D generator architectures. + +Inspired by recent efforts in designing efficient 3D GAN architectures, we introduce a neural field-based diffusion framework for 3D representation learning. Our approach follows a two-step process. In the first step, a training set of 3D scenes is factored into a set of per-scene triplane features and a single, shared feature decoder. In the second step, a 2D diffusion model is trained on these triplanes. The trained diffusion model can then be used at inference time to generate novel and diverse 3D scenes. By interpreting triplanes as multi-channel 2D images and thus decoupling generation from rendering, we can leverage current (and likely future) SOTA 2D diffusion model backbones nearly out of the box. Fig. 1 illustrates how a single object is generated with our framework (top), and how two generated objects—even with different topologies—can be interpolated (bottom). + +Our core contributions are as follows: + +![](images/24404e688258727d62d4c8a74cb692aa929c6ec91ff1e58989e7466b255ce1e8.jpg) +Figure 2. Visualization of the denoising process. Here, we show examples of triplanes as they are iteratively denoised at inference, as well as the shapes we obtain by "decoding" the noisy triplanes with our jointly-learned MLP. By interpreting triplane features simply as multi-channel feature images, we build our framework around 2D diffusion models. + +- We introduce a generative framework for diffusion on 3D scenes that utilizes 2D diffusion model backbones and has a built-in 3D inductive bias. 
+- We show that our approach is capable of generating both high-fidelity and diverse 3D scenes that outperform state-of-the-art 3D GANs. + +# 2. Related Work + +Neural fields. Implicit neural representations, or neural fields, hold the SOTA for 3D scene representation [74, 80]. They either solely learn geometry [1, 4, 5, 10, 11, 15, 21, 23, 45, 47, 48, 57, 65, 73] or use posed images to jointly optimize geometry and appearance [6, 7, 18, 25, 29, 33, 36–39, 44, 49, 50, 54, 55, 59, 67, 72, 82–84]. Neural fields represent scenes as continuous functions, allowing them to scale well with scene complexity compared to their discrete counterparts [40, 66]. Initial methods used a single, large multilayer perceptron (MLP) to represent entire scenes [10, 47, 49, 57, 65], but reconstruction with this approach can be computationally inefficient because training such a representation requires thousands of forward passes through the large model per scene. Recent years have shown a trend towards locally conditioned representations, which either learn local functions [5, 9, 28, 62] or locally modulate a shared function with a hybrid explicit-implicit representation [4, 6, 12, 19–21, 37, 43, 45, 58]. These methods use small MLPs, which are efficient during inference and significantly + +better at capturing local scene details. We adopt the expressive hybrid triplane representation introduced by Chan et al. [6]. Triplanes are efficient, scaling with the surface area rather than volume, and naturally integrate with expressive, fine-tuned 2D generator architectures. We modify the triplane representation for compatibility with our denoising framework. + +Generative synthesis in 2D and 3D. Some of the most popular generative models include GANs [22, 31, 32], autoregressive models [16, 60, 77, 78], score matching models [68, 70, 71], and denoising diffusion probabilistic models (DDPMs) [13, 26, 52, 76]. DDPMs are arguably the SOTA approach for synthesizing high-quality and diverse 2D images [13]. Moreover, GANs can be difficult to train and suffer from issues like mode collapse [75] whereas diffusion models train stably and have been shown to better capture the full training distribution. + +In 3D, however, GANs still outperform alternative generative approaches [6,7,17,24,34,35,37,46,51,53,56, 61,64,79,85,87]. Some of the most successful 3D GANs use an expressive 2D generator backbone (e.g., StyleGAN2 [32]) to synthesize triplane representations which are then decoded with a small, efficient MLP [6]. Because the decoder is small and must generalize across many local latents, these methods assign most of their expressiveness to the powerful backbone. In addition, these methods treat the triplane as a multi-channel image, allowing the generator backbone to be used almost out of the box. + +Current 3D diffusion models [2, 14, 42, 81, 86] are still very limited. They either denoise a single latent or do not utilize neural fields at all, opting for a discrete point-cloud-based approach. For example, concurrently developed single-latent approaches [2, 14] generate a global latent for conditioning the neural field, relying on a 3D decoder to transform the scene representation from 1D to 3D without directly performing 3D diffusion. As a result, the diffusion model does not actually operate in 3D, losing this important inductive bias and generating blurry results. 
Point-cloud-based approaches [42, 86], on the other hand, give the diffusion model explicit 3D control over the shape, but limit its resolution and scalability due to the coarse discrete representation. While showing promise, both 1D-to-3D and point cloud diffusion approaches require specific architectures that cannot easily leverage recent advances in 2D diffusion models. + +In our work, we propose to directly generate triplanes with out-of-the-box SOTA 2D diffusion models, granting the diffusion model near-complete control over the generated neural field. Key to our approach is our treatment of well-fit triplanes in a shared latent space as ground truth data for training our diffusion model. We show that the latent space of these triplanes is grounded spatially in local detail, giving the diffusion model a critical inductive bias for 3D generation. Our approach gives rise to an expressive 3D diffusion model. + +# 3. Triplane Diffusion Framework + +Here, we explain the architecture of our neural field diffusion (NFD) model for 3D shapes. In Section 3.1, we explain how we can represent the occupancy field of a single object using a triplane. In Section 3.2, we describe how we can extend this framework to represent an entire dataset of 3D objects. In Section 3.3, we describe the regularization techniques that we found necessary to achieve optimal results. Finally, Sections 3.4 and 3.5 illustrate training and sampling from our model. For an overview of the pipeline at inference, see Figure 3. + +# 3.1. Representing a 3D Scene using a Triplane + +Neural fields have been introduced as continuous and expressive 3D scene representations. In this context, a neural field $\mathrm{NF}:\mathbb{R}^3\to \mathbb{R}^M$ is a neural network-parameterized mapping function that takes as input a three-dimensional coordinate $\mathbf{x}$ and that outputs an $M$ -dimensional vector representing the neural field. Neural fields have been demonstrated for occupancy fields [47], signed distance functions [57], radiance fields [49], among many other types of signals [65]. For the remainder of this work, we focus on 3D scene representations using occupancy fields such that the output of the neural field is a binary value, indicating whether a coordinate is inside or outside an object and $M = 1$ . + +The triplane representation is a hybrid explicit-implicit network architecture for neural fields that is particularly effi + +cient to evaluate [6]. This representation uses three 2D feature planes $\mathbf{f}_{xy},\mathbf{f}_{xz},\mathbf{f}_{yz}\in \mathbb{R}^{N\times N\times C}$ with a spatial resolution of $N\times N$ and $C$ feature channels each, and a multilayer perceptron (MLP) "decoder" tasked with interpreting features sampled from the planes. A 3D coordinate is queried by projecting it onto each of the axis-aligned planes (i.e., the $x - y,x - z,$ and $y - z$ planes), querying and aggregating the respective features, and decoding the resulting feature using a lightweight $\mathrm{MLP}_{\phi}$ with parameters $\phi$ . Similar to Chan et al. [6], we found the sum to be an efficient feature aggregation function, resulting in the following formulation for the triplane architecture: + +$$ +\mathrm {N F} (\mathbf {x}) = \operatorname {M L P} _ {\phi} \left(\mathbf {f} _ {x y} (\mathbf {x}) + \mathbf {f} _ {y z} (\mathbf {x}) + \mathbf {f} _ {x z} (\mathbf {x})\right). \tag {1} +$$ + +The feature planes and MLP can be jointly optimized to represent the occupancy field of a shape. + +# 3.2. 
Representing a Class of Objects with Triplanes + +We aim to convert our dataset of shapes into a dataset of triplanes so that we can train a diffusion model on these learned feature planes. However, because the MLP and feature planes are typically jointly learned, we cannot simply train a triplane for each object of the dataset individually. If we did, the MLP's corresponding to each object in our dataset would fail to generalize to triplanes generated by our diffusion model. Therefore, instead of training triplanes for each object in isolation, we jointly optimize the feature planes for many objects simultaneously, along with a decoder that is shared across all objects. This joint optimization results in a dataset of optimized feature planes and an MLP capable of interpreting any triplane from the dataset distribution. Thus, at inference, we can use this MLP to decode feature planes generated by our model. + +In practice, during training, we are given a dataset of $I$ objects, and we preprocess the coordinates and ground-truth occupancy values of $J$ points per object. Typically, $J = 10\mathrm{M}$ where 5M points are sampled uniformly throughout the volume and 5M points are sampled near the object surface. Our naive training objective is a simple $L2$ loss between predicted occupancy values $\mathrm{NF}^{(i)}(\mathbf{x}_j^{(i)})$ and ground-truth occupancy values $\mathrm{O}_j^{(i)}$ for each point, where $\mathbf{x}_j^{(i)}$ denotes the $j^{\mathrm{th}}$ point from the $i^{\mathrm{th}}$ scene: + +$$ +\mathcal {L} _ {\mathrm {N A I V E}} = \sum_ {i} ^ {I} \sum_ {j} ^ {J} \left\| \mathrm {N F} ^ {(i)} \left(\mathbf {x} _ {j} ^ {(i)}\right) - \mathrm {O} _ {j} ^ {(i)} \right\| _ {2} \tag {2} +$$ + +During training, we optimize Equation 2 for a shared MLP parameterized by $\phi$ , as well as the feature planes corresponding to every object in our dataset: + +$$ +\left\{\phi , \mathbf {f} _ {x y} ^ {(i)}, \mathbf {f} _ {x z} ^ {(i)}, \mathbf {f} _ {y z} ^ {(i)} \right\} = \underset {\left\{\phi , \mathbf {f} _ {x y} ^ {(i)}, \mathbf {f} _ {x z} ^ {(i)}, \mathbf {f} _ {y z} ^ {(i)} \right\}} {\operatorname {a r g m i n}} \mathcal {L} _ {\mathrm {N A I V E}} \tag {3} +$$ + +![](images/089437b1d30d05c909262096994b21a0a3a962f57b354646c1483ff477ef08e7.jpg) +Figure 3. Pipeline. Sampling a 3D neural field from our model consists of two decoupled processes: 1) using a trained DDPM to iteratively denoise latent noise into feature maps and 2) using a locally conditioned Occupancy Network to decode the resulting triplane into the final neural field. This architecture allows the DDPM to generate samples with a 3D inductive bias while utilizing existing 2D DDPM backbones and a continuous output representation. + +# 3.3. Regularizing Triplanes for Effective Generalization + +Following the procedure outlined in the previous section, we can learn a dataset of triplane features and a shared triplane decoder; we can then train a diffusion model on these triplane features and sample novel shapes at inference. Unfortunately, the result of this naive training procedure is a generative model for triplanes that produces shapes with significant artifacts. + +We find it necessary to regularize the triplane features during optimization to simplify the data manifold that the diffusion model must learn. Therefore, we include total variation (TV) regularization terms with weight $\lambda_{1}$ in the loss function to ensure that the feature planes of each training scene do not contain spurious high-frequency information. 
This strategy makes the distribution of triplane features more similar to the manifold of natural images (see supplement), which we found necessary to robustly train a diffusion model on them (see Sec. 4). + +While the trained feature values are unbounded, our DDPM backbone requires training inputs with values in the range [-1,1]. We address this by normalizing the feature planes before training, but this process is sensitive to outliers. As a result, we include an L2 regularization term on the triplane features with weight $\lambda_{2}$ to discourage outlying values. + +We also include an explicit density regularization (EDR) term. Due to our ground-truth occupancy data being concentrated on the surface of the shapes, there is often insufficient data to learn a smooth outside-of-shape volume. Our EDR term combats this issue by sampling a set of random points from the volume, offsetting the points by a random vector $\omega$ , feeding both sets through the MLP, and calculating the mean squared error. Notationally, this term can be represented as $\mathrm{EDR}(\mathrm{NF}(\mathbf{x}), \omega) = \| \mathrm{NF}(\mathbf{x}) - \mathrm{NF}(\mathbf{x} + \omega) \|_2^2$ . We find this term necessary to remove floating artifacts in the volume (see Sec. 4) + +Our training objective, with added regularization terms, is as follows: + +$$ +\begin{array}{l} \mathcal {L} = \sum_ {i} ^ {N} \sum_ {j} ^ {M} \operatorname {B C E} \left(\mathrm {N F} ^ {(i)} \left(\mathbf {x} _ {j} ^ {(i)}\right) - \mathrm {O} _ {j} ^ {(i)}\right) \\ + \lambda_ {1} \left(\operatorname {T V} \left(\mathbf {f} _ {x y} ^ {(i)}\right) + \operatorname {T V} \left(\mathbf {f} _ {x z} ^ {(i)}\right) + \operatorname {T V} \left(\mathbf {f} _ {y z} ^ {(i)}\right)\right) \\ + \lambda_ {2} \left(\left| \left| \mathbf {f} _ {x y} ^ {(i)} \right| \right| _ {2} + \left| \left| \mathbf {f} _ {y z} ^ {(i)} \right| \right| _ {2} + \left| \left| \mathbf {f} _ {x z} ^ {(i)} \right| \right| _ {2}\right) \\ + \operatorname {E D R} \left(\mathrm {N F} \left(\mathbf {x} _ {j} ^ {(i)}\right), \boldsymbol {\omega}\right) \tag {4} \\ \end{array} +$$ + +# 3.4. Training a Diffusion Model for Triplane Features + +For unconditional generation, a diffusion model takes Gaussian noise as input and gradually denoises it in $T$ steps. In our framework, the diffusion model operates on triplane features $\mathbf{f}_{0\dots T}\in \mathbb{R}^{N\times N\times 3C}$ that stack the feature channels of all three triplane axes into a single image. In this notation, $\mathbf{f}_T\sim \mathcal{N}(\mathbf{f}_T;0,\mathbf{I})$ is the triplane feature image consisting of purely Gaussian noise, and $\mathbf{f}_0\sim q(\mathbf{f}_0)$ is a random sample drawn from the data distribution. The data distribution in our framework includes the pre-factored triplanes of the training set, normalized by the mean and variance of the entire dataset such that each channel has a zero mean and a standard deviation of 0.5. + +The forward or diffusion processes is a Markov chain that gradually adds Gaussian noise to the triplane features, according to a variance schedule $\beta_{1},\beta_{2},\dots,\beta_{T}$ + +$$ +q \left(\mathbf {f} _ {t} \mid \mathbf {f} _ {t - 1}\right) = \mathcal {N} \left(\mathbf {f} _ {t}; \sqrt {1 - \beta_ {t}} \mathbf {f} _ {t - 1}, \beta_ {t} \mathbf {I}\right). 
\tag {5} +$$ + +This forward process can be directly sampled at step $t$ using the closed-form solution $q(\mathbf{f}_t|\mathbf{f}_0) = \mathcal{N}(\mathbf{f}_t;\sqrt{\bar{\alpha}_t}\mathbf{f}_0,(1 - \bar{\alpha}_t)\mathbf{I})$ , where $\bar{\alpha}_{t} = \prod_{s = 1}^{t}\alpha_{s}$ with $\alpha_{t} = 1 - \beta_{t}$ . + +The goal of training a diffusion model is to learn the reverse process. For this purpose, a function approximator + +$\epsilon_{\theta}$ is needed that predicts the noise $\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ from its noisy input. Typically, this function approximator is implemented as a variant of a convolutional neural network defined by its parameters $\theta$ . Following [26], we train our triplane diffusion model by optimizing the simplified variant of the variational bound on negative log-likelihood: + +$$ +\mathcal {L} _ {\mathrm {D D P M}} = \mathbb {E} _ {t, \mathbf {f} _ {0}, \epsilon} \left[ \left\| \boldsymbol {\epsilon} - \boldsymbol {\epsilon} _ {\theta} \left(\sqrt {\bar {\alpha} _ {t}} \mathbf {f} _ {0} + \sqrt {1 - \bar {\alpha} _ {t}} \boldsymbol {\epsilon}, t\right) \right\| ^ {2} \right], \tag {6} +$$ + +where $t$ is sampled uniformly between 1 and $T$ . + +# 3.5. Sampling Novel 3D Shapes + +The unconditional generation of shapes at inference is a two-stage process that involves sampling a triplane from the trained diffusion model and then querying the neural field. + +Sampling a triplane from the diffusion model is identical to sampling an image from a diffusion model. Beginning with a random Gaussian noise $\mathbf{f}_T\sim \mathcal{N}(\mathbf{0},\mathbf{I})$ , we iteratively denoise the sample in $T$ steps as + +$$ +\mathbf {f} _ {t - 1} = \frac {1}{\sqrt {\alpha_ {t}}} \left(\mathbf {f} _ {t} - \frac {1 - \alpha_ {t}}{\sqrt {1 - \bar {\alpha} _ {t}}} \boldsymbol {\epsilon} _ {\theta} (\mathbf {f} _ {t}, t)\right) + \sigma_ {t} \boldsymbol {\epsilon}, \qquad (7) +$$ + +where $\epsilon \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ for all but the very last step (i.e., $t = 1$ ), at which $\epsilon = 0$ and $\sigma_t^2 = \beta_t$ . + +The result of the denoising process, $\mathbf{f}_0$ , is a sample from the normalized triplane feature image distribution. Denormalizing it using the dataset normalization statistics and splitting the generated features into the axis aligned planes $\mathbf{f}_{xy}, \mathbf{f}_{yz}, \mathbf{f}_{xz}$ yields a set of triplane features which, when combined with the pre-trained MLP, are used to query the neural field. + +We use the marching cubes algorithm [41] to extract meshes from the resulting neural fields. Note that our framework is largely agnostic to the diffusion backbone used; we choose to use ADM [52], a 2D state-of-the-art diffusion model. + +Source code and pre-trained models will be made available. + +# 4. Experiments + +Datasets. To compare NFD against existing 3D generative methods, we train our model on three object categories from the ShapeNet dataset individually. Consistent with previous work [85, 86], we choose the categories: cars, chairs and airplanes. Each mesh is normalized to lie within $[-1,1]^3$ and then passed through watertighting. The generation of ground truth triplanes then works as follows: we precompute the occupancies of 10M points per object, where 5M points are distributed uniformly at random in the volume, and 5M points are sampled within a 0.01 distance from the mesh surface. + +
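As a concrete illustration of this preprocessing step, the sketch below shows how such coordinate-occupancy pairs could be generated for a single watertight mesh with the `trimesh` library. It is only an approximation of the pipeline described above, not the authors' released code: the function name, the uniform perturbation used for the near-surface samples, and the chunked inside/outside test are assumptions made here for clarity.

```python
import numpy as np
import trimesh

def sample_occupancy_points(mesh_path, n_uniform=5_000_000, n_surface=5_000_000,
                            surface_eps=0.01, chunk=500_000):
    """Precompute (coordinate, occupancy) pairs for one watertight mesh.

    Half of the points are drawn uniformly in the normalized [-1, 1]^3 volume and
    half near the mesh surface (within `surface_eps`), mirroring the 5M/5M split
    described above.
    """
    mesh = trimesh.load(mesh_path, force='mesh')

    # Normalize the mesh so it lies within [-1, 1]^3.
    center = (mesh.bounds[0] + mesh.bounds[1]) / 2.0
    scale = (mesh.bounds[1] - mesh.bounds[0]).max() / 2.0
    mesh.apply_translation(-center)
    mesh.apply_scale(1.0 / scale)

    # Uniform samples throughout the volume.
    uniform_pts = np.random.uniform(-1.0, 1.0, size=(n_uniform, 3))

    # Near-surface samples: surface points perturbed by a small random offset
    # (the exact perturbation scheme is an assumption).
    surface_pts, _ = trimesh.sample.sample_surface(mesh, n_surface)
    surface_pts = np.asarray(surface_pts) + np.random.uniform(
        -surface_eps, surface_eps, size=surface_pts.shape)

    points = np.concatenate([uniform_pts, surface_pts], axis=0).astype(np.float32)

    # Inside/outside test (requires a watertight mesh); chunked to bound memory.
    occupancy = np.concatenate([mesh.contains(points[i:i + chunk])
                                for i in range(0, len(points), chunk)])
    return points, occupancy.astype(np.float32)
```

In practice the resulting arrays would be cached to disk once per object, since the triplane fitting stage repeatedly draws minibatches from these 10M precomputed pairs.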
| Data | Method | FID ↓ | Precision ↑ | Recall ↑ |
| --- | --- | --- | --- | --- |
| Cars | PVD | 335.8 | 0.1 | 0.2 |
| Cars | SDF-StyleGAN | 98.0 | 35.9 | 36.2 |
| Cars | NFD (Ours) | 83.6 | 49.5 | 50.5 |
| Chairs | PVD | 305.8 | 0.2 | 1.7 |
| Chairs | SDF-StyleGAN | 36.5 | 90.9 | 87.4 |
| Chairs | NFD (Ours) | 26.4 | 92.4 | 94.8 |
| Planes | PVD | 244.4 | 2.7 | 3.8 |
| Planes | SDF-StyleGAN | 65.8 | 64.5 | 72.8 |
| Planes | NFD (Ours) | 32.4 | 70.5 | 81.1 |
+ +Table 1. Render quality metrics on ShapeNet. We achieve state-of-the-art FID, which measures overall quality, as well as well as state-of-the-art precision and recall, which measure fidelity and diversity independently. Metrics calculated on shaded renderings of generated and ground-truth shapes. + +We then train an MLP jointly with as many triplanes as we can fit in the GPU memory of a single A6000 GPU. In our case, we initially train on the first 500 objects in the dataset. After this initial joint optimization, we freeze the shared MLP and use it to optimize the triplanes of the remaining objects in the dataset. All triplanes beyond the first 500 are optimized individually with the same shared MLP; thus, the training of these triplanes can be effectively parallelized. + +Evaluation metrics. As in [85], we choose to evaluate our model using an adapted version of Fréchet inception distance (FID) that utilizes rendered shading images of our generated meshes. Shading-image FID [85] overcomes limitations of other mesh-based evaluation metrics such as the lightfield-descriptor (LFD) [8] by taking human perception into consideration. Zheng et al. [85] provide a detailed discussion of the various evaluation metrics for 3D generative models. Following the method [85], shading images of each shape are rendered from 20 distinct views; FID is then compared across each view and averaged to obtain a final score: + +$$ +\mathrm {F I D} = \frac {1}{2 0} \left[ \sum_ {i = 1} ^ {2 0} \| \mu_ {g} ^ {i} - \mu_ {r} ^ {i} \| ^ {2} + \operatorname {T r} \left(\Sigma_ {g} ^ {i} + \Sigma_ {r} ^ {i} - 2 \left(\Sigma_ {r} ^ {i} \Sigma_ {g} ^ {i}\right) ^ {\frac {1}{2}}\right) \right], \tag {8} +$$ + +where $g$ and $r$ represent the generated and training datasets, while $\mu^i,\Sigma^i$ represent the mean and covariance matrices for shading images rendered from the $i^{\mathrm{th}}$ view, respectively. + +Along with FID, we also report precision and recall scores using the method proposed by Sajjadi et al. [63]. While FID correlates well with perceived image quality, the one-dimensional nature of the metric prevents it from identifying different failure modes. Sajjadi et al. [63] aim to disentangle FID into separate metrics known as precision and recall, where the former correlates to the quality of the generated images and the latter represents the diversity of + +![](images/773685b9d6e947ccd79535897d9e9843a1d1e959a7ce373df923481ebb168c93.jpg) +Figure 4. We compare 3D shapes generated by our model against generations of state-of-the-art baselines for ShapeNet Cars, Chairs, and Planes. Our model synthesizes shapes with noticeably sharper details than the previous state-of-the-art, while also capturing the broad diversity in each category. + +the generative model. + +Baselines. We compare our method against state-of-the-art point-based and neural-field-based 3D generative models, namely PVD [86] and SDF-StyleGAN [85]. For evaluation, we use the pre-trained models for both methods on the three ShapeNet categories listed above. Note that PVD is inherently a point-based generative method and therefore does not output a triangle mesh needed for shading image rendering. To circumvent this, we choose to convert generated point clouds to triangle meshes using the ball-pivoting algorithm [3]. + +Results. We provide qualitative results, comparing samples generated by our method to samples generated by baselines, in Figure 4. Our method generates a diverse and finely detailed collection of objects. 
Objects produced by our method contain sharp edges and features that we would expect to be difficult to accurately reconstruct—note that delicate features, such as the suspension of cars, the slats in chairs, and armaments of planes, are faithfully generated. Perhaps more importantly, samples generated by our model are diverse—our model successfully synthesizes many different types of cars, chairs, + +and planes, including reproductions of several varieties that we would expect to be rare in the training dataset. + +In comparison, while $PVD$ also produces a wide variety of shapes, it is limited by its nature to generating only coarse object shapes. Furthermore, because $PVD$ produces a fixed-size point cloud with only 2048 points, it cannot synthesize fine elements. + +SDF-StyleGAN creates high-fidelity shapes, accurately reproducing many details, such as airplane engines and chair legs. However, our method is more capable of capturing very fine features. Note that while SDF-StyleGAN smooths over the division between tire and wheel well when generating cars, our method faithfully portrays this gap. Similarly, our method synthesizes the tails and engines of airplanes, and the legs and planks of chairs, with noticeably better definition. Our method also apparently generates a greater diversity of objects than SDF-StyleGAN. While SDF-StyleGAN capably generates varieties of each ShapeNet class, our method reproduces the same classes with greater variation. This is expected, as a noted advantage of diffusion models over GANs is better mode coverage. + +We provide quantitative results in Table 1. The metrics + +![](images/e662840be1dd297b5a1cc3012ca032bbe21da126e88633ab1d6a53ae896a00ed.jpg) +Figure 5. Interpolation. Our model learns a continuous latent space of triplanes. We can smoothly interpolate between two noise triplanes, resulting in semantically meaningful shape interpolation. + +tell a similar story to the qualitative results. Quantitatively, NFD outperforms all baselines in FID, precision, and recall for each ShapeNet category. FID is a standard one-number metric for evaluating generative models, and our performance under this evaluation indicates the generally better quality of object renderings. Precision evaluates the renderings' fidelity, and recall evaluates their diversity. Outperforming baselines in both precision and recall suggest that our model produces higher fidelity of shapes and a more diverse distribution of shapes. This is consistent with the qualitative results in Figure 4, where our method produced sharper and more complex objects while also covering more modes. + +Semantically meaningful interpolation. Figure 5 shows latent space interpolation between pairs of generated neural fields. As shown in prior work [69], smooth interpolation in the latent space of diffusion models can be achieved by interpolation between noise tensors before they are iteratively denoised by the model. As in their method, we sample from our trained model using a deterministic DDIM, and we use spherical interpolation so that the intermediate latent noise retains the same distribution. Our method is capable of smooth latent space interpolation in the generated triplanes and their corresponding neural fields. + +# 4.1. Ablation Studies + +We validate the design of our framework by ablating components of our regularization strategies using the cars dataset. + +Explicit density regularization. As discussed by Park et al. 
[57], the precision of the ground truth decoded meshes is limited by the finite number of point samples guiding the training of the decision boundaries. Because we rely on a limited number of pre-computed coordinate-occupancy pairs to train our triplanes, it is easy to overfit to this limited training set. Even when optimizing a single triplane in isolation (i.e., without learning a generative model), this overfitting manifests in "floater" artifacts in the optimized + +![](images/7ccde9f7d3c9b05386492caf1e4072c91ff08a6f9e1056d274d93a0cf11dd2c4.jpg) +Figure 6. Ablation over density regularization. Clear artifacts are visible in the resulting occupancy field without explicit density regularization. In this example, we optimize a single triplane on a single shape. + +![](images/64114b937693119bc137c461458451c0c9d23a9807c5a4fa980cd5a7db6858a0.jpg) + +neural field. Figure 6 shows an example where we fit a single triplane with and without density regularization. Without density regularization, the learned occupancy field contains significant artifacts; with density regularization, the learned occupancy field captures a clean object. + +Triplane regularization. Regularization of the triplanes is essential for training a well-behaved diffusion model. Figure 7 compares generated samples produced by our entire framework, with and without regularization terms. If we train only with Equation 2, i.e., without regularization terms, we can optimize a dataset of triplane features and train a diffusion model to generate samples. However, while the surfaces of the optimized shapes will appear real, the triplane features themselves will have many high-frequency artifacts, and these convoluted feature images are a difficult manifold for even a powerful diffusion model to learn. Consequently, generated triplane features produced by a trained diffusion model decode into shapes with significant artifacts. We note that these artifacts are present only in generated samples; shapes directly factored from the ground-truth shapes are artifact-free, even without regularization. + +Training with Equation 4 introduces TV, L2, and + +![](images/455982f9569880c9f471fd0d558e2c44254bb6a6d11764e49832bbaab8954a0b.jpg) +Figure 7. Ablation over regularized triplanes. A generative model trained on unregularized triplanes produces samples with significant artifacts. Effective regularization of triplane features enables training of a generative model that produces shapes without artifacts. Top left: triplane features learned only with Equation 2 contain many high frequency artifacts. Bottom left: a diffusion model trained on these unregularized triplanes fails to produce convincing samples. Top right: triplane features learned with Equation 4 are noticeably smoother. Bottom right: A diffusion model trained on these regularized triplanes produces high-quality shapes. + +![](images/c60f75281497649c58e8bb222ebb292080fa3a3368c595a1f50b4347c956e5cf.jpg) +Figure 8. Failure cases. We observe that our model at times generate axis-aligned artifacts and struggles to account for thin structures, likely caused by the use of a triplane representation. + +
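For concreteness, the regularized fitting objective being ablated in this section can be sketched as follows. This is a hedged PyTorch illustration in the spirit of Equation 4, not the authors' implementation: the helper names, the loss weights `lam_tv`, `lam_l2`, `lam_edr`, the offset scale `edr_sigma`, and the mean-based aggregation of each term are assumptions made here.

```python
import torch
import torch.nn.functional as F

def query_triplane(planes, mlp, coords):
    """Project points onto the three axis-aligned planes, bilinearly sample and
    sum the features, and decode them to occupancy logits (cf. Equation 1).

    planes: (3, C, N, N) feature planes for the x-y, x-z and y-z planes.
    coords: (B, 3) query points in [-1, 1]^3.
    """
    proj = torch.stack([coords[:, [0, 1]],    # x-y plane
                        coords[:, [0, 2]],    # x-z plane
                        coords[:, [1, 2]]])   # y-z plane, shape (3, B, 2)
    feats = F.grid_sample(planes, proj.unsqueeze(2),        # (3, C, B, 1)
                          mode='bilinear', align_corners=True)
    feats = feats.squeeze(-1).sum(dim=0).t()                # (B, C)
    return mlp(feats).squeeze(-1)                           # (B,) occupancy logits


def total_variation(planes):
    """Penalize differences between neighboring texels of the feature planes."""
    return ((planes[..., 1:, :] - planes[..., :-1, :]).abs().mean() +
            (planes[..., :, 1:] - planes[..., :, :-1]).abs().mean())


def regularized_loss(planes, mlp, coords, occ,
                     lam_tv=1e-2, lam_l2=1e-3, lam_edr=1e-2, edr_sigma=1e-2):
    """Per-shape fitting objective in the spirit of Equation 4."""
    # Binary cross-entropy between predicted and precomputed occupancies.
    occ_loss = F.binary_cross_entropy_with_logits(
        query_triplane(planes, mlp, coords), occ)

    # Explicit density regularization: the field should change little between a
    # random point in the volume and a slightly offset copy of it.
    rand_pts = torch.rand_like(coords) * 2.0 - 1.0
    offset = torch.randn_like(rand_pts) * edr_sigma
    edr = (query_triplane(planes, mlp, rand_pts) -
           query_triplane(planes, mlp, rand_pts + offset)).pow(2).mean()

    return (occ_loss
            + lam_tv * total_variation(planes)   # keep planes low-frequency
            + lam_l2 * planes.pow(2).mean()      # keep feature magnitudes bounded
            + lam_edr * edr)
```

Note that in this sketch the TV and L2 penalties act only on the feature planes, not on the shared MLP, which matches the stated goal of keeping the triplane manifold smooth and bounded enough for the diffusion model to learn.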
| Method | FID ↓ | Precision ↑ | Recall ↑ |
| --- | --- | --- | --- |
| No regularization | 285.8 | 1.6 | 0.6 |
| Density + TV + L2 Reg. | 83.6 | 49.5 | 50.5 |
+ +Table 2. Quantitative results for the ablation on triplane regularization. Our model performs poorly without explicit regularization on the triplanes. + +density regularizing factors. Triplanes learned with these regularization terms are noticeably smoother, with frequency distributions that more closely align with those found in natural images (see supplement). As we would expect, a diffusion model more readily learns the manifold of regularized triplane features. Samples produced by a diffusion model trained on these regularized shapes decode into convincing and artifact-free shapes. + +# 5. Discussion + +In summary, we introduce a 3D-aware diffusion model that uses a 2D diffusion backbone to generate triplane feature maps, which are assembled into 3D neural fields. Our approach improves the quality and diversity of generated objects over existing 3D-aware generative models by a large margin. + +Limitations. Similarly to other generative methods, training a diffusion model is slow and computationally demanding. Diffusion models, including ours, are also slow to evaluate, whereas GANs, for example, can be evaluated in real-time once trained. Luckily, our method will benefit from improvements to 2D diffusion models in this research area. + +![](images/6f305f6e728c27e4d5d4c9fd900359c6a2f0b9df74a00411a38f6a4aab854ad2.jpg) + +Slow sampling at inference could be addressed by more efficient samplers [30] and potentially enable real-time synthesis. While a step forward in quality, some of the samples generated by our method suffer from artifacts, as depicted by Fig. 8. Strategies like guidance [13, 27], which trade off diversity for fidelity, may reduce the prevalence of these outliers. + +Future Work. We have demonstrated an effective way to generate occupancy fields, but in principle, our approach can be extended to generating any type of neural field that can be represented by a triplane. In particular, triplanes have already been shown to be excellent representations for radiance fields, so it seems natural to extend our diffusion approach to generating NeRFs. While we demonstrate successful results for unconditional generation, conditioning our generative model on text, images, or other input would be an exciting avenue for future work. + +Ethical Considerations. Generative models, including ours, could be extended to generate DeepFakes. These pose a societal threat, and we do not condone using our work to generate fake images or videos of any person intending to spread misinformation or tarnish their reputation. + +Conclusion. 3D-aware object synthesis has many exciting applications in vision and graphics. With our work, which is among the first to connect powerful 2D diffusion models and 3D object synthesis, we take a significant step towards utilizing emerging diffusion models for this goal. + +# Acknowledgements + +We thank Vincent Sitzmann for valuable discussions. This project was in part supported by Samsung, the Stanford Institute for Human-Centered AI (HAI), the Stanford Center for Integrated Facility Engineering (CIFE), NSF RI #2211258, Autodesk, and a PECASE from the ARO. + +# References + +[1] Matan Atzmon and Yaron Lipman. SAL: Sign agnostic learning of shapes from raw data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[2] Miguel Ángel Bautista, Pengsheng Guo, Samira Abnar, Walter Talbott, Alexander Toshev, Zhuoyuan Chen, Laurent Dinh, Shuangfei Zhai, Hanlin Goh, Daniel Ulbricht, Afshin Dehghan, and Josh M. Susskind. 
GAUDI: A neural architect for immersive 3d scene generation. CoRR, abs/2207.13751, 2022. 1, 3 +[3] Fausto Bernardini, Joshua Mittleman, Holly E. Rushmeier, Cláudio T. Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Transactions on Visualization and Computer Graphics, 5:349-359, 1999. 6 +[4] Alexandre Boulch and Renaud Marlet. POCO: point convolution for surface reconstruction. CoRR, abs/2201.01831, 2022. 2 +[5] Rohan Chabra, Jan Eric Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local SDF priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 2 +[6] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16123-16133, June 2022. 1, 2, 3 +[7] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-GAN: Periodic implicit generative adversarial networks for 3D-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[8] Ding-Yun Chen, Xiao-Pei Tian, Edward Yu-Te Shen, and Ming Ouhyoung. On visual similarity based 3d model retrieval. Computer Graphics Forum, 22, 2003. 5 +[9] Yinbo Chen, Sifei Liu, and Xiaolong Wang. Learning continuous image representation with local implicit image function. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[10] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[11] Thomas Davies, Derek Nowrouzehrai, and Alec Jacobson. Overfit neural networks as a compact shape representation. arXiv preprint arXiv:2009.09808, 2020. 2 +[12] Terrance DeVries, Miguel Angel Bautista, Nitish Srivastava, Graham W. Taylor, and Joshua M. Susskind. Unconstrained scene generation with locally conditioned radiance fields. arXiv preprint arXiv:2104.00670, 2021. 2 +[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 8780-8794. Curran Associates, Inc., 2021. 2, 8 +[14] Emilien Dupont, Hyunjik Kim, S. M. Ali Eslami, Danilo J. Rezende, and Dan Rosenbaum. From data to functa: Your + +data point is a function and you should treat it like one. CoRR, abs/2201.12204, 2022. 1, 3 +[15] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 2018. 2 +[16] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis, 2020. 2 +[17] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411, 2017. 2 +[18] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. FastNeRF: High-fidelity neural rendering at 200fps. arXiv preprint arXiv:2103.10380, 2021. 
2 +[19] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3D shape. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[20] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[21] Simon Giebenhain and Bastian Goldlücke. Air-nets: An attention-based framework for locally conditioned implicit representations. In 3DV, pages 1054-1064. IEEE, 2021. 2 +[22] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems (NeurIPS), 2014. 2 +[23] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In International Conference on Machine Learning (ICML), 2020. 2 +[24] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. StyleNeRF: A style-based 3D-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2 +[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[26] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, volume 33, pages 6840-6851, 2020. 2, 5 +[27] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 8 +[28] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, and Thomas Funkhouser. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[29] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. SDFDiff: Differentiable rendering of signed distance fields for 3D shape optimization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[30] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. ArXiv, abs/2206.00364, 2022. 8 + +[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[32] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[33] Petr Kellnhofer, Lars Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[34] Adam R Kosiorek, Heiko Strathmann, Daniel Zoran, Pol Moreno, Rosalia Schneider, Sona Mokra, and Danilo Jimenez Rezende. Nerf-vae: A geometry aware 3d scene generative model. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 5742–5752. PMLR, 18–24 Jul 2021. 2 +[35] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2 +[36] David B Lindell, Julien NP Martel, and Gordon Wetzstein. AutoInt: Automatic integration for fast neural volume rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[37] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[38] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3D supervision. arXiv preprint arXiv:1911.00767, 2019. 2 +[39] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. DIST: Rendering deep implicit signed distance function with differentiable sphere tracing. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[40] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Transactions on Graphics (SIGGRAPH), 2019. 2 +[41] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (ToG), 1987. 5 +[42] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2837-2845, June 2021. 1, 3 +[43] Julien N.P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. ACORN: Adaptive coordinate networks for neural representation. ACM Transactions on Graphics (SIGGRAPH), 2021. 2 +[44] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the wild: Neural radiance fields for + +unconstrained photo collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[45] Ishit Mehta, Michael Gharbi, Connelly Barnes, Eli Shechtman, Ravi Ramamoorthi, and Manmohan Chandraker. Modulated periodic activations for generalizable local functional representations. In ICCV, pages 14194-14203. IEEE, 2021. 2 +[46] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6351-6361, October 2021. 2 +[47] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3 +[48] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[49] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3 +[50] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards Real-Time Rendering of Compact Neural Radiance Fields using Depth Oracle Networks. Computer Graphics Forum, 40(4), 2021. 
2 +[51] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019. 2 +[52] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8162-8171. PMLR, 18-24 Jul 2021. 2, 5 +[53] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11453-11464, June 2021. 2 +[54] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[55] Michael Oechsle, Songyou Peng, and Andreas Geiger. UNISURF: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[56] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: + +High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13503-13513, June 2022. 2 +[57] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3, 7 +[58] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision (ECCV), 2020. 2 +[59] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. arXiv preprint arXiv:2011.13961, 2020. 2 +[60] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2 +[61] Daniel Rebain, Mark Matthews, Kwang Moo Yi, Dmitry Lagun, and Andrea Tagliasacchi. Lolnerf: Learn from one look. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1558-1567, June 2022. 2 +[62] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[63] Mehdi S. M. Sajjadi, Olivier Bachem, Mario Lucic, Olivier Bousquet, and Sylvain Gelly. Assessing generative models via precision and recall. In NeurIPS, 2018. 5 +[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: Generative radiance fields for 3D-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[65] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. 
In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 3 +[66] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deep Voxels: Learning persistent 3D feature embeddings. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[67] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3D-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2 +[68] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 2256-2265, Lille, France, 07-09 Jul 2015. PMLR. 2 + +[69] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 7 +[70] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2 +[71] Yang Song and Stefano Ermon. Improved techniques for training score-based generative models. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 12438-12448. Curran Associates, Inc., 2020. 2 +[72] Pratul P. Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T. Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[73] Towaki Takikawa, Joey Litalien, Kangxue Yin, Karsten Kreis, Charles Loop, Derek Nowrouzezahrai, Alec Jacobson, Morgan McGuire, and Sanja Fidler. Neural geometric level of detail: Real-time rendering with implicit 3D shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[74] Ayush Tewari, Justus Thies, Ben Mildenhall, Pratul Srinivasan, Edgar Tretschk, W Yifan, Christoph Lassner, Vincent Sitzmann, Ricardo Martin-Brualla, Stephen Lombardi, et al. Advances in neural rendering. In Computer Graphics Forum, volume 41, pages 703-735. Wiley Online Library, 2022. 2 +[75] Hoang Thanh-Tung and Truyen Tran. Catastrophic forgetting and mode collapse in gans. In IJCNN, pages 1-10. IEEE, 2020. 2 +[76] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 11287-11302. Curran Associates, Inc., 2021. 2 +[77] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In Maria Florina Balcan and Kilian Q. Weinberger, editors, Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pages 1747-1756, New York, New York, USA, 20-22 Jun 2016. PMLR. 2 +[78] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. 
Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 2 +[79] Jiajun Wu, Chengkai Zhang, Tianfan Xue, Bill Freeman, and Josh Tenenbaum. Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 2 +[80] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin, + +Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Comput. Graph. Forum, 41(2):641-676, 2022. 2 +[81] Guangming Yao, Hongzhi Wu, Yi Yuan, and Kun Zhou. Dd-nerf: Double-diffusion neural radiance field as a generalizable implicit body representation. arXiv preprint arXiv:2112.12390, 2021. 3 +[82] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[83] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[84] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[85] Xin-Yang Zheng, Yang Liu, Peng-Shuai Wang, and Xin Tong. Sdf-stylegan: Implicit sdf-based stylegan for 3d shape generation. CoRR, abs/2206.12055, 2022. 2, 5, 6 +[86] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5826-5835, October 2021. 1, 3, 5, 6 +[87] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. CIPS-3D: A 3D-Aware Generator of GANs Based on Conditionally-Independent Pixel Synthesis. arXiv preprint arXiv:2110.09788, 2021. 
2 \ No newline at end of file diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/images.zip b/2023/3D Neural Field Generation Using Triplane Diffusion/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1479b339875a65879f87cb319cb2c766c45bf95e --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5945f2c7e35c2bb5313d2fe946911478fe82c522314f8902302ded2243af420a +size 530052 diff --git a/2023/3D Neural Field Generation Using Triplane Diffusion/layout.json b/2023/3D Neural Field Generation Using Triplane Diffusion/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3066c4aa79ecfbf85a024b8df8bd46f56f929473 --- /dev/null +++ b/2023/3D Neural Field Generation Using Triplane Diffusion/layout.json @@ -0,0 +1,8691 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 135, + 103, + 459, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 103, + 459, + 121 + ], + "spans": [ + { + "bbox": [ + 135, + 103, + 459, + 121 + ], + "type": "text", + "content": "3D Neural Field Generation using Triplane Diffusion" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 152, + 530, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 152, + 530, + 194 + ], + "spans": [ + { + "bbox": [ + 63, + 152, + 530, + 194 + ], + "type": "text", + "content": "J. Ryan Shue\\* Eric Ryan Chan\\*2 Ryan Po\\*2 Zachary Ankner\\*3,4 Jiajun Wu\\*2 Gordon Wetzstein\\*2 Milton Academy 2Stanford University 3Massachusetts Institute of Technology 4MosaicML" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "spans": [ + { + "bbox": [ + 143, + 227, + 192, + 240 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 252, + 290, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 252, + 290, + 445 + ], + "spans": [ + { + "bbox": [ + 45, + 252, + 290, + 445 + ], + "type": "text", + "content": "Diffusion models have emerged as the state-of-the-art for image generation, among other tasks. Here, we present an efficient diffusion-based model for 3D-aware generation of neural fields. Our approach pre-processes training data, such as ShapeNet meshes, by converting them to continuous occupancy fields and factoring them into a set of axis-aligned triplane feature representations. Thus, our 3D training scenes are all represented by 2D feature planes, and we can directly train existing 2D diffusion models on these representations to generate 3D neural fields with high quality and diversity, outperforming alternative approaches to 3D-aware generation. Our approach requires essential modifications to existing triplane factorization pipelines to make the resulting features easy to learn for the diffusion model. We demonstrate state-of-the-art results on 3D generation on several object classes from ShapeNet." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 478, + 127, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 127, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 127, + 490 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 498, + 288, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 288, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 288, + 581 + ], + "type": "text", + "content": "Diffusion models have seen rapid progress, setting state-of-the-art (SOTA) performance across a variety of image generation tasks. While most diffusion methods model 2D images, recent work [2, 14, 42, 86] has attempted to develop denoising methods for 3D shape generation. These 3D diffusion methods operate on discrete point clouds and, while successful, exhibit limited quality and resolution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 582, + 288, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 677 + ], + "type": "text", + "content": "In contrast to 2D diffusion, which directly leverages the image as the target for the diffusion process, it is not directly obvious how to construct such 2D targets in the case of 3D diffusion. Interestingly, recent work on 3D-aware generative adversarial networks (GANs) (see Sec. 2 for an overview) has demonstrated impressive results for 3D shape generation using 2D generators. We build upon this idea of learning to generate triplane representations [6] that encode 3D scenes or" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 228, + 545, + 393 + ], + "blocks": [ + { + "bbox": [ + 310, + 228, + 545, + 393 + ], + "lines": [ + { + "bbox": [ + 310, + 228, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 545, + 393 + ], + "type": "image", + "image_path": "450fe3152bfc1f768b556951bf1fcd42a550acbbd9424e433875c2618a8dea15.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 403, + 548, + 449 + ], + "lines": [ + { + "bbox": [ + 305, + 403, + 548, + 449 + ], + "spans": [ + { + "bbox": [ + 305, + 403, + 548, + 449 + ], + "type": "text", + "content": "Figure 1. Our method leverages existing 2D diffusion models for 3D shape generation using hybrid explicit-implicit neural representations. Top: triplane-based 3D shape diffusion process using our framework. Bottom: Interpolation between generated shapes." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 472, + 546, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 546, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 546, + 519 + ], + "type": "text", + "content": "radiance fields as a set of axis-aligned 2D feature planes. The structure of a triplane is analogous to that of a 2D image and can be used as part of a 3D generative method that leverages conventional 2D generator architectures." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 521, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 547, + 700 + ], + "type": "text", + "content": "Inspired by recent efforts in designing efficient 3D GAN architectures, we introduce a neural field-based diffusion framework for 3D representation learning. Our approach follows a two-step process. In the first step, a training set of 3D scenes is factored into a set of per-scene triplane features and a single, shared feature decoder. 
In the second step, a 2D diffusion model is trained on these triplanes. The trained diffusion model can then be used at inference time to generate novel and diverse 3D scenes. By interpreting triplanes as multi-channel 2D images and thus decoupling generation from rendering, we can leverage current (and likely future) SOTA 2D diffusion model backbones nearly out of the box. Fig. 1 illustrates how a single object is generated with our framework (top), and how two generated objects—even with different topologies—can be interpolated (bottom)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 701, + 470, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 470, + 712 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 470, + 712 + ], + "type": "text", + "content": "Our core contributions are as follows:" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 683, + 250, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 683, + 250, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 683, + 250, + 713 + ], + "type": "text", + "content": "*Equal contribution. \nPart of the work was done during an internship at Stanford. \nProject page: https://jryanshue.com/nfd" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20875" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 70, + 532, + 318 + ], + "blocks": [ + { + "bbox": [ + 62, + 70, + 532, + 318 + ], + "lines": [ + { + "bbox": [ + 62, + 70, + 532, + 318 + ], + "spans": [ + { + "bbox": [ + 62, + 70, + 532, + 318 + ], + "type": "image", + "image_path": "24404e688258727d62d4c8a74cb692aa929c6ec91ff1e58989e7466b255ce1e8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 326, + 547, + 361 + ], + "lines": [ + { + "bbox": [ + 46, + 326, + 547, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 326, + 547, + 361 + ], + "type": "text", + "content": "Figure 2. Visualization of the denoising process. Here, we show examples of triplanes as they are iteratively denoised at inference, as well as the shapes we obtain by \"decoding\" the noisy triplanes with our jointly-learned MLP. By interpreting triplane features simply as multi-channel feature images, we build our framework around 2D diffusion models." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 59, + 380, + 287, + 464 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 59, + 380, + 287, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 380, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 59, + 380, + 287, + 415 + ], + "type": "text", + "content": "- We introduce a generative framework for diffusion on 3D scenes that utilizes 2D diffusion model backbones and has a built-in 3D inductive bias." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 428, + 287, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 428, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 59, + 428, + 287, + 464 + ], + "type": "text", + "content": "- We show that our approach is capable of generating both high-fidelity and diverse 3D scenes that outperform state-of-the-art 3D GANs." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 476, + 133, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 476, + 133, + 489 + ], + "spans": [ + { + "bbox": [ + 47, + 476, + 133, + 489 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 498, + 290, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 290, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 290, + 715 + ], + "type": "text", + "content": "Neural fields. Implicit neural representations, or neural fields, hold the SOTA for 3D scene representation [74, 80]. They either solely learn geometry [1, 4, 5, 10, 11, 15, 21, 23, 45, 47, 48, 57, 65, 73] or use posed images to jointly optimize geometry and appearance [6, 7, 18, 25, 29, 33, 36–39, 44, 49, 50, 54, 55, 59, 67, 72, 82–84]. Neural fields represent scenes as continuous functions, allowing them to scale well with scene complexity compared to their discrete counterparts [40, 66]. Initial methods used a single, large multilayer perceptron (MLP) to represent entire scenes [10, 47, 49, 57, 65], but reconstruction with this approach can be computationally inefficient because training such a representation requires thousands of forward passes through the large model per scene. Recent years have shown a trend towards locally conditioned representations, which either learn local functions [5, 9, 28, 62] or locally modulate a shared function with a hybrid explicit-implicit representation [4, 6, 12, 19–21, 37, 43, 45, 58]. These methods use small MLPs, which are efficient during inference and significantly" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 380, + 547, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 380, + 547, + 453 + ], + "spans": [ + { + "bbox": [ + 304, + 380, + 547, + 453 + ], + "type": "text", + "content": "better at capturing local scene details. We adopt the expressive hybrid triplane representation introduced by Chan et al. [6]. Triplanes are efficient, scaling with the surface area rather than volume, and naturally integrate with expressive, fine-tuned 2D generator architectures. We modify the triplane representation for compatibility with our denoising framework." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 473, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 547, + 581 + ], + "type": "text", + "content": "Generative synthesis in 2D and 3D. Some of the most popular generative models include GANs [22, 31, 32], autoregressive models [16, 60, 77, 78], score matching models [68, 70, 71], and denoising diffusion probabilistic models (DDPMs) [13, 26, 52, 76]. DDPMs are arguably the SOTA approach for synthesizing high-quality and diverse 2D images [13]. Moreover, GANs can be difficult to train and suffer from issues like mode collapse [75] whereas diffusion models train stably and have been shown to better capture the full training distribution." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 582, + 548, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 548, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 548, + 713 + ], + "type": "text", + "content": "In 3D, however, GANs still outperform alternative generative approaches [6,7,17,24,34,35,37,46,51,53,56, 61,64,79,85,87]. Some of the most successful 3D GANs use an expressive 2D generator backbone (e.g., StyleGAN2 [32]) to synthesize triplane representations which are then decoded with a small, efficient MLP [6]. Because the decoder is small and must generalize across many local latents, these methods assign most of their expressiveness to the powerful backbone. In addition, these methods treat the triplane as a multi-channel image, allowing the generator backbone to be used almost out of the box." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20876" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 262 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 262 + ], + "type": "text", + "content": "Current 3D diffusion models [2, 14, 42, 81, 86] are still very limited. They either denoise a single latent or do not utilize neural fields at all, opting for a discrete point-cloud-based approach. For example, concurrently developed single-latent approaches [2, 14] generate a global latent for conditioning the neural field, relying on a 3D decoder to transform the scene representation from 1D to 3D without directly performing 3D diffusion. As a result, the diffusion model does not actually operate in 3D, losing this important inductive bias and generating blurry results. Point-cloud-based approaches [42, 86], on the other hand, give the diffusion model explicit 3D control over the shape, but limit its resolution and scalability due to the coarse discrete representation. While showing promise, both 1D-to-3D and point cloud diffusion approaches require specific architectures that cannot easily leverage recent advances in 2D diffusion models." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 263, + 290, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 263, + 290, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 263, + 290, + 373 + ], + "type": "text", + "content": "In our work, we propose to directly generate triplanes with out-of-the-box SOTA 2D diffusion models, granting the diffusion model near-complete control over the generated neural field. Key to our approach is our treatment of well-fit triplanes in a shared latent space as ground truth data for training our diffusion model. We show that the latent space of these triplanes is grounded spatially in local detail, giving the diffusion model a critical inductive bias for 3D generation. Our approach gives rise to an expressive 3D diffusion model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 381, + 219, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 381, + 219, + 395 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 219, + 395 + ], + "type": "text", + "content": "3. Triplane Diffusion Framework" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 401, + 288, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 401, + 288, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 401, + 288, + 521 + ], + "type": "text", + "content": "Here, we explain the architecture of our neural field diffusion (NFD) model for 3D shapes. In Section 3.1, we explain how we can represent the occupancy field of a single object using a triplane. In Section 3.2, we describe how we can extend this framework to represent an entire dataset of 3D objects. In Section 3.3, we describe the regularization techniques that we found necessary to achieve optimal results. Finally, Sections 3.4 and 3.5 illustrate training and sampling from our model. For an overview of the pipeline at inference, see Figure 3." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 527, + 264, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 264, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 264, + 540 + ], + "type": "text", + "content": "3.1. Representing a 3D Scene using a Triplane" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": "Neural fields have been introduced as continuous and expressive 3D scene representations. In this context, a neural field " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathrm{NF}:\\mathbb{R}^3\\to \\mathbb{R}^M" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " is a neural network-parameterized mapping function that takes as input a three-dimensional coordinate " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " and that outputs an " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": "-dimensional vector representing the neural field. 
Neural fields have been demonstrated for occupancy fields [47], signed distance functions [57], radiance fields [49], among many other types of signals [65]. For the remainder of this work, we focus on 3D scene representations using occupancy fields such that the output of the neural field is a binary value, indicating whether a coordinate is inside or outside an object and " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "M = 1" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 289, + 715 + ], + "type": "text", + "content": "The triplane representation is a hybrid explicit-implicit network architecture for neural fields that is particularly effi" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": "cient to evaluate [6]. This representation uses three 2D feature planes " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{xy},\\mathbf{f}_{xz},\\mathbf{f}_{yz}\\in \\mathbb{R}^{N\\times N\\times C}" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " with a spatial resolution of " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "N\\times N" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " feature channels each, and a multilayer perceptron (MLP) \"decoder\" tasked with interpreting features sampled from the planes. A 3D coordinate is queried by projecting it onto each of the axis-aligned planes (i.e., the " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "x - y,x - z," + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "y - z" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " planes), querying and aggregating the respective features, and decoding the resulting feature using a lightweight " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_{\\phi}" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": " with parameters " + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 72, + 547, + 205 + ], + "type": "text", + "content": ". Similar to Chan et al. 
[6], we found the sum to be an efficient feature aggregation function, resulting in the following formulation for the triplane architecture:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 341, + 216, + 547, + 230 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 216, + 547, + 230 + ], + "spans": [ + { + "bbox": [ + 341, + 216, + 547, + 230 + ], + "type": "interline_equation", + "content": "\\mathrm {N F} (\\mathbf {x}) = \\operatorname {M L P} _ {\\phi} \\left(\\mathbf {f} _ {x y} (\\mathbf {x}) + \\mathbf {f} _ {y z} (\\mathbf {x}) + \\mathbf {f} _ {x z} (\\mathbf {x})\\right). \\tag {1}", + "image_path": "c7c7aeb7f00143f6f77b629fbf0b23198e2fb3bdc3b44f995d8f1d62c7a4ddc1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 241, + 545, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 241, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 304, + 241, + 545, + 266 + ], + "type": "text", + "content": "The feature planes and MLP can be jointly optimized to represent the occupancy field of a shape." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 277, + 545, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 277, + 545, + 290 + ], + "spans": [ + { + "bbox": [ + 305, + 277, + 545, + 290 + ], + "type": "text", + "content": "3.2. Representing a Class of Objects with Triplanes" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 297, + 547, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 547, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 547, + 464 + ], + "type": "text", + "content": "We aim to convert our dataset of shapes into a dataset of triplanes so that we can train a diffusion model on these learned feature planes. However, because the MLP and feature planes are typically jointly learned, we cannot simply train a triplane for each object of the dataset individually. If we did, the MLP's corresponding to each object in our dataset would fail to generalize to triplanes generated by our diffusion model. Therefore, instead of training triplanes for each object in isolation, we jointly optimize the feature planes for many objects simultaneously, along with a decoder that is shared across all objects. This joint optimization results in a dataset of optimized feature planes and an MLP capable of interpreting any triplane from the dataset distribution. Thus, at inference, we can use this MLP to decode feature planes generated by our model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": "In practice, during training, we are given a dataset of " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " objects, and we preprocess the coordinates and ground-truth occupancy values of " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " points per object. 
Typically, " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "J = 10\\mathrm{M}" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " where 5M points are sampled uniformly throughout the volume and 5M points are sampled near the object surface. Our naive training objective is a simple " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " loss between predicted occupancy values " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathrm{NF}^{(i)}(\\mathbf{x}_j^{(i)})" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " and ground-truth occupancy values " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathrm{O}_j^{(i)}" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " for each point, where " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_j^{(i)}" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "j^{\\mathrm{th}}" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " point from the " + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "inline_equation", + "content": "i^{\\mathrm{th}}" + }, + { + "bbox": [ + 304, + 466, + 547, + 581 + ], + "type": "text", + "content": " scene:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 347, + 594, + 547, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 594, + 547, + 628 + ], + "spans": [ + { + "bbox": [ + 347, + 594, + 547, + 628 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {N A I V E}} = \\sum_ {i} ^ {I} \\sum_ {j} ^ {J} \\left\\| \\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right) - \\mathrm {O} _ {j} ^ {(i)} \\right\\| _ {2} \\tag {2}", + "image_path": "386ae1d5e7c07bb3bc3e4144d3a77daa7a2f71388a5ee8b27a4b3bd00cc37984.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 638, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 638, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 638, + 545, + 674 + ], + "type": "text", + "content": "During training, we optimize Equation 2 for a shared MLP parameterized by " + }, + { + "bbox": [ + 304, + 638, + 545, + 674 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 638, + 545, + 674 + ], + "type": "text", + "content": ", as well as the feature planes corresponding to every object in our dataset:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 342, + 685, + 547, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 685, + 547, + 716 + ], + "spans": [ + { + "bbox": [ + 342, + 685, + 547, + 716 + ], + "type": "interline_equation", + "content": "\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\} = \\underset {\\left\\{\\phi , \\mathbf {f} _ {x y} ^ {(i)}, \\mathbf {f} _ {x z} ^ {(i)}, \\mathbf {f} _ {y z} ^ {(i)} \\right\\}} {\\operatorname {a r g m i n}} \\mathcal {L} _ {\\mathrm {N A I V E}} \\tag 
{3}", + "image_path": "d0021d991a8c13364df7b3ef63c9802732bee36b10f2c84ac9de08de65b244d4.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20877" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 119, + 79, + 541, + 198 + ], + "blocks": [ + { + "bbox": [ + 119, + 79, + 541, + 198 + ], + "lines": [ + { + "bbox": [ + 119, + 79, + 541, + 198 + ], + "spans": [ + { + "bbox": [ + 119, + 79, + 541, + 198 + ], + "type": "image", + "image_path": "089437b1d30d05c909262096994b21a0a3a962f57b354646c1483ff477ef08e7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 213, + 547, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 213, + 547, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 213, + 547, + 258 + ], + "type": "text", + "content": "Figure 3. Pipeline. Sampling a 3D neural field from our model consists of two decoupled processes: 1) using a trained DDPM to iteratively denoise latent noise into feature maps and 2) using a locally conditioned Occupancy Network to decode the resulting triplane into the final neural field. This architecture allows the DDPM to generate samples with a 3D inductive bias while utilizing existing 2D DDPM backbones and a continuous output representation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 278, + 288, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 288, + 303 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 288, + 303 + ], + "type": "text", + "content": "3.3. Regularizing Triplanes for Effective Generalization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 384 + ], + "type": "text", + "content": "Following the procedure outlined in the previous section, we can learn a dataset of triplane features and a shared triplane decoder; we can then train a diffusion model on these triplane features and sample novel shapes at inference. Unfortunately, the result of this naive training procedure is a generative model for triplanes that produces shapes with significant artifacts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 385, + 288, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 385, + 288, + 505 + ], + "spans": [ + { + "bbox": [ + 46, + 385, + 288, + 505 + ], + "type": "text", + "content": "We find it necessary to regularize the triplane features during optimization to simplify the data manifold that the diffusion model must learn. Therefore, we include total variation (TV) regularization terms with weight " + }, + { + "bbox": [ + 46, + 385, + 288, + 505 + ], + "type": "inline_equation", + "content": "\\lambda_{1}" + }, + { + "bbox": [ + 46, + 385, + 288, + 505 + ], + "type": "text", + "content": " in the loss function to ensure that the feature planes of each training scene do not contain spurious high-frequency information. 
This strategy makes the distribution of triplane features more similar to the manifold of natural images (see supplement), which we found necessary to robustly train a diffusion model on them (see Sec. 4)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 508, + 287, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 287, + 580 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 287, + 580 + ], + "type": "text", + "content": "While the trained feature values are unbounded, our DDPM backbone requires training inputs with values in the range [-1,1]. We address this by normalizing the feature planes before training, but this process is sensitive to outliers. As a result, we include an L2 regularization term on the triplane features with weight " + }, + { + "bbox": [ + 46, + 508, + 287, + 580 + ], + "type": "inline_equation", + "content": "\\lambda_{2}" + }, + { + "bbox": [ + 46, + 508, + 287, + 580 + ], + "type": "text", + "content": " to discourage outlying values." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "content": "We also include an explicit density regularization (EDR) term. Due to our ground-truth occupancy data being concentrated on the surface of the shapes, there is often insufficient data to learn a smooth outside-of-shape volume. Our EDR term combats this issue by sampling a set of random points from the volume, offsetting the points by a random vector " + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "content": ", feeding both sets through the MLP, and calculating the mean squared error. Notationally, this term can be represented as " + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{EDR}(\\mathrm{NF}(\\mathbf{x}), \\omega) = \\| \\mathrm{NF}(\\mathbf{x}) - \\mathrm{NF}(\\mathbf{x} + \\omega) \\|_2^2" + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "content": ". We find this term necessary to remove floating artifacts in the volume (see Sec. 
4)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 279, + 547, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 279, + 547, + 302 + ], + "spans": [ + { + "bbox": [ + 305, + 279, + 547, + 302 + ], + "type": "text", + "content": "Our training objective, with added regularization terms, is as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 310, + 546, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 310, + 546, + 411 + ], + "spans": [ + { + "bbox": [ + 318, + 310, + 546, + 411 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} = \\sum_ {i} ^ {N} \\sum_ {j} ^ {M} \\operatorname {B C E} \\left(\\mathrm {N F} ^ {(i)} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right) - \\mathrm {O} _ {j} ^ {(i)}\\right) \\\\ + \\lambda_ {1} \\left(\\operatorname {T V} \\left(\\mathbf {f} _ {x y} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {x z} ^ {(i)}\\right) + \\operatorname {T V} \\left(\\mathbf {f} _ {y z} ^ {(i)}\\right)\\right) \\\\ + \\lambda_ {2} \\left(\\left| \\left| \\mathbf {f} _ {x y} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {y z} ^ {(i)} \\right| \\right| _ {2} + \\left| \\left| \\mathbf {f} _ {x z} ^ {(i)} \\right| \\right| _ {2}\\right) \\\\ + \\operatorname {E D R} \\left(\\mathrm {N F} \\left(\\mathbf {x} _ {j} ^ {(i)}\\right), \\boldsymbol {\\omega}\\right) \\tag {4} \\\\ \\end{array}", + "image_path": "3407bb7bb3480812f60f396c889531e0af62e7cca0fd4d955c1afd76c877e8ce.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 418, + 545, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 418, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 306, + 418, + 545, + 441 + ], + "type": "text", + "content": "3.4. Training a Diffusion Model for Triplane Features" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "content": "For unconditional generation, a diffusion model takes Gaussian noise as input and gradually denoises it in " + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "content": " steps. In our framework, the diffusion model operates on triplane features " + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{0\\dots T}\\in \\mathbb{R}^{N\\times N\\times 3C}" + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "content": " that stack the feature channels of all three triplane axes into a single image. In this notation, " + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{f}_T;0,\\mathbf{I})" + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "content": " is the triplane feature image consisting of purely Gaussian noise, and " + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_0\\sim q(\\mathbf{f}_0)" + }, + { + "bbox": [ + 304, + 449, + 547, + 580 + ], + "type": "text", + "content": " is a random sample drawn from the data distribution. 
The data distribution in our framework includes the pre-factored triplanes of the training set, normalized by the mean and variance of the entire dataset such that each channel has a zero mean and a standard deviation of 0.5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 581, + 547, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 547, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 547, + 617 + ], + "type": "text", + "content": "The forward or diffusion processes is a Markov chain that gradually adds Gaussian noise to the triplane features, according to a variance schedule " + }, + { + "bbox": [ + 304, + 581, + 547, + 617 + ], + "type": "inline_equation", + "content": "\\beta_{1},\\beta_{2},\\dots,\\beta_{T}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 348, + 625, + 546, + 646 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 625, + 546, + 646 + ], + "spans": [ + { + "bbox": [ + 348, + 625, + 546, + 646 + ], + "type": "interline_equation", + "content": "q \\left(\\mathbf {f} _ {t} \\mid \\mathbf {f} _ {t - 1}\\right) = \\mathcal {N} \\left(\\mathbf {f} _ {t}; \\sqrt {1 - \\beta_ {t}} \\mathbf {f} _ {t - 1}, \\beta_ {t} \\mathbf {I}\\right). \\tag {5}", + "image_path": "35c70574bfaa9c8b2db905fb79f110b6c16d237692b63948313320d572003f1c.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "content": "This forward process can be directly sampled at step " + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "content": " using the closed-form solution " + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "inline_equation", + "content": "q(\\mathbf{f}_t|\\mathbf{f}_0) = \\mathcal{N}(\\mathbf{f}_t;\\sqrt{\\bar{\\alpha}_t}\\mathbf{f}_0,(1 - \\bar{\\alpha}_t)\\mathbf{I})" + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\bar{\\alpha}_{t} = \\prod_{s = 1}^{t}\\alpha_{s}" + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "inline_equation", + "content": "\\alpha_{t} = 1 - \\beta_{t}" + }, + { + "bbox": [ + 304, + 654, + 545, + 690 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 690, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 690, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 305, + 690, + 545, + 714 + ], + "type": "text", + "content": "The goal of training a diffusion model is to learn the reverse process. 
For this purpose, a function approximator" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20878" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": " is needed that predicts the noise " + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": " from its noisy input. Typically, this function approximator is implemented as a variant of a convolutional neural network defined by its parameters " + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 72, + 289, + 145 + ], + "type": "text", + "content": ". Following [26], we train our triplane diffusion model by optimizing the simplified variant of the variational bound on negative log-likelihood:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 59, + 152, + 288, + 174 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 152, + 288, + 174 + ], + "spans": [ + { + "bbox": [ + 59, + 152, + 288, + 174 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {D D P M}} = \\mathbb {E} _ {t, \\mathbf {f} _ {0}, \\epsilon} \\left[ \\left\\| \\boldsymbol {\\epsilon} - \\boldsymbol {\\epsilon} _ {\\theta} \\left(\\sqrt {\\bar {\\alpha} _ {t}} \\mathbf {f} _ {0} + \\sqrt {1 - \\bar {\\alpha} _ {t}} \\boldsymbol {\\epsilon}, t\\right) \\right\\| ^ {2} \\right], \\tag {6}", + "image_path": "6a776ed117a24ecbf312d0bb7112b6582424c6b79062a3e61fa0bdc0b4565c96.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "spans": [ + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "text", + "content": " is sampled uniformly between 1 and " + }, + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 181, + 236, + 194 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 200, + 196, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 200, + 196, + 213 + ], + "spans": [ + { + "bbox": [ + 47, + 200, + 196, + 213 + ], + "type": "text", + "content": "3.5. 
Sampling Novel 3D Shapes" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 219, + 287, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 219, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 47, + 219, + 287, + 255 + ], + "type": "text", + "content": "The unconditional generation of shapes at inference is a two-stage process that involves sampling a triplane from the trained diffusion model and then querying the neural field." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "spans": [ + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "text", + "content": "Sampling a triplane from the diffusion model is identical to sampling an image from a diffusion model. Beginning with a random Gaussian noise " + }, + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_T\\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "text", + "content": ", we iteratively denoise the sample in " + }, + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 47, + 255, + 287, + 303 + ], + "type": "text", + "content": " steps as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 311, + 288, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 311, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 80, + 311, + 288, + 338 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {t - 1} = \\frac {1}{\\sqrt {\\alpha_ {t}}} \\left(\\mathbf {f} _ {t} - \\frac {1 - \\alpha_ {t}}{\\sqrt {1 - \\bar {\\alpha} _ {t}}} \\boldsymbol {\\epsilon} _ {\\theta} (\\mathbf {f} _ {t}, t)\\right) + \\sigma_ {t} \\boldsymbol {\\epsilon}, \\qquad (7)", + "image_path": "ba3a2873d5bee22ca50f4121f469ad28b03a5c82c9b21a6721768246fc95e91e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(\\mathbf{0},\\mathbf{I})" + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "content": " for all but the very last step (i.e., " + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "content": "), at which " + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "inline_equation", + "content": "\\epsilon = 0" + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "inline_equation", + "content": "\\sigma_t^2 = \\beta_t" + }, + { + "bbox": [ + 46, + 346, + 287, + 370 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": "The result of the denoising process, " + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_0" + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": ", is a sample from the normalized triplane feature image distribution. Denormalizing it using the dataset normalization statistics and splitting the generated features into the axis aligned planes " + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{xy}, \\mathbf{f}_{yz}, \\mathbf{f}_{xz}" + }, + { + "bbox": [ + 46, + 370, + 287, + 441 + ], + "type": "text", + "content": " yields a set of triplane features which, when combined with the pre-trained MLP, are used to query the neural field." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 442, + 287, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 442, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 287, + 501 + ], + "type": "text", + "content": "We use the marching cubes algorithm [41] to extract meshes from the resulting neural fields. Note that our framework is largely agnostic to the diffusion backbone used; we choose to use ADM [52], a 2D state-of-the-art diffusion model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 502, + 287, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 502, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 502, + 287, + 525 + ], + "type": "text", + "content": "Source code and pre-trained models will be made available." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 536, + 127, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 536, + 127, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 536, + 127, + 550 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 556, + 288, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 556, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 556, + 288, + 677 + ], + "type": "text", + "content": "Datasets. To compare NFD against existing 3D generative methods, we train our model on three object categories from the ShapeNet dataset individually. Consistent with previous work [85, 86], we choose the categories: cars, chairs and airplanes. Each mesh is normalized to lie within " + }, + { + "bbox": [ + 46, + 556, + 288, + 677 + ], + "type": "inline_equation", + "content": "[-1,1]^3" + }, + { + "bbox": [ + 46, + 556, + 288, + 677 + ], + "type": "text", + "content": " and then passed through watertighting. The generation of ground truth triplanes then works as follows: we precompute the occupancies of 10M points per object, where 5M points are distributed uniformly at random in the volume, and 5M points are sampled within a 0.01 distance from the mesh surface." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 310, + 70, + 543, + 203 + ], + "blocks": [ + { + "bbox": [ + 310, + 70, + 543, + 203 + ], + "lines": [ + { + "bbox": [ + 310, + 70, + 543, + 203 + ], + "spans": [ + { + "bbox": [ + 310, + 70, + 543, + 203 + ], + "type": "table", + "html": "
<table><tr><th>Data</th><th>Method</th><th>FID ↓</th><th>Precision ↑</th><th>Recall ↑</th></tr>
<tr><td rowspan="3">Cars</td><td>PVD¹</td><td>335.8</td><td>0.1</td><td>0.2</td></tr>
<tr><td>SDF-StyleGAN</td><td>98.0</td><td>35.9</td><td>36.2</td></tr>
<tr><td>NFD (Ours)</td><td>83.6</td><td>49.5</td><td>50.5</td></tr>
<tr><td rowspan="3">Chairs</td><td>PVD</td><td>305.8</td><td>0.2</td><td>1.7</td></tr>
<tr><td>SDF-StyleGAN</td><td>36.5</td><td>90.9</td><td>87.4</td></tr>
<tr><td>NFD (Ours)</td><td>26.4</td><td>92.4</td><td>94.8</td></tr>
<tr><td rowspan="3">Planes</td><td>PVD</td><td>244.4</td><td>2.7</td><td>3.8</td></tr>
<tr><td>SDF-StyleGAN</td><td>65.8</td><td>64.5</td><td>72.8</td></tr>
<tr><td>NFD (Ours)</td><td>32.4</td><td>70.5</td><td>81.1</td></tr></table>
", + "image_path": "0ee6fdf78f27a9541f5facc73e5d45685bbbc5df0792a0230a6fe677281658c9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 211, + 547, + 267 + ], + "lines": [ + { + "bbox": [ + 304, + 211, + 547, + 267 + ], + "spans": [ + { + "bbox": [ + 304, + 211, + 547, + 267 + ], + "type": "text", + "content": "Table 1. Render quality metrics on ShapeNet. We achieve state-of-the-art FID, which measures overall quality, as well as well as state-of-the-art precision and recall, which measure fidelity and diversity independently. Metrics calculated on shaded renderings of generated and ground-truth shapes." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 288, + 547, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 288, + 547, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 288, + 547, + 384 + ], + "type": "text", + "content": "We then train an MLP jointly with as many triplanes as we can fit in the GPU memory of a single A6000 GPU. In our case, we initially train on the first 500 objects in the dataset. After this initial joint optimization, we freeze the shared MLP and use it to optimize the triplanes of the remaining objects in the dataset. All triplanes beyond the first 500 are optimized individually with the same shared MLP; thus, the training of these triplanes can be effectively parallelized." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 399, + 547, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 547, + 530 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 547, + 530 + ], + "type": "text", + "content": "Evaluation metrics. As in [85], we choose to evaluate our model using an adapted version of Fréchet inception distance (FID) that utilizes rendered shading images of our generated meshes. Shading-image FID [85] overcomes limitations of other mesh-based evaluation metrics such as the lightfield-descriptor (LFD) [8] by taking human perception into consideration. Zheng et al. [85] provide a detailed discussion of the various evaluation metrics for 3D generative models. 
Following the method [85], shading images of each shape are rendered from 20 distinct views; FID is then compared across each view and averaged to obtain a final score:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 539, + 545, + 581 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 539, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 312, + 539, + 545, + 581 + ], + "type": "interline_equation", + "content": "\\mathrm {F I D} = \\frac {1}{2 0} \\left[ \\sum_ {i = 1} ^ {2 0} \\| \\mu_ {g} ^ {i} - \\mu_ {r} ^ {i} \\| ^ {2} + \\operatorname {T r} \\left(\\Sigma_ {g} ^ {i} + \\Sigma_ {r} ^ {i} - 2 \\left(\\Sigma_ {r} ^ {i} \\Sigma_ {g} ^ {i}\\right) ^ {\\frac {1}{2}}\\right) \\right], \\tag {8}", + "image_path": "5c8583ebc4f8ae463975496d667c7b4c94fa71667da33636a6c98eed208d59d3.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "content": " represent the generated and training datasets, while " + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "inline_equation", + "content": "\\mu^i,\\Sigma^i" + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "content": " represent the mean and covariance matrices for shading images rendered from the " + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "inline_equation", + "content": "i^{\\mathrm{th}}" + }, + { + "bbox": [ + 304, + 582, + 547, + 617 + ], + "type": "text", + "content": " view, respectively." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "Along with FID, we also report precision and recall scores using the method proposed by Sajjadi et al. [63]. While FID correlates well with perceived image quality, the one-dimensional nature of the metric prevents it from identifying different failure modes. Sajjadi et al. [63] aim to disentangle FID into separate metrics known as precision and recall, where the former correlates to the quality of the generated images and the latter represents the diversity of" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 683, + 287, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 683, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 683, + 287, + 714 + ], + "type": "text", + "content": "1As PVD outputs 098 point clouds, we apply the ball pivoting algorithm (BPA) to PVD outputs before calculating FID. BPA was selected as it achieved a good balance between speed and quality." 
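As a worked illustration of Equation 8, the sketch below averages the per-view Fréchet distance over the 20 rendered views, assuming the per-view Inception statistics (mu, Sigma) for shading images of generated and training shapes are computed upstream; the function names are illustrative and not part of the paper's released code.

```python
import numpy as np
from scipy import linalg

def frechet_distance(mu_g, sigma_g, mu_r, sigma_r):
    """Fréchet distance between two Gaussians fitted to Inception features."""
    diff = mu_g - mu_r
    covmean, _ = linalg.sqrtm(sigma_r @ sigma_g, disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # discard tiny imaginary parts from numerical noise
    return float(diff @ diff + np.trace(sigma_g + sigma_r - 2.0 * covmean))

def multi_view_fid(gen_stats, real_stats):
    """Equation 8: average the per-view Fréchet distance over all rendered views.

    gen_stats / real_stats: lists of (mu_i, Sigma_i) pairs, one per view,
    computed from shading images of generated / training shapes respectively.
    """
    assert len(gen_stats) == len(real_stats)
    scores = [frechet_distance(mg, sg, mr, sr)
              for (mg, sg), (mr, sr) in zip(gen_stats, real_stats)]
    return float(np.mean(scores))
```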
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "20879" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 73, + 545, + 376 + ], + "blocks": [ + { + "bbox": [ + 51, + 73, + 545, + 376 + ], + "lines": [ + { + "bbox": [ + 51, + 73, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 51, + 73, + 545, + 376 + ], + "type": "image", + "image_path": "773685b9d6e947ccd79535897d9e9843a1d1e959a7ce373df923481ebb168c93.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 384, + 546, + 418 + ], + "lines": [ + { + "bbox": [ + 46, + 384, + 546, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 384, + 546, + 418 + ], + "type": "text", + "content": "Figure 4. We compare 3D shapes generated by our model against generations of state-of-the-art baselines for ShapeNet Cars, Chairs, and Planes. Our model synthesizes shapes with noticeably sharper details than the previous state-of-the-art, while also capturing the broad diversity in each category." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 439, + 135, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 439, + 135, + 451 + ], + "spans": [ + { + "bbox": [ + 47, + 439, + 135, + 451 + ], + "type": "text", + "content": "the generative model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 468, + 288, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 288, + 577 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 288, + 577 + ], + "type": "text", + "content": "Baselines. We compare our method against state-of-the-art point-based and neural-field-based 3D generative models, namely PVD [86] and SDF-StyleGAN [85]. For evaluation, we use the pre-trained models for both methods on the three ShapeNet categories listed above. Note that PVD is inherently a point-based generative method and therefore does not output a triangle mesh needed for shading image rendering. To circumvent this, we choose to convert generated point clouds to triangle meshes using the ball-pivoting algorithm [3]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 714 + ], + "type": "text", + "content": "Results. We provide qualitative results, comparing samples generated by our method to samples generated by baselines, in Figure 4. Our method generates a diverse and finely detailed collection of objects. Objects produced by our method contain sharp edges and features that we would expect to be difficult to accurately reconstruct—note that delicate features, such as the suspension of cars, the slats in chairs, and armaments of planes, are faithfully generated. 
Perhaps more importantly, samples generated by our model are diverse—our model successfully synthesizes many different types of cars, chairs," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 439, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 439, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 305, + 439, + 545, + 464 + ], + "type": "text", + "content": "and planes, including reproductions of several varieties that we would expect to be rare in the training dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "text", + "content": "In comparison, while " + }, + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "inline_equation", + "content": "PVD" + }, + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "text", + "content": " also produces a wide variety of shapes, it is limited by its nature to generating only coarse object shapes. Furthermore, because " + }, + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "inline_equation", + "content": "PVD" + }, + { + "bbox": [ + 304, + 466, + 545, + 525 + ], + "type": "text", + "content": " produces a fixed-size point cloud with only 2048 points, it cannot synthesize fine elements." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 530, + 546, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 530, + 546, + 697 + ], + "spans": [ + { + "bbox": [ + 304, + 530, + 546, + 697 + ], + "type": "text", + "content": "SDF-StyleGAN creates high-fidelity shapes, accurately reproducing many details, such as airplane engines and chair legs. However, our method is more capable of capturing very fine features. Note that while SDF-StyleGAN smooths over the division between tire and wheel well when generating cars, our method faithfully portrays this gap. Similarly, our method synthesizes the tails and engines of airplanes, and the legs and planks of chairs, with noticeably better definition. Our method also apparently generates a greater diversity of objects than SDF-StyleGAN. While SDF-StyleGAN capably generates varieties of each ShapeNet class, our method reproduces the same classes with greater variation. This is expected, as a noted advantage of diffusion models over GANs is better mode coverage." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 318, + 701, + 545, + 713 + ], + "type": "text", + "content": "We provide quantitative results in Table 1. 
The metrics" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20880" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 72, + 542, + 188 + ], + "blocks": [ + { + "bbox": [ + 52, + 72, + 542, + 188 + ], + "lines": [ + { + "bbox": [ + 52, + 72, + 542, + 188 + ], + "spans": [ + { + "bbox": [ + 52, + 72, + 542, + 188 + ], + "type": "image", + "image_path": "e662840be1dd297b5a1cc3012ca032bbe21da126e88633ab1d6a53ae896a00ed.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 199, + 547, + 222 + ], + "lines": [ + { + "bbox": [ + 46, + 199, + 547, + 222 + ], + "spans": [ + { + "bbox": [ + 46, + 199, + 547, + 222 + ], + "type": "text", + "content": "Figure 5. Interpolation. Our model learns a continuous latent space of triplanes. We can smoothly interpolate between two noise triplanes, resulting in semantically meaningful shape interpolation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 243, + 289, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 243, + 289, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 243, + 289, + 387 + ], + "type": "text", + "content": "tell a similar story to the qualitative results. Quantitatively, NFD outperforms all baselines in FID, precision, and recall for each ShapeNet category. FID is a standard one-number metric for evaluating generative models, and our performance under this evaluation indicates the generally better quality of object renderings. Precision evaluates the renderings' fidelity, and recall evaluates their diversity. Outperforming baselines in both precision and recall suggest that our model produces higher fidelity of shapes and a more diverse distribution of shapes. This is consistent with the qualitative results in Figure 4, where our method produced sharper and more complex objects while also covering more modes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 404, + 287, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 404, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 46, + 404, + 287, + 536 + ], + "type": "text", + "content": "Semantically meaningful interpolation. Figure 5 shows latent space interpolation between pairs of generated neural fields. As shown in prior work [69], smooth interpolation in the latent space of diffusion models can be achieved by interpolation between noise tensors before they are iteratively denoised by the model. As in their method, we sample from our trained model using a deterministic DDIM, and we use spherical interpolation so that the intermediate latent noise retains the same distribution. Our method is capable of smooth latent space interpolation in the generated triplanes and their corresponding neural fields." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 545, + 147, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 147, + 556 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 147, + 556 + ], + "type": "text", + "content": "4.1. 
Ablation Studies" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 564, + 288, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 564, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 47, + 564, + 288, + 588 + ], + "type": "text", + "content": "We validate the design of our framework by ablating components of our regularization strategies using the cars dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 715 + ], + "type": "text", + "content": "Explicit density regularization. As discussed by Park et al. [57], the precision of the ground truth decoded meshes is limited by the finite number of point samples guiding the training of the decision boundaries. Because we rely on a limited number of pre-computed coordinate-occupancy pairs to train our triplanes, it is easy to overfit to this limited training set. Even when optimizing a single triplane in isolation (i.e., without learning a generative model), this overfitting manifests in \"floater\" artifacts in the optimized" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 309, + 238, + 425, + 357 + ], + "blocks": [ + { + "bbox": [ + 309, + 238, + 425, + 357 + ], + "lines": [ + { + "bbox": [ + 309, + 238, + 425, + 357 + ], + "spans": [ + { + "bbox": [ + 309, + 238, + 425, + 357 + ], + "type": "image", + "image_path": "7ccde9f7d3c9b05386492caf1e4072c91ff08a6f9e1056d274d93a0cf11dd2c4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 369, + 545, + 414 + ], + "lines": [ + { + "bbox": [ + 305, + 369, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 545, + 414 + ], + "type": "text", + "content": "Figure 6. Ablation over density regularization. Clear artifacts are visible in the resulting occupancy field without explicit density regularization. In this example, we optimize a single triplane on a single shape." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 432, + 249, + 542, + 357 + ], + "blocks": [ + { + "bbox": [ + 432, + 249, + 542, + 357 + ], + "lines": [ + { + "bbox": [ + 432, + 249, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 432, + 249, + 542, + 357 + ], + "type": "image", + "image_path": "64114b937693119bc137c461458451c0c9d23a9807c5a4fa980cd5a7db6858a0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 435, + 545, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 435, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 305, + 435, + 545, + 495 + ], + "type": "text", + "content": "neural field. Figure 6 shows an example where we fit a single triplane with and without density regularization. Without density regularization, the learned occupancy field contains significant artifacts; with density regularization, the learned occupancy field captures a clean object." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "type": "text", + "content": "Triplane regularization. Regularization of the triplanes is essential for training a well-behaved diffusion model. 
Figure 7 compares generated samples produced by our entire framework, with and without regularization terms. If we train only with Equation 2, i.e., without regularization terms, we can optimize a dataset of triplane features and train a diffusion model to generate samples. However, while the surfaces of the optimized shapes will appear real, the triplane features themselves will have many high-frequency artifacts, and these convoluted feature images are a difficult manifold for even a powerful diffusion model to learn. Consequently, generated triplane features produced by a trained diffusion model decode into shapes with significant artifacts. We note that these artifacts are present only in generated samples; shapes directly factored from the ground-truth shapes are artifact-free, even without regularization." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "Training with Equation 4 introduces TV, L2, and" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20881" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 71, + 161, + 210 + ], + "blocks": [ + { + "bbox": [ + 51, + 71, + 161, + 210 + ], + "lines": [ + { + "bbox": [ + 51, + 71, + 161, + 210 + ], + "spans": [ + { + "bbox": [ + 51, + 71, + 161, + 210 + ], + "type": "image", + "image_path": "455982f9569880c9f471fd0d558e2c44254bb6a6d11764e49832bbaab8954a0b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 220, + 289, + 331 + ], + "lines": [ + { + "bbox": [ + 46, + 220, + 289, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 289, + 331 + ], + "type": "text", + "content": "Figure 7. Ablation over regularized triplanes. A generative model trained on unregularized triplanes produces samples with significant artifacts. Effective regularization of triplane features enables training of a generative model that produces shapes without artifacts. Top left: triplane features learned only with Equation 2 contain many high frequency artifacts. Bottom left: a diffusion model trained on these unregularized triplanes fails to produce convincing samples. Top right: triplane features learned with Equation 4 are noticeably smoother. Bottom right: A diffusion model trained on these regularized triplanes produces high-quality shapes." 
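A minimal sketch of how such regularizers could enter the per-object triplane-fitting objective follows. The exact losses and weights of Equations 2 and 4 are defined earlier in the paper, so the terms below (binary cross-entropy on pre-computed occupancies plus TV, L2, and a simple density penalty at random points) and all names are illustrative assumptions rather than the authors' implementation.

```python
import torch
import torch.nn.functional as F

def query_triplane(planes, mlp, pts):
    """Bilinearly sample the three axis-aligned feature planes at pts (N, 3) in
    [-1, 1]^3, aggregate by summation, and decode occupancy logits with the MLP."""
    f_xy, f_yz, f_xz = planes.chunk(3, dim=0)  # planes: (3*C, H, W)

    def sample(plane, uv):
        grid = uv.view(1, -1, 1, 2)
        out = F.grid_sample(plane.unsqueeze(0), grid, align_corners=True)
        return out.view(plane.shape[0], -1).t()  # (N, C)

    feat = (sample(f_xy, pts[:, [0, 1]]) + sample(f_yz, pts[:, [1, 2]])
            + sample(f_xz, pts[:, [0, 2]]))
    return mlp(feat)  # (N, 1) occupancy logits

def total_variation(planes):
    """TV penalty encouraging smooth, low-frequency triplane feature images."""
    dh = (planes[:, 1:, :] - planes[:, :-1, :]).abs().mean()
    dw = (planes[:, :, 1:] - planes[:, :, :-1]).abs().mean()
    return dh + dw

def fitting_loss(planes, mlp, pts, occ_gt, w_tv=1e-2, w_l2=1e-3, w_density=1e-3):
    """Regularized fitting objective for a single triplane (illustrative weights).

    pts: (N, 3) pre-computed sample coordinates; occ_gt: (N,) float occupancies.
    """
    recon = F.binary_cross_entropy_with_logits(
        query_triplane(planes, mlp, pts).squeeze(-1), occ_gt)   # data term (cf. Eq. 2)
    reg_tv = total_variation(planes)                             # smoothness
    reg_l2 = planes.pow(2).mean()                                # keep feature magnitudes small
    # Illustrative "density" penalty: discourage spurious occupancy ("floaters")
    # by penalizing mean predicted occupancy at uniformly random points.
    rand_pts = torch.rand_like(pts) * 2.0 - 1.0
    reg_density = torch.sigmoid(query_triplane(planes, mlp, rand_pts)).mean()
    return recon + w_tv * reg_tv + w_l2 * reg_l2 + w_density * reg_density
```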
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 175, + 72, + 284, + 209 + ], + "blocks": [ + { + "bbox": [ + 175, + 72, + 284, + 209 + ], + "lines": [ + { + "bbox": [ + 175, + 72, + 284, + 209 + ], + "spans": [ + { + "bbox": [ + 175, + 72, + 284, + 209 + ], + "type": "image", + "image_path": "c60f75281497649c58e8bb222ebb292080fa3a3368c595a1f50b4347c956e5cf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 208, + 547, + 243 + ], + "lines": [ + { + "bbox": [ + 305, + 208, + 547, + 243 + ], + "spans": [ + { + "bbox": [ + 305, + 208, + 547, + 243 + ], + "type": "text", + "content": "Figure 8. Failure cases. We observe that our model at times generates axis-aligned artifacts and struggles to account for thin structures, likely caused by the use of a triplane representation." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 342, + 272, + 378 + ], + "blocks": [ + { + "bbox": [ + 61, + 342, + 272, + 378 + ], + "lines": [ + { + "bbox": [ + 61, + 342, + 272, + 378 + ], + "spans": [ + { + "bbox": [ + 61, + 342, + 272, + 378 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>FID ↓</th><th>Precision ↑</th><th>Recall ↑</th></tr>
<tr><td>No regularization</td><td>285.8</td><td>1.6</td><td>0.6</td></tr>
<tr><td>Density + TV + L2 Reg.</td><td>83.6</td><td>49.5</td><td>50.5</td></tr></table>
", + "image_path": "ac7b069d0e3d7e90371fd868e440cfe025e2c0fa43c8e6342c4f1842b31debd4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 382, + 288, + 416 + ], + "lines": [ + { + "bbox": [ + 46, + 382, + 288, + 416 + ], + "spans": [ + { + "bbox": [ + 46, + 382, + 288, + 416 + ], + "type": "text", + "content": "Table 2. Quantitative results for the ablation on triplane regularization. Our model performs poorly without explicit regularization on the triplanes." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 435, + 287, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 435, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 435, + 287, + 533 + ], + "type": "text", + "content": "density regularizing factors. Triplanes learned with these regularization terms are noticeably smoother, with frequency distributions that more closely align with those found in natural images (see supplement). As we would expect, a diffusion model more readily learns the manifold of regularized triplane features. Samples produced by a diffusion model trained on these regularized shapes decode into convincing and artifact-free shapes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 544, + 116, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 116, + 556 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 116, + 556 + ], + "type": "text", + "content": "5. Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 564, + 288, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 564, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 564, + 288, + 624 + ], + "type": "text", + "content": "In summary, we introduce a 3D-aware diffusion model that uses a 2D diffusion backbone to generate triplane feature maps, which are assembled into 3D neural fields. Our approach improves the quality and diversity of generated objects over existing 3D-aware generative models by a large margin." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 641, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 289, + 714 + ], + "type": "text", + "content": "Limitations. Similarly to other generative methods, training a diffusion model is slow and computationally demanding. Diffusion models, including ours, are also slow to evaluate, whereas GANs, for example, can be evaluated in real-time once trained. Luckily, our method will benefit from improvements to 2D diffusion models in this research area." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 309, + 73, + 545, + 198 + ], + "blocks": [ + { + "bbox": [ + 309, + 73, + 545, + 198 + ], + "lines": [ + { + "bbox": [ + 309, + 73, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 545, + 198 + ], + "type": "image", + "image_path": "6f305f6e728c27e4d5d4c9fd900359c6a2f0b9df74a00411a38f6a4aab854ad2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 262, + 547, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 262, + 547, + 335 + ], + "spans": [ + { + "bbox": [ + 305, + 262, + 547, + 335 + ], + "type": "text", + "content": "Slow sampling at inference could be addressed by more efficient samplers [30] and potentially enable real-time synthesis. While a step forward in quality, some of the samples generated by our method suffer from artifacts, as depicted by Fig. 8. Strategies like guidance [13, 27], which trade off diversity for fidelity, may reduce the prevalence of these outliers." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 350, + 545, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 350, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 305, + 350, + 545, + 469 + ], + "type": "text", + "content": "Future Work. We have demonstrated an effective way to generate occupancy fields, but in principle, our approach can be extended to generating any type of neural field that can be represented by a triplane. In particular, triplanes have already been shown to be excellent representations for radiance fields, so it seems natural to extend our diffusion approach to generating NeRFs. While we demonstrate successful results for unconditional generation, conditioning our generative model on text, images, or other input would be an exciting avenue for future work." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 486, + 545, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 486, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 305, + 486, + 545, + 546 + ], + "type": "text", + "content": "Ethical Considerations. Generative models, including ours, could be extended to generate DeepFakes. These pose a societal threat, and we do not condone using our work to generate fake images or videos of any person intending to spread misinformation or tarnish their reputation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 562, + 545, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 562, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 305, + 562, + 545, + 622 + ], + "type": "text", + "content": "Conclusion. 3D-aware object synthesis has many exciting applications in vision and graphics. With our work, which is among the first to connect powerful 2D diffusion models and 3D object synthesis, we take a significant step towards utilizing emerging diffusion models for this goal." 
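As a recap of the generation-to-mesh path described in this paper (a generated triplane feature image is denormalized, split into the three axis-aligned planes, queried through the shared occupancy MLP on a dense grid, and meshed with marching cubes [41]), here is a minimal sketch; the channel layout, the summation of plane features, and the names occupancy_mlp, mean, and std are assumptions for illustration, not the released implementation.

```python
import torch
import torch.nn.functional as F
from skimage import measure  # marching cubes [41]

@torch.no_grad()
def triplane_to_mesh(f0, mean, std, occupancy_mlp, res=128, level=0.5):
    """Decode one generated (normalized) triplane feature image into a mesh.

    f0: (3*C, H, W) sample from the diffusion model; mean/std: dataset
    normalization statistics; occupancy_mlp: shared feature -> occupancy decoder.
    """
    feats = f0 * std + mean                   # denormalize
    f_xy, f_yz, f_xz = feats.chunk(3, dim=0)  # split into axis-aligned planes

    lin = torch.linspace(-1.0, 1.0, res)      # dense grid of queries in [-1, 1]^3
    x, y, z = torch.meshgrid(lin, lin, lin, indexing="ij")
    pts = torch.stack([x, y, z], dim=-1).reshape(-1, 3)

    def sample(plane, uv):
        # Bilinearly sample a (C, H, W) plane at 2D coordinates in [-1, 1]^2.
        grid = uv.view(1, -1, 1, 2)
        out = F.grid_sample(plane.unsqueeze(0), grid, align_corners=True)
        return out.view(plane.shape[0], -1).t()  # (N, C)

    feat = (sample(f_xy, pts[:, [0, 1]]) + sample(f_yz, pts[:, [1, 2]])
            + sample(f_xz, pts[:, [0, 2]]))
    occ = torch.sigmoid(occupancy_mlp(feat)).reshape(res, res, res)

    verts, faces, _, _ = measure.marching_cubes(occ.cpu().numpy(), level=level)
    return verts, faces
```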
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 633, + 409, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 633, + 409, + 647 + ], + "spans": [ + { + "bbox": [ + 306, + 633, + 409, + 647 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 654, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 547, + 712 + ], + "type": "text", + "content": "We thank Vincent Sitzmann for valuable discussions. This project was in part supported by Samsung, the Stanford Institute for Human-Centered AI (HAI), the Stanford Center for Integrated Facility Engineering (CIFE), NSF RI #2211258, Autodesk, and a PECASE from the ARO." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 757 + ], + "type": "text", + "content": "20882" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Matan Atzmon and Yaron Lipman. SAL: Sign agnostic learning of shapes from raw data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 288, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 288, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 288, + 179 + ], + "type": "text", + "content": "[2] Miguel Ángel Bautista, Pengsheng Guo, Samira Abnar, Walter Talbott, Alexander Toshev, Zhuoyuan Chen, Laurent Dinh, Shuangfei Zhai, Hanlin Goh, Daniel Ulbricht, Afshin Dehghan, and Josh M. Susskind. GAUDI: A neural architect for immersive 3d scene generation. CoRR, abs/2207.13751, 2022. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 179, + 288, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 179, + 288, + 224 + ], + "spans": [ + { + "bbox": [ + 53, + 179, + 288, + 224 + ], + "type": "text", + "content": "[3] Fausto Bernardini, Joshua Mittleman, Holly E. Rushmeier, Cláudio T. Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Transactions on Visualization and Computer Graphics, 5:349-359, 1999. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 288, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 256 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 256 + ], + "type": "text", + "content": "[4] Alexandre Boulch and Renaud Marlet. POCO: point convolution for surface reconstruction. CoRR, abs/2201.01831, 2022. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 288, + 312 + ], + "type": "text", + "content": "[5] Rohan Chabra, Jan Eric Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep local shapes: Learning local SDF priors for detailed 3D reconstruction. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 313, + 288, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 313, + 288, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 313, + 288, + 390 + ], + "type": "text", + "content": "[6] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 16123-16133, June 2022. 1, 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 390, + 288, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 390, + 288, + 445 + ], + "spans": [ + { + "bbox": [ + 53, + 390, + 288, + 445 + ], + "type": "text", + "content": "[7] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-GAN: Periodic implicit generative adversarial networks for 3D-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 446, + 288, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 446, + 288, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 446, + 288, + 479 + ], + "type": "text", + "content": "[8] Ding-Yun Chen, Xiao-Pei Tian, Edward Yu-Te Shen, and Ming Ouhyoung. On visual similarity based 3d model retrieval. Computer Graphics Forum, 22, 2003. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "type": "text", + "content": "[9] Yinbo Chen, Sifei Liu, and Xiaolong Wang. Learning continuous image representation with local implicit image function. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "type": "text", + "content": "[10] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 288, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 288, + 590 + ], + "type": "text", + "content": "[11] Thomas Davies, Derek Nowrouzehrai, and Alec Jacobson. Overfit neural networks as a compact shape representation. arXiv preprint arXiv:2009.09808, 2020. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 288, + 635 + ], + "type": "text", + "content": "[12] Terrance DeVries, Miguel Angel Bautista, Nitish Srivastava, Graham W. Taylor, and Joshua M. Susskind. Unconstrained scene generation with locally conditioned radiance fields. arXiv preprint arXiv:2104.00670, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 635, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 690 + ], + "type": "text", + "content": "[13] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 8780-8794. Curran Associates, Inc., 2021. 2, 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 712 + ], + "type": "text", + "content": "[14] Emilien Dupont, Hyunjik Kim, S. M. Ali Eslami, Danilo J. Rezende, and Dan Rosenbaum. From data to functa: Your" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 547, + 95 + ], + "type": "text", + "content": "data point is a function and you should treat it like one. CoRR, abs/2201.12204, 2022. 1, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 95, + 547, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 95, + 547, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 95, + 547, + 140 + ], + "type": "text", + "content": "[15] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 2018. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 141, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 545, + 163 + ], + "type": "text", + "content": "[16] Patrick Esser, Robin Rombach, and Björn Ommer. Taming transformers for high-resolution image synthesis, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 163, + 547, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 547, + 197 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 547, + 197 + ], + "type": "text", + "content": "[17] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411, 2017. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 198, + 545, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 198, + 545, + 231 + ], + "spans": [ + { + "bbox": [ + 308, + 198, + 545, + 231 + ], + "type": "text", + "content": "[18] Stephan J Garbin, Marek Kowalski, Matthew Johnson, Jamie Shotton, and Julien Valentin. FastNeRF: High-fidelity neural rendering at 200fps. arXiv preprint arXiv:2103.10380, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 231, + 545, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 231, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 308, + 231, + 545, + 275 + ], + "type": "text", + "content": "[19] Kyle Genova, Forrester Cole, Avneesh Sud, Aaron Sarna, and Thomas Funkhouser. Local deep implicit functions for 3D shape. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 276, + 545, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 320 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 320 + ], + "type": "text", + "content": "[20] Kyle Genova, Forrester Cole, Daniel Vlasic, Aaron Sarna, William T Freeman, and Thomas Funkhouser. Learning shape templates with structured implicit functions. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 545, + 354 + ], + "type": "text", + "content": "[21] Simon Giebenhain and Bastian Goldlücke. Air-nets: An attention-based framework for locally conditioned implicit representations. In 3DV, pages 1054-1064. IEEE, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 355, + 545, + 399 + ], + "type": "text", + "content": "[22] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems (NeurIPS), 2014. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 443 + ], + "type": "text", + "content": "[23] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. In International Conference on Machine Learning (ICML), 2020. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "type": "text", + "content": "[24] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. StyleNeRF: A style-based 3D-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "type": "text", + "content": "[25] Peter Hedman, Pratul P. Srinivasan, Ben Mildenhall, Jonathan T. Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 534, + 547, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 547, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 547, + 567 + ], + "type": "text", + "content": "[26] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, volume 33, pages 6840-6851, 2020. 2, 5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 568, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 545, + 590 + ], + "type": "text", + "content": "[27] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 590, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 635 + ], + "type": "text", + "content": "[28] Chiyu Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Nießner, and Thomas Funkhouser. Local implicit grid representations for 3D scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 308, + 635, + 545, + 679 + ], + "type": "text", + "content": "[29] Yue Jiang, Dantong Ji, Zhizhong Han, and Matthias Zwicker. SDFDiff: Differentiable rendering of signed distance fields for 3D shape optimization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 680, + 545, + 713 + ], + "type": "text", + "content": "[30] Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. ArXiv, abs/2206.00364, 2022. 
8" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "20883" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "type": "text", + "content": "[32] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "type": "text", + "content": "[33] Petr Kellnhofer, Lars Jebe, Andrew Jones, Ryan Spicer, Kari Pulli, and Gordon Wetzstein. Neural lumigraph rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 209, + 288, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 209, + 288, + 284 + ], + "spans": [ + { + "bbox": [ + 49, + 209, + 288, + 284 + ], + "type": "text", + "content": "[34] Adam R Kosiorek, Heiko Strathmann, Daniel Zoran, Pol Moreno, Rosalia Schneider, Sona Mokra, and Danilo Jimenez Rezende. Nerf-vae: A geometry aware 3d scene generative model. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 5742–5752. PMLR, 18–24 Jul 2021. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 286, + 287, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 286, + 287, + 340 + ], + "spans": [ + { + "bbox": [ + 49, + 286, + 287, + 340 + ], + "type": "text", + "content": "[35] Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 342, + 288, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 288, + 385 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 288, + 385 + ], + "type": "text", + "content": "[36] David B Lindell, Julien NP Martel, and Gordon Wetzstein. 
AutoInt: Automatic integration for fast neural volume rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 288, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 288, + 420 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 288, + 420 + ], + "type": "text", + "content": "[37] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 422, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 422, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 422, + 288, + 453 + ], + "type": "text", + "content": "[38] Shichen Liu, Shunsuke Saito, Weikai Chen, and Hao Li. Learning to infer implicit surfaces without 3D supervision. arXiv preprint arXiv:1911.00767, 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "text", + "content": "[39] Shaohui Liu, Yinda Zhang, Songyou Peng, Boxin Shi, Marc Pollefeys, and Zhaopeng Cui. DIST: Rendering deep implicit signed distance function with differentiable sphere tracing. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "type": "text", + "content": "[40] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. ACM Transactions on Graphics (SIGGRAPH), 2019. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 556, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 288, + 588 + ], + "type": "text", + "content": "[41] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (ToG), 1987. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 633 + ], + "type": "text", + "content": "[42] Shitong Luo and Wei Hu. Diffusion probabilistic models for 3d point cloud generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2837-2845, June 2021. 1, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 288, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 678 + ], + "type": "text", + "content": "[43] Julien N.P. Martel, David B. Lindell, Connor Z. Lin, Eric R. Chan, Marco Monteiro, and Gordon Wetzstein. ACORN: Adaptive coordinate networks for neural representation. ACM Transactions on Graphics (SIGGRAPH), 2021. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 288, + 712 + ], + "type": "text", + "content": "[44] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the wild: Neural radiance fields for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "unconstrained photo collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "type": "text", + "content": "[45] Ishit Mehta, Michael Gharbi, Connelly Barnes, Eli Shechtman, Ravi Ramamoorthi, and Manmohan Chandraker. Modulated periodic activations for generalizable local functional representations. In ICCV, pages 14194-14203. IEEE, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 140, + 547, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 547, + 194 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 547, + 194 + ], + "type": "text", + "content": "[46] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6351-6361, October 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 195, + 547, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 195, + 547, + 248 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 547, + 248 + ], + "type": "text", + "content": "[47] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3D reconstruction in function space. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 251, + 547, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 547, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 547, + 293 + ], + "type": "text", + "content": "[48] Mateusz Michalkiewicz, Jhony K Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 295, + 547, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 295, + 547, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 295, + 547, + 348 + ], + "type": "text", + "content": "[49] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 
2, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 350, + 547, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 547, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 547, + 415 + ], + "type": "text", + "content": "[50] Thomas Neff, Pascal Stadlbauer, Mathias Parger, Andreas Kurz, Joerg H. Mueller, Chakravarty R. Alla Chaitanya, Anton S. Kaplanyan, and Markus Steinberger. DONeRF: Towards Real-Time Rendering of Compact Neural Radiance Fields using Depth Oracle Networks. Computer Graphics Forum, 40(4), 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 416, + 547, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 416, + 547, + 469 + ], + "spans": [ + { + "bbox": [ + 308, + 416, + 547, + 469 + ], + "type": "text", + "content": "[51] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 472, + 547, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 472, + 547, + 535 + ], + "spans": [ + { + "bbox": [ + 308, + 472, + 547, + 535 + ], + "type": "text", + "content": "[52] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8162-8171. PMLR, 18-24 Jul 2021. 2, 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 590 + ], + "type": "text", + "content": "[53] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11453-11464, June 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 593, + 547, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 593, + 547, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 593, + 547, + 645 + ], + "type": "text", + "content": "[54] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 647, + 547, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 547, + 690 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 547, + 690 + ], + "type": "text", + "content": "[55] Michael Oechsle, Songyou Peng, and Andreas Geiger. UNISURF: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In IEEE International Conference on Computer Vision (ICCV), 2021. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "text", + "content": "[56] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf:" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "20884" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 288, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 288, + 116 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 288, + 116 + ], + "type": "text", + "content": "High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13503-13513, June 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 119, + 288, + 174 + ], + "type": "text", + "content": "[57] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 176, + 288, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 176, + 288, + 220 + ], + "spans": [ + { + "bbox": [ + 48, + 176, + 288, + 220 + ], + "type": "text", + "content": "[58] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "spans": [ + { + "bbox": [ + 48, + 222, + 288, + 255 + ], + "type": "text", + "content": "[59] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural radiance fields for dynamic scenes. arXiv preprint arXiv:2011.13961, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 257, + 288, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 257, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 48, + 257, + 288, + 312 + ], + "type": "text", + "content": "[60] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. Generating diverse high-fidelity images with vq-vae-2. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 314, + 288, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 314, + 288, + 369 + ], + "spans": [ + { + "bbox": [ + 48, + 314, + 288, + 369 + ], + "type": "text", + "content": "[61] Daniel Rebain, Mark Matthews, Kwang Moo Yi, Dmitry Lagun, and Andrea Tagliasacchi. Lolnerf: Learn from one look. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1558-1567, June 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 288, + 415 + ], + "type": "text", + "content": "[62] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. KiloNeRF: Speeding up neural radiance fields with thousands of tiny MLPs. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 417, + 288, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 417, + 288, + 449 + ], + "spans": [ + { + "bbox": [ + 48, + 417, + 288, + 449 + ], + "type": "text", + "content": "[63] Mehdi S. M. Sajjadi, Olivier Bachem, Mario Lucic, Olivier Bousquet, and Sylvain Gelly. Assessing generative models via precision and recall. In NeurIPS, 2018. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 452, + 288, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 452, + 288, + 496 + ], + "spans": [ + { + "bbox": [ + 48, + 452, + 288, + 496 + ], + "type": "text", + "content": "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: Generative radiance fields for 3D-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 498, + 288, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 288, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 288, + 542 + ], + "type": "text", + "content": "[65] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "text", + "content": "[66] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhöfer. Deep Voxels: Learning persistent 3D feature embeddings. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 288, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 634 + ], + "type": "text", + "content": "[67] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3D-structure-aware neural scene representations. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 636, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 288, + 712 + ], + "type": "text", + "content": "[68] Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In Francis Bach and David Blei, editors, Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 2256-2265, Lille, France, 07-09 Jul 2015. PMLR. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[69] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 108, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 547, + 162 + ], + "type": "text", + "content": "[70] Yang Song and Stefano Ermon. Generative modeling by estimating gradients of the data distribution. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 164, + 547, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 164, + 547, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 164, + 547, + 218 + ], + "type": "text", + "content": "[71] Yang Song and Stefano Ermon. Improved techniques for training score-based generative models. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 12438-12448. Curran Associates, Inc., 2020. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 220, + 547, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 547, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 547, + 274 + ], + "type": "text", + "content": "[72] Pratul P. Srinivasan, Boyang Deng, Xiuming Zhang, Matthew Tancik, Ben Mildenhall, and Jonathan T. Barron. NeRV: Neural reflectance and visibility fields for relighting and view synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 276, + 547, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 276, + 547, + 341 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 547, + 341 + ], + "type": "text", + "content": "[73] Towaki Takikawa, Joey Litalien, Kangxue Yin, Karsten Kreis, Charles Loop, Derek Nowrouzezahrai, Alec Jacobson, Morgan McGuire, and Sanja Fidler. Neural geometric level of detail: Real-time rendering with implicit 3D shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 343, + 547, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 547, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 547, + 398 + ], + "type": "text", + "content": "[74] Ayush Tewari, Justus Thies, Ben Mildenhall, Pratul Srinivasan, Edgar Tretschk, W Yifan, Christoph Lassner, Vincent Sitzmann, Ricardo Martin-Brualla, Stephen Lombardi, et al. Advances in neural rendering. In Computer Graphics Forum, volume 41, pages 703-735. Wiley Online Library, 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 399, + 547, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 547, + 431 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 547, + 431 + ], + "type": "text", + "content": "[75] Hoang Thanh-Tung and Truyen Tran. Catastrophic forgetting and mode collapse in gans. In IJCNN, pages 1-10. IEEE, 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 434, + 547, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 547, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 547, + 488 + ], + "type": "text", + "content": "[76] Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 11287-11302. Curran Associates, Inc., 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 490, + 547, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 547, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 547, + 566 + ], + "type": "text", + "content": "[77] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In Maria Florina Balcan and Kilian Q. Weinberger, editors, Proceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pages 1747-1756, New York, New York, USA, 20-22 Jun 2016. PMLR. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 568, + 547, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 547, + 622 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 547, + 622 + ], + "type": "text", + "content": "[78] Aaron van den Oord, Oriol Vinyals, and koray kavukcuoglu. Neural discrete representation learning. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 624, + 547, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 547, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 547, + 689 + ], + "type": "text", + "content": "[79] Jiajun Wu, Chengkai Zhang, Tianfan Xue, Bill Freeman, and Josh Tenenbaum. Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 29. Curran Associates, Inc., 2016. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 547, + 713 + ], + "type": "text", + "content": "[80] Yiheng Xie, Towaki Takikawa, Shunsuke Saito, Or Litany, Shiqin Yan, Numair Khan, Federico Tombari, James Tompkin," + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 317, + 757 + ], + "type": "text", + "content": "20885" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 408 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "Vincent Sitzmann, and Srinath Sridhar. Neural fields in visual computing and beyond. Comput. Graph. Forum, 41(2):641-676, 2022. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "text", + "content": "[81] Guangming Yao, Hongzhi Wu, Yi Yuan, and Kun Zhou. Dd-nerf: Double-diffusion neural radiance field as a generalizable implicit body representation. arXiv preprint arXiv:2112.12390, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 206 + ], + "type": "text", + "content": "[82] Lior Yariv, Yoni Kasten, Dror Moran, Meirav Galun, Matan Atzmon, Ronen Basri, and Yaron Lipman. Multiview neural surface reconstruction by disentangling geometry and appearance. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 251 + ], + "type": "text", + "content": "[83] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 253, + 288, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 288, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 288, + 285 + ], + "type": "text", + "content": "[84] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "type": "text", + "content": "[85] Xin-Yang Zheng, Yang Liu, Peng-Shuai Wang, and Xin Tong. Sdf-stylegan: Implicit sdf-based stylegan for 3d shape generation. CoRR, abs/2206.12055, 2022. 2, 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 320, + 288, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 288, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 288, + 364 + ], + "type": "text", + "content": "[86] Linqi Zhou, Yilun Du, and Jiajun Wu. 3d shape generation and completion through point-voxel diffusion. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 5826-5835, October 2021. 1, 3, 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 288, + 408 + ], + "type": "text", + "content": "[87] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. CIPS-3D: A 3D-Aware Generator of GANs Based on Conditionally-Independent Pixel Synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 749, + 318, + 757 + ], + "type": "text", + "content": "20886" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_content_list.json b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0159edb4396176e1f9d60d087a6990339b716d84 --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_content_list.json @@ -0,0 +1,1618 @@ +[ + { + "type": "text", + "text": "3D Registration with Maximal Cliques", + "text_level": 1, + "bbox": [ + 289, + 130, + 679, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiyu Zhang Jiaqi Yang* Shikun Zhang Yanning Zhang \nSchool of Computer Science, Northwestern Polytechnical University, China", + "bbox": [ + 163, + 180, + 782, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{2426988253, zhangshikun}@mail.nwpu.edu.cn; {jqyang, ynzhang}@nwpu.edu.cn", + "bbox": [ + 163, + 218, + 805, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As a fundamental problem in computer vision, 3D point cloud registration (PCR) aims to seek the optimal pose to align a point cloud pair. In this paper, we present a 3D registration method with maximal cliques (MAC). 
The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph for accurate pose hypotheses generation: 1) A compatibility graph is constructed to render the affinity relationship between initial correspondences. 2) We search for maximal cliques in the graph, each of which represents a consensus set. We perform node-guided clique selection then, where each node corresponds to the maximal clique with the greatest graph weight. 3) Transformation hypotheses are computed for the selected cliques by the SVD algorithm and the best hypothesis is used to perform registration. Extensive experiments on U3M, 3DMatch, 3DLoMatch and KITTI demonstrate that MAC effectively increases registration accuracy, outperforms various state-of-the-art methods and boosts the performance of deep-learned methods. MAC combined with deep-learned methods achieves state-of-the-art registration recall of $95.7\\%$ / $78.9\\%$ on 3DMatch / 3DLoMatch.", + "bbox": [ + 75, + 300, + 473, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 661, + 209, + 678 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Point cloud registration (PCR) is an important and fundamental problem in 3D computer vision and has a wide range of applications in localization [13], 3D object detection [17] and 3D reconstruction [25]. Given two 3D scans of the same object (or scene), the goal of PCR is to estimate a six-degree-of-freedom (6-DoF) pose transformation that accurately aligns the two input point clouds. Using point-to-point feature correspondences is a popular and robust solution to the PCR problem. However, due to the limitations of existing 3D keypoint detectors & descriptors, the limited overlap between point clouds and data noise, corre", + "bbox": [ + 75, + 686, + 468, + 854 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ed73bd9dfbf97bfefb637a76132fd13fe6bccc14c240ed6c4df0d7824aa855b4.jpg", + "image_caption": [ + "Figure 1. Comparison of maximal and maximum cliques on a low overlapping point cloud pair. Maximal cliques (MAC) effectively choose the optimal 6-DoF transformation hypothesis with low rotation error (RE) and translation error (TE) for two point clouds with a low inlier ratio, while the maximum clique fails in this case." + ], + "image_footnote": [], + "bbox": [ + 511, + 271, + 888, + 470 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "spondences generated by feature matching usually contain outliers, resulting in great challenges to accurate 3D registration.", + "bbox": [ + 496, + 597, + 892, + 641 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The problem of 3D registration by handling correspondences with outliers has been studied for decades. We classify them into geometric-only and deep-learned methods. For geometric-only methods [5, 6, 21, 30, 31, 38-41], random sample consensus (RANSAC) and its variants perform an iterative sampling strategy for registration. Although RANSAC-based methods are simple and efficient, their performance is highly vulnerable when the outlier rate increases, and it requires a large number of iterations to obtain acceptable results. Also, a series of global registration methods based on branch-and-bound (BnB) are proposed to search the 6D parameter space and obtain the optimal global solution. 
The main weakness of these methods is the high computational complexity, especially when the correspondence set is of a large magnitude and has an extremely high outlier rate. For deep-learned methods, some [1-4, 9, 10, 14, 16, 18, 19, 27, 35] focus on improving", + "bbox": [ + 496, + 643, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 98, + 863, + 222, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Code will be available at https://github.com/zhangxy0517/", + "bbox": [ + 78, + 876, + 467, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "3D-Registration-with-Maximal-Cliques.", + "bbox": [ + 78, + 888, + 369, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "17745", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "one module in the registration process, such as investigating more discriminate keypoint feature descriptors or more effective correspondence selection techniques, while the others [22, 29, 43] focus on registration in an end-to-end manner. However, deep-learned based methods require a large amount of data for training and usually lack generalization on different datasets. At present, it is still very challenging to achieve accurate registrations in the presence of heavy outliers and in cross-dataset conditions.", + "bbox": [ + 75, + 90, + 468, + 224 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a geometric-only 3D registration method based on maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph to generate accurate pose hypotheses. We first model the initial correspondence set as a compatibility graph, where each node represents a single correspondence and each edge between two nodes indicates a pair of compatible correspondences. Second, we search for maximal cliques in the graph and then use node-guided clique filtering to match each graph node with the appropriate maximal clique containing it. Compared with the maximum clique, MAC is a looser constraint and is able to mine more local information in a graph. This helps us to achieve plenty of correct hypotheses from a graph. Finally, transformation hypotheses are computed for the selected cliques by the SVD algorithm. The best hypothesis is selected to perform registration using popular hypothesis evaluation metrics in the RANSAC family. To summarize, our main contributions are as follows:", + "bbox": [ + 75, + 226, + 470, + 527 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a hypothesis generation method named MAC. Our MAC method is able to mine more local information in a graph, compared with the previous maximum clique constraint. 
We demonstrate that hypotheses generated by MAC are of high accuracy even in the presence of heavy outliers.", + "- Based on MAC, we present a novel PCR method, which achieves state-of-the-art performance on U3M, 3DMatch, 3DLoMatch and KITTI datasets. Notably, our geometric-only MAC method outperforms several state-of-the-art deep learning methods [3, 9, 19, 27]. MAC can also be inserted as a module into multiple deep-learning frameworks [1, 10, 18, 29, 43] to boost their performance. MAC combined with GeoTransformer achieves the state-of-the-art registration recall of $95.7\\% / 78.9\\%$ on 3DMatch / 3DLoMatch." + ], + "bbox": [ + 96, + 536, + 468, + 785 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 806, + 218, + 821 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Geometric-only PCR Methods", + "text_level": 1, + "bbox": [ + 76, + 832, + 346, + 848 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Various geometric-only methods [6, 8, 20, 36, 45] have been proposed recently. Typically, RANSAC and its variants [5, 13, 30, 31, 38-40] remain the dominant approaches", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to the problem of estimating a 6-DoF pose from correspondences. RANSAC iteratively samples correspondences from the initial set, generating and evaluating geometric estimations for each subset until a satisfactory solution is obtained. Efficient and robust evaluation metrics are extremely important for using RANSAC to achieve accurate registration. To address the current problems of time-consuming and noise-sensitive evaluation metrics, [40] analyzes the contribution of inliers and outliers during the computation and proposed several metrics that can effectively improve the registration performance of RANSAC. A large number of variants have also been proposed to achieve further improvement. For example, Rusu et al. [31] presented the simple consensus-based initial alignment (SACIA) method, which samples correspondences spread out on the point cloud and leverages the Huber penalty for evaluation. Graph cut RANSAC (GC-RANSAC) [5] uses the graph-cut algorithm before model re-fitting in the local optimization step. Compatibility-guided sample consensus (CG-SAC) [30] additionally considers the normal information of key points during the sampling process. Yang et al. [39] proposed the sample consensus by sampling compatibility triangles (SAC-COT) method, which generates estimations by ranking and sampling ternary loops from the compatibility graph. Although many previous efforts have been made, these methods suffer from low time efficiency and limited accuracy in cases with high outlier rates.", + "bbox": [ + 496, + 90, + 890, + 498 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A series of globally optimal methods based on BnB have been proposed recently. Yang et al. [41] proposed globally optimal ICP (GO-ICP), which rationalizes the planning of ICP update tasks at different stages, and its biggest advantage is that it minimizes the local optimum. Bustos and Chin [6] presented guaranteed outlier removal (GORE), which calculates the tight lower bound and tight upper bound for each correspondence and reduces the size of correspondence set by rejecting true outliers. 
Motivated by GORE, Li [21] proposed a polynomial time outlier removal method, which seeks the tight lower and upper bound by calculating the costs of correspondence matrix (CM) and augmented correspondence matrix (ACM). However, BnB techniques are sensitive to the cardinality of the input and are time-consuming for large-scale inputs.", + "bbox": [ + 496, + 500, + 890, + 726 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Deep-learned PCR Methods", + "text_level": 1, + "bbox": [ + 500, + 739, + 751, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In addition to geometric-only methods, recent works also adopt deep learning techniques to perform PCR. Some methods aim to detect more repeatable keypoints [4, 18] and extract more descriptive features [1, 10]. FCGF [10] computes the features in a single pass through a fully convolutional neural network without keypoint detection. D3Feat [4] uses a fully convolutional network to obtain local information of point clouds and a joint learning framework to achieve 3D local feature detection and description.", + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "17746", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c994fb4968d3c45090143f102140aac197908d5299793f364e48ec4f9e159630.jpg", + "image_caption": [ + "Figure 2. Pipeline of MAC. 1. Construct a graph for the initial correspondence set. 2. Select a set of maximal cliques from the graph as the consistent sets. 3. Generate and evaluate the hypotheses according to the consistent sets. 4. Select the best hypothesis to perform 3D registration." + ], + "image_footnote": [], + "bbox": [ + 89, + 97, + 883, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Predator [18] applies an attention mechanism to extract salient points in overlapping regions of the point clouds, thus achieving robust registration in the presence of low overlap rates. Spinnet [1] extracts local features which are rotationally invariant and sufficiently informative to enable accurate registration. Some methods [3, 9, 14, 27] focus on efficiently distinguishing correspondences as inliers and outliers. Deep global registration (DGR) [9] and 3DRegNet [27] classify a given correspondence by training end-to-end neural networks and using operators such as sparse convolution and point-by-point MLP. PointDSC [3] explicitly explores spatial consistency for removing outlier correspondences and 3D point cloud registration. Fu et al. [14] proposed a registration framework that utilizes deep graph matching (RGM) that can find robust and accurate point-to-point correspondences. More recently, several methods [29, 43] follow the detection-free methods and estimate the transformation in an end-to-end way. CoFiNet [43] extracts correspondences from coarse to fine without keypoint detection. GeoTransformer [29] learns geometric features for robust superpoint matching and is robust in low-overlap cases and invariant to rigid transformation.", + "bbox": [ + 75, + 308, + 472, + 638 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While deep learning techniques have demonstrated a great potential for PCR, these methods require a large amount of training data and their generalization is not always promising. By contrast, MAC does not require any training data and achieves more advanced performance than several deep-learned methods. 
Moreover, MAC can be served as a drop-on module in deep learning frameworks to boost their performance.", + "bbox": [ + 75, + 641, + 472, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. MAC", + "text_level": 1, + "bbox": [ + 76, + 775, + 148, + 790 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Problem Formulation", + "text_level": 1, + "bbox": [ + 76, + 801, + 279, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For two point clouds $\\mathbf{P}^s$ and $\\mathbf{P}^t$ to be aligned, we first extract local features for them using geometric or learned descriptors. Let $\\mathbf{p}^s$ and $\\mathbf{p}^t$ denote the points in the $\\mathbf{P}^s$ and $\\mathbf{P}^t$ , respectively. An initial correspondence set $\\mathbf{C}_{initial} = \\{\\mathbf{c}\\}$ is formed by matching feature descriptors,", + "bbox": [ + 76, + 824, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{c} = (\\mathbf{p}^s, \\mathbf{p}^t)$ . MAC estimates the 6-DoF pose transformation between $\\mathbf{P}^s$ and $\\mathbf{P}^t$ from $\\mathbf{C}_{initial}$ .", + "bbox": [ + 498, + 308, + 890, + 338 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our method is technically very simple, and its pipeline is shown in Fig. 2.", + "bbox": [ + 498, + 338, + 890, + 368 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Graph Construction", + "text_level": 1, + "bbox": [ + 500, + 376, + 692, + 392 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The graph space can more accurately depict the affinity relationship between correspondences than the Euclidean space. Therefore, we model the initial correspondences as a compatibility graph, where correspondences are represented by nodes and edges link nodes that are geometrically compatible. Here, we consider two approaches to construct a compatibility graph.", + "bbox": [ + 496, + 398, + 890, + 506 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- First Order Graph. The first order graph (FOG) is constructed based on the rigid distance constraint between the correspondence pair $(\\mathbf{c}_i, \\mathbf{c}_j)$ , which can be quantitatively measured as:", + "bbox": [ + 517, + 512, + 890, + 573 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nS _ {d i s t} \\left(\\mathbf {c} _ {i}, \\mathbf {c} _ {j}\\right) = \\left| \\left\\| \\mathbf {p} _ {i} ^ {s} - \\mathbf {p} _ {j} ^ {s} \\right\\| - \\left\\| \\mathbf {p} _ {i} ^ {t} - \\mathbf {p} _ {j} ^ {t} \\right\\| \\right|. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 580, + 890, + 614 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The compatibility score between $\\mathbf{c}_i$ and $\\mathbf{c}_j$ is given as:", + "bbox": [ + 531, + 619, + 890, + 636 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nS _ {c m p} (\\mathbf {c} _ {i}, \\mathbf {c} _ {j}) = \\exp (- \\frac {S _ {d i s t} (\\mathbf {c} _ {i} , \\mathbf {c} _ {j}) ^ {2}}{2 d _ {c m p} ^ {2}}), \\qquad (2)\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 646, + 890, + 683 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $d_{cmp}$ is a distance parameter. Notably, if $S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)$ is greater than a threshold $t_{cmp}$ , $\\mathbf{c}_i$ and $\\mathbf{c}_j$ form an edge $\\mathbf{e}_{ij}$ and $S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)$ is the weight of $\\mathbf{e}_{ij}$ , otherwise $S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)$ will be set to 0. 
Since the compatibility graph is undirected, the weight matrix $\\mathbf{W}_{FOG}$ is symmetric.", + "bbox": [ + 531, + 686, + 890, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Second Order Graph. The previous study [8] proposes a second order compatibility measure, which relates to the number of commonly compatible correspondences in the global set. The second order graph (SOG) evolves from FOG. The weight matrix $\\mathbf{W}_{\\mathit{SOG}}$ can be calculated as:", + "bbox": [ + 517, + 785, + 890, + 875 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {W} _ {S O G} = \\mathbf {W} _ {F O G} \\odot \\left(\\mathbf {W} _ {F O G} \\times \\mathbf {W} _ {F O G}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 885, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "17747", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\odot$ represents the element-wise product between two matrices.", + "bbox": [ + 107, + 90, + 468, + 119 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Both graph construction methods can adapt to our frameworks. Compared with FOG, 1) SOG has stricter edge construction conditions and a higher degree of compatibility with adjacent nodes; 2) SOG is sparser, which facilitates a more rapid search of cliques. In Sec. 4.5, we experimentally compare FOG and SOG in our MAC framework.", + "bbox": [ + 76, + 132, + 468, + 223 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Search Maximal Cliques", + "text_level": 1, + "bbox": [ + 76, + 234, + 302, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given an undirected graph $G = (\\mathbf{V}, \\mathbf{E})$ , clique $C = (\\mathbf{V}', \\mathbf{E}')$ , $\\mathbf{V}' \\subseteq \\mathbf{V}$ , $\\mathbf{E}' \\subseteq \\mathbf{E}$ is a subset of $G$ , in which any two nodes are connected by edges. A maximal clique is a clique that cannot be extended by adding any nodes. In particular, the maximal clique with the most nodes is the maximum clique of a graph.", + "bbox": [ + 76, + 257, + 468, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Searching for Maximal cliques. To generate hypotheses, RANSAC-based methods repeatedly take random samples from the correspondence set. Nevertheless, they fail to fully mine the affinity relationships between correspondences. Theoretically, inliers would form cliques in the graph, because inliers are usually geometrically compatible with each other. Previous works [23,24,28,36] focus on searching for maximum cliques in the graph, however, the maximum clique is a very tight constraint that only focuses on the global consensus information in a graph. Instead, we loosen the constraint and leverage maximal cliques to mine more local graph information.", + "bbox": [ + 75, + 349, + 468, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By using the igraph_maximal cliques function in the igraph $^1$ $\\mathrm{C}++$ library, which makes use of a modified Bron-Kerbosch algorithm [12], the search of maximal cliques can be very efficient. The process's worst time complexity is $\\mathcal{O}(d(n - d)3^{(d / 3)})$ , where $d$ is the degeneracy of the graph. 
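To make the construction in Eqs. (1)-(3) concrete, the following is a minimal NumPy sketch of building $\mathbf{W}_{FOG}$ and $\mathbf{W}_{SOG}$ from a set of putative correspondences. It is an illustration rather than the authors' released C++/PCL implementation; the function name, the array layout and the numeric defaults standing in for $d_{cmp}$ and $t_{cmp}$ are placeholders chosen for the example.

```python
import numpy as np

def compatibility_graphs(src_pts, tgt_pts, d_cmp=0.1, t_cmp=0.99):
    """Build the first- and second-order compatibility graphs (Eqs. 1-3).

    src_pts, tgt_pts: (N, 3) arrays; row i holds the correspondence c_i = (p_i^s, p_i^t).
    d_cmp, t_cmp: distance parameter and compatibility threshold (placeholder values).
    """
    # S_dist(c_i, c_j) = | ||p_i^s - p_j^s|| - ||p_i^t - p_j^t|| |   (Eq. 1)
    d_src = np.linalg.norm(src_pts[:, None, :] - src_pts[None, :, :], axis=-1)
    d_tgt = np.linalg.norm(tgt_pts[:, None, :] - tgt_pts[None, :, :], axis=-1)
    s_dist = np.abs(d_src - d_tgt)

    # Gaussian compatibility score (Eq. 2), thresholded to obtain weighted edges.
    s_cmp = np.exp(-s_dist ** 2 / (2.0 * d_cmp ** 2))
    w_fog = np.where(s_cmp > t_cmp, s_cmp, 0.0)
    np.fill_diagonal(w_fog, 0.0)  # no self-loops; W_FOG is symmetric by construction

    # Second-order graph (Eq. 3): element-wise product with W_FOG @ W_FOG, so an edge
    # keeps a non-zero weight only if its endpoints also share commonly compatible nodes.
    w_sog = w_fog * (w_fog @ w_fog)
    return w_fog, w_sog
```

This also makes explicit why the second-order graph is sparser than the first-order one: an edge survives in $\mathbf{W}_{SOG}$ only when its two correspondences additionally share at least one commonly compatible correspondence.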
Note that $d$ is typically small in our problem because the graph is usually sparse when dealing with point cloud correspondences.", + "bbox": [ + 76, + 530, + 468, + 650 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Node-guided Clique Selection. After executing the maximal clique searching procedure, we obtain the maximal clique set $MAC_{initial}$ . In practice, $MAC_{initial}$ usually contains tens of thousands of maximal cliques, which will make it very time-consuming if we consider all maximal cliques. We introduce a node-guided clique selection method in this section to reduce $|MAC_{initial}|$ . First, we calculate the weight for each clique in $MAC_{initial}$ . Given a clique $C_i = (\\mathbf{V}_i, \\mathbf{E}_i)$ , the weight $w_{C_i}$ is calculated as:", + "bbox": [ + 76, + 651, + 468, + 787 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nw _ {C _ {i}} = \\sum_ {e _ {j} \\in \\mathbf {E} _ {i}} w _ {e _ {j}}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 800, + 468, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $w_{e_j}$ represents the weight of edge $e_j$ in $\\mathbf{W}_{SOG}$ . A node may be included by multiple maximal cliques and we", + "bbox": [ + 76, + 845, + 468, + 876 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "only retain the one with the greatest weight for that node. Then, duplicated cliques are removed from the rest, obtaining $MAC_{\\text{selected}}$ . The motivation behind this is to use information about the local geometric structure around graph nodes to find the best consistent set of corresponding nodes. It is clear that the number of maximal cliques $|MAC_{\\text{selected}}|$ will not exceed $|\\mathbf{V}|$ . We could send these maximal cliques directly to the following stages for 3D registration. However, when $|\\mathbf{V}|$ is quite large, the number of retained maximal cliques can still be very large. Here, we propose several techniques to further filter the maximal cliques.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Normal consistency. In the maximal cliques, we find that the normal consistency is satisfied between each correspondence. Given two correspondences $\\mathbf{c}_i = (\\mathbf{p}_i^s,\\mathbf{p}_i^t)$ , $\\mathbf{c}_j = (\\mathbf{p}_j^s,\\mathbf{p}_j^t)$ and the normal vectors $\\mathbf{n}_i^s,\\mathbf{n}_j^s,\\mathbf{n}_i^t,\\mathbf{n}_j^t$ at the four points, the angular difference $\\alpha_{ij}^{s} = \\angle (\\mathbf{n}_{i}^{s},\\mathbf{n}_{j}^{s})$ , $\\alpha_{ij}^{t} = \\angle (\\mathbf{n}_{i}^{t},\\mathbf{n}_{j}^{t})$ between the normal vectors can be calculated then. The following inequality ought to hold if $\\mathbf{c}_i$ and $\\mathbf{c}_j$ are normal consistent:", + "bbox": [ + 517, + 268, + 890, + 404 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\sin \\alpha_ {i j} ^ {s} - \\sin \\alpha_ {i j} ^ {t} \\right| < t _ {\\alpha}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 630, + 414, + 890, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $t_{\\alpha}$ is a threshold for determining whether the angular differences are similar.", + "bbox": [ + 531, + 450, + 890, + 481 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Clique ranking. We organize $MAC_{\\text{selected}}$ in a descending order using the clique's weight $w_{C_i}$ . The top- $K$ ones are supposed to be more likely to produce correct hypotheses. 
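The node-guided selection described above can be sketched as follows. The snippet uses NetworkX's Bron-Kerbosch-based `find_cliques` as a stand-in for the igraph routine used by the authors; the function name, the minimum clique size of 3 and the optional top-$K$ cut-off mirror the text, but this is an illustrative sketch rather than the reference implementation.

```python
import numpy as np
import networkx as nx

def node_guided_clique_selection(w_sog, k=None):
    """Search maximal cliques and keep, per node, the clique with the largest weight (Eq. 4)."""
    g = nx.from_numpy_array(w_sog)            # one node per correspondence, edges where w_sog > 0
    best_per_node = {}                        # node index -> (clique weight, clique)
    for clique in nx.find_cliques(g):         # maximal cliques via Bron-Kerbosch
        if len(clique) < 3:                   # lower bound on clique size used in the paper
            continue
        idx = np.asarray(clique)
        w_c = w_sog[np.ix_(idx, idx)].sum() / 2.0   # Eq. 4: sum of edge weights inside the clique
        for v in clique:
            if v not in best_per_node or w_c > best_per_node[v][0]:
                best_per_node[v] = (w_c, tuple(sorted(clique)))
    # Drop duplicated cliques, rank the rest by weight, optionally keep only the top-K.
    ranked = sorted(set(best_per_node.values()), key=lambda wc: wc[0], reverse=True)
    cliques = [c for _, c in ranked]
    return cliques[:k] if k is not None else cliques
```

Each returned clique is a consensus set that is then passed to the SVD-based hypothesis generation of Sec. 3.4.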
This makes it flexible to control the number of hypotheses.", + "bbox": [ + 517, + 494, + 890, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "These techniques' experimental analysis is presented in Sec. 4.5.", + "bbox": [ + 500, + 582, + 890, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Hypothesis Generation and Evaluation", + "text_level": 1, + "bbox": [ + 500, + 625, + 833, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Each maximal clique filtered from the previous step represents a consistent set of correspondences. By applying the SVD algorithm to each consistency set, we can obtain a set of 6-DoF pose hypotheses.", + "bbox": [ + 496, + 648, + 890, + 709 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Instance-equal SVD. Transformation estimation of correspondences is often implemented with SVD. Instance-equal means that the weights of all correspondences are equal.", + "- Weighted SVD. Assigning weights to correspondences is commonly adopted by recent PCR methods [8, 9, 27, 29]. Correspondence weights can be derived by solving the eigenvectors of a compatibility matrix constructed for a compatibility graph. Here, we take the primary eigenvalues of $\\mathbf{W}_{SOG}$ as correspondence weights." + ], + "bbox": [ + 517, + 720, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1https://igraph.org", + "bbox": [ + 94, + 886, + 196, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "17748", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final goal of MAC is to estimate the optimal 6-DoF rigid transformation (composed of a rotation pose $\\mathbf{R}^{*} \\in SO(3)$ and a translation pose $\\mathbf{t}^{*} \\in \\mathbb{R}^{3}$ ) that maximizes the objective function as follows:", + "bbox": [ + 76, + 90, + 470, + 151 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\mathbf {R} ^ {*}, \\mathbf {t} ^ {*}\\right) = \\arg \\max _ {\\mathbf {R}, \\mathbf {t}} \\sum_ {i = 1} ^ {N} s \\left(\\mathbf {c} _ {i}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 162, + 468, + 204 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{c}_i \\in \\mathbf{C}_{initial}$ , $N = |\\mathbf{C}_{initial}|$ , and $s(\\mathbf{c}_i)$ represents the score of $\\mathbf{c}_i$ . We consider several RANSAC hypothesis evaluation metrics here [40], including mean average error (MAE), mean square error (MSE) and inlier count. Their behaviors will be experimentally compared in Sec. 4.5. The best hypothesis is taken to perform 3D registration then.", + "bbox": [ + 75, + 215, + 470, + 308 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 321, + 209, + 339 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 347, + 266, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We consider four datasets, i.e., the object-scale dataset U3M [26], the scene-scale indoor datasets 3DMatch [44] & 3DLoMatch [18], and the scene-scale outdoor dataset KITTI [15]. U3M has 496 point cloud pairs. 3DLoMatch is the subset of 3DMatch, where the overlap rate of the point cloud pairs ranges from $10\\%$ to $30\\%$ , which is very challenging. 
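To illustrate the hypothesis generation and evaluation of Sec. 3.4: each selected clique yields a pose hypothesis via (optionally weighted) SVD, and every hypothesis is scored against the full correspondence set as in Eq. (6). The helper names and the inlier threshold below are illustrative; plain inlier counting is shown, while the MAE/MSE variants of [40] additionally weight each inlier by its residual.

```python
import numpy as np

def pose_from_clique(src, tgt, weights=None):
    """Rigid transform (R, t) from one consensus set via (weighted) SVD."""
    w = np.ones(len(src)) if weights is None else np.asarray(weights, dtype=float)
    w = w / w.sum()
    c_src = (w[:, None] * src).sum(axis=0)            # weighted centroids
    c_tgt = (w[:, None] * tgt).sum(axis=0)
    H = (src - c_src).T @ np.diag(w) @ (tgt - c_tgt)  # 3x3 cross-covariance
    U, _, Vt = np.linalg.svd(H)
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(Vt.T @ U.T))])  # guard against reflections
    R = Vt.T @ D @ U.T
    t = c_tgt - R @ c_src
    return R, t

def score_hypothesis(R, t, src_all, tgt_all, inlier_thresh=0.1):
    """Score a hypothesis over the full correspondence set (cf. Eq. 6) by inlier counting."""
    residuals = np.linalg.norm(src_all @ R.T + t - tgt_all, axis=1)
    return int((residuals < inlier_thresh).sum())
```

The hypothesis with the highest score over all selected cliques is the one used to register the two point clouds.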
For KITTI, we follow [3,8] and obtain 555 pairs of point clouds for testing.", + "bbox": [ + 75, + 371, + 468, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation Criteria. We follow [39] that employs the root mean square error (RMSE) metric to evaluate the 3D point cloud registration performance on the U3M object-scale dataset. In addition, we employ the rotation error (RE) and translation error (TE) to evaluate the registration results on the scene-scale dataset. By referring to the settings in [9], the registration is considered successful when the $\\mathrm{RE} \\leq 15^{\\circ}$ , $\\mathrm{TE} \\leq 30\\mathrm{cm}$ on 3DMatch & 3DLoMatch datasets, and $\\mathrm{RE} \\leq 5^{\\circ}$ , $\\mathrm{TE} \\leq 60\\mathrm{cm}$ on KITTI dataset. We define a dataset's registration accuracy as the ratio of success cases to the number of point cloud pairs to be registered.", + "bbox": [ + 75, + 492, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation Details. Our method is implemented in $\\mathrm{C + + }$ based on the point cloud library (PCL) [32] and igraph library. For U3M, we use the Harris3D (H3D) [33] keypoint detector and the signatures of histograms of orientation (SHOT) [34] descriptor for initial correspondence generation as in [42]. For 3DMatch and 3DLoMatch datasets, we use the fast point features histograms (FPFH) [31] descriptor and fully convolutional geometric features (FCGF) [10] descriptor to generate the initial correspondence set. The main steps in the comparative experimental sections are SOG construction, searching node-guided maximal cliques, hypotheses generation by instance-equal SVD and evaluation by MAE. Default values for compatibility threshold $t_{cmp}$ and distance parameter $d_{cmp}$ mentioned in Sec. 3.2 are 0.99 and 10 pr respectively; if input matches exceed 5000, $t_{cmp}$ is set to 0.999 to reduce computation. Here, 'pr' is", + "bbox": [ + 75, + 659, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/915becacf5a5f7f86b3b83b2b994f7cce90c29f0959816bf014c5716987f61ea.jpg", + "image_caption": [ + "Figure 3. Registration performance of tested point cloud registration methods on U3M." + ], + "image_footnote": [], + "bbox": [ + 519, + 87, + 870, + 270 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/46c8d1e799c0e70a1a9cce162e9669ace9cb79dbfbe41649d5b43e260720b879.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm)
i) Traditional: SM [20] | 55.88 | 2.94 | 8.15 | 86.57 | 2.29 | 7.07
FGR [45] | 40.91 | 4.96 | 10.25 | 78.93 | 2.90 | 8.41
RANSAC-1M [13] | 64.20 | 4.05 | 11.35 | 88.42 | 3.05 | 9.42
RANSAC-4M [13] | 66.10 | 3.95 | 11.03 | 91.44 | 2.69 | 8.38
GC-RANSAC [5] | 67.65 | 2.33 | 6.87 | 92.05 | 2.33 | 7.11
TEASER++ [36] | 75.48 | 2.48 | 7.31 | 85.77 | 2.73 | 8.66
CG-SAC [30] | 78.00 | 2.40 | 6.89 | 87.52 | 2.42 | 7.66
SC2-PCR [8] | 83.73 | 2.18 | 6.70 | 93.16 | 2.09 | 6.51
ii) Deep learned: 3DRegNet [27] | 26.31 | 3.75 | 9.60 | 77.76 | 2.74 | 8.13
DGR [9] | 32.84 | 2.45 | 7.53 | 88.85 | 2.28 | 7.02
DHVR [19] | 67.10 | 2.78 | 7.84 | 91.93 | 2.25 | 7.08
PointDSC [3] | 72.95 | 2.18 | 6.45 | 91.87 | 2.10 | 6.54
MAC | 84.10 | 1.96 | 6.18 | 93.72 | 1.89 | 6.03
", + "bbox": [ + 503, + 332, + 890, + 522 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Registration results on 3DMatch dataset.", + "bbox": [ + 547, + 532, + 841, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a distance unit called point cloud resolution [42]. Normal vectors are calculated using the NormalEstimation class of PCL with the 20 nearest neighboring points. When searching maximal cliques, the lower bound on clique size is set to 3 with no upper bound defined. All experiments were implemented with an Intel 12700H CPU and 32 GB RAM.", + "bbox": [ + 498, + 584, + 890, + 675 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Results on U3M Dataset", + "text_level": 1, + "bbox": [ + 500, + 700, + 720, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We perform an extensive comparison in Fig. 3. Here, the following methods are tested, including SAC-COT [39], OSAC [37], SAC-IA [31], RANSAC [13], $\\mathrm{SC^2}$ -PCR [8], FGR [45], GO-ICP [41], and PPF [11], where the former four are RANSAC-based methods. The RMSE threshold is varied from $0.5\\mathrm{pr}$ to $5\\mathrm{pr}$ with a step of $0.5\\mathrm{pr}$ .", + "bbox": [ + 498, + 729, + 890, + 820 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The results indicate that MAC performs best and significantly outperforms all tested RANSAC fashion estimators, such as SAC-COT, OSAC, SAC-IA, and RANSAC. The registration performance of MAC based on the MAE evaluation metric is the best on U3M.", + "bbox": [ + 498, + 824, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "17749", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2bd16836d7f7a2efb673c9709cea157ecfca83850e2272315882a5f17770a4cf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm)
i) Traditional: RANSAC-1M [13] | 0.67 | 10.27 | 15.06 | 9.77 | 7.01 | 14.87
RANSAC-4M [13] | 0.45 | 10.39 | 20.03 | 10.44 | 6.91 | 15.14
TEASER++ [36] | 35.15 | 4.38 | 10.96 | 46.76 | 4.12 | 12.89
SC2-PCR [8] | 38.57 | 4.03 | 10.31 | 58.73 | 3.80 | 10.44
ii) Deep learned: DGR [9] | 19.88 | 5.07 | 13.53 | 43.80 | 4.17 | 10.82
PointDSC [3] | 20.38 | 4.04 | 10.25 | 56.20 | 3.87 | 10.48
MAC | 40.88 | 3.66 | 9.45 | 59.85 | 3.50 | 9.75
", + "bbox": [ + 81, + 88, + 472, + 213 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7011ef40e2df5f173527d5a49895d687b00a16b3e8e1529e6b785fbbb54a91d9.jpg", + "table_caption": [ + "Table 2. Registration results on 3DLoMatch dataset." + ], + "table_footnote": [], + "table_body": "
# Samples | 3DMatch RR(%) | 3DLoMatch RR(%)
Method | 5000 | 2500 | 1000 | 500 | 250 | 5000 | 2500 | 1000 | 500 | 250
FCGF [10] | 85.1 | 84.7 | 83.3 | 81.6 | 71.4 | 40.1 | 41.7 | 38.2 | 35.4 | 26.8
SpinNet [1] | 88.6 | 86.6 | 85.5 | 83.5 | 70.2 | 59.8 | 54.9 | 48.3 | 39.8 | 26.8
Predator [18] | 89.0 | 89.9 | 90.6 | 88.5 | 86.6 | 59.8 | 61.2 | 62.4 | 60.8 | 58.1
CoFiNet [43] | 89.3 | 88.9 | 88.4 | 87.4 | 87.0 | 67.5 | 66.2 | 64.2 | 63.1 | 61.0
GeoTransformer [29] | 92.0 | 91.8 | 91.8 | 91.4 | 91.2 | 75.0 | 74.8 | 74.2 | 74.1 | 73.5
FCGF+MAC | 91.3 | 92.2 | 91.6 | 90.4 | 85.6 | 57.2 | 56.0 | 52.6 | 42.4 | 32.1
 | 6.2↑ | 7.5↑ | 8.3↑ | 8.8↑ | 14.2↑ | 17.1↑ | 14.3↑ | 14.4↑ | 7.0↑ | 5.3↑
SpinNet+MAC | 95.3 | 95.1 | 93.3 | 91.4 | 81.2 | 72.8 | 69.9 | 59.2 | 54.8 | 32.1
 | 6.7↑ | 8.5↑ | 7.8↑ | 7.9↑ | 11.0↑ | 13.0↑ | 15.0↑ | 10.9↑ | 15.0↑ | 5.3↑
Predator+MAC | 94.6 | 94.4 | 94.0 | 93.5 | 92.3 | 70.9 | 70.4 | 69.8 | 67.2 | 64.1
 | 5.6↑ | 4.5↑ | 3.4↑ | 5.0↑ | 5.7↑ | 11.1↑ | 9.2↑ | 7.4↑ | 6.4↑ | 6.0↑
CoFiNet+MAC | 94.1 | 94.4 | 94.5 | 93.8 | 92.7 | 71.6 | 71.5 | 70.6 | 69.2 | 68.1
 | 4.8↑ | 5.5↑ | 6.1↑ | 6.4↑ | 5.7↑ | 4.1↑ | 5.3↑ | 6.4↑ | 6.1↑ | 7.1↑
GeoTransformer+MAC | 95.7 | 95.7 | 95.2 | 95.3 | 94.6 | 78.9 | 78.7 | 78.2 | 77.7 | 76.6
 | 3.7↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.9↑ | 4.0↑ | 3.6↑ | 3.1↑
", + "bbox": [ + 81, + 253, + 468, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Results on 3DMatch & 3DLoMatch Datasets", + "text_level": 1, + "bbox": [ + 76, + 464, + 454, + 479 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "PCR methods comparison. Both geometric-only and deep-learned methods are considered for comparison, including SM [20], FGR [45], RANSAC [13], TEASER++ [36], CG-SAC [30], $\\mathrm{SC^2}$ -PCR [8], 3DRegNet [27], DGR [9], DHVR [19] and PointDSC [3]. Results are shown in Tables 1 and 2.", + "bbox": [ + 76, + 489, + 468, + 579 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The following conclusions can be made: 1) regardless of which descriptor is used, MAC outperforms all compared methods on both 3DMatch and 3DLoMatch datasets, indicating its strong ability to register indoor scene point clouds; 2) even compared with deep-learned methods, MAC still achieves better performance without any data training; 3) in addition to the registration recall (RR) metric, MAC achieves the best RE and TE metrics. This indicates that registrations by MAC are very accurate and MAC is able to align low overlapping data.", + "bbox": [ + 75, + 580, + 468, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Boosting deep-learned methods with MAC. Several kinds of state-of-the-art deep-learned methods are integrated with MAC for evaluation. The considered methods are FCGF [10], SpinNet [1], Predator [18], CoFiNet [43] and GeoTransformer [29]. Each method is tested under a different number of samples, which refer to the number of sampled points or correspondences. Results are reported in Table 3.", + "bbox": [ + 75, + 733, + 468, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Remarkably, MAC dramatically improves the registration recall under all tested methods on both 3DMatch and 3DLoMatch datasets. Notably, the performance of SpinNet, Predator and CoFiNet after boosting by MAC exceeds", + "bbox": [ + 76, + 839, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/09d1d0b8deacac927256bd60536890e4e062968851c57158079a570977083844.jpg", + "table_caption": [ + "Table 3. Performance boosting for deep-learned methods when combined with MAC." + ], + "table_footnote": [], + "table_body": "
Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm)
i) Traditional: FGR [45] | 5.23 | 0.86 | 43.84 | 89.54 | 0.46 | 25.72
TEASER++ [36] | 91.17 | 1.03 | 17.98 | 94.96 | 0.38 | 13.69
RANSAC [13] | 74.41 | 1.55 | 30.20 | 80.36 | 0.73 | 26.79
CG-SAC [30] | 74.23 | 0.73 | 14.02 | 83.24 | 0.56 | 22.96
SC2-PCR [8] | 99.28 | 0.39 | 8.68 | 97.84 | 0.33 | 20.58
ii) Deep learned: DGR [9] | 77.12 | 1.64 | 33.10 | 96.90 | 0.34 | 21.70
PointDSC [3] | 98.92 | 0.38 | 8.35 | 97.84 | 0.33 | 20.32
MAC | 99.46 | 0.40 | 8.46 | 97.84 | 0.34 | 19.34
", + "bbox": [ + 504, + 88, + 893, + 227 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. Registration results on KITTI dataset.", + "bbox": [ + 555, + 237, + 834, + 251 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "that of GeoTransformer. MAC working with GeoTransformer achieves state-of-the-art registration recall of $95.7\\%$ / $78.9\\%$ on 3DMatch / 3DLoMatch. The results suggest that: 1) MAC can greatly boost existing deep-learned methods; 2) MAC is not sensitive to the number of samples.", + "bbox": [ + 496, + 277, + 890, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Results on KITTI Dataset", + "text_level": 1, + "bbox": [ + 498, + 362, + 735, + 376 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Table 4, the results of DGR [9], PointDSC [3], TEASER++ [36], RANSAC [13], CG-SAC [30], $\\mathrm{SC^2}$ -PCR [8] and MAC are reported for comparison.", + "bbox": [ + 498, + 385, + 890, + 430 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown by the table, in terms of the registration recall performance, MAC presents the best and is tied for the best results with FPFH and FCGF descriptor settings, respectively. MAC also has a lower TE than the state-of-the-art geometric-only method $\\mathrm{SC^2}$ -PCR. Note that outdoor point clouds are significantly sparse and non-uniformly distributed. The registration experiments on the object, indoor scene, and outdoor scene datasets consistently verify that MAC holds good generalization ability in different application contexts.", + "bbox": [ + 496, + 430, + 892, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5. Analysis Experiments", + "text_level": 1, + "bbox": [ + 500, + 590, + 702, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we perform ablation studies and analysis experiments on both 3DMatch and 3DLoMatch datasets. We progressively experiment with the techniques proposed in Sec. 3, and the results are shown in Table 5. The quality of generated hypotheses is analyzed in Table 6. The performance upper bound is studied in Table 7. Table 8 presents the time efficiency analysis of MAC.", + "bbox": [ + 496, + 613, + 890, + 719 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Performing feature matching selection. Before 3D registration, a popular way is to perform outlier rejection to reduce the correspondence set. Here we employ geometric consistency (GC) [7], which is independent of the feature space and associates the largest consistent cluster relating to the compatibility among correspondences.", + "bbox": [ + 496, + 719, + 890, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "By comparing Row 1 and 2 of Table 5, GC has a negative impact on MAC performance, potentially due to that some inliers are also removed in this process. This demonstrates that MAC can still perform well even if the initial correspondence set is directly utilized as input without any filtering.", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "17750", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a4c53aac335e5698a29dbdd4ed7ff61ea751638b8b6f79762245e721c5ea5d01.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
FOG | SOG | GC | MC | NG | NC | CR | SVD | W-SVD | MAE | MSE | #inlier | RR(%) | RE(°) | TE(cm)
FPFH 1) | 83.86/39.14 | 2.17/4.01 | 6.51/9.94
2) | 77.02/26.61 | 2.10/3.83 | 6.19/9.49
3) | 82.26/39.02 | 2.12/3.98 | 6.43/9.89
4) | 83.49/38.91 | 2.22/4.11 | 6.65/10.05
5) | 83.67/38.85 | 2.15/4.03 | 6.53/9.82
6) | 84.10/40.88 | 1.96/3.66 | 6.18/9.45
7) | 82.93/39.98 | 1.95/3.66 | 6.12/9.48
8) | 82.44/38.46 | 2.16/3.97 | 6.41/9.85
9) | 74.06/31.11 | 2.08/3.89 | 6.17/9.82
10) Top100 | 82.01/37.79 | 2.13/4.02 | 6.42/9.82
11) Top200 | 83.18/38.85 | 2.16/4.08 | 6.55/9.91
12) Top500 | 83.06/38.85 | 2.14/4.03 | 6.47/9.81
13) Top1000 | 83.30/38.91 | 2.16/4.05 | 6.53/9.84
14) Top2000 | 83.36/38.79 | 2.14/4.02 | 6.52/9.78
FCGF 1) | 93.41/59.80 | 2.04/3.78 | 6.33/10.16
2) | 91.68/49.97 | 1.99/3.64 | 6.23/9.90
3) | 93.35/59.24 | 2.04/3.67 | 6.28/9.99
4) | 92.91/59.07 | 2.06/3.88 | 6.33/10.20
5) | 93.16/59.46 | 2.04/3.76 | 6.26/10.00
6) | 93.72/59.85 | 1.89/3.50 | 6.03/9.75
7) | 93.59/59.01 | 1.86/3.49 | 6.00/9.61
8) | 93.28/59.63 | 2.02/3.73 | 6.24/9.98
9) | 87.86/49.35 | 2.00/3.61 | 6.09/9.60
10) Top100 | 92.42/57.44 | 2.00/3.75 | 6.21/10.00
11) Top200 | 93.22/57.83 | 2.01/3.75 | 6.29/10.06
12) Top500 | 93.22/58.90 | 2.02/3.78 | 6.33/10.02
13) Top1000 | 93.35/59.40 | 2.05/3.78 | 6.32/10.18
14) Top2000 | 93.35/59.52 | 2.04/3.78 | 6.33/10.19
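Rows 1) and 3) above differ only in whether the second-order (SOG) or first-order (FOG) compatibility graph is used. A compact sketch of how the two graphs of Sec. 3.2 (Eqs. 1-3) can be built from matched keypoints; the variable names and the hard thresholding of weak edges are illustrative assumptions rather than the authors' exact implementation.

```python
import numpy as np

def build_compatibility_graphs(src_pts, tgt_pts, d_cmp, t_cmp):
    # src_pts, tgt_pts: (N, 3) arrays; row i of each forms correspondence c_i
    d_src = np.linalg.norm(src_pts[:, None, :] - src_pts[None, :, :], axis=-1)
    d_tgt = np.linalg.norm(tgt_pts[:, None, :] - tgt_pts[None, :, :], axis=-1)
    s_dist = np.abs(d_src - d_tgt)                      # Eq. (1): rigidity residual
    w_fog = np.exp(-s_dist ** 2 / (2.0 * d_cmp ** 2))   # Eq. (2): first-order compatibility
    w_fog[w_fog < t_cmp] = 0.0                          # drop edges below the threshold t_cmp
    np.fill_diagonal(w_fog, 0.0)
    w_sog = w_fog * (w_fog @ w_fog)                     # Eq. (3): second-order graph
    return w_fog, w_sog
```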
", + "bbox": [ + 81, + 87, + 893, + 395 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Analysis experiments on 3DMatch / 3DLoMatch. FOG: First order compatibility graph. SOG: Second order compatibility graph. GC: Use geometric consistency to preliminarily perform outlier rejection. MC: Search the maximum clique instead of maximal cliques. NG: Node-guided clique selection. NC: Normal consistency. CR: Clique ranking. W-SVD: Weighted SVD.", + "bbox": [ + 75, + 405, + 890, + 448 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Graph construction choices. We test the performance of MAC by using different graph construction approaches.", + "bbox": [ + 75, + 474, + 468, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Row 1 and 3 of Table 5, the registration recall obtained by using SOG is $1.6\\%$ higher than using FOG when combined with FPFH, and $0.06\\%$ higher when combined with FCGF on 3DMatch. Also, the registration recall obtained by using SOG is $0.12\\%$ higher than using FOG when combined with FPFH, and $0.56\\%$ higher when combined with FCGF on 3DLoMatch. Therefore, SOG is more suitable for MAC. Detailed analyzing descriptions can be found in the supplementary.", + "bbox": [ + 75, + 505, + 468, + 641 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Maximum or maximal clique. To justify the advantages of maximal cliques, we change the search strategy of MAC to the maximum cliques and test the registration performance.", + "bbox": [ + 75, + 642, + 468, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Row 1 and 9 in Table 5, applying maximal cliques surpasses maximum by $9.8\\%$ when combined with FPFH, and $5.55\\%$ higher when combined with FCGF on 3DMatch. Besides, the registration recall obtained by using maximal cliques is $8.03\\%$ higher than using the maximum cliques when combined with FPFH and $10.45\\%$ higher when combined with FCGF on 3DLoMatch. There are several reasons for this: 1) maximal cliques include the maximum cliques and additionally consider local graph constraints, so the search for maximal cliques can make use of both local and global information in the compatibility graph; 2) the maximum clique is a very tight constraint which requires maximizing the number of mutually compatible correspondences, but it does not guarantee the opti", + "bbox": [ + 75, + 688, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mal result.", + "bbox": [ + 498, + 474, + 573, + 487 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Node-guided clique selection. We compare the performance with and without node-guided (NG) clique selection for maximal cliques search.", + "bbox": [ + 498, + 489, + 890, + 535 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparing Row 1 and 4 in Table 5, using NG achieves a recall improvement of $0.37\\%$ when combined with FPFH, and $0.5\\%$ improvement when combined with FCGF on 3DMatch. Also, using NG achieves a recall improvement of $0.23\\%$ with FPFH and $0.73\\%$ improvement with FCGF on 3DLoMatch. It is worth noting that while NG improves recall, the mean RE and mean TE are also decreasing. For example, NG reduces the mean RE by $0.1^{\\circ}$ and the mean TE by $0.11\\mathrm{cm}$ with FPFH on 3DLoMatch. 
NG effectively reduces the number of calculations in the subsequent steps and promises accurate hypotheses.", + "bbox": [ + 496, + 536, + 892, + 700 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Different approaches for clique filtering. We test the effectiveness of the two filtering methods, normal consistency and clique ranking.", + "bbox": [ + 496, + 702, + 890, + 748 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1) Normal consistency: comparing Row 1 and 8 in Table 5, NC slightly degrades MAC's performance. 2) Clique ranking: Row 10 to 14 demonstrate that the registration recall tends to increase as $K$ increases, suggesting that larger $K$ yields a subset of cliques that generate more correct hypotheses. Remarkably, setting $K$ to 100 can already achieve outstanding performance.", + "bbox": [ + 496, + 750, + 890, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Employing instance-equal or weighted SVD. The comparisons of instance-equal and weighted SVD are shown in Rows 1 and 5 of Table 5.", + "bbox": [ + 496, + 854, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "17751", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/339d81d471e75ee1d72867f6277c3a91726500ea413990952cf3931779285387.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
# hypotheses | 3DMatch | 3DLoMatch
RANSAC | MAC | RANSAC | MAC
FCGF | FPFH | FCGF | FPFH | FCGF | FPFH | FCGF | FPFH
100 | 10.45 | 0.76 | 61.94 | 50.67 | 1.25 | 0.05 | 30.47 | 12.22
200 | 20.76 | 1.50 | 119.20 | 89.27 | 2.52 | 0.09 | 55.57 | 17.59
500 | 51.74 | 3.68 | 269.06 | 162.41 | 6.21 | 0.21 | 109.32 | 23.32
1000 | 103.65 | 7.39 | 456.18 | 217.32 | 12.43 | 0.41 | 156.11 | 26.02
2000 | 208.24 | 14.90 | 669.32 | 254.13 | 24.80 | 0.81 | 202.12 | 29.31
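This table reports, for each hypothesis budget, how many of the generated hypotheses agree with the ground-truth transformation. A small helper showing one way such a count could be obtained under the 3DMatch / 3DLoMatch success thresholds (RE ≤ 15°, TE ≤ 30 cm); the exact correctness criterion used by the authors is not spelled out in this excerpt, so treat this form as an assumption.

```python
import numpy as np

def count_correct_hypotheses(hypotheses, T_gt, re_thresh=15.0, te_thresh=30.0):
    # hypotheses: iterable of 4x4 transforms (from maximal cliques or RANSAC samples)
    n_correct = 0
    for T in hypotheses:
        cos_re = np.clip((np.trace(T_gt[:3, :3].T @ T[:3, :3]) - 1.0) / 2.0, -1.0, 1.0)
        re = np.degrees(np.arccos(cos_re))
        te = np.linalg.norm(T[:3, 3] - T_gt[:3, 3]) * 100.0  # metres -> cm
        n_correct += int(re <= re_thresh and te <= te_thresh)
    return n_correct
```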
", + "bbox": [ + 81, + 88, + 467, + 167 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/43db7d0c334d1bcbe2ea65456dc4beebe679b8108bc8fef2d22ca9be347ff73d.jpg", + "table_caption": [ + "Table 6. Comparison of the number of correct hypotheses generated by MAC and RANSAC on 3DMatch and 3DLoMatch." + ], + "table_footnote": [], + "table_body": "
3DMatch RR(%) | 3DLoMatch RR(%)
MAC-1 | 98.46 | 91.24
MAC-5 | 97.10 | 83.32
MAC-10 | 96.43 | 77.93
MAC-20 | 94.70 | 70.47
MAC-50 | 91.13 | 56.37
MAC-origin | 93.72 | 59.85
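The surrounding ablation also compares hypothesis evaluation metrics (inlier count, MAE and MSE; rows 1, 6 and 7 of Table 5). A hedged sketch of how a single hypothesis could be scored under each metric; the precise MAE / MSE weighting follows [40] and may differ from the illustrative forms below.

```python
import numpy as np

def score_hypothesis(R, t, src_pts, tgt_pts, tau=0.1):
    # src_pts, tgt_pts: (N, 3) correspondence endpoints; tau: inlier distance threshold (metres)
    residuals = np.linalg.norm(src_pts @ R.T + t - tgt_pts, axis=1)
    inliers = residuals < tau
    inlier_count = int(inliers.sum())
    mae_score = float(np.sum((tau - residuals[inliers]) / tau))          # illustrative MAE-style score
    mse_score = float(np.sum(((tau - residuals[inliers]) / tau) ** 2))   # illustrative MSE-style score
    return inlier_count, mae_score, mse_score
```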
", + "bbox": [ + 179, + 220, + 370, + 306 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Weighted SVD is slightly inferior to instance-equal SVD. This suggests that samples in MACs are already very consistent, indicating no additional weighting strategies are required.", + "bbox": [ + 76, + 386, + 467, + 446 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Varying hypothesis evaluation metrics. Here we compare three evaluation metrics, including MAE, MSE and inlier count, for MAC hypothesis evaluation.", + "bbox": [ + 76, + 446, + 467, + 491 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Row 1, 6 and 7, MAC with MAE achieves the best performance. In Table 5, MAE achieves a recall improvement of $0.24\\%$ when combined with FPFH, and $0.31\\%$ improvement when combined with FCGF on 3DMatch compared with the commonly used inlier count metric. Also, MAE has a $1.74\\%$ improvement when combined with FPFH, and $0.05\\%$ when combined with FCGF on 3DLoMatch compared with inlier count. MAE is also very effective in reducing RE and TE. For instance, MAE reduces the mean RE by $0.35^{\\circ}$ and the mean TE by $0.49~\\mathrm{cm}$ with FPFH on 3DLoMatch.", + "bbox": [ + 75, + 492, + 467, + 656 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison with RANSAC hypotheses. We evaluate the quality of the generated hypotheses by comparing the hypotheses from RANSAC and MAC with the ground truth transformation. The results are shown in Table 6.", + "bbox": [ + 75, + 657, + 467, + 717 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Compared to RANSAC, which randomly selects correspondences and generates hypotheses from the correspondence set without geometric constraints, MAC effectively generates more convincing hypotheses from maximal cliques in the compatibility graph, which fully exploits the consensus information in the graph.", + "bbox": [ + 75, + 719, + 467, + 809 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The performance upper bound of MAC. Given an ideal hypothesis evaluation metric, allowing a point cloud pair can be aligned as long as correct hypotheses can be generated. This can test the performance upper bound of MAC. We vary the judging threshold for the number of generated correct hypotheses and report the results in Table 7.", + "bbox": [ + 75, + 810, + 467, + 898 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/77b0745206b9113b0c3e7161b1c49ac0f8427f7a7161065de733e0e006f91afe.jpg", + "table_caption": [ + "Table 7. Registration recall on 3DMatch with FCGF setting based on judging MAC's hypotheses. MAC- $n$ : a point cloud pair is considered alignable if at least $n$ hypotheses are correct." + ], + "table_footnote": [], + "table_body": "
# correspondences | Graph Construction | Search Maximal Cliques | Node-guided Clique Selection | Pose Estimation | Total
250 | 1.03 (14.55%) | 5.24 (74.01%) | 0.58 (8.19%) | 0.23 (3.25%) | 7.08
500 | 4.07 (17.54%) | 15.67 (67.51%) | 3.12 (13.44%) | 0.35 (1.51%) | 23.21
1000 | 16.90 (29.85%) | 36.60 (64.65%) | 1.88 (3.32%) | 1.23 (2.18%) | 56.61
2500 | 153.92 (53.29%) | 104.03 (36.02%) | 4.97 (1.72%) | 25.93 (8.97%) | 288.85
5000 | 887.03 (27.16%) | 1579.61 (48.37%) | 65.40 (2.00%) | 733.38 (22.47%) | 3265.42
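The two dominant stages in this breakdown are graph construction and maximal clique search. A rough re-creation of the clique search plus the node-guided selection of Sec. 3.3 using python-igraph (the paper's implementation uses the igraph C++ library); this is an illustrative sketch, not the released code.

```python
import igraph as ig
import numpy as np

def node_guided_cliques(w_sog, min_size=3):
    # Enumerate maximal cliques of the compatibility graph, then keep, for every node,
    # only the clique with the greatest weight w_{C_i} (Eq. 4) that contains it.
    n = w_sog.shape[0]
    edges = [(i, j) for i in range(n) for j in range(i + 1, n) if w_sog[i, j] > 0]
    g = ig.Graph(n=n, edges=edges)
    cliques = g.maximal_cliques(min=min_size)
    best_for_node = {}
    for clique in cliques:
        sub = w_sog[np.ix_(clique, clique)]
        weight = (sub.sum() - np.trace(sub)) / 2.0   # sum of edge weights inside the clique
        for v in clique:
            if v not in best_for_node or weight > best_for_node[v][0]:
                best_for_node[v] = (weight, tuple(sorted(clique)))
    return list({c for _, c in best_for_node.values()})  # de-duplicated consensus sets
```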
", + "bbox": [ + 503, + 88, + 890, + 156 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 8. Average consumed time (ms) per point cloud pair on the 3DMatch dataset. Predator is used for generating correspondences.", + "bbox": [ + 498, + 167, + 890, + 205 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impressively, MAC-1 achieves registration recalls of $98.46\\% / 91.24\\%$ on 3DMatch / 3DLoMatch. This indicates that even on low overlapping datasets, MAC is able to produce correct hypotheses for most point cloud pairs. In addition, we can deduce that MAC's performance can be further improved with better hypothesis evaluation metrics. Time consumption of MAC. We employ Predator [18] to generate correspondences with different magnitudes to test the time performance of MAC. The time consumption is reported in Table 8.", + "bbox": [ + 496, + 238, + 890, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The following observations can be made. 1) In general, MAC can complete 3D registration in only tens of milliseconds when the number of correspondences is smaller than 1000. Even with an input with 2500 correspondences, the time consumption is about 0.29 seconds. Note that MAC is implemented on the CPU only. 2) As the number of correspondences increases from 250 to 2500, there is an increase in time cost for graph construction due to $\\mathbf{W}_{SOG}$ computation taking more time. 3) When the number of correspondences reaches 5000, there is a large rise in the time cost of MAC's registration. The significant increase in the input size makes the search for maximal cliques more time-consuming. However, MAC is not sensitive to the cardinality of the input correspondence set, as verified in Table 3. Hence, using sparse inputs for MAC can produce outstanding performance while making registration efficient.", + "bbox": [ + 496, + 392, + 890, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 652, + 617, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we presented MAC to solve PCR by using the maximal clique constraint to generate precise pose hypotheses from correspondences. Our method achieves state-of-the-art performance on all tested datasets and can adapt to deep-learned methods to boost their performance. Limitation. As shown in Table 7 and Table 1, MAC produces accurate hypotheses but may fail to find them. In the future, we plan to develop a more convincing hypothesis evaluation technique utilizing semantic information.", + "bbox": [ + 496, + 676, + 890, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. This work is supported in part by the National Natural Science Foundation of China (NFSC) (No.U19B2037 and 62002295), Shaanxi Provincial Key R&D Program (No.2021KWZ-03), and the Fundamental Research Funds for the Central Universities (No.D5000220352).", + "bbox": [ + 496, + 803, + 890, + 886 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "17752", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Sheng Ao, Qingyong Hu, Bo Yang, Andrew Markham, and Yulan Guo. Spinnet: Learning a general surface descriptor for 3d point cloud registration. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11753-11762, 2021. 1, 2, 3, 6", + "[2] Yasuhiro Aoki, Hunter Goforth, Rangaprasad Arun Srivatsan, and Simon Lucey. Pointnetlk: Robust & efficient point cloud registration using pointnet. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7163-7172, 2019. 1", + "[3] Xuyang Bai, Zixin Luo, Lei Zhou, Hongkai Chen, Lei Li, Zeyu Hu, Hongbo Fu, and Chiew-Lan Tai. Pointdsc: Robust point cloud registration using deep spatial consistency. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 15859-15869. IEEE, 2021. 1, 2, 3, 5, 6", + "[4] Xuyang Bai, Zixin Luo, Lei Zhou, Hongbo Fu, Long Quan, and Chiew-Lan Tai. D3feat: Joint learning of dense detection and description of 3d local features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6359-6367, 2020. 1, 2", + "[5] Daniel Barath and Jiri Matas. Graph-cut ransac. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6733-6741, 2018. 1, 2, 5", + "[6] Alvaro Parra Bustos and Tat-Jun Chin. Guaranteed outlier removal for point cloud registration with correspondences. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(12):2868-2882, 2017. 1, 2", + "[7] Hui Chen and Bir Bhanu. 3d free-form object recognition in range images using local surface patches. Pattern Recognition Letters, 28(10):1252-1262, 2007. 6", + "[8] Zhi Chen, Kun Sun, Fan Yang, and Wenbing Tao. Sc2-pcr: A second order spatial compatibility for efficient and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13221-13231, 2022. 2, 3, 4, 5, 6", + "[9] Christopher Choy, Wei Dong, and Vladlen Koltun. Deep global registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2514-2523. IEEE, 2020. 1, 2, 3, 4, 5, 6", + "[10] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8958-8966, 2019. 1, 2, 5, 6", + "[11] Bertram Drost, Markus Ulrich, Nassir Navab, and Slobodan Ilic. Model globally, match locally: Efficient and robust 3d object recognition. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 998-1005. IEEE, 2010. 5", + "[12] David Eppstein, Maarten Löffler, and Darren Strash. Listing all maximal cliques in sparse graphs in near-optimal time. In International Symposium on Algorithms and Computation, pages 403-414. Springer, 2010. 4", + "[13] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to" + ], + "bbox": [ + 78, + 114, + 472, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 5, 6", + "[14] Kexue Fu, Shaolei Liu, Xiaoyuan Luo, and Manning Wang. Robust point cloud registration framework based on deep graph matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8893-8902, 2021. 1, 3", + "[15] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3354-3361. 
IEEE, 2012. 5", + "[16] Zan Gojcic, Caifa Zhou, Jan D Wegner, and Andreas Wieser. The perfect match: 3d point cloud matching with smoothed densities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5545-5554, 2019. 1", + "[17] Yulan Guo, Mohammed Bennamoun, Ferdous Sohel, Min Lu, and Jianwei Wan. 3d object recognition in cluttered scenes with local surface features: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(11):2270-2287, 2014. 1", + "[18] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4267-4276, 2021. 1, 2, 3, 5, 6, 8", + "[19] Junha Lee, Seungwook Kim, Minsu Cho, and Jaesik Park. Deep hough voting for robust global registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15994-16003, 2021. 1, 2, 5, 6", + "[20] Marius Leordeanu and Martial Hebert. A spectral technique for correspondence problems using pairwise constraints. 2005. 2, 5, 6", + "[21] Jiayuan Li. A practical o (n2) outlier removal method for point cloud registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1, 2", + "[22] Yang Li and Tatsuya Harada. Lepard: Learning partial point cloud matching in rigid and deformable scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5554-5564, 2022. 2", + "[23] Muyuan Lin, Varun Murali, and Sertac Karaman. A planted clique perspective on hypothesis pruning. IEEE Robotics and Automation Letters, 7(2):5167-5174, 2022. 4", + "[24] Yu-Kai Lin, Wen-Chieh Lin, and Chieh-Chih Wang. Kclosest points and maximum clique pruning for efficient and effective 3-d laser scan matching. IEEE Robotics and Automation Letters, 7(2):1471-1477, 2022. 4", + "[25] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. Automatic correspondence for 3d modeling: an extensive review. International Journal of Shape Modeling, 11(02):253-291, 2005. 1", + "[26] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. A novel representation and feature matching algorithm for automatic pairwise registration of range images. International Journal of Computer Vision, 66(1):19-40, 2006. 5" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "17753", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] G Dias Pais, Srikumar Ramalingam, Venu Madhav Govindu, Jacinto C Nascimento, Rama Chellappa, and Pedro Miraldo. 3dregnet: A deep neural network for 3d point registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7193-7203. IEEE, 2020. 1, 2, 3, 4, 5, 6", + "[28] Alvaro Parra, Tat-Jun Chin, Frank Neumann, Tobias Friedrich, and Maximilian Katzmann. A practical maximum clique algorithm for matching with pairwise constraints. arXiv preprint arXiv:1902.01534, 2019. 4", + "[29] Zheng Qin, Hao Yu, Changjian Wang, Yulan Guo, Yuxing Peng, and Kai Xu. Geometric transformer for fast and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11143-11152, 2022. 2, 3, 4, 6", + "[30] Siwen Quan and Jiaqi Yang. Compatibility-guided sampling consensus for 3-d point cloud registration. 
IEEE Transactions on Geoscience and Remote Sensing, 58(10):7380-7392, 2020. 1, 2, 5, 6", + "[31] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In IEEE International Conference on Robotics and Automation, pages 3212-3217. IEEE, 2009. 1, 2, 5", + "[32] Radu Bogdan Rusu and Steve Cousins. 3d is here: Point cloud library (pcl). In IEEE International Conference on Robotics and Automation, pages 1-4. IEEE, 2011. 5", + "[33] Ivan Sipiran and Benjamin Bustos. Harris 3d: a robust extension of the harris operator for interest point detection on 3d meshes. The Visual Computer, 27(11):963-976, 2011. 5", + "[34] Federico Tombari, Samuele Salti, and Luigi Di Stefano. Unique signatures of histograms for local surface description. In European Conference on Computer Vision, pages 356-369. Springer, 2010. 5", + "[35] Haiping Wang, Yuan Liu, Zhen Dong, and Wenping Wang. You only hypothesize once: Point cloud registration with rotation-equivariant descriptors. In Proceedings of the ACM International Conference on Multimedia, pages 1630-1641, 2022. 1", + "[36] Heng Yang, Jingnan Shi, and Luca Carlone. Teaser: Fast and certifiable point cloud registration. IEEE Transactions on Robotics, 37(2):314-333, 2020. 2, 4, 5, 6", + "[37] Jiaqi Yang, Zhiguo Cao, and Qian Zhang. A fast and robust local descriptor for 3d point cloud registration. Information Sciences, 346:163-179, 2016. 5", + "[38] Jiaqi Yang, Jiahao Chen, Siwen Quan, Wei Wang, and Yanning Zhang. Correspondence selection with loose-tight geometric voting for 3d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 2022. 1, 2", + "[39] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Zhaoshuai Qi, and Yanning Zhang. Sac-cot: Sample consensus by sampling compatibility triangles in graphs for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 60:1-15, 2021. 1, 2, 5", + "[40] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Qian Zhang, Yanning Zhang, and Zhiguo Cao. Toward efficient and robust metrics for ransac hypotheses and 3d rigid registration. IEEE Transactions on Circuits and Systems for Video Technology, 32(2):893-906, 2021. 1, 2, 5" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015. 1, 2, 5", + "[42] Jiaqi Yang, Yang Xiao, Zhiguo Cao, and Weidong Yang. Ranking 3d feature correspondences via consistency voting. Pattern Recognition Letters, 117:1-8, 2019. 5", + "[43] Hao Yu, Fu Li, Mahdi Saleh, Benjamin Busam, and Slobodan Ilic. Cofinet: Reliable coarse-to-fine correspondences for robust pointcloud registration. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6", + "[44] Andy Zeng, Shuran Song, Matthias Nießner, Matthew Fisher, Jianxiong Xiao, and Thomas Funkhouser. 3dmatch: Learning local geometric descriptors from rgb-d reconstructions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1802-1811, 2017. 5", + "[45] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Fast global registration. In European Conference on Computer Vision, pages 766-782. Springer, 2016. 
2, 5, 6" + ], + "bbox": [ + 501, + 90, + 892, + 359 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "17754", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_model.json b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7a09ee8041e5cf7adb307c5c291511ad5af17589 --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_model.json @@ -0,0 +1,2145 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.29, + 0.131, + 0.68, + 0.154 + ], + "angle": 0, + "content": "3D Registration with Maximal Cliques" + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.181, + 0.784, + 0.217 + ], + "angle": 0, + "content": "Xiyu Zhang Jiaqi Yang* Shikun Zhang Yanning Zhang \nSchool of Computer Science, Northwestern Polytechnical University, China" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.219, + 0.806, + 0.235 + ], + "angle": 0, + "content": "{2426988253, zhangshikun}@mail.nwpu.edu.cn; {jqyang, ynzhang}@nwpu.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.474, + 0.633 + ], + "angle": 0, + "content": "As a fundamental problem in computer vision, 3D point cloud registration (PCR) aims to seek the optimal pose to align a point cloud pair. In this paper, we present a 3D registration method with maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph for accurate pose hypotheses generation: 1) A compatibility graph is constructed to render the affinity relationship between initial correspondences. 2) We search for maximal cliques in the graph, each of which represents a consensus set. We perform node-guided clique selection then, where each node corresponds to the maximal clique with the greatest graph weight. 3) Transformation hypotheses are computed for the selected cliques by the SVD algorithm and the best hypothesis is used to perform registration. Extensive experiments on U3M, 3DMatch, 3DLoMatch and KITTI demonstrate that MAC effectively increases registration accuracy, outperforms various state-of-the-art methods and boosts the performance of deep-learned methods. MAC combined with deep-learned methods achieves state-of-the-art registration recall of \\(95.7\\%\\) / \\(78.9\\%\\) on 3DMatch / 3DLoMatch." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.662, + 0.21, + 0.679 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.687, + 0.47, + 0.855 + ], + "angle": 0, + "content": "Point cloud registration (PCR) is an important and fundamental problem in 3D computer vision and has a wide range of applications in localization [13], 3D object detection [17] and 3D reconstruction [25]. 
Given two 3D scans of the same object (or scene), the goal of PCR is to estimate a six-degree-of-freedom (6-DoF) pose transformation that accurately aligns the two input point clouds. Using point-to-point feature correspondences is a popular and robust solution to the PCR problem. However, due to the limitations of existing 3D keypoint detectors & descriptors, the limited overlap between point clouds and data noise, corre" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.272, + 0.889, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.487, + 0.895, + 0.57 + ], + "angle": 0, + "content": "Figure 1. Comparison of maximal and maximum cliques on a low overlapping point cloud pair. Maximal cliques (MAC) effectively choose the optimal 6-DoF transformation hypothesis with low rotation error (RE) and translation error (TE) for two point clouds with a low inlier ratio, while the maximum clique fails in this case." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.893, + 0.642 + ], + "angle": 0, + "content": "spondences generated by feature matching usually contain outliers, resulting in great challenges to accurate 3D registration." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The problem of 3D registration by handling correspondences with outliers has been studied for decades. We classify them into geometric-only and deep-learned methods. For geometric-only methods [5, 6, 21, 30, 31, 38-41], random sample consensus (RANSAC) and its variants perform an iterative sampling strategy for registration. Although RANSAC-based methods are simple and efficient, their performance is highly vulnerable when the outlier rate increases, and it requires a large number of iterations to obtain acceptable results. Also, a series of global registration methods based on branch-and-bound (BnB) are proposed to search the 6D parameter space and obtain the optimal global solution. The main weakness of these methods is the high computational complexity, especially when the correspondence set is of a large magnitude and has an extremely high outlier rate. For deep-learned methods, some [1-4, 9, 10, 14, 16, 18, 19, 27, 35] focus on improving" + }, + { + "type": "page_footnote", + "bbox": [ + 0.099, + 0.864, + 0.223, + 0.876 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.877, + 0.468, + 0.888 + ], + "angle": 0, + "content": "Code will be available at https://github.com/zhangxy0517/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.889, + 0.37, + 0.9 + ], + "angle": 0, + "content": "3D-Registration-with-Maximal-Cliques." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.864, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17745" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.226 + ], + "angle": 0, + "content": "one module in the registration process, such as investigating more discriminate keypoint feature descriptors or more effective correspondence selection techniques, while the others [22, 29, 43] focus on registration in an end-to-end manner. However, deep-learned based methods require a large amount of data for training and usually lack generalization on different datasets. 
At present, it is still very challenging to achieve accurate registrations in the presence of heavy outliers and in cross-dataset conditions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.227, + 0.471, + 0.529 + ], + "angle": 0, + "content": "In this paper, we propose a geometric-only 3D registration method based on maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph to generate accurate pose hypotheses. We first model the initial correspondence set as a compatibility graph, where each node represents a single correspondence and each edge between two nodes indicates a pair of compatible correspondences. Second, we search for maximal cliques in the graph and then use node-guided clique filtering to match each graph node with the appropriate maximal clique containing it. Compared with the maximum clique, MAC is a looser constraint and is able to mine more local information in a graph. This helps us to achieve plenty of correct hypotheses from a graph. Finally, transformation hypotheses are computed for the selected cliques by the SVD algorithm. The best hypothesis is selected to perform registration using popular hypothesis evaluation metrics in the RANSAC family. To summarize, our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.537, + 0.47, + 0.628 + ], + "angle": 0, + "content": "- We introduce a hypothesis generation method named MAC. Our MAC method is able to mine more local information in a graph, compared with the previous maximum clique constraint. We demonstrate that hypotheses generated by MAC are of high accuracy even in the presence of heavy outliers." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.637, + 0.47, + 0.786 + ], + "angle": 0, + "content": "- Based on MAC, we present a novel PCR method, which achieves state-of-the-art performance on U3M, 3DMatch, 3DLoMatch and KITTI datasets. Notably, our geometric-only MAC method outperforms several state-of-the-art deep learning methods [3, 9, 19, 27]. MAC can also be inserted as a module into multiple deep-learning frameworks [1, 10, 18, 29, 43] to boost their performance. MAC combined with GeoTransformer achieves the state-of-the-art registration recall of \\(95.7\\% / 78.9\\%\\) on 3DMatch / 3DLoMatch." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.537, + 0.47, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.808, + 0.22, + 0.823 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.833, + 0.347, + 0.849 + ], + "angle": 0, + "content": "2.1. Geometric-only PCR Methods" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Various geometric-only methods [6, 8, 20, 36, 45] have been proposed recently. Typically, RANSAC and its variants [5, 13, 30, 31, 38-40] remain the dominant approaches" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.499 + ], + "angle": 0, + "content": "to the problem of estimating a 6-DoF pose from correspondences. RANSAC iteratively samples correspondences from the initial set, generating and evaluating geometric estimations for each subset until a satisfactory solution is obtained. Efficient and robust evaluation metrics are extremely important for using RANSAC to achieve accurate registration. 
To address the current problems of time-consuming and noise-sensitive evaluation metrics, [40] analyzes the contribution of inliers and outliers during the computation and proposed several metrics that can effectively improve the registration performance of RANSAC. A large number of variants have also been proposed to achieve further improvement. For example, Rusu et al. [31] presented the simple consensus-based initial alignment (SACIA) method, which samples correspondences spread out on the point cloud and leverages the Huber penalty for evaluation. Graph cut RANSAC (GC-RANSAC) [5] uses the graph-cut algorithm before model re-fitting in the local optimization step. Compatibility-guided sample consensus (CG-SAC) [30] additionally considers the normal information of key points during the sampling process. Yang et al. [39] proposed the sample consensus by sampling compatibility triangles (SAC-COT) method, which generates estimations by ranking and sampling ternary loops from the compatibility graph. Although many previous efforts have been made, these methods suffer from low time efficiency and limited accuracy in cases with high outlier rates." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.501, + 0.892, + 0.727 + ], + "angle": 0, + "content": "A series of globally optimal methods based on BnB have been proposed recently. Yang et al. [41] proposed globally optimal ICP (GO-ICP), which rationalizes the planning of ICP update tasks at different stages, and its biggest advantage is that it minimizes the local optimum. Bustos and Chin [6] presented guaranteed outlier removal (GORE), which calculates the tight lower bound and tight upper bound for each correspondence and reduces the size of correspondence set by rejecting true outliers. Motivated by GORE, Li [21] proposed a polynomial time outlier removal method, which seeks the tight lower and upper bound by calculating the costs of correspondence matrix (CM) and augmented correspondence matrix (ACM). However, BnB techniques are sensitive to the cardinality of the input and are time-consuming for large-scale inputs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.741, + 0.753, + 0.756 + ], + "angle": 0, + "content": "2.2. Deep-learned PCR Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In addition to geometric-only methods, recent works also adopt deep learning techniques to perform PCR. Some methods aim to detect more repeatable keypoints [4, 18] and extract more descriptive features [1, 10]. FCGF [10] computes the features in a single pass through a fully convolutional neural network without keypoint detection. D3Feat [4] uses a fully convolutional network to obtain local information of point clouds and a joint learning framework to achieve 3D local feature detection and description." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17746" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.09, + 0.098, + 0.885, + 0.224 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.24, + 0.895, + 0.281 + ], + "angle": 0, + "content": "Figure 2. Pipeline of MAC. 1. Construct a graph for the initial correspondence set. 2. Select a set of maximal cliques from the graph as the consistent sets. 3. Generate and evaluate the hypotheses according to the consistent sets. 4. Select the best hypothesis to perform 3D registration." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.309, + 0.473, + 0.64 + ], + "angle": 0, + "content": "Predator [18] applies an attention mechanism to extract salient points in overlapping regions of the point clouds, thus achieving robust registration in the presence of low overlap rates. Spinnet [1] extracts local features which are rotationally invariant and sufficiently informative to enable accurate registration. Some methods [3, 9, 14, 27] focus on efficiently distinguishing correspondences as inliers and outliers. Deep global registration (DGR) [9] and 3DRegNet [27] classify a given correspondence by training end-to-end neural networks and using operators such as sparse convolution and point-by-point MLP. PointDSC [3] explicitly explores spatial consistency for removing outlier correspondences and 3D point cloud registration. Fu et al. [14] proposed a registration framework that utilizes deep graph matching (RGM) that can find robust and accurate point-to-point correspondences. More recently, several methods [29, 43] follow the detection-free methods and estimate the transformation in an end-to-end way. CoFiNet [43] extracts correspondences from coarse to fine without keypoint detection. GeoTransformer [29] learns geometric features for robust superpoint matching and is robust in low-overlap cases and invariant to rigid transformation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.473, + 0.763 + ], + "angle": 0, + "content": "While deep learning techniques have demonstrated a great potential for PCR, these methods require a large amount of training data and their generalization is not always promising. By contrast, MAC does not require any training data and achieves more advanced performance than several deep-learned methods. Moreover, MAC can be served as a drop-on module in deep learning frameworks to boost their performance." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.776, + 0.15, + 0.791 + ], + "angle": 0, + "content": "3. MAC" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.802, + 0.281, + 0.817 + ], + "angle": 0, + "content": "3.1. Problem Formulation" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "For two point clouds \\(\\mathbf{P}^s\\) and \\(\\mathbf{P}^t\\) to be aligned, we first extract local features for them using geometric or learned descriptors. Let \\(\\mathbf{p}^s\\) and \\(\\mathbf{p}^t\\) denote the points in the \\(\\mathbf{P}^s\\) and \\(\\mathbf{P}^t\\), respectively. An initial correspondence set \\(\\mathbf{C}_{initial} = \\{\\mathbf{c}\\}\\) is formed by matching feature descriptors," + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.309, + 0.892, + 0.339 + ], + "angle": 0, + "content": "where \\(\\mathbf{c} = (\\mathbf{p}^s, \\mathbf{p}^t)\\). MAC estimates the 6-DoF pose transformation between \\(\\mathbf{P}^s\\) and \\(\\mathbf{P}^t\\) from \\(\\mathbf{C}_{initial}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.339, + 0.892, + 0.369 + ], + "angle": 0, + "content": "Our method is technically very simple, and its pipeline is shown in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.377, + 0.693, + 0.393 + ], + "angle": 0, + "content": "3.2. Graph Construction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.4, + 0.892, + 0.507 + ], + "angle": 0, + "content": "The graph space can more accurately depict the affinity relationship between correspondences than the Euclidean space. 
Therefore, we model the initial correspondences as a compatibility graph, where correspondences are represented by nodes and edges link nodes that are geometrically compatible. Here, we consider two approaches to construct a compatibility graph." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.513, + 0.892, + 0.574 + ], + "angle": 0, + "content": "- First Order Graph. The first order graph (FOG) is constructed based on the rigid distance constraint between the correspondence pair \\((\\mathbf{c}_i, \\mathbf{c}_j)\\), which can be quantitatively measured as:" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.582, + 0.892, + 0.615 + ], + "angle": 0, + "content": "\\[\nS _ {d i s t} \\left(\\mathbf {c} _ {i}, \\mathbf {c} _ {j}\\right) = \\left| \\left\\| \\mathbf {p} _ {i} ^ {s} - \\mathbf {p} _ {j} ^ {s} \\right\\| - \\left\\| \\mathbf {p} _ {i} ^ {t} - \\mathbf {p} _ {j} ^ {t} \\right\\| \\right|. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.621, + 0.892, + 0.637 + ], + "angle": 0, + "content": "The compatibility score between \\(\\mathbf{c}_i\\) and \\(\\mathbf{c}_j\\) is given as:" + }, + { + "type": "equation", + "bbox": [ + 0.583, + 0.647, + 0.892, + 0.684 + ], + "angle": 0, + "content": "\\[\nS _ {c m p} (\\mathbf {c} _ {i}, \\mathbf {c} _ {j}) = \\exp (- \\frac {S _ {d i s t} (\\mathbf {c} _ {i} , \\mathbf {c} _ {j}) ^ {2}}{2 d _ {c m p} ^ {2}}), \\qquad (2)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.687, + 0.892, + 0.778 + ], + "angle": 0, + "content": "where \\(d_{cmp}\\) is a distance parameter. Notably, if \\(S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)\\) is greater than a threshold \\(t_{cmp}\\), \\(\\mathbf{c}_i\\) and \\(\\mathbf{c}_j\\) form an edge \\(\\mathbf{e}_{ij}\\) and \\(S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)\\) is the weight of \\(\\mathbf{e}_{ij}\\), otherwise \\(S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)\\) will be set to 0. Since the compatibility graph is undirected, the weight matrix \\(\\mathbf{W}_{FOG}\\) is symmetric." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.786, + 0.892, + 0.876 + ], + "angle": 0, + "content": "- Second Order Graph. The previous study [8] proposes a second order compatibility measure, which relates to the number of commonly compatible correspondences in the global set. The second order graph (SOG) evolves from FOG. The weight matrix \\(\\mathbf{W}_{\\mathit{SOG}}\\) can be calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.56, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\mathbf {W} _ {S O G} = \\mathbf {W} _ {F O G} \\odot \\left(\\mathbf {W} _ {F O G} \\times \\mathbf {W} _ {F O G}\\right), \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17747" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.109, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "where \\(\\odot\\) represents the element-wise product between two matrices." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.133, + 0.47, + 0.224 + ], + "angle": 0, + "content": "Both graph construction methods can adapt to our frameworks. Compared with FOG, 1) SOG has stricter edge construction conditions and a higher degree of compatibility with adjacent nodes; 2) SOG is sparser, which facilitates a more rapid search of cliques. In Sec. 4.5, we experimentally compare FOG and SOG in our MAC framework." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.235, + 0.303, + 0.251 + ], + "angle": 0, + "content": "3.3. 
Search Maximal Cliques" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.258, + 0.469, + 0.349 + ], + "angle": 0, + "content": "Given an undirected graph \\( G = (\\mathbf{V}, \\mathbf{E}) \\), clique \\( C = (\\mathbf{V}', \\mathbf{E}') \\), \\( \\mathbf{V}' \\subseteq \\mathbf{V} \\), \\( \\mathbf{E}' \\subseteq \\mathbf{E} \\) is a subset of \\( G \\), in which any two nodes are connected by edges. A maximal clique is a clique that cannot be extended by adding any nodes. In particular, the maximal clique with the most nodes is the maximum clique of a graph." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.35, + 0.469, + 0.53 + ], + "angle": 0, + "content": "Searching for Maximal cliques. To generate hypotheses, RANSAC-based methods repeatedly take random samples from the correspondence set. Nevertheless, they fail to fully mine the affinity relationships between correspondences. Theoretically, inliers would form cliques in the graph, because inliers are usually geometrically compatible with each other. Previous works [23,24,28,36] focus on searching for maximum cliques in the graph, however, the maximum clique is a very tight constraint that only focuses on the global consensus information in a graph. Instead, we loosen the constraint and leverage maximal cliques to mine more local graph information." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.531, + 0.469, + 0.651 + ], + "angle": 0, + "content": "By using the igraph_maximal cliques function in the igraph\\(^1\\) \\(\\mathrm{C}++\\) library, which makes use of a modified Bron-Kerbosch algorithm [12], the search of maximal cliques can be very efficient. The process's worst time complexity is \\(\\mathcal{O}(d(n - d)3^{(d / 3)})\\), where \\(d\\) is the degeneracy of the graph. Note that \\(d\\) is typically small in our problem because the graph is usually sparse when dealing with point cloud correspondences." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.652, + 0.469, + 0.789 + ], + "angle": 0, + "content": "Node-guided Clique Selection. After executing the maximal clique searching procedure, we obtain the maximal clique set \\(MAC_{initial}\\). In practice, \\(MAC_{initial}\\) usually contains tens of thousands of maximal cliques, which will make it very time-consuming if we consider all maximal cliques. We introduce a node-guided clique selection method in this section to reduce \\(|MAC_{initial}|\\). First, we calculate the weight for each clique in \\(MAC_{initial}\\). Given a clique \\(C_i = (\\mathbf{V}_i, \\mathbf{E}_i)\\), the weight \\(w_{C_i}\\) is calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.801, + 0.469, + 0.835 + ], + "angle": 0, + "content": "\\[\nw _ {C _ {i}} = \\sum_ {e _ {j} \\in \\mathbf {E} _ {i}} w _ {e _ {j}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.846, + 0.469, + 0.877 + ], + "angle": 0, + "content": "where \\(w_{e_j}\\) represents the weight of edge \\(e_j\\) in \\(\\mathbf{W}_{SOG}\\). A node may be included by multiple maximal cliques and we" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "only retain the one with the greatest weight for that node. Then, duplicated cliques are removed from the rest, obtaining \\( MAC_{\\text{selected}} \\). The motivation behind this is to use information about the local geometric structure around graph nodes to find the best consistent set of corresponding nodes. 
It is clear that the number of maximal cliques \\( |MAC_{\\text{selected}}| \\) will not exceed \\( |\\mathbf{V}| \\). We could send these maximal cliques directly to the following stages for 3D registration. However, when \\( |\\mathbf{V}| \\) is quite large, the number of retained maximal cliques can still be very large. Here, we propose several techniques to further filter the maximal cliques." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.27, + 0.892, + 0.405 + ], + "angle": 0, + "content": "- Normal consistency. In the maximal cliques, we find that the normal consistency is satisfied between each correspondence. Given two correspondences \\(\\mathbf{c}_i = (\\mathbf{p}_i^s,\\mathbf{p}_i^t)\\), \\(\\mathbf{c}_j = (\\mathbf{p}_j^s,\\mathbf{p}_j^t)\\) and the normal vectors \\(\\mathbf{n}_i^s,\\mathbf{n}_j^s,\\mathbf{n}_i^t,\\mathbf{n}_j^t\\) at the four points, the angular difference \\(\\alpha_{ij}^{s} = \\angle (\\mathbf{n}_{i}^{s},\\mathbf{n}_{j}^{s})\\), \\(\\alpha_{ij}^{t} = \\angle (\\mathbf{n}_{i}^{t},\\mathbf{n}_{j}^{t})\\) between the normal vectors can be calculated then. The following inequality ought to hold if \\(\\mathbf{c}_i\\) and \\(\\mathbf{c}_j\\) are normal consistent:" + }, + { + "type": "equation", + "bbox": [ + 0.632, + 0.415, + 0.892, + 0.44 + ], + "angle": 0, + "content": "\\[\n\\left| \\sin \\alpha_ {i j} ^ {s} - \\sin \\alpha_ {i j} ^ {t} \\right| < t _ {\\alpha}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.452, + 0.892, + 0.482 + ], + "angle": 0, + "content": "where \\(t_{\\alpha}\\) is a threshold for determining whether the angular differences are similar." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.495, + 0.892, + 0.571 + ], + "angle": 0, + "content": "- Clique ranking. We organize \\( MAC_{\\text{selected}} \\) in a descending order using the clique's weight \\( w_{C_i} \\). The top-\\( K \\) ones are supposed to be more likely to produce correct hypotheses. This makes it flexible to control the number of hypotheses." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.583, + 0.892, + 0.613 + ], + "angle": 0, + "content": "These techniques' experimental analysis is presented in Sec. 4.5." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.625, + 0.834, + 0.642 + ], + "angle": 0, + "content": "3.4. Hypothesis Generation and Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.649, + 0.892, + 0.71 + ], + "angle": 0, + "content": "Each maximal clique filtered from the previous step represents a consistent set of correspondences. By applying the SVD algorithm to each consistency set, we can obtain a set of 6-DoF pose hypotheses." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.722, + 0.892, + 0.782 + ], + "angle": 0, + "content": "- Instance-equal SVD. Transformation estimation of correspondences is often implemented with SVD. Instance-equal means that the weights of all correspondences are equal." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "- Weighted SVD. Assigning weights to correspondences is commonly adopted by recent PCR methods [8, 9, 27, 29]. Correspondence weights can be derived by solving the eigenvectors of a compatibility matrix constructed for a compatibility graph. Here, we take the primary eigenvalues of \\(\\mathbf{W}_{SOG}\\) as correspondence weights." 
+ }, + { + "type": "list", + "bbox": [ + 0.518, + 0.722, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.197, + 0.901 + ], + "angle": 0, + "content": "1https://igraph.org" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17748" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.091, + 0.471, + 0.152 + ], + "angle": 0, + "content": "The final goal of MAC is to estimate the optimal 6-DoF rigid transformation (composed of a rotation pose \\(\\mathbf{R}^{*} \\in SO(3)\\) and a translation pose \\(\\mathbf{t}^{*} \\in \\mathbb{R}^{3}\\)) that maximizes the objective function as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.164, + 0.47, + 0.205 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathbf {R} ^ {*}, \\mathbf {t} ^ {*}\\right) = \\arg \\max _ {\\mathbf {R}, \\mathbf {t}} \\sum_ {i = 1} ^ {N} s \\left(\\mathbf {c} _ {i}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.217, + 0.471, + 0.309 + ], + "angle": 0, + "content": "where \\(\\mathbf{c}_i \\in \\mathbf{C}_{initial}\\), \\(N = |\\mathbf{C}_{initial}|\\), and \\(s(\\mathbf{c}_i)\\) represents the score of \\(\\mathbf{c}_i\\). We consider several RANSAC hypothesis evaluation metrics here [40], including mean average error (MAE), mean square error (MSE) and inlier count. Their behaviors will be experimentally compared in Sec. 4.5. The best hypothesis is taken to perform 3D registration then." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.323, + 0.21, + 0.34 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.348, + 0.267, + 0.365 + ], + "angle": 0, + "content": "4.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.372, + 0.469, + 0.493 + ], + "angle": 0, + "content": "Datasets. We consider four datasets, i.e., the object-scale dataset U3M [26], the scene-scale indoor datasets 3DMatch [44] & 3DLoMatch [18], and the scene-scale outdoor dataset KITTI [15]. U3M has 496 point cloud pairs. 3DLoMatch is the subset of 3DMatch, where the overlap rate of the point cloud pairs ranges from \\(10\\%\\) to \\(30\\%\\), which is very challenging. For KITTI, we follow [3,8] and obtain 555 pairs of point clouds for testing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.493, + 0.469, + 0.659 + ], + "angle": 0, + "content": "Evaluation Criteria. We follow [39] that employs the root mean square error (RMSE) metric to evaluate the 3D point cloud registration performance on the U3M object-scale dataset. In addition, we employ the rotation error (RE) and translation error (TE) to evaluate the registration results on the scene-scale dataset. By referring to the settings in [9], the registration is considered successful when the \\(\\mathrm{RE} \\leq 15^{\\circ}\\), \\(\\mathrm{TE} \\leq 30\\mathrm{cm}\\) on 3DMatch & 3DLoMatch datasets, and \\(\\mathrm{RE} \\leq 5^{\\circ}\\), \\(\\mathrm{TE} \\leq 60\\mathrm{cm}\\) on KITTI dataset. We define a dataset's registration accuracy as the ratio of success cases to the number of point cloud pairs to be registered." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Implementation Details. Our method is implemented in \\(\\mathrm{C + + }\\) based on the point cloud library (PCL) [32] and igraph library. 
For U3M, we use the Harris3D (H3D) [33] keypoint detector and the signatures of histograms of orientation (SHOT) [34] descriptor for initial correspondence generation as in [42]. For 3DMatch and 3DLoMatch datasets, we use the fast point features histograms (FPFH) [31] descriptor and fully convolutional geometric features (FCGF) [10] descriptor to generate the initial correspondence set. The main steps in the comparative experimental sections are SOG construction, searching node-guided maximal cliques, hypotheses generation by instance-equal SVD and evaluation by MAE. Default values for compatibility threshold \\(t_{cmp}\\) and distance parameter \\(d_{cmp}\\) mentioned in Sec. 3.2 are 0.99 and 10 pr respectively; if input matches exceed 5000, \\(t_{cmp}\\) is set to 0.999 to reduce computation. Here, 'pr' is" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.088, + 0.872, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.281, + 0.892, + 0.309 + ], + "angle": 0, + "content": "Figure 3. Registration performance of tested point cloud registration methods on U3M." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.333, + 0.892, + 0.523 + ], + "angle": 0, + "content": "
FPFH | FCGF
RR(%) | RE(°) | TE(cm) | RR(%) | RE(°) | TE(cm)
i) Traditional | SM [20] | 55.88 | 2.94 | 8.15 | 86.57 | 2.29 | 7.07
FGR [45] | 40.91 | 4.96 | 10.25 | 78.93 | 2.90 | 8.41
RANSAC-1M [13] | 64.20 | 4.05 | 11.35 | 88.42 | 3.05 | 9.42
RANSAC-4M [13] | 66.10 | 3.95 | 11.03 | 91.44 | 2.69 | 8.38
GC-RANSAC [5] | 67.65 | 2.33 | 6.87 | 92.05 | 2.33 | 7.11
TEASER++ [36] | 75.48 | 2.48 | 7.31 | 85.77 | 2.73 | 8.66
CG-SAC [30] | 78.00 | 2.40 | 6.89 | 87.52 | 2.42 | 7.66
SC2-PCR [8] | 83.73 | 2.18 | 6.70 | 93.16 | 2.09 | 6.51
ii) Deep learned | 3DRegNet [27] | 26.31 | 3.75 | 9.60 | 77.76 | 2.74 | 8.13
DGR [9] | 32.84 | 2.45 | 7.53 | 88.85 | 2.28 | 7.02
DHVR [19] | 67.10 | 2.78 | 7.84 | 91.93 | 2.25 | 7.08
PointDSC [3] | 72.95 | 2.18 | 6.45 | 91.87 | 2.10 | 6.54
MAC | 84.10 | 1.96 | 6.18 | 93.72 | 1.89 | 6.03
" + }, + { + "type": "table_caption", + "bbox": [ + 0.548, + 0.534, + 0.843, + 0.548 + ], + "angle": 0, + "content": "Table 1. Registration results on 3DMatch dataset." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.585, + 0.892, + 0.676 + ], + "angle": 0, + "content": "a distance unit called point cloud resolution [42]. Normal vectors are calculated using the NormalEstimation class of PCL with the 20 nearest neighboring points. When searching maximal cliques, the lower bound on clique size is set to 3 with no upper bound defined. All experiments were implemented with an Intel 12700H CPU and 32 GB RAM." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.701, + 0.722, + 0.716 + ], + "angle": 0, + "content": "4.2. Results on U3M Dataset" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.73, + 0.892, + 0.821 + ], + "angle": 0, + "content": "We perform an extensive comparison in Fig. 3. Here, the following methods are tested, including SAC-COT [39], OSAC [37], SAC-IA [31], RANSAC [13], \\(\\mathrm{SC^2}\\)-PCR [8], FGR [45], GO-ICP [41], and PPF [11], where the former four are RANSAC-based methods. The RMSE threshold is varied from \\(0.5\\mathrm{pr}\\) to \\(5\\mathrm{pr}\\) with a step of \\(0.5\\mathrm{pr}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.825, + 0.892, + 0.9 + ], + "angle": 0, + "content": "The results indicate that MAC performs best and significantly outperforms all tested RANSAC fashion estimators, such as SAC-COT, OSAC, SAC-IA, and RANSAC. The registration performance of MAC based on the MAE evaluation metric is the best on U3M." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "17749" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.473, + 0.214 + ], + "angle": 0, + "content": "
 | FPFH | FCGF
 | RR(%) | RE(°) | TE(cm) | RR(%) | RE(°) | TE(cm)
i) Traditional RANSAC-1M [13] | 0.67 | 10.27 | 15.06 | 9.77 | 7.01 | 14.87
RANSAC-4M [13] | 0.45 | 10.39 | 20.03 | 10.44 | 6.91 | 15.14
TEASER++ [36] | 35.15 | 4.38 | 10.96 | 46.76 | 4.12 | 12.89
SC2-PCR [8] | 38.57 | 4.03 | 10.31 | 58.73 | 3.80 | 10.44
ii) Deep learned DGR [9] | 19.88 | 5.07 | 13.53 | 43.80 | 4.17 | 10.82
PointDSC [3] | 20.38 | 4.04 | 10.25 | 56.20 | 3.87 | 10.48
MAC | 40.88 | 3.66 | 9.45 | 59.85 | 3.50 | 9.75
" + }, + { + "type": "table_caption", + "bbox": [ + 0.118, + 0.224, + 0.428, + 0.239 + ], + "angle": 0, + "content": "Table 2. Registration results on 3DLoMatch dataset." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.254, + 0.47, + 0.399 + ], + "angle": 0, + "content": "
# Samples | 3DMatch RR(%) | 3DLoMatch RR(%)
 | 5000 | 2500 | 1000 | 500 | 250 | 5000 | 2500 | 1000 | 500 | 250
FCGF [10] | 85.1 | 84.7 | 83.3 | 81.6 | 71.4 | 40.1 | 41.7 | 38.2 | 35.4 | 26.8
SpinNet [1] | 88.6 | 86.6 | 85.5 | 83.5 | 70.2 | 59.8 | 54.9 | 48.3 | 39.8 | 26.8
Predator [18] | 89.0 | 89.9 | 90.6 | 88.5 | 86.6 | 59.8 | 61.2 | 62.4 | 60.8 | 58.1
CoFiNet [43] | 89.3 | 88.9 | 88.4 | 87.4 | 87.0 | 67.5 | 66.2 | 64.2 | 63.1 | 61.0
GeoTransformer [29] | 92.0 | 91.8 | 91.8 | 91.4 | 91.2 | 75.0 | 74.8 | 74.2 | 74.1 | 73.5
FCGF+MAC | 91.3 | 92.2 | 91.6 | 90.4 | 85.6 | 57.2 | 56.0 | 52.6 | 42.4 | 32.1
 | 6.2↑ | 7.5↑ | 8.3↑ | 8.8↑ | 14.2↑ | 17.1↑ | 14.3↑ | 14.4↑ | 7.0↑ | 5.3↑
SpinNet+MAC | 95.3 | 95.1 | 93.3 | 91.4 | 81.2 | 72.8 | 69.9 | 59.2 | 54.8 | 32.1
 | 6.7↑ | 8.5↑ | 7.8↑ | 7.9↑ | 11.0↑ | 13.0↑ | 15.0↑ | 10.9↑ | 15.0↑ | 5.3↑
Predator+MAC | 94.6 | 94.4 | 94.0 | 93.5 | 92.3 | 70.9 | 70.4 | 69.8 | 67.2 | 64.1
 | 5.6↑ | 4.5↑ | 3.4↑ | 5.0↑ | 5.7↑ | 11.1↑ | 9.2↑ | 7.4↑ | 6.4↑ | 6.0↑
CoFiNet+MAC | 94.1 | 94.4 | 94.5 | 93.8 | 92.7 | 71.6 | 71.5 | 70.6 | 69.2 | 68.1
 | 4.8↑ | 5.5↑ | 6.1↑ | 6.4↑ | 5.7↑ | 4.1↑ | 5.3↑ | 6.4↑ | 6.1↑ | 7.1↑
GeoTransformer+MAC | 95.7 | 95.7 | 95.2 | 95.3 | 94.6 | 78.9 | 78.7 | 78.2 | 77.7 | 76.6
 | 3.7↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.9↑ | 4.0↑ | 3.6↑ | 3.1↑
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.409, + 0.47, + 0.438 + ], + "angle": 0, + "content": "Table 3. Performance boosting for deep-learned methods when combined with MAC." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.465, + 0.455, + 0.481 + ], + "angle": 0, + "content": "4.3. Results on 3DMatch & 3DLoMatch Datasets" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.49, + 0.47, + 0.58 + ], + "angle": 0, + "content": "PCR methods comparison. Both geometric-only and deep-learned methods are considered for comparison, including SM [20], FGR [45], RANSAC [13], TEASER++ [36], CG-SAC [30], \\(\\mathrm{SC^2}\\)-PCR [8], 3DRegNet [27], DGR [9], DHVR [19] and PointDSC [3]. Results are shown in Tables 1 and 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.582, + 0.47, + 0.734 + ], + "angle": 0, + "content": "The following conclusions can be made: 1) regardless of which descriptor is used, MAC outperforms all compared methods on both 3DMatch and 3DLoMatch datasets, indicating its strong ability to register indoor scene point clouds; 2) even compared with deep-learned methods, MAC still achieves better performance without any data training; 3) in addition to the registration recall (RR) metric, MAC achieves the best RE and TE metrics. This indicates that registrations by MAC are very accurate and MAC is able to align low overlapping data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.47, + 0.84 + ], + "angle": 0, + "content": "Boosting deep-learned methods with MAC. Several kinds of state-of-the-art deep-learned methods are integrated with MAC for evaluation. The considered methods are FCGF [10], SpinNet [1], Predator [18], CoFiNet [43] and GeoTransformer [29]. Each method is tested under a different number of samples, which refer to the number of sampled points or correspondences. Results are reported in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.84, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Remarkably, MAC dramatically improves the registration recall under all tested methods on both 3DMatch and 3DLoMatch datasets. Notably, the performance of SpinNet, Predator and CoFiNet after boosting by MAC exceeds" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.089, + 0.895, + 0.228 + ], + "angle": 0, + "content": "
 | FPFH | FCGF
 | RR(%) | RE(°) | TE(cm) | RR(%) | RE(°) | TE(cm)
i) Traditional FGR [45] | 5.23 | 0.86 | 43.84 | 89.54 | 0.46 | 25.72
TEASER++ [36] | 91.17 | 1.03 | 17.98 | 94.96 | 0.38 | 13.69
RANSAC [13] | 74.41 | 1.55 | 30.20 | 80.36 | 0.73 | 26.79
CG-SAC [30] | 74.23 | 0.73 | 14.02 | 83.24 | 0.56 | 22.96
SC2-PCR [8] | 99.28 | 0.39 | 8.68 | 97.84 | 0.33 | 20.58
ii) Deep learned DGR [9] | 77.12 | 1.64 | 33.10 | 96.90 | 0.34 | 21.70
PointDSC [3] | 98.92 | 0.38 | 8.35 | 97.84 | 0.33 | 20.32
MAC | 99.46 | 0.40 | 8.46 | 97.84 | 0.34 | 19.34
" + }, + { + "type": "table_caption", + "bbox": [ + 0.557, + 0.238, + 0.835, + 0.252 + ], + "angle": 0, + "content": "Table 4. Registration results on KITTI dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.278, + 0.892, + 0.355 + ], + "angle": 0, + "content": "that of GeoTransformer. MAC working with GeoTransformer achieves state-of-the-art registration recall of \\(95.7\\%\\) / \\(78.9\\%\\) on 3DMatch / 3DLoMatch. The results suggest that: 1) MAC can greatly boost existing deep-learned methods; 2) MAC is not sensitive to the number of samples." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.363, + 0.736, + 0.377 + ], + "angle": 0, + "content": "4.4. Results on KITTI Dataset" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.386, + 0.892, + 0.431 + ], + "angle": 0, + "content": "In Table 4, the results of DGR [9], PointDSC [3], TEASER++ [36], RANSAC [13], CG-SAC [30], \\(\\mathrm{SC^2}\\)-PCR [8] and MAC are reported for comparison." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.431, + 0.893, + 0.581 + ], + "angle": 0, + "content": "As shown by the table, in terms of the registration recall performance, MAC presents the best and is tied for the best results with FPFH and FCGF descriptor settings, respectively. MAC also has a lower TE than the state-of-the-art geometric-only method \\(\\mathrm{SC^2}\\)-PCR. Note that outdoor point clouds are significantly sparse and non-uniformly distributed. The registration experiments on the object, indoor scene, and outdoor scene datasets consistently verify that MAC holds good generalization ability in different application contexts." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.591, + 0.704, + 0.607 + ], + "angle": 0, + "content": "4.5. Analysis Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.72 + ], + "angle": 0, + "content": "In this section, we perform ablation studies and analysis experiments on both 3DMatch and 3DLoMatch datasets. We progressively experiment with the techniques proposed in Sec. 3, and the results are shown in Table 5. The quality of generated hypotheses is analyzed in Table 6. The performance upper bound is studied in Table 7. Table 8 presents the time efficiency analysis of MAC." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.81 + ], + "angle": 0, + "content": "Performing feature matching selection. Before 3D registration, a popular way is to perform outlier rejection to reduce the correspondence set. Here we employ geometric consistency (GC) [7], which is independent of the feature space and associates the largest consistent cluster relating to the compatibility among correspondences." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "By comparing Row 1 and 2 of Table 5, GC has a negative impact on MAC performance, potentially due to that some inliers are also removed in this process. This demonstrates that MAC can still perform well even if the initial correspondence set is directly utilized as input without any filtering." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17750" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.088, + 0.894, + 0.396 + ], + "angle": 0, + "content": "
FOG | SOG | GC | MC | NG | NC | CR | SVD | W-SVD | MAE | MSE | #inlier | RR(%) | RE(°) | TE(cm)
FPFH 1) | 83.86/39.14 | 2.17/4.01 | 6.51/9.94
2) | 77.02/26.61 | 2.10/3.83 | 6.19/9.49
3) | 82.26/39.02 | 2.12/3.98 | 6.43/9.89
4) | 83.49/38.91 | 2.22/4.11 | 6.65/10.05
5) | 83.67/38.85 | 2.15/4.03 | 6.53/9.82
6) | 84.10/40.88 | 1.96/3.66 | 6.18/9.45
7) | 82.93/39.98 | 1.95/3.66 | 6.12/9.48
8) | 82.44/38.46 | 2.16/3.97 | 6.41/9.85
9) | 74.06/31.11 | 2.08/3.89 | 6.17/9.82
10) Top100 | 82.01/37.79 | 2.13/4.02 | 6.42/9.82
11) Top200 | 83.18/38.85 | 2.16/4.08 | 6.55/9.91
12) Top500 | 83.06/38.85 | 2.14/4.03 | 6.47/9.81
13) Top1000 | 83.30/38.91 | 2.16/4.05 | 6.53/9.84
14) Top2000 | 83.36/38.79 | 2.14/4.02 | 6.52/9.78
FCGF 1) | 93.41/59.80 | 2.04/3.78 | 6.33/10.16
2) | 91.68/49.97 | 1.99/3.64 | 6.23/9.90
3) | 93.35/59.24 | 2.04/3.67 | 6.28/9.99
4) | 92.91/59.07 | 2.06/3.88 | 6.33/10.20
5) | 93.16/59.46 | 2.04/3.76 | 6.26/10.00
6) | 93.72/59.85 | 1.89/3.50 | 6.03/9.75
7) | 93.59/59.01 | 1.86/3.49 | 6.00/9.61
8) | 93.28/59.63 | 2.02/3.73 | 6.24/9.98
9) | 87.86/49.35 | 2.00/3.61 | 6.09/9.60
10) Top100 | 92.42/57.44 | 2.00/3.75 | 6.21/10.00
11) Top200 | 93.22/57.83 | 2.01/3.75 | 6.29/10.06
12) Top500 | 93.22/58.90 | 2.02/3.78 | 6.33/10.02
13) Top1000 | 93.35/59.40 | 2.05/3.78 | 6.32/10.18
14) Top2000 | 93.35/59.52 | 2.04/3.78 | 6.33/10.19
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.406, + 0.892, + 0.449 + ], + "angle": 0, + "content": "Table 5. Analysis experiments on 3DMatch / 3DLoMatch. FOG: First order compatibility graph. SOG: Second order compatibility graph. GC: Use geometric consistency to preliminarily perform outlier rejection. MC: Search the maximum clique instead of maximal cliques. NG: Node-guided clique selection. NC: Normal consistency. CR: Clique ranking. W-SVD: Weighted SVD." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.475, + 0.47, + 0.505 + ], + "angle": 0, + "content": "Graph construction choices. We test the performance of MAC by using different graph construction approaches." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.506, + 0.47, + 0.642 + ], + "angle": 0, + "content": "As shown in Row 1 and 3 of Table 5, the registration recall obtained by using SOG is \\(1.6\\%\\) higher than using FOG when combined with FPFH, and \\(0.06\\%\\) higher when combined with FCGF on 3DMatch. Also, the registration recall obtained by using SOG is \\(0.12\\%\\) higher than using FOG when combined with FPFH, and \\(0.56\\%\\) higher when combined with FCGF on 3DLoMatch. Therefore, SOG is more suitable for MAC. Detailed analyzing descriptions can be found in the supplementary." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.643, + 0.47, + 0.688 + ], + "angle": 0, + "content": "Maximum or maximal clique. To justify the advantages of maximal cliques, we change the search strategy of MAC to the maximum cliques and test the registration performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.471, + 0.902 + ], + "angle": 0, + "content": "As shown in Row 1 and 9 in Table 5, applying maximal cliques surpasses maximum by \\(9.8\\%\\) when combined with FPFH, and \\(5.55\\%\\) higher when combined with FCGF on 3DMatch. Besides, the registration recall obtained by using maximal cliques is \\(8.03\\%\\) higher than using the maximum cliques when combined with FPFH and \\(10.45\\%\\) higher when combined with FCGF on 3DLoMatch. There are several reasons for this: 1) maximal cliques include the maximum cliques and additionally consider local graph constraints, so the search for maximal cliques can make use of both local and global information in the compatibility graph; 2) the maximum clique is a very tight constraint which requires maximizing the number of mutually compatible correspondences, but it does not guarantee the opti" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.475, + 0.574, + 0.488 + ], + "angle": 0, + "content": "mal result." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.49, + 0.892, + 0.536 + ], + "angle": 0, + "content": "Node-guided clique selection. We compare the performance with and without node-guided (NG) clique selection for maximal cliques search." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.537, + 0.893, + 0.702 + ], + "angle": 0, + "content": "Comparing Row 1 and 4 in Table 5, using NG achieves a recall improvement of \\(0.37\\%\\) when combined with FPFH, and \\(0.5\\%\\) improvement when combined with FCGF on 3DMatch. Also, using NG achieves a recall improvement of \\(0.23\\%\\) with FPFH and \\(0.73\\%\\) improvement with FCGF on 3DLoMatch. It is worth noting that while NG improves recall, the mean RE and mean TE are also decreasing. For example, NG reduces the mean RE by \\(0.1^{\\circ}\\) and the mean TE by \\(0.11\\mathrm{cm}\\) with FPFH on 3DLoMatch. 
NG effectively reduces the number of calculations in the subsequent steps and promises accurate hypotheses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.703, + 0.892, + 0.749 + ], + "angle": 0, + "content": "Different approaches for clique filtering. We test the effectiveness of the two filtering methods, normal consistency and clique ranking." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.855 + ], + "angle": 0, + "content": "1) Normal consistency: comparing Row 1 and 8 in Table 5, NC slightly degrades MAC's performance. 2) Clique ranking: Row 10 to 14 demonstrate that the registration recall tends to increase as \\( K \\) increases, suggesting that larger \\( K \\) yields a subset of cliques that generate more correct hypotheses. Remarkably, setting \\( K \\) to 100 can already achieve outstanding performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Employing instance-equal or weighted SVD. The comparisons of instance-equal and weighted SVD are shown in Rows 1 and 5 of Table 5." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "17751" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.468, + 0.169 + ], + "angle": 0, + "content": "
# hypotheses | 3DMatch | 3DLoMatch
 | RANSAC | MAC | RANSAC | MAC
 | FCGF | FPFH | FCGF | FPFH | FCGF | FPFH | FCGF | FPFH
100 | 10.45 | 0.76 | 61.94 | 50.67 | 1.25 | 0.05 | 30.47 | 12.22
200 | 20.76 | 1.50 | 119.20 | 89.27 | 2.52 | 0.09 | 55.57 | 17.59
500 | 51.74 | 3.68 | 269.06 | 162.41 | 6.21 | 0.21 | 109.32 | 23.32
1000 | 103.65 | 7.39 | 456.18 | 217.32 | 12.43 | 0.41 | 156.11 | 26.02
2000 | 208.24 | 14.90 | 669.32 | 254.13 | 24.80 | 0.81 | 202.12 | 29.31
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.18, + 0.468, + 0.208 + ], + "angle": 0, + "content": "Table 6. Comparison of the number of correct hypotheses generated by MAC and RANSAC on 3DMatch and 3DLoMatch." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.222, + 0.372, + 0.307 + ], + "angle": 0, + "content": "
 | 3DMatch RR(%) | 3DLoMatch RR(%)
MAC-1 | 98.46 | 91.24
MAC-5 | 97.10 | 83.32
MAC-10 | 96.43 | 77.93
MAC-20 | 94.70 | 70.47
MAC-50 | 91.13 | 56.37
MAC-origin | 93.72 | 59.85
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.318, + 0.468, + 0.36 + ], + "angle": 0, + "content": "Table 7. Registration recall on 3DMatch with FCGF setting based on judging MAC's hypotheses. MAC-\\(n\\): a point cloud pair is considered alignable if at least \\(n\\) hypotheses are correct." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.387, + 0.468, + 0.447 + ], + "angle": 0, + "content": "Weighted SVD is slightly inferior to instance-equal SVD. This suggests that samples in MACs are already very consistent, indicating no additional weighting strategies are required." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.448, + 0.468, + 0.492 + ], + "angle": 0, + "content": "Varying hypothesis evaluation metrics. Here we compare three evaluation metrics, including MAE, MSE and inlier count, for MAC hypothesis evaluation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.493, + 0.468, + 0.657 + ], + "angle": 0, + "content": "As shown in Row 1, 6 and 7, MAC with MAE achieves the best performance. In Table 5, MAE achieves a recall improvement of \\(0.24\\%\\) when combined with FPFH, and \\(0.31\\%\\) improvement when combined with FCGF on 3DMatch compared with the commonly used inlier count metric. Also, MAE has a \\(1.74\\%\\) improvement when combined with FPFH, and \\(0.05\\%\\) when combined with FCGF on 3DLoMatch compared with inlier count. MAE is also very effective in reducing RE and TE. For instance, MAE reduces the mean RE by \\(0.35^{\\circ}\\) and the mean TE by \\(0.49~\\mathrm{cm}\\) with FPFH on 3DLoMatch." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.468, + 0.718 + ], + "angle": 0, + "content": "Comparison with RANSAC hypotheses. We evaluate the quality of the generated hypotheses by comparing the hypotheses from RANSAC and MAC with the ground truth transformation. The results are shown in Table 6." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.468, + 0.81 + ], + "angle": 0, + "content": "Compared to RANSAC, which randomly selects correspondences and generates hypotheses from the correspondence set without geometric constraints, MAC effectively generates more convincing hypotheses from maximal cliques in the compatibility graph, which fully exploits the consensus information in the graph." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.468, + 0.9 + ], + "angle": 0, + "content": "The performance upper bound of MAC. Given an ideal hypothesis evaluation metric, allowing a point cloud pair can be aligned as long as correct hypotheses can be generated. This can test the performance upper bound of MAC. We vary the judging threshold for the number of generated correct hypotheses and report the results in Table 7." + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.089, + 0.892, + 0.157 + ], + "angle": 0, + "content": "
# correspondences | Graph Construction | Search Maximal Cliques | Node-guided Clique Selection | Pose Estimation | Total
250 | 1.03 (14.55%) | 5.24 (74.01%) | 0.58 (8.19%) | 0.23 (3.25%) | 7.08
500 | 4.07 (17.54%) | 15.67 (67.51%) | 3.12 (13.44%) | 0.35 (1.51%) | 23.21
1000 | 16.90 (29.85%) | 36.60 (64.65%) | 1.88 (3.32%) | 1.23 (2.18%) | 56.61
2500 | 153.92 (53.29%) | 104.03 (36.02%) | 4.97 (1.72%) | 25.93 (8.97%) | 288.85
5000 | 887.03 (27.16%) | 1579.61 (48.37%) | 65.40 (2.00%) | 733.38 (22.47%) | 3265.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.168, + 0.892, + 0.207 + ], + "angle": 0, + "content": "Table 8. Average consumed time (ms) per point cloud pair on the 3DMatch dataset. Predator is used for generating correspondences." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.239, + 0.892, + 0.39 + ], + "angle": 0, + "content": "Impressively, MAC-1 achieves registration recalls of \\(98.46\\% / 91.24\\%\\) on 3DMatch / 3DLoMatch. This indicates that even on low overlapping datasets, MAC is able to produce correct hypotheses for most point cloud pairs. In addition, we can deduce that MAC's performance can be further improved with better hypothesis evaluation metrics. Time consumption of MAC. We employ Predator [18] to generate correspondences with different magnitudes to test the time performance of MAC. The time consumption is reported in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.393, + 0.892, + 0.634 + ], + "angle": 0, + "content": "The following observations can be made. 1) In general, MAC can complete 3D registration in only tens of milliseconds when the number of correspondences is smaller than 1000. Even with an input with 2500 correspondences, the time consumption is about 0.29 seconds. Note that MAC is implemented on the CPU only. 2) As the number of correspondences increases from 250 to 2500, there is an increase in time cost for graph construction due to \\(\\mathbf{W}_{SOG}\\) computation taking more time. 3) When the number of correspondences reaches 5000, there is a large rise in the time cost of MAC's registration. The significant increase in the input size makes the search for maximal cliques more time-consuming. However, MAC is not sensitive to the cardinality of the input correspondence set, as verified in Table 3. Hence, using sparse inputs for MAC can produce outstanding performance while making registration efficient." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.653, + 0.618, + 0.669 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.678, + 0.892, + 0.803 + ], + "angle": 0, + "content": "In this paper, we presented MAC to solve PCR by using the maximal clique constraint to generate precise pose hypotheses from correspondences. Our method achieves state-of-the-art performance on all tested datasets and can adapt to deep-learned methods to boost their performance. Limitation. As shown in Table 7 and Table 1, MAC produces accurate hypotheses but may fail to find them. In the future, we plan to develop a more convincing hypothesis evaluation technique utilizing semantic information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.804, + 0.892, + 0.887 + ], + "angle": 0, + "content": "Acknowledgments. This work is supported in part by the National Natural Science Foundation of China (NFSC) (No.U19B2037 and 62002295), Shaanxi Provincial Key R&D Program (No.2021KWZ-03), and the Fundamental Research Funds for the Central Universities (No.D5000220352)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.956 + ], + "angle": 0, + "content": "17752" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.473, + 0.186 + ], + "angle": 0, + "content": "[1] Sheng Ao, Qingyong Hu, Bo Yang, Andrew Markham, and Yulan Guo. Spinnet: Learning a general surface descriptor for 3d point cloud registration. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11753-11762, 2021. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.187, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Yasuhiro Aoki, Hunter Goforth, Rangaprasad Arun Srivatsan, and Simon Lucey. Pointnetlk: Robust & efficient point cloud registration using pointnet. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7163-7172, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.259, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[3] Xuyang Bai, Zixin Luo, Lei Zhou, Hongkai Chen, Lei Li, Zeyu Hu, Hongbo Fu, and Chiew-Lan Tai. Pointdsc: Robust point cloud registration using deep spatial consistency. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 15859-15869. IEEE, 2021. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.344, + 0.472, + 0.413 + ], + "angle": 0, + "content": "[4] Xuyang Bai, Zixin Luo, Lei Zhou, Hongbo Fu, Long Quan, and Chiew-Lan Tai. D3feat: Joint learning of dense detection and description of 3d local features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6359-6367, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.415, + 0.472, + 0.457 + ], + "angle": 0, + "content": "[5] Daniel Barath and Jiri Matas. Graph-cut ransac. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6733-6741, 2018. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.458, + 0.472, + 0.514 + ], + "angle": 0, + "content": "[6] Alvaro Parra Bustos and Tat-Jun Chin. Guaranteed outlier removal for point cloud registration with correspondences. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(12):2868-2882, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.515, + 0.472, + 0.557 + ], + "angle": 0, + "content": "[7] Hui Chen and Bir Bhanu. 3d free-form object recognition in range images using local surface patches. Pattern Recognition Letters, 28(10):1252-1262, 2007. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.559, + 0.472, + 0.628 + ], + "angle": 0, + "content": "[8] Zhi Chen, Kun Sun, Fan Yang, and Wenbing Tao. Sc2-pcr: A second order spatial compatibility for efficient and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13221-13231, 2022. 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.63, + 0.472, + 0.685 + ], + "angle": 0, + "content": "[9] Christopher Choy, Wei Dong, and Vladlen Koltun. Deep global registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2514-2523. IEEE, 2020. 1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.687, + 0.472, + 0.743 + ], + "angle": 0, + "content": "[10] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8958-8966, 2019. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.745, + 0.472, + 0.813 + ], + "angle": 0, + "content": "[11] Bertram Drost, Markus Ulrich, Nassir Navab, and Slobodan Ilic. Model globally, match locally: Efficient and robust 3d object recognition. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 998-1005. IEEE, 2010. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.816, + 0.472, + 0.872 + ], + "angle": 0, + "content": "[12] David Eppstein, Maarten Löffler, and Darren Strash. Listing all maximal cliques in sparse graphs in near-optimal time. In International Symposium on Algorithms and Computation, pages 403-414. Springer, 2010. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.472, + 0.903 + ], + "angle": 0, + "content": "[13] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.116, + 0.473, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.19 + ], + "angle": 0, + "content": "[14] Kexue Fu, Shaolei Liu, Xiaoyuan Luo, and Manning Wang. Robust point cloud registration framework based on deep graph matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8893-8902, 2021. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.894, + 0.261 + ], + "angle": 0, + "content": "[15] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3354-3361. IEEE, 2012. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.894, + 0.332 + ], + "angle": 0, + "content": "[16] Zan Gojcic, Caifa Zhou, Jan D Wegner, and Andreas Wieser. The perfect match: 3d point cloud matching with smoothed densities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5545-5554, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.894, + 0.403 + ], + "angle": 0, + "content": "[17] Yulan Guo, Mohammed Bennamoun, Ferdous Sohel, Min Lu, and Jianwei Wan. 3d object recognition in cluttered scenes with local surface features: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(11):2270-2287, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.894, + 0.474 + ], + "angle": 0, + "content": "[18] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4267-4276, 2021. 1, 2, 3, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[19] Junha Lee, Seungwook Kim, Minsu Cho, and Jaesik Park. Deep hough voting for robust global registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15994-16003, 2021. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[20] Marius Leordeanu and Martial Hebert. A spectral technique for correspondence problems using pairwise constraints. 2005. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.575, + 0.892, + 0.617 + ], + "angle": 0, + "content": "[21] Jiayuan Li. A practical o (n2) outlier removal method for point cloud registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.619, + 0.892, + 0.674 + ], + "angle": 0, + "content": "[22] Yang Li and Tatsuya Harada. Lepard: Learning partial point cloud matching in rigid and deformable scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5554-5564, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.676, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[23] Muyuan Lin, Varun Murali, and Sertac Karaman. A planted clique perspective on hypothesis pruning. IEEE Robotics and Automation Letters, 7(2):5167-5174, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.718, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[24] Yu-Kai Lin, Wen-Chieh Lin, and Chieh-Chih Wang. Kclosest points and maximum clique pruning for efficient and effective 3-d laser scan matching. IEEE Robotics and Automation Letters, 7(2):1471-1477, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[25] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. Automatic correspondence for 3d modeling: an extensive review. International Journal of Shape Modeling, 11(02):253-291, 2005. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[26] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. A novel representation and feature matching algorithm for automatic pairwise registration of range images. International Journal of Computer Vision, 66(1):19-40, 2006. 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17753" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.174 + ], + "angle": 0, + "content": "[27] G Dias Pais, Srikumar Ramalingam, Venu Madhav Govindu, Jacinto C Nascimento, Rama Chellappa, and Pedro Miraldo. 3dregnet: A deep neural network for 3d point registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7193-7203. IEEE, 2020. 1, 2, 3, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.176, + 0.47, + 0.232 + ], + "angle": 0, + "content": "[28] Alvaro Parra, Tat-Jun Chin, Frank Neumann, Tobias Friedrich, and Maximilian Katzmann. A practical maximum clique algorithm for matching with pairwise constraints. arXiv preprint arXiv:1902.01534, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.47, + 0.302 + ], + "angle": 0, + "content": "[29] Zheng Qin, Hao Yu, Changjian Wang, Yulan Guo, Yuxing Peng, and Kai Xu. Geometric transformer for fast and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11143-11152, 2022. 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.47, + 0.357 + ], + "angle": 0, + "content": "[30] Siwen Quan and Jiaqi Yang. Compatibility-guided sampling consensus for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 58(10):7380-7392, 2020. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.47, + 0.413 + ], + "angle": 0, + "content": "[31] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In IEEE International Conference on Robotics and Automation, pages 3212-3217. IEEE, 2009. 
1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.47, + 0.455 + ], + "angle": 0, + "content": "[32] Radu Bogdan Rusu and Steve Cousins. 3d is here: Point cloud library (pcl). In IEEE International Conference on Robotics and Automation, pages 1-4. IEEE, 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.456, + 0.47, + 0.497 + ], + "angle": 0, + "content": "[33] Ivan Sipiran and Benjamin Bustos. Harris 3d: a robust extension of the harris operator for interest point detection on 3d meshes. The Visual Computer, 27(11):963-976, 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.47, + 0.553 + ], + "angle": 0, + "content": "[34] Federico Tombari, Samuele Salti, and Luigi Di Stefano. Unique signatures of histograms for local surface description. In European Conference on Computer Vision, pages 356-369. Springer, 2010. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.554, + 0.47, + 0.621 + ], + "angle": 0, + "content": "[35] Haiping Wang, Yuan Liu, Zhen Dong, and Wenping Wang. You only hypothesize once: Point cloud registration with rotation-equivariant descriptors. In Proceedings of the ACM International Conference on Multimedia, pages 1630-1641, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.47, + 0.664 + ], + "angle": 0, + "content": "[36] Heng Yang, Jingnan Shi, and Luca Carlone. Teaser: Fast and certifiable point cloud registration. IEEE Transactions on Robotics, 37(2):314-333, 2020. 2, 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.665, + 0.47, + 0.705 + ], + "angle": 0, + "content": "[37] Jiaqi Yang, Zhiguo Cao, and Qian Zhang. A fast and robust local descriptor for 3d point cloud registration. Information Sciences, 346:163-179, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.47, + 0.762 + ], + "angle": 0, + "content": "[38] Jiaqi Yang, Jiahao Chen, Siwen Quan, Wei Wang, and Yanning Zhang. Correspondence selection with loose-tight geometric voting for 3d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.47, + 0.831 + ], + "angle": 0, + "content": "[39] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Zhaoshuai Qi, and Yanning Zhang. Sac-cot: Sample consensus by sampling compatibility triangles in graphs for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 60:1-15, 2021. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[40] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Qian Zhang, Yanning Zhang, and Zhiguo Cao. Toward efficient and robust metrics for ransac hypotheses and 3d rigid registration. IEEE Transactions on Circuits and Systems for Video Technology, 32(2):893-906, 2021. 1, 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.148 + ], + "angle": 0, + "content": "[41] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[42] Jiaqi Yang, Yang Xiao, Zhiguo Cao, and Weidong Yang. Ranking 3d feature correspondences via consistency voting. 
Pattern Recognition Letters, 117:1-8, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.248 + ], + "angle": 0, + "content": "[43] Hao Yu, Fu Li, Mahdi Saleh, Benjamin Busam, and Slobodan Ilic. Cofinet: Reliable coarse-to-fine correspondences for robust pointcloud registration. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.893, + 0.319 + ], + "angle": 0, + "content": "[44] Andy Zeng, Shuran Song, Matthias Nießner, Matthew Fisher, Jianxiong Xiao, and Thomas Funkhouser. 3dmatch: Learning local geometric descriptors from rgb-d reconstructions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1802-1811, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.893, + 0.361 + ], + "angle": 0, + "content": "[45] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Fast global registration. In European Conference on Computer Vision, pages 766-782. Springer, 2016. 2, 5, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.893, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "17754" + } + ] +] \ No newline at end of file diff --git a/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_origin.pdf b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c00274f38a17c15d2012463ec3f91ec84da228b4 --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/6c9eb542-01ea-4edb-baf1-31469bcf7e1e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:051d289b8e092157770cdbc6eaf218fcb4f3be11c95f353fdb70cd3b5859b34f +size 1410837 diff --git a/2023/3D Registration With Maximal Cliques/full.md b/2023/3D Registration With Maximal Cliques/full.md new file mode 100644 index 0000000000000000000000000000000000000000..debfe53fa0a6523c9511519c1ff232008418cc67 --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/full.md @@ -0,0 +1,305 @@ +# 3D Registration with Maximal Cliques + +Xiyu Zhang Jiaqi Yang* Shikun Zhang Yanning Zhang +School of Computer Science, Northwestern Polytechnical University, China + +{2426988253, zhangshikun}@mail.nwpu.edu.cn; {jqyang, ynzhang}@nwpu.edu.cn + +# Abstract + +As a fundamental problem in computer vision, 3D point cloud registration (PCR) aims to seek the optimal pose to align a point cloud pair. In this paper, we present a 3D registration method with maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph for accurate pose hypotheses generation: 1) A compatibility graph is constructed to render the affinity relationship between initial correspondences. 2) We search for maximal cliques in the graph, each of which represents a consensus set. We perform node-guided clique selection then, where each node corresponds to the maximal clique with the greatest graph weight. 3) Transformation hypotheses are computed for the selected cliques by the SVD algorithm and the best hypothesis is used to perform registration. Extensive experiments on U3M, 3DMatch, 3DLoMatch and KITTI demonstrate that MAC effectively increases registration accuracy, outperforms various state-of-the-art methods and boosts the performance of deep-learned methods. 
MAC combined with deep-learned methods achieves state-of-the-art registration recall of $95.7\%$ / $78.9\%$ on 3DMatch / 3DLoMatch. + +# 1. Introduction + +Point cloud registration (PCR) is an important and fundamental problem in 3D computer vision and has a wide range of applications in localization [13], 3D object detection [17] and 3D reconstruction [25]. Given two 3D scans of the same object (or scene), the goal of PCR is to estimate a six-degree-of-freedom (6-DoF) pose transformation that accurately aligns the two input point clouds. Using point-to-point feature correspondences is a popular and robust solution to the PCR problem. However, due to the limitations of existing 3D keypoint detectors & descriptors, the limited overlap between point clouds and data noise, corre + +![](images/ed73bd9dfbf97bfefb637a76132fd13fe6bccc14c240ed6c4df0d7824aa855b4.jpg) +Figure 1. Comparison of maximal and maximum cliques on a low overlapping point cloud pair. Maximal cliques (MAC) effectively choose the optimal 6-DoF transformation hypothesis with low rotation error (RE) and translation error (TE) for two point clouds with a low inlier ratio, while the maximum clique fails in this case. + +spondences generated by feature matching usually contain outliers, resulting in great challenges to accurate 3D registration. + +The problem of 3D registration by handling correspondences with outliers has been studied for decades. We classify them into geometric-only and deep-learned methods. For geometric-only methods [5, 6, 21, 30, 31, 38-41], random sample consensus (RANSAC) and its variants perform an iterative sampling strategy for registration. Although RANSAC-based methods are simple and efficient, their performance is highly vulnerable when the outlier rate increases, and it requires a large number of iterations to obtain acceptable results. Also, a series of global registration methods based on branch-and-bound (BnB) are proposed to search the 6D parameter space and obtain the optimal global solution. The main weakness of these methods is the high computational complexity, especially when the correspondence set is of a large magnitude and has an extremely high outlier rate. For deep-learned methods, some [1-4, 9, 10, 14, 16, 18, 19, 27, 35] focus on improving + +one module in the registration process, such as investigating more discriminate keypoint feature descriptors or more effective correspondence selection techniques, while the others [22, 29, 43] focus on registration in an end-to-end manner. However, deep-learned based methods require a large amount of data for training and usually lack generalization on different datasets. At present, it is still very challenging to achieve accurate registrations in the presence of heavy outliers and in cross-dataset conditions. + +In this paper, we propose a geometric-only 3D registration method based on maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph to generate accurate pose hypotheses. We first model the initial correspondence set as a compatibility graph, where each node represents a single correspondence and each edge between two nodes indicates a pair of compatible correspondences. Second, we search for maximal cliques in the graph and then use node-guided clique filtering to match each graph node with the appropriate maximal clique containing it. Compared with the maximum clique, MAC is a looser constraint and is able to mine more local information in a graph. 
This helps us to achieve plenty of correct hypotheses from a graph. Finally, transformation hypotheses are computed for the selected cliques by the SVD algorithm. The best hypothesis is selected to perform registration using popular hypothesis evaluation metrics in the RANSAC family. To summarize, our main contributions are as follows: + +- We introduce a hypothesis generation method named MAC. Our MAC method is able to mine more local information in a graph, compared with the previous maximum clique constraint. We demonstrate that hypotheses generated by MAC are of high accuracy even in the presence of heavy outliers. +- Based on MAC, we present a novel PCR method, which achieves state-of-the-art performance on U3M, 3DMatch, 3DLoMatch and KITTI datasets. Notably, our geometric-only MAC method outperforms several state-of-the-art deep learning methods [3, 9, 19, 27]. MAC can also be inserted as a module into multiple deep-learning frameworks [1, 10, 18, 29, 43] to boost their performance. MAC combined with GeoTransformer achieves the state-of-the-art registration recall of $95.7\% / 78.9\%$ on 3DMatch / 3DLoMatch. + +# 2. Related Work + +# 2.1. Geometric-only PCR Methods + +Various geometric-only methods [6, 8, 20, 36, 45] have been proposed recently. Typically, RANSAC and its variants [5, 13, 30, 31, 38-40] remain the dominant approaches + +to the problem of estimating a 6-DoF pose from correspondences. RANSAC iteratively samples correspondences from the initial set, generating and evaluating geometric estimations for each subset until a satisfactory solution is obtained. Efficient and robust evaluation metrics are extremely important for using RANSAC to achieve accurate registration. To address the current problems of time-consuming and noise-sensitive evaluation metrics, [40] analyzes the contribution of inliers and outliers during the computation and proposed several metrics that can effectively improve the registration performance of RANSAC. A large number of variants have also been proposed to achieve further improvement. For example, Rusu et al. [31] presented the simple consensus-based initial alignment (SACIA) method, which samples correspondences spread out on the point cloud and leverages the Huber penalty for evaluation. Graph cut RANSAC (GC-RANSAC) [5] uses the graph-cut algorithm before model re-fitting in the local optimization step. Compatibility-guided sample consensus (CG-SAC) [30] additionally considers the normal information of key points during the sampling process. Yang et al. [39] proposed the sample consensus by sampling compatibility triangles (SAC-COT) method, which generates estimations by ranking and sampling ternary loops from the compatibility graph. Although many previous efforts have been made, these methods suffer from low time efficiency and limited accuracy in cases with high outlier rates. + +A series of globally optimal methods based on BnB have been proposed recently. Yang et al. [41] proposed globally optimal ICP (GO-ICP), which rationalizes the planning of ICP update tasks at different stages, and its biggest advantage is that it minimizes the local optimum. Bustos and Chin [6] presented guaranteed outlier removal (GORE), which calculates the tight lower bound and tight upper bound for each correspondence and reduces the size of correspondence set by rejecting true outliers. 
Motivated by GORE, Li [21] proposed a polynomial time outlier removal method, which seeks the tight lower and upper bound by calculating the costs of correspondence matrix (CM) and augmented correspondence matrix (ACM). However, BnB techniques are sensitive to the cardinality of the input and are time-consuming for large-scale inputs. + +# 2.2. Deep-learned PCR Methods + +In addition to geometric-only methods, recent works also adopt deep learning techniques to perform PCR. Some methods aim to detect more repeatable keypoints [4, 18] and extract more descriptive features [1, 10]. FCGF [10] computes the features in a single pass through a fully convolutional neural network without keypoint detection. D3Feat [4] uses a fully convolutional network to obtain local information of point clouds and a joint learning framework to achieve 3D local feature detection and description. + +![](images/c994fb4968d3c45090143f102140aac197908d5299793f364e48ec4f9e159630.jpg) +Figure 2. Pipeline of MAC. 1. Construct a graph for the initial correspondence set. 2. Select a set of maximal cliques from the graph as the consistent sets. 3. Generate and evaluate the hypotheses according to the consistent sets. 4. Select the best hypothesis to perform 3D registration. + +Predator [18] applies an attention mechanism to extract salient points in overlapping regions of the point clouds, thus achieving robust registration in the presence of low overlap rates. Spinnet [1] extracts local features which are rotationally invariant and sufficiently informative to enable accurate registration. Some methods [3, 9, 14, 27] focus on efficiently distinguishing correspondences as inliers and outliers. Deep global registration (DGR) [9] and 3DRegNet [27] classify a given correspondence by training end-to-end neural networks and using operators such as sparse convolution and point-by-point MLP. PointDSC [3] explicitly explores spatial consistency for removing outlier correspondences and 3D point cloud registration. Fu et al. [14] proposed a registration framework that utilizes deep graph matching (RGM) that can find robust and accurate point-to-point correspondences. More recently, several methods [29, 43] follow the detection-free methods and estimate the transformation in an end-to-end way. CoFiNet [43] extracts correspondences from coarse to fine without keypoint detection. GeoTransformer [29] learns geometric features for robust superpoint matching and is robust in low-overlap cases and invariant to rigid transformation. + +While deep learning techniques have demonstrated a great potential for PCR, these methods require a large amount of training data and their generalization is not always promising. By contrast, MAC does not require any training data and achieves more advanced performance than several deep-learned methods. Moreover, MAC can be served as a drop-on module in deep learning frameworks to boost their performance. + +# 3. MAC + +# 3.1. Problem Formulation + +For two point clouds $\mathbf{P}^s$ and $\mathbf{P}^t$ to be aligned, we first extract local features for them using geometric or learned descriptors. Let $\mathbf{p}^s$ and $\mathbf{p}^t$ denote the points in the $\mathbf{P}^s$ and $\mathbf{P}^t$ , respectively. An initial correspondence set $\mathbf{C}_{initial} = \{\mathbf{c}\}$ is formed by matching feature descriptors, + +where $\mathbf{c} = (\mathbf{p}^s, \mathbf{p}^t)$ . MAC estimates the 6-DoF pose transformation between $\mathbf{P}^s$ and $\mathbf{P}^t$ from $\mathbf{C}_{initial}$ . 
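As a minimal illustration of this setup (a sketch only, not the paper's C++/PCL implementation), the snippet below forms an initial correspondence set by brute-force nearest-neighbor matching of precomputed local descriptors; the function and array names are illustrative assumptions, not part of the original method.

```python
import numpy as np

def build_initial_correspondences(kpts_s, desc_s, kpts_t, desc_t):
    """Form C_initial as a list of (p^s, p^t) point pairs by matching descriptors.

    kpts_s, kpts_t: (N_s, 3) / (N_t, 3) keypoint coordinates of the two clouds.
    desc_s, desc_t: (N_s, D) / (N_t, D) local features (e.g., FPFH or FCGF).
    """
    # Pairwise L2 distances between every source and target descriptor (brute force).
    dists = np.linalg.norm(desc_s[:, None, :] - desc_t[None, :, :], axis=-1)
    # Nearest target descriptor for each source keypoint.
    nn_idx = np.argmin(dists, axis=1)
    return [(kpts_s[i], kpts_t[j]) for i, j in enumerate(nn_idx)]
```

A correspondence set built this way typically still contains many outliers, which is exactly the input condition that the graph construction and maximal clique search described next are designed to handle.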
+ +Our method is technically very simple, and its pipeline is shown in Fig. 2. + +# 3.2. Graph Construction + +The graph space can more accurately depict the affinity relationship between correspondences than the Euclidean space. Therefore, we model the initial correspondences as a compatibility graph, where correspondences are represented by nodes and edges link nodes that are geometrically compatible. Here, we consider two approaches to construct a compatibility graph. + +- First Order Graph. The first order graph (FOG) is constructed based on the rigid distance constraint between the correspondence pair $(\mathbf{c}_i, \mathbf{c}_j)$ , which can be quantitatively measured as: + +$$ +S _ {d i s t} \left(\mathbf {c} _ {i}, \mathbf {c} _ {j}\right) = \left| \left\| \mathbf {p} _ {i} ^ {s} - \mathbf {p} _ {j} ^ {s} \right\| - \left\| \mathbf {p} _ {i} ^ {t} - \mathbf {p} _ {j} ^ {t} \right\| \right|. \tag {1} +$$ + +The compatibility score between $\mathbf{c}_i$ and $\mathbf{c}_j$ is given as: + +$$ +S _ {c m p} (\mathbf {c} _ {i}, \mathbf {c} _ {j}) = \exp (- \frac {S _ {d i s t} (\mathbf {c} _ {i} , \mathbf {c} _ {j}) ^ {2}}{2 d _ {c m p} ^ {2}}), \qquad (2) +$$ + +where $d_{cmp}$ is a distance parameter. Notably, if $S_{cmp}(\mathbf{c}_i,\mathbf{c}_j)$ is greater than a threshold $t_{cmp}$ , $\mathbf{c}_i$ and $\mathbf{c}_j$ form an edge $\mathbf{e}_{ij}$ and $S_{cmp}(\mathbf{c}_i,\mathbf{c}_j)$ is the weight of $\mathbf{e}_{ij}$ , otherwise $S_{cmp}(\mathbf{c}_i,\mathbf{c}_j)$ will be set to 0. Since the compatibility graph is undirected, the weight matrix $\mathbf{W}_{FOG}$ is symmetric. + +- Second Order Graph. The previous study [8] proposes a second order compatibility measure, which relates to the number of commonly compatible correspondences in the global set. The second order graph (SOG) evolves from FOG. The weight matrix $\mathbf{W}_{\mathit{SOG}}$ can be calculated as: + +$$ +\mathbf {W} _ {S O G} = \mathbf {W} _ {F O G} \odot \left(\mathbf {W} _ {F O G} \times \mathbf {W} _ {F O G}\right), \tag {3} +$$ + +where $\odot$ represents the element-wise product between two matrices. + +Both graph construction methods can adapt to our frameworks. Compared with FOG, 1) SOG has stricter edge construction conditions and a higher degree of compatibility with adjacent nodes; 2) SOG is sparser, which facilitates a more rapid search of cliques. In Sec. 4.5, we experimentally compare FOG and SOG in our MAC framework. + +# 3.3. Search Maximal Cliques + +Given an undirected graph $G = (\mathbf{V}, \mathbf{E})$ , clique $C = (\mathbf{V}', \mathbf{E}')$ , $\mathbf{V}' \subseteq \mathbf{V}$ , $\mathbf{E}' \subseteq \mathbf{E}$ is a subset of $G$ , in which any two nodes are connected by edges. A maximal clique is a clique that cannot be extended by adding any nodes. In particular, the maximal clique with the most nodes is the maximum clique of a graph. + +Searching for Maximal cliques. To generate hypotheses, RANSAC-based methods repeatedly take random samples from the correspondence set. Nevertheless, they fail to fully mine the affinity relationships between correspondences. Theoretically, inliers would form cliques in the graph, because inliers are usually geometrically compatible with each other. Previous works [23,24,28,36] focus on searching for maximum cliques in the graph, however, the maximum clique is a very tight constraint that only focuses on the global consensus information in a graph. 
Instead, we loosen the constraint and leverage maximal cliques to mine more local graph information. + +By using the igraph_maximal cliques function in the igraph $^1$ $\mathrm{C}++$ library, which makes use of a modified Bron-Kerbosch algorithm [12], the search of maximal cliques can be very efficient. The process's worst time complexity is $\mathcal{O}(d(n - d)3^{(d / 3)})$ , where $d$ is the degeneracy of the graph. Note that $d$ is typically small in our problem because the graph is usually sparse when dealing with point cloud correspondences. + +Node-guided Clique Selection. After executing the maximal clique searching procedure, we obtain the maximal clique set $MAC_{initial}$ . In practice, $MAC_{initial}$ usually contains tens of thousands of maximal cliques, which will make it very time-consuming if we consider all maximal cliques. We introduce a node-guided clique selection method in this section to reduce $|MAC_{initial}|$ . First, we calculate the weight for each clique in $MAC_{initial}$ . Given a clique $C_i = (\mathbf{V}_i, \mathbf{E}_i)$ , the weight $w_{C_i}$ is calculated as: + +$$ +w _ {C _ {i}} = \sum_ {e _ {j} \in \mathbf {E} _ {i}} w _ {e _ {j}}, \tag {4} +$$ + +where $w_{e_j}$ represents the weight of edge $e_j$ in $\mathbf{W}_{SOG}$ . A node may be included by multiple maximal cliques and we + +only retain the one with the greatest weight for that node. Then, duplicated cliques are removed from the rest, obtaining $MAC_{\text{selected}}$ . The motivation behind this is to use information about the local geometric structure around graph nodes to find the best consistent set of corresponding nodes. It is clear that the number of maximal cliques $|MAC_{\text{selected}}|$ will not exceed $|\mathbf{V}|$ . We could send these maximal cliques directly to the following stages for 3D registration. However, when $|\mathbf{V}|$ is quite large, the number of retained maximal cliques can still be very large. Here, we propose several techniques to further filter the maximal cliques. + +- Normal consistency. In the maximal cliques, we find that the normal consistency is satisfied between each correspondence. Given two correspondences $\mathbf{c}_i = (\mathbf{p}_i^s,\mathbf{p}_i^t)$ , $\mathbf{c}_j = (\mathbf{p}_j^s,\mathbf{p}_j^t)$ and the normal vectors $\mathbf{n}_i^s,\mathbf{n}_j^s,\mathbf{n}_i^t,\mathbf{n}_j^t$ at the four points, the angular difference $\alpha_{ij}^{s} = \angle (\mathbf{n}_{i}^{s},\mathbf{n}_{j}^{s})$ , $\alpha_{ij}^{t} = \angle (\mathbf{n}_{i}^{t},\mathbf{n}_{j}^{t})$ between the normal vectors can be calculated then. The following inequality ought to hold if $\mathbf{c}_i$ and $\mathbf{c}_j$ are normal consistent: + +$$ +\left| \sin \alpha_ {i j} ^ {s} - \sin \alpha_ {i j} ^ {t} \right| < t _ {\alpha}, \tag {5} +$$ + +where $t_{\alpha}$ is a threshold for determining whether the angular differences are similar. + +- Clique ranking. We organize $MAC_{\text{selected}}$ in a descending order using the clique's weight $w_{C_i}$ . The top- $K$ ones are supposed to be more likely to produce correct hypotheses. This makes it flexible to control the number of hypotheses. + +These techniques' experimental analysis is presented in Sec. 4.5. + +# 3.4. Hypothesis Generation and Evaluation + +Each maximal clique filtered from the previous step represents a consistent set of correspondences. By applying the SVD algorithm to each consistency set, we can obtain a set of 6-DoF pose hypotheses. + +- Instance-equal SVD. 
# 3.4. Hypothesis Generation and Evaluation

Each maximal clique retained from the previous step represents a consistent set of correspondences. By applying the SVD algorithm to each consistent set, we obtain a set of 6-DoF pose hypotheses.

- Instance-equal SVD. Transformation estimation from correspondences is commonly implemented with SVD. Instance-equal means that all correspondences share equal weights.

- Weighted SVD. Assigning weights to correspondences is commonly adopted by recent PCR methods [8, 9, 27, 29]. Correspondence weights can be derived by solving the eigenvectors of a compatibility matrix constructed for a compatibility graph. Here, we take the primary eigenvalues of $\mathbf{W}_{SOG}$ as correspondence weights.

The final goal of MAC is to estimate the optimal 6-DoF rigid transformation, composed of a rotation $\mathbf{R}^{*} \in SO(3)$ and a translation $\mathbf{t}^{*} \in \mathbb{R}^{3}$, that maximizes the following objective:

$$
\left(\mathbf{R}^{*}, \mathbf{t}^{*}\right) = \arg \max_{\mathbf{R}, \mathbf{t}} \sum_{i = 1}^{N} s\left(\mathbf{c}_{i}\right), \tag{6}
$$

where $\mathbf{c}_i \in \mathbf{C}_{initial}$, $N = |\mathbf{C}_{initial}|$, and $s(\mathbf{c}_i)$ represents the score of $\mathbf{c}_i$. We consider several RANSAC hypothesis evaluation metrics here [40], including mean average error (MAE), mean square error (MSE) and inlier count; their behaviors are experimentally compared in Sec. 4.5. The best hypothesis is then used to perform 3D registration.
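A minimal sketch of the hypothesis generation and evaluation loop is given below, assuming the correspondences are stored as aligned `(N, 3)` arrays `src_pts` and `tgt_pts`. The `kabsch` helper is the standard SVD-based rigid fit (passing non-uniform weights would mimic weighted SVD); the score used here is a simplified MAE-style inlier score standing in for the metrics of [40], and the function names and threshold value are illustrative rather than the paper's exact implementation.

```python
import numpy as np

def kabsch(src, tgt, weights=None):
    """Closed-form rigid transform (R, t) minimizing the (weighted) point error via SVD."""
    if weights is None:
        weights = np.ones(len(src))
    w = weights / weights.sum()
    c_src = (w[:, None] * src).sum(0)
    c_tgt = (w[:, None] * tgt).sum(0)
    h = (src - c_src).T @ (w[:, None] * (tgt - c_tgt))   # 3x3 cross-covariance
    u, _, vt = np.linalg.svd(h)
    d = np.sign(np.linalg.det(vt.T @ u.T))               # avoid reflections
    r = vt.T @ np.diag([1.0, 1.0, d]) @ u.T
    t = c_tgt - r @ c_src
    return r, t

def best_hypothesis(cliques, src_pts, tgt_pts, inlier_thresh=0.1):
    """Generate one pose per clique and keep the one with the best MAE-style score."""
    best, best_score = None, -np.inf
    for clique in cliques:
        r, t = kabsch(src_pts[clique], tgt_pts[clique])
        resid = np.linalg.norm((src_pts @ r.T + t) - tgt_pts, axis=1)
        # Inliers of C_initial contribute (1 - resid / thresh), outliers contribute 0.
        score = np.clip(1.0 - resid / inlier_thresh, 0.0, None).sum()
        if score > best_score:
            best, best_score = (r, t), score
    return best
```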
# 4. Experiments

# 4.1. Experimental Setup

Datasets. We consider four datasets: the object-scale dataset U3M [26], the scene-scale indoor datasets 3DMatch [44] & 3DLoMatch [18], and the scene-scale outdoor dataset KITTI [15]. U3M contains 496 point cloud pairs. 3DLoMatch is a subset of 3DMatch in which the overlap between point cloud pairs ranges from $10\%$ to $30\%$, making it very challenging. For KITTI, we follow [3, 8] and obtain 555 pairs of point clouds for testing.

Evaluation Criteria. Following [39], we employ the root mean square error (RMSE) metric to evaluate 3D point cloud registration performance on the object-scale U3M dataset. In addition, we employ the rotation error (RE) and translation error (TE) to evaluate registration results on the scene-scale datasets. Following the settings in [9], a registration is considered successful when $\mathrm{RE} \leq 15^{\circ}$ and $\mathrm{TE} \leq 30\,\mathrm{cm}$ on the 3DMatch & 3DLoMatch datasets, and when $\mathrm{RE} \leq 5^{\circ}$ and $\mathrm{TE} \leq 60\,\mathrm{cm}$ on the KITTI dataset. We define a dataset's registration recall as the ratio of successful cases to the number of point cloud pairs to be registered.

Implementation Details. Our method is implemented in C++ based on the point cloud library (PCL) [32] and the igraph library. For U3M, we use the Harris3D (H3D) [33] keypoint detector and the signatures of histograms of orientation (SHOT) [34] descriptor for initial correspondence generation, as in [42]. For the 3DMatch and 3DLoMatch datasets, we use the fast point feature histograms (FPFH) [31] descriptor and the fully convolutional geometric features (FCGF) [10] descriptor to generate the initial correspondence sets. In the comparative experiments, the main steps are SOG construction, node-guided maximal clique search, hypothesis generation by instance-equal SVD, and evaluation by MAE. The default values of the compatibility threshold $t_{cmp}$ and the distance parameter $d_{cmp}$ introduced in Sec. 3.2 are 0.99 and 10 pr, respectively; if the number of input matches exceeds 5000, $t_{cmp}$ is set to 0.999 to reduce computation. Here, 'pr' is a distance unit called the point cloud resolution [42]. Normal vectors are computed with the NormalEstimation class of PCL using the 20 nearest neighboring points. When searching maximal cliques, the lower bound on the clique size is set to 3 and no upper bound is set. All experiments were conducted on a machine with an Intel 12700H CPU and 32 GB of RAM.

![](images/915becacf5a5f7f86b3b83b2b994f7cce90c29f0959816bf014c5716987f61ea.jpg)
Figure 3. Registration performance of the tested point cloud registration methods on U3M.
| Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm) |
| --- | --- | --- | --- | --- | --- | --- |
| *i) Traditional* | | | | | | |
| SM [20] | 55.88 | 2.94 | 8.15 | 86.57 | 2.29 | 7.07 |
| FGR [45] | 40.91 | 4.96 | 10.25 | 78.93 | 2.90 | 8.41 |
| RANSAC-1M [13] | 64.20 | 4.05 | 11.35 | 88.42 | 3.05 | 9.42 |
| RANSAC-4M [13] | 66.10 | 3.95 | 11.03 | 91.44 | 2.69 | 8.38 |
| GC-RANSAC [5] | 67.65 | 2.33 | 6.87 | 92.05 | 2.33 | 7.11 |
| TEASER++ [36] | 75.48 | 2.48 | 7.31 | 85.77 | 2.73 | 8.66 |
| CG-SAC [30] | 78.00 | 2.40 | 6.89 | 87.52 | 2.42 | 7.66 |
| SC2-PCR [8] | 83.73 | 2.18 | 6.70 | 93.16 | 2.09 | 6.51 |
| *ii) Deep learned* | | | | | | |
| 3DRegNet [27] | 26.31 | 3.75 | 9.60 | 77.76 | 2.74 | 8.13 |
| DGR [9] | 32.84 | 2.45 | 7.53 | 88.85 | 2.28 | 7.02 |
| DHVR [19] | 67.10 | 2.78 | 7.84 | 91.93 | 2.25 | 7.08 |
| PointDSC [3] | 72.95 | 2.18 | 6.45 | 91.87 | 2.10 | 6.54 |
| MAC | 84.10 | 1.96 | 6.18 | 93.72 | 1.89 | 6.03 |
Table 1. Registration results on 3DMatch dataset.

# 4.2. Results on U3M Dataset

We perform an extensive comparison in Fig. 3. The following methods are tested: SAC-COT [39], OSAC [37], SAC-IA [31], RANSAC [13], $\mathrm{SC^2}$-PCR [8], FGR [45], GO-ICP [41], and PPF [11], where the former four are RANSAC-based methods. The RMSE threshold is varied from $0.5\,\mathrm{pr}$ to $5\,\mathrm{pr}$ with a step of $0.5\,\mathrm{pr}$.

The results indicate that MAC performs best and significantly outperforms all tested RANSAC-based estimators, such as SAC-COT, OSAC, SAC-IA, and RANSAC. MAC with the MAE evaluation metric achieves the best registration performance on U3M.
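The RE, TE and registration recall reported in the following tables follow the criteria of Sec. 4.1. A small reference sketch is given below; it assumes poses are expressed as rotation matrices with translations in meters, and the function names are ours.

```python
import numpy as np

def registration_errors(r_est, t_est, r_gt, t_gt):
    """Rotation error (degrees) and translation error between two rigid poses."""
    cos_re = np.clip((np.trace(r_gt.T @ r_est) - 1.0) / 2.0, -1.0, 1.0)
    re_deg = np.degrees(np.arccos(cos_re))
    te = np.linalg.norm(t_est - t_gt)
    return re_deg, te

def registration_recall(poses_est, poses_gt, re_thresh=15.0, te_thresh=0.30):
    """Fraction of pairs with RE <= re_thresh (deg) and TE <= te_thresh (meters)."""
    ok = 0
    for (r_e, t_e), (r_g, t_g) in zip(poses_est, poses_gt):
        re_deg, te = registration_errors(r_e, t_e, r_g, t_g)
        ok += (re_deg <= re_thresh) and (te <= te_thresh)
    return ok / max(len(poses_gt), 1)
```

Using `re_thresh=5.0` and `te_thresh=0.60` with the same helper reproduces the KITTI success criterion.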
| Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm) |
| --- | --- | --- | --- | --- | --- | --- |
| *i) Traditional* | | | | | | |
| RANSAC-1M [13] | 0.67 | 10.27 | 15.06 | 9.77 | 7.01 | 14.87 |
| RANSAC-4M [13] | 0.45 | 10.39 | 20.03 | 10.44 | 6.91 | 15.14 |
| TEASER++ [36] | 35.15 | 4.38 | 10.96 | 46.76 | 4.12 | 12.89 |
| SC2-PCR [8] | 38.57 | 4.03 | 10.31 | 58.73 | 3.80 | 10.44 |
| *ii) Deep learned* | | | | | | |
| DGR [9] | 19.88 | 5.07 | 13.53 | 43.80 | 4.17 | 10.82 |
| PointDSC [3] | 20.38 | 4.04 | 10.25 | 56.20 | 3.87 | 10.48 |
| MAC | 40.88 | 3.66 | 9.45 | 59.85 | 3.50 | 9.75 |
Table 2. Registration results on 3DLoMatch dataset.
| Method (RR %) | 3DMatch 5000 | 3DMatch 2500 | 3DMatch 1000 | 3DMatch 500 | 3DMatch 250 | 3DLoMatch 5000 | 3DLoMatch 2500 | 3DLoMatch 1000 | 3DLoMatch 500 | 3DLoMatch 250 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| FCGF [10] | 85.1 | 84.7 | 83.3 | 81.6 | 71.4 | 40.1 | 41.7 | 38.2 | 35.4 | 26.8 |
| SpinNet [1] | 88.6 | 86.6 | 85.5 | 83.5 | 70.2 | 59.8 | 54.9 | 48.3 | 39.8 | 26.8 |
| Predator [18] | 89.0 | 89.9 | 90.6 | 88.5 | 86.6 | 59.8 | 61.2 | 62.4 | 60.8 | 58.1 |
| CoFiNet [43] | 89.3 | 88.9 | 88.4 | 87.4 | 87.0 | 67.5 | 66.2 | 64.2 | 63.1 | 61.0 |
| GeoTransformer [29] | 92.0 | 91.8 | 91.8 | 91.4 | 91.2 | 75.0 | 74.8 | 74.2 | 74.1 | 73.5 |
| FCGF+MAC | 91.3 | 92.2 | 91.6 | 90.4 | 85.6 | 57.2 | 56.0 | 52.6 | 42.4 | 32.1 |
| | 6.2↑ | 7.5↑ | 8.3↑ | 8.8↑ | 14.2↑ | 17.1↑ | 14.3↑ | 14.4↑ | 7.0↑ | 5.3↑ |
| SpinNet+MAC | 95.3 | 95.1 | 93.3 | 91.4 | 81.2 | 72.8 | 69.9 | 59.2 | 54.8 | 32.1 |
| | 6.7↑ | 8.5↑ | 7.8↑ | 7.9↑ | 11.0↑ | 13.0↑ | 15.0↑ | 10.9↑ | 15.0↑ | 5.3↑ |
| Predator+MAC | 94.6 | 94.4 | 94.0 | 93.5 | 92.3 | 70.9 | 70.4 | 69.8 | 67.2 | 64.1 |
| | 5.6↑ | 4.5↑ | 3.4↑ | 5.0↑ | 5.7↑ | 11.1↑ | 9.2↑ | 7.4↑ | 6.4↑ | 6.0↑ |
| CoFiNet+MAC | 94.1 | 94.4 | 94.5 | 93.8 | 92.7 | 71.6 | 71.5 | 70.6 | 69.2 | 68.1 |
| | 4.8↑ | 5.5↑ | 6.1↑ | 6.4↑ | 5.7↑ | 4.1↑ | 5.3↑ | 6.4↑ | 6.1↑ | 7.1↑ |
| GeoTransformer+MAC | 95.7 | 95.7 | 95.2 | 95.3 | 94.6 | 78.9 | 78.7 | 78.2 | 77.7 | 76.6 |
| | 3.7↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.4↑ | 3.9↑ | 3.9↑ | 4.0↑ | 3.6↑ | 3.1↑ |
# 4.3. Results on 3DMatch & 3DLoMatch Datasets

PCR methods comparison. Both geometric-only and deep-learned methods are considered for comparison, including SM [20], FGR [45], RANSAC [13], TEASER++ [36], CG-SAC [30], $\mathrm{SC^2}$-PCR [8], 3DRegNet [27], DGR [9], DHVR [19] and PointDSC [3]. Results are shown in Tables 1 and 2.

The following conclusions can be made: 1) regardless of which descriptor is used, MAC outperforms all compared methods on both the 3DMatch and 3DLoMatch datasets, indicating its strong ability to register indoor scene point clouds; 2) even compared with deep-learned methods, MAC still achieves better performance without any training data; 3) in addition to the registration recall (RR) metric, MAC achieves the best RE and TE metrics. This indicates that registrations by MAC are very accurate and that MAC is able to align low-overlap data.

Boosting deep-learned methods with MAC. Several state-of-the-art deep-learned methods are integrated with MAC for evaluation. The considered methods are FCGF [10], SpinNet [1], Predator [18], CoFiNet [43] and GeoTransformer [29]. Each method is tested with different numbers of samples, where a sample refers to a sampled point or correspondence. Results are reported in Table 3.

Remarkably, MAC dramatically improves the registration recall of all tested methods on both 3DMatch and 3DLoMatch. Notably, the performance of SpinNet, Predator and CoFiNet after boosting by MAC exceeds that of GeoTransformer. MAC working with GeoTransformer achieves state-of-the-art registration recall of $95.7\%$ / $78.9\%$ on 3DMatch / 3DLoMatch. The results suggest that: 1) MAC can greatly boost existing deep-learned methods; 2) MAC is not sensitive to the number of samples.

Table 3. Performance boosting for deep-learned methods when combined with MAC.
| Method | FPFH RR(%) | FPFH RE(°) | FPFH TE(cm) | FCGF RR(%) | FCGF RE(°) | FCGF TE(cm) |
| --- | --- | --- | --- | --- | --- | --- |
| *i) Traditional* | | | | | | |
| FGR [45] | 5.23 | 0.86 | 43.84 | 89.54 | 0.46 | 25.72 |
| TEASER++ [36] | 91.17 | 1.03 | 17.98 | 94.96 | 0.38 | 13.69 |
| RANSAC [13] | 74.41 | 1.55 | 30.20 | 80.36 | 0.73 | 26.79 |
| CG-SAC [30] | 74.23 | 0.73 | 14.02 | 83.24 | 0.56 | 22.96 |
| SC2-PCR [8] | 99.28 | 0.39 | 8.68 | 97.84 | 0.33 | 20.58 |
| *ii) Deep learned* | | | | | | |
| DGR [9] | 77.12 | 1.64 | 33.10 | 96.90 | 0.34 | 21.70 |
| PointDSC [3] | 98.92 | 0.38 | 8.35 | 97.84 | 0.33 | 20.32 |
| MAC | 99.46 | 0.40 | 8.46 | 97.84 | 0.34 | 19.34 |
Table 4. Registration results on KITTI dataset.

# 4.4. Results on KITTI Dataset

In Table 4, the results of DGR [9], PointDSC [3], TEASER++ [36], RANSAC [13], CG-SAC [30], $\mathrm{SC^2}$-PCR [8] and MAC are reported for comparison.

As shown in the table, in terms of registration recall, MAC achieves the best result with the FPFH setting and is tied for the best result with the FCGF setting. MAC also has a lower TE than the state-of-the-art geometric-only method $\mathrm{SC^2}$-PCR. Note that outdoor point clouds are significantly sparse and non-uniformly distributed. The registration experiments on the object, indoor scene, and outdoor scene datasets consistently verify that MAC generalizes well across different application contexts.

# 4.5. Analysis Experiments

In this section, we perform ablation studies and analysis experiments on both the 3DMatch and 3DLoMatch datasets. We progressively evaluate the techniques proposed in Sec. 3, and the results are shown in Table 5. The quality of the generated hypotheses is analyzed in Table 6. The performance upper bound is studied in Table 7. Table 8 presents the time efficiency analysis of MAC.

Performing feature matching selection. Before 3D registration, a popular practice is to perform outlier rejection to reduce the correspondence set. Here we employ geometric consistency (GC) [7], which is independent of the feature space and retains the largest cluster of mutually compatible correspondences.

By comparing Rows 1 and 2 of Table 5, GC has a negative impact on MAC's performance, potentially because some inliers are also removed in this process. This demonstrates that MAC can still perform well even when the initial correspondence set is used directly as input without any filtering.
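For completeness, a rough sketch of the GC pre-filtering evaluated above is shown below. It is our simplified greedy reading of [7] (seed on the best-connected correspondence and keep everything compatible with it), not the exact algorithm, and the function name is hypothetical.

```python
import numpy as np

def gc_prefilter(w_fog):
    """Keep one large cluster of mutually compatible correspondences.

    Simplified greedy variant of geometric consistency: pick the correspondence
    with the most compatible partners as the seed, then keep it together with
    everything compatible with it.
    """
    adj = w_fog > 0                              # compatibility adjacency
    np.fill_diagonal(adj, False)
    seed = int(adj.sum(axis=1).argmax())         # best-connected correspondence
    keep = np.flatnonzero(adj[seed])
    return np.append(keep, seed)                 # indices of retained matches
```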
| Descriptor | Row | RR(%) 3DMatch / 3DLoMatch | RE(°) 3DMatch / 3DLoMatch | TE(cm) 3DMatch / 3DLoMatch |
| --- | --- | --- | --- | --- |
| FPFH | 1) | 83.86 / 39.14 | 2.17 / 4.01 | 6.51 / 9.94 |
| FPFH | 2) | 77.02 / 26.61 | 2.10 / 3.83 | 6.19 / 9.49 |
| FPFH | 3) | 82.26 / 39.02 | 2.12 / 3.98 | 6.43 / 9.89 |
| FPFH | 4) | 83.49 / 38.91 | 2.22 / 4.11 | 6.65 / 10.05 |
| FPFH | 5) | 83.67 / 38.85 | 2.15 / 4.03 | 6.53 / 9.82 |
| FPFH | 6) | 84.10 / 40.88 | 1.96 / 3.66 | 6.18 / 9.45 |
| FPFH | 7) | 82.93 / 39.98 | 1.95 / 3.66 | 6.12 / 9.48 |
| FPFH | 8) | 82.44 / 38.46 | 2.16 / 3.97 | 6.41 / 9.85 |
| FPFH | 9) | 74.06 / 31.11 | 2.08 / 3.89 | 6.17 / 9.82 |
| FPFH | 10) Top100 | 82.01 / 37.79 | 2.13 / 4.02 | 6.42 / 9.82 |
| FPFH | 11) Top200 | 83.18 / 38.85 | 2.16 / 4.08 | 6.55 / 9.91 |
| FPFH | 12) Top500 | 83.06 / 38.85 | 2.14 / 4.03 | 6.47 / 9.81 |
| FPFH | 13) Top1000 | 83.30 / 38.91 | 2.16 / 4.05 | 6.53 / 9.84 |
| FPFH | 14) Top2000 | 83.36 / 38.79 | 2.14 / 4.02 | 6.52 / 9.78 |
| FCGF | 1) | 93.41 / 59.80 | 2.04 / 3.78 | 6.33 / 10.16 |
| FCGF | 2) | 91.68 / 49.97 | 1.99 / 3.64 | 6.23 / 9.90 |
| FCGF | 3) | 93.35 / 59.24 | 2.04 / 3.67 | 6.28 / 9.99 |
| FCGF | 4) | 92.91 / 59.07 | 2.06 / 3.88 | 6.33 / 10.20 |
| FCGF | 5) | 93.16 / 59.46 | 2.04 / 3.76 | 6.26 / 10.00 |
| FCGF | 6) | 93.72 / 59.85 | 1.89 / 3.50 | 6.03 / 9.75 |
| FCGF | 7) | 93.59 / 59.01 | 1.86 / 3.49 | 6.00 / 9.61 |
| FCGF | 8) | 93.28 / 59.63 | 2.02 / 3.73 | 6.24 / 9.98 |
| FCGF | 9) | 87.86 / 49.35 | 2.00 / 3.61 | 6.09 / 9.60 |
| FCGF | 10) Top100 | 92.42 / 57.44 | 2.00 / 3.75 | 6.21 / 10.00 |
| FCGF | 11) Top200 | 93.22 / 57.83 | 2.01 / 3.75 | 6.29 / 10.06 |
| FCGF | 12) Top500 | 93.22 / 58.90 | 2.02 / 3.78 | 6.33 / 10.02 |
| FCGF | 13) Top1000 | 93.35 / 59.40 | 2.05 / 3.78 | 6.32 / 10.18 |
| FCGF | 14) Top2000 | 93.35 / 59.52 | 2.04 / 3.78 | 6.33 / 10.19 |
Table 5. Analysis experiments on 3DMatch / 3DLoMatch. FOG: First order compatibility graph. SOG: Second order compatibility graph. GC: Use geometric consistency to preliminarily perform outlier rejection. MC: Search the maximum clique instead of maximal cliques. NG: Node-guided clique selection. NC: Normal consistency. CR: Clique ranking. W-SVD: Weighted SVD.

Graph construction choices. We test the performance of MAC with different graph construction approaches.

As shown in Rows 1 and 3 of Table 5, on 3DMatch the registration recall obtained with SOG is $1.6\%$ higher than with FOG when combined with FPFH, and $0.06\%$ higher when combined with FCGF. On 3DLoMatch, the recall with SOG is $0.12\%$ higher than with FOG when combined with FPFH, and $0.56\%$ higher when combined with FCGF. Therefore, SOG is more suitable for MAC. A detailed analysis can be found in the supplementary material.

Maximum or maximal clique. To justify the advantage of maximal cliques, we change the search strategy of MAC to the maximum clique and test the registration performance.

As shown in Rows 1 and 9 of Table 5, on 3DMatch using maximal cliques yields a recall $9.8\%$ higher than using the maximum clique when combined with FPFH, and $5.55\%$ higher when combined with FCGF. On 3DLoMatch, the recall with maximal cliques is $8.03\%$ higher than with the maximum clique when combined with FPFH, and $10.45\%$ higher when combined with FCGF. There are several reasons for this: 1) maximal cliques include the maximum clique and additionally consider local graph constraints, so the search for maximal cliques can exploit both local and global information in the compatibility graph; 2) the maximum clique is a very tight constraint that maximizes the number of mutually compatible correspondences, but it does not guarantee the optimal result.

Node-guided clique selection. We compare the performance with and without node-guided (NG) clique selection for the maximal clique search.

Comparing Rows 1 and 4 of Table 5, using NG improves recall by $0.37\%$ when combined with FPFH and by $0.5\%$ when combined with FCGF on 3DMatch. On 3DLoMatch, NG improves recall by $0.23\%$ with FPFH and by $0.73\%$ with FCGF. It is worth noting that while NG improves recall, the mean RE and mean TE also decrease. For example, NG reduces the mean RE by $0.1^{\circ}$ and the mean TE by $0.11\,\mathrm{cm}$ with FPFH on 3DLoMatch. NG effectively reduces the amount of computation in the subsequent steps while yielding accurate hypotheses.

Different approaches for clique filtering. We test the effectiveness of the two filtering methods, normal consistency and clique ranking.

1) Normal consistency: comparing Rows 1 and 8 of Table 5, NC slightly degrades MAC's performance. 2) Clique ranking: Rows 10 to 14 show that the registration recall tends to increase as $K$ increases, suggesting that a larger $K$ yields a subset of cliques that generates more correct hypotheses. Remarkably, setting $K$ to 100 already achieves outstanding performance.

Employing instance-equal or weighted SVD. The comparison of instance-equal and weighted SVD is shown in Rows 1 and 5 of Table 5.
| # hypotheses | 3DMatch RANSAC (FCGF) | 3DMatch RANSAC (FPFH) | 3DMatch MAC (FCGF) | 3DMatch MAC (FPFH) | 3DLoMatch RANSAC (FCGF) | 3DLoMatch RANSAC (FPFH) | 3DLoMatch MAC (FCGF) | 3DLoMatch MAC (FPFH) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 100 | 10.45 | 0.76 | 61.94 | 50.67 | 1.25 | 0.05 | 30.47 | 12.22 |
| 200 | 20.76 | 1.50 | 119.20 | 89.27 | 2.52 | 0.09 | 55.57 | 17.59 |
| 500 | 51.74 | 3.68 | 269.06 | 162.41 | 6.21 | 0.21 | 109.32 | 23.32 |
| 1000 | 103.65 | 7.39 | 456.18 | 217.32 | 12.43 | 0.41 | 156.11 | 26.02 |
| 2000 | 208.24 | 14.90 | 669.32 | 254.13 | 24.80 | 0.81 | 202.12 | 29.31 |
Table 6. Comparison of the number of correct hypotheses generated by MAC and RANSAC on 3DMatch and 3DLoMatch.
| | 3DMatch RR(%) | 3DLoMatch RR(%) |
| --- | --- | --- |
| MAC-1 | 98.46 | 91.24 |
| MAC-5 | 97.10 | 83.32 |
| MAC-10 | 96.43 | 77.93 |
| MAC-20 | 94.70 | 70.47 |
| MAC-50 | 91.13 | 56.37 |
| MAC-origin | 93.72 | 59.85 |
Weighted SVD is slightly inferior to instance-equal SVD. This suggests that the correspondences within the selected maximal cliques are already highly consistent, so no additional weighting strategy is required.

Varying hypothesis evaluation metrics. Here we compare three evaluation metrics for MAC hypothesis evaluation: MAE, MSE and inlier count.

As shown in Rows 1, 6 and 7, MAC with MAE achieves the best performance. In Table 5, MAE improves recall by $0.24\%$ when combined with FPFH and by $0.31\%$ when combined with FCGF on 3DMatch, compared with the commonly used inlier count metric. On 3DLoMatch, MAE brings a $1.74\%$ improvement with FPFH and a $0.05\%$ improvement with FCGF over inlier count. MAE is also very effective in reducing RE and TE. For instance, MAE reduces the mean RE by $0.35^{\circ}$ and the mean TE by $0.49\,\mathrm{cm}$ with FPFH on 3DLoMatch.

Comparison with RANSAC hypotheses. We evaluate the quality of the generated hypotheses by comparing the hypotheses from RANSAC and MAC against the ground-truth transformation. The results are shown in Table 6.

Compared to RANSAC, which randomly samples correspondences and generates hypotheses without geometric constraints, MAC generates far more convincing hypotheses from the maximal cliques of the compatibility graph, fully exploiting the consensus information in the graph.

The performance upper bound of MAC. Assume an ideal hypothesis evaluation metric, under which a point cloud pair can be aligned as long as at least one correct hypothesis is generated for it. This tests the performance upper bound of MAC. We vary the threshold on the number of correct hypotheses required for a pair to count as alignable and report the results in Table 7.

Table 7. Registration recall on 3DMatch / 3DLoMatch with the FCGF setting, based on judging MAC's hypotheses. MAC-$n$: a point cloud pair is considered alignable if at least $n$ hypotheses are correct.
| # correspondences | Graph Construction | Search Maximal Cliques | Node-guided Clique Selection | Pose Estimation | Total |
| --- | --- | --- | --- | --- | --- |
| 250 | 1.03 (14.55%) | 5.24 (74.01%) | 0.58 (8.19%) | 0.23 (3.25%) | 7.08 |
| 500 | 4.07 (17.54%) | 15.67 (67.51%) | 3.12 (13.44%) | 0.35 (1.51%) | 23.21 |
| 1000 | 16.90 (29.85%) | 36.60 (64.65%) | 1.88 (3.32%) | 1.23 (2.18%) | 56.61 |
| 2500 | 153.92 (53.29%) | 104.03 (36.02%) | 4.97 (1.72%) | 25.93 (8.97%) | 288.85 |
| 5000 | 887.03 (27.16%) | 1579.61 (48.37%) | 65.40 (2.00%) | 733.38 (22.47%) | 3265.42 |
+ +Table 8. Average consumed time (ms) per point cloud pair on the 3DMatch dataset. Predator is used for generating correspondences. + +Impressively, MAC-1 achieves registration recalls of $98.46\% / 91.24\%$ on 3DMatch / 3DLoMatch. This indicates that even on low overlapping datasets, MAC is able to produce correct hypotheses for most point cloud pairs. In addition, we can deduce that MAC's performance can be further improved with better hypothesis evaluation metrics. Time consumption of MAC. We employ Predator [18] to generate correspondences with different magnitudes to test the time performance of MAC. The time consumption is reported in Table 8. + +The following observations can be made. 1) In general, MAC can complete 3D registration in only tens of milliseconds when the number of correspondences is smaller than 1000. Even with an input with 2500 correspondences, the time consumption is about 0.29 seconds. Note that MAC is implemented on the CPU only. 2) As the number of correspondences increases from 250 to 2500, there is an increase in time cost for graph construction due to $\mathbf{W}_{SOG}$ computation taking more time. 3) When the number of correspondences reaches 5000, there is a large rise in the time cost of MAC's registration. The significant increase in the input size makes the search for maximal cliques more time-consuming. However, MAC is not sensitive to the cardinality of the input correspondence set, as verified in Table 3. Hence, using sparse inputs for MAC can produce outstanding performance while making registration efficient. + +# 5. Conclusion + +In this paper, we presented MAC to solve PCR by using the maximal clique constraint to generate precise pose hypotheses from correspondences. Our method achieves state-of-the-art performance on all tested datasets and can adapt to deep-learned methods to boost their performance. Limitation. As shown in Table 7 and Table 1, MAC produces accurate hypotheses but may fail to find them. In the future, we plan to develop a more convincing hypothesis evaluation technique utilizing semantic information. + +Acknowledgments. This work is supported in part by the National Natural Science Foundation of China (NFSC) (No.U19B2037 and 62002295), Shaanxi Provincial Key R&D Program (No.2021KWZ-03), and the Fundamental Research Funds for the Central Universities (No.D5000220352). + +# References + +[1] Sheng Ao, Qingyong Hu, Bo Yang, Andrew Markham, and Yulan Guo. Spinnet: Learning a general surface descriptor for 3d point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11753-11762, 2021. 1, 2, 3, 6 +[2] Yasuhiro Aoki, Hunter Goforth, Rangaprasad Arun Srivatsan, and Simon Lucey. Pointnetlk: Robust & efficient point cloud registration using pointnet. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7163-7172, 2019. 1 +[3] Xuyang Bai, Zixin Luo, Lei Zhou, Hongkai Chen, Lei Li, Zeyu Hu, Hongbo Fu, and Chiew-Lan Tai. Pointdsc: Robust point cloud registration using deep spatial consistency. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 15859-15869. IEEE, 2021. 1, 2, 3, 5, 6 +[4] Xuyang Bai, Zixin Luo, Lei Zhou, Hongbo Fu, Long Quan, and Chiew-Lan Tai. D3feat: Joint learning of dense detection and description of 3d local features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6359-6367, 2020. 1, 2 +[5] Daniel Barath and Jiri Matas. Graph-cut ransac. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6733-6741, 2018. 1, 2, 5 +[6] Alvaro Parra Bustos and Tat-Jun Chin. Guaranteed outlier removal for point cloud registration with correspondences. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(12):2868-2882, 2017. 1, 2 +[7] Hui Chen and Bir Bhanu. 3d free-form object recognition in range images using local surface patches. Pattern Recognition Letters, 28(10):1252-1262, 2007. 6 +[8] Zhi Chen, Kun Sun, Fan Yang, and Wenbing Tao. Sc2-pcr: A second order spatial compatibility for efficient and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13221-13231, 2022. 2, 3, 4, 5, 6 +[9] Christopher Choy, Wei Dong, and Vladlen Koltun. Deep global registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2514-2523. IEEE, 2020. 1, 2, 3, 4, 5, 6 +[10] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8958-8966, 2019. 1, 2, 5, 6 +[11] Bertram Drost, Markus Ulrich, Nassir Navab, and Slobodan Ilic. Model globally, match locally: Efficient and robust 3d object recognition. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 998-1005. IEEE, 2010. 5 +[12] David Eppstein, Maarten Löffler, and Darren Strash. Listing all maximal cliques in sparse graphs in near-optimal time. In International Symposium on Algorithms and Computation, pages 403-414. Springer, 2010. 4 +[13] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to + +image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 5, 6 +[14] Kexue Fu, Shaolei Liu, Xiaoyuan Luo, and Manning Wang. Robust point cloud registration framework based on deep graph matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8893-8902, 2021. 1, 3 +[15] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3354-3361. IEEE, 2012. 5 +[16] Zan Gojcic, Caifa Zhou, Jan D Wegner, and Andreas Wieser. The perfect match: 3d point cloud matching with smoothed densities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5545-5554, 2019. 1 +[17] Yulan Guo, Mohammed Bennamoun, Ferdous Sohel, Min Lu, and Jianwei Wan. 3d object recognition in cluttered scenes with local surface features: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(11):2270-2287, 2014. 1 +[18] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4267-4276, 2021. 1, 2, 3, 5, 6, 8 +[19] Junha Lee, Seungwook Kim, Minsu Cho, and Jaesik Park. Deep hough voting for robust global registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15994-16003, 2021. 1, 2, 5, 6 +[20] Marius Leordeanu and Martial Hebert. A spectral technique for correspondence problems using pairwise constraints. 2005. 2, 5, 6 +[21] Jiayuan Li. 
A practical o (n2) outlier removal method for point cloud registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1, 2 +[22] Yang Li and Tatsuya Harada. Lepard: Learning partial point cloud matching in rigid and deformable scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5554-5564, 2022. 2 +[23] Muyuan Lin, Varun Murali, and Sertac Karaman. A planted clique perspective on hypothesis pruning. IEEE Robotics and Automation Letters, 7(2):5167-5174, 2022. 4 +[24] Yu-Kai Lin, Wen-Chieh Lin, and Chieh-Chih Wang. Kclosest points and maximum clique pruning for efficient and effective 3-d laser scan matching. IEEE Robotics and Automation Letters, 7(2):1471-1477, 2022. 4 +[25] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. Automatic correspondence for 3d modeling: an extensive review. International Journal of Shape Modeling, 11(02):253-291, 2005. 1 +[26] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. A novel representation and feature matching algorithm for automatic pairwise registration of range images. International Journal of Computer Vision, 66(1):19-40, 2006. 5 + +[27] G Dias Pais, Srikumar Ramalingam, Venu Madhav Govindu, Jacinto C Nascimento, Rama Chellappa, and Pedro Miraldo. 3dregnet: A deep neural network for 3d point registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7193-7203. IEEE, 2020. 1, 2, 3, 4, 5, 6 +[28] Alvaro Parra, Tat-Jun Chin, Frank Neumann, Tobias Friedrich, and Maximilian Katzmann. A practical maximum clique algorithm for matching with pairwise constraints. arXiv preprint arXiv:1902.01534, 2019. 4 +[29] Zheng Qin, Hao Yu, Changjian Wang, Yulan Guo, Yuxing Peng, and Kai Xu. Geometric transformer for fast and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11143-11152, 2022. 2, 3, 4, 6 +[30] Siwen Quan and Jiaqi Yang. Compatibility-guided sampling consensus for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 58(10):7380-7392, 2020. 1, 2, 5, 6 +[31] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In IEEE International Conference on Robotics and Automation, pages 3212-3217. IEEE, 2009. 1, 2, 5 +[32] Radu Bogdan Rusu and Steve Cousins. 3d is here: Point cloud library (pcl). In IEEE International Conference on Robotics and Automation, pages 1-4. IEEE, 2011. 5 +[33] Ivan Sipiran and Benjamin Bustos. Harris 3d: a robust extension of the harris operator for interest point detection on 3d meshes. The Visual Computer, 27(11):963-976, 2011. 5 +[34] Federico Tombari, Samuele Salti, and Luigi Di Stefano. Unique signatures of histograms for local surface description. In European Conference on Computer Vision, pages 356-369. Springer, 2010. 5 +[35] Haiping Wang, Yuan Liu, Zhen Dong, and Wenping Wang. You only hypothesize once: Point cloud registration with rotation-equivariant descriptors. In Proceedings of the ACM International Conference on Multimedia, pages 1630-1641, 2022. 1 +[36] Heng Yang, Jingnan Shi, and Luca Carlone. Teaser: Fast and certifiable point cloud registration. IEEE Transactions on Robotics, 37(2):314-333, 2020. 2, 4, 5, 6 +[37] Jiaqi Yang, Zhiguo Cao, and Qian Zhang. A fast and robust local descriptor for 3d point cloud registration. Information Sciences, 346:163-179, 2016. 5 +[38] Jiaqi Yang, Jiahao Chen, Siwen Quan, Wei Wang, and Yanning Zhang. 
Correspondence selection with loose-tight geometric voting for 3d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 2022. 1, 2 +[39] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Zhaoshuai Qi, and Yanning Zhang. Sac-cot: Sample consensus by sampling compatibility triangles in graphs for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 60:1-15, 2021. 1, 2, 5 +[40] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Qian Zhang, Yanning Zhang, and Zhiguo Cao. Toward efficient and robust metrics for ransac hypotheses and 3d rigid registration. IEEE Transactions on Circuits and Systems for Video Technology, 32(2):893-906, 2021. 1, 2, 5 + +[41] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015. 1, 2, 5 +[42] Jiaqi Yang, Yang Xiao, Zhiguo Cao, and Weidong Yang. Ranking 3d feature correspondences via consistency voting. Pattern Recognition Letters, 117:1-8, 2019. 5 +[43] Hao Yu, Fu Li, Mahdi Saleh, Benjamin Busam, and Slobodan Ilic. Cofinet: Reliable coarse-to-fine correspondences for robust pointcloud registration. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6 +[44] Andy Zeng, Shuran Song, Matthias Nießner, Matthew Fisher, Jianxiong Xiao, and Thomas Funkhouser. 3dmatch: Learning local geometric descriptors from rgb-d reconstructions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1802-1811, 2017. 5 +[45] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Fast global registration. In European Conference on Computer Vision, pages 766-782. Springer, 2016. 2, 5, 6 \ No newline at end of file diff --git a/2023/3D Registration With Maximal Cliques/images.zip b/2023/3D Registration With Maximal Cliques/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7df60adadcfb09f3c10b2882bc6155e0c152f8cc --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc6284fa3fba7e1c0cd6a048707f35a7e66d374ad13eb7675da199e077e08ea1 +size 556250 diff --git a/2023/3D Registration With Maximal Cliques/layout.json b/2023/3D Registration With Maximal Cliques/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..50ff5fc705fdbf5c9fc9aa17bc3e3104c1d15da0 --- /dev/null +++ b/2023/3D Registration With Maximal Cliques/layout.json @@ -0,0 +1,8794 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 177, + 103, + 416, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 103, + 416, + 121 + ], + "spans": [ + { + "bbox": [ + 177, + 103, + 416, + 121 + ], + "type": "text", + "content": "3D Registration with Maximal Cliques" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 100, + 143, + 479, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 143, + 479, + 171 + ], + "spans": [ + { + "bbox": [ + 100, + 143, + 479, + 171 + ], + "type": "text", + "content": "Xiyu Zhang Jiaqi Yang* Shikun Zhang Yanning Zhang \nSchool of Computer Science, Northwestern Polytechnical University, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 173, + 493, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 173, + 493, + 186 + ], + "spans": [ + { + "bbox": [ + 100, + 173, + 493, + 186 + ], + "type": "text", + "content": "{2426988253, 
zhangshikun}@mail.nwpu.edu.cn; {jqyang, ynzhang}@nwpu.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "spans": [ + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "content": "As a fundamental problem in computer vision, 3D point cloud registration (PCR) aims to seek the optimal pose to align a point cloud pair. In this paper, we present a 3D registration method with maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph for accurate pose hypotheses generation: 1) A compatibility graph is constructed to render the affinity relationship between initial correspondences. 2) We search for maximal cliques in the graph, each of which represents a consensus set. We perform node-guided clique selection then, where each node corresponds to the maximal clique with the greatest graph weight. 3) Transformation hypotheses are computed for the selected cliques by the SVD algorithm and the best hypothesis is used to perform registration. Extensive experiments on U3M, 3DMatch, 3DLoMatch and KITTI demonstrate that MAC effectively increases registration accuracy, outperforms various state-of-the-art methods and boosts the performance of deep-learned methods. MAC combined with deep-learned methods achieves state-of-the-art registration recall of " + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "inline_equation", + "content": "95.7\\%" + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "content": " / " + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "inline_equation", + "content": "78.9\\%" + }, + { + "bbox": [ + 46, + 238, + 290, + 501 + ], + "type": "text", + "content": " on 3DMatch / 3DLoMatch." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "spans": [ + { + "bbox": [ + 47, + 524, + 128, + 537 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 544, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 677 + ], + "type": "text", + "content": "Point cloud registration (PCR) is an important and fundamental problem in 3D computer vision and has a wide range of applications in localization [13], 3D object detection [17] and 3D reconstruction [25]. Given two 3D scans of the same object (or scene), the goal of PCR is to estimate a six-degree-of-freedom (6-DoF) pose transformation that accurately aligns the two input point clouds. Using point-to-point feature correspondences is a popular and robust solution to the PCR problem. 
However, due to the limitations of existing 3D keypoint detectors & descriptors, the limited overlap between point clouds and data noise, corre" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 313, + 215, + 544, + 373 + ], + "blocks": [ + { + "bbox": [ + 313, + 215, + 544, + 373 + ], + "lines": [ + { + "bbox": [ + 313, + 215, + 544, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 215, + 544, + 373 + ], + "type": "image", + "image_path": "ed73bd9dfbf97bfefb637a76132fd13fe6bccc14c240ed6c4df0d7824aa855b4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 385, + 547, + 451 + ], + "lines": [ + { + "bbox": [ + 304, + 385, + 547, + 451 + ], + "spans": [ + { + "bbox": [ + 304, + 385, + 547, + 451 + ], + "type": "text", + "content": "Figure 1. Comparison of maximal and maximum cliques on a low overlapping point cloud pair. Maximal cliques (MAC) effectively choose the optimal 6-DoF transformation hypothesis with low rotation error (RE) and translation error (TE) for two point clouds with a low inlier ratio, while the maximum clique fails in this case." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 473, + 546, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 546, + 508 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 546, + 508 + ], + "type": "text", + "content": "spondences generated by feature matching usually contain outliers, resulting in great challenges to accurate 3D registration." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 715 + ], + "type": "text", + "content": "The problem of 3D registration by handling correspondences with outliers has been studied for decades. We classify them into geometric-only and deep-learned methods. For geometric-only methods [5, 6, 21, 30, 31, 38-41], random sample consensus (RANSAC) and its variants perform an iterative sampling strategy for registration. Although RANSAC-based methods are simple and efficient, their performance is highly vulnerable when the outlier rate increases, and it requires a large number of iterations to obtain acceptable results. Also, a series of global registration methods based on branch-and-bound (BnB) are proposed to search the 6D parameter space and obtain the optimal global solution. The main weakness of these methods is the high computational complexity, especially when the correspondence set is of a large magnitude and has an extremely high outlier rate. For deep-learned methods, some [1-4, 9, 10, 14, 16, 18, 19, 27, 35] focus on improving" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 684, + 136, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 684, + 136, + 693 + ], + "spans": [ + { + "bbox": [ + 60, + 684, + 136, + 693 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 694, + 286, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 694, + 286, + 703 + ], + "spans": [ + { + "bbox": [ + 48, + 694, + 286, + 703 + ], + "type": "text", + "content": "Code will be available at https://github.com/zhangxy0517/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 704, + 226, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 704, + 226, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 704, + 226, + 712 + ], + "type": "text", + "content": "3D-Registration-with-Maximal-Cliques." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17745" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 178 + ], + "type": "text", + "content": "one module in the registration process, such as investigating more discriminate keypoint feature descriptors or more effective correspondence selection techniques, while the others [22, 29, 43] focus on registration in an end-to-end manner. However, deep-learned based methods require a large amount of data for training and usually lack generalization on different datasets. At present, it is still very challenging to achieve accurate registrations in the presence of heavy outliers and in cross-dataset conditions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 179, + 288, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 179, + 288, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 179, + 288, + 418 + ], + "type": "text", + "content": "In this paper, we propose a geometric-only 3D registration method based on maximal cliques (MAC). The key insight is to loosen the previous maximum clique constraint, and mine more local consensus information in a graph to generate accurate pose hypotheses. We first model the initial correspondence set as a compatibility graph, where each node represents a single correspondence and each edge between two nodes indicates a pair of compatible correspondences. Second, we search for maximal cliques in the graph and then use node-guided clique filtering to match each graph node with the appropriate maximal clique containing it. Compared with the maximum clique, MAC is a looser constraint and is able to mine more local information in a graph. This helps us to achieve plenty of correct hypotheses from a graph. Finally, transformation hypotheses are computed for the selected cliques by the SVD algorithm. The best hypothesis is selected to perform registration using popular hypothesis evaluation metrics in the RANSAC family. 
To summarize, our main contributions are as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 425, + 287, + 622 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 59, + 425, + 287, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 425, + 287, + 497 + ], + "spans": [ + { + "bbox": [ + 59, + 425, + 287, + 497 + ], + "type": "text", + "content": "- We introduce a hypothesis generation method named MAC. Our MAC method is able to mine more local information in a graph, compared with the previous maximum clique constraint. We demonstrate that hypotheses generated by MAC are of high accuracy even in the presence of heavy outliers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 504, + 287, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 504, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 59, + 504, + 287, + 622 + ], + "type": "text", + "content": "- Based on MAC, we present a novel PCR method, which achieves state-of-the-art performance on U3M, 3DMatch, 3DLoMatch and KITTI datasets. Notably, our geometric-only MAC method outperforms several state-of-the-art deep learning methods [3, 9, 19, 27]. MAC can also be inserted as a module into multiple deep-learning frameworks [1, 10, 18, 29, 43] to boost their performance. MAC combined with GeoTransformer achieves the state-of-the-art registration recall of " + }, + { + "bbox": [ + 59, + 504, + 287, + 622 + ], + "type": "inline_equation", + "content": "95.7\\% / 78.9\\%" + }, + { + "bbox": [ + 59, + 504, + 287, + 622 + ], + "type": "text", + "content": " on 3DMatch / 3DLoMatch." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 639, + 134, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 639, + 134, + 651 + ], + "spans": [ + { + "bbox": [ + 47, + 639, + 134, + 651 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 659, + 212, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 212, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 212, + 672 + ], + "type": "text", + "content": "2.1. Geometric-only PCR Methods" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "Various geometric-only methods [6, 8, 20, 36, 45] have been proposed recently. Typically, RANSAC and its variants [5, 13, 30, 31, 38-40] remain the dominant approaches" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 395 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 395 + ], + "type": "text", + "content": "to the problem of estimating a 6-DoF pose from correspondences. RANSAC iteratively samples correspondences from the initial set, generating and evaluating geometric estimations for each subset until a satisfactory solution is obtained. Efficient and robust evaluation metrics are extremely important for using RANSAC to achieve accurate registration. 
To address the current problems of time-consuming and noise-sensitive evaluation metrics, [40] analyzes the contribution of inliers and outliers during the computation and proposed several metrics that can effectively improve the registration performance of RANSAC. A large number of variants have also been proposed to achieve further improvement. For example, Rusu et al. [31] presented the simple consensus-based initial alignment (SACIA) method, which samples correspondences spread out on the point cloud and leverages the Huber penalty for evaluation. Graph cut RANSAC (GC-RANSAC) [5] uses the graph-cut algorithm before model re-fitting in the local optimization step. Compatibility-guided sample consensus (CG-SAC) [30] additionally considers the normal information of key points during the sampling process. Yang et al. [39] proposed the sample consensus by sampling compatibility triangles (SAC-COT) method, which generates estimations by ranking and sampling ternary loops from the compatibility graph. Although many previous efforts have been made, these methods suffer from low time efficiency and limited accuracy in cases with high outlier rates." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 396, + 545, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 545, + 575 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 545, + 575 + ], + "type": "text", + "content": "A series of globally optimal methods based on BnB have been proposed recently. Yang et al. [41] proposed globally optimal ICP (GO-ICP), which rationalizes the planning of ICP update tasks at different stages, and its biggest advantage is that it minimizes the local optimum. Bustos and Chin [6] presented guaranteed outlier removal (GORE), which calculates the tight lower bound and tight upper bound for each correspondence and reduces the size of correspondence set by rejecting true outliers. Motivated by GORE, Li [21] proposed a polynomial time outlier removal method, which seeks the tight lower and upper bound by calculating the costs of correspondence matrix (CM) and augmented correspondence matrix (ACM). However, BnB techniques are sensitive to the cardinality of the input and are time-consuming for large-scale inputs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 586, + 460, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 460, + 598 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 460, + 598 + ], + "type": "text", + "content": "2.2. Deep-learned PCR Methods" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "content": "In addition to geometric-only methods, recent works also adopt deep learning techniques to perform PCR. Some methods aim to detect more repeatable keypoints [4, 18] and extract more descriptive features [1, 10]. FCGF [10] computes the features in a single pass through a fully convolutional neural network without keypoint detection. D3Feat [4] uses a fully convolutional network to obtain local information of point clouds and a joint learning framework to achieve 3D local feature detection and description." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17746" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 77, + 541, + 177 + ], + "blocks": [ + { + "bbox": [ + 55, + 77, + 541, + 177 + ], + "lines": [ + { + "bbox": [ + 55, + 77, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 55, + 77, + 541, + 177 + ], + "type": "image", + "image_path": "c994fb4968d3c45090143f102140aac197908d5299793f364e48ec4f9e159630.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 190, + 547, + 222 + ], + "lines": [ + { + "bbox": [ + 46, + 190, + 547, + 222 + ], + "spans": [ + { + "bbox": [ + 46, + 190, + 547, + 222 + ], + "type": "text", + "content": "Figure 2. Pipeline of MAC. 1. Construct a graph for the initial correspondence set. 2. Select a set of maximal cliques from the graph as the consistent sets. 3. Generate and evaluate the hypotheses according to the consistent sets. 4. Select the best hypothesis to perform 3D registration." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 244, + 289, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 244, + 289, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 244, + 289, + 506 + ], + "type": "text", + "content": "Predator [18] applies an attention mechanism to extract salient points in overlapping regions of the point clouds, thus achieving robust registration in the presence of low overlap rates. Spinnet [1] extracts local features which are rotationally invariant and sufficiently informative to enable accurate registration. Some methods [3, 9, 14, 27] focus on efficiently distinguishing correspondences as inliers and outliers. Deep global registration (DGR) [9] and 3DRegNet [27] classify a given correspondence by training end-to-end neural networks and using operators such as sparse convolution and point-by-point MLP. PointDSC [3] explicitly explores spatial consistency for removing outlier correspondences and 3D point cloud registration. Fu et al. [14] proposed a registration framework that utilizes deep graph matching (RGM) that can find robust and accurate point-to-point correspondences. More recently, several methods [29, 43] follow the detection-free methods and estimate the transformation in an end-to-end way. CoFiNet [43] extracts correspondences from coarse to fine without keypoint detection. GeoTransformer [29] learns geometric features for robust superpoint matching and is robust in low-overlap cases and invariant to rigid transformation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 508, + 289, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 289, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 289, + 604 + ], + "type": "text", + "content": "While deep learning techniques have demonstrated a great potential for PCR, these methods require a large amount of training data and their generalization is not always promising. By contrast, MAC does not require any training data and achieves more advanced performance than several deep-learned methods. 
Moreover, MAC can be served as a drop-on module in deep learning frameworks to boost their performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 614, + 91, + 626 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 614, + 91, + 626 + ], + "spans": [ + { + "bbox": [ + 47, + 614, + 91, + 626 + ], + "type": "text", + "content": "3. MAC" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 635, + 171, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 171, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 171, + 647 + ], + "type": "text", + "content": "3.1. Problem Formulation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": "For two point clouds " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^s" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^t" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " to be aligned, we first extract local features for them using geometric or learned descriptors. Let " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{p}^s" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{p}^t" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " denote the points in the " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^s" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^t" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": ", respectively. An initial correspondence set " + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{initial} = \\{\\mathbf{c}\\}" + }, + { + "bbox": [ + 47, + 653, + 288, + 715 + ], + "type": "text", + "content": " is formed by matching feature descriptors," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{c} = (\\mathbf{p}^s, \\mathbf{p}^t)" + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "content": ". 
MAC estimates the 6-DoF pose transformation between " + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^s" + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^t" + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "inline_equation", + "content": "\\mathbf{C}_{initial}" + }, + { + "bbox": [ + 305, + 244, + 545, + 268 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 268, + 545, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 268, + 545, + 292 + ], + "spans": [ + { + "bbox": [ + 305, + 268, + 545, + 292 + ], + "type": "text", + "content": "Our method is technically very simple, and its pipeline is shown in Fig. 2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 298, + 424, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 298, + 424, + 311 + ], + "spans": [ + { + "bbox": [ + 306, + 298, + 424, + 311 + ], + "type": "text", + "content": "3.2. Graph Construction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 316, + 545, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 401 + ], + "type": "text", + "content": "The graph space can more accurately depict the affinity relationship between correspondences than the Euclidean space. Therefore, we model the initial correspondences as a compatibility graph, where correspondences are represented by nodes and edges link nodes that are geometrically compatible. Here, we consider two approaches to construct a compatibility graph." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 406, + 545, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 406, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 317, + 406, + 545, + 454 + ], + "type": "text", + "content": "- First Order Graph. The first order graph (FOG) is constructed based on the rigid distance constraint between the correspondence pair " + }, + { + "bbox": [ + 317, + 406, + 545, + 454 + ], + "type": "inline_equation", + "content": "(\\mathbf{c}_i, \\mathbf{c}_j)" + }, + { + "bbox": [ + 317, + 406, + 545, + 454 + ], + "type": "text", + "content": ", which can be quantitatively measured as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 340, + 460, + 545, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 460, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 340, + 460, + 545, + 487 + ], + "type": "interline_equation", + "content": "S _ {d i s t} \\left(\\mathbf {c} _ {i}, \\mathbf {c} _ {j}\\right) = \\left| \\left\\| \\mathbf {p} _ {i} ^ {s} - \\mathbf {p} _ {j} ^ {s} \\right\\| - \\left\\| \\mathbf {p} _ {i} ^ {t} - \\mathbf {p} _ {j} ^ {t} \\right\\| \\right|. 
\\tag {1}", + "image_path": "3eb2b2ad4ee17d6d50059e946fa42249d4149dc6585a8c5fc6434315de54c838.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "spans": [ + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "text", + "content": "The compatibility score between " + }, + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_j" + }, + { + "bbox": [ + 325, + 491, + 545, + 504 + ], + "type": "text", + "content": " is given as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 356, + 512, + 545, + 541 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 512, + 545, + 541 + ], + "spans": [ + { + "bbox": [ + 356, + 512, + 545, + 541 + ], + "type": "interline_equation", + "content": "S _ {c m p} (\\mathbf {c} _ {i}, \\mathbf {c} _ {j}) = \\exp (- \\frac {S _ {d i s t} (\\mathbf {c} _ {i} , \\mathbf {c} _ {j}) ^ {2}}{2 d _ {c m p} ^ {2}}), \\qquad (2)", + "image_path": "10d2e79ce3143ae6db1fa90fb69b5119f90aad405d154c82e55bad22ca46811f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "d_{cmp}" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " is a distance parameter. 
Notably, if " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " is greater than a threshold " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "t_{cmp}" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_j" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " form an edge " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{ij}" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " is the weight of " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{e}_{ij}" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": ", otherwise " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "S_{cmp}(\\mathbf{c}_i,\\mathbf{c}_j)" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " will be set to 0. Since the compatibility graph is undirected, the weight matrix " + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{FOG}" + }, + { + "bbox": [ + 325, + 544, + 545, + 616 + ], + "type": "text", + "content": " is symmetric." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 317, + 622, + 545, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 622, + 545, + 693 + ], + "spans": [ + { + "bbox": [ + 317, + 622, + 545, + 693 + ], + "type": "text", + "content": "- Second Order Graph. The previous study [8] proposes a second order compatibility measure, which relates to the number of commonly compatible correspondences in the global set. The second order graph (SOG) evolves from FOG. 
The weight matrix " + }, + { + "bbox": [ + 317, + 622, + 545, + 693 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathit{SOG}}" + }, + { + "bbox": [ + 317, + 622, + 545, + 693 + ], + "type": "text", + "content": " can be calculated as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 342, + 701, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 342, + 701, + 545, + 714 + ], + "type": "interline_equation", + "content": "\\mathbf {W} _ {S O G} = \\mathbf {W} _ {F O G} \\odot \\left(\\mathbf {W} _ {F O G} \\times \\mathbf {W} _ {F O G}\\right), \\tag {3}", + "image_path": "62c2a54278eba38e90d55b35a82b24d46bfef5c31f5fef86eedc335492d06f45.jpg" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17747" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 66, + 72, + 287, + 95 + ], + "type": "text", + "content": " represents the element-wise product between two matrices." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 105, + 287, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 105, + 287, + 177 + ], + "spans": [ + { + "bbox": [ + 47, + 105, + 287, + 177 + ], + "type": "text", + "content": "Both graph construction methods can adapt to our frameworks. Compared with FOG, 1) SOG has stricter edge construction conditions and a higher degree of compatibility with adjacent nodes; 2) SOG is sparser, which facilitates a more rapid search of cliques. In Sec. 4.5, we experimentally compare FOG and SOG in our MAC framework." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 186, + 185, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 186, + 185, + 198 + ], + "spans": [ + { + "bbox": [ + 47, + 186, + 185, + 198 + ], + "type": "text", + "content": "3.3. 
Search Maximal Cliques" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "spans": [ + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": "Given an undirected graph " + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "inline_equation", + "content": "G = (\\mathbf{V}, \\mathbf{E})" + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": ", clique " + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "inline_equation", + "content": "C = (\\mathbf{V}', \\mathbf{E}')" + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "inline_equation", + "content": "\\mathbf{V}' \\subseteq \\mathbf{V}" + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "inline_equation", + "content": "\\mathbf{E}' \\subseteq \\mathbf{E}" + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": " is a subset of " + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 47, + 204, + 287, + 276 + ], + "type": "text", + "content": ", in which any two nodes are connected by edges. A maximal clique is a clique that cannot be extended by adding any nodes. In particular, the maximal clique with the most nodes is the maximum clique of a graph." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 277, + 287, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 277, + 287, + 419 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 287, + 419 + ], + "type": "text", + "content": "Searching for Maximal cliques. To generate hypotheses, RANSAC-based methods repeatedly take random samples from the correspondence set. Nevertheless, they fail to fully mine the affinity relationships between correspondences. Theoretically, inliers would form cliques in the graph, because inliers are usually geometrically compatible with each other. Previous works [23,24,28,36] focus on searching for maximum cliques in the graph, however, the maximum clique is a very tight constraint that only focuses on the global consensus information in a graph. Instead, we loosen the constraint and leverage maximal cliques to mine more local graph information." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "content": "By using the igraph_maximal cliques function in the igraph" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "inline_equation", + "content": "\\mathrm{C}++" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "content": " library, which makes use of a modified Bron-Kerbosch algorithm [12], the search of maximal cliques can be very efficient. 
The process's worst time complexity is " + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(d(n - d)3^{(d / 3)})" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "content": " is the degeneracy of the graph. Note that " + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 47, + 420, + 287, + 515 + ], + "type": "text", + "content": " is typically small in our problem because the graph is usually sparse when dealing with point cloud correspondences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": "Node-guided Clique Selection. After executing the maximal clique searching procedure, we obtain the maximal clique set " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "MAC_{initial}" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": ". In practice, " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "MAC_{initial}" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": " usually contains tens of thousands of maximal cliques, which will make it very time-consuming if we consider all maximal cliques. We introduce a node-guided clique selection method in this section to reduce " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "|MAC_{initial}|" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": ". First, we calculate the weight for each clique in " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "MAC_{initial}" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": ". 
Given a clique " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "C_i = (\\mathbf{V}_i, \\mathbf{E}_i)" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": ", the weight " + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "inline_equation", + "content": "w_{C_i}" + }, + { + "bbox": [ + 47, + 516, + 287, + 624 + ], + "type": "text", + "content": " is calculated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 634, + 287, + 661 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 634, + 287, + 661 + ], + "spans": [ + { + "bbox": [ + 129, + 634, + 287, + 661 + ], + "type": "interline_equation", + "content": "w _ {C _ {i}} = \\sum_ {e _ {j} \\in \\mathbf {E} _ {i}} w _ {e _ {j}}, \\tag {4}", + "image_path": "740e6785b0a8d35685a89d96031077d5fe4026803bd678b8d93edf4ec30d2380.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "inline_equation", + "content": "w_{e_j}" + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "text", + "content": " represents the weight of edge " + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "inline_equation", + "content": "e_j" + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{SOG}" + }, + { + "bbox": [ + 47, + 670, + 287, + 694 + ], + "type": "text", + "content": ". A node may be included by multiple maximal cliques and we" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "only retain the one with the greatest weight for that node. Then, duplicated cliques are removed from the rest, obtaining " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "MAC_{\\text{selected}}" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ". The motivation behind this is to use information about the local geometric structure around graph nodes to find the best consistent set of corresponding nodes. It is clear that the number of maximal cliques " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "|MAC_{\\text{selected}}|" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " will not exceed " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "|\\mathbf{V}|" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": ". We could send these maximal cliques directly to the following stages for 3D registration. However, when " + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "inline_equation", + "content": "|\\mathbf{V}|" + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": " is quite large, the number of retained maximal cliques can still be very large. Here, we propose several techniques to further filter the maximal cliques." 
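The graph construction of Sec. 3.2 (Eqs. 1-3) and the node-guided maximal-clique selection above (Eq. 4) map onto a few matrix operations plus an off-the-shelf clique enumerator. Below is a minimal illustrative sketch assuming NumPy and the python-igraph binding (the paper uses the igraph C++ library); function names, variable names, and the d_cmp default are ours, not the authors' released code.

```python
import numpy as np
import igraph as ig

def build_compatibility_graphs(src_pts, tgt_pts, d_cmp=0.1, t_cmp=0.99):
    """First- and second-order compatibility graphs (Eqs. 1-3).
    src_pts, tgt_pts: (N, 3) arrays; row i holds the matched points of c_i.
    d_cmp here is a placeholder; the paper sets it per dataset (e.g. 10 pr)."""
    d_src = np.linalg.norm(src_pts[:, None, :] - src_pts[None, :, :], axis=-1)
    d_tgt = np.linalg.norm(tgt_pts[:, None, :] - tgt_pts[None, :, :], axis=-1)
    s_dist = np.abs(d_src - d_tgt)                      # Eq. (1): rigidity residual
    w_fog = np.exp(-s_dist ** 2 / (2.0 * d_cmp ** 2))   # Eq. (2): compatibility score
    w_fog[w_fog < t_cmp] = 0.0                          # keep an edge only above t_cmp
    np.fill_diagonal(w_fog, 0.0)
    w_sog = w_fog * (w_fog @ w_fog)                     # Eq. (3): second-order graph
    return w_fog, w_sog

def node_guided_maximal_cliques(w_sog):
    """Enumerate maximal cliques, keep per node the clique of largest weight
    (clique weight = sum of its SOG edge weights, Eq. 4), then deduplicate."""
    g = ig.Graph.Adjacency((w_sog > 0).astype(int).tolist(), mode="undirected")
    selected = {}                                        # node -> (weight, clique)
    for clique in g.maximal_cliques(min=3):              # lower bound 3, as in Sec. 4.1
        idx = np.asarray(clique)
        weight = w_sog[np.ix_(idx, idx)].sum() / 2.0     # symmetric matrix: each edge twice
        for v in clique:
            if v not in selected or weight > selected[v][0]:
                selected[v] = (weight, tuple(sorted(clique)))
    return sorted({c for _, c in selected.values()})
```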
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "spans": [ + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": "- Normal consistency. In the maximal cliques, we find that the normal consistency is satisfied between each correspondence. Given two correspondences " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i = (\\mathbf{p}_i^s,\\mathbf{p}_i^t)" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_j = (\\mathbf{p}_j^s,\\mathbf{p}_j^t)" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": " and the normal vectors " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\mathbf{n}_i^s,\\mathbf{n}_j^s,\\mathbf{n}_i^t,\\mathbf{n}_j^t" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": " at the four points, the angular difference " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\alpha_{ij}^{s} = \\angle (\\mathbf{n}_{i}^{s},\\mathbf{n}_{j}^{s})" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\alpha_{ij}^{t} = \\angle (\\mathbf{n}_{i}^{t},\\mathbf{n}_{j}^{t})" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": " between the normal vectors can be calculated then. The following inequality ought to hold if " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_j" + }, + { + "bbox": [ + 317, + 213, + 545, + 320 + ], + "type": "text", + "content": " are normal consistent:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 386, + 328, + 545, + 348 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 328, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 386, + 328, + 545, + 348 + ], + "type": "interline_equation", + "content": "\\left| \\sin \\alpha_ {i j} ^ {s} - \\sin \\alpha_ {i j} ^ {t} \\right| < t _ {\\alpha}, \\tag {5}", + "image_path": "c49001f7fb9db2c045811a6822e39066fbc5dfee88a4841389e30c62ec9ae037.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 325, + 357, + 545, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 357, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 325, + 357, + 545, + 381 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 325, + 357, + 545, + 381 + ], + "type": "inline_equation", + "content": "t_{\\alpha}" + }, + { + "bbox": [ + 325, + 357, + 545, + 381 + ], + "type": "text", + "content": " is a threshold for determining whether the angular differences are similar." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "text", + "content": "- Clique ranking. We organize " + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "inline_equation", + "content": "MAC_{\\text{selected}}" + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "text", + "content": " in a descending order using the clique's weight " + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "inline_equation", + "content": "w_{C_i}" + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "text", + "content": ". The top-" + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 317, + 392, + 545, + 452 + ], + "type": "text", + "content": " ones are supposed to be more likely to produce correct hypotheses. This makes it flexible to control the number of hypotheses." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 461, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 461, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 306, + 461, + 545, + 485 + ], + "type": "text", + "content": "These techniques' experimental analysis is presented in Sec. 4.5." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 495, + 510, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 495, + 510, + 508 + ], + "spans": [ + { + "bbox": [ + 306, + 495, + 510, + 508 + ], + "type": "text", + "content": "3.4. Hypothesis Generation and Evaluation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 514, + 545, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 562 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 562 + ], + "type": "text", + "content": "Each maximal clique filtered from the previous step represents a consistent set of correspondences. By applying the SVD algorithm to each consistency set, we can obtain a set of 6-DoF pose hypotheses." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 571, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 317, + 571, + 545, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 571, + 545, + 619 + ], + "spans": [ + { + "bbox": [ + 317, + 571, + 545, + 619 + ], + "type": "text", + "content": "- Instance-equal SVD. Transformation estimation of correspondences is often implemented with SVD. Instance-equal means that the weights of all correspondences are equal." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 629, + 545, + 713 + ], + "type": "text", + "content": "- Weighted SVD. Assigning weights to correspondences is commonly adopted by recent PCR methods [8, 9, 27, 29]. Correspondence weights can be derived by solving the eigenvectors of a compatibility matrix constructed for a compatibility graph. Here, we take the primary eigenvalues of " + }, + { + "bbox": [ + 317, + 629, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{SOG}" + }, + { + "bbox": [ + 317, + 629, + 545, + 713 + ], + "type": "text", + "content": " as correspondence weights." 
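Both SVD variants reduce to the weighted Kabsch/Umeyama closed form, with instance-equal SVD being the special case of uniform weights. The following is a small sketch of that estimator (our illustration, not the authors' implementation); it assumes the correspondence weights have already been computed, e.g. from the spectral decomposition of the compatibility matrix mentioned above.

```python
import numpy as np

def rigid_transform_svd(src, tgt, weights=None):
    """Closed-form rigid transform (R, t) aligning src to tgt via SVD.
    src, tgt: (M, 3) points of one consistent correspondence set (a clique);
    weights: optional per-correspondence weights (None -> instance-equal SVD)."""
    w = np.ones(len(src)) if weights is None else np.asarray(weights, float)
    w = w / w.sum()
    mu_s, mu_t = w @ src, w @ tgt                       # weighted centroids
    h = (src - mu_s).T @ np.diag(w) @ (tgt - mu_t)      # weighted cross-covariance
    u, _, vt = np.linalg.svd(h)
    d = np.sign(np.linalg.det(vt.T @ u.T))              # guard against reflections
    r = vt.T @ np.diag([1.0, 1.0, d]) @ u.T
    t = mu_t - r @ mu_s
    return r, t
```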
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 702, + 120, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 120, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 120, + 713 + ], + "type": "text", + "content": "1https://igraph.org" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17748" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "text", + "content": "The final goal of MAC is to estimate the optimal 6-DoF rigid transformation (composed of a rotation pose " + }, + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^{*} \\in SO(3)" + }, + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "text", + "content": " and a translation pose " + }, + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "inline_equation", + "content": "\\mathbf{t}^{*} \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 47, + 72, + 288, + 120 + ], + "type": "text", + "content": ") that maximizes the objective function as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 129, + 287, + 162 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 129, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 104, + 129, + 287, + 162 + ], + "type": "interline_equation", + "content": "\\left(\\mathbf {R} ^ {*}, \\mathbf {t} ^ {*}\\right) = \\arg \\max _ {\\mathbf {R}, \\mathbf {t}} \\sum_ {i = 1} ^ {N} s \\left(\\mathbf {c} _ {i}\\right), \\tag {6}", + "image_path": "60e86be5ec49f61737407906d2f2a380ec96803292ea658ca258791162b352f5.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i \\in \\mathbf{C}_{initial}" + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "inline_equation", + "content": "N = |\\mathbf{C}_{initial}|" + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "inline_equation", + "content": "s(\\mathbf{c}_i)" + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "content": " represents the score of " + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 46, + 171, + 288, + 244 + ], + "type": "text", + "content": ". We consider several RANSAC hypothesis evaluation metrics here [40], including mean average error (MAE), mean square error (MSE) and inlier count. Their behaviors will be experimentally compared in Sec. 4.5. The best hypothesis is taken to perform 3D registration then." 
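Hypothesis evaluation then scores every candidate (R, t) over the full initial correspondence set and keeps the best one, in the spirit of Eq. (6). The sketch below uses the inlier-count metric; MAE/MSE variants score residual statistics instead. Names and the threshold value are illustrative assumptions, not values from the paper.

```python
import numpy as np

def score_hypothesis(r, t, src_all, tgt_all, inlier_thresh=0.1):
    """Inlier-count score of one pose hypothesis over C_initial."""
    residuals = np.linalg.norm(src_all @ r.T + t - tgt_all, axis=1)
    return np.count_nonzero(residuals < inlier_thresh)

def select_best_hypothesis(hypotheses, src_all, tgt_all):
    """Pick the (R, t) pair maximising the evaluation score (cf. Eq. 6)."""
    return max(hypotheses,
               key=lambda rt: score_hypothesis(rt[0], rt[1], src_all, tgt_all))
```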
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 255, + 128, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 255, + 128, + 269 + ], + "spans": [ + { + "bbox": [ + 47, + 255, + 128, + 269 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 275, + 163, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 275, + 163, + 289 + ], + "spans": [ + { + "bbox": [ + 47, + 275, + 163, + 289 + ], + "type": "text", + "content": "4.1. Experimental Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "text", + "content": "Datasets. We consider four datasets, i.e., the object-scale dataset U3M [26], the scene-scale indoor datasets 3DMatch [44] & 3DLoMatch [18], and the scene-scale outdoor dataset KITTI [15]. U3M has 496 point cloud pairs. 3DLoMatch is the subset of 3DMatch, where the overlap rate of the point cloud pairs ranges from " + }, + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 294, + 287, + 390 + ], + "type": "text", + "content": ", which is very challenging. For KITTI, we follow [3,8] and obtain 555 pairs of point clouds for testing." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "content": "Evaluation Criteria. We follow [39] that employs the root mean square error (RMSE) metric to evaluate the 3D point cloud registration performance on the U3M object-scale dataset. In addition, we employ the rotation error (RE) and translation error (TE) to evaluate the registration results on the scene-scale dataset. By referring to the settings in [9], the registration is considered successful when the " + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathrm{RE} \\leq 15^{\\circ}" + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathrm{TE} \\leq 30\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "content": " on 3DMatch & 3DLoMatch datasets, and " + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathrm{RE} \\leq 5^{\\circ}" + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathrm{TE} \\leq 60\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 390, + 287, + 521 + ], + "type": "text", + "content": " on KITTI dataset. We define a dataset's registration accuracy as the ratio of success cases to the number of point cloud pairs to be registered." 
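For reference, the success criteria above can be checked with the usual rotation/translation error definitions; the exact RE formula below is our assumption of the standard isotropic-angle convention used by the cited baselines.

```python
import numpy as np

def registration_errors(r_est, t_est, r_gt, t_gt):
    """Rotation error in degrees and translation error in the dataset's unit."""
    cos_re = np.clip((np.trace(r_gt.T @ r_est) - 1.0) / 2.0, -1.0, 1.0)
    return np.degrees(np.arccos(cos_re)), np.linalg.norm(t_gt - t_est)

# e.g. 3DMatch / 3DLoMatch success: RE <= 15 deg and TE <= 0.30 m;
#      KITTI success:               RE <= 5 deg  and TE <= 0.60 m.
```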
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "content": "Implementation Details. Our method is implemented in " + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{C + + }" + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "content": " based on the point cloud library (PCL) [32] and igraph library. For U3M, we use the Harris3D (H3D) [33] keypoint detector and the signatures of histograms of orientation (SHOT) [34] descriptor for initial correspondence generation as in [42]. For 3DMatch and 3DLoMatch datasets, we use the fast point features histograms (FPFH) [31] descriptor and fully convolutional geometric features (FCGF) [10] descriptor to generate the initial correspondence set. The main steps in the comparative experimental sections are SOG construction, searching node-guided maximal cliques, hypotheses generation by instance-equal SVD and evaluation by MAE. Default values for compatibility threshold " + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "inline_equation", + "content": "t_{cmp}" + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "content": " and distance parameter " + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "inline_equation", + "content": "d_{cmp}" + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "content": " mentioned in Sec. 3.2 are 0.99 and 10 pr respectively; if input matches exceed 5000, " + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "inline_equation", + "content": "t_{cmp}" + }, + { + "bbox": [ + 46, + 522, + 287, + 715 + ], + "type": "text", + "content": " is set to 0.999 to reduce computation. Here, 'pr' is" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 318, + 69, + 533, + 214 + ], + "blocks": [ + { + "bbox": [ + 318, + 69, + 533, + 214 + ], + "lines": [ + { + "bbox": [ + 318, + 69, + 533, + 214 + ], + "spans": [ + { + "bbox": [ + 318, + 69, + 533, + 214 + ], + "type": "image", + "image_path": "915becacf5a5f7f86b3b83b2b994f7cce90c29f0959816bf014c5716987f61ea.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 222, + 545, + 244 + ], + "lines": [ + { + "bbox": [ + 305, + 222, + 545, + 244 + ], + "spans": [ + { + "bbox": [ + 305, + 222, + 545, + 244 + ], + "type": "text", + "content": "Figure 3. Registration performance of tested point cloud registration methods on U3M." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 308, + 263, + 545, + 414 + ], + "blocks": [ + { + "bbox": [ + 308, + 263, + 545, + 414 + ], + "lines": [ + { + "bbox": [ + 308, + 263, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 308, + 263, + 545, + 414 + ], + "type": "table", + "html": "
<tr><td></td><td></td><td colspan="3">FPFH</td><td colspan="3">FCGF</td></tr>
<tr><td></td><td></td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td></tr>
<tr><td>i) Traditional</td><td>SM [20]</td><td>55.88</td><td>2.94</td><td>8.15</td><td>86.57</td><td>2.29</td><td>7.07</td></tr>
<tr><td></td><td>FGR [45]</td><td>40.91</td><td>4.96</td><td>10.25</td><td>78.93</td><td>2.90</td><td>8.41</td></tr>
<tr><td></td><td>RANSAC-1M [13]</td><td>64.20</td><td>4.05</td><td>11.35</td><td>88.42</td><td>3.05</td><td>9.42</td></tr>
<tr><td></td><td>RANSAC-4M [13]</td><td>66.10</td><td>3.95</td><td>11.03</td><td>91.44</td><td>2.69</td><td>8.38</td></tr>
<tr><td></td><td>GC-RANSAC [5]</td><td>67.65</td><td>2.33</td><td>6.87</td><td>92.05</td><td>2.33</td><td>7.11</td></tr>
<tr><td></td><td>TEASER++ [36]</td><td>75.48</td><td>2.48</td><td>7.31</td><td>85.77</td><td>2.73</td><td>8.66</td></tr>
<tr><td></td><td>CG-SAC [30]</td><td>78.00</td><td>2.40</td><td>6.89</td><td>87.52</td><td>2.42</td><td>7.66</td></tr>
<tr><td></td><td>SC2-PCR [8]</td><td>83.73</td><td>2.18</td><td>6.70</td><td>93.16</td><td>2.09</td><td>6.51</td></tr>
<tr><td>ii) Deep learned</td><td>3DRegNet [27]</td><td>26.31</td><td>3.75</td><td>9.60</td><td>77.76</td><td>2.74</td><td>8.13</td></tr>
<tr><td></td><td>DGR [9]</td><td>32.84</td><td>2.45</td><td>7.53</td><td>88.85</td><td>2.28</td><td>7.02</td></tr>
<tr><td></td><td>DHVR [19]</td><td>67.10</td><td>2.78</td><td>7.84</td><td>91.93</td><td>2.25</td><td>7.08</td></tr>
<tr><td></td><td>PointDSC [3]</td><td>72.95</td><td>2.18</td><td>6.45</td><td>91.87</td><td>2.10</td><td>6.54</td></tr>
<tr><td></td><td>MAC</td><td>84.10</td><td>1.96</td><td>6.18</td><td>93.72</td><td>1.89</td><td>6.03</td></tr>
", + "image_path": "46c8d1e799c0e70a1a9cce162e9669ace9cb79dbfbe41649d5b43e260720b879.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 335, + 422, + 515, + 434 + ], + "lines": [ + { + "bbox": [ + 335, + 422, + 515, + 434 + ], + "spans": [ + { + "bbox": [ + 335, + 422, + 515, + 434 + ], + "type": "text", + "content": "Table 1. Registration results on 3DMatch dataset." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 463, + 545, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 463, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 305, + 463, + 545, + 535 + ], + "type": "text", + "content": "a distance unit called point cloud resolution [42]. Normal vectors are calculated using the NormalEstimation class of PCL with the 20 nearest neighboring points. When searching maximal cliques, the lower bound on clique size is set to 3 with no upper bound defined. All experiments were implemented with an Intel 12700H CPU and 32 GB RAM." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 555, + 441, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 555, + 441, + 567 + ], + "spans": [ + { + "bbox": [ + 306, + 555, + 441, + 567 + ], + "type": "text", + "content": "4.2. Results on U3M Dataset" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "spans": [ + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "content": "We perform an extensive comparison in Fig. 3. Here, the following methods are tested, including SAC-COT [39], OSAC [37], SAC-IA [31], RANSAC [13], " + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{SC^2}" + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "content": "-PCR [8], FGR [45], GO-ICP [41], and PPF [11], where the former four are RANSAC-based methods. The RMSE threshold is varied from " + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "inline_equation", + "content": "0.5\\mathrm{pr}" + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "inline_equation", + "content": "5\\mathrm{pr}" + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "content": " with a step of " + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "inline_equation", + "content": "0.5\\mathrm{pr}" + }, + { + "bbox": [ + 305, + 578, + 545, + 650 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 653, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 653, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 653, + 545, + 712 + ], + "type": "text", + "content": "The results indicate that MAC performs best and significantly outperforms all tested RANSAC fashion estimators, such as SAC-COT, OSAC, SAC-IA, and RANSAC. The registration performance of MAC based on the MAE evaluation metric is the best on U3M." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17749" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 289, + 169 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 289, + 169 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 289, + 169 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 289, + 169 + ], + "type": "table", + "html": "
<tr><td></td><td></td><td colspan="3">FPFH</td><td colspan="3">FCGF</td></tr>
<tr><td></td><td></td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td></tr>
<tr><td>i) Traditional</td><td>RANSAC-1M [13]</td><td>0.67</td><td>10.27</td><td>15.06</td><td>9.77</td><td>7.01</td><td>14.87</td></tr>
<tr><td></td><td>RANSAC-4M [13]</td><td>0.45</td><td>10.39</td><td>20.03</td><td>10.44</td><td>6.91</td><td>15.14</td></tr>
<tr><td></td><td>TEASER++ [36]</td><td>35.15</td><td>4.38</td><td>10.96</td><td>46.76</td><td>4.12</td><td>12.89</td></tr>
<tr><td></td><td>SC2-PCR [8]</td><td>38.57</td><td>4.03</td><td>10.31</td><td>58.73</td><td>3.80</td><td>10.44</td></tr>
<tr><td>ii) Deep learned</td><td>DGR [9]</td><td>19.88</td><td>5.07</td><td>13.53</td><td>43.80</td><td>4.17</td><td>10.82</td></tr>
<tr><td></td><td>PointDSC [3]</td><td>20.38</td><td>4.04</td><td>10.25</td><td>56.20</td><td>3.87</td><td>10.48</td></tr>
<tr><td></td><td>MAC</td><td>40.88</td><td>3.66</td><td>9.45</td><td>59.85</td><td>3.50</td><td>9.75</td></tr>
", + "image_path": "2bd16836d7f7a2efb673c9709cea157ecfca83850e2272315882a5f17770a4cf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 50, + 201, + 287, + 316 + ], + "blocks": [ + { + "bbox": [ + 72, + 177, + 261, + 189 + ], + "lines": [ + { + "bbox": [ + 72, + 177, + 261, + 189 + ], + "spans": [ + { + "bbox": [ + 72, + 177, + 261, + 189 + ], + "type": "text", + "content": "Table 2. Registration results on 3DLoMatch dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 201, + 287, + 316 + ], + "lines": [ + { + "bbox": [ + 50, + 201, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 50, + 201, + 287, + 316 + ], + "type": "table", + "html": "
<tr><td># Samples</td><td colspan="5">3DMatch RR(%)</td><td colspan="5">3DLoMatch RR(%)</td></tr>
<tr><td></td><td>5000</td><td>2500</td><td>1000</td><td>500</td><td>250</td><td>5000</td><td>2500</td><td>1000</td><td>500</td><td>250</td></tr>
<tr><td>FCGF [10]</td><td>85.1</td><td>84.7</td><td>83.3</td><td>81.6</td><td>71.4</td><td>40.1</td><td>41.7</td><td>38.2</td><td>35.4</td><td>26.8</td></tr>
<tr><td>SpinNet [1]</td><td>88.6</td><td>86.6</td><td>85.5</td><td>83.5</td><td>70.2</td><td>59.8</td><td>54.9</td><td>48.3</td><td>39.8</td><td>26.8</td></tr>
<tr><td>Predator [18]</td><td>89.0</td><td>89.9</td><td>90.6</td><td>88.5</td><td>86.6</td><td>59.8</td><td>61.2</td><td>62.4</td><td>60.8</td><td>58.1</td></tr>
<tr><td>CoFiNet [43]</td><td>89.3</td><td>88.9</td><td>88.4</td><td>87.4</td><td>87.0</td><td>67.5</td><td>66.2</td><td>64.2</td><td>63.1</td><td>61.0</td></tr>
<tr><td>GeoTransformer [29]</td><td>92.0</td><td>91.8</td><td>91.8</td><td>91.4</td><td>91.2</td><td>75.0</td><td>74.8</td><td>74.2</td><td>74.1</td><td>73.5</td></tr>
<tr><td>FCGF+MAC</td><td>91.3</td><td>92.2</td><td>91.6</td><td>90.4</td><td>85.6</td><td>57.2</td><td>56.0</td><td>52.6</td><td>42.4</td><td>32.1</td></tr>
<tr><td></td><td>6.2↑</td><td>7.5↑</td><td>8.3↑</td><td>8.8↑</td><td>14.2↑</td><td>17.1↑</td><td>14.3↑</td><td>14.4↑</td><td>7.0↑</td><td>5.3↑</td></tr>
<tr><td>SpinNet+MAC</td><td>95.3</td><td>95.1</td><td>93.3</td><td>91.4</td><td>81.2</td><td>72.8</td><td>69.9</td><td>59.2</td><td>54.8</td><td>32.1</td></tr>
<tr><td></td><td>6.7↑</td><td>8.5↑</td><td>7.8↑</td><td>7.9↑</td><td>11.0↑</td><td>13.0↑</td><td>15.0↑</td><td>10.9↑</td><td>15.0↑</td><td>5.3↑</td></tr>
<tr><td>Predator+MAC</td><td>94.6</td><td>94.4</td><td>94.0</td><td>93.5</td><td>92.3</td><td>70.9</td><td>70.4</td><td>69.8</td><td>67.2</td><td>64.1</td></tr>
<tr><td></td><td>5.6↑</td><td>4.5↑</td><td>3.4↑</td><td>5.0↑</td><td>5.7↑</td><td>11.1↑</td><td>9.2↑</td><td>7.4↑</td><td>6.4↑</td><td>6.0↑</td></tr>
<tr><td>CoFiNet+MAC</td><td>94.1</td><td>94.4</td><td>94.5</td><td>93.8</td><td>92.7</td><td>71.6</td><td>71.5</td><td>70.6</td><td>69.2</td><td>68.1</td></tr>
<tr><td></td><td>4.8↑</td><td>5.5↑</td><td>6.1↑</td><td>6.4↑</td><td>5.7↑</td><td>4.1↑</td><td>5.3↑</td><td>6.4↑</td><td>6.1↑</td><td>7.1↑</td></tr>
<tr><td>GeoTransformer+MAC</td><td>95.7</td><td>95.7</td><td>95.2</td><td>95.3</td><td>94.6</td><td>78.9</td><td>78.7</td><td>78.2</td><td>77.7</td><td>76.6</td></tr>
<tr><td></td><td>3.7↑</td><td>3.9↑</td><td>3.4↑</td><td>3.9↑</td><td>3.4↑</td><td>3.9↑</td><td>3.9↑</td><td>4.0↑</td><td>3.6↑</td><td>3.1↑</td></tr>
", + "image_path": "7011ef40e2df5f173527d5a49895d687b00a16b3e8e1529e6b785fbbb54a91d9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 368, + 278, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 368, + 278, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 368, + 278, + 380 + ], + "type": "text", + "content": "4.3. Results on 3DMatch & 3DLoMatch Datasets" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 388, + 287, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 287, + 459 + ], + "type": "text", + "content": "PCR methods comparison. Both geometric-only and deep-learned methods are considered for comparison, including SM [20], FGR [45], RANSAC [13], TEASER++ [36], CG-SAC [30], " + }, + { + "bbox": [ + 47, + 388, + 287, + 459 + ], + "type": "inline_equation", + "content": "\\mathrm{SC^2}" + }, + { + "bbox": [ + 47, + 388, + 287, + 459 + ], + "type": "text", + "content": "-PCR [8], 3DRegNet [27], DGR [9], DHVR [19] and PointDSC [3]. Results are shown in Tables 1 and 2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 460, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 460, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 460, + 287, + 581 + ], + "type": "text", + "content": "The following conclusions can be made: 1) regardless of which descriptor is used, MAC outperforms all compared methods on both 3DMatch and 3DLoMatch datasets, indicating its strong ability to register indoor scene point clouds; 2) even compared with deep-learned methods, MAC still achieves better performance without any data training; 3) in addition to the registration recall (RR) metric, MAC achieves the best RE and TE metrics. This indicates that registrations by MAC are very accurate and MAC is able to align low overlapping data." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 665 + ], + "type": "text", + "content": "Boosting deep-learned methods with MAC. Several kinds of state-of-the-art deep-learned methods are integrated with MAC for evaluation. The considered methods are FCGF [10], SpinNet [1], Predator [18], CoFiNet [43] and GeoTransformer [29]. Each method is tested under a different number of samples, which refer to the number of sampled points or correspondences. Results are reported in Table 3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 665, + 287, + 714 + ], + "type": "text", + "content": "Remarkably, MAC dramatically improves the registration recall under all tested methods on both 3DMatch and 3DLoMatch datasets. Notably, the performance of SpinNet, Predator and CoFiNet after boosting by MAC exceeds" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 70, + 547, + 180 + ], + "blocks": [ + { + "bbox": [ + 47, + 323, + 287, + 346 + ], + "lines": [ + { + "bbox": [ + 47, + 323, + 287, + 346 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 287, + 346 + ], + "type": "text", + "content": "Table 3. Performance boosting for deep-learned methods when combined with MAC." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 70, + 547, + 180 + ], + "lines": [ + { + "bbox": [ + 309, + 70, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 309, + 70, + 547, + 180 + ], + "type": "table", + "html": "
<tr><td></td><td></td><td colspan="3">FPFH</td><td colspan="3">FCGF</td></tr>
<tr><td></td><td></td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td></tr>
<tr><td>i) Traditional</td><td>FGR [45]</td><td>5.23</td><td>0.86</td><td>43.84</td><td>89.54</td><td>0.46</td><td>25.72</td></tr>
<tr><td></td><td>TEASER++ [36]</td><td>91.17</td><td>1.03</td><td>17.98</td><td>94.96</td><td>0.38</td><td>13.69</td></tr>
<tr><td></td><td>RANSAC [13]</td><td>74.41</td><td>1.55</td><td>30.20</td><td>80.36</td><td>0.73</td><td>26.79</td></tr>
<tr><td></td><td>CG-SAC [30]</td><td>74.23</td><td>0.73</td><td>14.02</td><td>83.24</td><td>0.56</td><td>22.96</td></tr>
<tr><td></td><td>SC2-PCR [8]</td><td>99.28</td><td>0.39</td><td>8.68</td><td>97.84</td><td>0.33</td><td>20.58</td></tr>
<tr><td>ii) Deep learned</td><td>DGR [9]</td><td>77.12</td><td>1.64</td><td>33.10</td><td>96.90</td><td>0.34</td><td>21.70</td></tr>
<tr><td></td><td>PointDSC [3]</td><td>98.92</td><td>0.38</td><td>8.35</td><td>97.84</td><td>0.33</td><td>20.32</td></tr>
<tr><td></td><td>MAC</td><td>99.46</td><td>0.40</td><td>8.46</td><td>97.84</td><td>0.34</td><td>19.34</td></tr>
", + "image_path": "09d1d0b8deacac927256bd60536890e4e062968851c57158079a570977083844.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 340, + 188, + 511, + 199 + ], + "lines": [ + { + "bbox": [ + 340, + 188, + 511, + 199 + ], + "spans": [ + { + "bbox": [ + 340, + 188, + 511, + 199 + ], + "type": "text", + "content": "Table 4. Registration results on KITTI dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "text", + "content": "that of GeoTransformer. MAC working with GeoTransformer achieves state-of-the-art registration recall of " + }, + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "inline_equation", + "content": "95.7\\%" + }, + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "text", + "content": " / " + }, + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "inline_equation", + "content": "78.9\\%" + }, + { + "bbox": [ + 304, + 220, + 545, + 281 + ], + "type": "text", + "content": " on 3DMatch / 3DLoMatch. The results suggest that: 1) MAC can greatly boost existing deep-learned methods; 2) MAC is not sensitive to the number of samples." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 287, + 450, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 287, + 450, + 298 + ], + "spans": [ + { + "bbox": [ + 305, + 287, + 450, + 298 + ], + "type": "text", + "content": "4.4. Results on KITTI Dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 305, + 545, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 305, + 545, + 341 + ], + "spans": [ + { + "bbox": [ + 305, + 305, + 545, + 341 + ], + "type": "text", + "content": "In Table 4, the results of DGR [9], PointDSC [3], TEASER++ [36], RANSAC [13], CG-SAC [30], " + }, + { + "bbox": [ + 305, + 305, + 545, + 341 + ], + "type": "inline_equation", + "content": "\\mathrm{SC^2}" + }, + { + "bbox": [ + 305, + 305, + 545, + 341 + ], + "type": "text", + "content": "-PCR [8] and MAC are reported for comparison." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 341, + 546, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 546, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 546, + 460 + ], + "type": "text", + "content": "As shown by the table, in terms of the registration recall performance, MAC presents the best and is tied for the best results with FPFH and FCGF descriptor settings, respectively. MAC also has a lower TE than the state-of-the-art geometric-only method " + }, + { + "bbox": [ + 304, + 341, + 546, + 460 + ], + "type": "inline_equation", + "content": "\\mathrm{SC^2}" + }, + { + "bbox": [ + 304, + 341, + 546, + 460 + ], + "type": "text", + "content": "-PCR. Note that outdoor point clouds are significantly sparse and non-uniformly distributed. The registration experiments on the object, indoor scene, and outdoor scene datasets consistently verify that MAC holds good generalization ability in different application contexts." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 468, + 430, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 468, + 430, + 480 + ], + "spans": [ + { + "bbox": [ + 306, + 468, + 430, + 480 + ], + "type": "text", + "content": "4.5. Analysis Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 486, + 545, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 570 + ], + "type": "text", + "content": "In this section, we perform ablation studies and analysis experiments on both 3DMatch and 3DLoMatch datasets. We progressively experiment with the techniques proposed in Sec. 3, and the results are shown in Table 5. The quality of generated hypotheses is analyzed in Table 6. The performance upper bound is studied in Table 7. Table 8 presents the time efficiency analysis of MAC." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 570, + 545, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 641 + ], + "type": "text", + "content": "Performing feature matching selection. Before 3D registration, a popular way is to perform outlier rejection to reduce the correspondence set. Here we employ geometric consistency (GC) [7], which is independent of the feature space and associates the largest consistent cluster relating to the compatibility among correspondences." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "By comparing Row 1 and 2 of Table 5, GC has a negative impact on MAC performance, potentially due to that some inliers are also removed in this process. This demonstrates that MAC can still perform well even if the initial correspondence set is directly utilized as input without any filtering." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17750" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 69, + 547, + 313 + ], + "blocks": [ + { + "bbox": [ + 50, + 69, + 547, + 313 + ], + "lines": [ + { + "bbox": [ + 50, + 69, + 547, + 313 + ], + "spans": [ + { + "bbox": [ + 50, + 69, + 547, + 313 + ], + "type": "table", + "html": "
<tr><td></td><td></td><td>FOG</td><td>SOG</td><td>GC</td><td>MC</td><td>NG</td><td>NC</td><td>CR</td><td>SVD</td><td>W-SVD</td><td>MAE</td><td>MSE</td><td>#inlier</td><td>RR(%)</td><td>RE(°)</td><td>TE(cm)</td></tr>
<tr><td>FPFH</td><td>1)</td><td colspan="12"></td><td>83.86 / 39.14</td><td>2.17 / 4.01</td><td>6.51 / 9.94</td></tr>
<tr><td></td><td>2)</td><td colspan="12"></td><td>77.02 / 26.61</td><td>2.10 / 3.83</td><td>6.19 / 9.49</td></tr>
<tr><td></td><td>3)</td><td colspan="12"></td><td>82.26 / 39.02</td><td>2.12 / 3.98</td><td>6.43 / 9.89</td></tr>
<tr><td></td><td>4)</td><td colspan="12"></td><td>83.49 / 38.91</td><td>2.22 / 4.11</td><td>6.65 / 10.05</td></tr>
<tr><td></td><td>5)</td><td colspan="12"></td><td>83.67 / 38.85</td><td>2.15 / 4.03</td><td>6.53 / 9.82</td></tr>
<tr><td></td><td>6)</td><td colspan="12"></td><td>84.10 / 40.88</td><td>1.96 / 3.66</td><td>6.18 / 9.45</td></tr>
<tr><td></td><td>7)</td><td colspan="12"></td><td>82.93 / 39.98</td><td>1.95 / 3.66</td><td>6.12 / 9.48</td></tr>
<tr><td></td><td>8)</td><td colspan="12"></td><td>82.44 / 38.46</td><td>2.16 / 3.97</td><td>6.41 / 9.85</td></tr>
<tr><td></td><td>9)</td><td colspan="12"></td><td>74.06 / 31.11</td><td>2.08 / 3.89</td><td>6.17 / 9.82</td></tr>
<tr><td></td><td>10) Top100</td><td colspan="12"></td><td>82.01 / 37.79</td><td>2.13 / 4.02</td><td>6.42 / 9.82</td></tr>
<tr><td></td><td>11) Top200</td><td colspan="12"></td><td>83.18 / 38.85</td><td>2.16 / 4.08</td><td>6.55 / 9.91</td></tr>
<tr><td></td><td>12) Top500</td><td colspan="12"></td><td>83.06 / 38.85</td><td>2.14 / 4.03</td><td>6.47 / 9.81</td></tr>
<tr><td></td><td>13) Top1000</td><td colspan="12"></td><td>83.30 / 38.91</td><td>2.16 / 4.05</td><td>6.53 / 9.84</td></tr>
<tr><td></td><td>14) Top2000</td><td colspan="12"></td><td>83.36 / 38.79</td><td>2.14 / 4.02</td><td>6.52 / 9.78</td></tr>
<tr><td>FCGF</td><td>1)</td><td colspan="12"></td><td>93.41 / 59.80</td><td>2.04 / 3.78</td><td>6.33 / 10.16</td></tr>
<tr><td></td><td>2)</td><td colspan="12"></td><td>91.68 / 49.97</td><td>1.99 / 3.64</td><td>6.23 / 9.90</td></tr>
<tr><td></td><td>3)</td><td colspan="12"></td><td>93.35 / 59.24</td><td>2.04 / 3.67</td><td>6.28 / 9.99</td></tr>
<tr><td></td><td>4)</td><td colspan="12"></td><td>92.91 / 59.07</td><td>2.06 / 3.88</td><td>6.33 / 10.20</td></tr>
<tr><td></td><td>5)</td><td colspan="12"></td><td>93.16 / 59.46</td><td>2.04 / 3.76</td><td>6.26 / 10.00</td></tr>
<tr><td></td><td>6)</td><td colspan="12"></td><td>93.72 / 59.85</td><td>1.89 / 3.50</td><td>6.03 / 9.75</td></tr>
<tr><td></td><td>7)</td><td colspan="12"></td><td>93.59 / 59.01</td><td>1.86 / 3.49</td><td>6.00 / 9.61</td></tr>
<tr><td></td><td>8)</td><td colspan="12"></td><td>93.28 / 59.63</td><td>2.02 / 3.73</td><td>6.24 / 9.98</td></tr>
<tr><td></td><td>9)</td><td colspan="12"></td><td>87.86 / 49.35</td><td>2.00 / 3.61</td><td>6.09 / 9.60</td></tr>
<tr><td></td><td>10) Top100</td><td colspan="12"></td><td>92.42 / 57.44</td><td>2.00 / 3.75</td><td>6.21 / 10.00</td></tr>
<tr><td></td><td>11) Top200</td><td colspan="12"></td><td>93.22 / 57.83</td><td>2.01 / 3.75</td><td>6.29 / 10.06</td></tr>
<tr><td></td><td>12) Top500</td><td colspan="12"></td><td>93.22 / 58.90</td><td>2.02 / 3.78</td><td>6.33 / 10.02</td></tr>
<tr><td></td><td>13) Top1000</td><td colspan="12"></td><td>93.35 / 59.40</td><td>2.05 / 3.78</td><td>6.32 / 10.18</td></tr>
<tr><td></td><td>14) Top2000</td><td colspan="12"></td><td>93.35 / 59.52</td><td>2.04 / 3.78</td><td>6.33 / 10.19</td></tr>
", + "image_path": "a4c53aac335e5698a29dbdd4ed7ff61ea751638b8b6f79762245e721c5ea5d01.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 321, + 545, + 355 + ], + "lines": [ + { + "bbox": [ + 46, + 321, + 545, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 321, + 545, + 355 + ], + "type": "text", + "content": "Table 5. Analysis experiments on 3DMatch / 3DLoMatch. FOG: First order compatibility graph. SOG: Second order compatibility graph. GC: Use geometric consistency to preliminarily perform outlier rejection. MC: Search the maximum clique instead of maximal cliques. NG: Node-guided clique selection. NC: Normal consistency. CR: Clique ranking. W-SVD: Weighted SVD." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 46, + 376, + 287, + 399 + ], + "type": "text", + "content": "Graph construction choices. We test the performance of MAC by using different graph construction approaches." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "content": "As shown in Row 1 and 3 of Table 5, the registration recall obtained by using SOG is " + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "inline_equation", + "content": "1.6\\%" + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "content": " higher than using FOG when combined with FPFH, and " + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "inline_equation", + "content": "0.06\\%" + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "content": " higher when combined with FCGF on 3DMatch. Also, the registration recall obtained by using SOG is " + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "inline_equation", + "content": "0.12\\%" + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "content": " higher than using FOG when combined with FPFH, and " + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "inline_equation", + "content": "0.56\\%" + }, + { + "bbox": [ + 46, + 400, + 287, + 508 + ], + "type": "text", + "content": " higher when combined with FCGF on 3DLoMatch. Therefore, SOG is more suitable for MAC. Detailed analyzing descriptions can be found in the supplementary." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 509, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 509, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 46, + 509, + 287, + 544 + ], + "type": "text", + "content": "Maximum or maximal clique. To justify the advantages of maximal cliques, we change the search strategy of MAC to the maximum cliques and test the registration performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": "As shown in Row 1 and 9 in Table 5, applying maximal cliques surpasses maximum by " + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "inline_equation", + "content": "9.8\\%" + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": " when combined with FPFH, and " + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "inline_equation", + "content": "5.55\\%" + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": " higher when combined with FCGF on 3DMatch. Besides, the registration recall obtained by using maximal cliques is " + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "inline_equation", + "content": "8.03\\%" + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": " higher than using the maximum cliques when combined with FPFH and " + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "inline_equation", + "content": "10.45\\%" + }, + { + "bbox": [ + 46, + 545, + 288, + 714 + ], + "type": "text", + "content": " higher when combined with FCGF on 3DLoMatch. There are several reasons for this: 1) maximal cliques include the maximum cliques and additionally consider local graph constraints, so the search for maximal cliques can make use of both local and global information in the compatibility graph; 2) the maximum clique is a very tight constraint which requires maximizing the number of mutually compatible correspondences, but it does not guarantee the opti" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 376, + 351, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 376, + 351, + 386 + ], + "spans": [ + { + "bbox": [ + 305, + 376, + 351, + 386 + ], + "type": "text", + "content": "mal result." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 388, + 545, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 388, + 545, + 424 + ], + "spans": [ + { + "bbox": [ + 305, + 388, + 545, + 424 + ], + "type": "text", + "content": "Node-guided clique selection. We compare the performance with and without node-guided (NG) clique selection for maximal cliques search." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": "Comparing Row 1 and 4 in Table 5, using NG achieves a recall improvement of " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.37\\%" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " when combined with FPFH, and " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " improvement when combined with FCGF on 3DMatch. 
Also, using NG achieves a recall improvement of " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.23\\%" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " with FPFH and " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.73\\%" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " improvement with FCGF on 3DLoMatch. It is worth noting that while NG improves recall, the mean RE and mean TE are also decreasing. For example, NG reduces the mean RE by " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.1^{\\circ}" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " and the mean TE by " + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "inline_equation", + "content": "0.11\\mathrm{cm}" + }, + { + "bbox": [ + 304, + 425, + 546, + 555 + ], + "type": "text", + "content": " with FPFH on 3DLoMatch. NG effectively reduces the number of calculations in the subsequent steps and promises accurate hypotheses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 556, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 556, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 556, + 545, + 593 + ], + "type": "text", + "content": "Different approaches for clique filtering. We test the effectiveness of the two filtering methods, normal consistency and clique ranking." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "content": "1) Normal consistency: comparing Row 1 and 8 in Table 5, NC slightly degrades MAC's performance. 2) Clique ranking: Row 10 to 14 demonstrate that the registration recall tends to increase as " + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "content": " increases, suggesting that larger " + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "content": " yields a subset of cliques that generate more correct hypotheses. Remarkably, setting " + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "content": " to 100 can already achieve outstanding performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 712 + ], + "type": "text", + "content": "Employing instance-equal or weighted SVD. The comparisons of instance-equal and weighted SVD are shown in Rows 1 and 5 of Table 5." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17751" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 286, + 133 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 286, + 133 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 286, + 133 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 286, + 133 + ], + "type": "table", + "html": "
<table><thead><tr><td rowspan="3"># hypotheses</td><td colspan="4">3DMatch</td><td colspan="4">3DLoMatch</td></tr>
<tr><td colspan="2">RANSAC</td><td colspan="2">MAC</td><td colspan="2">RANSAC</td><td colspan="2">MAC</td></tr>
<tr><td>FCGF</td><td>FPFH</td><td>FCGF</td><td>FPFH</td><td>FCGF</td><td>FPFH</td><td>FCGF</td><td>FPFH</td></tr></thead>
<tbody><tr><td>100</td><td>10.45</td><td>0.76</td><td>61.94</td><td>50.67</td><td>1.25</td><td>0.05</td><td>30.47</td><td>12.22</td></tr>
<tr><td>200</td><td>20.76</td><td>1.50</td><td>119.20</td><td>89.27</td><td>2.52</td><td>0.09</td><td>55.57</td><td>17.59</td></tr>
<tr><td>500</td><td>51.74</td><td>3.68</td><td>269.06</td><td>162.41</td><td>6.21</td><td>0.21</td><td>109.32</td><td>23.32</td></tr>
<tr><td>1000</td><td>103.65</td><td>7.39</td><td>456.18</td><td>217.32</td><td>12.43</td><td>0.41</td><td>156.11</td><td>26.02</td></tr>
<tr><td>2000</td><td>208.24</td><td>14.90</td><td>669.32</td><td>254.13</td><td>24.80</td><td>0.81</td><td>202.12</td><td>29.31</td></tr></tbody></table>
", + "image_path": "339d81d471e75ee1d72867f6277c3a91726500ea413990952cf3931779285387.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 110, + 175, + 227, + 243 + ], + "blocks": [ + { + "bbox": [ + 47, + 142, + 286, + 164 + ], + "lines": [ + { + "bbox": [ + 47, + 142, + 286, + 164 + ], + "spans": [ + { + "bbox": [ + 47, + 142, + 286, + 164 + ], + "type": "text", + "content": "Table 6. Comparison of the number of correct hypotheses generated by MAC and RANSAC on 3DMatch and 3DLoMatch." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 110, + 175, + 227, + 243 + ], + "lines": [ + { + "bbox": [ + 110, + 175, + 227, + 243 + ], + "spans": [ + { + "bbox": [ + 110, + 175, + 227, + 243 + ], + "type": "table", + "html": "
<table><thead><tr><td></td><td>3DMatch RR(%)</td><td>3DLoMatch RR(%)</td></tr></thead>
<tbody><tr><td>MAC-1</td><td>98.46</td><td>91.24</td></tr>
<tr><td>MAC-5</td><td>97.10</td><td>83.32</td></tr>
<tr><td>MAC-10</td><td>96.43</td><td>77.93</td></tr>
<tr><td>MAC-20</td><td>94.70</td><td>70.47</td></tr>
<tr><td>MAC-50</td><td>91.13</td><td>56.37</td></tr>
<tr><td>MAC-origin</td><td>93.72</td><td>59.85</td></tr></tbody></table>
", + "image_path": "43db7d0c334d1bcbe2ea65456dc4beebe679b8108bc8fef2d22ca9be347ff73d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 306, + 286, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 306, + 286, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 306, + 286, + 354 + ], + "type": "text", + "content": "Weighted SVD is slightly inferior to instance-equal SVD. This suggests that samples in MACs are already very consistent, indicating no additional weighting strategies are required." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 354, + 286, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 286, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 286, + 389 + ], + "type": "text", + "content": "Varying hypothesis evaluation metrics. Here we compare three evaluation metrics, including MAE, MSE and inlier count, for MAC hypothesis evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "spans": [ + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": "As shown in Row 1, 6 and 7, MAC with MAE achieves the best performance. In Table 5, MAE achieves a recall improvement of " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "0.24\\%" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " when combined with FPFH, and " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "0.31\\%" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " improvement when combined with FCGF on 3DMatch compared with the commonly used inlier count metric. Also, MAE has a " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "1.74\\%" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " improvement when combined with FPFH, and " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "0.05\\%" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " when combined with FCGF on 3DLoMatch compared with inlier count. MAE is also very effective in reducing RE and TE. For instance, MAE reduces the mean RE by " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "0.35^{\\circ}" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " and the mean TE by " + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "inline_equation", + "content": "0.49~\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 390, + 286, + 520 + ], + "type": "text", + "content": " with FPFH on 3DLoMatch." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 521, + 286, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 286, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 286, + 568 + ], + "type": "text", + "content": "Comparison with RANSAC hypotheses. We evaluate the quality of the generated hypotheses by comparing the hypotheses from RANSAC and MAC with the ground truth transformation. The results are shown in Table 6." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 570, + 286, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 286, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 286, + 641 + ], + "type": "text", + "content": "Compared to RANSAC, which randomly selects correspondences and generates hypotheses from the correspondence set without geometric constraints, MAC effectively generates more convincing hypotheses from maximal cliques in the compatibility graph, which fully exploits the consensus information in the graph." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 642, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 286, + 712 + ], + "type": "text", + "content": "The performance upper bound of MAC. Given an ideal hypothesis evaluation metric, allowing a point cloud pair can be aligned as long as correct hypotheses can be generated. This can test the performance upper bound of MAC. We vary the judging threshold for the number of generated correct hypotheses and report the results in Table 7." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 308, + 70, + 545, + 124 + ], + "blocks": [ + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "lines": [ + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "type": "text", + "content": "Table 7. Registration recall on 3DMatch with FCGF setting based on judging MAC's hypotheses. MAC-" + }, + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "type": "text", + "content": ": a point cloud pair is considered alignable if at least " + }, + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 251, + 286, + 285 + ], + "type": "text", + "content": " hypotheses are correct." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 70, + 545, + 124 + ], + "lines": [ + { + "bbox": [ + 308, + 70, + 545, + 124 + ], + "spans": [ + { + "bbox": [ + 308, + 70, + 545, + 124 + ], + "type": "table", + "html": "
<table><thead><tr><td># correspondences</td><td>Graph Construction</td><td>Search Maximal Cliques</td><td>Node-guided Clique Selection</td><td>Pose Estimation</td><td>Total</td></tr></thead>
<tbody><tr><td>250</td><td>1.03 (14.55%)</td><td>5.24 (74.01%)</td><td>0.58 (8.19%)</td><td>0.23 (3.25%)</td><td>7.08</td></tr>
<tr><td>500</td><td>4.07 (17.54%)</td><td>15.67 (67.51%)</td><td>3.12 (13.44%)</td><td>0.35 (1.51%)</td><td>23.21</td></tr>
<tr><td>1000</td><td>16.90 (29.85%)</td><td>36.60 (64.65%)</td><td>1.88 (3.32%)</td><td>1.23 (2.18%)</td><td>56.61</td></tr>
<tr><td>2500</td><td>153.92 (53.29%)</td><td>104.03 (36.02%)</td><td>4.97 (1.72%)</td><td>25.93 (8.97%)</td><td>288.85</td></tr>
<tr><td>5000</td><td>887.03 (27.16%)</td><td>1579.61 (48.37%)</td><td>65.40 (2.00%)</td><td>733.38 (22.47%)</td><td>3265.42</td></tr></tbody></table>
", + "image_path": "77b0745206b9113b0c3e7161b1c49ac0f8427f7a7161065de733e0e006f91afe.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 133, + 545, + 163 + ], + "lines": [ + { + "bbox": [ + 305, + 133, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 305, + 133, + 545, + 163 + ], + "type": "text", + "content": "Table 8. Average consumed time (ms) per point cloud pair on the 3DMatch dataset. Predator is used for generating correspondences." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 189, + 545, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 189, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 304, + 189, + 545, + 308 + ], + "type": "text", + "content": "Impressively, MAC-1 achieves registration recalls of " + }, + { + "bbox": [ + 304, + 189, + 545, + 308 + ], + "type": "inline_equation", + "content": "98.46\\% / 91.24\\%" + }, + { + "bbox": [ + 304, + 189, + 545, + 308 + ], + "type": "text", + "content": " on 3DMatch / 3DLoMatch. This indicates that even on low overlapping datasets, MAC is able to produce correct hypotheses for most point cloud pairs. In addition, we can deduce that MAC's performance can be further improved with better hypothesis evaluation metrics. Time consumption of MAC. We employ Predator [18] to generate correspondences with different magnitudes to test the time performance of MAC. The time consumption is reported in Table 8." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 311, + 545, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 311, + 545, + 502 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 545, + 502 + ], + "type": "text", + "content": "The following observations can be made. 1) In general, MAC can complete 3D registration in only tens of milliseconds when the number of correspondences is smaller than 1000. Even with an input with 2500 correspondences, the time consumption is about 0.29 seconds. Note that MAC is implemented on the CPU only. 2) As the number of correspondences increases from 250 to 2500, there is an increase in time cost for graph construction due to " + }, + { + "bbox": [ + 304, + 311, + 545, + 502 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{SOG}" + }, + { + "bbox": [ + 304, + 311, + 545, + 502 + ], + "type": "text", + "content": " computation taking more time. 3) When the number of correspondences reaches 5000, there is a large rise in the time cost of MAC's registration. The significant increase in the input size makes the search for maximal cliques more time-consuming. However, MAC is not sensitive to the cardinality of the input correspondence set, as verified in Table 3. Hence, using sparse inputs for MAC can produce outstanding performance while making registration efficient." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "spans": [ + { + "bbox": [ + 306, + 517, + 378, + 529 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 536, + 545, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 536, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 304, + 536, + 545, + 635 + ], + "type": "text", + "content": "In this paper, we presented MAC to solve PCR by using the maximal clique constraint to generate precise pose hypotheses from correspondences. Our method achieves state-of-the-art performance on all tested datasets and can adapt to deep-learned methods to boost their performance. Limitation. As shown in Table 7 and Table 1, MAC produces accurate hypotheses but may fail to find them. In the future, we plan to develop a more convincing hypothesis evaluation technique utilizing semantic information." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 636, + 545, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 636, + 545, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 636, + 545, + 702 + ], + "type": "text", + "content": "Acknowledgments. This work is supported in part by the National Natural Science Foundation of China (NFSC) (No.U19B2037 and 62002295), Shaanxi Provincial Key R&D Program (No.2021KWZ-03), and the Fundamental Research Funds for the Central Universities (No.D5000220352)." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "17752" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 289, + 715 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 289, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 289, + 147 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 289, + 147 + ], + "type": "text", + "content": "[1] Sheng Ao, Qingyong Hu, Bo Yang, Andrew Markham, and Yulan Guo. Spinnet: Learning a general surface descriptor for 3d point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11753-11762, 2021. 1, 2, 3, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "text", + "content": "[2] Yasuhiro Aoki, Hunter Goforth, Rangaprasad Arun Srivatsan, and Simon Lucey. Pointnetlk: Robust & efficient point cloud registration using pointnet. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7163-7172, 2019. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "text", + "content": "[3] Xuyang Bai, Zixin Luo, Lei Zhou, Hongkai Chen, Lei Li, Zeyu Hu, Hongbo Fu, and Chiew-Lan Tai. 
Pointdsc: Robust point cloud registration using deep spatial consistency. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 15859-15869. IEEE, 2021. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 53, + 272, + 288, + 327 + ], + "type": "text", + "content": "[4] Xuyang Bai, Zixin Luo, Lei Zhou, Hongbo Fu, Long Quan, and Chiew-Lan Tai. D3feat: Joint learning of dense detection and description of 3d local features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6359-6367, 2020. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 328, + 288, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 328, + 288, + 361 + ], + "spans": [ + { + "bbox": [ + 53, + 328, + 288, + 361 + ], + "type": "text", + "content": "[5] Daniel Barath and Jiri Matas. Graph-cut ransac. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6733-6741, 2018. 1, 2, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 362, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 362, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 362, + 288, + 407 + ], + "type": "text", + "content": "[6] Alvaro Parra Bustos and Tat-Jun Chin. Guaranteed outlier removal for point cloud registration with correspondences. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(12):2868-2882, 2017. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 407, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 407, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 407, + 288, + 441 + ], + "type": "text", + "content": "[7] Hui Chen and Bir Bhanu. 3d free-form object recognition in range images using local surface patches. Pattern Recognition Letters, 28(10):1252-1262, 2007. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 442, + 288, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 442, + 288, + 497 + ], + "spans": [ + { + "bbox": [ + 53, + 442, + 288, + 497 + ], + "type": "text", + "content": "[8] Zhi Chen, Kun Sun, Fan Yang, and Wenbing Tao. Sc2-pcr: A second order spatial compatibility for efficient and robust point cloud registration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13221-13231, 2022. 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 498, + 288, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 498, + 288, + 542 + ], + "spans": [ + { + "bbox": [ + 53, + 498, + 288, + 542 + ], + "type": "text", + "content": "[9] Christopher Choy, Wei Dong, and Vladlen Koltun. Deep global registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2514-2523. IEEE, 2020. 1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 588 + ], + "type": "text", + "content": "[10] Christopher Choy, Jaesik Park, and Vladlen Koltun. Fully convolutional geometric features. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8958-8966, 2019. 1, 2, 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 288, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 288, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 288, + 643 + ], + "type": "text", + "content": "[11] Bertram Drost, Markus Ulrich, Nassir Navab, and Slobodan Ilic. Model globally, match locally: Efficient and robust 3d object recognition. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 998-1005. IEEE, 2010. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 646, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 288, + 690 + ], + "type": "text", + "content": "[12] David Eppstein, Maarten Löffler, and Darren Strash. Listing all maximal cliques in sparse graphs in near-optimal time. In International Symposium on Algorithms and Computation, pages 403-414. Springer, 2010. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "text", + "content": "[13] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 1, 2, 5, 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 150 + ], + "type": "text", + "content": "[14] Kexue Fu, Shaolei Liu, Xiaoyuan Luo, and Manning Wang. Robust point cloud registration framework based on deep graph matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8893-8902, 2021. 1, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 206 + ], + "type": "text", + "content": "[15] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3354-3361. IEEE, 2012. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 547, + 262 + ], + "type": "text", + "content": "[16] Zan Gojcic, Caifa Zhou, Jan D Wegner, and Andreas Wieser. The perfect match: 3d point cloud matching with smoothed densities. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5545-5554, 2019. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 264, + 547, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 547, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 547, + 319 + ], + "type": "text", + "content": "[17] Yulan Guo, Mohammed Bennamoun, Ferdous Sohel, Min Lu, and Jianwei Wan. 3d object recognition in cluttered scenes with local surface features: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(11):2270-2287, 2014. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 320, + 547, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 547, + 375 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 547, + 375 + ], + "type": "text", + "content": "[18] Shengyu Huang, Zan Gojcic, Mikhail Usvyatsov, Andreas Wieser, and Konrad Schindler. Predator: Registration of 3d point clouds with low overlap. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4267-4276, 2021. 1, 2, 3, 5, 6, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 376, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 421 + ], + "type": "text", + "content": "[19] Junha Lee, Seungwook Kim, Minsu Cho, and Jaesik Park. Deep hough voting for robust global registration. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15994-16003, 2021. 1, 2, 5, 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 453 + ], + "type": "text", + "content": "[20] Marius Leordeanu and Martial Hebert. A spectral technique for correspondence problems using pairwise constraints. 2005. 2, 5, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "spans": [ + { + "bbox": [ + 307, + 455, + 545, + 488 + ], + "type": "text", + "content": "[21] Jiayuan Li. A practical o (n2) outlier removal method for point cloud registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 490, + 545, + 533 + ], + "type": "text", + "content": "[22] Yang Li and Tatsuya Harada. Lepard: Learning partial point cloud matching in rigid and deformable scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5554-5564, 2022. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 535, + 545, + 567 + ], + "type": "text", + "content": "[23] Muyuan Lin, Varun Murali, and Sertac Karaman. A planted clique perspective on hypothesis pruning. IEEE Robotics and Automation Letters, 7(2):5167-5174, 2022. 
4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 545, + 612 + ], + "type": "text", + "content": "[24] Yu-Kai Lin, Wen-Chieh Lin, and Chieh-Chih Wang. Kclosest points and maximum clique pruning for efficient and effective 3-d laser scan matching. IEEE Robotics and Automation Letters, 7(2):1471-1477, 2022. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 657 + ], + "type": "text", + "content": "[25] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. Automatic correspondence for 3d modeling: an extensive review. International Journal of Shape Modeling, 11(02):253-291, 2005. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[26] Ajmal S Mian, Mohammed Bennamoun, and Robyn A Owens. A novel representation and feature matching algorithm for automatic pairwise registration of range images. International Journal of Computer Vision, 66(1):19-40, 2006. 5" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17753" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 137 + ], + "type": "text", + "content": "[27] G Dias Pais, Srikumar Ramalingam, Venu Madhav Govindu, Jacinto C Nascimento, Rama Chellappa, and Pedro Miraldo. 3dregnet: A deep neural network for 3d point registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7193-7203. IEEE, 2020. 1, 2, 3, 4, 5, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 139, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 139, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 139, + 287, + 183 + ], + "type": "text", + "content": "[28] Alvaro Parra, Tat-Jun Chin, Frank Neumann, Tobias Friedrich, and Maximilian Katzmann. A practical maximum clique algorithm for matching with pairwise constraints. arXiv preprint arXiv:1902.01534, 2019. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 184, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 287, + 239 + ], + "type": "text", + "content": "[29] Zheng Qin, Hao Yu, Changjian Wang, Yulan Guo, Yuxing Peng, and Kai Xu. Geometric transformer for fast and robust point cloud registration. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11143-11152, 2022. 2, 3, 4, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "text", + "content": "[30] Siwen Quan and Jiaqi Yang. Compatibility-guided sampling consensus for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 58(10):7380-7392, 2020. 1, 2, 5, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 283, + 287, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 287, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 287, + 327 + ], + "type": "text", + "content": "[31] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In IEEE International Conference on Robotics and Automation, pages 3212-3217. IEEE, 2009. 1, 2, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 327, + 287, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 360 + ], + "type": "text", + "content": "[32] Radu Bogdan Rusu and Steve Cousins. 3d is here: Point cloud library (pcl). In IEEE International Conference on Robotics and Automation, pages 1-4. IEEE, 2011. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 361, + 287, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 361, + 287, + 393 + ], + "spans": [ + { + "bbox": [ + 48, + 361, + 287, + 393 + ], + "type": "text", + "content": "[33] Ivan Sipiran and Benjamin Bustos. Harris 3d: a robust extension of the harris operator for interest point detection on 3d meshes. The Visual Computer, 27(11):963-976, 2011. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 437 + ], + "type": "text", + "content": "[34] Federico Tombari, Samuele Salti, and Luigi Di Stefano. Unique signatures of histograms for local surface description. In European Conference on Computer Vision, pages 356-369. Springer, 2010. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 438, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 438, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 438, + 287, + 491 + ], + "type": "text", + "content": "[35] Haiping Wang, Yuan Liu, Zhen Dong, and Wenping Wang. You only hypothesize once: Point cloud registration with rotation-equivariant descriptors. In Proceedings of the ACM International Conference on Multimedia, pages 1630-1641, 2022. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 525 + ], + "type": "text", + "content": "[36] Heng Yang, Jingnan Shi, and Luca Carlone. Teaser: Fast and certifiable point cloud registration. IEEE Transactions on Robotics, 37(2):314-333, 2020. 
2, 4, 5, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 526, + 287, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 287, + 558 + ], + "type": "text", + "content": "[37] Jiaqi Yang, Zhiguo Cao, and Qian Zhang. A fast and robust local descriptor for 3d point cloud registration. Information Sciences, 346:163-179, 2016. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 559, + 287, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 603 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 603 + ], + "type": "text", + "content": "[38] Jiaqi Yang, Jiahao Chen, Siwen Quan, Wei Wang, and Yanning Zhang. Correspondence selection with loose-tight geometric voting for 3d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 2022. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 603, + 287, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 287, + 658 + ], + "type": "text", + "content": "[39] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Zhaoshuai Qi, and Yanning Zhang. Sac-cot: Sample consensus by sampling compatibility triangles in graphs for 3-d point cloud registration. IEEE Transactions on Geoscience and Remote Sensing, 60:1-15, 2021. 1, 2, 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 713 + ], + "type": "text", + "content": "[40] Jiaqi Yang, Zhiqiang Huang, Siwen Quan, Qian Zhang, Yanning Zhang, and Zhiguo Cao. Toward efficient and robust metrics for ransac hypotheses and 3d rigid registration. IEEE Transactions on Circuits and Systems for Video Technology, 32(2):893-906, 2021. 1, 2, 5" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 546, + 285 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 546, + 117 + ], + "type": "text", + "content": "[41] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: A globally optimal solution to 3d icp point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015. 1, 2, 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 150 + ], + "type": "text", + "content": "[42] Jiaqi Yang, Yang Xiao, Zhiguo Cao, and Weidong Yang. Ranking 3d feature correspondences via consistency voting. Pattern Recognition Letters, 117:1-8, 2019. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 196 + ], + "type": "text", + "content": "[43] Hao Yu, Fu Li, Mahdi Saleh, Benjamin Busam, and Slobodan Ilic. Cofinet: Reliable coarse-to-fine correspondences for robust pointcloud registration. 
Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 197, + 546, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 546, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 546, + 252 + ], + "type": "text", + "content": "[44] Andy Zeng, Shuran Song, Matthias Nießner, Matthew Fisher, Jianxiong Xiao, and Thomas Funkhouser. 3dmatch: Learning local geometric descriptors from rgb-d reconstructions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1802-1811, 2017. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 253, + 546, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 546, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 546, + 285 + ], + "type": "text", + "content": "[45] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Fast global registration. In European Conference on Computer Vision, pages 766-782. Springer, 2016. 2, 5, 6" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "17754" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_content_list.json b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..679df4471fef3750e2ff669a5aa16e6a3a6b7ede --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_content_list.json @@ -0,0 +1,1398 @@ +[ + { + "type": "text", + "text": "3D Semantic Segmentation in the Wild: Generalized Models for Adverse-Condition Point Clouds", + "text_level": 1, + "bbox": [ + 284, + 104, + 818, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aoran Xiao $^{1}$ , Jiaxing Huang $^{1}$ , Weihao Xuan $^{2}$ , Ruijie Ren $^{3}$ , Kangcheng Liu $^{1}$ \nDayan Guan $^{4}$ , Abdulmotaleb El Saddik $^{4,6}$ , Shijian Lu $^{1,\\dagger}$ , Eric Xing $^{4,5}$", + "bbox": [ + 184, + 157, + 782, + 195 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Nanyang Technological University $^{2}$ Waseda University $^{3}$ Technical University of Denmark", + "bbox": [ + 125, + 195, + 843, + 212 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4Mohamed bin Zayed University of Artificial Intelligence", + "bbox": [ + 256, + 212, + 712, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{5}$ Carnegie Mellon University $^{6}$ University of Ottawa", + "bbox": [ + 279, + 229, + 684, + 247 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dd6573372529ff7b4b3604a3b40f1fbc61e55792c8263f1fc521bb1b5d55cf2f.jpg", + "image_caption": [ + "(a) A LiDAR scan captured in a snowy day" + ], + "image_footnote": [], + "bbox": [ + 81, + 265, + 473, + 404 + ], + "page_idx": 0 + 
}, + { + "type": "image", + "img_path": "images/dfe09935ad3ef25d96f760f5acfd7c0166ca105a3e08c48838dd43809ef71f78.jpg", + "image_caption": [ + "(b) Point-level annotations", + "Figure 1. We introduce SemanticSTF, an adverse-weather LiDAR point cloud dataset with dense point-level annotations that can be exploited for the study of point cloud semantic segmentation under all-weather conditions (including fog, snow, and rain). The graph on the left shows one scan sample captured on a snowy day, and the one on the right shows the corresponding point-level annotations." + ], + "image_footnote": [], + "bbox": [ + 475, + 266, + 887, + 404 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 478, + 313, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Robust point cloud parsing under all-weather conditions is crucial to level-5 autonomy in autonomous driving. However, how to learn a universal 3D semantic segmentation (3DSS) model is largely neglected as most existing benchmarks are dominated by point clouds captured under normal weather. We introduce SemanticSTF, an adverse-weather point cloud dataset that provides dense point-level annotations and allows to study 3DSS under various adverse weather conditions. We study all-weather 3DSS modeling under two setups: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data; 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenge while existing 3DSS methods encounter adverse-weather data, showing the great value of SemanticSTF in steering the future endeavor along this very meaningful research direction. In addition, we design a domain randomization technique that alternatively randomizes the geometry styles of point clouds and aggregates their embeddings, ultimately leading to a generalizable model that can improve 3DSS under various adverse weather effectively. The SemanticSTF and related codes are available at https://github.com/xiaooran/SemanticSTF.", + "bbox": [ + 75, + 510, + 472, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 478, + 632, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D LiDAR point clouds play an essential role in semantic scene understanding in various applications such as self-driving vehicles and autonomous drones. With the recent advance of LiDAR sensors, several LiDAR point cloud datasets [2, 11, 49] such as SemanticKITTI [2] have been proposed which greatly advanced the research in 3D semantic segmentation (3DSS) [19, 41, 62] for the task of point cloud parsing. As of today, most existing point cloud datasets for outdoor scenes are dominated by point clouds captured under normal weather. However, 3D vision applications such as autonomous driving require reliable 3D perception under all-weather conditions including various adverse weather such as fog, snow, and rain. 
How to learn a weather-tolerant 3DSS model is largely neglected due to the absence of related benchmark datasets.", + "bbox": [ + 496, + 505, + 893, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although several studies [3, 33] attempt to include adverse weather conditions in point cloud datasets, such as the STF dataset [3] that consists of LiDAR point clouds captured under various adverse weather, these efforts focus on object detection benchmarks and do not provide any pointwise annotations which are critical in various tasks such as 3D semantic and instance segmentation. To address this gap, we introduce SemanticSTF, an adverse-weather point cloud dataset that extends the STF Detection Benchmark by providing point-wise annotations of 21 semantic categories, as illustrated in Fig. 1. Similar to STF, SemanticSTF cap", + "bbox": [ + 496, + 734, + 895, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author", + "bbox": [ + 101, + 886, + 230, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "9382", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tures four typical adverse weather conditions that are frequently encountered in autonomous driving including dense fog, light fog, snow, and rain.", + "bbox": [ + 76, + 90, + 468, + 137 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "SemanticSTF provides a great benchmark for the study of 3DSS and robust point cloud parsing under adverse weather conditions. Beyond serving as a well-suited test bed for examining existing fully-supervised 3DSS methods that handle adverse-weather point cloud data, SemanticSTF can be further exploited to study two valuable weather-tolerant 3DSS scenarios: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data, and 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenges faced by existing 3DSS methods while processing adverse-weather point cloud data, highlighting the significant value of SemanticSTF in guiding future research efforts along this meaningful research direction.", + "bbox": [ + 75, + 148, + 470, + 361 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In addition, we design PointDR, a new baseline framework for the future study and benchmarking of all-weather 3DSS. Our objective is to learn robust 3D representations that can reliably represent points of the same category across different weather conditions while remaining discriminative across categories. However, robust all-weather 3DSS poses two major challenges: 1) LiDAR point clouds are typically sparse, incomplete, and subject to substantial geometric variations and semantic ambiguity. These challenges are further exacerbated under adverse weather conditions, with many missing points and geometric distortions due to fog, snow cover, etc. 2) More noises are introduced under adverse weather due to snow flicks, rain droplets, etc. 
PointDR addresses the challenges with two iterative operations: 1) Geometry style randomization that expands the geometry distribution of point clouds under various spatial augmentations; 2) Embedding aggregation that introduces contrastive learning to aggregate the encoded embeddings of the randomly augmented point clouds. Despite its simplicity, extensive experiments over point clouds of different adverse weather conditions show that PointDR achieves superior 3DSS generalization performance.", + "bbox": [ + 75, + 375, + 470, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The contribution of this work can be summarized in three major aspects. First, we introduce SemanticSTF, a large-scale adverse-weather point cloud benchmark that provides high-quality point-wise annotations of 21 semantic categories. Second, we design PointDR, a point cloud domain randomization baseline that can be exploited for future study and benchmarking of 3DSS under all-weather conditions. Third, leveraging SemanticSTF, we benchmark existing 3DSS methods over two challenging tasks on domain adaptive 3DSS and domain generalized 3DSS. The benchmarking efforts lay a solid foundation for future research on this highly meaningful problem.", + "bbox": [ + 75, + 719, + 468, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 89, + 650, + 106 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D semantic segmentation aims to assign point-wise semantic labels for point clouds. It has been developed rapidly over the past few years, largely through the development of various deep neural networks (DNNs) such as standard convolutional network for projection-based methods [9, 30, 46, 50, 59], multi-layer perceptron (MLP)-based networks [19, 34, 34], 3D voxel convolution-based networks [7, 62], or hybrid networks [6, 27, 41, 51, 57]. While existing 3DSS networks are mainly evaluated over normal weather point clouds, their performance for adverse weather point clouds is far under-investigated. The proposed SemanticSTF closes the gap and provides a solid ground for the study and evaluation of all-weather 3DSS. By enabling investigations into various new research directions, SemanticSTF represents a valuable tool for advancing the field.", + "bbox": [ + 496, + 114, + 890, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Vision recognition under adverse conditions. Scene understanding under adverse conditions has recently attracted increasing attention due to the strict safety demand in various outdoor navigation and perception tasks. In 2D vision, several large-scale datasets have been proposed to investigate perceptions tasks in adverse visual conditions including localization [29], detection [56], and segmentation [36]. On the other hand, learning 3D point clouds of adverse conditions is far under-explored due to the absence of comprehensive dataset benchmarks. The recently proposed datasets such as STF [3] and CADC [33] contain LiDAR point clouds captured under adverse weather conditions. However, these studies focus on the object detection task [15, 16] with bounding-box annotations, without providing any point-wise annotations. 
Our introduced SemanticSTF is the first large-scale dataset that consists of LiDAR point clouds in adverse weather conditions with high-quality dense annotations, to the best of our knowledge.", + "bbox": [ + 496, + 340, + 890, + 612 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Domain generalization [4,31] aims to learn a generalizable model from single or multiple related but distinct source domains where target data is inaccessible during model learning. It has been widely studied in 2D computer vision tasks [1, 21, 26, 61] while few studies explore it in point cloud learning. Recently, [25] studies domain generalization for 3D object detection by deforming point clouds via vector fields. Differently, this work is the first attempt that explores domain generalization for 3DSS.", + "bbox": [ + 496, + 613, + 890, + 748 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unsupervised domain adaptation is a method of transferring knowledge learned from a labeled source domain to a target domain by leveraging the unlabeled target data. It has been widely studied in 2D image learning [12,14,20,22-24] and 3D point clouds [15, 16, 28, 39, 52, 53, 58]. Recently, domain adaptive 3D LiDAR segmentation has drawn increasing attention due to the challenge in point-wise annotation. Different UDA approaches have been designed to mitigate discrepancies across LiDAR point clouds of different domains. For example, [46, 60] project point", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "9383", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "clouds into depth images and leverage 2D UDA techniques while [37, 48, 49, 55] directly work in the 3D space. However, these methods either work for synthetic-to-real UDA scenarios [46, 49] or normal-to-normal point cloud adaptation [55], ignoring normal-to-adverse adaptation which is highly practical in real applications. Our SemanticSTF dataset fills up this blank and will inspire more development of new algorithms for normal-to-adverse adaptation.", + "bbox": [ + 75, + 90, + 470, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. The SemanticSTF Dataset", + "text_level": 1, + "bbox": [ + 76, + 227, + 321, + 242 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background", + "text_level": 1, + "bbox": [ + 76, + 253, + 209, + 268 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LiDAR sensors send out laser pulses and measure their flight time based on the echoes it receives from targets. The travel distance as derived from the time-of-flight and the registered angular information (between the LiDAR sensors and the targets) can be combined to compute the 3D coordinates of target surface which form point clouds that capture the 3D shape of the targets. However, the active LiDAR pulse system can be easily affected by the scattering media such as particles of rain droplets and snow [10, 18, 32, 35], leading to shifts of measured distances, variation of echo intensity, point missing, etc. Hence, point clouds captured under adverse weather usually have clear distribution discrepancy as compared with those collected under normal weather as illustrated in Fig. 1. However, existing 3DSS benchmarks are dominated by normal-weather point clouds which are insufficient for the study of universal 3DSS under all-weather conditions. 
To this end, we propose SemanticSTF, a point-wise annotated large-scale adverse-weather dataset that can be explored for the study of 3DSS and point cloud parsing under various adverse weather conditions.", + "bbox": [ + 75, + 276, + 470, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Data Selection and Split", + "text_level": 1, + "bbox": [ + 76, + 589, + 299, + 606 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We collect SemanticSTF by leveraging the STF benchmark [3], a multi-modal adverse-weather dataset that was jointly collected in Germany, Sweden, Denmark, and Finland. The data in STF have multiple modalities including LiDAR point clouds and they are collected under various adverse weather conditions such as snow and fog. However, STF provides bounding-box annotations only for the study of 3D detection tasks. In SemanticSTF, we manually selected 2,076 scans captured by a Velodyne HDL64 S3D LiDAR sensor from STF that cover various adverse weather conditions including 694 snowy, 637 dense-foggy, 631 light-foggy, and 114 rainy (all rainy LiDAR scans in STF). During the selection, we pay special attention to the geographical diversity of the point clouds aiming for minimizing data redundancy. We ignore the factor of daytime/nighttime since LiDAR sensors are robust to lighting conditions. We split SemanticSTF into three parts including 1,326 full 3D scans for training, 250 for validating, and 500 for testing. All three splits have approximately the same", + "bbox": [ + 75, + 613, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "proportion of LiDAR scans of different adverse weathers.", + "bbox": [ + 498, + 90, + 879, + 106 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Data Annotation", + "text_level": 1, + "bbox": [ + 500, + 118, + 666, + 132 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Point-wise annotation of LiDAR point clouds is an extremely laborious task due to several factors, such as 3D view changes, inconsistency between point cloud display and human visual perception, sweeping occlusion, point sparsity, etc. However, point-wise annotating of adverse-weather point clouds is even more challenging due to two new factors. First, the perceived distance shifts under adverse weather often lead to various geometry distortions in the collected points which make them different from those collected under normal weather. This presents significant challenges for annotators who must recognize various objects and assign a semantic label to each point. Second, LiDAR point clouds collected under adverse weather often contain a significant portion of invalid regions that consist of indiscernible semantic contents (e.g., thick snow cover) that make it difficult to identify the ground type. The existence of such invalid regions makes point-wise annotation even more challenging.", + "bbox": [ + 496, + 142, + 890, + 414 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We designed a customized labeling pipeline to handle the annotation challenges while performing point-wise annotation of point clouds in SemanticSTF. Specifically, we first provide labeling instructions and demo annotations and train a team of professional annotators to provide pointwise annotations of a set of selected STF LiDAR scans. 
To achieve reliable high-quality annotations, the annotators leverage the corresponding 2D camera images and Google Street views as extra references while identifying the category of each point in this initial annotation process. After that, the annotators cross-check their initial annotations for identifying and correcting labeling errors. At the final stage, we engaged professional third parties who provide another round of annotation inspection and correction.", + "bbox": [ + 496, + 415, + 890, + 626 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Annotation of SemanticSTF is a highly laborious and time-consuming task. For instance, while labeling downtown areas with the most complex scenery, it took an annotator an average of 4.3 hours to label a single LiDAR scan. Labeling a scan captured in a relatively simpler scenery, such as a highway, also takes an average of 1.6 hours. In addition, an additional 30-60 minutes are required per scan for verification and correction by professional third parties. In total, annotating the entire SemanticSTF dataset takes over 6,600 man-hours.", + "bbox": [ + 496, + 627, + 890, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While annotating SemanticSTF, we adopted the same set of semantic classes as in the widely-studied semantic segmentation benchmark, SemanticKITTI [2]. Specifically, we annotate the 19 evaluation classes of SemanticKITTI, which encompass most traffic-related objects in autonomous driving scenes. Additionally, following [36], we label points with indiscernible semantic contents caused by adverse weather (e.g. ground covered by snowdrifts) as invalid. Fur", + "bbox": [ + 496, + 779, + 890, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "9384", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/37c17fa2b35ea99bbfcdceb124dba55eb60b3753b7e85a3e8d7da6bc109af458.jpg", + "image_caption": [ + "Figure 2. Number of annotated points per class in SemanticSTF." + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 460, + 141 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "thermore, we label points that do not belong to the 20 categories or are indistinguishable as ignored, which are not utilized in either training or evaluations. Detailed descriptions of each class can be found in the appendix.", + "bbox": [ + 76, + 194, + 468, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Data Statistics", + "text_level": 1, + "bbox": [ + 76, + 266, + 225, + 281 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "SemanticSTF consists of point-wise annotations of 21 semantic categories, and Fig. 2 shows the detailed statistics of the point-wise annotations. It can be seen that classes road, sidewalk, building, vegetation, and terrain appear most frequently whereas classes motor, motorcyclist, and bicyclist have clearly lower occurrence frequency. Such class imbalance is largely attributed to the various object sizes and unbalanced distribution of object categories in transportation scenes, and it is also very common in many existing benchmarks. 
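The label set described above can be summarised compactly: the 19 SemanticKITTI evaluation classes, an extra invalid class for weather-induced indiscernible content, and an ignored label that is excluded from both training and evaluation. The sketch below illustrates this convention; the integer id chosen for the ignored label is our assumption, not part of the dataset specification.

```python
import numpy as np

# 19 SemanticKITTI evaluation classes kept by SemanticSTF, plus "invalid"
# for indiscernible content under adverse weather (e.g. thick snow cover).
CLASSES = [
    "car", "bicycle", "motorcycle", "truck", "other-vehicle", "person",
    "bicyclist", "motorcyclist", "road", "parking", "sidewalk", "other-ground",
    "building", "fence", "vegetation", "trunk", "terrain", "pole",
    "traffic-sign", "invalid",
]
IGNORED = 255  # id reserved here (our choice) for points excluded from training and evaluation

def keep_mask(labels):
    """Boolean mask of points that participate in the losses and metrics."""
    return np.asarray(labels) != IGNORED

labels = np.array([0, 8, IGNORED, 19])   # car, road, ignored, invalid
print(keep_mask(labels))                 # [ True  True False  True]
```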
Overall, the statistics and distribution of different object categories are similar to that of other 2D and 3D semantic segmentation benchmarks such as Cityscapes [8], ACDC [36], and SemanticKITTI [2].", + "bbox": [ + 75, + 290, + 468, + 487 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To the best of our knowledge, SemanticSTF is the first large-scale adverse-weather 3DSS benchmark that provides high-quality point-wise annotations. Table 1 compares it with several existing point cloud datasets that have been widely adopted for the study of 3D detection and semantic segmentation. We can observe that existing datasets are either collected under normal weather conditions or collected for object detection studies with bounding-box annotations only. 3DSS benchmark under adverse weather is largely blank, mainly due to the great challenge in point-wise annotations of adverse-weather point clouds as described in previous subsections. From this sense, SemanticSTF fills up this blank by providing a large-scale benchmark and test bed which will be very useful to future research in universal 3DSS under all weather conditions.", + "bbox": [ + 75, + 487, + 470, + 714 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.5. Data illustration", + "text_level": 1, + "bbox": [ + 76, + 724, + 238, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 3 provides examples of point cloud scans captured under adverse weather conditions in SemanticSTF (in row 1) as well as the corresponding annotations (in row 2). Compared with normal-weather point clouds, point clouds captured under adverse weather exhibit four distinct properties: 1) Snow coverage and snowflakes under snowy weather introduce many white points (labeled as “invalid”) as illustrated in Fig. 3(a). The thick snow coverage may lead to object deformation as well; Rainy conditions may cause specular reflection of laser signals from water on the ground", + "bbox": [ + 75, + 750, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/16eb34ed027cc607142f09ef6a21d7f4dd7ac60674f1bf7b1c1f9fec7863ea7d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset#ClsTypeAnnotationFogRainSnow
KITTI [13]8realbounding boxXXX
nuScenes [5]23realbounding boxXXX
Waymo [40]4realbounding boxXXX
STF [3]5realbounding box✓✓✓
SemanticKITTI [2]25realpoint-wiseXXX
nuScenes-LiDARSeg [11]32realpoint-wiseXXX
Waymo-LiDARSeg [40]21realpoint-wiseXXX
SynLiDAR [49]32synth.point-wiseXXX
SemanticSTF (ours)21realpoint-wise✓✓✓
", + "bbox": [ + 501, + 88, + 890, + 213 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1. Comparison of SemanticSTF against existing outdoor LiDAR benchmarks. #Cls means the class number.", + "bbox": [ + 498, + 223, + 890, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and produce many noise points as shown in Fig.3(b); 3) Dense fog may greatly reduce the working range of LiDAR sensors, leading to small spatial distribution of the collected LiDAR points as illustrated in Fig. 3(c); 4) Point clouds under light fog have similar characteristics as normal-weather point clouds as illustrated in Fig. 3(d). The distinct properties of point clouds under different adverse weather introduce different types of domain shift from normal-weather point clouds which complicate 3DSS greatly as discussed in Section 5. They also verify the importance of developing universal 3DSS models that can perform well under all weather conditions.", + "bbox": [ + 496, + 277, + 890, + 459 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Point Cloud Domain Randomization", + "text_level": 1, + "bbox": [ + 498, + 474, + 828, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Leveraging SemanticSTF, we explore domain generalization (DG) for semantic segmentation of LiDAR point clouds under all weather conditions. Specifically, we design PointDR, a domain randomization technique that helps to train a generalizable segmentation model from normal-weather point clouds that can work well for adverse-weather point clouds in SemanticSTF.", + "bbox": [ + 496, + 500, + 890, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Problem Definition", + "text_level": 1, + "bbox": [ + 500, + 616, + 683, + 631 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given labeled point clouds of a source domain $S = \\{S_{k} = \\{x_{k},y_{k}\\} \\}_{k = 1}^{K}$ where $x$ represents a LiDAR point cloud scan and $y$ denotes its point-wise semantic annotations, the goal of domain generalization is to learn a segmentation model $F$ by using the source-domain data only that can perform well on point clouds from an unseen target domain $\\mathcal{T}$ . We consider a 3D point cloud segmentation model $F$ that consists of a feature extractor $E$ and a classifier $G$ . Note under the setup of domain generalization, target data will not be accessed in training as they could be hard and even impossible to acquire at the training stage.", + "bbox": [ + 496, + 638, + 890, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Point Cloud Domain Randomization", + "text_level": 1, + "bbox": [ + 500, + 816, + 815, + 830 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Inspired by domain randomization studies in 2D computer vision research [42, 43], we explore how to employ domain randomization for learning domain generalizable models for point clouds. Specifically, we design PointDR,", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "9385", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a929f1221f0c6ee3a050568ecc05d1ceca8a6396fbc6d3a183b9eeabbf1b090b.jpg", + "image_caption": [ + "Figure 3. Examples of LiDAR point cloud scans captured under different adverse weather including snow, rain, dense fog, and light fog (the first row) and corresponding dense annotations in SemanticSTF (the second row)." 
+ ], + "image_footnote": [], + "bbox": [ + 83, + 97, + 885, + 327 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d86e1b05e3aa8f63417fdc99559944bb2afe80eeb61768e3b7bf6ca0472bc7cd.jpg", + "image_caption": [ + "Figure 4. The framework of our point cloud randomization method (PointDR): Geometry style randomization creates different point cloud views with various spatial perturbations while embedding aggregation encourages the feature extractor to aggregate randomized point embeddings to learn perturbation-invariant representations, ultimately leading to a generalizable segmentation model." + ], + "image_footnote": [], + "bbox": [ + 86, + 375, + 460, + 539 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a point cloud randomization technique that consists of two complementary designs including geometry style randomization and embedding aggregation as illustrated in Fig. 4.", + "bbox": [ + 75, + 643, + 468, + 688 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Geometry style randomization aims to enrich the geometry styles and expand the distribution of training point cloud data. Given a point-cloud scan $x$ as input, we apply weak and strong spatial augmentation to obtain two copies of $x$ including a weak-view $x^w = \\mathcal{A}^W(x)$ and a strong-view $x^s = \\mathcal{A}^S(x)$ . For the augmentation schemes of $\\mathcal{A}^W$ , we follow existing supervised learning methods [41] and adopt the simple random rotation and random scaling. While for the augmentation schemes of $\\mathcal{A}^S$ , we further adopt random dropout, random flipping, random noise perturbation, and random jittering on top of $\\mathcal{A}^W$ to obtain a more diverse and complex copy of the input point cloud scan $x$ .", + "bbox": [ + 75, + 688, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Embedding aggregation aims to aggregate encoded embeddings of randomized point clouds for learning domain-", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "invariant representations. We adopt contrastive learning [17] as illustrated in Fig. 4. Given the randomized point clouds $x^{w}$ and $x^{s}$ , we first feed them into the feature extractor $E$ and a projector $\\mathcal{P}$ (a two-layer MLP) which outputs normalized point feature embeddings $f^{w}$ and $f^{s}$ , respectively $(f = \\mathcal{P}(E(x)))$ . $\\overline{f}_C^w \\in \\mathbb{R}^{D \\times C}$ ( $D$ : feature dimension; $C$ : number of semantic classes) is then derived by class-wise averaging the feature embeddings $f^{w}$ in a batch, which is stored in a memory bank $\\mathcal{B} \\in \\mathbb{R}^{D \\times C}$ that has no backpropagation and is momentum updated by iterations (i.e., $\\mathcal{B} \\gets m \\times \\mathcal{B} + (1 - m) \\times \\overline{f}_C^w$ with a momentum coefficient $m$ ). Finally, we employ each point feature embedding $f_{i}^{s}$ of the strong-view $f^{s}$ as query and feature embeddings in $\\mathcal{B}$ as keys for contrastive learning, where the key sharing the same semantic class as the query is positive key $\\mathcal{B}_{+}$ and the rest are negative keys. 
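A compact PyTorch sketch of the two components described above is given below. The augmentation magnitudes, the stand-in random embeddings used in place of the projector output, the id used for ignored points, and all function names are our assumptions rather than the released PointDR implementation; the contrastive term is the temperature-scaled class-prototype softmax stated in Eq. (1) immediately after this sketch, and it is later combined with the supervised cross-entropy term as in Eq. (2).

```python
import math
import torch
import torch.nn.functional as F

def weak_aug(pts):
    """A^W: random rotation about z plus random global scaling."""
    a = 2 * math.pi * torch.rand(1).item()
    rot = torch.tensor([[math.cos(a), -math.sin(a), 0.0],
                        [math.sin(a),  math.cos(a), 0.0],
                        [0.0, 0.0, 1.0]])
    return (pts @ rot.T) * (0.95 + 0.1 * torch.rand(1).item())

def strong_aug(pts, labels, ignored=255):
    """A^S: the weak view plus random dropout, flipping, noise points and jittering."""
    pts = weak_aug(pts)
    keep = torch.rand(len(pts)) > 0.1                          # random point dropout
    pts, labels = pts[keep], labels[keep]
    if torch.rand(1).item() < 0.5:                             # random flip along x
        pts = pts * torch.tensor([-1.0, 1.0, 1.0])
    pts = pts + 0.01 * torch.randn_like(pts)                   # random jittering
    noise = 50.0 * (torch.rand(30, 3) - 0.5)                   # injected noise points get the
    labels = torch.cat([labels, torch.full((30,), ignored)])   # "ignored" label (our choice)
    return torch.cat([pts, noise]), labels

def update_bank(bank, f_w, labels_w, m=0.99):
    """B <- m*B + (1-m)*class-mean(f_w): momentum-updated class prototypes, no gradients."""
    with torch.no_grad():
        for c in range(bank.shape[1]):
            sel = labels_w == c
            if sel.any():                                      # absent classes keep old prototype
                bank[:, c] = m * bank[:, c] + (1 - m) * F.normalize(f_w[sel].mean(0), dim=0)
    return bank

def contrastive_loss(f_s, labels_s, bank, tau=0.1):
    """Eq. (1): pull each strong-view embedding towards its class prototype in the bank."""
    valid = labels_s < bank.shape[1]                           # drop "ignored" points
    return F.cross_entropy((f_s[valid] @ bank) / tau, labels_s[valid])

# One illustrative step on stand-in data; in PointDR the normalised embeddings f_w, f_s
# come from the shared feature extractor E and projector P applied to the two views.
D, C, N = 32, 19, 1000
bank = F.normalize(torch.randn(D, C), dim=0)
pts, lbl = torch.randn(N, 3), torch.randint(0, C, (N,))
x_w = weak_aug(pts)
x_s, lbl_s = strong_aug(pts, lbl)
f_w = F.normalize(torch.randn(len(x_w), D), dim=1)
f_s = F.normalize(torch.randn(len(x_s), D), dim=1)
bank = update_bank(bank, f_w, lbl)
loss_ct = contrastive_loss(f_s, lbl_s, bank)                   # later combined with the
print(float(loss_ct))                                          # supervised term as in Eq. (2)
```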
The contrastive loss is defined as", + "bbox": [ + 496, + 375, + 892, + 617 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {c t} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} - \\log \\frac {\\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {+} / \\tau\\right)}{\\sum_ {j = 1} ^ {C} \\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {j} / \\tau\\right)} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 633, + 890, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\tau$ is a temperature hyper-parameter [47]. Note there is no back-propagation for the \"ignore\" class in optimizing the contrastive loss.", + "bbox": [ + 496, + 685, + 890, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Contrastive learning pulls point feature embeddings of the same classes closer while pushing away point feature embeddings of different classes. Therefore, optimizing the proposed contrastive loss will aggregate randomized point cloud features and learn perturbation-invariant representations, ultimately leading to a robust and generalizable segmentation model. The momentum-updated memory bank provides feature prototypes of each semantic class for more robust and stable contrastive learning.", + "bbox": [ + 496, + 732, + 892, + 868 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Combining the supervised cross-entropy loss $\\mathcal{L}_{ce}$ for weakly-augmented point clouds in Eq. 1, the overall train", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "9386", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/6910c743257b5cafc1481209b88c8ed982c3d4483a591d3ec3c823ea0b5890bf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methodscarbi.clemt.cletruckoth-v.pers.bi.clstmt.clstroadparki.sidew.oth-g.build.fenceveget.trunkterra.poletraf.D-fogL-fogRainSnowmIoU
Oracle89.442.10.059.961.269.639.00.082.221.558.245.686.163.680.252.077.650.161.751.954.657.953.754.7
SemanticKITTI→SemanticSTF
Baseline55.90.00.21.910.910.36.00.061.210.932.00.067.941.649.827.940.829.617.529.526.028.421.424.4
Dropout [38]62.10.015.53.011.55.42.00.058.412.826.71.172.143.652.934.243.528.415.529.325.629.424.825.7
Perturbation74.40.00.023.30.619.70.00.060.310.833.90.772.045.258.717.542.422.19.726.327.830.024.525.9
PolarMix [48]57.81.83.816.73.726.50.02.065.72.932.50.371.048.753.820.545.425.915.829.725.028.625.626.0
MMD [26]63.60.02.60.111.428.10.00.067.014.137.90.367.341.257.127.447.928.216.230.428.132.825.226.9
PCL [54]65.90.00.017.70.48.40.00.059.612.035.01.674.047.560.715.848.926.127.528.927.630.124.626.4
PointDR (Ours)67.30.04.519.69.018.82.70.062.612.938.10.673.343.856.432.245.728.727.431.329.731.926.228.6
SynLiDAR→SemanticSTF
Baseline27.13.00.615.80.125.21.85.623.90.314.60.636.319.937.917.941.89.52.316.917.217.211.915.0
Dropout [38]28.03.01.49.60.017.10.80.734.26.819.10.135.519.142.317.636.014.02.815.316.620.414.015.2
Perturbation27.12.32.316.00.123.71.24.027.03.616.20.829.216.735.322.738.317.95.116.316.719.313.415.2
PolarMix [48]39.21.11.28.31.517.80.80.723.31.317.50.445.224.846.220.138.77.61.916.115.519.215.615.7
MMD [26]25.52.32.113.20.722.11.47.530.80.417.60.230.919.737.619.343.59.92.617.316.320.012.715.1
PCL [54]30.90.81.410.00.423.34.07.928.51.317.71.239.418.540.016.038.612.12.317.816.719.314.115.5
PointDR (Ours)37.82.52.423.60.126.32.23.327.97.717.50.547.625.345.721.037.517.95.519.519.921.116.918.5
", + "bbox": [ + 83, + 88, + 885, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Experiments on domain generalization with SemanticKITTI [2] or SynLiDAR [49] as source and SemanticSTF as target.", + "bbox": [ + 99, + 375, + 866, + 388 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ing objective of PointDR can be formulated by:", + "bbox": [ + 76, + 416, + 392, + 431 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {P o i n t D R}} = \\mathcal {L} _ {c e} + \\lambda_ {c t} \\mathcal {L} _ {c t} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 440, + 468, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Evaluation of Semantic Segmentation", + "text_level": 1, + "bbox": [ + 76, + 465, + 415, + 483 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SemanticSTF can be adopted for benchmarking different learning setups and network architectures on point cloud segmentation. We perform experiments over two typical learning setups including domain generalization and unsupervised domain adaptation. In addition, we evaluate several state-of-the-art point-cloud segmentation networks to examine their generalization capabilities.", + "bbox": [ + 75, + 491, + 468, + 597 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Domain Generalization", + "text_level": 1, + "bbox": [ + 76, + 606, + 294, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first study domain generalizable point cloud segmentation. For DG, we can only access an annotated source domain during training and the trained model is expected to generalize well to unseen target domains. Leveraging SemanticSTF, we build two DG benchmarks and examine how PointDR helps learn a universal 3DSS model that can work under different weather conditions.", + "bbox": [ + 75, + 628, + 468, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The first benchmark is SemanticKITTI [2] $\\rightarrow$ SemanticSTF where SemanticKITTI is a large-scale real-world 3DSS dataset collected under normal weather conditions. This benchmark serves as a solid testing ground for evaluating domain generalization performance from normal to adverse weather conditions. The second benchmark is SynLiDAR [49] $\\rightarrow$ SemanticSTF where SynLiDAR is a largescale synthetic 3DSS dataset. The motivation of this benchmark is that learning a universal 3DSS model from synthetic point clouds that can work well across adverse weather is of high research and application value considering the", + "bbox": [ + 75, + 734, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "challenges in point cloud collection and annotation. Note this benchmark is more challenging as the domain discrepancy comes from both normal-to-adverse weather distribution shift and synthetic-to-real distribution shift.", + "bbox": [ + 496, + 416, + 890, + 474 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setup. We use all 19 evaluating classes of SemanticKITTI in both domain generalization benchmarks. The category of invalid in SemanticSTF is mapped to the ignored since SemanticKITTI and SynLiDAR do not cover this category. We adopt MinkowskiNet [7] (with TorchSparse library [41]) as the backbone model, which is a sparse convolutional network that provides state-of-the-art performance with decent efficiency. 
We adopt the evaluation metrics of Intersection over the Union (IoU) for each segmentation class and the mean IoU (mIoU) over all classes. All experiments are run over a single NVIDIA 2080Ti (11GB). More implementation details are provided in the appendix.", + "bbox": [ + 496, + 476, + 892, + 656 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baseline Methods. Since domain generalizable 3DSS is far under-explored, there is little existing baseline that can be directly adopted for benchmarking. We thus select two closely related approaches as baseline to evaluate the proposed PointDR. The first approach is data augmentation and we select three related augmentation methods including Dropout [38] that randomly drops out points to simulate LiDAR points missing in adverse weather, Noise perturbation that adds random points in the 3D space to simulate noise points as introduced by particles like falling snow, and PolarMix [48] that mixes point clouds of different sources for augmentation. The second approach is to adapt 2D domain generalization methods for 3DSS. We select two 2D domain generalization methods including the widely studied MMD [26] and the recently proposed PCL [54].", + "bbox": [ + 496, + 657, + 890, + 883 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. Table 2 shows experimental results over the validation", + "bbox": [ + 500, + 885, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "9387", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/b5272c3a318d65b80948e1a47f50d342eb5a47216bd3e17cf57cc9f4424c59ff.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method$\mathcal{L}_{ce}$$\mathcal{L}_{ct}$$\mathcal{B}$mIoU
Baseline24.4
PointDR-CT27.4
PointDR28.6
", + "bbox": [ + 125, + 88, + 419, + 152 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. Ablation study of PointDR over domain generalized segmentation task SemanticKITTI $\\rightarrow$ SemanticSTF.", + "bbox": [ + 76, + 162, + 467, + 189 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion set of SemanticSTF. For both benchmarks, the Baseline is a source-only model that is trained by using the training data of SemanticKITTI or SynLiDAR. We can see that the Baseline achieves very low mIoU while evaluated over the validation set of SemanticSTF, indicating the large domain discrepancy between point clouds of normal and adverse weather conditions. In addition, all three data augmentation methods improve the model generalization consistently but the performance gains are limited especially for the challenging benchmark SynLiDAR $\\rightarrow$ SemanticSTF. The two 2D generalization methods both help SemanticKITTI $\\rightarrow$ SemanticSTF clearly but show very limited improvement over SynLiDAR $\\rightarrow$ SemanticSTF. The proposed PointDR achieves the best generalization consistently across both benchmarks, demonstrating its superior capability to learn perturbation-invariant point cloud representations and effectiveness while handling all-weather 3DSS tasks.", + "bbox": [ + 75, + 219, + 467, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also evaluate the compared domain generalization methods over each individual adverse weather condition as shown in Table 2. It can be observed that the three data augmentation methods work for data captured in rainy and snowy weather only. The 2D generalization method MMD shows clear effectiveness for point clouds under dense fog and rain while PCL works for point clouds under rainy and snowy weather instead. We conjecture that the performance variations are largely attributed to the different properties of point clouds captured under different weather conditions. For example, more points are missing in rain while object points often deform due to the covered snow (more illustrations are provided in the appendix). Such data variations lead to different domain discrepancies across weather which further leads to different performances of the compared methods. As PointDR learns perturbation-tolerant representations, it works effectively across different adverse weather conditions. We also provide qualitative results, please refer to the appendix for details.", + "bbox": [ + 75, + 477, + 467, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation study. We study different PointDR designs to examine how they contribute to the overall generalization performance. As Table 3 shows, we report three models over the benchmark \"SemanticKITTI $\\rightarrow$ SemanticSTF\": 1) Baseline that is trained with $\\mathcal{L}_{ce}$ . 2) PointDR-CT that is jointly trained with $\\mathcal{L}_{ce}$ and $\\mathcal{L}_{ct}$ without using the memory bank $\\mathcal{B}$ . 3) The complete PointDR that is trained with $\\mathcal{L}_{ce}$ , $\\mathcal{L}_{ct}$ and the memory bank $\\mathcal{B}$ . We evaluate the three models over the validation set of SemanticSTF and Table 3", + "bbox": [ + 75, + 763, + 467, + 898 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "shows experimental results. We can see that the Baseline performs poorly at $24.4\\%$ due to clear domain discrepancy between point clouds of normal weather and adverse weather. 
Leveraging the proposed contrastive loss, $\\mathcal{L}_{ct}$ achieves clearly better performance at $27.4\\%$ , indicating that learning perturbation-invariance is helpful for universal LiDAR segmentation of all-weather conditions. On top of that, introducing the momentum-updated memory bank $\\mathcal{B}$ further improves the segmentation performance at $28.6\\%$ . This is because the feature embeddings in $\\mathcal{B}$ serve as the class prototypes which help the optimization of the segmentation network, finally leading to more robust representations of 3DSS that perform better over adverse weather point clouds.", + "bbox": [ + 496, + 90, + 890, + 303 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Domain Adaptation", + "text_level": 1, + "bbox": [ + 500, + 316, + 687, + 330 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also study SemanticSTF over a domain adaptive point cloud segmentation benchmark SemanticKITTI $\\rightarrow$ SemanticSTF. Specifically, we select four representative UDA methods including ADDA [44], entropy minimization (Ent-Min) [45], self-training [63], and CoSMix [37] for adaptation from the source SemanticKITTI [2] toward the target SemanticSTF. Following the state-of-the-art [37, 48, 49] on synthetic-to-real adaptation, we adopt MinkowskiNet [7] as the segmentation backbone for all compared methods. Table 4 shows experimental results over the validation set of SemanticSTF. We can see that all UDA methods outperform the Source-only consistently under the normal-to-adverse adaptation setup. At the other end, the performance gains are still quite limited, showing the great improvement space along domain adaptive 3DSS from normal to adverse weather conditions.", + "bbox": [ + 496, + 340, + 890, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In addition, we examined the adaptability of the four UDA methods in relation to each individual adverse weather condition. Specifically, we trained each of the four methods for adaptation from SemanticKITTI to SemanticSTF data for each adverse weather condition. Table 5 shows the experimental results over the validation set of SemanticSTF. We can see all four methods outperform the Source-only method under Dense-fog and Light-fog, demonstrating their effectiveness in mitigating domain discrepancies. However, for rain and Snow, only CoSMix achieved marginal performance gains while the other three UDA methods achieved limited performance improvements. We conjecture that snow and rain introduce large deformations on object surfaces or much noise, making adaptation from normal to adverse weather more challenging. CoSMix works in the input space by directly mixing source and target points, allowing it to perform better under heavy snow and rain which have larger domain gaps. However, all methods achieved relatively low segmentation performance, indicating the significance of our research and the large room for improvement in our constructed benchmarks.", + "bbox": [ + 496, + 583, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "9388", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6188afe4fb3c697d1f64308353702a6823d49f98b6af818723eb7f297d31b2f2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Methodscarbi.clemt.cletruckoth-v.pers.bi.clstmt.clstroadparki.sidew.oth-g.build.fenceveget.trunkterra.poletraf.mIoU
Oracle89.442.10.059.961.269.639.00.082.221.558.245.686.163.680.252.077.650.161.754.7
Source-only64.80.00.013.81.85.02.10.062.77.534.00.066.736.253.931.344.324.014.224.3
ADDA [44]65.60.00.021.01.32.81.316.764.71.235.40.066.541.857.232.642.223.326.426.3
Ent-Min [45]69.20.010.131.05.32.82.60.065.92.635.70.072.542.852.432.544.724.721.127.2
Self-training [63]71.50.010.333.17.45.91.30.065.16.536.60.067.841.351.732.942.925.125.027.6
CoSMix [37]65.01.722.125.27.733.20.00.064.711.531.10.962.537.844.630.541.130.928.628.4
", + "bbox": [ + 84, + 88, + 883, + 214 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3becf7b4c509b2cda77d77811b6db64879260325d20e45b41a2fcd0405db2829.jpg", + "table_caption": [ + "Table 4. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI $\\rightarrow$ SemanticSTF adaptation. SemanticKITTI serves as the source domain and the entire SemanticSTF including all four weather conditions serves as the target domain." + ], + "table_footnote": [], + "table_body": "
MethodDense-fogLight-fogRainSnow
Source-Only26.925.227.723.5
ADDA [44]31.527.927.423.4
Ent-Min [45]31.428.630.324.9
Self-training [63]31.829.327.925.1
CoSMix [37]31.630.333.132.9
", + "bbox": [ + 98, + 270, + 446, + 358 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Network Models vs All-Weather 3DSS", + "text_level": 1, + "bbox": [ + 76, + 459, + 406, + 474 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We also study how different 3DSS network architectures generalize when they are trained with normal-weather point clouds and evaluated over SemanticSTF. Specifically, we select five representative 3DSS networks [9, 19, 41, 62] that have been widely adopted in 3D LiDAR segmentation studies. In the experiments, each selected network is first pre-trained with SemanticKITTI [2] and then evaluated over the validation set of SemanticSTF. We directly use the officially released code and the pre-trained weights for evaluation. Table 6 shows experimental results. We can observe that the five pre-trained models perform very differently though they all achieve superior segmentation over SemanticKITTI. Specifically, RandLA-Net [19], SPVCNN [41], and SPVNAS [41] perform clearly better than SalsaNext [9] and Cylinder3D [62]. In addition, none of the five pre-trained models perform well, verifying the clear domain discrepancy between point clouds of normal and adverse weather conditions. The experiments further indicate the great value of SemanticSTF in the future exploration of robust point cloud parsing under all weather conditions. In addition, the supervised performance of these 3DSS networks over SemanticSTF is provided in the appendix.", + "bbox": [ + 73, + 482, + 468, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion and Outlook", + "text_level": 1, + "bbox": [ + 76, + 845, + 305, + 861 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This paper presents SemanticSTF, a large-scale dataset and benchmark suite for semantic segmentation of LiDAR", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/23364a4d9538b6c235a9ab2049dadb197479768832b1be56571bee0af6af6c1f.jpg", + "table_caption": [ + "Table 5. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI $\\rightarrow$ SemanticSTF adaptation for individual adverse weather conditions. We train a separate model for each weather-specific subset of SemanticSTF and evaluate the trained model on the weather condition it has been trained for." + ], + "table_footnote": [], + "table_body": "
3DSS ModelD-fogL-fogRainSnowAll
RandLA-Net [19]26.526.025.122.725.3
SalsaNext [9]16.09.67.83.59.1
SPVCNN [41]30.422.821.718.322.4
SPVNAS [41]25.518.317.013.018.0
Cylinder3D [62]14.87.45.74.07.3
", + "bbox": [ + 500, + 270, + 903, + 358 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 6. Performance of state-of-the-art 3DSS models that are pre-trained over SemanticKITTI and tested on validation set of SemanticSTF for individual weather conditions and jointly for all weather conditions.", + "bbox": [ + 498, + 364, + 893, + 419 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "point clouds under adverse weather conditions. SemanticSTF provides high-quality point-level annotations for point clouds captured under adverse weather including dense fog, light fog, snow and rain. Extensive studies have been conducted to examine how state-of-the-art 3DSS methods perform over SemanticSTF, demonstrating its significance in directing future research on domain adaptive and domain generalizable 3DSS under all-weather conditions.", + "bbox": [ + 496, + 452, + 892, + 573 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We also design PointDR, a domain randomization technique that aims to use normal-weather point clouds to train a domain generalizable 3DSS model that can work well over adverse-weather point clouds. PointDR consists of two novel designs including geometry style randomization and embedding aggregation which jointly learn perturbation-invariant representations that generalize well to various new point-cloud domains. Extensive experiments show that PointDR achieves superior point cloud segmentation performance as compared with the state-of-the-art.", + "bbox": [ + 496, + 577, + 892, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 500, + 751, + 660, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This study is funded BY the Ministry of Education Singapore, under the Tier-1 scheme with project number RG18/22. It is also supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from Singapore Telecommunications Limited (Singtel), through Singtel Cognitive and Artificial Intelligence Lab for Enterprises (SCALE@NTU).", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "9389", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta regularization. Advances in neural information processing systems, 31, 2018. 2", + "[2] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9297-9307, 2019. 1, 3, 4, 6, 7, 8", + "[3] Mario Bijelic, Tobias Gruber, Fahim Mannan, Florian Kraus, Werner Ritter, Klaus Dietmayer, and Felix Heide. Seeing through fog without seeing fog: Deep multimodal sensor fusion in unseen adverse weather. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11682-11692, 2020. 1, 2, 3, 4", + "[4] Gilles Blanchard, Gyemin Lee, and Clayton Scott. Generalizing from several related classification tasks to a new unlabeled sample. 
Advances in neural information processing systems, 24, 2011. 2", + "[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 4", + "[6] Ran Cheng, Ryan Razani, Ehsan Taghavi, Enxu Li, and Bingbing Liu. 2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12547-12556, 2021. 2", + "[7] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 6, 7", + "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3213-3223, 2016. 4", + "[9] Tiago Cortinhal, George Tzelepis, and Eren Erdal Aksoy. Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In International Symposium on Visual Computing, pages 207-222. Springer, 2020. 2, 8", + "[10] A Filgueira, H González-Jorge, Susana Lagtuela, L Díaz-Vilarino, and Pedro Arias. Quantifying the influence of rain in lidar performance. Measurement, 95:143-148, 2017. 3", + "[11] Whye Kit Fong, Rohit Mohan, Juana Valeria Hurtado, Lubing Zhou, Holger Caesar, Oscar Beijbom, and Abhinav Valada. Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters, 7(2):3795-3802, 2022. 1, 4" + ], + "bbox": [ + 78, + 116, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2", + "[13] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 4", + "[14] Dayan Guan, Jiaxing Huang, Aoran Xiao, and Shijian Lu. Domain adaptive video segmentation via temporal consistency regularization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8053-8064, 2021. 2", + "[15] Martin Hahner, Christos Sakaridis, Mario Bijelic, Felix Heide, Fisher Yu, Dengxin Dai, and Luc Van Gool. Lidar snowfall simulation for robust 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16364-16374, 2022. 2", + "[16] Martin Hahner, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Fog simulation on real lidar point clouds for 3d object detection in adverse weather. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15283-15292, 2021. 2", + "[17] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 
5", + "[18] Robin Heinzler, Philipp Schindler, Jürgen Seekircher, Werner Ritter, and Wilhelm Stork. Weather influence and classification with automotive lidar sensors. In 2019 IEEE intelligent vehicles symposium (IV), pages 1527-1534. IEEE, 2019. 3", + "[19] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1, 2, 8", + "[20] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Cross-view regularization for domain adaptive panoptic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10133-10144, 2021. 2", + "[21] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Fsdr: Frequency space domain randomization for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6891-6902, 2021. 2", + "[22] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2", + "[23] Jiaxing Huang, Dayan Guan, Aoran Xiao, Shijian Lu, and Ling Shao. Category contrast for unsupervised domain adaptation in visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1203-1214, 2022. 2" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9390", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4893-4902, 2019. 2", + "[25] Alexander Lehner, Stefano Gasperini, Alvaro Marcos-Ramiro, Michael Schmidt, Mohammad-Ali Nikouei Mahani, Nassir Navab, Benjamin Busam, and Federico Tombari. 3d-vfield: Adversarial augmentation of point clouds for domain generalization in 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17295-17304, 2022. 2", + "[26] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5400-5409, 2018. 2, 6", + "[27] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2", + "[28] Zhipeng Luo, Zhongang Cai, Changqing Zhou, Gongjie Zhang, Haiyu Zhao, Shuai Yi, Shijian Lu, Hongsheng Li, Shanghang Zhang, and Ziwei Liu. Unsupervised domain adaptive 3d detection with multi-level consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8866-8875, 2021. 2", + "[29] Will Maddern, Geoffrey Pascoe, Chris Linegar, and Paul Newman. 1 year, $1000\\mathrm{km}$ : The oxford robotcar dataset. The International Journal of Robotics Research, 36(1):3-15, 2017. 2", + "[30] Andres Milioto, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Rangenet++: Fast and accurate lidar semantic segmentation. 
In 2019 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 4213-4220. IEEE, 2019. 2", + "[31] Krikamol Muandet, David Balduzzi, and Bernhard Scholkopf. Domain generalization via invariant feature representation. In International Conference on Machine Learning, pages 10-18. PMLR, 2013. 2", + "[32] Thierry Peynot, James Underwood, and Steven Scheding. Towards reliable perception for unmanned ground vehicles in challenging conditions. In 2009 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1170-1176. IEEE, 2009. 3", + "[33] Matthew Pitropov, Danson Evan Garcia, Jason Rebello, Michael Smart, Carlos Wang, Krzysztof Czarnecki, and Steven Waslander. Canadian adverse driving conditions dataset. The International Journal of Robotics Research, 40(4-5):681-690, 2021. 1, 2", + "[34] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2", + "[35] Julian Ryde and Nick Hillier. Performance of laser and radar ranging devices in adverse environmental conditions. Journal of Field Robotics, 26(9):712-727, 2009. 3", + "[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for se" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "matic driving scene understanding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10765-10775, 2021. 2, 3, 4", + "[37] Cristiano Saltori, Fabio Galasso, Giuseppe Fiameni, Nicu Sebe, Elisa Ricci, and Fabio Poiesi. Cosmix: Compositional semantic mix for domain adaptation in 3d lidar segmentation. ECCV, 2022. 3, 7, 8", + "[38] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014. 6", + "[39] Peng Su, Kun Wang, Xingyu Zeng, Shixiang Tang, Dapeng Chen, Di Qiu, and Xiaogang Wang. Adapting object detectors with conditional domain normalization. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 403-419. Springer, 2020. 2", + "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 4", + "[41] Haotian Tang, Zhijian Liu, Shengyu Zhao, Yujun Lin, Ji Lin, Hanrui Wang, and Song Han. Searching efficient 3d architectures with sparse point-voxel convolution. In European conference on computer vision, pages 685–702. Springer, 2020. 1, 2, 5, 6, 8", + "[42] Josh Tobin, Rachel Fong, Alex Ray, Jonas Schneider, Wojciech Zaremba, and Pieter Abbeel. Domain randomization for transferring deep neural networks from simulation to the real world. In 2017 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 23-30. IEEE, 2017. 4", + "[43] Jonathan Tremblay, Aayush Prakash, David Acuna, Mark Brophy, Varun Jampani, Cem Anil, Thang To, Eric Cameracci, Shaad Boochoon, and Stan Birchfield. 
Training deep networks with synthetic data: Bridging the reality gap by domain randomization. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 969-977, 2018. 4", + "[44] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 7, 8", + "[45] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 7, 8", + "[46] Bichen Wu, Xuanyu Zhou, Sicheng Zhao, Xiangyu Yue, and Kurt Keutzer. Squeezesegv2: Improved model structure and unsupervised domain adaptation for road-object segmentation from a lidar point cloud. In 2019 International Conference on Robotics and Automation (ICRA), pages 4376-4382. IEEE, 2019. 2, 3" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "9391", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 5", + "[48] Aoran Xiao, Jiaxing Huang, Dayan Guan, Kaiwen Cui, Shijian Lu, and Ling Shao. Polarmix: A general data augmentation technique for lidar point clouds. NeurIPS, 2022. 3, 6, 7", + "[49] Aoran Xiao, Jiaxing Huang, Dayan Guan, Fangneng Zhan, and Shijian Lu. Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2795-2803, 2022. 1, 3, 4, 6, 7", + "[50] Aoran Xiao, Xiaofei Yang, Shijian Lu, Dayan Guan, and Ji-axing Huang. Fps-net: A convolutional fusion network for large-scale lidar point cloud segmentation. ISPRS Journal of Photogrammetry and Remote Sensing, 176:237–249, 2021. 2", + "[51] Jianyun Xu, Ruixiang Zhang, Jian Dou, Yushi Zhu, Jie Sun, and Shiliang Pu. Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16024–16033, 2021. 2", + "[52] Qiangeng Xu, Yin Zhou, Weiyue Wang, Charles R Qi, and Dragomir Anguelov. Spg: Unsupervised domain adaptation for 3d object detection via semantic point generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15446-15456, 2021. 2", + "[53] Jihan Yang, Shaoshuai Shi, Zhe Wang, Hongsheng Li, and Xiaojuan Qi. St3d: Self-training for unsupervised domain adaptation on 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10368-10378, 2021. 2", + "[54] Xufeng Yao, Yang Bai, Xinyun Zhang, Yuechen Zhang, Qi Sun, Ran Chen, Ruiyu Li, and Bei Yu. Pcl: Proxy-based contrastive learning for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7097-7107, 2022. 6", + "[55] Li Yi, Boqing Gong, and Thomas Funkhouser. Complete & label: A domain adaptation approach to semantic segmentation of lidar point clouds. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15363-15373, 2021. 3", + "[56] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2636-2645, 2020. 2", + "[57] Feihu Zhang, Jin Fang, Benjamin Wah, and Philip Torr. Deep fusionnet for point cloud semantic segmentation. In European Conference on Computer Vision, pages 644-663. Springer, 2020. 2", + "[58] Weichen Zhang, Wen Li, and Dong Xu. Srdan: Scale-aware and range-aware domain adaptation network for cross-dataset 3d object detection. In Proceedings of the IEEE/CVF" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition, pages 6769-6779, 2021. 2", + "[59] Yang Zhang, Zixiang Zhou, Philip David, Xiangyu Yue, Zerong Xi, Boqing Gong, and Hassan Foroosh. Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2020. 2", + "[60] Sicheng Zhao, Yezhen Wang, Bo Li, Bichen Wu, Yang Gao, Pengfei Xu, Trevor Darrell, and Kurt Keutzer. *epointda: An end-to-end simulation-to-real domain adaptation framework for lidar point cloud segmentation*. In *Proceedings of the AAAI Conference on Artificial Intelligence*, volume 35, pages 3500–3509, 2021. 2", + "[61] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2", + "[62] Xinge Zhu, Hui Zhou, Tai Wang, Fangzhou Hong, Yuexin Ma, Wei Li, Hongsheng Li, and Dahua Lin. Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9939-9948, 2021. 1, 2, 8", + "[63] Yang Zou, Zhiding Yu, Xiaofeng Liu, BVK Kumar, and Jinsong Wang. Confidence regularized self-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5982-5991, 2019. 7, 8" + ], + "bbox": [ + 503, + 92, + 890, + 486 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "9392", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_model.json b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f556860aa68f5a4d53a3551324fa5a60d067c3c6 --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_model.json @@ -0,0 +1,2081 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.285, + 0.106, + 0.82, + 0.15 + ], + "angle": 0, + "content": "3D Semantic Segmentation in the Wild: Generalized Models for Adverse-Condition Point Clouds" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.159, + 0.783, + 0.196 + ], + "angle": 0, + "content": "Aoran Xiao\\(^{1}\\), Jiaxing Huang\\(^{1}\\), Weihao Xuan\\(^{2}\\), Ruijie Ren\\(^{3}\\), Kangcheng Liu\\(^{1}\\) \nDayan Guan\\(^{4}\\), Abdulmotaleb El Saddik\\(^{4,6}\\), Shijian Lu\\(^{1,\\dagger}\\), Eric Xing\\(^{4,5}\\)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.196, + 0.844, + 0.213 + ], + "angle": 0, + "content": "\\(^{1}\\)Nanyang Technological University \\(^{2}\\)Waseda University \\(^{3}\\)Technical University of Denmark" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.213, + 0.713, + 0.23 + ], + "angle": 0, + "content": "4Mohamed bin Zayed University of Artificial Intelligence" + }, + { + "type": "text", + "bbox": [ + 0.28, + 0.23, + 0.686, + 0.248 + ], + "angle": 0, + "content": "\\(^{5}\\)Carnegie Mellon University \\(^{6}\\)University of Ottawa" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.266, + 0.475, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.408, + 0.395, + 0.42 + ], + "angle": 0, + "content": "(a) A LiDAR scan captured in a snowy day" + }, + { + "type": "image", + "bbox": [ + 0.477, + 0.267, + 0.888, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.618, + 0.408, + 0.754, + 0.419 + ], + "angle": 0, + "content": "(b) Point-level annotations" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.424, + 0.893, + 0.467 + ], + "angle": 0, + "content": "Figure 1. We introduce SemanticSTF, an adverse-weather LiDAR point cloud dataset with dense point-level annotations that can be exploited for the study of point cloud semantic segmentation under all-weather conditions (including fog, snow, and rain). The graph on the left shows one scan sample captured on a snowy day, and the one on the right shows the corresponding point-level annotations." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.479, + 0.314, + 0.495 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.511, + 0.473, + 0.86 + ], + "angle": 0, + "content": "Robust point cloud parsing under all-weather conditions is crucial to level-5 autonomy in autonomous driving. However, how to learn a universal 3D semantic segmentation (3DSS) model is largely neglected as most existing benchmarks are dominated by point clouds captured under normal weather. We introduce SemanticSTF, an adverse-weather point cloud dataset that provides dense point-level annotations and allows to study 3DSS under various adverse weather conditions. We study all-weather 3DSS modeling under two setups: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data; 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenge while existing 3DSS methods encounter adverse-weather data, showing the great value of SemanticSTF in steering the future endeavor along this very meaningful research direction. 
In addition, we design a domain randomization technique that alternatively randomizes the geometry styles of point clouds and aggregates their embeddings, ultimately leading to a generalizable model that can improve 3DSS under various adverse weather effectively. The SemanticSTF and related codes are available at https://github.com/xiaooran/SemanticSTF." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.479, + 0.633, + 0.495 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.506, + 0.895, + 0.732 + ], + "angle": 0, + "content": "3D LiDAR point clouds play an essential role in semantic scene understanding in various applications such as self-driving vehicles and autonomous drones. With the recent advance of LiDAR sensors, several LiDAR point cloud datasets [2, 11, 49] such as SemanticKITTI [2] have been proposed which greatly advanced the research in 3D semantic segmentation (3DSS) [19, 41, 62] for the task of point cloud parsing. As of today, most existing point cloud datasets for outdoor scenes are dominated by point clouds captured under normal weather. However, 3D vision applications such as autonomous driving require reliable 3D perception under all-weather conditions including various adverse weather such as fog, snow, and rain. How to learn a weather-tolerant 3DSS model is largely neglected due to the absence of related benchmark datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.896, + 0.903 + ], + "angle": 0, + "content": "Although several studies [3, 33] attempt to include adverse weather conditions in point cloud datasets, such as the STF dataset [3] that consists of LiDAR point clouds captured under various adverse weather, these efforts focus on object detection benchmarks and do not provide any pointwise annotations which are critical in various tasks such as 3D semantic and instance segmentation. To address this gap, we introduce SemanticSTF, an adverse-weather point cloud dataset that extends the STF Detection Benchmark by providing point-wise annotations of 21 semantic categories, as illustrated in Fig. 1. Similar to STF, SemanticSTF cap" + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.887, + 0.232, + 0.9 + ], + "angle": 0, + "content": "† Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9382" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.138 + ], + "angle": 0, + "content": "tures four typical adverse weather conditions that are frequently encountered in autonomous driving including dense fog, light fog, snow, and rain." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.15, + 0.471, + 0.362 + ], + "angle": 0, + "content": "SemanticSTF provides a great benchmark for the study of 3DSS and robust point cloud parsing under adverse weather conditions. Beyond serving as a well-suited test bed for examining existing fully-supervised 3DSS methods that handle adverse-weather point cloud data, SemanticSTF can be further exploited to study two valuable weather-tolerant 3DSS scenarios: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data, and 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. 
Our studies reveal the challenges faced by existing 3DSS methods while processing adverse-weather point cloud data, highlighting the significant value of SemanticSTF in guiding future research efforts along this meaningful research direction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.375, + 0.471, + 0.707 + ], + "angle": 0, + "content": "In addition, we design PointDR, a new baseline framework for the future study and benchmarking of all-weather 3DSS. Our objective is to learn robust 3D representations that can reliably represent points of the same category across different weather conditions while remaining discriminative across categories. However, robust all-weather 3DSS poses two major challenges: 1) LiDAR point clouds are typically sparse, incomplete, and subject to substantial geometric variations and semantic ambiguity. These challenges are further exacerbated under adverse weather conditions, with many missing points and geometric distortions due to fog, snow cover, etc. 2) More noises are introduced under adverse weather due to snow flicks, rain droplets, etc. PointDR addresses the challenges with two iterative operations: 1) Geometry style randomization that expands the geometry distribution of point clouds under various spatial augmentations; 2) Embedding aggregation that introduces contrastive learning to aggregate the encoded embeddings of the randomly augmented point clouds. Despite its simplicity, extensive experiments over point clouds of different adverse weather conditions show that PointDR achieves superior 3DSS generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.902 + ], + "angle": 0, + "content": "The contribution of this work can be summarized in three major aspects. First, we introduce SemanticSTF, a large-scale adverse-weather point cloud benchmark that provides high-quality point-wise annotations of 21 semantic categories. Second, we design PointDR, a point cloud domain randomization baseline that can be exploited for future study and benchmarking of 3DSS under all-weather conditions. Third, leveraging SemanticSTF, we benchmark existing 3DSS methods over two challenging tasks on domain adaptive 3DSS and domain generalized 3DSS. The benchmarking efforts lay a solid foundation for future research on this highly meaningful problem." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.09, + 0.651, + 0.107 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.116, + 0.892, + 0.341 + ], + "angle": 0, + "content": "3D semantic segmentation aims to assign point-wise semantic labels for point clouds. It has been developed rapidly over the past few years, largely through the development of various deep neural networks (DNNs) such as standard convolutional network for projection-based methods [9, 30, 46, 50, 59], multi-layer perceptron (MLP)-based networks [19, 34, 34], 3D voxel convolution-based networks [7, 62], or hybrid networks [6, 27, 41, 51, 57]. While existing 3DSS networks are mainly evaluated over normal weather point clouds, their performance for adverse weather point clouds is far under-investigated. The proposed SemanticSTF closes the gap and provides a solid ground for the study and evaluation of all-weather 3DSS. By enabling investigations into various new research directions, SemanticSTF represents a valuable tool for advancing the field." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.342, + 0.892, + 0.613 + ], + "angle": 0, + "content": "Vision recognition under adverse conditions. Scene understanding under adverse conditions has recently attracted increasing attention due to the strict safety demand in various outdoor navigation and perception tasks. In 2D vision, several large-scale datasets have been proposed to investigate perceptions tasks in adverse visual conditions including localization [29], detection [56], and segmentation [36]. On the other hand, learning 3D point clouds of adverse conditions is far under-explored due to the absence of comprehensive dataset benchmarks. The recently proposed datasets such as STF [3] and CADC [33] contain LiDAR point clouds captured under adverse weather conditions. However, these studies focus on the object detection task [15, 16] with bounding-box annotations, without providing any point-wise annotations. Our introduced SemanticSTF is the first large-scale dataset that consists of LiDAR point clouds in adverse weather conditions with high-quality dense annotations, to the best of our knowledge." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.614, + 0.892, + 0.749 + ], + "angle": 0, + "content": "Domain generalization [4,31] aims to learn a generalizable model from single or multiple related but distinct source domains where target data is inaccessible during model learning. It has been widely studied in 2D computer vision tasks [1, 21, 26, 61] while few studies explore it in point cloud learning. Recently, [25] studies domain generalization for 3D object detection by deforming point clouds via vector fields. Differently, this work is the first attempt that explores domain generalization for 3DSS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Unsupervised domain adaptation is a method of transferring knowledge learned from a labeled source domain to a target domain by leveraging the unlabeled target data. It has been widely studied in 2D image learning [12,14,20,22-24] and 3D point clouds [15, 16, 28, 39, 52, 53, 58]. Recently, domain adaptive 3D LiDAR segmentation has drawn increasing attention due to the challenge in point-wise annotation. Different UDA approaches have been designed to mitigate discrepancies across LiDAR point clouds of different domains. For example, [46, 60] project point" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9383" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.213 + ], + "angle": 0, + "content": "clouds into depth images and leverage 2D UDA techniques while [37, 48, 49, 55] directly work in the 3D space. However, these methods either work for synthetic-to-real UDA scenarios [46, 49] or normal-to-normal point cloud adaptation [55], ignoring normal-to-adverse adaptation which is highly practical in real applications. Our SemanticSTF dataset fills up this blank and will inspire more development of new algorithms for normal-to-adverse adaptation." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.228, + 0.322, + 0.243 + ], + "angle": 0, + "content": "3. The SemanticSTF Dataset" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.254, + 0.21, + 0.269 + ], + "angle": 0, + "content": "3.1. 
Background" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.277, + 0.471, + 0.581 + ], + "angle": 0, + "content": "LiDAR sensors send out laser pulses and measure their flight time based on the echoes it receives from targets. The travel distance as derived from the time-of-flight and the registered angular information (between the LiDAR sensors and the targets) can be combined to compute the 3D coordinates of target surface which form point clouds that capture the 3D shape of the targets. However, the active LiDAR pulse system can be easily affected by the scattering media such as particles of rain droplets and snow [10, 18, 32, 35], leading to shifts of measured distances, variation of echo intensity, point missing, etc. Hence, point clouds captured under adverse weather usually have clear distribution discrepancy as compared with those collected under normal weather as illustrated in Fig. 1. However, existing 3DSS benchmarks are dominated by normal-weather point clouds which are insufficient for the study of universal 3DSS under all-weather conditions. To this end, we propose SemanticSTF, a point-wise annotated large-scale adverse-weather dataset that can be explored for the study of 3DSS and point cloud parsing under various adverse weather conditions." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.59, + 0.3, + 0.607 + ], + "angle": 0, + "content": "3.2. Data Selection and Split" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We collect SemanticSTF by leveraging the STF benchmark [3], a multi-modal adverse-weather dataset that was jointly collected in Germany, Sweden, Denmark, and Finland. The data in STF have multiple modalities including LiDAR point clouds and they are collected under various adverse weather conditions such as snow and fog. However, STF provides bounding-box annotations only for the study of 3D detection tasks. In SemanticSTF, we manually selected 2,076 scans captured by a Velodyne HDL64 S3D LiDAR sensor from STF that cover various adverse weather conditions including 694 snowy, 637 dense-foggy, 631 light-foggy, and 114 rainy (all rainy LiDAR scans in STF). During the selection, we pay special attention to the geographical diversity of the point clouds aiming for minimizing data redundancy. We ignore the factor of daytime/nighttime since LiDAR sensors are robust to lighting conditions. We split SemanticSTF into three parts including 1,326 full 3D scans for training, 250 for validating, and 500 for testing. All three splits have approximately the same" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.88, + 0.107 + ], + "angle": 0, + "content": "proportion of LiDAR scans of different adverse weathers." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.119, + 0.667, + 0.133 + ], + "angle": 0, + "content": "3.3. Data Annotation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.143, + 0.892, + 0.415 + ], + "angle": 0, + "content": "Point-wise annotation of LiDAR point clouds is an extremely laborious task due to several factors, such as 3D view changes, inconsistency between point cloud display and human visual perception, sweeping occlusion, point sparsity, etc. However, point-wise annotating of adverse-weather point clouds is even more challenging due to two new factors. First, the perceived distance shifts under adverse weather often lead to various geometry distortions in the collected points which make them different from those collected under normal weather. 
This presents significant challenges for annotators who must recognize various objects and assign a semantic label to each point. Second, LiDAR point clouds collected under adverse weather often contain a significant portion of invalid regions that consist of indiscernible semantic contents (e.g., thick snow cover) that make it difficult to identify the ground type. The existence of such invalid regions makes point-wise annotation even more challenging." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.892, + 0.627 + ], + "angle": 0, + "content": "We designed a customized labeling pipeline to handle the annotation challenges while performing point-wise annotation of point clouds in SemanticSTF. Specifically, we first provide labeling instructions and demo annotations and train a team of professional annotators to provide pointwise annotations of a set of selected STF LiDAR scans. To achieve reliable high-quality annotations, the annotators leverage the corresponding 2D camera images and Google Street views as extra references while identifying the category of each point in this initial annotation process. After that, the annotators cross-check their initial annotations for identifying and correcting labeling errors. At the final stage, we engaged professional third parties who provide another round of annotation inspection and correction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.892, + 0.779 + ], + "angle": 0, + "content": "Annotation of SemanticSTF is a highly laborious and time-consuming task. For instance, while labeling downtown areas with the most complex scenery, it took an annotator an average of 4.3 hours to label a single LiDAR scan. Labeling a scan captured in a relatively simpler scenery, such as a highway, also takes an average of 1.6 hours. In addition, an additional 30-60 minutes are required per scan for verification and correction by professional third parties. In total, annotating the entire SemanticSTF dataset takes over 6,600 man-hours." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.902 + ], + "angle": 0, + "content": "While annotating SemanticSTF, we adopted the same set of semantic classes as in the widely-studied semantic segmentation benchmark, SemanticKITTI [2]. Specifically, we annotate the 19 evaluation classes of SemanticKITTI, which encompass most traffic-related objects in autonomous driving scenes. Additionally, following [36], we label points with indiscernible semantic contents caused by adverse weather (e.g. ground covered by snowdrifts) as invalid. Fur" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9384" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.085, + 0.089, + 0.462, + 0.142 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.152, + 0.465, + 0.167 + ], + "angle": 0, + "content": "Figure 2. Number of annotated points per class in SemanticSTF." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.195, + 0.47, + 0.256 + ], + "angle": 0, + "content": "thermore, we label points that do not belong to the 20 categories or are indistinguishable as ignored, which are not utilized in either training or evaluations. Detailed descriptions of each class can be found in the appendix." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.267, + 0.226, + 0.282 + ], + "angle": 0, + "content": "3.4. 
Data Statistics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.291, + 0.47, + 0.488 + ], + "angle": 0, + "content": "SemanticSTF consists of point-wise annotations of 21 semantic categories, and Fig. 2 shows the detailed statistics of the point-wise annotations. It can be seen that classes road, sidewalk, building, vegetation, and terrain appear most frequently whereas classes motor, motorcyclist, and bicyclist have clearly lower occurrence frequency. Such class imbalance is largely attributed to the various object sizes and unbalanced distribution of object categories in transportation scenes, and it is also very common in many existing benchmarks. Overall, the statistics and distribution of different object categories are similar to that of other 2D and 3D semantic segmentation benchmarks such as Cityscapes [8], ACDC [36], and SemanticKITTI [2]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.471, + 0.715 + ], + "angle": 0, + "content": "To the best of our knowledge, SemanticSTF is the first large-scale adverse-weather 3DSS benchmark that provides high-quality point-wise annotations. Table 1 compares it with several existing point cloud datasets that have been widely adopted for the study of 3D detection and semantic segmentation. We can observe that existing datasets are either collected under normal weather conditions or collected for object detection studies with bounding-box annotations only. 3DSS benchmark under adverse weather is largely blank, mainly due to the great challenge in point-wise annotations of adverse-weather point clouds as described in previous subsections. From this sense, SemanticSTF fills up this blank by providing a large-scale benchmark and test bed which will be very useful to future research in universal 3DSS under all weather conditions." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.726, + 0.24, + 0.74 + ], + "angle": 0, + "content": "3.5. Data illustration" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Fig. 3 provides examples of point cloud scans captured under adverse weather conditions in SemanticSTF (in row 1) as well as the corresponding annotations (in row 2). Compared with normal-weather point clouds, point clouds captured under adverse weather exhibit four distinct properties: 1) Snow coverage and snowflakes under snowy weather introduce many white points (labeled as “invalid”) as illustrated in Fig. 3(a). The thick snow coverage may lead to object deformation as well; Rainy conditions may cause specular reflection of laser signals from water on the ground" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.214 + ], + "angle": 0, + "content": "
Dataset | #Cls | Type | Annotation | Fog | Rain | Snow
KITTI [13] | 8 | real | bounding box | ✗ | ✗ | ✗
nuScenes [5] | 23 | real | bounding box | ✗ | ✗ | ✗
Waymo [40] | 4 | real | bounding box | ✗ | ✗ | ✗
STF [3] | 5 | real | bounding box | ✓ | ✓ | ✓
SemanticKITTI [2] | 25 | real | point-wise | ✗ | ✗ | ✗
nuScenes-LiDARSeg [11] | 32 | real | point-wise | ✗ | ✗ | ✗
Waymo-LiDARSeg [40] | 21 | real | point-wise | ✗ | ✗ | ✗
SynLiDAR [49] | 32 | synth. | point-wise | ✗ | ✗ | ✗
SemanticSTF (ours) | 21 | real | point-wise | ✓ | ✓ | ✓
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.224, + 0.892, + 0.251 + ], + "angle": 0, + "content": "Table 1. Comparison of SemanticSTF against existing outdoor LiDAR benchmarks. #Cls means the class number." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.279, + 0.892, + 0.46 + ], + "angle": 0, + "content": "and produce many noise points as shown in Fig.3(b); 3) Dense fog may greatly reduce the working range of LiDAR sensors, leading to small spatial distribution of the collected LiDAR points as illustrated in Fig. 3(c); 4) Point clouds under light fog have similar characteristics as normal-weather point clouds as illustrated in Fig. 3(d). The distinct properties of point clouds under different adverse weather introduce different types of domain shift from normal-weather point clouds which complicate 3DSS greatly as discussed in Section 5. They also verify the importance of developing universal 3DSS models that can perform well under all weather conditions." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.475, + 0.83, + 0.491 + ], + "angle": 0, + "content": "4. Point Cloud Domain Randomization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.501, + 0.892, + 0.606 + ], + "angle": 0, + "content": "Leveraging SemanticSTF, we explore domain generalization (DG) for semantic segmentation of LiDAR point clouds under all weather conditions. Specifically, we design PointDR, a domain randomization technique that helps to train a generalizable segmentation model from normal-weather point clouds that can work well for adverse-weather point clouds in SemanticSTF." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.617, + 0.684, + 0.632 + ], + "angle": 0, + "content": "4.1. Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.892, + 0.807 + ], + "angle": 0, + "content": "Given labeled point clouds of a source domain \\( S = \\{S_{k} = \\{x_{k},y_{k}\\} \\}_{k = 1}^{K} \\) where \\( x \\) represents a LiDAR point cloud scan and \\( y \\) denotes its point-wise semantic annotations, the goal of domain generalization is to learn a segmentation model \\( F \\) by using the source-domain data only that can perform well on point clouds from an unseen target domain \\( \\mathcal{T} \\). We consider a 3D point cloud segmentation model \\( F \\) that consists of a feature extractor \\( E \\) and a classifier \\( G \\). Note under the setup of domain generalization, target data will not be accessed in training as they could be hard and even impossible to acquire at the training stage." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.817, + 0.816, + 0.832 + ], + "angle": 0, + "content": "4.2. Point Cloud Domain Randomization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Inspired by domain randomization studies in 2D computer vision research [42, 43], we explore how to employ domain randomization for learning domain generalizable models for point clouds. Specifically, we design PointDR," + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9385" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.098, + 0.887, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.332, + 0.893, + 0.361 + ], + "angle": 0, + "content": "Figure 3. 
Examples of LiDAR point cloud scans captured under different adverse weather including snow, rain, dense fog, and light fog (the first row) and corresponding dense annotations in SemanticSTF (the second row)." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.375, + 0.462, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.546, + 0.47, + 0.63 + ], + "angle": 0, + "content": "Figure 4. The framework of our point cloud randomization method (PointDR): Geometry style randomization creates different point cloud views with various spatial perturbations while embedding aggregation encourages the feature extractor to aggregate randomized point embeddings to learn perturbation-invariant representations, ultimately leading to a generalizable segmentation model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.469, + 0.689 + ], + "angle": 0, + "content": "a point cloud randomization technique that consists of two complementary designs including geometry style randomization and embedding aggregation as illustrated in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.689, + 0.469, + 0.87 + ], + "angle": 0, + "content": "Geometry style randomization aims to enrich the geometry styles and expand the distribution of training point cloud data. Given a point-cloud scan \\( x \\) as input, we apply weak and strong spatial augmentation to obtain two copies of \\( x \\) including a weak-view \\( x^w = \\mathcal{A}^W(x) \\) and a strong-view \\( x^s = \\mathcal{A}^S(x) \\). For the augmentation schemes of \\( \\mathcal{A}^W \\), we follow existing supervised learning methods [41] and adopt the simple random rotation and random scaling. While for the augmentation schemes of \\( \\mathcal{A}^S \\), we further adopt random dropout, random flipping, random noise perturbation, and random jittering on top of \\( \\mathcal{A}^W \\) to obtain a more diverse and complex copy of the input point cloud scan \\( x \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Embedding aggregation aims to aggregate encoded embeddings of randomized point clouds for learning domain-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.375, + 0.893, + 0.618 + ], + "angle": 0, + "content": "invariant representations. We adopt contrastive learning [17] as illustrated in Fig. 4. Given the randomized point clouds \\( x^{w} \\) and \\( x^{s} \\), we first feed them into the feature extractor \\( E \\) and a projector \\( \\mathcal{P} \\) (a two-layer MLP) which outputs normalized point feature embeddings \\( f^{w} \\) and \\( f^{s} \\), respectively \\( (f = \\mathcal{P}(E(x))) \\). \\( \\overline{f}_C^w \\in \\mathbb{R}^{D \\times C} \\) (\\( D \\): feature dimension; \\( C \\): number of semantic classes) is then derived by class-wise averaging the feature embeddings \\( f^{w} \\) in a batch, which is stored in a memory bank \\( \\mathcal{B} \\in \\mathbb{R}^{D \\times C} \\) that has no backpropagation and is momentum updated by iterations (i.e., \\( \\mathcal{B} \\gets m \\times \\mathcal{B} + (1 - m) \\times \\overline{f}_C^w \\) with a momentum coefficient \\( m \\)). Finally, we employ each point feature embedding \\( f_{i}^{s} \\) of the strong-view \\( f^{s} \\) as query and feature embeddings in \\( \\mathcal{B} \\) as keys for contrastive learning, where the key sharing the same semantic class as the query is positive key \\( \\mathcal{B}_{+} \\) and the rest are negative keys. 
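As a rough illustration of the class-wise prototype averaging and momentum-updated memory bank described above, a minimal PyTorch-style sketch might look as follows (this is not the released implementation; all names, shapes, and the momentum default are assumptions):

```python
# Minimal sketch (assumed PyTorch-style API): class-wise averaging of the
# weak-view embeddings f^w and the momentum update B <- m*B + (1-m)*f_bar^w_C.
import torch.nn.functional as F

def update_memory_bank(bank, feats_weak, labels, momentum=0.99):
    """bank: (D, C) class prototypes; feats_weak: (N, D) normalized weak-view
    point embeddings; labels: (N,) point-wise class indices in [0, C)."""
    feats_weak = feats_weak.detach()            # the bank receives no gradients
    num_classes = bank.shape[1]
    proto = bank.clone()
    for c in range(num_classes):
        mask = labels == c
        if mask.any():                          # class-wise average within the batch
            proto[:, c] = F.normalize(feats_weak[mask].mean(dim=0), dim=0)
    # momentum update of the memory bank; re-normalizing each column is an assumption
    bank = momentum * bank + (1.0 - momentum) * proto
    return F.normalize(bank, dim=0)
```

In this sketch each bank column acts as one prototype key per class, and classes absent from the current batch simply keep their previous prototype.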
The contrastive loss is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.634, + 0.892, + 0.677 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {c t} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} - \\log \\frac {\\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {+} / \\tau\\right)}{\\sum_ {j = 1} ^ {C} \\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {j} / \\tau\\right)} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.892, + 0.731 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a temperature hyper-parameter [47]. Note there is no back-propagation for the \"ignore\" class in optimizing the contrastive loss." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.733, + 0.893, + 0.869 + ], + "angle": 0, + "content": "Contrastive learning pulls point feature embeddings of the same classes closer while pushing away point feature embeddings of different classes. Therefore, optimizing the proposed contrastive loss will aggregate randomized point cloud features and learn perturbation-invariant representations, ultimately leading to a robust and generalizable segmentation model. The momentum-updated memory bank provides feature prototypes of each semantic class for more robust and stable contrastive learning." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Combining the supervised cross-entropy loss \\(\\mathcal{L}_{ce}\\) for weakly-augmented point clouds in Eq. 1, the overall train" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9386" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.084, + 0.089, + 0.887, + 0.365 + ], + "angle": 0, + "content": "
Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | D-fog | L-fog | Rain | Snow | mIoU
Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 51.9 | 54.6 | 57.9 | 53.7 | 54.7
SemanticKITTI→SemanticSTF
Baseline | 55.9 | 0.0 | 0.2 | 1.9 | 10.9 | 10.3 | 6.0 | 0.0 | 61.2 | 10.9 | 32.0 | 0.0 | 67.9 | 41.6 | 49.8 | 27.9 | 40.8 | 29.6 | 17.5 | 29.5 | 26.0 | 28.4 | 21.4 | 24.4
Dropout [38] | 62.1 | 0.0 | 15.5 | 3.0 | 11.5 | 5.4 | 2.0 | 0.0 | 58.4 | 12.8 | 26.7 | 1.1 | 72.1 | 43.6 | 52.9 | 34.2 | 43.5 | 28.4 | 15.5 | 29.3 | 25.6 | 29.4 | 24.8 | 25.7
Perturbation | 74.4 | 0.0 | 0.0 | 23.3 | 0.6 | 19.7 | 0.0 | 0.0 | 60.3 | 10.8 | 33.9 | 0.7 | 72.0 | 45.2 | 58.7 | 17.5 | 42.4 | 22.1 | 9.7 | 26.3 | 27.8 | 30.0 | 24.5 | 25.9
PolarMix [48] | 57.8 | 1.8 | 3.8 | 16.7 | 3.7 | 26.5 | 0.0 | 2.0 | 65.7 | 2.9 | 32.5 | 0.3 | 71.0 | 48.7 | 53.8 | 20.5 | 45.4 | 25.9 | 15.8 | 29.7 | 25.0 | 28.6 | 25.6 | 26.0
MMD [26] | 63.6 | 0.0 | 2.6 | 0.1 | 11.4 | 28.1 | 0.0 | 0.0 | 67.0 | 14.1 | 37.9 | 0.3 | 67.3 | 41.2 | 57.1 | 27.4 | 47.9 | 28.2 | 16.2 | 30.4 | 28.1 | 32.8 | 25.2 | 26.9
PCL [54] | 65.9 | 0.0 | 0.0 | 17.7 | 0.4 | 8.4 | 0.0 | 0.0 | 59.6 | 12.0 | 35.0 | 1.6 | 74.0 | 47.5 | 60.7 | 15.8 | 48.9 | 26.1 | 27.5 | 28.9 | 27.6 | 30.1 | 24.6 | 26.4
PointDR (Ours) | 67.3 | 0.0 | 4.5 | 19.6 | 9.0 | 18.8 | 2.7 | 0.0 | 62.6 | 12.9 | 38.1 | 0.6 | 73.3 | 43.8 | 56.4 | 32.2 | 45.7 | 28.7 | 27.4 | 31.3 | 29.7 | 31.9 | 26.2 | 28.6
SynLiDAR→SemanticSTF
Baseline | 27.1 | 3.0 | 0.6 | 15.8 | 0.1 | 25.2 | 1.8 | 5.6 | 23.9 | 0.3 | 14.6 | 0.6 | 36.3 | 19.9 | 37.9 | 17.9 | 41.8 | 9.5 | 2.3 | 16.9 | 17.2 | 17.2 | 11.9 | 15.0
Dropout [38] | 28.0 | 3.0 | 1.4 | 9.6 | 0.0 | 17.1 | 0.8 | 0.7 | 34.2 | 6.8 | 19.1 | 0.1 | 35.5 | 19.1 | 42.3 | 17.6 | 36.0 | 14.0 | 2.8 | 15.3 | 16.6 | 20.4 | 14.0 | 15.2
Perturbation | 27.1 | 2.3 | 2.3 | 16.0 | 0.1 | 23.7 | 1.2 | 4.0 | 27.0 | 3.6 | 16.2 | 0.8 | 29.2 | 16.7 | 35.3 | 22.7 | 38.3 | 17.9 | 5.1 | 16.3 | 16.7 | 19.3 | 13.4 | 15.2
PolarMix [48] | 39.2 | 1.1 | 1.2 | 8.3 | 1.5 | 17.8 | 0.8 | 0.7 | 23.3 | 1.3 | 17.5 | 0.4 | 45.2 | 24.8 | 46.2 | 20.1 | 38.7 | 7.6 | 1.9 | 16.1 | 15.5 | 19.2 | 15.6 | 15.7
MMD [26] | 25.5 | 2.3 | 2.1 | 13.2 | 0.7 | 22.1 | 1.4 | 7.5 | 30.8 | 0.4 | 17.6 | 0.2 | 30.9 | 19.7 | 37.6 | 19.3 | 43.5 | 9.9 | 2.6 | 17.3 | 16.3 | 20.0 | 12.7 | 15.1
PCL [54] | 30.9 | 0.8 | 1.4 | 10.0 | 0.4 | 23.3 | 4.0 | 7.9 | 28.5 | 1.3 | 17.7 | 1.2 | 39.4 | 18.5 | 40.0 | 16.0 | 38.6 | 12.1 | 2.3 | 17.8 | 16.7 | 19.3 | 14.1 | 15.5
PointDR (Ours) | 37.8 | 2.5 | 2.4 | 23.6 | 0.1 | 26.3 | 2.2 | 3.3 | 27.9 | 7.7 | 17.5 | 0.5 | 47.6 | 25.3 | 45.7 | 21.0 | 37.5 | 17.9 | 5.5 | 19.5 | 19.9 | 21.1 | 16.9 | 18.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.1, + 0.375, + 0.867, + 0.39 + ], + "angle": 0, + "content": "Table 2. Experiments on domain generalization with SemanticKITTI [2] or SynLiDAR [49] as source and SemanticSTF as target." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.417, + 0.393, + 0.432 + ], + "angle": 0, + "content": "ing objective of PointDR can be formulated by:" + }, + { + "type": "equation", + "bbox": [ + 0.185, + 0.441, + 0.469, + 0.457 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {P o i n t D R}} = \\mathcal {L} _ {c e} + \\lambda_ {c t} \\mathcal {L} _ {c t} \\tag {2}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.467, + 0.416, + 0.484 + ], + "angle": 0, + "content": "5. Evaluation of Semantic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.469, + 0.598 + ], + "angle": 0, + "content": "SemanticSTF can be adopted for benchmarking different learning setups and network architectures on point cloud segmentation. We perform experiments over two typical learning setups including domain generalization and unsupervised domain adaptation. In addition, we evaluate several state-of-the-art point-cloud segmentation networks to examine their generalization capabilities." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.607, + 0.295, + 0.621 + ], + "angle": 0, + "content": "5.1. Domain Generalization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.469, + 0.734 + ], + "angle": 0, + "content": "We first study domain generalizable point cloud segmentation. For DG, we can only access an annotated source domain during training and the trained model is expected to generalize well to unseen target domains. Leveraging SemanticSTF, we build two DG benchmarks and examine how PointDR helps learn a universal 3DSS model that can work under different weather conditions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.902 + ], + "angle": 0, + "content": "The first benchmark is SemanticKITTI [2] \\(\\rightarrow\\) SemanticSTF where SemanticKITTI is a large-scale real-world 3DSS dataset collected under normal weather conditions. This benchmark serves as a solid testing ground for evaluating domain generalization performance from normal to adverse weather conditions. The second benchmark is SynLiDAR [49] \\(\\rightarrow\\) SemanticSTF where SynLiDAR is a largescale synthetic 3DSS dataset. The motivation of this benchmark is that learning a universal 3DSS model from synthetic point clouds that can work well across adverse weather is of high research and application value considering the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.417, + 0.892, + 0.476 + ], + "angle": 0, + "content": "challenges in point cloud collection and annotation. Note this benchmark is more challenging as the domain discrepancy comes from both normal-to-adverse weather distribution shift and synthetic-to-real distribution shift." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.477, + 0.893, + 0.657 + ], + "angle": 0, + "content": "Setup. We use all 19 evaluating classes of SemanticKITTI in both domain generalization benchmarks. The category of invalid in SemanticSTF is mapped to the ignored since SemanticKITTI and SynLiDAR do not cover this category. We adopt MinkowskiNet [7] (with TorchSparse library [41]) as the backbone model, which is a sparse convolutional network that provides state-of-the-art performance with decent efficiency. 
We adopt the evaluation metrics of Intersection over the Union (IoU) for each segmentation class and the mean IoU (mIoU) over all classes. All experiments are run over a single NVIDIA 2080Ti (11GB). More implementation details are provided in the appendix." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.885 + ], + "angle": 0, + "content": "Baseline Methods. Since domain generalizable 3DSS is far under-explored, there is little existing baseline that can be directly adopted for benchmarking. We thus select two closely related approaches as baseline to evaluate the proposed PointDR. The first approach is data augmentation and we select three related augmentation methods including Dropout [38] that randomly drops out points to simulate LiDAR points missing in adverse weather, Noise perturbation that adds random points in the 3D space to simulate noise points as introduced by particles like falling snow, and PolarMix [48] that mixes point clouds of different sources for augmentation. The second approach is to adapt 2D domain generalization methods for 3DSS. We select two 2D domain generalization methods including the widely studied MMD [26] and the recently proposed PCL [54]." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.891, + 0.901 + ], + "angle": 0, + "content": "Results. Table 2 shows experimental results over the validation" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9387" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.127, + 0.089, + 0.421, + 0.153 + ], + "angle": 0, + "content": "
Method | \(\mathcal{L}_{ce}\) | \(\mathcal{L}_{ct}\) | \(\mathcal{B}\) | mIoU
Baseline | ✓ | | | 24.4
PointDR-CT | ✓ | ✓ | | 27.4
PointDR | ✓ | ✓ | ✓ | 28.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.164, + 0.468, + 0.19 + ], + "angle": 0, + "content": "Table 3. Ablation study of PointDR over domain generalized segmentation task SemanticKITTI \\(\\rightarrow\\) SemanticSTF." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.22, + 0.468, + 0.476 + ], + "angle": 0, + "content": "tion set of SemanticSTF. For both benchmarks, the Baseline is a source-only model that is trained by using the training data of SemanticKITTI or SynLiDAR. We can see that the Baseline achieves very low mIoU while evaluated over the validation set of SemanticSTF, indicating the large domain discrepancy between point clouds of normal and adverse weather conditions. In addition, all three data augmentation methods improve the model generalization consistently but the performance gains are limited especially for the challenging benchmark SynLiDAR \\(\\rightarrow\\) SemanticSTF. The two 2D generalization methods both help SemanticKITTI \\(\\rightarrow\\) SemanticSTF clearly but show very limited improvement over SynLiDAR \\(\\rightarrow\\) SemanticSTF. The proposed PointDR achieves the best generalization consistently across both benchmarks, demonstrating its superior capability to learn perturbation-invariant point cloud representations and effectiveness while handling all-weather 3DSS tasks." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.478, + 0.468, + 0.763 + ], + "angle": 0, + "content": "We also evaluate the compared domain generalization methods over each individual adverse weather condition as shown in Table 2. It can be observed that the three data augmentation methods work for data captured in rainy and snowy weather only. The 2D generalization method MMD shows clear effectiveness for point clouds under dense fog and rain while PCL works for point clouds under rainy and snowy weather instead. We conjecture that the performance variations are largely attributed to the different properties of point clouds captured under different weather conditions. For example, more points are missing in rain while object points often deform due to the covered snow (more illustrations are provided in the appendix). Such data variations lead to different domain discrepancies across weather which further leads to different performances of the compared methods. As PointDR learns perturbation-tolerant representations, it works effectively across different adverse weather conditions. We also provide qualitative results, please refer to the appendix for details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.468, + 0.9 + ], + "angle": 0, + "content": "Ablation study. We study different PointDR designs to examine how they contribute to the overall generalization performance. As Table 3 shows, we report three models over the benchmark \"SemanticKITTI \\(\\rightarrow\\) SemanticSTF\": 1) Baseline that is trained with \\(\\mathcal{L}_{ce}\\). 2) PointDR-CT that is jointly trained with \\(\\mathcal{L}_{ce}\\) and \\(\\mathcal{L}_{ct}\\) without using the memory bank \\(\\mathcal{B}\\). 3) The complete PointDR that is trained with \\(\\mathcal{L}_{ce}\\), \\(\\mathcal{L}_{ct}\\) and the memory bank \\(\\mathcal{B}\\). We evaluate the three models over the validation set of SemanticSTF and Table 3" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.304 + ], + "angle": 0, + "content": "shows experimental results. 
We can see that the Baseline performs poorly at \\(24.4\\%\\) due to clear domain discrepancy between point clouds of normal weather and adverse weather. Leveraging the proposed contrastive loss, \\(\\mathcal{L}_{ct}\\) achieves clearly better performance at \\(27.4\\%\\), indicating that learning perturbation-invariance is helpful for universal LiDAR segmentation of all-weather conditions. On top of that, introducing the momentum-updated memory bank \\(\\mathcal{B}\\) further improves the segmentation performance at \\(28.6\\%\\). This is because the feature embeddings in \\(\\mathcal{B}\\) serve as the class prototypes which help the optimization of the segmentation network, finally leading to more robust representations of 3DSS that perform better over adverse weather point clouds." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.317, + 0.688, + 0.332 + ], + "angle": 0, + "content": "5.2. Domain Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.341, + 0.892, + 0.581 + ], + "angle": 0, + "content": "We also study SemanticSTF over a domain adaptive point cloud segmentation benchmark SemanticKITTI \\(\\rightarrow\\) SemanticSTF. Specifically, we select four representative UDA methods including ADDA [44], entropy minimization (Ent-Min) [45], self-training [63], and CoSMix [37] for adaptation from the source SemanticKITTI [2] toward the target SemanticSTF. Following the state-of-the-art [37, 48, 49] on synthetic-to-real adaptation, we adopt MinkowskiNet [7] as the segmentation backbone for all compared methods. Table 4 shows experimental results over the validation set of SemanticSTF. We can see that all UDA methods outperform the Source-only consistently under the normal-to-adverse adaptation setup. At the other end, the performance gains are still quite limited, showing the great improvement space along domain adaptive 3DSS from normal to adverse weather conditions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.892, + 0.9 + ], + "angle": 0, + "content": "In addition, we examined the adaptability of the four UDA methods in relation to each individual adverse weather condition. Specifically, we trained each of the four methods for adaptation from SemanticKITTI to SemanticSTF data for each adverse weather condition. Table 5 shows the experimental results over the validation set of SemanticSTF. We can see all four methods outperform the Source-only method under Dense-fog and Light-fog, demonstrating their effectiveness in mitigating domain discrepancies. However, for rain and Snow, only CoSMix achieved marginal performance gains while the other three UDA methods achieved limited performance improvements. We conjecture that snow and rain introduce large deformations on object surfaces or much noise, making adaptation from normal to adverse weather more challenging. CoSMix works in the input space by directly mixing source and target points, allowing it to perform better under heavy snow and rain which have larger domain gaps. However, all methods achieved relatively low segmentation performance, indicating the significance of our research and the large room for improvement in our constructed benchmarks." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9388" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.089, + 0.885, + 0.215 + ], + "angle": 0, + "content": "
Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | mIoU
Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 54.7
Source-only | 64.8 | 0.0 | 0.0 | 13.8 | 1.8 | 5.0 | 2.1 | 0.0 | 62.7 | 7.5 | 34.0 | 0.0 | 66.7 | 36.2 | 53.9 | 31.3 | 44.3 | 24.0 | 14.2 | 24.3
ADDA [44] | 65.6 | 0.0 | 0.0 | 21.0 | 1.3 | 2.8 | 1.3 | 16.7 | 64.7 | 1.2 | 35.4 | 0.0 | 66.5 | 41.8 | 57.2 | 32.6 | 42.2 | 23.3 | 26.4 | 26.3
Ent-Min [45] | 69.2 | 0.0 | 10.1 | 31.0 | 5.3 | 2.8 | 2.6 | 0.0 | 65.9 | 2.6 | 35.7 | 0.0 | 72.5 | 42.8 | 52.4 | 32.5 | 44.7 | 24.7 | 21.1 | 27.2
Self-training [63] | 71.5 | 0.0 | 10.3 | 33.1 | 7.4 | 5.9 | 1.3 | 0.0 | 65.1 | 6.5 | 36.6 | 0.0 | 67.8 | 41.3 | 51.7 | 32.9 | 42.9 | 25.1 | 25.0 | 27.6
CoSMix [37] | 65.0 | 1.7 | 22.1 | 25.2 | 7.7 | 33.2 | 0.0 | 0.0 | 64.7 | 11.5 | 31.1 | 0.9 | 62.5 | 37.8 | 44.6 | 30.5 | 41.1 | 30.9 | 28.6 | 28.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.219, + 0.893, + 0.248 + ], + "angle": 0, + "content": "Table 4. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI \\(\\rightarrow\\) SemanticSTF adaptation. SemanticKITTI serves as the source domain and the entire SemanticSTF including all four weather conditions serves as the target domain." + }, + { + "type": "table", + "bbox": [ + 0.099, + 0.271, + 0.447, + 0.359 + ], + "angle": 0, + "content": "
Method | Dense-fog | Light-fog | Rain | Snow
Source-Only | 26.9 | 25.2 | 27.7 | 23.5
ADDA [44] | 31.5 | 27.9 | 27.4 | 23.4
Ent-Min [45] | 31.4 | 28.6 | 30.3 | 24.9
Self-training [63] | 31.8 | 29.3 | 27.9 | 25.1
CoSMix [37] | 31.6 | 30.3 | 33.1 | 32.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.363, + 0.472, + 0.434 + ], + "angle": 0, + "content": "Table 5. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI \\(\\rightarrow\\) SemanticSTF adaptation for individual adverse weather conditions. We train a separate model for each weather-specific subset of SemanticSTF and evaluate the trained model on the weather condition it has been trained for." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.46, + 0.407, + 0.475 + ], + "angle": 0, + "content": "5.3. Network Models vs All-Weather 3DSS" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.483, + 0.47, + 0.832 + ], + "angle": 0, + "content": "We also study how different 3DSS network architectures generalize when they are trained with normal-weather point clouds and evaluated over SemanticSTF. Specifically, we select five representative 3DSS networks [9, 19, 41, 62] that have been widely adopted in 3D LiDAR segmentation studies. In the experiments, each selected network is first pre-trained with SemanticKITTI [2] and then evaluated over the validation set of SemanticSTF. We directly use the officially released code and the pre-trained weights for evaluation. Table 6 shows experimental results. We can observe that the five pre-trained models perform very differently though they all achieve superior segmentation over SemanticKITTI. Specifically, RandLA-Net [19], SPVCNN [41], and SPVNAS [41] perform clearly better than SalsaNext [9] and Cylinder3D [62]. In addition, none of the five pre-trained models perform well, verifying the clear domain discrepancy between point clouds of normal and adverse weather conditions. The experiments further indicate the great value of SemanticSTF in the future exploration of robust point cloud parsing under all weather conditions. In addition, the supervised performance of these 3DSS networks over SemanticSTF is provided in the appendix." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.846, + 0.307, + 0.862 + ], + "angle": 0, + "content": "6. Conclusion and Outlook" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "This paper presents SemanticSTF, a large-scale dataset and benchmark suite for semantic segmentation of LiDAR" + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.271, + 0.905, + 0.359 + ], + "angle": 0, + "content": "
3DSS Model | D-fog | L-fog | Rain | Snow | All
RandLA-Net [19] | 26.5 | 26.0 | 25.1 | 22.7 | 25.3
SalsaNext [9] | 16.0 | 9.6 | 7.8 | 3.5 | 9.1
SPVCNN [41] | 30.4 | 22.8 | 21.7 | 18.3 | 22.4
SPVNAS [41] | 25.5 | 18.3 | 17.0 | 13.0 | 18.0
Cylinder3D [62] | 14.8 | 7.4 | 5.7 | 4.0 | 7.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.365, + 0.895, + 0.42 + ], + "angle": 0, + "content": "Table 6. Performance of state-of-the-art 3DSS models that are pre-trained over SemanticKITTI and tested on validation set of SemanticSTF for individual weather conditions and jointly for all weather conditions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.893, + 0.574 + ], + "angle": 0, + "content": "point clouds under adverse weather conditions. SemanticSTF provides high-quality point-level annotations for point clouds captured under adverse weather including dense fog, light fog, snow and rain. Extensive studies have been conducted to examine how state-of-the-art 3DSS methods perform over SemanticSTF, demonstrating its significance in directing future research on domain adaptive and domain generalizable 3DSS under all-weather conditions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.578, + 0.893, + 0.73 + ], + "angle": 0, + "content": "We also design PointDR, a domain randomization technique that aims to use normal-weather point clouds to train a domain generalizable 3DSS model that can work well over adverse-weather point clouds. PointDR consists of two novel designs including geometry style randomization and embedding aggregation which jointly learn perturbation-invariant representations that generalize well to various new point-cloud domains. Extensive experiments show that PointDR achieves superior point cloud segmentation performance as compared with the state-of-the-art." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.752, + 0.661, + 0.769 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.894, + 0.903 + ], + "angle": 0, + "content": "This study is funded BY the Ministry of Education Singapore, under the Tier-1 scheme with project number RG18/22. It is also supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from Singapore Telecommunications Limited (Singtel), through Singtel Cognitive and Artificial Intelligence Lab for Enterprises (SCALE@NTU)." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9389" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta regularization. Advances in neural information processing systems, 31, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.174, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9297-9307, 2019. 1, 3, 4, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.26, + 0.472, + 0.342 + ], + "angle": 0, + "content": "[3] Mario Bijelic, Tobias Gruber, Fahim Mannan, Florian Kraus, Werner Ritter, Klaus Dietmayer, and Felix Heide. Seeing through fog without seeing fog: Deep multimodal sensor fusion in unseen adverse weather. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11682-11692, 2020. 1, 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.47, + 0.4 + ], + "angle": 0, + "content": "[4] Gilles Blanchard, Gyemin Lee, and Clayton Scott. Generalizing from several related classification tasks to a new unlabeled sample. Advances in neural information processing systems, 24, 2011. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.403, + 0.472, + 0.485 + ], + "angle": 0, + "content": "[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.488, + 0.47, + 0.569 + ], + "angle": 0, + "content": "[6] Ran Cheng, Ryan Razani, Ehsan Taghavi, Enxu Li, and Bingbing Liu. 2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12547-12556, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.573, + 0.47, + 0.642 + ], + "angle": 0, + "content": "[7] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.645, + 0.47, + 0.728 + ], + "angle": 0, + "content": "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3213-3223, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.731, + 0.47, + 0.785 + ], + "angle": 0, + "content": "[9] Tiago Cortinhal, George Tzelepis, and Eren Erdal Aksoy. Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In International Symposium on Visual Computing, pages 207-222. Springer, 2020. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[10] A Filgueira, H González-Jorge, Susana Lagtuela, L Díaz-Vilarino, and Pedro Arias. Quantifying the influence of rain in lidar performance. Measurement, 95:143-148, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[11] Whye Kit Fong, Rohit Mohan, Juana Valeria Hurtado, Lubing Zhou, Holger Caesar, Oscar Beijbom, and Abhinav Valada. Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters, 7(2):3795-3802, 2022. 1, 4" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[12] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.188 + ], + "angle": 0, + "content": "[13] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.191, + 0.892, + 0.258 + ], + "angle": 0, + "content": "[14] Dayan Guan, Jiaxing Huang, Aoran Xiao, and Shijian Lu. Domain adaptive video segmentation via temporal consistency regularization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8053-8064, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.261, + 0.892, + 0.329 + ], + "angle": 0, + "content": "[15] Martin Hahner, Christos Sakaridis, Mario Bijelic, Felix Heide, Fisher Yu, Dengxin Dai, and Luc Van Gool. Lidar snowfall simulation for robust 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16364-16374, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.331, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[16] Martin Hahner, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Fog simulation on real lidar point clouds for 3d object detection in adverse weather. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15283-15292, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.401, + 0.892, + 0.468 + ], + "angle": 0, + "content": "[17] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.47, + 0.892, + 0.537 + ], + "angle": 0, + "content": "[18] Robin Heinzler, Philipp Schindler, Jürgen Seekircher, Werner Ritter, and Wilhelm Stork. Weather influence and classification with automotive lidar sensors. In 2019 IEEE intelligent vehicles symposium (IV), pages 1527-1534. IEEE, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.54, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[19] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.691 + ], + "angle": 0, + "content": "[20] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Cross-view regularization for domain adaptive panoptic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10133-10144, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.693, + 0.892, + 0.76 + ], + "angle": 0, + "content": "[21] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Fsdr: Frequency space domain randomization for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6891-6902, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.763, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[22] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. 
Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[23] Jiaxing Huang, Dayan Guan, Aoran Xiao, Shijian Lu, and Ling Shao. Category contrast for unsupervised domain adaptation in visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1203-1214, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9390" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[24] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4893-4902, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.162, + 0.472, + 0.259 + ], + "angle": 0, + "content": "[25] Alexander Lehner, Stefano Gasperini, Alvaro Marcos-Ramiro, Michael Schmidt, Mohammad-Ali Nikouei Mahani, Nassir Navab, Benjamin Busam, and Federico Tombari. 3d-vfield: Adversarial augmentation of point clouds for domain generalization in 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17295-17304, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.472, + 0.315 + ], + "angle": 0, + "content": "[26] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5400-5409, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.316, + 0.472, + 0.357 + ], + "angle": 0, + "content": "[27] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.472, + 0.44 + ], + "angle": 0, + "content": "[28] Zhipeng Luo, Zhongang Cai, Changqing Zhou, Gongjie Zhang, Haiyu Zhao, Shuai Yi, Shijian Lu, Hongsheng Li, Shanghang Zhang, and Ziwei Liu. Unsupervised domain adaptive 3d detection with multi-level consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8866-8875, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.441, + 0.472, + 0.495 + ], + "angle": 0, + "content": "[29] Will Maddern, Geoffrey Pascoe, Chris Linegar, and Paul Newman. 1 year, \\(1000\\mathrm{km}\\): The oxford robotcar dataset. The International Journal of Robotics Research, 36(1):3-15, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.472, + 0.564 + ], + "angle": 0, + "content": "[30] Andres Milioto, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Rangenet++: Fast and accurate lidar semantic segmentation. In 2019 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 4213-4220. IEEE, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.566, + 0.472, + 0.622 + ], + "angle": 0, + "content": "[31] Krikamol Muandet, David Balduzzi, and Bernhard Scholkopf. Domain generalization via invariant feature representation. In International Conference on Machine Learning, pages 10-18. PMLR, 2013. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.472, + 0.69 + ], + "angle": 0, + "content": "[32] Thierry Peynot, James Underwood, and Steven Scheding. Towards reliable perception for unmanned ground vehicles in challenging conditions. In 2009 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1170-1176. IEEE, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.472, + 0.761 + ], + "angle": 0, + "content": "[33] Matthew Pitropov, Danson Evan Garcia, Jason Rebello, Michael Smart, Carlos Wang, Krzysztof Czarnecki, and Steven Waslander. Canadian adverse driving conditions dataset. The International Journal of Robotics Research, 40(4-5):681-690, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.472, + 0.829 + ], + "angle": 0, + "content": "[34] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.472, + 0.872 + ], + "angle": 0, + "content": "[35] Julian Ryde and Nick Hillier. Performance of laser and radar ranging devices in adverse environmental conditions. Journal of Field Robotics, 26(9):712-727, 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.472, + 0.901 + ], + "angle": 0, + "content": "[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for se" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "matic driving scene understanding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10765-10775, 2021. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[37] Cristiano Saltori, Fabio Galasso, Giuseppe Fiameni, Nicu Sebe, Elisa Ricci, and Fabio Poiesi. Cosmix: Compositional semantic mix for domain adaptation in 3d lidar segmentation. ECCV, 2022. 3, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.193, + 0.894, + 0.248 + ], + "angle": 0, + "content": "[38] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.25, + 0.892, + 0.333 + ], + "angle": 0, + "content": "[39] Peng Su, Kun Wang, Xingyu Zeng, Shixiang Tang, Dapeng Chen, Di Qiu, and Xiaogang Wang. Adapting object detectors with conditional domain normalization. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 403-419. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.335, + 0.892, + 0.419 + ], + "angle": 0, + "content": "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.421, + 0.892, + 0.49 + ], + "angle": 0, + "content": "[41] Haotian Tang, Zhijian Liu, Shengyu Zhao, Yujun Lin, Ji Lin, Hanrui Wang, and Song Han. Searching efficient 3d architectures with sparse point-voxel convolution. In European conference on computer vision, pages 685–702. Springer, 2020. 1, 2, 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.491, + 0.892, + 0.573 + ], + "angle": 0, + "content": "[42] Josh Tobin, Rachel Fong, Alex Ray, Jonas Schneider, Wojciech Zaremba, and Pieter Abbeel. Domain randomization for transferring deep neural networks from simulation to the real world. In 2017 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 23-30. IEEE, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.576, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[43] Jonathan Tremblay, Aayush Prakash, David Acuna, Mark Brophy, Varun Jampani, Cem Anil, Thang To, Eric Cameracci, Shaad Boochoon, and Stan Birchfield. Training deep networks with synthetic data: Bridging the reality gap by domain randomization. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 969-977, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.892, + 0.731 + ], + "angle": 0, + "content": "[44] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.733, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[45] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[46] Bichen Wu, Xuanyu Zhou, Sicheng Zhao, Xiangyu Yue, and Kurt Keutzer. Squeezesegv2: Improved model structure and unsupervised domain adaptation for road-object segmentation from a lidar point cloud. In 2019 International Conference on Robotics and Automation (ICRA), pages 4376-4382. IEEE, 2019. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "9391" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[47] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.164, + 0.472, + 0.219 + ], + "angle": 0, + "content": "[48] Aoran Xiao, Jiaxing Huang, Dayan Guan, Kaiwen Cui, Shijian Lu, and Ling Shao. Polarmix: A general data augmentation technique for lidar point clouds. NeurIPS, 2022. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.221, + 0.472, + 0.29 + ], + "angle": 0, + "content": "[49] Aoran Xiao, Jiaxing Huang, Dayan Guan, Fangneng Zhan, and Shijian Lu. Transfer learning from synthetic to real lidar point cloud for semantic segmentation. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2795-2803, 2022. 1, 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.292, + 0.472, + 0.359 + ], + "angle": 0, + "content": "[50] Aoran Xiao, Xiaofei Yang, Shijian Lu, Dayan Guan, and Ji-axing Huang. Fps-net: A convolutional fusion network for large-scale lidar point cloud segmentation. ISPRS Journal of Photogrammetry and Remote Sensing, 176:237–249, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.47, + 0.432 + ], + "angle": 0, + "content": "[51] Jianyun Xu, Ruixiang Zhang, Jian Dou, Yushi Zhu, Jie Sun, and Shiliang Pu. Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16024–16033, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.47, + 0.503 + ], + "angle": 0, + "content": "[52] Qiangeng Xu, Yin Zhou, Weiyue Wang, Charles R Qi, and Dragomir Anguelov. Spg: Unsupervised domain adaptation for 3d object detection via semantic point generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15446-15456, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.505, + 0.472, + 0.574 + ], + "angle": 0, + "content": "[53] Jihan Yang, Shaoshuai Shi, Zhe Wang, Hongsheng Li, and Xiaojuan Qi. St3d: Self-training for unsupervised domain adaptation on 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10368-10378, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.47, + 0.645 + ], + "angle": 0, + "content": "[54] Xufeng Yao, Yang Bai, Xinyun Zhang, Yuechen Zhang, Qi Sun, Ran Chen, Ruiyu Li, and Bei Yu. Pcl: Proxy-based contrastive learning for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7097-7107, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.47, + 0.716 + ], + "angle": 0, + "content": "[55] Li Yi, Boqing Gong, and Thomas Funkhouser. Complete & label: A domain adaptation approach to semantic segmentation of lidar point clouds. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15363-15373, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.47, + 0.8 + ], + "angle": 0, + "content": "[56] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2636-2645, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.47, + 0.858 + ], + "angle": 0, + "content": "[57] Feihu Zhang, Jin Fang, Benjamin Wah, and Philip Torr. Deep fusionnet for point cloud semantic segmentation. In European Conference on Computer Vision, pages 644-663. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[58] Weichen Zhang, Wen Li, and Dong Xu. Srdan: Scale-aware and range-aware domain adaptation network for cross-dataset 3d object detection. 
In Proceedings of the IEEE/CVF" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition, pages 6769-6779, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[59] Yang Zhang, Zixiang Zhou, Philip David, Xiangyu Yue, Zerong Xi, Boqing Gong, and Hassan Foroosh. Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.206, + 0.892, + 0.288 + ], + "angle": 0, + "content": "[60] Sicheng Zhao, Yezhen Wang, Bo Li, Bichen Wu, Yang Gao, Pengfei Xu, Trevor Darrell, and Kurt Keutzer. *epointda: An end-to-end simulation-to-real domain adaptation framework for lidar point cloud segmentation*. In *Proceedings of the AAAI Conference on Artificial Intelligence*, volume 35, pages 3500–3509, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.29, + 0.892, + 0.344 + ], + "angle": 0, + "content": "[61] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.346, + 0.892, + 0.429 + ], + "angle": 0, + "content": "[62] Xinge Zhu, Hui Zhou, Tai Wang, Fangzhou Hong, Yuexin Ma, Wei Li, Hongsheng Li, and Dahua Lin. Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9939-9948, 2021. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.431, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[63] Yang Zou, Zhiding Yu, Xiaofeng Liu, BVK Kumar, and Jinsong Wang. Confidence regularized self-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5982-5991, 2019. 
7, 8" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9392" + } + ] +] \ No newline at end of file diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_origin.pdf b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0870e57d57a85a23ea7a10180f303e6a8985d06e --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/a6bb8bb5-8301-40cc-afda-a77312b4139d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb861abe10689bab6cd2779dc48f71c500f1fefdc973ff0d466bf7c7c47f2272 +size 2350203 diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/full.md b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ec93612ad6c67ea749cd92d062976bcaecabeb2f --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/full.md @@ -0,0 +1,273 @@ +# 3D Semantic Segmentation in the Wild: Generalized Models for Adverse-Condition Point Clouds + +Aoran Xiao $^{1}$ , Jiaxing Huang $^{1}$ , Weihao Xuan $^{2}$ , Ruijie Ren $^{3}$ , Kangcheng Liu $^{1}$ +Dayan Guan $^{4}$ , Abdulmotaleb El Saddik $^{4,6}$ , Shijian Lu $^{1,\dagger}$ , Eric Xing $^{4,5}$ + +$^{1}$ Nanyang Technological University $^{2}$ Waseda University $^{3}$ Technical University of Denmark + +4Mohamed bin Zayed University of Artificial Intelligence + +$^{5}$ Carnegie Mellon University $^{6}$ University of Ottawa + +![](images/dd6573372529ff7b4b3604a3b40f1fbc61e55792c8263f1fc521bb1b5d55cf2f.jpg) +(a) A LiDAR scan captured in a snowy day + +![](images/dfe09935ad3ef25d96f760f5acfd7c0166ca105a3e08c48838dd43809ef71f78.jpg) +(b) Point-level annotations +Figure 1. We introduce SemanticSTF, an adverse-weather LiDAR point cloud dataset with dense point-level annotations that can be exploited for the study of point cloud semantic segmentation under all-weather conditions (including fog, snow, and rain). The graph on the left shows one scan sample captured on a snowy day, and the one on the right shows the corresponding point-level annotations. + +# Abstract + +Robust point cloud parsing under all-weather conditions is crucial to level-5 autonomy in autonomous driving. However, how to learn a universal 3D semantic segmentation (3DSS) model is largely neglected as most existing benchmarks are dominated by point clouds captured under normal weather. We introduce SemanticSTF, an adverse-weather point cloud dataset that provides dense point-level annotations and allows to study 3DSS under various adverse weather conditions. We study all-weather 3DSS modeling under two setups: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data; 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. 
Our studies reveal the challenges that existing 3DSS methods encounter when processing adverse-weather data, showing the great value of SemanticSTF in steering future endeavors along this very meaningful research direction. In addition, we design a domain randomization technique that alternately randomizes the geometry styles of point clouds and aggregates their embeddings, ultimately leading to a generalizable model that can improve 3DSS under various adverse weather effectively. SemanticSTF and the related code are available at https://github.com/xiaooran/SemanticSTF.

# 1. Introduction

3D LiDAR point clouds play an essential role in semantic scene understanding in various applications such as self-driving vehicles and autonomous drones. With the recent advance of LiDAR sensors, several LiDAR point cloud datasets [2, 11, 49] such as SemanticKITTI [2] have been proposed, which greatly advanced the research in 3D semantic segmentation (3DSS) [19, 41, 62] for the task of point cloud parsing. As of today, most existing point cloud datasets for outdoor scenes are dominated by point clouds captured under normal weather. However, 3D vision applications such as autonomous driving require reliable 3D perception under all-weather conditions, including various adverse weather such as fog, snow, and rain. How to learn a weather-tolerant 3DSS model is largely neglected due to the absence of related benchmark datasets.

Although several studies [3, 33] attempt to include adverse weather conditions in point cloud datasets, such as the STF dataset [3] that consists of LiDAR point clouds captured under various adverse weather, these efforts focus on object detection benchmarks and do not provide any point-wise annotations, which are critical in various tasks such as 3D semantic and instance segmentation. To address this gap, we introduce SemanticSTF, an adverse-weather point cloud dataset that extends the STF Detection Benchmark by providing point-wise annotations of 21 semantic categories, as illustrated in Fig. 1. Similar to STF, SemanticSTF captures four typical adverse weather conditions that are frequently encountered in autonomous driving, including dense fog, light fog, snow, and rain.

SemanticSTF provides a great benchmark for the study of 3DSS and robust point cloud parsing under adverse weather conditions. Beyond serving as a well-suited test bed for examining existing fully-supervised 3DSS methods that handle adverse-weather point cloud data, SemanticSTF can be further exploited to study two valuable weather-tolerant 3DSS scenarios: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data, and 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenges faced by existing 3DSS methods while processing adverse-weather point cloud data, highlighting the significant value of SemanticSTF in guiding future research efforts along this meaningful research direction.

In addition, we design PointDR, a new baseline framework for the future study and benchmarking of all-weather 3DSS. Our objective is to learn robust 3D representations that can reliably represent points of the same category across different weather conditions while remaining discriminative across categories. However, robust all-weather 3DSS poses two major challenges: 1) LiDAR point clouds are typically sparse, incomplete, and subject to substantial geometric variations and semantic ambiguity.
These challenges are further exacerbated under adverse weather conditions, with many missing points and geometric distortions due to fog, snow cover, etc. 2) More noise is introduced under adverse weather due to snowflakes, rain droplets, etc. PointDR addresses these challenges with two iterative operations: 1) Geometry style randomization that expands the geometry distribution of point clouds under various spatial augmentations; 2) Embedding aggregation that introduces contrastive learning to aggregate the encoded embeddings of the randomly augmented point clouds. Despite its simplicity, extensive experiments over point clouds of different adverse weather conditions show that PointDR achieves superior 3DSS generalization performance.

The contribution of this work can be summarized in three major aspects. First, we introduce SemanticSTF, a large-scale adverse-weather point cloud benchmark that provides high-quality point-wise annotations of 21 semantic categories. Second, we design PointDR, a point cloud domain randomization baseline that can be exploited for future study and benchmarking of 3DSS under all-weather conditions. Third, leveraging SemanticSTF, we benchmark existing 3DSS methods over two challenging tasks on domain adaptive 3DSS and domain generalized 3DSS. The benchmarking efforts lay a solid foundation for future research on this highly meaningful problem.

# 2. Related Works

3D semantic segmentation aims to assign point-wise semantic labels to point clouds. It has developed rapidly over the past few years, largely through the development of various deep neural networks (DNNs) such as standard convolutional networks for projection-based methods [9, 30, 46, 50, 59], multi-layer perceptron (MLP)-based networks [19, 34], 3D voxel convolution-based networks [7, 62], and hybrid networks [6, 27, 41, 51, 57]. While existing 3DSS networks are mainly evaluated over normal-weather point clouds, their performance on adverse-weather point clouds is far under-investigated. The proposed SemanticSTF closes this gap and provides a solid ground for the study and evaluation of all-weather 3DSS. By enabling investigations into various new research directions, SemanticSTF represents a valuable tool for advancing the field.

Vision recognition under adverse conditions. Scene understanding under adverse conditions has recently attracted increasing attention due to the strict safety demand in various outdoor navigation and perception tasks. In 2D vision, several large-scale datasets have been proposed to investigate perception tasks in adverse visual conditions, including localization [29], detection [56], and segmentation [36]. On the other hand, learning from 3D point clouds of adverse conditions is far under-explored due to the absence of comprehensive dataset benchmarks. The recently proposed datasets such as STF [3] and CADC [33] contain LiDAR point clouds captured under adverse weather conditions. However, these studies focus on the object detection task [15, 16] with bounding-box annotations, without providing any point-wise annotations. To the best of our knowledge, our SemanticSTF is the first large-scale dataset that consists of LiDAR point clouds in adverse weather conditions with high-quality dense annotations.

Domain generalization [4, 31] aims to learn a generalizable model from single or multiple related but distinct source domains where target data is inaccessible during model learning.
It has been widely studied in 2D computer vision tasks [1, 21, 26, 61] while few studies explore it in point cloud learning. Recently, [25] studies domain generalization for 3D object detection by deforming point clouds via vector fields. Differently, this work is the first attempt to explore domain generalization for 3DSS.

Unsupervised domain adaptation is a method of transferring knowledge learned from a labeled source domain to a target domain by leveraging the unlabeled target data. It has been widely studied in 2D image learning [12, 14, 20, 22-24] and 3D point clouds [15, 16, 28, 39, 52, 53, 58]. Recently, domain adaptive 3D LiDAR segmentation has drawn increasing attention due to the challenge of point-wise annotation. Different UDA approaches have been designed to mitigate discrepancies across LiDAR point clouds of different domains. For example, [46, 60] project point clouds into depth images and leverage 2D UDA techniques, while [37, 48, 49, 55] directly work in the 3D space. However, these methods either work for synthetic-to-real UDA scenarios [46, 49] or normal-to-normal point cloud adaptation [55], ignoring normal-to-adverse adaptation, which is highly practical in real applications. Our SemanticSTF dataset fills this gap and will inspire the development of new algorithms for normal-to-adverse adaptation.

# 3. The SemanticSTF Dataset

# 3.1. Background

LiDAR sensors send out laser pulses and measure their time of flight based on the echoes they receive from targets. The travel distance derived from the time of flight, combined with the registered angular information (between the LiDAR sensor and the targets), gives the 3D coordinates of the target surfaces, which form point clouds that capture the 3D shape of the targets. However, the active LiDAR pulse system can be easily affected by scattering media such as rain droplets and snow particles [10, 18, 32, 35], leading to shifts of measured distances, variation of echo intensity, missing points, etc. Hence, point clouds captured under adverse weather usually have a clear distribution discrepancy as compared with those collected under normal weather, as illustrated in Fig. 1. However, existing 3DSS benchmarks are dominated by normal-weather point clouds, which are insufficient for the study of universal 3DSS under all-weather conditions. To this end, we propose SemanticSTF, a point-wise annotated large-scale adverse-weather dataset that can be explored for the study of 3DSS and point cloud parsing under various adverse weather conditions.
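To make the time-of-flight geometry above concrete, the following is a minimal, hypothetical sketch (not part of the SemanticSTF toolkit) of how a single LiDAR return can be converted from a measured range and the registered beam angles into a 3D point; the function and variable names are illustrative assumptions.

```python
import math

def lidar_return_to_point(range_m: float, azimuth_deg: float, elevation_deg: float):
    """Convert one LiDAR return (range + beam angles) into Cartesian coordinates.

    range_m is derived from the pulse time of flight (range = c * t / 2);
    azimuth/elevation are the registered angles of the emitted beam.
    """
    az = math.radians(azimuth_deg)
    el = math.radians(elevation_deg)
    x = range_m * math.cos(el) * math.cos(az)
    y = range_m * math.cos(el) * math.sin(az)
    z = range_m * math.sin(el)
    return x, y, z

# Scattering media (fog, rain, snow) bias the measured time of flight or drop
# the echo entirely, which shifts or removes points computed this way.
c = 299_792_458.0          # speed of light in m/s
time_of_flight = 4.0e-7    # example echo delay in seconds
print(lidar_return_to_point(c * time_of_flight / 2.0, azimuth_deg=30.0, elevation_deg=-2.0))
```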
# 3.2. Data Selection and Split

We collect SemanticSTF by leveraging the STF benchmark [3], a multi-modal adverse-weather dataset that was jointly collected in Germany, Sweden, Denmark, and Finland. The data in STF have multiple modalities including LiDAR point clouds, and they are collected under various adverse weather conditions such as snow and fog. However, STF provides bounding-box annotations only, for the study of 3D detection tasks. In SemanticSTF, we manually selected 2,076 scans captured by a Velodyne HDL64 S3D LiDAR sensor from STF that cover various adverse weather conditions, including 694 snowy, 637 dense-foggy, 631 light-foggy, and 114 rainy scans (all rainy LiDAR scans in STF). During the selection, we pay special attention to the geographical diversity of the point clouds, aiming to minimize data redundancy. We ignore the factor of daytime/nighttime since LiDAR sensors are robust to lighting conditions.

We split SemanticSTF into three parts including 1,326 full 3D scans for training, 250 for validation, and 500 for testing. All three splits have approximately the same proportion of LiDAR scans of the different adverse weather conditions.

# 3.3. Data Annotation

Point-wise annotation of LiDAR point clouds is an extremely laborious task due to several factors, such as 3D view changes, inconsistency between point cloud display and human visual perception, sweeping occlusion, point sparsity, etc. However, point-wise annotation of adverse-weather point clouds is even more challenging due to two new factors. First, the perceived distance shifts under adverse weather often lead to various geometry distortions in the collected points, which make them different from those collected under normal weather. This presents significant challenges for annotators who must recognize various objects and assign a semantic label to each point. Second, LiDAR point clouds collected under adverse weather often contain a significant portion of invalid regions that consist of indiscernible semantic contents (e.g., thick snow cover) that make it difficult to identify the ground type. The existence of such invalid regions makes point-wise annotation even more challenging.

We designed a customized labeling pipeline to handle the annotation challenges while performing point-wise annotation of point clouds in SemanticSTF. Specifically, we first provide labeling instructions and demo annotations and train a team of professional annotators to provide point-wise annotations of a set of selected STF LiDAR scans. To achieve reliable high-quality annotations, the annotators leverage the corresponding 2D camera images and Google Street View as extra references while identifying the category of each point in this initial annotation process. After that, the annotators cross-check their initial annotations to identify and correct labeling errors. At the final stage, we engaged professional third parties who provide another round of annotation inspection and correction.

Annotation of SemanticSTF is a highly laborious and time-consuming task. For instance, while labeling downtown areas with the most complex scenery, it took an annotator an average of 4.3 hours to label a single LiDAR scan. Labeling a scan captured in relatively simpler scenery, such as a highway, also takes an average of 1.6 hours. An additional 30-60 minutes are required per scan for verification and correction by professional third parties. In total, annotating the entire SemanticSTF dataset took over 6,600 man-hours.

While annotating SemanticSTF, we adopted the same set of semantic classes as in the widely-studied semantic segmentation benchmark SemanticKITTI [2]. Specifically, we annotate the 19 evaluation classes of SemanticKITTI, which encompass most traffic-related objects in autonomous driving scenes. Additionally, following [36], we label points with indiscernible semantic contents caused by adverse weather (e.g., ground covered by snowdrifts) as invalid. Furthermore, we label points that do not belong to the 20 categories or are indistinguishable as ignored, which are not utilized in either training or evaluation. Detailed descriptions of each class can be found in the appendix.

![](images/37c17fa2b35ea99bbfcdceb124dba55eb60b3753b7e85a3e8d7da6bc109af458.jpg)
Figure 2. Number of annotated points per class in SemanticSTF.

# 3.4. Data Statistics
SemanticSTF consists of point-wise annotations of 21 semantic categories, and Fig. 2 shows the detailed statistics of the point-wise annotations. It can be seen that the classes road, sidewalk, building, vegetation, and terrain appear most frequently, whereas the classes motor, motorcyclist, and bicyclist have clearly lower occurrence frequency. Such class imbalance is largely attributed to the various object sizes and the unbalanced distribution of object categories in transportation scenes, and it is also very common in many existing benchmarks. Overall, the statistics and distribution of different object categories are similar to those of other 2D and 3D semantic segmentation benchmarks such as Cityscapes [8], ACDC [36], and SemanticKITTI [2].

To the best of our knowledge, SemanticSTF is the first large-scale adverse-weather 3DSS benchmark that provides high-quality point-wise annotations. Table 1 compares it with several existing point cloud datasets that have been widely adopted for the study of 3D detection and semantic segmentation. We can observe that existing datasets are either collected under normal weather conditions or collected for object detection studies with bounding-box annotations only. 3DSS benchmarks under adverse weather are largely missing, mainly due to the great challenge of point-wise annotation of adverse-weather point clouds as described in the previous subsections. In this sense, SemanticSTF fills this gap by providing a large-scale benchmark and test bed which will be very useful to future research in universal 3DSS under all weather conditions.

| Dataset | #Cls | Type | Annotation | Fog | Rain | Snow |
| --- | --- | --- | --- | --- | --- | --- |
| KITTI [13] | 8 | real | bounding box | ✗ | ✗ | ✗ |
| nuScenes [5] | 23 | real | bounding box | ✗ | ✗ | ✗ |
| Waymo [40] | 4 | real | bounding box | ✗ | ✗ | ✗ |
| STF [3] | 5 | real | bounding box | ✓ | ✓ | ✓ |
| SemanticKITTI [2] | 25 | real | point-wise | ✗ | ✗ | ✗ |
| nuScenes-LiDARSeg [11] | 32 | real | point-wise | ✗ | ✗ | ✗ |
| Waymo-LiDARSeg [40] | 21 | real | point-wise | ✗ | ✗ | ✗ |
| SynLiDAR [49] | 32 | synth. | point-wise | ✗ | ✗ | ✗ |
| SemanticSTF (ours) | 21 | real | point-wise | ✓ | ✓ | ✓ |

Table 1. Comparison of SemanticSTF against existing outdoor LiDAR benchmarks. #Cls denotes the number of classes.

# 3.5. Data Illustration

Fig. 3 provides examples of point cloud scans captured under adverse weather conditions in SemanticSTF (in row 1) as well as the corresponding annotations (in row 2). Compared with normal-weather point clouds, point clouds captured under adverse weather exhibit four distinct properties: 1) Snow coverage and snowflakes under snowy weather introduce many white points (labeled as "invalid") as illustrated in Fig. 3(a), and the thick snow coverage may lead to object deformation as well; 2) Rainy conditions may cause specular reflection of laser signals from water on the ground and produce many noise points as shown in Fig. 3(b); 3) Dense fog may greatly reduce the working range of LiDAR sensors, leading to a small spatial distribution of the collected LiDAR points as illustrated in Fig. 3(c); 4) Point clouds under light fog have similar characteristics as normal-weather point clouds as illustrated in Fig. 3(d). The distinct properties of point clouds under different adverse weather introduce different types of domain shift from normal-weather point clouds, which complicates 3DSS greatly as discussed in Section 5. They also verify the importance of developing universal 3DSS models that can perform well under all weather conditions.

# 4. Point Cloud Domain Randomization

Leveraging SemanticSTF, we explore domain generalization (DG) for semantic segmentation of LiDAR point clouds under all weather conditions. Specifically, we design PointDR, a domain randomization technique that helps to train a generalizable segmentation model from normal-weather point clouds that can work well for adverse-weather point clouds in SemanticSTF.

# 4.1. Problem Definition

Given labeled point clouds of a source domain $S = \{S_k = \{x_k, y_k\}\}_{k=1}^{K}$, where $x$ represents a LiDAR point cloud scan and $y$ denotes its point-wise semantic annotations, the goal of domain generalization is to learn a segmentation model $F$ by using the source-domain data only that can perform well on point clouds from an unseen target domain $\mathcal{T}$. We consider a 3D point cloud segmentation model $F$ that consists of a feature extractor $E$ and a classifier $G$. Note that under the domain generalization setup, target data are not accessed during training, as they could be hard or even impossible to acquire at the training stage.

# 4.2. Point Cloud Domain Randomization

Inspired by domain randomization studies in 2D computer vision research [42, 43], we explore how to employ domain randomization for learning domain generalizable models for point clouds. Specifically, we design PointDR, a point cloud randomization technique that consists of two complementary designs, namely geometry style randomization and embedding aggregation, as illustrated in Fig. 4.

![](images/a929f1221f0c6ee3a050568ecc05d1ceca8a6396fbc6d3a183b9eeabbf1b090b.jpg)
Figure 3. Examples of LiDAR point cloud scans captured under different adverse weather including snow, rain, dense fog, and light fog (the first row) and corresponding dense annotations in SemanticSTF (the second row).

![](images/d86e1b05e3aa8f63417fdc99559944bb2afe80eeb61768e3b7bf6ca0472bc7cd.jpg)
Figure 4. The framework of our point cloud randomization method (PointDR): Geometry style randomization creates different point cloud views with various spatial perturbations while embedding aggregation encourages the feature extractor to aggregate randomized point embeddings to learn perturbation-invariant representations, ultimately leading to a generalizable segmentation model.

Geometry style randomization aims to enrich the geometry styles and expand the distribution of the training point cloud data. Given a point-cloud scan $x$ as input, we apply weak and strong spatial augmentation to obtain two copies of $x$, namely a weak view $x^w = \mathcal{A}^W(x)$ and a strong view $x^s = \mathcal{A}^S(x)$. For the augmentation scheme of $\mathcal{A}^W$, we follow existing supervised learning methods [41] and adopt simple random rotation and random scaling. For the augmentation scheme of $\mathcal{A}^S$, we further adopt random dropout, random flipping, random noise perturbation, and random jittering on top of $\mathcal{A}^W$ to obtain a more diverse and complex copy of the input point cloud scan $x$.
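The following is a minimal sketch of how the weak and strong augmentation pipelines $\mathcal{A}^W$ and $\mathcal{A}^S$ described above could be composed; the specific ranges, probabilities, and point counts are illustrative assumptions rather than the released PointDR settings, and point labels would need to be masked consistently with any point dropout.

```python
import numpy as np

def weak_augment(points: np.ndarray) -> np.ndarray:
    """A^W: random rotation around the z-axis plus random global scaling."""
    theta = np.random.uniform(0.0, 2.0 * np.pi)
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    scale = np.random.uniform(0.95, 1.05)               # assumed scaling range
    return (points @ rot.T) * scale

def strong_augment(points: np.ndarray) -> np.ndarray:
    """A^S: weak augmentation followed by dropout, flipping, noise, and jittering."""
    pts = weak_augment(points)
    keep = np.random.rand(pts.shape[0]) > 0.1            # random point dropout (assumed ratio)
    pts = pts[keep]
    if np.random.rand() < 0.5:                           # random flip along the x-axis
        pts[:, 0] = -pts[:, 0]
    pts = pts + np.random.normal(0.0, 0.01, pts.shape)   # per-point jittering
    noise = np.random.uniform(pts.min(0), pts.max(0), (64, 3))  # injected noise points
    return np.concatenate([pts, noise], axis=0)

# Two randomized views of the same scan, as consumed by the embedding aggregation step.
scan = np.random.rand(1000, 3).astype(np.float32)
x_w, x_s = weak_augment(scan), strong_augment(scan)
```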
Embedding aggregation aims to aggregate the encoded embeddings of randomized point clouds for learning domain-invariant representations. We adopt contrastive learning [17] as illustrated in Fig. 4. Given the randomized point clouds $x^{w}$ and $x^{s}$, we first feed them into the feature extractor $E$ and a projector $\mathcal{P}$ (a two-layer MLP), which outputs normalized point feature embeddings $f^{w}$ and $f^{s}$, respectively ($f = \mathcal{P}(E(x))$). $\overline{f}_C^w \in \mathbb{R}^{D \times C}$ ($D$: feature dimension; $C$: number of semantic classes) is then derived by class-wise averaging of the feature embeddings $f^{w}$ in a batch, and is stored in a memory bank $\mathcal{B} \in \mathbb{R}^{D \times C}$ that receives no back-propagation and is momentum-updated across iterations (i.e., $\mathcal{B} \gets m \times \mathcal{B} + (1 - m) \times \overline{f}_C^w$ with a momentum coefficient $m$). Finally, we employ each point feature embedding $f_{i}^{s}$ of the strong view $f^{s}$ as the query and the feature embeddings in $\mathcal{B}$ as keys for contrastive learning, where the key sharing the same semantic class as the query is the positive key $\mathcal{B}_{+}$ and the rest are negative keys. The contrastive loss is defined as

$$
\mathcal{L}_{ct} = \frac{1}{N} \sum_{i=1}^{N} -\log \frac{\exp\left(f_{i}^{s} \mathcal{B}_{+} / \tau\right)}{\sum_{j=1}^{C} \exp\left(f_{i}^{s} \mathcal{B}_{j} / \tau\right)} \tag{1}
$$

where $\tau$ is a temperature hyper-parameter [47]. Note that there is no back-propagation for the "ignore" class in optimizing the contrastive loss.

Contrastive learning pulls point feature embeddings of the same class closer while pushing away point feature embeddings of different classes. Therefore, optimizing the proposed contrastive loss aggregates randomized point cloud features and learns perturbation-invariant representations, ultimately leading to a robust and generalizable segmentation model. The momentum-updated memory bank provides feature prototypes of each semantic class for more robust and stable contrastive learning.

Combining the supervised cross-entropy loss $\mathcal{L}_{ce}$ on the weakly-augmented point clouds with the contrastive loss in Eq. 1, the overall training objective of PointDR is formulated as

$$
\mathcal{L}_{\text{PointDR}} = \mathcal{L}_{ce} + \lambda_{ct} \mathcal{L}_{ct} \tag{2}
$$
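As a concrete illustration of the embedding aggregation step, below is a minimal PyTorch-style sketch of the class-prototype memory bank and the contrastive loss of Eq. 1; the class names, tensor shapes, and hyper-parameter values ($m$, $\tau$, $\lambda_{ct}$) are assumptions for illustration, not the reference implementation.

```python
import torch
import torch.nn.functional as F

class PrototypeBank:
    """Momentum-updated memory bank of per-class prototypes (D x C), no gradients."""

    def __init__(self, dim: int, num_classes: int, momentum: float = 0.99):
        self.bank = torch.zeros(dim, num_classes)
        self.m = momentum

    @torch.no_grad()
    def update(self, feats_w: torch.Tensor, labels: torch.Tensor) -> None:
        # feats_w: (N, D) normalized weak-view point embeddings; labels: (N,)
        for c in labels.unique():
            proto = feats_w[labels == c].mean(dim=0)          # class-wise average in the batch
            self.bank[:, c] = self.m * self.bank[:, c] + (1.0 - self.m) * proto

def contrastive_loss(feats_s: torch.Tensor, labels: torch.Tensor,
                     bank: torch.Tensor, tau: float = 0.1) -> torch.Tensor:
    # Eq. 1: strong-view point embeddings are queries, class prototypes are keys;
    # the prototype of each point's own class is its positive key.
    # Points of the "ignore" class should be masked out before calling this.
    logits = feats_s @ bank / tau           # (N, C) similarity to every prototype
    return F.cross_entropy(logits, labels)  # -log softmax picked at the positive class

# Overall objective (Eq. 2): supervised loss on the weak view plus the contrastive term.
# loss = ce_loss + lambda_ct * contrastive_loss(f_s, y, prototype_bank.bank)
```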
| Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | D-fog | L-fog | Rain | Snow | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 51.9 | 54.6 | 57.9 | 53.7 | 54.7 |
| **SemanticKITTI → SemanticSTF** |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| Baseline | 55.9 | 0.0 | 0.2 | 1.9 | 10.9 | 10.3 | 6.0 | 0.0 | 61.2 | 10.9 | 32.0 | 0.0 | 67.9 | 41.6 | 49.8 | 27.9 | 40.8 | 29.6 | 17.5 | 29.5 | 26.0 | 28.4 | 21.4 | 24.4 |
| Dropout [38] | 62.1 | 0.0 | 15.5 | 3.0 | 11.5 | 5.4 | 2.0 | 0.0 | 58.4 | 12.8 | 26.7 | 1.1 | 72.1 | 43.6 | 52.9 | 34.2 | 43.5 | 28.4 | 15.5 | 29.3 | 25.6 | 29.4 | 24.8 | 25.7 |
| Perturbation | 74.4 | 0.0 | 0.0 | 23.3 | 0.6 | 19.7 | 0.0 | 0.0 | 60.3 | 10.8 | 33.9 | 0.7 | 72.0 | 45.2 | 58.7 | 17.5 | 42.4 | 22.1 | 9.7 | 26.3 | 27.8 | 30.0 | 24.5 | 25.9 |
| PolarMix [48] | 57.8 | 1.8 | 3.8 | 16.7 | 3.7 | 26.5 | 0.0 | 2.0 | 65.7 | 2.9 | 32.5 | 0.3 | 71.0 | 48.7 | 53.8 | 20.5 | 45.4 | 25.9 | 15.8 | 29.7 | 25.0 | 28.6 | 25.6 | 26.0 |
| MMD [26] | 63.6 | 0.0 | 2.6 | 0.1 | 11.4 | 28.1 | 0.0 | 0.0 | 67.0 | 14.1 | 37.9 | 0.3 | 67.3 | 41.2 | 57.1 | 27.4 | 47.9 | 28.2 | 16.2 | 30.4 | 28.1 | 32.8 | 25.2 | 26.9 |
| PCL [54] | 65.9 | 0.0 | 0.0 | 17.7 | 0.4 | 8.4 | 0.0 | 0.0 | 59.6 | 12.0 | 35.0 | 1.6 | 74.0 | 47.5 | 60.7 | 15.8 | 48.9 | 26.1 | 27.5 | 28.9 | 27.6 | 30.1 | 24.6 | 26.4 |
| PointDR (Ours) | 67.3 | 0.0 | 4.5 | 19.6 | 9.0 | 18.8 | 2.7 | 0.0 | 62.6 | 12.9 | 38.1 | 0.6 | 73.3 | 43.8 | 56.4 | 32.2 | 45.7 | 28.7 | 27.4 | 31.3 | 29.7 | 31.9 | 26.2 | 28.6 |
| **SynLiDAR → SemanticSTF** |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| Baseline | 27.1 | 3.0 | 0.6 | 15.8 | 0.1 | 25.2 | 1.8 | 5.6 | 23.9 | 0.3 | 14.6 | 0.6 | 36.3 | 19.9 | 37.9 | 17.9 | 41.8 | 9.5 | 2.3 | 16.9 | 17.2 | 17.2 | 11.9 | 15.0 |
| Dropout [38] | 28.0 | 3.0 | 1.4 | 9.6 | 0.0 | 17.1 | 0.8 | 0.7 | 34.2 | 6.8 | 19.1 | 0.1 | 35.5 | 19.1 | 42.3 | 17.6 | 36.0 | 14.0 | 2.8 | 15.3 | 16.6 | 20.4 | 14.0 | 15.2 |
| Perturbation | 27.1 | 2.3 | 2.3 | 16.0 | 0.1 | 23.7 | 1.2 | 4.0 | 27.0 | 3.6 | 16.2 | 0.8 | 29.2 | 16.7 | 35.3 | 22.7 | 38.3 | 17.9 | 5.1 | 16.3 | 16.7 | 19.3 | 13.4 | 15.2 |
| PolarMix [48] | 39.2 | 1.1 | 1.2 | 8.3 | 1.5 | 17.8 | 0.8 | 0.7 | 23.3 | 1.3 | 17.5 | 0.4 | 45.2 | 24.8 | 46.2 | 20.1 | 38.7 | 7.6 | 1.9 | 16.1 | 15.5 | 19.2 | 15.6 | 15.7 |
| MMD [26] | 25.5 | 2.3 | 2.1 | 13.2 | 0.7 | 22.1 | 1.4 | 7.5 | 30.8 | 0.4 | 17.6 | 0.2 | 30.9 | 19.7 | 37.6 | 19.3 | 43.5 | 9.9 | 2.6 | 17.3 | 16.3 | 20.0 | 12.7 | 15.1 |
| PCL [54] | 30.9 | 0.8 | 1.4 | 10.0 | 0.4 | 23.3 | 4.0 | 7.9 | 28.5 | 1.3 | 17.7 | 1.2 | 39.4 | 18.5 | 40.0 | 16.0 | 38.6 | 12.1 | 2.3 | 17.8 | 16.7 | 19.3 | 14.1 | 15.5 |
| PointDR (Ours) | 37.8 | 2.5 | 2.4 | 23.6 | 0.1 | 26.3 | 2.2 | 3.3 | 27.9 | 7.7 | 17.5 | 0.5 | 47.6 | 25.3 | 45.7 | 21.0 | 37.5 | 17.9 | 5.5 | 19.5 | 19.9 | 21.1 | 16.9 | 18.5 |

Table 2. Experiments on domain generalization with SemanticKITTI [2] or SynLiDAR [49] as source and SemanticSTF as target.

# 5. Evaluation of Semantic Segmentation

SemanticSTF can be adopted for benchmarking different learning setups and network architectures on point cloud segmentation. We perform experiments over two typical learning setups including domain generalization and unsupervised domain adaptation. In addition, we evaluate several state-of-the-art point-cloud segmentation networks to examine their generalization capabilities.

# 5.1. Domain Generalization

We first study domain generalizable point cloud segmentation. For DG, we can only access an annotated source domain during training and the trained model is expected to generalize well to unseen target domains. Leveraging SemanticSTF, we build two DG benchmarks and examine how PointDR helps learn a universal 3DSS model that can work under different weather conditions.

The first benchmark is SemanticKITTI [2] $\rightarrow$ SemanticSTF, where SemanticKITTI is a large-scale real-world 3DSS dataset collected under normal weather conditions. This benchmark serves as a solid testing ground for evaluating domain generalization performance from normal to adverse weather conditions. The second benchmark is SynLiDAR [49] $\rightarrow$ SemanticSTF, where SynLiDAR is a large-scale synthetic 3DSS dataset. The motivation of this benchmark is that learning a universal 3DSS model from synthetic point clouds that can work well across adverse weather is of high research and application value considering the challenges in point cloud collection and annotation. Note that this benchmark is more challenging as the domain discrepancy comes from both the normal-to-adverse weather distribution shift and the synthetic-to-real distribution shift.

Setup. We use all 19 evaluation classes of SemanticKITTI in both domain generalization benchmarks. The category invalid in SemanticSTF is mapped to ignored since SemanticKITTI and SynLiDAR do not cover this category. We adopt MinkowskiNet [7] (with the TorchSparse library [41]) as the backbone model, which is a sparse convolutional network that provides state-of-the-art performance with decent efficiency. We adopt the evaluation metrics of Intersection over Union (IoU) for each segmentation class and the mean IoU (mIoU) over all classes. All experiments are run on a single NVIDIA 2080Ti (11GB). More implementation details are provided in the appendix.

Baseline Methods. Since domain generalizable 3DSS is far under-explored, there are few existing baselines that can be directly adopted for benchmarking. We thus select two closely related approaches as baselines to evaluate the proposed PointDR. The first approach is data augmentation, and we select three related augmentation methods: Dropout [38], which randomly drops out points to simulate missing LiDAR points in adverse weather; Noise perturbation, which adds random points in the 3D space to simulate noise points introduced by particles like falling snow; and PolarMix [48], which mixes point clouds of different sources for augmentation. The second approach is to adapt 2D domain generalization methods for 3DSS. We select two 2D domain generalization methods including the widely studied MMD [26] and the recently proposed PCL [54].
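Before turning to the results, the following is a minimal sketch of the per-class IoU and mIoU metrics described in the setup above; the ignore index and array layout are assumptions chosen only for illustration.

```python
import numpy as np

def per_class_iou(pred: np.ndarray, gt: np.ndarray, num_classes: int, ignore_index: int = 255):
    """Per-class IoU = TP / (TP + FP + FN) over all labeled points; mIoU is their mean."""
    valid = gt != ignore_index                      # drop 'ignored' points from evaluation
    pred, gt = pred[valid], gt[valid]
    ious = []
    for c in range(num_classes):
        tp = np.sum((pred == c) & (gt == c))
        fp = np.sum((pred == c) & (gt != c))
        fn = np.sum((pred != c) & (gt == c))
        denom = tp + fp + fn
        ious.append(tp / denom if denom > 0 else float("nan"))
    return np.array(ious)

# Toy usage with random predictions over the 19 evaluation classes.
pred = np.random.randint(0, 19, 10000)
gt = np.random.randint(0, 19, 10000)
print("mIoU:", np.nanmean(per_class_iou(pred, gt, num_classes=19)))
```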
Results. Table 2 shows experimental results over the validation set of SemanticSTF. For both benchmarks, the Baseline is a source-only model that is trained by using the training data of SemanticKITTI or SynLiDAR. We can see that the Baseline achieves very low mIoU when evaluated over the validation set of SemanticSTF, indicating the large domain discrepancy between point clouds of normal and adverse weather conditions. In addition, all three data augmentation methods improve the model generalization consistently, but the performance gains are limited, especially for the challenging benchmark SynLiDAR $\rightarrow$ SemanticSTF. The two 2D generalization methods both help SemanticKITTI $\rightarrow$ SemanticSTF clearly but show very limited improvement over SynLiDAR $\rightarrow$ SemanticSTF. The proposed PointDR achieves the best generalization consistently across both benchmarks, demonstrating its superior capability to learn perturbation-invariant point cloud representations and its effectiveness in handling all-weather 3DSS tasks.

We also evaluate the compared domain generalization methods over each individual adverse weather condition, as shown in Table 2. It can be observed that the three data augmentation methods work for data captured in rainy and snowy weather only. The 2D generalization method MMD shows clear effectiveness for point clouds under dense fog and rain, while PCL works for point clouds under rainy and snowy weather instead. We conjecture that the performance variations are largely attributed to the different properties of point clouds captured under different weather conditions. For example, more points are missing in rain while object points often deform due to the covering snow (more illustrations are provided in the appendix). Such data variations lead to different domain discrepancies across weather conditions, which further leads to the different performance of the compared methods. As PointDR learns perturbation-tolerant representations, it works effectively across different adverse weather conditions. We also provide qualitative results; please refer to the appendix for details.

Ablation study. We study different PointDR designs to examine how they contribute to the overall generalization performance. As Table 3 shows, we report three models over the benchmark "SemanticKITTI $\rightarrow$ SemanticSTF": 1) Baseline that is trained with $\mathcal{L}_{ce}$; 2) PointDR-CT that is jointly trained with $\mathcal{L}_{ce}$ and $\mathcal{L}_{ct}$ without using the memory bank $\mathcal{B}$; 3) the complete PointDR that is trained with $\mathcal{L}_{ce}$, $\mathcal{L}_{ct}$, and the memory bank $\mathcal{B}$.

| Method | $\mathcal{L}_{ce}$ | $\mathcal{L}_{ct}$ | $\mathcal{B}$ | mIoU |
| --- | --- | --- | --- | --- |
| Baseline | ✓ |  |  | 24.4 |
| PointDR-CT | ✓ | ✓ |  | 27.4 |
| PointDR | ✓ | ✓ | ✓ | 28.6 |

Table 3. Ablation study of PointDR over the domain generalized segmentation task SemanticKITTI $\rightarrow$ SemanticSTF.

We evaluate the three models over the validation set of SemanticSTF and Table 3 shows the experimental results. We can see that the Baseline performs poorly at $24.4\%$ due to the clear domain discrepancy between point clouds of normal weather and adverse weather. Leveraging the proposed contrastive loss, PointDR-CT achieves clearly better performance at $27.4\%$, indicating that learning perturbation-invariance is helpful for universal LiDAR segmentation under all-weather conditions. On top of that, introducing the momentum-updated memory bank $\mathcal{B}$ further improves the segmentation performance to $28.6\%$.
This is because the feature embeddings in $\mathcal{B}$ serve as class prototypes that help the optimization of the segmentation network, finally leading to more robust 3DSS representations that perform better over adverse-weather point clouds.

# 5.2. Domain Adaptation

We also study SemanticSTF over a domain adaptive point cloud segmentation benchmark, SemanticKITTI $\rightarrow$ SemanticSTF. Specifically, we select four representative UDA methods including ADDA [44], entropy minimization (Ent-Min) [45], self-training [63], and CoSMix [37] for adaptation from the source SemanticKITTI [2] toward the target SemanticSTF. Following the state-of-the-art [37, 48, 49] on synthetic-to-real adaptation, we adopt MinkowskiNet [7] as the segmentation backbone for all compared methods. Table 4 shows experimental results over the validation set of SemanticSTF. We can see that all UDA methods outperform the Source-only model consistently under the normal-to-adverse adaptation setup. On the other hand, the performance gains are still quite limited, showing the large room for improvement in domain adaptive 3DSS from normal to adverse weather conditions.

In addition, we examined the adaptability of the four UDA methods with respect to each individual adverse weather condition. Specifically, we trained each of the four methods for adaptation from SemanticKITTI to the SemanticSTF data of each adverse weather condition. Table 5 shows the experimental results over the validation set of SemanticSTF. We can see that all four methods outperform the Source-only model under Dense-fog and Light-fog, demonstrating their effectiveness in mitigating domain discrepancies. However, for Rain and Snow, only CoSMix achieved clear performance gains while the other three UDA methods brought marginal improvements. We conjecture that snow and rain introduce large deformations of object surfaces and much noise, making adaptation from normal to adverse weather more challenging. CoSMix works in the input space by directly mixing source and target points, allowing it to perform better under heavy snow and rain which have larger domain gaps. However, all methods achieved relatively low segmentation performance, indicating the significance of our research and the large room for improvement in our constructed benchmarks.
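For reference, the entropy-minimization baseline (Ent-Min) adapts a model by penalizing uncertain predictions on unlabeled target scans; below is a minimal sketch of that idea, with the tensor shapes and weighting factor chosen only for illustration and not tied to the exact configuration of [45].

```python
import torch
import torch.nn.functional as F

def entropy_minimization_loss(target_logits: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Mean per-point Shannon entropy of the softmax predictions on target data.

    target_logits: (N, C) raw class scores for N unlabeled target points.
    Minimizing this term pushes target-domain predictions to be confident.
    """
    p = F.softmax(target_logits, dim=1)
    entropy = -(p * torch.log(p + eps)).sum(dim=1)   # (N,)
    return entropy.mean()

# Typical UDA objective: supervised loss on labeled source points plus a weighted
# entropy term on unlabeled target points (weight is an assumed example value).
# loss = ce_loss_source + 0.001 * entropy_minimization_loss(logits_target)
```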
| Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 54.7 |
| Source-only | 64.8 | 0.0 | 0.0 | 13.8 | 1.8 | 5.0 | 2.1 | 0.0 | 62.7 | 7.5 | 34.0 | 0.0 | 66.7 | 36.2 | 53.9 | 31.3 | 44.3 | 24.0 | 14.2 | 24.3 |
| ADDA [44] | 65.6 | 0.0 | 0.0 | 21.0 | 1.3 | 2.8 | 1.3 | 16.7 | 64.7 | 1.2 | 35.4 | 0.0 | 66.5 | 41.8 | 57.2 | 32.6 | 42.2 | 23.3 | 26.4 | 26.3 |
| Ent-Min [45] | 69.2 | 0.0 | 10.1 | 31.0 | 5.3 | 2.8 | 2.6 | 0.0 | 65.9 | 2.6 | 35.7 | 0.0 | 72.5 | 42.8 | 52.4 | 32.5 | 44.7 | 24.7 | 21.1 | 27.2 |
| Self-training [63] | 71.5 | 0.0 | 10.3 | 33.1 | 7.4 | 5.9 | 1.3 | 0.0 | 65.1 | 6.5 | 36.6 | 0.0 | 67.8 | 41.3 | 51.7 | 32.9 | 42.9 | 25.1 | 25.0 | 27.6 |
| CoSMix [37] | 65.0 | 1.7 | 22.1 | 25.2 | 7.7 | 33.2 | 0.0 | 0.0 | 64.7 | 11.5 | 31.1 | 0.9 | 62.5 | 37.8 | 44.6 | 30.5 | 41.1 | 30.9 | 28.6 | 28.4 |
+ +Table 4. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI $\rightarrow$ SemanticSTF adaptation. SemanticKITTI serves as the source domain and the entire SemanticSTF including all four weather conditions serves as the target domain. + +
| Method | Dense-fog | Light-fog | Rain | Snow |
| --- | --- | --- | --- | --- |
| Source-Only | 26.9 | 25.2 | 27.7 | 23.5 |
| ADDA [44] | 31.5 | 27.9 | 27.4 | 23.4 |
| Ent-Min [45] | 31.4 | 28.6 | 30.3 | 24.9 |
| Self-training [63] | 31.8 | 29.3 | 27.9 | 25.1 |
| CoSMix [37] | 31.6 | 30.3 | 33.1 | 32.9 |
+ +# 5.3. Network Models vs All-Weather 3DSS + +We also study how different 3DSS network architectures generalize when they are trained with normal-weather point clouds and evaluated over SemanticSTF. Specifically, we select five representative 3DSS networks [9, 19, 41, 62] that have been widely adopted in 3D LiDAR segmentation studies. In the experiments, each selected network is first pre-trained with SemanticKITTI [2] and then evaluated over the validation set of SemanticSTF. We directly use the officially released code and the pre-trained weights for evaluation. Table 6 shows experimental results. We can observe that the five pre-trained models perform very differently though they all achieve superior segmentation over SemanticKITTI. Specifically, RandLA-Net [19], SPVCNN [41], and SPVNAS [41] perform clearly better than SalsaNext [9] and Cylinder3D [62]. In addition, none of the five pre-trained models perform well, verifying the clear domain discrepancy between point clouds of normal and adverse weather conditions. The experiments further indicate the great value of SemanticSTF in the future exploration of robust point cloud parsing under all weather conditions. In addition, the supervised performance of these 3DSS networks over SemanticSTF is provided in the appendix. + +# 6. Conclusion and Outlook + +This paper presents SemanticSTF, a large-scale dataset and benchmark suite for semantic segmentation of LiDAR + +Table 5. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI $\rightarrow$ SemanticSTF adaptation for individual adverse weather conditions. We train a separate model for each weather-specific subset of SemanticSTF and evaluate the trained model on the weather condition it has been trained for. + +
3DSS ModelD-fogL-fogRainSnowAll
RandLA-Net [19]26.526.025.122.725.3
SalsaNext [9]16.09.67.83.59.1
SPVCNN [41]30.422.821.718.322.4
SPVNAS [41]25.518.317.013.018.0
Cylinder3D [62]14.87.45.74.07.3
+ +Table 6. Performance of state-of-the-art 3DSS models that are pre-trained over SemanticKITTI and tested on validation set of SemanticSTF for individual weather conditions and jointly for all weather conditions. + +point clouds under adverse weather conditions. SemanticSTF provides high-quality point-level annotations for point clouds captured under adverse weather including dense fog, light fog, snow and rain. Extensive studies have been conducted to examine how state-of-the-art 3DSS methods perform over SemanticSTF, demonstrating its significance in directing future research on domain adaptive and domain generalizable 3DSS under all-weather conditions. + +We also design PointDR, a domain randomization technique that aims to use normal-weather point clouds to train a domain generalizable 3DSS model that can work well over adverse-weather point clouds. PointDR consists of two novel designs including geometry style randomization and embedding aggregation which jointly learn perturbation-invariant representations that generalize well to various new point-cloud domains. Extensive experiments show that PointDR achieves superior point cloud segmentation performance as compared with the state-of-the-art. + +# Acknowledgement + +This study is funded BY the Ministry of Education Singapore, under the Tier-1 scheme with project number RG18/22. It is also supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from Singapore Telecommunications Limited (Singtel), through Singtel Cognitive and Artificial Intelligence Lab for Enterprises (SCALE@NTU). + +# References + +[1] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta regularization. Advances in neural information processing systems, 31, 2018. 2 +[2] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9297-9307, 2019. 1, 3, 4, 6, 7, 8 +[3] Mario Bijelic, Tobias Gruber, Fahim Mannan, Florian Kraus, Werner Ritter, Klaus Dietmayer, and Felix Heide. Seeing through fog without seeing fog: Deep multimodal sensor fusion in unseen adverse weather. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11682-11692, 2020. 1, 2, 3, 4 +[4] Gilles Blanchard, Gyemin Lee, and Clayton Scott. Generalizing from several related classification tasks to a new unlabeled sample. Advances in neural information processing systems, 24, 2011. 2 +[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 4 +[6] Ran Cheng, Ryan Razani, Ehsan Taghavi, Enxu Li, and Bingbing Liu. 2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12547-12556, 2021. 2 +[7] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 6, 7 +[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3213-3223, 2016. 4 +[9] Tiago Cortinhal, George Tzelepis, and Eren Erdal Aksoy. Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In International Symposium on Visual Computing, pages 207-222. Springer, 2020. 2, 8 +[10] A Filgueira, H González-Jorge, Susana Lagtuela, L Díaz-Vilarino, and Pedro Arias. Quantifying the influence of rain in lidar performance. Measurement, 95:143-148, 2017. 3 +[11] Whye Kit Fong, Rohit Mohan, Juana Valeria Hurtado, Lubing Zhou, Holger Caesar, Oscar Beijbom, and Abhinav Valada. Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters, 7(2):3795-3802, 2022. 1, 4 + +[12] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2 +[13] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 4 +[14] Dayan Guan, Jiaxing Huang, Aoran Xiao, and Shijian Lu. Domain adaptive video segmentation via temporal consistency regularization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8053-8064, 2021. 2 +[15] Martin Hahner, Christos Sakaridis, Mario Bijelic, Felix Heide, Fisher Yu, Dengxin Dai, and Luc Van Gool. Lidar snowfall simulation for robust 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16364-16374, 2022. 2 +[16] Martin Hahner, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Fog simulation on real lidar point clouds for 3d object detection in adverse weather. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15283-15292, 2021. 2 +[17] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 5 +[18] Robin Heinzler, Philipp Schindler, Jürgen Seekircher, Werner Ritter, and Wilhelm Stork. Weather influence and classification with automotive lidar sensors. In 2019 IEEE intelligent vehicles symposium (IV), pages 1527-1534. IEEE, 2019. 3 +[19] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1, 2, 8 +[20] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Cross-view regularization for domain adaptive panoptic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10133-10144, 2021. 2 +[21] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Fsdr: Frequency space domain randomization for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6891-6902, 2021. 
2 +[22] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2 +[23] Jiaxing Huang, Dayan Guan, Aoran Xiao, Shijian Lu, and Ling Shao. Category contrast for unsupervised domain adaptation in visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1203-1214, 2022. 2 + +[24] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4893-4902, 2019. 2 +[25] Alexander Lehner, Stefano Gasperini, Alvaro Marcos-Ramiro, Michael Schmidt, Mohammad-Ali Nikouei Mahani, Nassir Navab, Benjamin Busam, and Federico Tombari. 3d-vfield: Adversarial augmentation of point clouds for domain generalization in 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17295-17304, 2022. 2 +[26] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5400-5409, 2018. 2, 6 +[27] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-voxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2 +[28] Zhipeng Luo, Zhongang Cai, Changqing Zhou, Gongjie Zhang, Haiyu Zhao, Shuai Yi, Shijian Lu, Hongsheng Li, Shanghang Zhang, and Ziwei Liu. Unsupervised domain adaptive 3d detection with multi-level consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8866-8875, 2021. 2 +[29] Will Maddern, Geoffrey Pascoe, Chris Linegar, and Paul Newman. 1 year, 1000 km: The oxford robotcar dataset. The International Journal of Robotics Research, 36(1):3-15, 2017. 2 +[30] Andres Milioto, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Rangenet++: Fast and accurate lidar semantic segmentation. In 2019 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 4213-4220. IEEE, 2019. 2 +[31] Krikamol Muandet, David Balduzzi, and Bernhard Schölkopf. Domain generalization via invariant feature representation. In International Conference on Machine Learning, pages 10-18. PMLR, 2013. 2 +[32] Thierry Peynot, James Underwood, and Steven Scheding. Towards reliable perception for unmanned ground vehicles in challenging conditions. In 2009 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1170-1176. IEEE, 2009. 3 +[33] Matthew Pitropov, Danson Evan Garcia, Jason Rebello, Michael Smart, Carlos Wang, Krzysztof Czarnecki, and Steven Waslander. Canadian adverse driving conditions dataset. The International Journal of Robotics Research, 40(4-5):681-690, 2021. 1, 2 +[34] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2 +[35] Julian Ryde and Nick Hillier. Performance of laser and radar ranging devices in adverse environmental conditions. Journal of Field Robotics, 26(9):712-727, 2009. 3 +[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for semantic driving scene understanding.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10765-10775, 2021. 2, 3, 4 +[37] Cristiano Saltori, Fabio Galasso, Giuseppe Fiameni, Nicu Sebe, Elisa Ricci, and Fabio Poiesi. Cosmix: Compositional semantic mix for domain adaptation in 3d lidar segmentation. ECCV, 2022. 3, 7, 8 +[38] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014. 6 +[39] Peng Su, Kun Wang, Xingyu Zeng, Shixiang Tang, Dapeng Chen, Di Qiu, and Xiaogang Wang. Adapting object detectors with conditional domain normalization. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 403-419. Springer, 2020. 2 +[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 4 +[41] Haotian Tang, Zhijian Liu, Shengyu Zhao, Yujun Lin, Ji Lin, Hanrui Wang, and Song Han. Searching efficient 3d architectures with sparse point-voxel convolution. In European conference on computer vision, pages 685–702. Springer, 2020. 1, 2, 5, 6, 8 +[42] Josh Tobin, Rachel Fong, Alex Ray, Jonas Schneider, Wojciech Zaremba, and Pieter Abbeel. Domain randomization for transferring deep neural networks from simulation to the real world. In 2017 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 23-30. IEEE, 2017. 4 +[43] Jonathan Tremblay, Aayush Prakash, David Acuna, Mark Brophy, Varun Jampani, Cem Anil, Thang To, Eric Cameracci, Shaad Boochoon, and Stan Birchfield. Training deep networks with synthetic data: Bridging the reality gap by domain randomization. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 969-977, 2018. 4 +[44] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 7, 8 +[45] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 7, 8 +[46] Bichen Wu, Xuanyu Zhou, Sicheng Zhao, Xiangyu Yue, and Kurt Keutzer. Squeezesegv2: Improved model structure and unsupervised domain adaptation for road-object segmentation from a lidar point cloud. In 2019 International Conference on Robotics and Automation (ICRA), pages 4376-4382. IEEE, 2019. 2, 3 + +[47] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 5 +[48] Aoran Xiao, Jiaxing Huang, Dayan Guan, Kaiwen Cui, Shijian Lu, and Ling Shao. Polarmix: A general data augmentation technique for lidar point clouds. NeurIPS, 2022. 3, 6, 7 +[49] Aoran Xiao, Jiaxing Huang, Dayan Guan, Fangneng Zhan, and Shijian Lu. Transfer learning from synthetic to real lidar point cloud for semantic segmentation. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2795-2803, 2022. 1, 3, 4, 6, 7 +[50] Aoran Xiao, Xiaofei Yang, Shijian Lu, Dayan Guan, and Jiaxing Huang. Fps-net: A convolutional fusion network for large-scale lidar point cloud segmentation. ISPRS Journal of Photogrammetry and Remote Sensing, 176:237-249, 2021. 2 +[51] Jianyun Xu, Ruixiang Zhang, Jian Dou, Yushi Zhu, Jie Sun, and Shiliang Pu. Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16024-16033, 2021. 2 +[52] Qiangeng Xu, Yin Zhou, Weiyue Wang, Charles R Qi, and Dragomir Anguelov. Spg: Unsupervised domain adaptation for 3d object detection via semantic point generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15446-15456, 2021. 2 +[53] Jihan Yang, Shaoshuai Shi, Zhe Wang, Hongsheng Li, and Xiaojuan Qi. St3d: Self-training for unsupervised domain adaptation on 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10368-10378, 2021. 2 +[54] Xufeng Yao, Yang Bai, Xinyun Zhang, Yuechen Zhang, Qi Sun, Ran Chen, Ruiyu Li, and Bei Yu. Pcl: Proxy-based contrastive learning for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7097-7107, 2022. 6 +[55] Li Yi, Boqing Gong, and Thomas Funkhouser. Complete & label: A domain adaptation approach to semantic segmentation of lidar point clouds. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15363-15373, 2021. 3 +[56] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2636-2645, 2020. 2 +[57] Feihu Zhang, Jin Fang, Benjamin Wah, and Philip Torr. Deep fusionnet for point cloud semantic segmentation. In European Conference on Computer Vision, pages 644-663. Springer, 2020. 2 +[58] Weichen Zhang, Wen Li, and Dong Xu. Srdan: Scale-aware and range-aware domain adaptation network for cross-dataset 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6769-6779, 2021. 2 +[59] Yang Zhang, Zixiang Zhou, Philip David, Xiangyu Yue, Zerong Xi, Boqing Gong, and Hassan Foroosh. Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2020. 2 +[60] Sicheng Zhao, Yezhen Wang, Bo Li, Bichen Wu, Yang Gao, Pengfei Xu, Trevor Darrell, and Kurt Keutzer. Epointda: An end-to-end simulation-to-real domain adaptation framework for lidar point cloud segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 3500-3509, 2021. 2 +[61] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2 +[62] Xinge Zhu, Hui Zhou, Tai Wang, Fangzhou Hong, Yuexin Ma, Wei Li, Hongsheng Li, and Dahua Lin. Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9939-9948, 2021.
1, 2, 8 +[63] Yang Zou, Zhiding Yu, Xiaofeng Liu, BVK Kumar, and Jinsong Wang. Confidence regularized self-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5982-5991, 2019. 7, 8 \ No newline at end of file diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/images.zip b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..7c78358f024509fa11d4d390a026d46091b11bc9 --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d989e2d02ad655c7ae6a4f726351595bb1fb6fbcab98b01a9c60685b7835e28 +size 542733 diff --git a/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/layout.json b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..db0456aa849fc2149376a2eaefb2f0b50736d650 --- /dev/null +++ b/2023/3D Semantic Segmentation in the Wild_ Learning Generalized Models for Adverse-Condition Point Clouds/layout.json @@ -0,0 +1,7774 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 174, + 83, + 501, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 83, + 501, + 118 + ], + "spans": [ + { + "bbox": [ + 174, + 83, + 501, + 118 + ], + "type": "text", + "content": "3D Semantic Segmentation in the Wild: Generalized Models for Adverse-Condition Point Clouds" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "spans": [ + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": "Aoran Xiao" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Jiaxing Huang" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Weihao Xuan" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Ruijie Ren" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Kangcheng Liu" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": " \nDayan Guan" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Abdulmotaleb El Saddik" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{4,6}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Shijian Lu" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": 
"inline_equation", + "content": "^{1,\\dagger}" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "text", + "content": ", Eric Xing" + }, + { + "bbox": [ + 113, + 125, + 479, + 155 + ], + "type": "inline_equation", + "content": "^{4,5}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "spans": [ + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "text", + "content": "Nanyang Technological University " + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "text", + "content": "Waseda University " + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 77, + 155, + 516, + 168 + ], + "type": "text", + "content": "Technical University of Denmark" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 157, + 168, + 436, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 168, + 436, + 182 + ], + "spans": [ + { + "bbox": [ + 157, + 168, + 436, + 182 + ], + "type": "text", + "content": "4Mohamed bin Zayed University of Artificial Intelligence" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "spans": [ + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "type": "text", + "content": "Carnegie Mellon University " + }, + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 171, + 182, + 419, + 196 + ], + "type": "text", + "content": "University of Ottawa" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 50, + 210, + 290, + 320 + ], + "blocks": [ + { + "bbox": [ + 50, + 210, + 290, + 320 + ], + "lines": [ + { + "bbox": [ + 50, + 210, + 290, + 320 + ], + "spans": [ + { + "bbox": [ + 50, + 210, + 290, + 320 + ], + "type": "image", + "image_path": "dd6573372529ff7b4b3604a3b40f1fbc61e55792c8263f1fc521bb1b5d55cf2f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 323, + 241, + 332 + ], + "lines": [ + { + "bbox": [ + 107, + 323, + 241, + 332 + ], + "spans": [ + { + "bbox": [ + 107, + 323, + 241, + 332 + ], + "type": "text", + "content": "(a) A LiDAR scan captured in a snowy day" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 291, + 211, + 543, + 320 + ], + "blocks": [ + { + "bbox": [ + 291, + 211, + 543, + 320 + ], + "lines": [ + { + "bbox": [ + 291, + 211, + 543, + 320 + ], + "spans": [ + { + "bbox": [ + 291, + 211, + 543, + 320 + ], + "type": "image", + "image_path": "dfe09935ad3ef25d96f760f5acfd7c0166ca105a3e08c48838dd43809ef71f78.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 323, + 461, + 331 + ], + "lines": [ + { + "bbox": [ + 378, + 323, + 461, + 331 + ], + "spans": [ + { + "bbox": [ + 378, + 323, + 461, + 331 + ], + "type": "text", + "content": "(b) Point-level annotations" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + 
{ + "bbox": [ + 45, + 335, + 546, + 369 + ], + "lines": [ + { + "bbox": [ + 45, + 335, + 546, + 369 + ], + "spans": [ + { + "bbox": [ + 45, + 335, + 546, + 369 + ], + "type": "text", + "content": "Figure 1. We introduce SemanticSTF, an adverse-weather LiDAR point cloud dataset with dense point-level annotations that can be exploited for the study of point cloud semantic segmentation under all-weather conditions (including fog, snow, and rain). The graph on the left shows one scan sample captured on a snowy day, and the one on the right shows the corresponding point-level annotations." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 379, + 192, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 379, + 192, + 392 + ], + "spans": [ + { + "bbox": [ + 143, + 379, + 192, + 392 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 404, + 289, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 404, + 289, + 681 + ], + "spans": [ + { + "bbox": [ + 46, + 404, + 289, + 681 + ], + "type": "text", + "content": "Robust point cloud parsing under all-weather conditions is crucial to level-5 autonomy in autonomous driving. However, how to learn a universal 3D semantic segmentation (3DSS) model is largely neglected as most existing benchmarks are dominated by point clouds captured under normal weather. We introduce SemanticSTF, an adverse-weather point cloud dataset that provides dense point-level annotations and allows to study 3DSS under various adverse weather conditions. We study all-weather 3DSS modeling under two setups: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data; 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenge while existing 3DSS methods encounter adverse-weather data, showing the great value of SemanticSTF in steering the future endeavor along this very meaningful research direction. In addition, we design a domain randomization technique that alternatively randomizes the geometry styles of point clouds and aggregates their embeddings, ultimately leading to a generalizable model that can improve 3DSS under various adverse weather effectively. The SemanticSTF and related codes are available at https://github.com/xiaooran/SemanticSTF." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 379, + 387, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 379, + 387, + 392 + ], + "spans": [ + { + "bbox": [ + 307, + 379, + 387, + 392 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 400, + 547, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 400, + 547, + 579 + ], + "spans": [ + { + "bbox": [ + 304, + 400, + 547, + 579 + ], + "type": "text", + "content": "3D LiDAR point clouds play an essential role in semantic scene understanding in various applications such as self-driving vehicles and autonomous drones. With the recent advance of LiDAR sensors, several LiDAR point cloud datasets [2, 11, 49] such as SemanticKITTI [2] have been proposed which greatly advanced the research in 3D semantic segmentation (3DSS) [19, 41, 62] for the task of point cloud parsing. 
As of today, most existing point cloud datasets for outdoor scenes are dominated by point clouds captured under normal weather. However, 3D vision applications such as autonomous driving require reliable 3D perception under all-weather conditions including various adverse weather such as fog, snow, and rain. How to learn a weather-tolerant 3DSS model is largely neglected due to the absence of related benchmark datasets." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "content": "Although several studies [3, 33] attempt to include adverse weather conditions in point cloud datasets, such as the STF dataset [3] that consists of LiDAR point clouds captured under various adverse weather, these efforts focus on object detection benchmarks and do not provide any pointwise annotations which are critical in various tasks such as 3D semantic and instance segmentation. To address this gap, we introduce SemanticSTF, an adverse-weather point cloud dataset that extends the STF Detection Benchmark by providing point-wise annotations of 21 semantic categories, as illustrated in Fig. 1. Similar to STF, SemanticSTF cap" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 702, + 141, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 702, + 141, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 702, + 141, + 712 + ], + "type": "text", + "content": "† Corresponding author" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9382" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 109 + ], + "type": "text", + "content": "tures four typical adverse weather conditions that are frequently encountered in autonomous driving including dense fog, light fog, snow, and rain." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 118, + 288, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 118, + 288, + 286 + ], + "spans": [ + { + "bbox": [ + 46, + 118, + 288, + 286 + ], + "type": "text", + "content": "SemanticSTF provides a great benchmark for the study of 3DSS and robust point cloud parsing under adverse weather conditions. Beyond serving as a well-suited test bed for examining existing fully-supervised 3DSS methods that handle adverse-weather point cloud data, SemanticSTF can be further exploited to study two valuable weather-tolerant 3DSS scenarios: 1) domain adaptive 3DSS that adapts from normal-weather data to adverse-weather data, and 2) domain generalizable 3DSS that learns all-weather 3DSS models from normal-weather data. Our studies reveal the challenges faced by existing 3DSS methods while processing adverse-weather point cloud data, highlighting the significant value of SemanticSTF in guiding future research efforts along this meaningful research direction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 297, + 288, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 288, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 288, + 559 + ], + "type": "text", + "content": "In addition, we design PointDR, a new baseline framework for the future study and benchmarking of all-weather 3DSS. Our objective is to learn robust 3D representations that can reliably represent points of the same category across different weather conditions while remaining discriminative across categories. However, robust all-weather 3DSS poses two major challenges: 1) LiDAR point clouds are typically sparse, incomplete, and subject to substantial geometric variations and semantic ambiguity. These challenges are further exacerbated under adverse weather conditions, with many missing points and geometric distortions due to fog, snow cover, etc. 2) More noises are introduced under adverse weather due to snow flicks, rain droplets, etc. PointDR addresses the challenges with two iterative operations: 1) Geometry style randomization that expands the geometry distribution of point clouds under various spatial augmentations; 2) Embedding aggregation that introduces contrastive learning to aggregate the encoded embeddings of the randomly augmented point clouds. Despite its simplicity, extensive experiments over point clouds of different adverse weather conditions show that PointDR achieves superior 3DSS generalization performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 714 + ], + "type": "text", + "content": "The contribution of this work can be summarized in three major aspects. First, we introduce SemanticSTF, a large-scale adverse-weather point cloud benchmark that provides high-quality point-wise annotations of 21 semantic categories. Second, we design PointDR, a point cloud domain randomization baseline that can be exploited for future study and benchmarking of 3DSS under all-weather conditions. Third, leveraging SemanticSTF, we benchmark existing 3DSS methods over two challenging tasks on domain adaptive 3DSS and domain generalized 3DSS. The benchmarking efforts lay a solid foundation for future research on this highly meaningful problem." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 398, + 84 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 91, + 545, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 270 + ], + "type": "text", + "content": "3D semantic segmentation aims to assign point-wise semantic labels for point clouds. It has been developed rapidly over the past few years, largely through the development of various deep neural networks (DNNs) such as standard convolutional network for projection-based methods [9, 30, 46, 50, 59], multi-layer perceptron (MLP)-based networks [19, 34, 34], 3D voxel convolution-based networks [7, 62], or hybrid networks [6, 27, 41, 51, 57]. While existing 3DSS networks are mainly evaluated over normal weather point clouds, their performance for adverse weather point clouds is far under-investigated. The proposed SemanticSTF closes the gap and provides a solid ground for the study and evaluation of all-weather 3DSS. By enabling investigations into various new research directions, SemanticSTF represents a valuable tool for advancing the field." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 270, + 545, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 270, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 304, + 270, + 545, + 485 + ], + "type": "text", + "content": "Vision recognition under adverse conditions. Scene understanding under adverse conditions has recently attracted increasing attention due to the strict safety demand in various outdoor navigation and perception tasks. In 2D vision, several large-scale datasets have been proposed to investigate perceptions tasks in adverse visual conditions including localization [29], detection [56], and segmentation [36]. On the other hand, learning 3D point clouds of adverse conditions is far under-explored due to the absence of comprehensive dataset benchmarks. The recently proposed datasets such as STF [3] and CADC [33] contain LiDAR point clouds captured under adverse weather conditions. However, these studies focus on the object detection task [15, 16] with bounding-box annotations, without providing any point-wise annotations. Our introduced SemanticSTF is the first large-scale dataset that consists of LiDAR point clouds in adverse weather conditions with high-quality dense annotations, to the best of our knowledge." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 486, + 545, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 486, + 545, + 593 + ], + "spans": [ + { + "bbox": [ + 304, + 486, + 545, + 593 + ], + "type": "text", + "content": "Domain generalization [4,31] aims to learn a generalizable model from single or multiple related but distinct source domains where target data is inaccessible during model learning. It has been widely studied in 2D computer vision tasks [1, 21, 26, 61] while few studies explore it in point cloud learning. Recently, [25] studies domain generalization for 3D object detection by deforming point clouds via vector fields. Differently, this work is the first attempt that explores domain generalization for 3DSS." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "Unsupervised domain adaptation is a method of transferring knowledge learned from a labeled source domain to a target domain by leveraging the unlabeled target data. It has been widely studied in 2D image learning [12,14,20,22-24] and 3D point clouds [15, 16, 28, 39, 52, 53, 58]. Recently, domain adaptive 3D LiDAR segmentation has drawn increasing attention due to the challenge in point-wise annotation. Different UDA approaches have been designed to mitigate discrepancies across LiDAR point clouds of different domains. For example, [46, 60] project point" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9383" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": "clouds into depth images and leverage 2D UDA techniques while [37, 48, 49, 55] directly work in the 3D space. However, these methods either work for synthetic-to-real UDA scenarios [46, 49] or normal-to-normal point cloud adaptation [55], ignoring normal-to-adverse adaptation which is highly practical in real applications. Our SemanticSTF dataset fills up this blank and will inspire more development of new algorithms for normal-to-adverse adaptation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 180, + 197, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 197, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 197, + 192 + ], + "type": "text", + "content": "3. The SemanticSTF Dataset" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 201, + 128, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 201, + 128, + 213 + ], + "spans": [ + { + "bbox": [ + 47, + 201, + 128, + 213 + ], + "type": "text", + "content": "3.1. Background" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 219, + 288, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 219, + 288, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 219, + 288, + 460 + ], + "type": "text", + "content": "LiDAR sensors send out laser pulses and measure their flight time based on the echoes it receives from targets. The travel distance as derived from the time-of-flight and the registered angular information (between the LiDAR sensors and the targets) can be combined to compute the 3D coordinates of target surface which form point clouds that capture the 3D shape of the targets. However, the active LiDAR pulse system can be easily affected by the scattering media such as particles of rain droplets and snow [10, 18, 32, 35], leading to shifts of measured distances, variation of echo intensity, point missing, etc. 
Hence, point clouds captured under adverse weather usually have clear distribution discrepancy as compared with those collected under normal weather as illustrated in Fig. 1. However, existing 3DSS benchmarks are dominated by normal-weather point clouds which are insufficient for the study of universal 3DSS under all-weather conditions. To this end, we propose SemanticSTF, a point-wise annotated large-scale adverse-weather dataset that can be explored for the study of 3DSS and point cloud parsing under various adverse weather conditions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 467, + 183, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 183, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 183, + 480 + ], + "type": "text", + "content": "3.2. Data Selection and Split" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 288, + 715 + ], + "type": "text", + "content": "We collect SemanticSTF by leveraging the STF benchmark [3], a multi-modal adverse-weather dataset that was jointly collected in Germany, Sweden, Denmark, and Finland. The data in STF have multiple modalities including LiDAR point clouds and they are collected under various adverse weather conditions such as snow and fog. However, STF provides bounding-box annotations only for the study of 3D detection tasks. In SemanticSTF, we manually selected 2,076 scans captured by a Velodyne HDL64 S3D LiDAR sensor from STF that cover various adverse weather conditions including 694 snowy, 637 dense-foggy, 631 light-foggy, and 114 rainy (all rainy LiDAR scans in STF). During the selection, we pay special attention to the geographical diversity of the point clouds aiming for minimizing data redundancy. We ignore the factor of daytime/nighttime since LiDAR sensors are robust to lighting conditions. We split SemanticSTF into three parts including 1,326 full 3D scans for training, 250 for validating, and 500 for testing. All three splits have approximately the same" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 72, + 538, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 538, + 84 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 538, + 84 + ], + "type": "text", + "content": "proportion of LiDAR scans of different adverse weathers." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 94, + 408, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 94, + 408, + 105 + ], + "spans": [ + { + "bbox": [ + 306, + 94, + 408, + 105 + ], + "type": "text", + "content": "3.3. Data Annotation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 113, + 545, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 113, + 545, + 328 + ], + "spans": [ + { + "bbox": [ + 304, + 113, + 545, + 328 + ], + "type": "text", + "content": "Point-wise annotation of LiDAR point clouds is an extremely laborious task due to several factors, such as 3D view changes, inconsistency between point cloud display and human visual perception, sweeping occlusion, point sparsity, etc. However, point-wise annotating of adverse-weather point clouds is even more challenging due to two new factors. 
First, the perceived distance shifts under adverse weather often lead to various geometry distortions in the collected points which make them different from those collected under normal weather. This presents significant challenges for annotators who must recognize various objects and assign a semantic label to each point. Second, LiDAR point clouds collected under adverse weather often contain a significant portion of invalid regions that consist of indiscernible semantic contents (e.g., thick snow cover) that make it difficult to identify the ground type. The existence of such invalid regions makes point-wise annotation even more challenging." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 329, + 545, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 545, + 496 + ], + "type": "text", + "content": "We designed a customized labeling pipeline to handle the annotation challenges while performing point-wise annotation of point clouds in SemanticSTF. Specifically, we first provide labeling instructions and demo annotations and train a team of professional annotators to provide pointwise annotations of a set of selected STF LiDAR scans. To achieve reliable high-quality annotations, the annotators leverage the corresponding 2D camera images and Google Street views as extra references while identifying the category of each point in this initial annotation process. After that, the annotators cross-check their initial annotations for identifying and correcting labeling errors. At the final stage, we engaged professional third parties who provide another round of annotation inspection and correction." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 497, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 545, + 616 + ], + "type": "text", + "content": "Annotation of SemanticSTF is a highly laborious and time-consuming task. For instance, while labeling downtown areas with the most complex scenery, it took an annotator an average of 4.3 hours to label a single LiDAR scan. Labeling a scan captured in a relatively simpler scenery, such as a highway, also takes an average of 1.6 hours. In addition, an additional 30-60 minutes are required per scan for verification and correction by professional third parties. In total, annotating the entire SemanticSTF dataset takes over 6,600 man-hours." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": "While annotating SemanticSTF, we adopted the same set of semantic classes as in the widely-studied semantic segmentation benchmark, SemanticKITTI [2]. Specifically, we annotate the 19 evaluation classes of SemanticKITTI, which encompass most traffic-related objects in autonomous driving scenes. Additionally, following [36], we label points with indiscernible semantic contents caused by adverse weather (e.g. ground covered by snowdrifts) as invalid. 
Fur" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9384" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 282, + 112 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 282, + 112 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 282, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 282, + 112 + ], + "type": "image", + "image_path": "37c17fa2b35ea99bbfcdceb124dba55eb60b3753b7e85a3e8d7da6bc109af458.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 120, + 284, + 132 + ], + "lines": [ + { + "bbox": [ + 50, + 120, + 284, + 132 + ], + "spans": [ + { + "bbox": [ + 50, + 120, + 284, + 132 + ], + "type": "text", + "content": "Figure 2. Number of annotated points per class in SemanticSTF." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 154, + 287, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 154, + 287, + 202 + ], + "spans": [ + { + "bbox": [ + 47, + 154, + 287, + 202 + ], + "type": "text", + "content": "thermore, we label points that do not belong to the 20 categories or are indistinguishable as ignored, which are not utilized in either training or evaluations. Detailed descriptions of each class can be found in the appendix." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 211, + 138, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 211, + 138, + 223 + ], + "spans": [ + { + "bbox": [ + 47, + 211, + 138, + 223 + ], + "type": "text", + "content": "3.4. Data Statistics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 230, + 287, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 230, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 230, + 287, + 386 + ], + "type": "text", + "content": "SemanticSTF consists of point-wise annotations of 21 semantic categories, and Fig. 2 shows the detailed statistics of the point-wise annotations. It can be seen that classes road, sidewalk, building, vegetation, and terrain appear most frequently whereas classes motor, motorcyclist, and bicyclist have clearly lower occurrence frequency. Such class imbalance is largely attributed to the various object sizes and unbalanced distribution of object categories in transportation scenes, and it is also very common in many existing benchmarks. Overall, the statistics and distribution of different object categories are similar to that of other 2D and 3D semantic segmentation benchmarks such as Cityscapes [8], ACDC [36], and SemanticKITTI [2]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 386, + 288, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 288, + 566 + ], + "type": "text", + "content": "To the best of our knowledge, SemanticSTF is the first large-scale adverse-weather 3DSS benchmark that provides high-quality point-wise annotations. Table 1 compares it with several existing point cloud datasets that have been widely adopted for the study of 3D detection and semantic segmentation. 
We can observe that existing datasets are either collected under normal weather conditions or collected for object detection studies with bounding-box annotations only. 3DSS benchmark under adverse weather is largely blank, mainly due to the great challenge in point-wise annotations of adverse-weather point clouds as described in previous subsections. From this sense, SemanticSTF fills up this blank by providing a large-scale benchmark and test bed which will be very useful to future research in universal 3DSS under all weather conditions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 574, + 146, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 146, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 146, + 586 + ], + "type": "text", + "content": "3.5. Data illustration" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "content": "Fig. 3 provides examples of point cloud scans captured under adverse weather conditions in SemanticSTF (in row 1) as well as the corresponding annotations (in row 2). Compared with normal-weather point clouds, point clouds captured under adverse weather exhibit four distinct properties: 1) Snow coverage and snowflakes under snowy weather introduce many white points (labeled as “invalid”) as illustrated in Fig. 3(a). The thick snow coverage may lead to object deformation as well; Rainy conditions may cause specular reflection of laser signals from water on the ground" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 545, + 169 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 545, + 169 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 169 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 169 + ], + "type": "table", + "html": "
Dataset#ClsTypeAnnotationFogRainSnow
KITTI [13]8realbounding boxXXX
nuScenes [5]23realbounding boxXXX
Waymo [40]4realbounding boxXXX
STF [3]5realbounding box
SemanticKITTI [2]25realpoint-wiseXXX
nuScenes-LiDARSeg [11]32realpoint-wiseXXX
Waymo-LiDARSeg [40]21realpoint-wiseXXX
SynLiDAR [49]32synth.point-wiseXXX
SemanticSTF (ours)21realpoint-wise
", + "image_path": "16eb34ed027cc607142f09ef6a21d7f4dd7ac60674f1bf7b1c1f9fec7863ea7d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 177, + 545, + 198 + ], + "lines": [ + { + "bbox": [ + 305, + 177, + 545, + 198 + ], + "spans": [ + { + "bbox": [ + 305, + 177, + 545, + 198 + ], + "type": "text", + "content": "Table 1. Comparison of SemanticSTF against existing outdoor LiDAR benchmarks. #Cls means the class number." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 220, + 545, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 220, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 304, + 220, + 545, + 364 + ], + "type": "text", + "content": "and produce many noise points as shown in Fig.3(b); 3) Dense fog may greatly reduce the working range of LiDAR sensors, leading to small spatial distribution of the collected LiDAR points as illustrated in Fig. 3(c); 4) Point clouds under light fog have similar characteristics as normal-weather point clouds as illustrated in Fig. 3(d). The distinct properties of point clouds under different adverse weather introduce different types of domain shift from normal-weather point clouds which complicate 3DSS greatly as discussed in Section 5. They also verify the importance of developing universal 3DSS models that can perform well under all weather conditions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 376, + 507, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 376, + 507, + 388 + ], + "spans": [ + { + "bbox": [ + 305, + 376, + 507, + 388 + ], + "type": "text", + "content": "4. Point Cloud Domain Randomization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 396, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 545, + 479 + ], + "type": "text", + "content": "Leveraging SemanticSTF, we explore domain generalization (DG) for semantic segmentation of LiDAR point clouds under all weather conditions. Specifically, we design PointDR, a domain randomization technique that helps to train a generalizable segmentation model from normal-weather point clouds that can work well for adverse-weather point clouds in SemanticSTF." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 488, + 418, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 488, + 418, + 500 + ], + "spans": [ + { + "bbox": [ + 306, + 488, + 418, + 500 + ], + "type": "text", + "content": "4.1. 
Problem Definition" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": "Given labeled point clouds of a source domain " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "S = \\{S_{k} = \\{x_{k},y_{k}\\} \\}_{k = 1}^{K}" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " represents a LiDAR point cloud scan and " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " denotes its point-wise semantic annotations, the goal of domain generalization is to learn a segmentation model " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " by using the source-domain data only that can perform well on point clouds from an unseen target domain " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": ". We consider a 3D point cloud segmentation model " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " that consists of a feature extractor " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": " and a classifier " + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 506, + 545, + 639 + ], + "type": "text", + "content": ". Note under the setup of domain generalization, target data will not be accessed in training as they could be hard and even impossible to acquire at the training stage." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 647, + 499, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 499, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 499, + 658 + ], + "type": "text", + "content": "4.2. Point Cloud Domain Randomization" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Inspired by domain randomization studies in 2D computer vision research [42, 43], we explore how to employ domain randomization for learning domain generalizable models for point clouds. 
Specifically, we design PointDR," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9385" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 77, + 542, + 259 + ], + "blocks": [ + { + "bbox": [ + 51, + 77, + 542, + 259 + ], + "lines": [ + { + "bbox": [ + 51, + 77, + 542, + 259 + ], + "spans": [ + { + "bbox": [ + 51, + 77, + 542, + 259 + ], + "type": "image", + "image_path": "a929f1221f0c6ee3a050568ecc05d1ceca8a6396fbc6d3a183b9eeabbf1b090b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 262, + 546, + 285 + ], + "lines": [ + { + "bbox": [ + 46, + 262, + 546, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 262, + 546, + 285 + ], + "type": "text", + "content": "Figure 3. Examples of LiDAR point cloud scans captured under different adverse weather including snow, rain, dense fog, and light fog (the first row) and corresponding dense annotations in SemanticSTF (the second row)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 297, + 282, + 427 + ], + "blocks": [ + { + "bbox": [ + 53, + 297, + 282, + 427 + ], + "lines": [ + { + "bbox": [ + 53, + 297, + 282, + 427 + ], + "spans": [ + { + "bbox": [ + 53, + 297, + 282, + 427 + ], + "type": "image", + "image_path": "d86e1b05e3aa8f63417fdc99559944bb2afe80eeb61768e3b7bf6ca0472bc7cd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 432, + 287, + 498 + ], + "lines": [ + { + "bbox": [ + 46, + 432, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 432, + 287, + 498 + ], + "type": "text", + "content": "Figure 4. The framework of our point cloud randomization method (PointDR): Geometry style randomization creates different point cloud views with various spatial perturbations while embedding aggregation encourages the feature extractor to aggregate randomized point embeddings to learn perturbation-invariant representations, ultimately leading to a generalizable segmentation model." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 510, + 287, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 510, + 287, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 510, + 287, + 545 + ], + "type": "text", + "content": "a point cloud randomization technique that consists of two complementary designs including geometry style randomization and embedding aggregation as illustrated in Fig. 4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": "Geometry style randomization aims to enrich the geometry styles and expand the distribution of training point cloud data. 
Given a point-cloud scan " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " as input, we apply weak and strong spatial augmentation to obtain two copies of " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " including a weak-view " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "x^w = \\mathcal{A}^W(x)" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " and a strong-view " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "x^s = \\mathcal{A}^S(x)" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": ". For the augmentation schemes of " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^W" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": ", we follow existing supervised learning methods [41] and adopt the simple random rotation and random scaling. While for the augmentation schemes of " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^S" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": ", we further adopt random dropout, random flipping, random noise perturbation, and random jittering on top of " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^W" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": " to obtain a more diverse and complex copy of the input point cloud scan " + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 545, + 287, + 689 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "Embedding aggregation aims to aggregate encoded embeddings of randomized point clouds for learning domain-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": "invariant representations. We adopt contrastive learning [17] as illustrated in Fig. 4. 
Given the randomized point clouds " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "x^{w}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "x^{s}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": ", we first feed them into the feature extractor " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " and a projector " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " (a two-layer MLP) which outputs normalized point feature embeddings " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "f^{w}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "f^{s}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": ", respectively " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "(f = \\mathcal{P}(E(x)))" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\overline{f}_C^w \\in \\mathbb{R}^{D \\times C}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": ": feature dimension; " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": ": number of semantic classes) is then derived by class-wise averaging the feature embeddings " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "f^{w}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " in a batch, which is stored in a memory bank " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{B} \\in \\mathbb{R}^{D \\times C}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " that has no backpropagation and is momentum updated by iterations (i.e., " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{B} \\gets m \\times \\mathcal{B} + (1 - m) \\times \\overline{f}_C^w" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " with a momentum coefficient " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": "). 
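A minimal PyTorch-style sketch of the class-wise averaging and momentum update of the memory bank B just described; the momentum value and the assumption that "ignore"-labelled points are filtered out beforehand are illustrative choices, not the paper's settings.

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def update_memory_bank(bank, feats_w, labels_w, momentum=0.99):
    """Momentum-update the class-prototype bank B of shape (D, C).
    feats_w: (N, D) L2-normalized weak-view point embeddings f^w,
    labels_w: (N,) integer class labels in [0, C); `momentum` is an assumed value."""
    D, C = bank.shape
    for c in range(C):
        mask = labels_w == c
        if mask.any():
            proto = F.normalize(feats_w[mask].mean(dim=0), dim=0)  # class-wise average
            bank[:, c] = momentum * bank[:, c] + (1.0 - momentum) * proto
    return bank
```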
Finally, we employ each point feature embedding " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "f_{i}^{s}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " of the strong-view " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "f^{s}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " as query and feature embeddings in " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " as keys for contrastive learning, where the key sharing the same semantic class as the query is positive key " + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_{+}" + }, + { + "bbox": [ + 304, + 297, + 546, + 489 + ], + "type": "text", + "content": " and the rest are negative keys. The contrastive loss is defined as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 341, + 502, + 545, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 502, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 341, + 502, + 545, + 536 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {c t} = \\frac {1}{N} \\sum_ {i = 1} ^ {N} - \\log \\frac {\\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {+} / \\tau\\right)}{\\sum_ {j = 1} ^ {C} \\exp \\left(f _ {i} ^ {s} \\mathcal {B} _ {j} / \\tau\\right)} \\tag {1}", + "image_path": "cf141e5577c92fb1f494125e9bf5e122f54ac60e878c156935ef5dd4bdfef217.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 543, + 545, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 545, + 578 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 543, + 545, + 578 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 543, + 545, + 578 + ], + "type": "text", + "content": " is a temperature hyper-parameter [47]. Note there is no back-propagation for the \"ignore\" class in optimizing the contrastive loss." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 580, + 546, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 580, + 546, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 580, + 546, + 688 + ], + "type": "text", + "content": "Contrastive learning pulls point feature embeddings of the same classes closer while pushing away point feature embeddings of different classes. Therefore, optimizing the proposed contrastive loss will aggregate randomized point cloud features and learn perturbation-invariant representations, ultimately leading to a robust and generalizable segmentation model. The momentum-updated memory bank provides feature prototypes of each semantic class for more robust and stable contrastive learning." 
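The contrastive term in Eq. (1) is an InfoNCE-style loss in which the positive key is the bank prototype of the query point's class and the remaining prototypes act as negatives. A minimal PyTorch sketch, assuming L2-normalized embeddings, integer class labels, and assumed values for the temperature and the ignore index:

```python
import torch
import torch.nn.functional as F

def point_contrastive_loss(feats_s, labels_s, bank, tau=0.07, ignore_index=255):
    """Eq. (1): feats_s are (N, D) normalized strong-view embeddings f^s,
    labels_s are (N,) integer class indices, bank is the (D, C) prototype memory B.
    `tau` and `ignore_index` are assumed values."""
    logits = feats_s @ bank / tau  # (N, C) similarity of each query to every class key
    # cross_entropy computes -log( exp(pos) / sum_j exp(logits_j) ) averaged over points;
    # points labelled `ignore_index` contribute no gradient, matching the note above
    return F.cross_entropy(logits, labels_s, ignore_index=ignore_index)
```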
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Combining the supervised cross-entropy loss " + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ce}" + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": " for weakly-augmented point clouds in Eq. 1, the overall train" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9386" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 70, + 542, + 289 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 542, + 289 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 542, + 289 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 542, + 289 + ], + "type": "table", + "html": "
Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | D-fog | L-fog | Rain | Snow | mIoU
Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 51.9 | 54.6 | 57.9 | 53.7 | 54.7
SemanticKITTI → SemanticSTF
Baseline | 55.9 | 0.0 | 0.2 | 1.9 | 10.9 | 10.3 | 6.0 | 0.0 | 61.2 | 10.9 | 32.0 | 0.0 | 67.9 | 41.6 | 49.8 | 27.9 | 40.8 | 29.6 | 17.5 | 29.5 | 26.0 | 28.4 | 21.4 | 24.4
Dropout [38] | 62.1 | 0.0 | 15.5 | 3.0 | 11.5 | 5.4 | 2.0 | 0.0 | 58.4 | 12.8 | 26.7 | 1.1 | 72.1 | 43.6 | 52.9 | 34.2 | 43.5 | 28.4 | 15.5 | 29.3 | 25.6 | 29.4 | 24.8 | 25.7
Perturbation | 74.4 | 0.0 | 0.0 | 23.3 | 0.6 | 19.7 | 0.0 | 0.0 | 60.3 | 10.8 | 33.9 | 0.7 | 72.0 | 45.2 | 58.7 | 17.5 | 42.4 | 22.1 | 9.7 | 26.3 | 27.8 | 30.0 | 24.5 | 25.9
PolarMix [48] | 57.8 | 1.8 | 3.8 | 16.7 | 3.7 | 26.5 | 0.0 | 2.0 | 65.7 | 2.9 | 32.5 | 0.3 | 71.0 | 48.7 | 53.8 | 20.5 | 45.4 | 25.9 | 15.8 | 29.7 | 25.0 | 28.6 | 25.6 | 26.0
MMD [26] | 63.6 | 0.0 | 2.6 | 0.1 | 11.4 | 28.1 | 0.0 | 0.0 | 67.0 | 14.1 | 37.9 | 0.3 | 67.3 | 41.2 | 57.1 | 27.4 | 47.9 | 28.2 | 16.2 | 30.4 | 28.1 | 32.8 | 25.2 | 26.9
PCL [54] | 65.9 | 0.0 | 0.0 | 17.7 | 0.4 | 8.4 | 0.0 | 0.0 | 59.6 | 12.0 | 35.0 | 1.6 | 74.0 | 47.5 | 60.7 | 15.8 | 48.9 | 26.1 | 27.5 | 28.9 | 27.6 | 30.1 | 24.6 | 26.4
PointDR (Ours) | 67.3 | 0.0 | 4.5 | 19.6 | 9.0 | 18.8 | 2.7 | 0.0 | 62.6 | 12.9 | 38.1 | 0.6 | 73.3 | 43.8 | 56.4 | 32.2 | 45.7 | 28.7 | 27.4 | 31.3 | 29.7 | 31.9 | 26.2 | 28.6
SynLiDAR → SemanticSTF
Baseline | 27.1 | 3.0 | 0.6 | 15.8 | 0.1 | 25.2 | 1.8 | 5.6 | 23.9 | 0.3 | 14.6 | 0.6 | 36.3 | 19.9 | 37.9 | 17.9 | 41.8 | 9.5 | 2.3 | 16.9 | 17.2 | 17.2 | 11.9 | 15.0
Dropout [38] | 28.0 | 3.0 | 1.4 | 9.6 | 0.0 | 17.1 | 0.8 | 0.7 | 34.2 | 6.8 | 19.1 | 0.1 | 35.5 | 19.1 | 42.3 | 17.6 | 36.0 | 14.0 | 2.8 | 15.3 | 16.6 | 20.4 | 14.0 | 15.2
Perturbation | 27.1 | 2.3 | 2.3 | 16.0 | 0.1 | 23.7 | 1.2 | 4.0 | 27.0 | 3.6 | 16.2 | 0.8 | 29.2 | 16.7 | 35.3 | 22.7 | 38.3 | 17.9 | 5.1 | 16.3 | 16.7 | 19.3 | 13.4 | 15.2
PolarMix [48] | 39.2 | 1.1 | 1.2 | 8.3 | 1.5 | 17.8 | 0.8 | 0.7 | 23.3 | 1.3 | 17.5 | 0.4 | 45.2 | 24.8 | 46.2 | 20.1 | 38.7 | 7.6 | 1.9 | 16.1 | 15.5 | 19.2 | 15.6 | 15.7
MMD [26] | 25.5 | 2.3 | 2.1 | 13.2 | 0.7 | 22.1 | 1.4 | 7.5 | 30.8 | 0.4 | 17.6 | 0.2 | 30.9 | 19.7 | 37.6 | 19.3 | 43.5 | 9.9 | 2.6 | 17.3 | 16.3 | 20.0 | 12.7 | 15.1
PCL [54] | 30.9 | 0.8 | 1.4 | 10.0 | 0.4 | 23.3 | 4.0 | 7.9 | 28.5 | 1.3 | 17.7 | 1.2 | 39.4 | 18.5 | 40.0 | 16.0 | 38.6 | 12.1 | 2.3 | 17.8 | 16.7 | 19.3 | 14.1 | 15.5
PointDR (Ours) | 37.8 | 2.5 | 2.4 | 23.6 | 0.1 | 26.3 | 2.2 | 3.3 | 27.9 | 7.7 | 17.5 | 0.5 | 47.6 | 25.3 | 45.7 | 21.0 | 37.5 | 17.9 | 5.5 | 19.5 | 19.9 | 21.1 | 16.9 | 18.5
", + "image_path": "6910c743257b5cafc1481209b88c8ed982c3d4483a591d3ec3c823ea0b5890bf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 297, + 530, + 308 + ], + "lines": [ + { + "bbox": [ + 61, + 297, + 530, + 308 + ], + "spans": [ + { + "bbox": [ + 61, + 297, + 530, + 308 + ], + "type": "text", + "content": "Table 2. Experiments on domain generalization with SemanticKITTI [2] or SynLiDAR [49] as source and SemanticSTF as target." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 330, + 240, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 330, + 240, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 330, + 240, + 342 + ], + "type": "text", + "content": "ing objective of PointDR can be formulated by:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 349, + 287, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 349, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 113, + 349, + 287, + 361 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {P o i n t D R}} = \\mathcal {L} _ {c e} + \\lambda_ {c t} \\mathcal {L} _ {c t} \\tag {2}", + "image_path": "3f9802f01a49c9facae267fb0ab4aeaadcaa1c6096c42d43c01b6203a5a50b84.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 369, + 254, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 369, + 254, + 383 + ], + "spans": [ + { + "bbox": [ + 47, + 369, + 254, + 383 + ], + "type": "text", + "content": "5. Evaluation of Semantic Segmentation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 389, + 287, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 389, + 287, + 473 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 287, + 473 + ], + "type": "text", + "content": "SemanticSTF can be adopted for benchmarking different learning setups and network architectures on point cloud segmentation. We perform experiments over two typical learning setups including domain generalization and unsupervised domain adaptation. In addition, we evaluate several state-of-the-art point-cloud segmentation networks to examine their generalization capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 480, + 180, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 480, + 180, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 480, + 180, + 491 + ], + "type": "text", + "content": "5.1. Domain Generalization" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 581 + ], + "type": "text", + "content": "We first study domain generalizable point cloud segmentation. For DG, we can only access an annotated source domain during training and the trained model is expected to generalize well to unseen target domains. Leveraging SemanticSTF, we build two DG benchmarks and examine how PointDR helps learn a universal 3DSS model that can work under different weather conditions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": "The first benchmark is SemanticKITTI [2] " + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": " SemanticSTF where SemanticKITTI is a large-scale real-world 3DSS dataset collected under normal weather conditions. This benchmark serves as a solid testing ground for evaluating domain generalization performance from normal to adverse weather conditions. The second benchmark is SynLiDAR [49] " + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": " SemanticSTF where SynLiDAR is a largescale synthetic 3DSS dataset. The motivation of this benchmark is that learning a universal 3DSS model from synthetic point clouds that can work well across adverse weather is of high research and application value considering the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 330, + 545, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 330, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 330, + 545, + 376 + ], + "type": "text", + "content": "challenges in point cloud collection and annotation. Note this benchmark is more challenging as the domain discrepancy comes from both normal-to-adverse weather distribution shift and synthetic-to-real distribution shift." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 377, + 546, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 377, + 546, + 520 + ], + "spans": [ + { + "bbox": [ + 304, + 377, + 546, + 520 + ], + "type": "text", + "content": "Setup. We use all 19 evaluating classes of SemanticKITTI in both domain generalization benchmarks. The category of invalid in SemanticSTF is mapped to the ignored since SemanticKITTI and SynLiDAR do not cover this category. We adopt MinkowskiNet [7] (with TorchSparse library [41]) as the backbone model, which is a sparse convolutional network that provides state-of-the-art performance with decent efficiency. We adopt the evaluation metrics of Intersection over the Union (IoU) for each segmentation class and the mean IoU (mIoU) over all classes. All experiments are run over a single NVIDIA 2080Ti (11GB). More implementation details are provided in the appendix." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 521, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 700 + ], + "type": "text", + "content": "Baseline Methods. Since domain generalizable 3DSS is far under-explored, there is little existing baseline that can be directly adopted for benchmarking. We thus select two closely related approaches as baseline to evaluate the proposed PointDR. 
The first approach is data augmentation and we select three related augmentation methods including Dropout [38] that randomly drops out points to simulate LiDAR points missing in adverse weather, Noise perturbation that adds random points in the 3D space to simulate noise points as introduced by particles like falling snow, and PolarMix [48] that mixes point clouds of different sources for augmentation. The second approach is to adapt 2D domain generalization methods for 3DSS. We select two 2D domain generalization methods including the widely studied MMD [26] and the recently proposed PCL [54]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 701, + 545, + 713 + ], + "type": "text", + "content": "Results. Table 2 shows experimental results over the validation" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9387" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 77, + 70, + 257, + 121 + ], + "blocks": [ + { + "bbox": [ + 77, + 70, + 257, + 121 + ], + "lines": [ + { + "bbox": [ + 77, + 70, + 257, + 121 + ], + "spans": [ + { + "bbox": [ + 77, + 70, + 257, + 121 + ], + "type": "table", + "html": "
Method | L_ce | L_ct | B | mIoU
Baseline | ✓ | | | 24.4
PointDR-CT | ✓ | ✓ | | 27.4
PointDR | ✓ | ✓ | ✓ | 28.6
", + "image_path": "b5272c3a318d65b80948e1a47f50d342eb5a47216bd3e17cf57cc9f4424c59ff.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 129, + 286, + 150 + ], + "lines": [ + { + "bbox": [ + 47, + 129, + 286, + 150 + ], + "spans": [ + { + "bbox": [ + 47, + 129, + 286, + 150 + ], + "type": "text", + "content": "Table 3. Ablation study of PointDR over domain generalized segmentation task SemanticKITTI " + }, + { + "bbox": [ + 47, + 129, + 286, + 150 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 47, + 129, + 286, + 150 + ], + "type": "text", + "content": " SemanticSTF." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "text", + "content": "tion set of SemanticSTF. For both benchmarks, the Baseline is a source-only model that is trained by using the training data of SemanticKITTI or SynLiDAR. We can see that the Baseline achieves very low mIoU while evaluated over the validation set of SemanticSTF, indicating the large domain discrepancy between point clouds of normal and adverse weather conditions. In addition, all three data augmentation methods improve the model generalization consistently but the performance gains are limited especially for the challenging benchmark SynLiDAR " + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "text", + "content": " SemanticSTF. The two 2D generalization methods both help SemanticKITTI " + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "text", + "content": " SemanticSTF clearly but show very limited improvement over SynLiDAR " + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 174, + 286, + 376 + ], + "type": "text", + "content": " SemanticSTF. The proposed PointDR achieves the best generalization consistently across both benchmarks, demonstrating its superior capability to learn perturbation-invariant point cloud representations and effectiveness while handling all-weather 3DSS tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 378, + 286, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 378, + 286, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 378, + 286, + 604 + ], + "type": "text", + "content": "We also evaluate the compared domain generalization methods over each individual adverse weather condition as shown in Table 2. It can be observed that the three data augmentation methods work for data captured in rainy and snowy weather only. The 2D generalization method MMD shows clear effectiveness for point clouds under dense fog and rain while PCL works for point clouds under rainy and snowy weather instead. We conjecture that the performance variations are largely attributed to the different properties of point clouds captured under different weather conditions. For example, more points are missing in rain while object points often deform due to the covered snow (more illustrations are provided in the appendix). 
Such data variations lead to different domain discrepancies across weather which further leads to different performances of the compared methods. As PointDR learns perturbation-tolerant representations, it works effectively across different adverse weather conditions. We also provide qualitative results, please refer to the appendix for details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": "Ablation study. We study different PointDR designs to examine how they contribute to the overall generalization performance. As Table 3 shows, we report three models over the benchmark \"SemanticKITTI " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": " SemanticSTF\": 1) Baseline that is trained with " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ce}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": ". 2) PointDR-CT that is jointly trained with " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ce}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ct}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": " without using the memory bank " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": ". 3) The complete PointDR that is trained with " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ce}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ct}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": " and the memory bank " + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 46, + 605, + 286, + 712 + ], + "type": "text", + "content": ". We evaluate the three models over the validation set of SemanticSTF and Table 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": "shows experimental results. We can see that the Baseline performs poorly at " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "24.4\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " due to clear domain discrepancy between point clouds of normal weather and adverse weather. 
Leveraging the proposed contrastive loss, " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ct}" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " achieves clearly better performance at " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": ", indicating that learning perturbation-invariance is helpful for universal LiDAR segmentation of all-weather conditions. On top of that, introducing the momentum-updated memory bank " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " further improves the segmentation performance at " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "28.6\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": ". This is because the feature embeddings in " + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 304, + 72, + 545, + 240 + ], + "type": "text", + "content": " serve as the class prototypes which help the optimization of the segmentation network, finally leading to more robust representations of 3DSS that perform better over adverse weather point clouds." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 251, + 421, + 262 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 251, + 421, + 262 + ], + "spans": [ + { + "bbox": [ + 306, + 251, + 421, + 262 + ], + "type": "text", + "content": "5.2. Domain Adaptation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 270, + 545, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 270, + 545, + 460 + ], + "spans": [ + { + "bbox": [ + 304, + 270, + 545, + 460 + ], + "type": "text", + "content": "We also study SemanticSTF over a domain adaptive point cloud segmentation benchmark SemanticKITTI " + }, + { + "bbox": [ + 304, + 270, + 545, + 460 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 270, + 545, + 460 + ], + "type": "text", + "content": " SemanticSTF. Specifically, we select four representative UDA methods including ADDA [44], entropy minimization (Ent-Min) [45], self-training [63], and CoSMix [37] for adaptation from the source SemanticKITTI [2] toward the target SemanticSTF. Following the state-of-the-art [37, 48, 49] on synthetic-to-real adaptation, we adopt MinkowskiNet [7] as the segmentation backbone for all compared methods. Table 4 shows experimental results over the validation set of SemanticSTF. We can see that all UDA methods outperform the Source-only consistently under the normal-to-adverse adaptation setup. At the other end, the performance gains are still quite limited, showing the great improvement space along domain adaptive 3DSS from normal to adverse weather conditions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 462, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 712 + ], + "type": "text", + "content": "In addition, we examined the adaptability of the four UDA methods in relation to each individual adverse weather condition. 
Specifically, we trained each of the four methods for adaptation from SemanticKITTI to SemanticSTF data for each adverse weather condition. Table 5 shows the experimental results over the validation set of SemanticSTF. We can see all four methods outperform the Source-only method under Dense-fog and Light-fog, demonstrating their effectiveness in mitigating domain discrepancies. However, for rain and Snow, only CoSMix achieved marginal performance gains while the other three UDA methods achieved limited performance improvements. We conjecture that snow and rain introduce large deformations on object surfaces or much noise, making adaptation from normal to adverse weather more challenging. CoSMix works in the input space by directly mixing source and target points, allowing it to perform better under heavy snow and rain which have larger domain gaps. However, all methods achieved relatively low segmentation performance, indicating the significance of our research and the large room for improvement in our constructed benchmarks." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9388" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 70, + 541, + 170 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 541, + 170 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 541, + 170 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 541, + 170 + ], + "type": "table", + "html": "
Methods | car | bi.cle | mt.cle | truck | oth-v. | pers. | bi.clst | mt.clst | road | parki. | sidew. | oth-g. | build. | fence | veget. | trunk | terra. | pole | traf. | mIoU
Oracle | 89.4 | 42.1 | 0.0 | 59.9 | 61.2 | 69.6 | 39.0 | 0.0 | 82.2 | 21.5 | 58.2 | 45.6 | 86.1 | 63.6 | 80.2 | 52.0 | 77.6 | 50.1 | 61.7 | 54.7
Source-only | 64.8 | 0.0 | 0.0 | 13.8 | 1.8 | 5.0 | 2.1 | 0.0 | 62.7 | 7.5 | 34.0 | 0.0 | 66.7 | 36.2 | 53.9 | 31.3 | 44.3 | 24.0 | 14.2 | 24.3
ADDA [44] | 65.6 | 0.0 | 0.0 | 21.0 | 1.3 | 2.8 | 1.3 | 16.7 | 64.7 | 1.2 | 35.4 | 0.0 | 66.5 | 41.8 | 57.2 | 32.6 | 42.2 | 23.3 | 26.4 | 26.3
Ent-Min [45] | 69.2 | 0.0 | 10.1 | 31.0 | 5.3 | 2.8 | 2.6 | 0.0 | 65.9 | 2.6 | 35.7 | 0.0 | 72.5 | 42.8 | 52.4 | 32.5 | 44.7 | 24.7 | 21.1 | 27.2
Self-training [63] | 71.5 | 0.0 | 10.3 | 33.1 | 7.4 | 5.9 | 1.3 | 0.0 | 65.1 | 6.5 | 36.6 | 0.0 | 67.8 | 41.3 | 51.7 | 32.9 | 42.9 | 25.1 | 25.0 | 27.6
CoSMix [37] | 65.0 | 1.7 | 22.1 | 25.2 | 7.7 | 33.2 | 0.0 | 0.0 | 64.7 | 11.5 | 31.1 | 0.9 | 62.5 | 37.8 | 44.6 | 30.5 | 41.1 | 30.9 | 28.6 | 28.4
", + "image_path": "6188afe4fb3c697d1f64308353702a6823d49f98b6af818723eb7f297d31b2f2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 60, + 214, + 273, + 284 + ], + "blocks": [ + { + "bbox": [ + 46, + 173, + 546, + 196 + ], + "lines": [ + { + "bbox": [ + 46, + 173, + 546, + 196 + ], + "spans": [ + { + "bbox": [ + 46, + 173, + 546, + 196 + ], + "type": "text", + "content": "Table 4. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI " + }, + { + "bbox": [ + 46, + 173, + 546, + 196 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 173, + 546, + 196 + ], + "type": "text", + "content": " SemanticSTF adaptation. SemanticKITTI serves as the source domain and the entire SemanticSTF including all four weather conditions serves as the target domain." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 60, + 214, + 273, + 284 + ], + "lines": [ + { + "bbox": [ + 60, + 214, + 273, + 284 + ], + "spans": [ + { + "bbox": [ + 60, + 214, + 273, + 284 + ], + "type": "table", + "html": "
Method | Dense-fog | Light-fog | Rain | Snow
Source-Only | 26.9 | 25.2 | 27.7 | 23.5
ADDA [44] | 31.5 | 27.9 | 27.4 | 23.4
Ent-Min [45] | 31.4 | 28.6 | 30.3 | 24.9
Self-training [63] | 31.8 | 29.3 | 27.9 | 25.1
CoSMix [37] | 31.6 | 30.3 | 33.1 | 32.9
", + "image_path": "3becf7b4c509b2cda77d77811b6db64879260325d20e45b41a2fcd0405db2829.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 364, + 249, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 364, + 249, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 364, + 249, + 376 + ], + "type": "text", + "content": "5.3. Network Models vs All-Weather 3DSS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 382, + 287, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 382, + 287, + 658 + ], + "spans": [ + { + "bbox": [ + 45, + 382, + 287, + 658 + ], + "type": "text", + "content": "We also study how different 3DSS network architectures generalize when they are trained with normal-weather point clouds and evaluated over SemanticSTF. Specifically, we select five representative 3DSS networks [9, 19, 41, 62] that have been widely adopted in 3D LiDAR segmentation studies. In the experiments, each selected network is first pre-trained with SemanticKITTI [2] and then evaluated over the validation set of SemanticSTF. We directly use the officially released code and the pre-trained weights for evaluation. Table 6 shows experimental results. We can observe that the five pre-trained models perform very differently though they all achieve superior segmentation over SemanticKITTI. Specifically, RandLA-Net [19], SPVCNN [41], and SPVNAS [41] perform clearly better than SalsaNext [9] and Cylinder3D [62]. In addition, none of the five pre-trained models perform well, verifying the clear domain discrepancy between point clouds of normal and adverse weather conditions. The experiments further indicate the great value of SemanticSTF in the future exploration of robust point cloud parsing under all weather conditions. In addition, the supervised performance of these 3DSS networks over SemanticSTF is provided in the appendix." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 670, + 187, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 670, + 187, + 682 + ], + "spans": [ + { + "bbox": [ + 47, + 670, + 187, + 682 + ], + "type": "text", + "content": "6. Conclusion and Outlook" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "This paper presents SemanticSTF, a large-scale dataset and benchmark suite for semantic segmentation of LiDAR" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 306, + 214, + 553, + 284 + ], + "blocks": [ + { + "bbox": [ + 46, + 287, + 288, + 343 + ], + "lines": [ + { + "bbox": [ + 46, + 287, + 288, + 343 + ], + "spans": [ + { + "bbox": [ + 46, + 287, + 288, + 343 + ], + "type": "text", + "content": "Table 5. Comparison of state-of-the-art domain adaptation methods on SemanticKITTI " + }, + { + "bbox": [ + 46, + 287, + 288, + 343 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 287, + 288, + 343 + ], + "type": "text", + "content": " SemanticSTF adaptation for individual adverse weather conditions. We train a separate model for each weather-specific subset of SemanticSTF and evaluate the trained model on the weather condition it has been trained for." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 214, + 553, + 284 + ], + "lines": [ + { + "bbox": [ + 306, + 214, + 553, + 284 + ], + "spans": [ + { + "bbox": [ + 306, + 214, + 553, + 284 + ], + "type": "table", + "html": "
3DSS Model | D-fog | L-fog | Rain | Snow | All
RandLA-Net [19] | 26.5 | 26.0 | 25.1 | 22.7 | 25.3
SalsaNext [9] | 16.0 | 9.6 | 7.8 | 3.5 | 9.1
SPVCNN [41] | 30.4 | 22.8 | 21.7 | 18.3 | 22.4
SPVNAS [41] | 25.5 | 18.3 | 17.0 | 13.0 | 18.0
Cylinder3D [62] | 14.8 | 7.4 | 5.7 | 4.0 | 7.3
", + "image_path": "23364a4d9538b6c235a9ab2049dadb197479768832b1be56571bee0af6af6c1f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "lines": [ + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "spans": [ + { + "bbox": [ + 305, + 289, + 547, + 332 + ], + "type": "text", + "content": "Table 6. Performance of state-of-the-art 3DSS models that are pre-trained over SemanticKITTI and tested on validation set of SemanticSTF for individual weather conditions and jointly for all weather conditions." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 358, + 546, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 546, + 454 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 546, + 454 + ], + "type": "text", + "content": "point clouds under adverse weather conditions. SemanticSTF provides high-quality point-level annotations for point clouds captured under adverse weather including dense fog, light fog, snow and rain. Extensive studies have been conducted to examine how state-of-the-art 3DSS methods perform over SemanticSTF, demonstrating its significance in directing future research on domain adaptive and domain generalizable 3DSS under all-weather conditions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 457, + 546, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 457, + 546, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 546, + 578 + ], + "type": "text", + "content": "We also design PointDR, a domain randomization technique that aims to use normal-weather point clouds to train a domain generalizable 3DSS model that can work well over adverse-weather point clouds. PointDR consists of two novel designs including geometry style randomization and embedding aggregation which jointly learn perturbation-invariant representations that generalize well to various new point-cloud domains. Extensive experiments show that PointDR achieves superior point cloud segmentation performance as compared with the state-of-the-art." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 595, + 404, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 595, + 404, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 595, + 404, + 609 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "This study is funded BY the Ministry of Education Singapore, under the Tier-1 scheme with project number RG18/22. It is also supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from Singapore Telecommunications Limited (Singtel), through Singtel Cognitive and Artificial Intelligence Lab for Enterprises (SCALE@NTU)." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9389" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 135 + ], + "type": "text", + "content": "[1] Yogesh Balaji, Swami Sankaranarayanan, and Rama Chellappa. Metareg: Towards domain generalization using meta regularization. Advances in neural information processing systems, 31, 2018. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 137, + 288, + 203 + ], + "type": "text", + "content": "[2] Jens Behley, Martin Garbade, Andres Milioto, Jan Quenzel, Sven Behnke, Cyril Stachniss, and Jurgen Gall. Semantickitti: A dataset for semantic scene understanding of lidar sequences. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9297-9307, 2019. 1, 3, 4, 6, 7, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 270 + ], + "type": "text", + "content": "[3] Mario Bijelic, Tobias Gruber, Fahim Mannan, Florian Kraus, Werner Ritter, Klaus Dietmayer, and Felix Heide. Seeing through fog without seeing fog: Deep multimodal sensor fusion in unseen adverse weather. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11682-11692, 2020. 1, 2, 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 287, + 316 + ], + "type": "text", + "content": "[4] Gilles Blanchard, Gyemin Lee, and Clayton Scott. Generalizing from several related classification tasks to a new unlabeled sample. Advances in neural information processing systems, 24, 2011. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 319, + 288, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 319, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 53, + 319, + 288, + 384 + ], + "type": "text", + "content": "[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 
4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 386, + 287, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 386, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 53, + 386, + 287, + 450 + ], + "type": "text", + "content": "[6] Ran Cheng, Ryan Razani, Ehsan Taghavi, Enxu Li, and Bingbing Liu. 2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12547-12556, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 453, + 287, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 453, + 287, + 508 + ], + "spans": [ + { + "bbox": [ + 53, + 453, + 287, + 508 + ], + "type": "text", + "content": "[7] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 6, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 510, + 287, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 510, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 53, + 510, + 287, + 576 + ], + "type": "text", + "content": "[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3213-3223, 2016. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 578, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 578, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 53, + 578, + 287, + 621 + ], + "type": "text", + "content": "[9] Tiago Cortinhal, George Tzelepis, and Eren Erdal Aksoy. Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In International Symposium on Visual Computing, pages 207-222. Springer, 2020. 2, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 656 + ], + "type": "text", + "content": "[10] A Filgueira, H González-Jorge, Susana Lagtuela, L Díaz-Vilarino, and Pedro Arias. Quantifying the influence of rain in lidar performance. Measurement, 95:143-148, 2017. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 712 + ], + "type": "text", + "content": "[11] Whye Kit Fong, Rohit Mohan, Juana Valeria Hurtado, Lubing Zhou, Holger Caesar, Oscar Beijbom, and Abhinav Valada. Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters, 7(2):3795-3802, 2022. 
1, 4" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[12] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International conference on machine learning, pages 1180-1189. PMLR, 2015. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 107, + 545, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 148 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 148 + ], + "type": "text", + "content": "[13] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 151, + 545, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 151, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 307, + 151, + 545, + 204 + ], + "type": "text", + "content": "[14] Dayan Guan, Jiaxing Huang, Aoran Xiao, and Shijian Lu. Domain adaptive video segmentation via temporal consistency regularization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8053-8064, 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 206, + 545, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 206, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 545, + 260 + ], + "type": "text", + "content": "[15] Martin Hahner, Christos Sakaridis, Mario Bijelic, Felix Heide, Fisher Yu, Dengxin Dai, and Luc Van Gool. Lidar snowfall simulation for robust 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16364-16374, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 262, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 262, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 262, + 545, + 316 + ], + "type": "text", + "content": "[16] Martin Hahner, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Fog simulation on real lidar point clouds for 3d object detection in adverse weather. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15283-15292, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 317, + 545, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 317, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 307, + 317, + 545, + 370 + ], + "type": "text", + "content": "[17] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 372, + 545, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 372, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 372, + 545, + 425 + ], + "type": "text", + "content": "[18] Robin Heinzler, Philipp Schindler, Jürgen Seekircher, Werner Ritter, and Wilhelm Stork. 
Weather influence and classification with automotive lidar sensors. In 2019 IEEE intelligent vehicles symposium (IV), pages 1527-1534. IEEE, 2019. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 427, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 427, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 427, + 545, + 491 + ], + "type": "text", + "content": "[19] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1, 2, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 547 + ], + "type": "text", + "content": "[20] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Cross-view regularization for domain adaptive panoptic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10133-10144, 2021. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 548, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 545, + 601 + ], + "type": "text", + "content": "[21] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Fsdr: Frequency space domain randomization for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6891-6902, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 604, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 604, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 604, + 545, + 656 + ], + "type": "text", + "content": "[22] Jiaxing Huang, Dayan Guan, Aoran Xiao, and Shijian Lu. Model adaptation: Historical contrastive learning for unsupervised domain adaptation without source data. Advances in Neural Information Processing Systems, 34:3635-3649, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[23] Jiaxing Huang, Dayan Guan, Aoran Xiao, Shijian Lu, and Ling Shao. Category contrast for unsupervised domain adaptation in visual tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1203-1214, 2022. 
2" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9390" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[24] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4893-4902, 2019. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 128, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 128, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 128, + 288, + 205 + ], + "type": "text", + "content": "[25] Alexander Lehner, Stefano Gasperini, Alvaro Marcos-Ramiro, Michael Schmidt, Mohammad-Ali Nikouei Mahani, Nassir Navab, Benjamin Busam, and Federico Tombari. 3d-vfield: Adversarial augmentation of point clouds for domain generalization in 3d object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17295-17304, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 205, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 288, + 249 + ], + "type": "text", + "content": "[26] Haoliang Li, Sinno Jialin Pan, Shiqi Wang, and Alex C Kot. Domain generalization with adversarial feature learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5400-5409, 2018. 2, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 250, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 250, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 250, + 288, + 282 + ], + "type": "text", + "content": "[27] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Pointvoxel cnn for efficient 3d deep learning. Advances in Neural Information Processing Systems, 32, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 283, + 288, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 288, + 348 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 288, + 348 + ], + "type": "text", + "content": "[28] Zhipeng Luo, Zhongang Cai, Changqing Zhou, Gongjie Zhang, Haiyu Zhao, Shuai Yi, Shijian Lu, Hongsheng Li, Shanghang Zhang, and Ziwei Liu. Unsupervised domain adaptive 3d detection with multi-level consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8866-8875, 2021. 
2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 349, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 349, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 349, + 288, + 392 + ], + "type": "text", + "content": "[29] Will Maddern, Geoffrey Pascoe, Chris Linegar, and Paul Newman. 1 year, " + }, + { + "bbox": [ + 48, + 349, + 288, + 392 + ], + "type": "inline_equation", + "content": "1000\\mathrm{km}" + }, + { + "bbox": [ + 48, + 349, + 288, + 392 + ], + "type": "text", + "content": ": The oxford robotcar dataset. The International Journal of Robotics Research, 36(1):3-15, 2017. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 393, + 288, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 288, + 446 + ], + "type": "text", + "content": "[30] Andres Milioto, Ignacio Vizzo, Jens Behley, and Cyril Stachniss. Rangenet++: Fast and accurate lidar semantic segmentation. In 2019 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 4213-4220. IEEE, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 448, + 288, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 448, + 288, + 492 + ], + "spans": [ + { + "bbox": [ + 48, + 448, + 288, + 492 + ], + "type": "text", + "content": "[31] Krikamol Muandet, David Balduzzi, and Bernhard Scholkopf. Domain generalization via invariant feature representation. In International Conference on Machine Learning, pages 10-18. PMLR, 2013. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 492, + 288, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 288, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 288, + 546 + ], + "type": "text", + "content": "[32] Thierry Peynot, James Underwood, and Steven Scheding. Towards reliable perception for unmanned ground vehicles in challenging conditions. In 2009 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1170-1176. IEEE, 2009. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 548, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 288, + 602 + ], + "type": "text", + "content": "[33] Matthew Pitropov, Danson Evan Garcia, Jason Rebello, Michael Smart, Carlos Wang, Krzysztof Czarnecki, and Steven Waslander. Canadian adverse driving conditions dataset. The International Journal of Robotics Research, 40(4-5):681-690, 2021. 1, 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 602, + 288, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 656 + ], + "type": "text", + "content": "[34] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 658, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 690 + ], + "type": "text", + "content": "[35] Julian Ryde and Nick Hillier. 
Performance of laser and radar ranging devices in adverse environmental conditions. Journal of Field Robotics, 26(9):712-727, 2009. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "text", + "content": "[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Acdc: The adverse conditions dataset with correspondences for se" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "matic driving scene understanding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10765-10775, 2021. 2, 3, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[37] Cristiano Saltori, Fabio Galasso, Giuseppe Fiameni, Nicu Sebe, Elisa Ricci, and Fabio Poiesi. Cosmix: Compositional semantic mix for domain adaptation in 3d lidar segmentation. ECCV, 2022. 3, 7, 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 152, + 547, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 547, + 196 + ], + "type": "text", + "content": "[38] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958, 2014. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 198, + 545, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 545, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 545, + 263 + ], + "type": "text", + "content": "[39] Peng Su, Kun Wang, Xingyu Zeng, Shixiang Tang, Dapeng Chen, Di Qiu, and Xiaogang Wang. Adapting object detectors with conditional domain normalization. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 403-419. Springer, 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 265, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 265, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 545, + 331 + ], + "type": "text", + "content": "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2446-2454, 2020. 
4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 333, + 545, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 333, + 545, + 388 + ], + "spans": [ + { + "bbox": [ + 307, + 333, + 545, + 388 + ], + "type": "text", + "content": "[41] Haotian Tang, Zhijian Liu, Shengyu Zhao, Yujun Lin, Ji Lin, Hanrui Wang, and Song Han. Searching efficient 3d architectures with sparse point-voxel convolution. In European conference on computer vision, pages 685–702. Springer, 2020. 1, 2, 5, 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 453 + ], + "type": "text", + "content": "[42] Josh Tobin, Rachel Fong, Alex Ray, Jonas Schneider, Wojciech Zaremba, and Pieter Abbeel. Domain randomization for transferring deep neural networks from simulation to the real world. In 2017 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 23-30. IEEE, 2017. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 456, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 456, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 456, + 545, + 533 + ], + "type": "text", + "content": "[43] Jonathan Tremblay, Aayush Prakash, David Acuna, Mark Brophy, Varun Jampani, Cem Anil, Thang To, Eric Cameracci, Shaad Boochoon, and Stan Birchfield. Training deep networks with synthetic data: Bridging the reality gap by domain randomization. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 969-977, 2018. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 545, + 578 + ], + "type": "text", + "content": "[44] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7167-7176, 2017. 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 580, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 545, + 645 + ], + "type": "text", + "content": "[45] Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick Pérez. Advent: Adversarial entropy minimization for domain adaptation in semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2517-2526, 2019. 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 712 + ], + "type": "text", + "content": "[46] Bichen Wu, Xuanyu Zhou, Sicheng Zhao, Xiangyu Yue, and Kurt Keutzer. Squeezesegv2: Improved model structure and unsupervised domain adaptation for road-object segmentation from a lidar point cloud. In 2019 International Conference on Robotics and Automation (ICRA), pages 4376-4382. IEEE, 2019. 
2, 3" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "9391" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[47] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3733-3742, 2018. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 173 + ], + "type": "text", + "content": "[48] Aoran Xiao, Jiaxing Huang, Dayan Guan, Kaiwen Cui, Shijian Lu, and Ling Shao. Polarmix: A general data augmentation technique for lidar point clouds. NeurIPS, 2022. 3, 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 175, + 288, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 288, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 288, + 229 + ], + "type": "text", + "content": "[49] Aoran Xiao, Jiaxing Huang, Dayan Guan, Fangneng Zhan, and Shijian Lu. Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2795-2803, 2022. 1, 3, 4, 6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 231, + 288, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 231, + 288, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 231, + 288, + 284 + ], + "type": "text", + "content": "[50] Aoran Xiao, Xiaofei Yang, Shijian Lu, Dayan Guan, and Ji-axing Huang. Fps-net: A convolutional fusion network for large-scale lidar point cloud segmentation. ISPRS Journal of Photogrammetry and Remote Sensing, 176:237–249, 2021. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 342 + ], + "type": "text", + "content": "[51] Jianyun Xu, Ruixiang Zhang, Jian Dou, Yushi Zhu, Jie Sun, and Shiliang Pu. Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16024–16033, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 343, + 287, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 398 + ], + "type": "text", + "content": "[52] Qiangeng Xu, Yin Zhou, Weiyue Wang, Charles R Qi, and Dragomir Anguelov. Spg: Unsupervised domain adaptation for 3d object detection via semantic point generation. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15446-15456, 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "type": "text", + "content": "[53] Jihan Yang, Shaoshuai Shi, Zhe Wang, Hongsheng Li, and Xiaojuan Qi. St3d: Self-training for unsupervised domain adaptation on 3d object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10368-10378, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 510 + ], + "type": "text", + "content": "[54] Xufeng Yao, Yang Bai, Xinyun Zhang, Yuechen Zhang, Qi Sun, Ran Chen, Ruiyu Li, and Bei Yu. Pcl: Proxy-based contrastive learning for domain generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7097-7107, 2022. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 567 + ], + "type": "text", + "content": "[55] Li Yi, Boqing Gong, and Thomas Funkhouser. Complete & label: A domain adaptation approach to semantic segmentation of lidar point clouds. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15363-15373, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 633 + ], + "type": "text", + "content": "[56] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. Bdd100k: A diverse driving dataset for heterogeneous multitask learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2636-2645, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 679 + ], + "type": "text", + "content": "[57] Feihu Zhang, Jin Fang, Benjamin Wah, and Philip Torr. Deep fusionnet for point cloud semantic segmentation. In European Conference on Computer Vision, pages 644-663. Springer, 2020. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "text", + "content": "[58] Weichen Zhang, Wen Li, and Dong Xu. Srdan: Scale-aware and range-aware domain adaptation network for cross-dataset 3d object detection. 
In Proceedings of the IEEE/CVF" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 385 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision and Pattern Recognition, pages 6769-6779, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 96, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 545, + 161 + ], + "type": "text", + "content": "[59] Yang Zhang, Zixiang Zhou, Philip David, Xiangyu Yue, Zerong Xi, Boqing Gong, and Hassan Foroosh. Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9601-9610, 2020. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 163, + 545, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 545, + 228 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 545, + 228 + ], + "type": "text", + "content": "[60] Sicheng Zhao, Yezhen Wang, Bo Li, Bichen Wu, Yang Gao, Pengfei Xu, Trevor Darrell, and Kurt Keutzer. *epointda: An end-to-end simulation-to-real domain adaptation framework for lidar point cloud segmentation*. In *Proceedings of the AAAI Conference on Artificial Intelligence*, volume 35, pages 3500–3509, 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 229, + 545, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 229, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 308, + 229, + 545, + 272 + ], + "type": "text", + "content": "[61] Kaiyang Zhou, Ziwei Liu, Yu Qiao, Tao Xiang, and Chen Change Loy. Domain generalization: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 274, + 545, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 274, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 308, + 274, + 545, + 339 + ], + "type": "text", + "content": "[62] Xinge Zhu, Hui Zhou, Tai Wang, Fangzhou Hong, Yuexin Ma, Wei Li, Hongsheng Li, and Dahua Lin. Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9939-9948, 2021. 1, 2, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 341, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 341, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 341, + 545, + 385 + ], + "type": "text", + "content": "[63] Yang Zou, Zhiding Yu, Xiaofeng Liu, BVK Kumar, and Jinsong Wang. Confidence regularized self-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5982-5991, 2019. 
7, 8" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9392" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_content_list.json b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e06daac27a206b729c83c315c6f73e85ff45bc89 --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_content_list.json @@ -0,0 +1,1541 @@ +[ + { + "type": "text", + "text": "3D shape reconstruction of semi-transparent worms", + "text_level": 1, + "bbox": [ + 222, + 131, + 746, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thomas P. Ilett* Omer Yuval* Thomas Ranner* Netta Cohen*† David C. Hogg*† \nUniversity of Leeds, Leeds, United Kingdom", + "bbox": [ + 135, + 179, + 833, + 218 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/24e8530d7c90f914cf9b9d1e88d8e21baec40e5fb507bb37bdae070083204e53.jpg", + "image_caption": [ + "Figure 1. Posture reconstruction pipeline and imaging setup." + ], + "image_footnote": [], + "bbox": [ + 84, + 252, + 890, + 452 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 489, + 313, + 506 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D shape reconstruction typically requires identifying object features or textures in multiple images of a subject. This approach is not viable when the subject is semitransparent and moving in and out of focus. Here we overcome these challenges by rendering a candidate shape with adaptive blurring and transparency for comparison with the images. We use the microscopic nematode Caenorhabditis elegans as a case study as it freely explores a 3D complex fluid with constantly changing optical properties. We model the slender worm as a 3D curve using an intrinsic parametrisation that naturally admits biologically-informed constraints and regularisation. To account for the changing optics we develop a novel differentiable renderer to construct images from 2D projections and compare", + "bbox": [ + 75, + 525, + 470, + 736 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\\*T.Ilett, O.Yuval, T.Ranner, N.Cohen, D.C.Hogg}@leeds.ac.uk Funding This work was supported by University of Leeds and EPSRC. Author contributions Conceptualisation, Methodology, Formal analysis, Investigation, Software, Visualisation: TPI. Data curation, Validation: TPI, OY. Writing: TPI (original), all (review and editing). Funding acquisition, Supervision: NC, DCH, TR. $\\dagger$ Equal contribution. Acknowledgements Additional thanks to Matan Braunstein (for help with Fig. 1), Robert I. Holbrook (data), Felix Salfelder (discussions and data), Lukas Deutz (discussions) and Jen Kruger (proof reading). 
Data availability Supplementary movies are available here: https://doi.org/10.6084/m9.figshare.22310650.", + "bbox": [ + 75, + 753, + 468, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "against raw images to generate a pixel-wise error to jointly update the curve, camera and renderer parameters using gradient descent. The method is robust to interference such as bubbles and dirt trapped in the fluid, stays consistent through complex sequences of postures, recovers reliable estimates from blurry images and provides a significant improvement on previous attempts to track C. elegans in 3D. Our results demonstrate the potential of direct approaches to shape estimation in complex physical environments in the absence of ground-truth data.", + "bbox": [ + 496, + 491, + 893, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 676, + 632, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Many creatures such as fish, birds and insects move in all directions to search and navigate volumetric environments. Acquiring 3D data of their motion has informed models of locomotion, behaviour and neural and mechanical control [3,22]. While technological advances have made the collection of large quantities of multi-viewpoint visual data more attainable, methods for extracting and modelling 3D information remain largely domain-dependant as few species share common geometric models or exist within the same spatial and temporal scales [4, 11, 14, 26, 37, 41, 50, 54, 65]. Furthermore, while humans and some domesticated animals [30, 60] may act naturally while wearing special markers, marker-less observations of many species makes fea", + "bbox": [ + 496, + 703, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12565", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ture extraction more challenging and means pose estimation generally lacks ground-truth data [48].", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As a case study in marker-less 3D shape reconstruction, we consider $C$ elegans, a hair-thick, $\\sim 1$ mm long animal with a simple tapered cylinder shape, which can be constructed from a midline \"skeleton\". In the wild, $C$ elegans can be found in a wide range of complex 3D environments, e.g. decomposing organic matter, with continually changing physical properties [15, 17, 46]. However, to date, experiments have focused nearly exclusively on locomotion on a plane, limiting insight to the constrained, planar behaviours.", + "bbox": [ + 75, + 123, + 467, + 258 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We obtained a large dataset (4 hours 53 minutes $\\simeq$ 440,000 frames at $25\\mathrm{Hz}$ ) of experimental recordings of individual worms moving freely inside a glass cube filled with a gelatin solution. The cube is positioned between three nearly-orthogonal static cameras fitted with telecentric lenses. 
Initial pinhole camera model parameter estimates are provided [45] but are imprecise and require continuous adjustment across the course of a recording to account for small vibrations and optical changes to the gel. We aim to simultaneously reconstruct a 3D shape and find corrected camera parameters to match these recordings in a process akin to bundle adjustment [56].", + "bbox": [ + 75, + 261, + 467, + 443 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D reconstruction typically involves the identification and triangulation of common features from multiple viewpoints or the synthesis of full images including texture and shading information to match given scenes [16, 21, 47, 66]. Imaging animals with length $\\sim 1\\mathrm{mm}$ requires sufficient magnification, but simultaneously capturing long-term trajectories up to 25 minutes requires a large volume of view (10-20 worm lengths per axis). As the worm explores the cube it frequently appears out of focus in one or more of the cameras. Air bubbles and dirt trapped in the gel along with old tracks are difficult to differentiate from the transparent worm, particularly at the tapered ends. Self occlusion invariably appears in a least one view, where hidden parts darken the foreground while the ordering of fore/backparts is not discernible. As the semi-transparent and self-occluding subject moves in the volume, photometric information in one view bears little relevance to the appearance in the others making feature identification and photometric matching particularly challenging. We found that standard approaches may suffice for limited sub-clips, but lose parts of the object or fail catastrophically for much of the data and the solution requires a degree of adaptation.", + "bbox": [ + 75, + 445, + 467, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We present an integrated \"project-render-score\" algorithm to obtain a midline curve for each image-triplet (Fig. 1). Discrete curve vertices are projected through a triplet of pinhole camera models, rendered to produce an image-triplet for direct comparison against the recorded images and scored according to their intersection with worm-like pixels in all three views. The differentiable renderer stacks 2D super-Gaussian blobs at the projected locations", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "of each vertex to approximate the transparency along the worm, accounting for the variable focus and providing soft edges that direct the geometric model towards the midline. The scoring allows the detection of incongruities and keeps the curve aligned to the worm in all views. Regularisation terms ensure smoothness along the body and in time. Curve, camera and rendering parameters are jointly optimised using gradient descent to convergence. 
Once the worm shape has been resolved, it is generally only lost during image degradation or significant self-occlusions that make the posture unresolvable by eye.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our main contributions are:", + "bbox": [ + 517, + 257, + 785, + 271 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A robust pipeline for 3D posture reconstruction of a freely deforming semi-transparent object from noisy images.", + "- A novel viewpoint renderer to capture optical distortions and transparency.", + "- A feature-free bundle adjustment algorithm using direct image comparison and gradient descent." + ], + "bbox": [ + 517, + 272, + 890, + 377 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 500, + 388, + 635, + 404 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Bundle adjustment (BA) is a procedure to jointly optimise 3D geometry and camera parameters [21, 56]. BA typically identifies common features of an object from multiple viewpoints in order to minimise a prediction error between projections of the corresponding 3D points and their 2D observations. BA is frequently used in conjunction with other methods to find camera parameters using multiple images of a 3D calibration object with known control points or for fine-tuning results [13, 23, 36, 40, 57, 59].", + "bbox": [ + 496, + 414, + 890, + 550 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Feature detection converts photometric information into image coordinates. In BA, coordinates of common features are used to solve a geometric optimisation problem. Photometric bundle adjustment methods additionally require objects to have the same appearance in all views [12, 18]. Our method is entirely photometric, as such differing from BA. As our objects appear differently across views, all pixel information is used and the geometry is solved intrinsically.", + "bbox": [ + 496, + 551, + 890, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Pose estimation Deep network approaches have proved well-suited to 2D human-pose estimation as they are potent feature extractors and large annotated training sets are available [1, 51, 55]. For 3D postures, ground truth multiview datasets are less common. Recent progress [35] relies on end-to-end architectures [19, 27, 29, 32, 42, 61] or splitting the problem into 2D pose estimation and then constructing the 3D pose [10, 38]. Despite similar approaches used for non-human pose estimation, the huge variability in scales and shapes among species introduces a variety of challenges [26]. Motion capture in controlled settings with markers (providing ground truth skeleton and joint angle data for humans, horses and dogs [30, 60]), are not available for most animals. Generalised mesh surfaces may be used,", + "bbox": [ + 496, + 689, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12566", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "but often require multiple views and thousands of parameters, and do not guarantee consistency through time. In contrast, approximating an animal shape using a few-parameter morphable model can be both tractable and robust. Successful examples include swimmers [9, 43], birds [27, 58], mammals [2,6,28,39] and generic quadrupeds [7,67]. 
However, these methods expect opaque subjects with consistent textural appearances between views.", + "bbox": [ + 75, + 90, + 468, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C. elegans has a simple geometric shape that can be well reconstructed from a midline skeleton and parametrised by curvature values along the body (see Sec. 3). This is the deformable template we look to fit to the data. Despite the apparent simplicity, each vertex of the discretised curve has two degrees of freedom (two curvature values) and as we use 128 vertices, our model is highly deformable and requires many parameters (although smoothness regularisation simplifies the problem somewhat). In contrast to deep-learning approaches, our model includes only a small number of explainable parameters and direct optimisation avoids lengthy training and dataset requirements.", + "bbox": [ + 75, + 212, + 470, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "C. elegans Numerous freely available software packages are capable of simultaneous tracking and skeletonising single or multiple worms in 2D using inexpensive microscopic imaging [5,25,44,52,53,62] (see [24] for a review). Most of these skeletonisers combine image segmentation to separate the animal from the background with thinning of the mask to some midline pixels and fitting a spline.", + "bbox": [ + 75, + 415, + 468, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The 3D reconstruction problem has received relatively little attention. Using at first two views [34] and then three, Kwon et al. [33] designed a motorised stage coupled with a real-time tracker to keep a worm in focus under high magnification in a 3D environment while capturing trajectories of up to 3 minutes. Thresholded images are lifted into 3D, intersected in voxel space and thinned [20] to produce a final skeleton. Kwon et al. omit camera modelling and assume perfectly parallel projections – assumptions that result in large errors for the data we use. Shaw et al. [49] employed light field microscopy to generate depth maps alongside images from a single viewpoint. A midline skeleton is generated by fitting a spline to the 3D coordinates of the central voxels. However, self-occlusions cannot be resolved and only relatively planar postures were investigated.", + "bbox": [ + 75, + 521, + 468, + 748 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Salfelder et al. [45] and Yuval [63] both present 3D reconstruction algorithms using the three-camera set up and calibration described in [45]. In Salfelder et al. [45], a neural network is trained to identify 2D midlines from individual camera images before lifting into 3D voxel space. To account for changing camera parameters, a relative axial shift $(dx,dy,dz)$ is optimised for each frame-triplet to maximise the voxel intersection before thinning. Remaining voxel coordinates are used as control points to fit a curve using a finite-element formulation. This approach works well when", + "bbox": [ + 75, + 750, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the midline is well detected in each of the views, but can fail on occluded postures or low-resolution, blurry images.", + "bbox": [ + 498, + 90, + 890, + 121 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Yuval [63] uses a neural network to track head and tail points in 3D lab coordinates and a curve is fit between these fixed end points using a hill-climbing optimisation algorithm. 
Scoring is based on curve smoothness and pixel intensities at the projected curve points. This method works well when the head and tail are correctly identified but struggles, or requires manual correction, otherwise.", + "bbox": [ + 496, + 121, + 890, + 227 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In our approach we find that incorporating the camera model parameters into the optimisation results in more robust and accurate results. This extends the idea proposed in Salfelder et al. [45] that adjusting the relative positions of the cameras could result in large gains in accuracy. It is likely that the relative shift adjustments, presented there, account for the changing optical properties.", + "bbox": [ + 496, + 227, + 890, + 334 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Geometric model", + "text_level": 1, + "bbox": [ + 500, + 348, + 668, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Nematode shapes can be well approximated by a tapered cylinder and computed from a midline. We construct the midline curve in 3D using an object-centric parametrisation, separating shape from position and orientation to allow us to easily constrain and regularise the shape to stay within biologically-reasonable bounds. We discretise the curve into $N$ equidistant vertices and encode the posture in curvature $K \\in \\mathbb{R}^{N \\times 2}$ and length $l \\in \\mathbb{R}$ that fully define the shape up to a rigid-body transformation.", + "bbox": [ + 496, + 375, + 890, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We express the 3D curve using the Bishop frame [8], given by $TM^{1}M^{2}$ where $T$ is the normalised tangent of the curve and $M^1, M^2$ form an orthogonal basis along the midline. At vertex $n$ , the curvature is $K_{n} = (m_{n}^{1}, m_{n}^{2})$ , where $m_{n}^{1}, m_{n}^{2} \\in \\mathbb{R}$ are the curvature components along $M^1, M^2$ . (The more familiar Frenet frame is less stable as it is undefined at zero-curvature points.) Numerical integration of a system of difference equations from starting point $P_{\\mathrm{init}}$ and initial orientation $(T_{\\mathrm{init}}, M_{\\mathrm{init}}^{1}, M_{\\mathrm{init}}^{2})$ yields the curve path $P \\in \\mathbb{R}^{N \\times 3}$ . See supplementary material (SM) for details.", + "bbox": [ + 496, + 510, + 890, + 661 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During optimisation, errors accumulate near the starting point, $P_{\\mathrm{init}}$ , resulting in either parts of the curve moving faster than other or kinks developing (even with strong regularisation). To resolve this we sample an initial vertex index $n_0$ from a Gaussian distribution (subject to rounding) centred at the middle index at every optimisation step. Setting the starting point $P_{\\mathrm{init}} = P_{n_0}$ has the effect of continually shifting the discontinuity so kinks are never given the opportunity to develop (Fig. 2). Summarising the integration as $F$ , the 3D curve is generated from the parameters:", + "bbox": [ + 496, + 662, + 890, + 813 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left(\\hat {P}, \\hat {T}, \\hat {M} ^ {1}\\right) = F \\left(P _ {n _ {0}}, T _ {n _ {0}}, M _ {n _ {0}} ^ {1}, K, l, n _ {0}\\right). 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 547, + 823, + 890, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Each gradient update adjusts all curvature values $K$ but the position and orientation only at the randomly selected $n_0$ vertex $(P_{n_0}, T_{n_0}, M_{n_0}^1)$ . Updating $(P, T, M^1)$ at only", + "bbox": [ + 498, + 854, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12567", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0dcc05d271cd6d8e13bce02ed463b36ccd006aa26f499d492e1cdf599860693c.jpg", + "image_caption": [ + "Figure 2. The 3D curve is traced out from initial point $P_{n_0}$ and orientation frame $(T_{n_0}, M_{n_0}^1, M_{n_0}^2)$ . The index $n_0$ of the initial point is drawn from a normal distribution at each iteration to prevent kinks developing through repeated use of the same starting point. The final curve $\\hat{P}$ is computed in two parts by integrating the Bishop equations with curvature $K$ towards the head and tail separately." + ], + "image_footnote": [], + "bbox": [ + 117, + 87, + 851, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "this vertex produces a $P$ that is inconsistent with the updated $K$ . Therefore, after applying gradient updates we re-compute the full curve and orientation from $n_0$ and set $(P,T,M^1)$ to the output $(\\hat{P},\\hat{T},\\hat{M}^1)$ .", + "bbox": [ + 75, + 329, + 468, + 390 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since the curve describes a biological creature, we constrain the length $l$ to $(l_{\\min}, l_{\\max})$ and limit the curvature by $|K_n| < 2\\pi k_{\\max}$ . The values of $(l_{\\min}, l_{\\max})$ we use vary depending on magnification but the bounds do not need to be tight and are in the range $0.5 - 2\\mathrm{mm}$ . The curvature constraint $k_{\\max}$ is set by considering the number of circle achieved by a constant curvature curve and is fixed at 3.", + "bbox": [ + 75, + 391, + 470, + 496 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Project, Render, Score", + "text_level": 1, + "bbox": [ + 75, + 510, + 289, + 527 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The core of the optimisation pipeline is separable into three main stages; project, render and score. The 3D curve $\\hat{P}$ generated in Eq. (1) is projected through the camera models into 2D points that are rendered into images and then scored against the three views.", + "bbox": [ + 75, + 536, + 468, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Project", + "text_level": 1, + "bbox": [ + 75, + 619, + 169, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The cameras are modelled using a triplet of pinhole camera models with tangential and radial distortion that project 3D points into image planes using perspective transformations. Each pinhole camera model offers a simple (15 parameters, $\\{\\eta_c\\}$ ), tractable, approximation to the optical transformation. We also include relative shifts along the local coordinate axes, $\\eta^s = (dx, dy, dz)$ , shared between the three models, as proposed by Salfelder et al. [45]. 
Initial camera coefficients for the triplet-model are provided along with the recordings and typically give root mean squared reprojection errors up to 10 pixels ( $\\sim \\mathcal{O}(\\text{worm radius})$ ).", + "bbox": [ + 75, + 643, + 468, + 809 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Due to the initial calibration errors and changes in optical properties as the gelatin sets and is disturbed by the worms we re-calibrate the cameras at every frame by including the camera parameters in the optimisation step. To avoid an under-determined problem, after we have found a configuration that supports good reconstructions for a recording", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9949594766b891731be95dfe988f6e9463f57ebc1bfd025d98dad9c6ad941fa2.jpg", + "image_caption": [ + "Figure 3. The rendering stage generates super-Gaussian blobs at each vertex position on the image. The shape of the blobs depends on the optimisable parameters: the scale $\\sigma$ , the intensity $\\iota$ and the exponent used in the Gaussian $\\rho$ . $\\sigma$ and $\\iota$ are tapered down to fixed minimum values at the head and tail. The effects of varying these parameters from a converged solution (blue curves) are shown above (green curves) and below (orange curves) each." + ], + "image_footnote": [], + "bbox": [ + 522, + 327, + 867, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we fix all but the $\\eta^s$ parameters. Interestingly, we still see changes (up to $30\\mathrm{px}\\sim 0.15\\mathrm{mm}$ ) in $\\eta^s$ but as this relates to the relative positioning it does not affect the posture reconstruction or long-term trajectories.", + "bbox": [ + 496, + 710, + 890, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Projecting the 3D curve $\\hat{P}$ through the camera-triplet model $\\Gamma$ with parameters $\\eta = \\{\\eta_0, \\eta_1, \\eta_2, \\eta^s\\}$ generates 2D image points per view, which we combine as $Q = \\Gamma(\\hat{P}, \\eta) \\in \\mathbb{R}^{3 \\times N \\times 2}$ .", + "bbox": [ + 498, + 772, + 890, + 834 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Render", + "text_level": 1, + "bbox": [ + 500, + 845, + 594, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to evaluate the reconstruction directly against the raw data, we render the projected 2D midline points into", + "bbox": [ + 500, + 869, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12568", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "images using optimisable shape and rendering parameters. Since worm bodies are well approximated by tapered cylinders, in theory we only require maximum and minimum radius values and a tapering function. However, $C$ elegans are semi-transparent – increasingly so at the head and tail – and their internal anatomy has varying optical properties that diffract and distort the light. These challenges are further exacerbated by the worms often being out of focus in at least one of the views, therefore even an anatomically accurate model stands little chance of being correctly resolved.", + "bbox": [ + 75, + 90, + 468, + 241 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We render realistic images by combining 2D super-Gaussian functions centred on each projected vertex. 
Crucially, we allow the rendering parameters to differ between cameras since the animal seldom has the same photometric qualities in different views. We optimise three parameters for each camera view $c$ : $\\sigma_c \\in \\mathbb{R}$ controls the spread, $\\iota_c \\in \\mathbb{R}$ scales the intensity, and $\\rho_c \\in \\mathbb{R}$ sharpens or softens the edges (Fig. 3). To capture the tapered shape we weight $\\sigma_c$ and $\\iota_c$ from their optimisable values along the middle $60\\%$ to minimum values $\\sigma_{\\mathrm{min}}$ and $\\iota_{\\mathrm{min}}$ at the ends and define the tapered outputs $\\bar{\\sigma}_c \\in \\mathbb{R}^N$ and $\\bar{\\iota}_c \\in \\mathbb{R}^N$ (SM). $\\sigma_{\\mathrm{min}}$ and $\\iota_{\\mathrm{min}}$ are manually fixed for each recording to account for different magnification factors and worm size variability.", + "bbox": [ + 75, + 242, + 470, + 436 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For each camera index $c$ and vertex index $n$ we define the rendered blob $B_{c,n} \\in \\mathbb{R}^{w \\times w}$ (image size $w$ ) for pixel $(i,j)$ as:", + "bbox": [ + 76, + 438, + 468, + 484 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nB _ {c, n} (i, j) = \\bar {\\iota} _ {c, n} \\exp \\left[ - \\left(\\frac {(i - Q _ {c , n , 0}) ^ {2} + (j - Q _ {c , n , 1}) ^ {2}}{2 \\bar {\\sigma} _ {c , n} ^ {2}}\\right) ^ {\\rho_ {c}} \\right]. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 494, + 496, + 541 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The stacks of blobs are combined to generate the complete renderings $R \\in \\mathbb{R}^{3 \\times w \\times w}$ by taking the maximum pixel value across all blobs: for pixel $(i,j)$ ,", + "bbox": [ + 76, + 542, + 468, + 588 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nR _ {c} (i, j) = \\max \\left\\{B _ {c, n} (i, j) \\right\\} _ {n = 0, \\dots , N - 1}. \\qquad (3)\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 598, + 468, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The orientation of the body directly affects the pixel intensity of both raw and rendered images. When pointing directly at a camera the peaks of the blobs cluster closely together and appear as a high-intensity (opaque) circle. Pointing laterally causes the peaks to spread out on the image revealing more of the lower-intensity tails. In both situations our blob-rendering approach approximates transparency effects in the raw images without the need to model complex intensity-orientation responses. Moreover, super-Gaussian blobs allow sharp outlines to be produced in one view by using a large exponent and flat-top blobs, and blurry images to be produced for another, using low intensity and high variance.", + "bbox": [ + 75, + 627, + 468, + 821 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Score", + "text_level": 1, + "bbox": [ + 76, + 832, + 156, + 845 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In order to evaluate how well the curve represents the worm we require a way of distinguishing between worm-pixels and non-worm pixels such as dirt, bubbles, old tracks", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6ad8ea5c520c974e0bf8a0cbb92b1ae4e9ffec6b645fb5269155c3229f9d1dd1.jpg", + "image_caption": [ + "Figure 4. The 3D curve points are scored individually according to how well they match the three views. 
The triplet of blobs associated with vertex $n$ ( $B_{.,n}$ ) are multiplied with the images $I$ and summed. We take the minimum of the three sums and then taper these values from the midpoint-out." + ], + "image_footnote": [], + "bbox": [ + 513, + 90, + 880, + 318 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and even other worms. When the animal truly intersects with environmental interference it can be impossible to differentiate between the two, but in the majority of cases there exists a gap between the worm and the noise that is visible in at least one of the views. By ensuring that the curve corresponds to a single contiguous pixel mass in all of the images we are able to safely ignore other artefacts (Fig. 4).", + "bbox": [ + 496, + 426, + 890, + 531 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To detect if the curve is bridging a gap, each vertex $\\hat{P}_n$ is scored by correlating its corresponding blobs $B_{.,n}$ (Sec. 4.2) with the images $I$ . The raw score $S_n \\in \\mathbb{R}$ is defined:", + "bbox": [ + 498, + 531, + 890, + 577 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {n} = \\min \\left\\{\\frac {\\sum_ {i , j} B _ {c , n} \\cdot I _ {c}}{\\bar {\\sigma} _ {c , n} \\bar {\\iota} _ {c , n}} \\right\\} _ {c = 0, 1, 2} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 585, + 890, + 623 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\cdot$ is element-wise multiplication and the sum is taken over the image dimensions. By taking the minimum we ensure that vertices failing to match pixels in any one of the views will receive low scores regardless of how well they match pixels in the other views.", + "bbox": [ + 496, + 631, + 890, + 705 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "If the curve is bridging two disjoint groups of pixels that are visible in all three views this will present as two peaks in $S$ . Since we are only interested in finding one object we restrict the scores to contain just one peak by tapering $S$ from the middle-out to form the intermediate $S'$ . Finally we normalise $S'$ to get scores $\\hat{S}$ relative to the peak:", + "bbox": [ + 496, + 705, + 890, + 797 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {n} ^ {\\prime} = \\left\\{ \\begin{array}{l l} \\min \\left\\{S _ {n}, S _ {n + 1} ^ {\\prime} \\right\\} & 0 \\leq n < N / 2 \\\\ S _ {n} & n = N / 2 \\\\ \\min \\left\\{S _ {n}, S _ {n - 1} ^ {\\prime} \\right\\} & N / 2 < n < N \\end{array} \\right. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 806, + 890, + 862 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {S} = \\frac {S ^ {\\prime}}{\\operatorname* {m a x} _ {n} \\left\\{S ^ {\\prime} \\right\\}}. 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 864, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12569", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dd9322366813a8574c7ebdaf0f880c1441aa7f65292dad1147b259010c6b000e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 86, + 89, + 467, + 284 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6babeae149d596324c3e3ec55073e03281b9b384484f51b3ef81bb8c0b5b6246.jpg", + "image_caption": [ + "Without masking:", + "With masking:" + ], + "image_footnote": [], + "bbox": [ + 93, + 297, + 454, + 354 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b1e9a7ce05932bbf5b4abe3f033da0081e6aea57f174773a46dff8a9c2f1e9f5.jpg", + "image_caption": [ + "Figure 5. The noisy input images are cleaned by applying masks that force pixel-errors to be local to the current estimate. The blobs $B$ are scaled by the relative scores $\\hat{S}$ , combined using the maximum pixel value across blobs and thresholded to form the masks $M$ . The masks are applied to the raw input images $I$ to generate the targets: $I^{\\star}$ . Masking ensures only a single contiguous pixel mass is detected. Without it, parts of the reconstruction can \"stick\" to nearby bubbles and other artefacts as shown below." + ], + "image_footnote": [], + "bbox": [ + 93, + 364, + 454, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The final score profile $\\hat{S}$ provides insight into how well the curve matches a contiguous pixel mass across all three views and how evenly that mass is distributed.", + "bbox": [ + 76, + 573, + 468, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Masking From the score profile $\\hat{S}$ we identify image areas that are more likely to contain the pixel masses that correspond to the worm. Masks $M\\in \\mathbb{R}^{3\\times w\\times w}$ applied to the input, $I^{\\star} = M\\cdot I$ , focuses attention (and gradient) to only these areas of interest, consistently across all three views and exclude interference outside the masks (Fig. 5, see SM). Pixel intensities outside the masks are significantly reduced, but not zeroed in order to avoid stagnation in case the reconstruction completely misses the worm.", + "bbox": [ + 75, + 638, + 468, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Centre-shifting The scores $\\hat{S}$ also indicate the relative positioning of the curve over the target object. As the curve aligns with a pixel mass, vertices with high scores (apparently \"converged\") tend to lock into place thus hindering convergence of the rest of the object. For each frame, we use the previous frame solution as the starting point, so the majority of points rapidly converge. However, errors intro", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1e6f429edb951cc48624b9c2fe48b31b93f95333652758e73949cb2e3952d8e8.jpg", + "image_caption": [ + "Figure 6. As the animal moves along the path of its midline the tail may be left behind (left column). This can be identified from an unbalanced score profile $\\hat{S}$ . By periodically shifting the curve along its length (adding new curvature values at one end and discarding from the other) the centroid index $(\\bar{n})$ of the scores can be centred. 
Gradient descent optimisation then updates the new curvature values so the curve matches the target (right column)." + ], + "image_footnote": [], + "bbox": [ + 516, + 87, + 879, + 311 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "duced at the tips remain as they are insufficient to generate the collective shift required. The effect can easily be identified from an unbalanced score profile (Fig. 6) and rectified by periodically shifting the curve along its length between gradient descent optimisation steps (see SM).", + "bbox": [ + 496, + 448, + 893, + 523 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Optimisation", + "text_level": 1, + "bbox": [ + 500, + 537, + 635, + 554 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The main pixel-loss to be minimised is defined as:", + "bbox": [ + 519, + 563, + 852, + 578 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {p x}} = \\frac {1}{3 w ^ {2}} \\sum_ {c, i, j} \\left(R _ {c} (i, j) - I _ {c} ^ {\\star} (i, j)\\right) ^ {2}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 588, + 890, + 626 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To improve head and tail detection we also minimise a scores-loss,", + "bbox": [ + 496, + 637, + 890, + 667 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {s c}} = \\frac {\\operatorname* {m a x} \\left(S ^ {\\prime}\\right) N}{\\sum_ {n} S _ {n} ^ {\\prime \\prime}}, \\text {w h e r e} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 676, + 890, + 710 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nS _ {n} ^ {\\prime \\prime} = S _ {n} ^ {\\prime} \\left(\\frac {2 n - (N - 1)}{N - 1}\\right) ^ {2}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 713, + 890, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "that is quadratically weighted towards the tips where the scores are naturally lower due to the transparency.", + "bbox": [ + 496, + 758, + 890, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In addition we include a number of regularisation terms. To keep the curve smooth we define", + "bbox": [ + 500, + 790, + 890, + 819 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {s m}} = \\sum_ {n = 1} ^ {N - 1} \\left| K _ {n} - K _ {n - 1} \\right| ^ {2}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 830, + 890, + 872 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $|\\cdot |$ is the $l^2$ -norm. To ensure all parameters change", + "bbox": [ + 500, + 885, + 890, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12570", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "smoothly between frames we set", + "bbox": [ + 76, + 90, + 295, + 104 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t}} = \\sum_ {x \\in \\{l, K, \\hat {P}, \\eta , \\sigma , \\iota , \\rho \\}} | x ^ {\\text {p r e v}} - x | ^ {2}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 119, + 468, + 155 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $x^{\\mathrm{prev}}$ refers to the frozen value of the variable from the previous frame. 
And to avoid self-intersections, we use", + "bbox": [ + 76, + 166, + 468, + 196 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} d _ {n, m} = \\left| \\hat {P} _ {n} - \\hat {P} _ {m} \\right|, (12) \\\\ d _ {n, m} ^ {\\prime} = \\frac {1}{3} \\sum_ {c} \\bar {\\sigma} _ {c, n} + \\frac {1}{3} \\sum_ {c} \\bar {\\sigma} _ {c, m}, \\text {a n d} (13) \\\\ \\mathcal {L} _ {\\mathrm {i}} = \\sum_ {n = 0} ^ {N - N / k _ {\\max } - 1} \\sum_ {m = n + N / k _ {\\max }} ^ {N - 1} \\left\\{ \\begin{array}{l l} \\frac {d _ {n , m} ^ {\\prime}}{d _ {n , m}}, & \\text {i f} d _ {n, m} < d _ {n, m} ^ {\\prime} \\\\ 0, & \\text {o t h e r w i s e .} \\end{array} \\right. (14) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 223, + 467, + 342 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A loss is incurred, $\\mathcal{L}_{\\mathrm{i}} > 0$ , when two points which are sufficiently far apart ( $>N / k_{\\max}$ ) along the curve come within a distance defined by the sum of their mean rendering variances (since these approximate the worm's radius). Eq. (14) forces the algorithm to find postures that are always feasible even during self-occlusions and complex manoeuvres.", + "bbox": [ + 76, + 356, + 467, + 446 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The losses are combined in a weighted sum to yield the final optimisation target:", + "bbox": [ + 76, + 448, + 467, + 478 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\omega_ {\\mathrm {p x}} \\mathcal {L} _ {\\mathrm {p x}} + \\omega_ {\\mathrm {s c}} \\mathcal {L} _ {\\mathrm {s c}} + \\omega_ {\\mathrm {s m}} \\mathcal {L} _ {\\mathrm {s m}} + \\omega_ {\\mathrm {t}} \\mathcal {L} _ {\\mathrm {t}} + \\omega_ {\\mathrm {i}} \\mathcal {L} _ {\\mathrm {i}}. \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 492, + 468, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Values of $\\omega$ used in our experiments are included in the SM.", + "bbox": [ + 76, + 521, + 467, + 535 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To achieve robust reconstructions it is important that the curve parameters learn fastest, then the rendering parameters and finally the camera parameters. Imposing this hierarchy of rates ensures camera model stability and prevents the renderer from over-blurring the edges (as it tries to \"reach\" the pixels). Thus, movement between frames is primarily captured through curve deformations. We use learning rates $\\lambda_{p} = 1\\mathrm{e} - 3$ for the curve parameters $\\{P,T,M^1,K,l\\}$ , $\\lambda_r = 1\\mathrm{e} - 4$ for the rendering parameters $\\{\\sigma ,\\iota ,\\rho \\}$ and $\\lambda_{\\eta} = 1\\mathrm{e} - 5$ for the camera parameters $\\eta$ .", + "bbox": [ + 76, + 536, + 467, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The curve is initialised as a small $(\\sim 0.2\\mathrm{mm})$ , randomly oriented straight line centred in the field of view of all three cameras. We slowly increase the length to $l_{\\mathrm{min}}$ over the first 200-500 steps as the curve gets positioned and orientated.", + "bbox": [ + 76, + 688, + 467, + 748 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The pipeline is constructed using PyTorch [64] and the loss minimised is using Adam [31] with periodic centre-shifting of the curve vertices. 
Learning rates are decreased by a factor of 0.8 for every 5 steps taken without improvement in $\\mathcal{L}$ to a minimum of $1\\mathrm{e} - 6$ until convergence is detected. Subsequent frames are instantiated with the solution from the previous frame for efficiency and to maintain consistency through complex sequences of self-occluding postures. Example videos showing the effects of varying some of the options on the optimisation are described in SM.", + "bbox": [ + 75, + 750, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/67915852fe48a0b1a909ac42a61baea8c9510dde3148fed59046adac7f48b8e9.jpg", + "image_caption": [ + "Figure 7. Validation against 487 manual annotations. At the top we show an example of an annotated frame (left, orange) alongside a projection of our matching 3D midline (right, blue). Below we plot the sample averages $\\pm 2\\mathrm{std}$ . We find our midlines are consistently close to annotated points (blue curve), but annotations typically extend further into the head and tail regions (orange curve)." + ], + "image_footnote": [], + "bbox": [ + 526, + 89, + 864, + 290 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6. Results", + "text_level": 1, + "bbox": [ + 500, + 415, + 584, + 430 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Using our method we generate high quality 3D midline reconstructions for 43 of 44 recordings. One fails due to excessive coiling of the worm. Significant occlusions also occur during successful reconstructions and when combined with loss of focus can cause the shape to be lost. Video clips of good and poor reconstructions through challenging environmental conditions are described in SM along with ablation results to show benefits of each component.", + "bbox": [ + 496, + 441, + 890, + 564 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare 2D reprojections of our midlines against 487 manual annotations that were produced from single images in isolation and contain a varying number of unordered points. We calculate the minimum distance from each annotated point to any reconstructed point and vice-versa and find that our midlines consistently come close ( $\\sim$ 2px) to hand-annotated points (Fig. 7). Annotated points at the ends show an increased distance ( $\\sim$ 10px) to our midline points. This shows that our curves generally fall short of reaching the very tips of the worm by $\\sim$ O(worm radius).", + "bbox": [ + 496, + 566, + 890, + 715 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our method significantly outperforms previous methods developed using the same dataset [45, 63] when evaluated against the manual annotations (SM), but these only cover a selection of hand-picked examples. For a large-scale comparison we take 3D midlines and camera parameters found by each method and, using our pipeline, render them to generate comparable images (re-optimising the render parameters for their midlines, see SM). We skip the scoring and masking and calculate $\\mathcal{L}_{\\mathrm{px}}$ . The results (Fig. 8) show our method consistently produces shapes that more closely match the raw images. 
The biggest advantage over previous approaches is the improvement in robustness; we recover", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12571", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6d4533a14537cc613de6396d0b23513ca3d4ef77e19f8377543bddaf9b1160cf.jpg", + "image_caption": [ + "Figure 8. A comparison between our Midline Finder (MF), Yuval's Worm-Tracker 3D (WT3D) [63] and Salfelder et al.'s 'reconst' [45] methods across a single trial ( $\\sim 13$ min). In the majority of cases our method generates midlines that better match the data (lower pixel losses, $\\mathcal{L}_{\\mathrm{px}}$ ). We show moving averages over 25 frames ( $\\sim 1$ s) with shaded areas indicating $\\pm 2$ std." + ], + "image_footnote": [], + "bbox": [ + 104, + 89, + 869, + 220 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ef58c27265949ca3d73c2dc14206e8051d5ef4f7d6e02e142cb1168b097d4533.jpg", + "image_caption": [ + "Figure 9. The rendering parameters change continually over the course of a recording to capture optical changes. Clear images (e.g. early frames in cameras 0 and 1, switching to late frames in camera 2) are consistent with small values of $\\sigma$ and large values of $\\rho$ . Blurry images (early camera 2, late camera 1) use high $\\sigma$ and small $\\rho$ . We show moving averages over 25 frames ( $\\sim 1$ s) with shaded areas indicating $\\pm 2$ std. Example comparisons between the renders (red) and raw images (grey) are shown on either side." + ], + "image_footnote": [], + "bbox": [ + 96, + 290, + 875, + 506 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 h 37 min (ours) versus 1 h 32 min [45] and 45 min [63].", + "bbox": [ + 76, + 603, + 460, + 618 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 9 shows the rendering parameters during a trial as the worm moves in and out of focus in the different cameras. Clearer images result in smaller values of $\\sigma$ and larger values of $\\rho$ . The fluctuations in intensity $\\iota$ are due in part to the posture of the worm in relation to the camera; when it is pointing directly towards the camera we see higher values of $\\iota$ used to capture the darker image observed and when the shape is perpendicular to the camera we see lower values of $\\iota$ to emulate the worm's transparency. All three parameters work in tandem to produce the final effect.", + "bbox": [ + 75, + 619, + 470, + 770 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 784, + 197, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present a robust and reliable framework for the 3D reconstruction of a microscopic, semi-transparent subject moving through a fluid and evaluate against two other algorithms and manually annotations. The key contribution of our approach - constructing unique differentiable renderings for each view - allows us to solve shape recon", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "struction and camera parameter optimisation by direct image comparison. This avoids feature extraction and correspondence matching, and hence offers a powerful alternative when those approaches are not well-suited, e.g. 
due to the variation in appearance between views.", + "bbox": [ + 496, + 603, + 892, + 678 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-view microscopic camera calibration, imaging through fluids and parametric model fitting of semitransparent subjects are challenges that have received little attention in the literature. While we have focused here on constructing a curve to fit a microscopic worm from three views, our method could be applied to the 3D reconstruction of arbitrary shape models at any scale using any number of viewpoints. Rendering points with adaptable super-Gaussian functions presents an effective solution to transparency and focal issues, but more generally, our results indicate that our direct optimisation approach may offer an effective alternative to contemporary methods for 3D approximation of generic objects from a limited number of silhouette-like images.", + "bbox": [ + 496, + 679, + 893, + 891 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12572", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3686-3693. IEEE, June 2014. 2", + "[2] Praneet C. Bala, Benjamin R. Eisenreich, Seng Bum Michael Yoo, Benjamin Y. Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with OpenMonkeyStudio. Nat Commun, 11(1):4560, Sept. 2020. 3", + "[3] Jerrold L Belant, Joshua J Millspaugh, James A Martin, and Robert A Gitzen. Multi-dimensional space use: The final frontier. Front. Ecol. Environ., 10(1):11-12, Feb. 2012. 1", + "[4] Florian Berlinger, Melvin Gauci, and Radhika Nagpal. Implicit coordination for 3D underwater collective behaviors in a fish-inspired robot swarm. Sci. Robot., 6(50):eabd8668, Jan. 2021. 1", + "[5] Stefano Berri, Jordan H. Boyle, Manlio Tassieri, Ian A. Hope, and Netta Cohen. Forward locomotion of the nematode C. elegans is achieved through modulation of a single gait. Hfsp J., 3(3):186-193, June 2009. 3", + "[6] Benjamin Biggs, Oliver Boyne, James Charles, Andrew Fitzgibbon, and Roberto Cipolla. Who left the dogs out? 3d animal reconstruction with expectation maximization in the loop. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 195-211. Springer, 2020. 3", + "[7] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures great and small: Recovering the shape and motion of animals from video. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2-6, 2018, Revised Selected Papers, Part V 14, pages 3-19. Springer, 2019. 3", + "[8] Richard L. Bishop. There is more than one way to frame a curve. Amer. Math. Monthly, 82(3):246-251, Mar. 1975. 3", + "[9] Thomas J. Cashman and Andrew W. Fitzgibbon. What shape are dolphins? building 3D morphable models from 2D images. IEEE Trans. Pattern Anal. Mach. Intell., 35(1):232-244, Jan. 2013. 3", + "[10] Ching-Hang Chen and Deva Ramanan. 3D human pose estimation = 2D pose estimation + matching. 
In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7035-7043. IEEE, July 2017. 2", + "[11] Nathan W. Cooper, Thomas W. Sherry, and Peter P. Marra. Modeling three-dimensional space use and overlap in birds. *Auk*, 131(4):681–693, Oct. 2014. 1", + "[12] Amael Delaunoy and Marc Pollefeys. Photometric bundle adjustment for dense multi-view 3D modeling. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1486-1493. IEEE, June 2014. 2", + "[13] Olivier Faugeras and Quang-Tuan Luong. The Geometry of Multiple Images. The MIT Press, 2001. 2", + "[14] Alessandro Ferrarini, Giuseppe Giglio, Stefania Caterina Pellegrino, Anna Grazia Frassanito, and Marco Gustin. A new methodology for computing birds' 3D home ranges. Avian Res, 9(1):1-6, May 2018. 1" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Lise Frézal and Marie-Anne Félix. The natural history of model organisms: C. elegans outside the petri dish. eLife, 4:e05849, Mar. 2015. 2", + "[16] Kui Fu, Jiansheng Peng, Qiwen He, and Hanxiao Zhang. Single image 3D object reconstruction based on deep learning: A review. Multimed Tools Appl, 80(1):463-498, Sept. 2020. 2", + "[17] Marie-Anne Félix and Christian Braendle. The natural history of caenorhabditis elegans. Curr. Biol., 20(22):R965–R969, Nov. 2010. 2", + "[18] P. Georgel, S. Benhimane, and N. Navab. A unified approach combining photometric and geometric information for pose estimation. In Proceedings of the British Machine Vision Conference 2008, pages 1-10. CiteSeer, British Machine Vision Association, 2008. 2", + "[19] Riza Alp Guler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7297-7306. IEEE, June 2018. 2", + "[20] Zicheng Guo and Richard W. Hall. Parallel thinning with two-subiteration algorithms. Commun. ACM, 32(3):359-373, Mar. 1989. 3", + "[21] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, Mar. 2004. 2", + "[22] Robert I. Holbrook and Theresa Burt de Perera. Three-dimensional spatial cognition: Information in the vertical dimension overrides information from the horizontal. Anim Cogn, 14(4):613-619, Mar. 2011. 1", + "[23] C.T. Huang and O.R. Mitchell. Dynamic camera calibration. In Proceedings of International Symposium on Computer Vision - ISCV, pages 169-174. IEEE, IEEE Comput. Soc. Press, 1995. 2", + "[24] Steven J. Husson, Wagner S. Costa, Cornelia Schmitt, and Alexander Gottschalk. Keeping track of worm trackers. WormBook, pages 1-17, Sept. 2012. 3", + "[25] Avelino Javer, Michael Currie, Chee Wai Lee, Jim Hokanson, Kezhi Li, Céline N. Martineau, Eviatar Yemini, Laura J. Grundy, Chris Li, QueeLim Ch'ng, William R. Schafer, Ellen A. A. Nollen, Rex Kerr, and André E. X. Brown. An open-source platform for analyzing and sharing worm-behavior data. Nat Methods, 15(9):645-646, Aug. 2018. 3", + "[26] Le Jiang, Caleb Lee, Divyang Teotia, and Sarah Ostadabbas. Animal pose estimation: A closer look at the state-of-the-art, existing gaps and opportunities. Comput. Vis. Image Und., 222:103483, Sept. 2022. 1, 2", + "[27] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131. IEEE, June 2018. 
2, 3", + "[28] Angjoo Kanazawa, Shahar Kovalsky, Ronen Basri, and David Jacobs. Learning 3d deformation of animals from 2d images. In Computer Graphics Forum, volume 35, pages 365-374. Wiley Online Library, 2016. 3", + "[29] Isinsu Katircioglu, Bugra Tekin, Mathieu Salzmann, Vincent Lepetit, and Pascal Fua. Learning latent representations of" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12573", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "3D human pose with deep neural networks. Int J Comput Vis, 126(12):1326-1341, Jan. 2018. 2", + "[30] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. RGBD-dog: Predicting canine pose from RGBD sensors. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8336-8345. IEEE, June 2020. 1, 2", + "[31] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7", + "[32] Nikos Kolotouros, Georgios Pavlakos, Michael Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 2252-2261. IEEE, Oct. 2019. 2", + "[33] Namseop Kwon, Ara B. Hwang, Young-Jai You, Seung-Jae V. Lee, and Jung Ho Je. Dissection of C. elegans behavioral genetics in 3-d environments. Sci Rep, 5(1):1-9, May 2015. 3", + "[34] Namseop Kwon, Jaeyeon Pyo, Seung-Jae Lee, and Jung Ho Je. 3-d worm tracker for freely moving C. elegans. PLoS ONE, 8(2):e57484, Feb. 2013. 3", + "[35] Wu Liu, Qian Bao, Yu Sun, and Tao Mei. Recent advances of monocular 2D and 3D human pose estimation: A deep learning perspective. ACM Comput. Surv., 55(4):1-41, Nov. 2022. 2", + "[36] H. C. Longuet-Higgins. A computer algorithm for reconstructing a scene from two projections. Nature, 293(5828):133-135, Sept. 1981. 2", + "[37] Simone Macri, Daniele Neri, Tommaso Ruberto, Violet Mwaffo, Sachit Butail, and Maurizio Porfiri. Three-dimensional scoring of zebrafish behavior unveils biological phenomena hidden by two-dimensional analyses. Sci Rep, 7(1):1-10, May 2017. 1", + "[38] Julieta Martinez, Rayat Hossain, Javier Romero, and James J. Little. A simple yet effective baseline for 3d human pose estimation. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 2640-2649. IEEE, Oct. 2017. 2", + "[39] Valsamis Ntouskos, Marta Sanzari, Bruno Cafaro, Federico Nardi, Fabrizio Natola, Fiora Pirri, and Manuel Ruiz. Component-wise modeling of articulated objects. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 2327-2335. IEEE, Dec. 2015. 3", + "[40] Onur Özyesil, Vladislav Voroninski, Ronen Basri, and Amit Singer. A survey of structure from motion. Acta Numer., 26:305-364, May 2017. 2", + "[41] Brian L. Partridge, Tony Pitcher, J. Michael Cullen, and John Wilson. The three-dimensional structure of fish schools. *Behav Ecol Sociobiol*, 6(4):277-288, Mar. 1980. 1", + "[42] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G. Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7025-7034. IEEE, July 2017. 2", + "[43] Mukta Prasad, Andrew Fitzgibbon, Andrew Zisserman, and Luc Van Gool. 
Finding nemo: Deformable object class mod-" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "elling using curve matching. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1720-1727. IEEE, IEEE, June 2010. 3", + "[44] Daniel Ramot, Brandon E. Johnson, Tommie L. Berry, Lucinda Carnell, and Miriam B. Goodman. The parallel worm tracker: A platform for measuring average speed and drug-induced paralysis in nematodes. PLoS ONE, 3(5):e2208, May 2008. 3", + "[45] Felix Salfelder, Omer Yuval, Thomas P Ilett, David C Hogg, Thomas Ranner, and Netta Cohen. Markerless 3D spatio-temporal reconstruction of microscopic swimmers from video. In Visual observation and analysis of Vertebrate And Insect Behavior 2020, 2021. 2, 3, 4, 7, 8", + "[46] Hinrich Schulenburg and Marie-Anne Félix. The natural biotic environment of Caenorhabditis elegans. Genetics, 206(1):55-86, May 2017. 2", + "[47] S.M. Seitz, B. Curless, J. Diebel, D. Scharstein, and R. Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Volume 1 (CVPR'06), volume 1, pages 519-528. IEEE, IEEE, 2006. 2", + "[48] William Irvin Sellers and Eishi Hirasaki. Markerless 3D motion capture for animal locomotion studies. *Biology Open*, 3(7):656-668, June 2014. 2", + "[49] Michael Shaw, Haoyun Zhan, Muna Elmi, Vijay Pawar, Clara Essmann, and Mandayam A. Srinivasan. Three-dimensional behavioural phenotyping of freely moving C. elegans using quantitative light field microscopy. PLoS ONE, 13(7):e0200108, July 2018. 3", + "[50] Colin A. Simpfendorfer, Esben M. Olsen, Michelle R. Heupel, and Even Moland. Three-dimensional kernel utilization distributions improve estimates of space use in aquatic animals. Can. J. Fish. Aquat. Sci., 69(3):565-572, Mar. 2012. 1", + "[51] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5693-5703. IEEE, June 2019. 2", + "[52] Nicholas A Swierczek, Andrew C Giles, Catharine H Rankin, and Rex A Kerr. High-throughput behavioral analysis in C. elegans. Nat Methods, 8(7):592-598, June 2011. 3", + "[53] Raphael Sznitman, Manaswi Gupta, Gregory D. Hager, Paulo E. Arratia, and Josué Sznitman. Multi-environment model estimation for motility analysis of caenorhabditis elegans. PLoS ONE, 5(7):e11631, July 2010. 3", + "[54] Diane Theriault, Zheng Wu, Nickolay I Hristov, Sharon M Swartz, Kenneth S Breuer, Thomas H Kunz, and Margrit Betke. Reconstruction and analysis of 3D trajectories of Brazilian free-tailed bats in flight. In 20th Int. Conf. on Pattern Recognition, pages 1-4, 2010. 1", + "[55] Alexander Toshev and Christian Szegedy. DeepPose: Human pose estimation via deep neural networks. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1653-1660. IEEE, June 2014. 2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12574", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[56] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment — a modern synthesis. In Vision Algorithms: Theory and Practice, pages 298–372. 
Springer Berlin Heidelberg, 2000. 2", + "[57] R. Tsai. A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses. IEEE J. Robot. Automat., 3(4):323–344, Aug. 1987. 2", + "[58] Sara Vicente and Lourdes Agapito. Balloon shapes: Reconstructing and deforming objects with volume from images. In 2013 International Conference on 3D Vision, pages 223-230. IEEE, IEEE, June 2013. 3", + "[59] J. Weng, P. Cohen, and M. Herniou. Camera calibration with distortion models and accuracy evaluation. IEEE Trans. Pattern Anal. Machine Intell., 14(10):965-980, 1992. 2", + "[60] Nils Wilhelm, Anna Vögele, Rebeka Zsoldos, Theresia Licka, Björn Krüger, and Jürgen Bernard. FuryExplorer: Visual-interactive exploration of horse motion capture data. In SPIE Proceedings, volume 9397, pages 148–162. SPIE, SPIE, Feb. 2015. 1, 2", + "[61] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10. IEEE, June 2020. 2", + "[62] Eviatar Yemini, Rex A. Kerr, and William R. Schafer. Tracking movement behavior of multiple worms on food. Cold Spring Harb Protoc, 2011(12):pdb.prot067025, Dec. 2011. 3", + "[63] Omer Yuval. The neuromechanical control of Caenorhabditis elegans head motor behaviour in 3D environments. PhD thesis, University of Leeds, 2022. 3, 7, 8", + "[64] Sergey Zagoruyko, Adam Lerer, Tsung-Yi Lin, PedroO. Pinheiro, Sam Gross, Soumith Chintala, and Piotr Dollar. A MultiPath network for object detection. In Proceedings of the British Machine Vision Conference 2016. British Machine Vision Association, 2016. 7", + "[65] Liquun Zhu and Wei Weng. Catadioptric stereo-vision system for the real-time monitoring of 3D behavior in aquatic animals. *Physiology & Behavior*, 91(1):106-119, May 2007. 1", + "[66] Michael Zollhöfer, Patrick Stotko, Andreas Görtlitz, Christian Theobalt, Matthias Nießner, Reinhard Klein, and Andreas Kolb. State of the art on 3D reconstruction with RGB-d cameras. In Computer graphics forum, volume 37, pages 625-652. Wiley Online Library, 2018. 2", + "[67] Silvia Zuffi, Angjoo Kanazawa, David W. Jacobs, and Michael J. Black. 3D menagerie: Modeling the 3D shape and pose of animals. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6365-6373. IEEE, July 2017. 3" + ], + "bbox": [ + 78, + 90, + 470, + 811 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "12575", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_model.json b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4f513197bcb53a5a22921ad4aa9456728c25d19a --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_model.json @@ -0,0 +1,2323 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.223, + 0.132, + 0.748, + 0.153 + ], + "angle": 0, + "content": "3D shape reconstruction of semi-transparent worms" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.18, + 0.834, + 0.219 + ], + "angle": 0, + "content": "Thomas P. Ilett* Omer Yuval* Thomas Ranner* Netta Cohen*† David C. Hogg*† \nUniversity of Leeds, Leeds, United Kingdom" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.253, + 0.891, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.461, + 0.665, + 0.476 + ], + "angle": 0, + "content": "Figure 1. Posture reconstruction pipeline and imaging setup." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.491, + 0.314, + 0.507 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.526, + 0.471, + 0.737 + ], + "angle": 0, + "content": "3D shape reconstruction typically requires identifying object features or textures in multiple images of a subject. This approach is not viable when the subject is semitransparent and moving in and out of focus. Here we overcome these challenges by rendering a candidate shape with adaptive blurring and transparency for comparison with the images. We use the microscopic nematode Caenorhabditis elegans as a case study as it freely explores a 3D complex fluid with constantly changing optical properties. We model the slender worm as a 3D curve using an intrinsic parametrisation that naturally admits biologically-informed constraints and regularisation. To account for the changing optics we develop a novel differentiable renderer to construct images from 2D projections and compare" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.755, + 0.47, + 0.901 + ], + "angle": 0, + "content": "\\*T.Ilett, O.Yuval, T.Ranner, N.Cohen, D.C.Hogg}@leeds.ac.uk Funding This work was supported by University of Leeds and EPSRC. Author contributions Conceptualisation, Methodology, Formal analysis, Investigation, Software, Visualisation: TPI. Data curation, Validation: TPI, OY. Writing: TPI (original), all (review and editing). Funding acquisition, Supervision: NC, DCH, TR. \\(\\dagger\\) Equal contribution. Acknowledgements Additional thanks to Matan Braunstein (for help with Fig. 1), Robert I. Holbrook (data), Felix Salfelder (discussions and data), Lukas Deutz (discussions) and Jen Kruger (proof reading). Data availability Supplementary movies are available here: https://doi.org/10.6084/m9.figshare.22310650." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.492, + 0.895, + 0.645 + ], + "angle": 0, + "content": "against raw images to generate a pixel-wise error to jointly update the curve, camera and renderer parameters using gradient descent. The method is robust to interference such as bubbles and dirt trapped in the fluid, stays consistent through complex sequences of postures, recovers reliable estimates from blurry images and provides a significant improvement on previous attempts to track C. elegans in 3D. Our results demonstrate the potential of direct approaches to shape estimation in complex physical environments in the absence of ground-truth data." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.678, + 0.633, + 0.693 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.704, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Many creatures such as fish, birds and insects move in all directions to search and navigate volumetric environments. Acquiring 3D data of their motion has informed models of locomotion, behaviour and neural and mechanical control [3,22]. While technological advances have made the collection of large quantities of multi-viewpoint visual data more attainable, methods for extracting and modelling 3D information remain largely domain-dependant as few species share common geometric models or exist within the same spatial and temporal scales [4, 11, 14, 26, 37, 41, 50, 54, 65]. Furthermore, while humans and some domesticated animals [30, 60] may act naturally while wearing special markers, marker-less observations of many species makes fea" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12565" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "ture extraction more challenging and means pose estimation generally lacks ground-truth data [48]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.124, + 0.468, + 0.26 + ], + "angle": 0, + "content": "As a case study in marker-less 3D shape reconstruction, we consider \\( C \\) elegans, a hair-thick, \\( \\sim 1 \\) mm long animal with a simple tapered cylinder shape, which can be constructed from a midline \"skeleton\". In the wild, \\( C \\) elegans can be found in a wide range of complex 3D environments, e.g. decomposing organic matter, with continually changing physical properties [15, 17, 46]. However, to date, experiments have focused nearly exclusively on locomotion on a plane, limiting insight to the constrained, planar behaviours." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.262, + 0.468, + 0.444 + ], + "angle": 0, + "content": "We obtained a large dataset (4 hours 53 minutes \\(\\simeq\\) 440,000 frames at \\(25\\mathrm{Hz}\\)) of experimental recordings of individual worms moving freely inside a glass cube filled with a gelatin solution. The cube is positioned between three nearly-orthogonal static cameras fitted with telecentric lenses. Initial pinhole camera model parameter estimates are provided [45] but are imprecise and require continuous adjustment across the course of a recording to account for small vibrations and optical changes to the gel. We aim to simultaneously reconstruct a 3D shape and find corrected camera parameters to match these recordings in a process akin to bundle adjustment [56]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.468, + 0.779 + ], + "angle": 0, + "content": "3D reconstruction typically involves the identification and triangulation of common features from multiple viewpoints or the synthesis of full images including texture and shading information to match given scenes [16, 21, 47, 66]. Imaging animals with length \\(\\sim 1\\mathrm{mm}\\) requires sufficient magnification, but simultaneously capturing long-term trajectories up to 25 minutes requires a large volume of view (10-20 worm lengths per axis). As the worm explores the cube it frequently appears out of focus in one or more of the cameras. Air bubbles and dirt trapped in the gel along with old tracks are difficult to differentiate from the transparent worm, particularly at the tapered ends. 
Self occlusion invariably appears in a least one view, where hidden parts darken the foreground while the ordering of fore/backparts is not discernible. As the semi-transparent and self-occluding subject moves in the volume, photometric information in one view bears little relevance to the appearance in the others making feature identification and photometric matching particularly challenging. We found that standard approaches may suffice for limited sub-clips, but lose parts of the object or fail catastrophically for much of the data and the solution requires a degree of adaptation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We present an integrated \"project-render-score\" algorithm to obtain a midline curve for each image-triplet (Fig. 1). Discrete curve vertices are projected through a triplet of pinhole camera models, rendered to produce an image-triplet for direct comparison against the recorded images and scored according to their intersection with worm-like pixels in all three views. The differentiable renderer stacks 2D super-Gaussian blobs at the projected locations" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "of each vertex to approximate the transparency along the worm, accounting for the variable focus and providing soft edges that direct the geometric model towards the midline. The scoring allows the detection of incongruities and keeps the curve aligned to the worm in all views. Regularisation terms ensure smoothness along the body and in time. Curve, camera and rendering parameters are jointly optimised using gradient descent to convergence. Once the worm shape has been resolved, it is generally only lost during image degradation or significant self-occlusions that make the posture unresolvable by eye." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.258, + 0.786, + 0.272 + ], + "angle": 0, + "content": "In summary, our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.273, + 0.892, + 0.317 + ], + "angle": 0, + "content": "- A robust pipeline for 3D posture reconstruction of a freely deforming semi-transparent object from noisy images." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.318, + 0.89, + 0.348 + ], + "angle": 0, + "content": "- A novel viewpoint renderer to capture optical distortions and transparency." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.348, + 0.89, + 0.378 + ], + "angle": 0, + "content": "- A feature-free bundle adjustment algorithm using direct image comparison and gradient descent." + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.273, + 0.892, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.39, + 0.637, + 0.405 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.415, + 0.892, + 0.551 + ], + "angle": 0, + "content": "Bundle adjustment (BA) is a procedure to jointly optimise 3D geometry and camera parameters [21, 56]. BA typically identifies common features of an object from multiple viewpoints in order to minimise a prediction error between projections of the corresponding 3D points and their 2D observations. BA is frequently used in conjunction with other methods to find camera parameters using multiple images of a 3D calibration object with known control points or for fine-tuning results [13, 23, 36, 40, 57, 59]." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.892, + 0.673 + ], + "angle": 0, + "content": "Feature detection converts photometric information into image coordinates. In BA, coordinates of common features are used to solve a geometric optimisation problem. Photometric bundle adjustment methods additionally require objects to have the same appearance in all views [12, 18]. Our method is entirely photometric, as such differing from BA. As our objects appear differently across views, all pixel information is used and the geometry is solved intrinsically." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Pose estimation Deep network approaches have proved well-suited to 2D human-pose estimation as they are potent feature extractors and large annotated training sets are available [1, 51, 55]. For 3D postures, ground truth multiview datasets are less common. Recent progress [35] relies on end-to-end architectures [19, 27, 29, 32, 42, 61] or splitting the problem into 2D pose estimation and then constructing the 3D pose [10, 38]. Despite similar approaches used for non-human pose estimation, the huge variability in scales and shapes among species introduces a variety of challenges [26]. Motion capture in controlled settings with markers (providing ground truth skeleton and joint angle data for humans, horses and dogs [30, 60]), are not available for most animals. Generalised mesh surfaces may be used," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12566" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.212 + ], + "angle": 0, + "content": "but often require multiple views and thousands of parameters, and do not guarantee consistency through time. In contrast, approximating an animal shape using a few-parameter morphable model can be both tractable and robust. Successful examples include swimmers [9, 43], birds [27, 58], mammals [2,6,28,39] and generic quadrupeds [7,67]. However, these methods expect opaque subjects with consistent textural appearances between views." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.471, + 0.395 + ], + "angle": 0, + "content": "C. elegans has a simple geometric shape that can be well reconstructed from a midline skeleton and parametrised by curvature values along the body (see Sec. 3). This is the deformable template we look to fit to the data. Despite the apparent simplicity, each vertex of the discretised curve has two degrees of freedom (two curvature values) and as we use 128 vertices, our model is highly deformable and requires many parameters (although smoothness regularisation simplifies the problem somewhat). In contrast to deep-learning approaches, our model includes only a small number of explainable parameters and direct optimisation avoids lengthy training and dataset requirements." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.416, + 0.469, + 0.522 + ], + "angle": 0, + "content": "C. elegans Numerous freely available software packages are capable of simultaneous tracking and skeletonising single or multiple worms in 2D using inexpensive microscopic imaging [5,25,44,52,53,62] (see [24] for a review). Most of these skeletonisers combine image segmentation to separate the animal from the background with thinning of the mask to some midline pixels and fitting a spline." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.522, + 0.469, + 0.749 + ], + "angle": 0, + "content": "The 3D reconstruction problem has received relatively little attention. Using at first two views [34] and then three, Kwon et al. [33] designed a motorised stage coupled with a real-time tracker to keep a worm in focus under high magnification in a 3D environment while capturing trajectories of up to 3 minutes. Thresholded images are lifted into 3D, intersected in voxel space and thinned [20] to produce a final skeleton. Kwon et al. omit camera modelling and assume perfectly parallel projections – assumptions that result in large errors for the data we use. Shaw et al. [49] employed light field microscopy to generate depth maps alongside images from a single viewpoint. A midline skeleton is generated by fitting a spline to the 3D coordinates of the central voxels. However, self-occlusions cannot be resolved and only relatively planar postures were investigated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Salfelder et al. [45] and Yuval [63] both present 3D reconstruction algorithms using the three-camera set up and calibration described in [45]. In Salfelder et al. [45], a neural network is trained to identify 2D midlines from individual camera images before lifting into 3D voxel space. To account for changing camera parameters, a relative axial shift \\((dx,dy,dz)\\) is optimised for each frame-triplet to maximise the voxel intersection before thinning. Remaining voxel coordinates are used as control points to fit a curve using a finite-element formulation. This approach works well when" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "the midline is well detected in each of the views, but can fail on occluded postures or low-resolution, blurry images." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.122, + 0.892, + 0.228 + ], + "angle": 0, + "content": "Yuval [63] uses a neural network to track head and tail points in 3D lab coordinates and a curve is fit between these fixed end points using a hill-climbing optimisation algorithm. Scoring is based on curve smoothness and pixel intensities at the projected curve points. This method works well when the head and tail are correctly identified but struggles, or requires manual correction, otherwise." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.228, + 0.892, + 0.335 + ], + "angle": 0, + "content": "In our approach we find that incorporating the camera model parameters into the optimisation results in more robust and accurate results. This extends the idea proposed in Salfelder et al. [45] that adjusting the relative positions of the cameras could result in large gains in accuracy. It is likely that the relative shift adjustments, presented there, account for the changing optical properties." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.349, + 0.669, + 0.365 + ], + "angle": 0, + "content": "3. Geometric model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.375, + 0.892, + 0.51 + ], + "angle": 0, + "content": "Nematode shapes can be well approximated by a tapered cylinder and computed from a midline. We construct the midline curve in 3D using an object-centric parametrisation, separating shape from position and orientation to allow us to easily constrain and regularise the shape to stay within biologically-reasonable bounds. 
We discretise the curve into \\( N \\) equidistant vertices and encode the posture in curvature \\( K \\in \\mathbb{R}^{N \\times 2} \\) and length \\( l \\in \\mathbb{R} \\) that fully define the shape up to a rigid-body transformation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.511, + 0.892, + 0.662 + ], + "angle": 0, + "content": "We express the 3D curve using the Bishop frame [8], given by \\(TM^{1}M^{2}\\) where \\(T\\) is the normalised tangent of the curve and \\(M^1, M^2\\) form an orthogonal basis along the midline. At vertex \\(n\\), the curvature is \\(K_{n} = (m_{n}^{1}, m_{n}^{2})\\), where \\(m_{n}^{1}, m_{n}^{2} \\in \\mathbb{R}\\) are the curvature components along \\(M^1, M^2\\). (The more familiar Frenet frame is less stable as it is undefined at zero-curvature points.) Numerical integration of a system of difference equations from starting point \\(P_{\\mathrm{init}}\\) and initial orientation \\((T_{\\mathrm{init}}, M_{\\mathrm{init}}^{1}, M_{\\mathrm{init}}^{2})\\) yields the curve path \\(P \\in \\mathbb{R}^{N \\times 3}\\). See supplementary material (SM) for details." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.663, + 0.892, + 0.814 + ], + "angle": 0, + "content": "During optimisation, errors accumulate near the starting point, \\( P_{\\mathrm{init}} \\), resulting in either parts of the curve moving faster than other or kinks developing (even with strong regularisation). To resolve this we sample an initial vertex index \\( n_0 \\) from a Gaussian distribution (subject to rounding) centred at the middle index at every optimisation step. Setting the starting point \\( P_{\\mathrm{init}} = P_{n_0} \\) has the effect of continually shifting the discontinuity so kinks are never given the opportunity to develop (Fig. 2). Summarising the integration as \\( F \\), the 3D curve is generated from the parameters:" + }, + { + "type": "equation", + "bbox": [ + 0.548, + 0.824, + 0.892, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\left(\\hat {P}, \\hat {T}, \\hat {M} ^ {1}\\right) = F \\left(P _ {n _ {0}}, T _ {n _ {0}}, M _ {n _ {0}} ^ {1}, K, l, n _ {0}\\right). \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Each gradient update adjusts all curvature values \\(K\\) but the position and orientation only at the randomly selected \\(n_0\\) vertex \\((P_{n_0}, T_{n_0}, M_{n_0}^1)\\). Updating \\((P, T, M^1)\\) at only" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12567" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.118, + 0.088, + 0.852, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.26, + 0.895, + 0.306 + ], + "angle": 0, + "content": "Figure 2. The 3D curve is traced out from initial point \\( P_{n_0} \\) and orientation frame \\( (T_{n_0}, M_{n_0}^1, M_{n_0}^2) \\). The index \\( n_0 \\) of the initial point is drawn from a normal distribution at each iteration to prevent kinks developing through repeated use of the same starting point. The final curve \\( \\hat{P} \\) is computed in two parts by integrating the Bishop equations with curvature \\( K \\) towards the head and tail separately." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.33, + 0.47, + 0.391 + ], + "angle": 0, + "content": "this vertex produces a \\(P\\) that is inconsistent with the updated \\(K\\). 
Therefore, after applying gradient updates we re-compute the full curve and orientation from \\(n_0\\) and set \\((P,T,M^1)\\) to the output \\((\\hat{P},\\hat{T},\\hat{M}^1)\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.392, + 0.471, + 0.497 + ], + "angle": 0, + "content": "Since the curve describes a biological creature, we constrain the length \\( l \\) to \\( (l_{\\min}, l_{\\max}) \\) and limit the curvature by \\( |K_n| < 2\\pi k_{\\max} \\). The values of \\( (l_{\\min}, l_{\\max}) \\) we use vary depending on magnification but the bounds do not need to be tight and are in the range \\( 0.5 - 2\\mathrm{mm} \\). The curvature constraint \\( k_{\\max} \\) is set by considering the number of circle achieved by a constant curvature curve and is fixed at 3." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.511, + 0.29, + 0.528 + ], + "angle": 0, + "content": "4. Project, Render, Score" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.537, + 0.47, + 0.613 + ], + "angle": 0, + "content": "The core of the optimisation pipeline is separable into three main stages; project, render and score. The 3D curve \\(\\hat{P}\\) generated in Eq. (1) is projected through the camera models into 2D points that are rendered into images and then scored against the three views." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.621, + 0.171, + 0.637 + ], + "angle": 0, + "content": "4.1. Project" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.644, + 0.47, + 0.81 + ], + "angle": 0, + "content": "The cameras are modelled using a triplet of pinhole camera models with tangential and radial distortion that project 3D points into image planes using perspective transformations. Each pinhole camera model offers a simple (15 parameters, \\(\\{\\eta_c\\}\\)), tractable, approximation to the optical transformation. We also include relative shifts along the local coordinate axes, \\(\\eta^s = (dx, dy, dz)\\), shared between the three models, as proposed by Salfelder et al. [45]. Initial camera coefficients for the triplet-model are provided along with the recordings and typically give root mean squared reprojection errors up to 10 pixels (\\(\\sim \\mathcal{O}(\\text{worm radius})\\))." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Due to the initial calibration errors and changes in optical properties as the gelatin sets and is disturbed by the worms we re-calibrate the cameras at every frame by including the camera parameters in the optimisation step. To avoid an under-determined problem, after we have found a configuration that supports good reconstructions for a recording" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.328, + 0.868, + 0.574 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.586, + 0.892, + 0.684 + ], + "angle": 0, + "content": "Figure 3. The rendering stage generates super-Gaussian blobs at each vertex position on the image. The shape of the blobs depends on the optimisable parameters: the scale \\(\\sigma\\), the intensity \\(\\iota\\) and the exponent used in the Gaussian \\(\\rho\\). \\(\\sigma\\) and \\(\\iota\\) are tapered down to fixed minimum values at the head and tail. The effects of varying these parameters from a converged solution (blue curves) are shown above (green curves) and below (orange curves) each." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.712, + 0.892, + 0.772 + ], + "angle": 0, + "content": "we fix all but the \\(\\eta^s\\) parameters. Interestingly, we still see changes (up to \\(30\\mathrm{px}\\sim 0.15\\mathrm{mm}\\)) in \\(\\eta^s\\) but as this relates to the relative positioning it does not affect the posture reconstruction or long-term trajectories." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.773, + 0.892, + 0.835 + ], + "angle": 0, + "content": "Projecting the 3D curve \\(\\hat{P}\\) through the camera-triplet model \\(\\Gamma\\) with parameters \\(\\eta = \\{\\eta_0, \\eta_1, \\eta_2, \\eta^s\\}\\) generates 2D image points per view, which we combine as \\(Q = \\Gamma(\\hat{P}, \\eta) \\in \\mathbb{R}^{3 \\times N \\times 2}\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.847, + 0.595, + 0.861 + ], + "angle": 0, + "content": "4.2. Render" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "In order to evaluate the reconstruction directly against the raw data, we render the projected 2D midline points into" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12568" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.242 + ], + "angle": 0, + "content": "images using optimisable shape and rendering parameters. Since worm bodies are well approximated by tapered cylinders, in theory we only require maximum and minimum radius values and a tapering function. However, \\( C \\) elegans are semi-transparent – increasingly so at the head and tail – and their internal anatomy has varying optical properties that diffract and distort the light. These challenges are further exacerbated by the worms often being out of focus in at least one of the views, therefore even an anatomically accurate model stands little chance of being correctly resolved." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.471, + 0.438 + ], + "angle": 0, + "content": "We render realistic images by combining 2D super-Gaussian functions centred on each projected vertex. Crucially, we allow the rendering parameters to differ between cameras since the animal seldom has the same photometric qualities in different views. We optimise three parameters for each camera view \\(c\\): \\(\\sigma_c \\in \\mathbb{R}\\) controls the spread, \\(\\iota_c \\in \\mathbb{R}\\) scales the intensity, and \\(\\rho_c \\in \\mathbb{R}\\) sharpens or softens the edges (Fig. 3). To capture the tapered shape we weight \\(\\sigma_c\\) and \\(\\iota_c\\) from their optimisable values along the middle \\(60\\%\\) to minimum values \\(\\sigma_{\\mathrm{min}}\\) and \\(\\iota_{\\mathrm{min}}\\) at the ends and define the tapered outputs \\(\\bar{\\sigma}_c \\in \\mathbb{R}^N\\) and \\(\\bar{\\iota}_c \\in \\mathbb{R}^N\\) (SM). \\(\\sigma_{\\mathrm{min}}\\) and \\(\\iota_{\\mathrm{min}}\\) are manually fixed for each recording to account for different magnification factors and worm size variability." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.439, + 0.47, + 0.485 + ], + "angle": 0, + "content": "For each camera index \\(c\\) and vertex index \\(n\\) we define the rendered blob \\(B_{c,n} \\in \\mathbb{R}^{w \\times w}\\) (image size \\(w\\)) for pixel \\((i,j)\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.495, + 0.498, + 0.542 + ], + "angle": 0, + "content": "\\[\nB _ {c, n} (i, j) = \\bar {\\iota} _ {c, n} \\exp \\left[ - \\left(\\frac {(i - Q _ {c , n , 0}) ^ {2} + (j - Q _ {c , n , 1}) ^ {2}}{2 \\bar {\\sigma} _ {c , n} ^ {2}}\\right) ^ {\\rho_ {c}} \\right]. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.543, + 0.47, + 0.589 + ], + "angle": 0, + "content": "The stacks of blobs are combined to generate the complete renderings \\( R \\in \\mathbb{R}^{3 \\times w \\times w} \\) by taking the maximum pixel value across all blobs: for pixel \\( (i,j) \\)," + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.599, + 0.47, + 0.62 + ], + "angle": 0, + "content": "\\[\nR _ {c} (i, j) = \\max \\left\\{B _ {c, n} (i, j) \\right\\} _ {n = 0, \\dots , N - 1}. \\qquad (3)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.628, + 0.47, + 0.822 + ], + "angle": 0, + "content": "The orientation of the body directly affects the pixel intensity of both raw and rendered images. When pointing directly at a camera the peaks of the blobs cluster closely together and appear as a high-intensity (opaque) circle. Pointing laterally causes the peaks to spread out on the image revealing more of the lower-intensity tails. In both situations our blob-rendering approach approximates transparency effects in the raw images without the need to model complex intensity-orientation responses. Moreover, super-Gaussian blobs allow sharp outlines to be produced in one view by using a large exponent and flat-top blobs, and blurry images to be produced for another, using low intensity and high variance." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.833, + 0.158, + 0.847 + ], + "angle": 0, + "content": "4.3. Score" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "In order to evaluate how well the curve represents the worm we require a way of distinguishing between worm-pixels and non-worm pixels such as dirt, bubbles, old tracks" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.092, + 0.882, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.332, + 0.894, + 0.403 + ], + "angle": 0, + "content": "Figure 4. The 3D curve points are scored individually according to how well they match the three views. The triplet of blobs associated with vertex \\( n \\) (\\( B_{.,n} \\)) are multiplied with the images \\( I \\) and summed. We take the minimum of the three sums and then taper these values from the midpoint-out." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.427, + 0.892, + 0.532 + ], + "angle": 0, + "content": "and even other worms. When the animal truly intersects with environmental interference it can be impossible to differentiate between the two, but in the majority of cases there exists a gap between the worm and the noise that is visible in at least one of the views. By ensuring that the curve corresponds to a single contiguous pixel mass in all of the images we are able to safely ignore other artefacts (Fig. 4)." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.532, + 0.892, + 0.578 + ], + "angle": 0, + "content": "To detect if the curve is bridging a gap, each vertex \\(\\hat{P}_n\\) is scored by correlating its corresponding blobs \\(B_{.,n}\\) (Sec. 4.2) with the images \\(I\\). The raw score \\(S_n \\in \\mathbb{R}\\) is defined:" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.586, + 0.892, + 0.624 + ], + "angle": 0, + "content": "\\[\nS _ {n} = \\min \\left\\{\\frac {\\sum_ {i , j} B _ {c , n} \\cdot I _ {c}}{\\bar {\\sigma} _ {c , n} \\bar {\\iota} _ {c , n}} \\right\\} _ {c = 0, 1, 2} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.632, + 0.892, + 0.706 + ], + "angle": 0, + "content": "where \\( \\cdot \\) is element-wise multiplication and the sum is taken over the image dimensions. By taking the minimum we ensure that vertices failing to match pixels in any one of the views will receive low scores regardless of how well they match pixels in the other views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.707, + 0.892, + 0.798 + ], + "angle": 0, + "content": "If the curve is bridging two disjoint groups of pixels that are visible in all three views this will present as two peaks in \\( S \\). Since we are only interested in finding one object we restrict the scores to contain just one peak by tapering \\( S \\) from the middle-out to form the intermediate \\( S' \\). Finally we normalise \\( S' \\) to get scores \\( \\hat{S} \\) relative to the peak:" + }, + { + "type": "equation", + "bbox": [ + 0.555, + 0.807, + 0.892, + 0.863 + ], + "angle": 0, + "content": "\\[\nS _ {n} ^ {\\prime} = \\left\\{ \\begin{array}{l l} \\min \\left\\{S _ {n}, S _ {n + 1} ^ {\\prime} \\right\\} & 0 \\leq n < N / 2 \\\\ S _ {n} & n = N / 2 \\\\ \\min \\left\\{S _ {n}, S _ {n - 1} ^ {\\prime} \\right\\} & N / 2 < n < N \\end{array} \\right. \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.865, + 0.892, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\hat {S} = \\frac {S ^ {\\prime}}{\\operatorname* {m a x} _ {n} \\left\\{S ^ {\\prime} \\right\\}}. \\tag {6}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12569" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.09, + 0.468, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.287, + 0.195, + 0.298 + ], + "angle": 0, + "content": "Without masking:" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.299, + 0.455, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.094, + 0.356, + 0.177, + 0.366 + ], + "angle": 0, + "content": "With masking:" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.366, + 0.455, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.435, + 0.47, + 0.546 + ], + "angle": 0, + "content": "Figure 5. The noisy input images are cleaned by applying masks that force pixel-errors to be local to the current estimate. The blobs \\( B \\) are scaled by the relative scores \\( \\hat{S} \\), combined using the maximum pixel value across blobs and thresholded to form the masks \\( M \\). The masks are applied to the raw input images \\( I \\) to generate the targets: \\( I^{\\star} \\). Masking ensures only a single contiguous pixel mass is detected. Without it, parts of the reconstruction can \"stick\" to nearby bubbles and other artefacts as shown below." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.574, + 0.47, + 0.62 + ], + "angle": 0, + "content": "The final score profile \\(\\hat{S}\\) provides insight into how well the curve matches a contiguous pixel mass across all three views and how evenly that mass is distributed." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.47, + 0.776 + ], + "angle": 0, + "content": "Masking From the score profile \\(\\hat{S}\\) we identify image areas that are more likely to contain the pixel masses that correspond to the worm. Masks \\(M\\in \\mathbb{R}^{3\\times w\\times w}\\) applied to the input, \\(I^{\\star} = M\\cdot I\\), focuses attention (and gradient) to only these areas of interest, consistently across all three views and exclude interference outside the masks (Fig. 5, see SM). Pixel intensities outside the masks are significantly reduced, but not zeroed in order to avoid stagnation in case the reconstruction completely misses the worm." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Centre-shifting The scores \\(\\hat{S}\\) also indicate the relative positioning of the curve over the target object. As the curve aligns with a pixel mass, vertices with high scores (apparently \"converged\") tend to lock into place thus hindering convergence of the rest of the object. For each frame, we use the previous frame solution as the starting point, so the majority of points rapidly converge. However, errors intro" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.088, + 0.88, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.324, + 0.895, + 0.422 + ], + "angle": 0, + "content": "Figure 6. As the animal moves along the path of its midline the tail may be left behind (left column). This can be identified from an unbalanced score profile \\(\\hat{S}\\). By periodically shifting the curve along its length (adding new curvature values at one end and discarding from the other) the centroid index \\((\\bar{n})\\) of the scores can be centred. Gradient descent optimisation then updates the new curvature values so the curve matches the target (right column)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.449, + 0.894, + 0.525 + ], + "angle": 0, + "content": "duced at the tips remain as they are insufficient to generate the collective shift required. The effect can easily be identified from an unbalanced score profile (Fig. 6) and rectified by periodically shifting the curve along its length between gradient descent optimisation steps (see SM)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.538, + 0.637, + 0.555 + ], + "angle": 0, + "content": "5. Optimisation" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.564, + 0.854, + 0.579 + ], + "angle": 0, + "content": "The main pixel-loss to be minimised is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.589, + 0.892, + 0.627 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {p x}} = \\frac {1}{3 w ^ {2}} \\sum_ {c, i, j} \\left(R _ {c} (i, j) - I _ {c} ^ {\\star} (i, j)\\right) ^ {2}. 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.638, + 0.892, + 0.668 + ], + "angle": 0, + "content": "To improve head and tail detection we also minimise a scores-loss," + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.678, + 0.892, + 0.711 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {s c}} = \\frac {\\operatorname* {m a x} \\left(S ^ {\\prime}\\right) N}{\\sum_ {n} S _ {n} ^ {\\prime \\prime}}, \\text {w h e r e} \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.595, + 0.714, + 0.892, + 0.75 + ], + "angle": 0, + "content": "\\[\nS _ {n} ^ {\\prime \\prime} = S _ {n} ^ {\\prime} \\left(\\frac {2 n - (N - 1)}{N - 1}\\right) ^ {2}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.76, + 0.891, + 0.79 + ], + "angle": 0, + "content": "that is quadratically weighted towards the tips where the scores are naturally lower due to the transparency." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.791, + 0.892, + 0.82 + ], + "angle": 0, + "content": "In addition we include a number of regularisation terms. To keep the curve smooth we define" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.832, + 0.892, + 0.873 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {s m}} = \\sum_ {n = 1} ^ {N - 1} \\left| K _ {n} - K _ {n - 1} \\right| ^ {2}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.886, + 0.892, + 0.903 + ], + "angle": 0, + "content": "where \\(|\\cdot |\\) is the \\(l^2\\) -norm. To ensure all parameters change" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12570" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.078, + 0.092, + 0.296, + 0.106 + ], + "angle": 0, + "content": "smoothly between frames we set" + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.12, + 0.469, + 0.156 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {t}} = \\sum_ {x \\in \\{l, K, \\hat {P}, \\eta , \\sigma , \\iota , \\rho \\}} | x ^ {\\text {p r e v}} - x | ^ {2}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.167, + 0.47, + 0.198 + ], + "angle": 0, + "content": "where \\( x^{\\mathrm{prev}} \\) refers to the frozen value of the variable from the previous frame. And to avoid self-intersections, we use" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.224, + 0.468, + 0.343 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} d _ {n, m} = \\left| \\hat {P} _ {n} - \\hat {P} _ {m} \\right|, (12) \\\\ d _ {n, m} ^ {\\prime} = \\frac {1}{3} \\sum_ {c} \\bar {\\sigma} _ {c, n} + \\frac {1}{3} \\sum_ {c} \\bar {\\sigma} _ {c, m}, \\text {a n d} (13) \\\\ \\mathcal {L} _ {\\mathrm {i}} = \\sum_ {n = 0} ^ {N - N / k _ {\\max } - 1} \\sum_ {m = n + N / k _ {\\max }} ^ {N - 1} \\left\\{ \\begin{array}{l l} \\frac {d _ {n , m} ^ {\\prime}}{d _ {n , m}}, & \\text {i f} d _ {n, m} < d _ {n, m} ^ {\\prime} \\\\ 0, & \\text {o t h e r w i s e .} \\end{array} \\right. (14) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.357, + 0.468, + 0.447 + ], + "angle": 0, + "content": "A loss is incurred, \\(\\mathcal{L}_{\\mathrm{i}} > 0\\), when two points which are sufficiently far apart (\\(>N / k_{\\max}\\)) along the curve come within a distance defined by the sum of their mean rendering variances (since these approximate the worm's radius). Eq. (14) forces the algorithm to find postures that are always feasible even during self-occlusions and complex manoeuvres." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.449, + 0.468, + 0.479 + ], + "angle": 0, + "content": "The losses are combined in a weighted sum to yield the final optimisation target:" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.493, + 0.469, + 0.509 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\omega_ {\\mathrm {p x}} \\mathcal {L} _ {\\mathrm {p x}} + \\omega_ {\\mathrm {s c}} \\mathcal {L} _ {\\mathrm {s c}} + \\omega_ {\\mathrm {s m}} \\mathcal {L} _ {\\mathrm {s m}} + \\omega_ {\\mathrm {t}} \\mathcal {L} _ {\\mathrm {t}} + \\omega_ {\\mathrm {i}} \\mathcal {L} _ {\\mathrm {i}}. \\tag {15}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.522, + 0.468, + 0.536 + ], + "angle": 0, + "content": "Values of \\(\\omega\\) used in our experiments are included in the SM." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.537, + 0.468, + 0.688 + ], + "angle": 0, + "content": "To achieve robust reconstructions it is important that the curve parameters learn fastest, then the rendering parameters and finally the camera parameters. Imposing this hierarchy of rates ensures camera model stability and prevents the renderer from over-blurring the edges (as it tries to \"reach\" the pixels). Thus, movement between frames is primarily captured through curve deformations. We use learning rates \\(\\lambda_{p} = 1\\mathrm{e} - 3\\) for the curve parameters \\(\\{P,T,M^1,K,l\\}\\), \\(\\lambda_r = 1\\mathrm{e} - 4\\) for the rendering parameters \\(\\{\\sigma ,\\iota ,\\rho \\}\\) and \\(\\lambda_{\\eta} = 1\\mathrm{e} - 5\\) for the camera parameters \\(\\eta\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.689, + 0.468, + 0.749 + ], + "angle": 0, + "content": "The curve is initialised as a small \\((\\sim 0.2\\mathrm{mm})\\) , randomly oriented straight line centred in the field of view of all three cameras. We slowly increase the length to \\(l_{\\mathrm{min}}\\) over the first 200-500 steps as the curve gets positioned and orientated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.468, + 0.901 + ], + "angle": 0, + "content": "The pipeline is constructed using PyTorch [64] and the loss minimised is using Adam [31] with periodic centre-shifting of the curve vertices. Learning rates are decreased by a factor of 0.8 for every 5 steps taken without improvement in \\(\\mathcal{L}\\) to a minimum of \\(1\\mathrm{e} - 6\\) until convergence is detected. Subsequent frames are instantiated with the solution from the previous frame for efficiency and to maintain consistency through complex sequences of self-occluding postures. Example videos showing the effects of varying some of the options on the optimisation are described in SM." + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.09, + 0.865, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.303, + 0.892, + 0.387 + ], + "angle": 0, + "content": "Figure 7. Validation against 487 manual annotations. At the top we show an example of an annotated frame (left, orange) alongside a projection of our matching 3D midline (right, blue). Below we plot the sample averages \\(\\pm 2\\mathrm{std}\\). We find our midlines are consistently close to annotated points (blue curve), but annotations typically extend further into the head and tail regions (orange curve)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.416, + 0.586, + 0.431 + ], + "angle": 0, + "content": "6. 
Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.892, + 0.565 + ], + "angle": 0, + "content": "Using our method we generate high quality 3D midline reconstructions for 43 of 44 recordings. One fails due to excessive coiling of the worm. Significant occlusions also occur during successful reconstructions and when combined with loss of focus can cause the shape to be lost. Video clips of good and poor reconstructions through challenging environmental conditions are described in SM along with ablation results to show benefits of each component." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.892, + 0.717 + ], + "angle": 0, + "content": "We compare 2D reprojections of our midlines against 487 manual annotations that were produced from single images in isolation and contain a varying number of unordered points. We calculate the minimum distance from each annotated point to any reconstructed point and vice-versa and find that our midlines consistently come close (\\(\\sim\\)2px) to hand-annotated points (Fig. 7). Annotated points at the ends show an increased distance (\\(\\sim\\)10px) to our midline points. This shows that our curves generally fall short of reaching the very tips of the worm by \\(\\sim\\)O(worm radius)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Our method significantly outperforms previous methods developed using the same dataset [45, 63] when evaluated against the manual annotations (SM), but these only cover a selection of hand-picked examples. For a large-scale comparison we take 3D midlines and camera parameters found by each method and, using our pipeline, render them to generate comparable images (re-optimising the render parameters for their midlines, see SM). We skip the scoring and masking and calculate \\(\\mathcal{L}_{\\mathrm{px}}\\). The results (Fig. 8) show our method consistently produces shapes that more closely match the raw images. The biggest advantage over previous approaches is the improvement in robustness; we recover" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12571" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.105, + 0.09, + 0.87, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.231, + 0.896, + 0.276 + ], + "angle": 0, + "content": "Figure 8. A comparison between our Midline Finder (MF), Yuval's Worm-Tracker 3D (WT3D) [63] and Salfelder et al.'s 'reconst' [45] methods across a single trial (\\(\\sim 13\\) min). In the majority of cases our method generates midlines that better match the data (lower pixel losses, \\(\\mathcal{L}_{\\mathrm{px}}\\)). We show moving averages over 25 frames (\\(\\sim 1\\) s) with shaded areas indicating \\(\\pm 2\\) std." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.291, + 0.877, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.522, + 0.895, + 0.581 + ], + "angle": 0, + "content": "Figure 9. The rendering parameters change continually over the course of a recording to capture optical changes. Clear images (e.g. early frames in cameras 0 and 1, switching to late frames in camera 2) are consistent with small values of \\(\\sigma\\) and large values of \\(\\rho\\). Blurry images (early camera 2, late camera 1) use high \\(\\sigma\\) and small \\(\\rho\\). 
We show moving averages over 25 frames (\\(\\sim 1\\) s) with shaded areas indicating \\(\\pm 2\\) std. Example comparisons between the renders (red) and raw images (grey) are shown on either side." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.604, + 0.461, + 0.619 + ], + "angle": 0, + "content": "4 h 37 min (ours) versus 1 h 32 min [45] and 45 min [63]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.62, + 0.471, + 0.771 + ], + "angle": 0, + "content": "Fig. 9 shows the rendering parameters during a trial as the worm moves in and out of focus in the different cameras. Clearer images result in smaller values of \\(\\sigma\\) and larger values of \\(\\rho\\). The fluctuations in intensity \\(\\iota\\) are due in part to the posture of the worm in relation to the camera; when it is pointing directly towards the camera we see higher values of \\(\\iota\\) used to capture the darker image observed and when the shape is perpendicular to the camera we see lower values of \\(\\iota\\) to emulate the worm's transparency. All three parameters work in tandem to produce the final effect." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.785, + 0.199, + 0.801 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We present a robust and reliable framework for the 3D reconstruction of a microscopic, semi-transparent subject moving through a fluid and evaluate against two other algorithms and manually annotations. The key contribution of our approach - constructing unique differentiable renderings for each view - allows us to solve shape recon" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.604, + 0.893, + 0.679 + ], + "angle": 0, + "content": "struction and camera parameter optimisation by direct image comparison. This avoids feature extraction and correspondence matching, and hence offers a powerful alternative when those approaches are not well-suited, e.g. due to the variation in appearance between views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.68, + 0.895, + 0.892 + ], + "angle": 0, + "content": "Multi-view microscopic camera calibration, imaging through fluids and parametric model fitting of semitransparent subjects are challenges that have received little attention in the literature. While we have focused here on constructing a curve to fit a microscopic worm from three views, our method could be applied to the 3D reconstruction of arbitrary shape models at any scale using any number of viewpoints. Rendering points with adaptable super-Gaussian functions presents an effective solution to transparency and focal issues, but more generally, our results indicate that our direct optimisation approach may offer an effective alternative to contemporary methods for 3D approximation of generic objects from a limited number of silhouette-like images." + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.521, + 0.958 + ], + "angle": 0, + "content": "12572" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3686-3693. IEEE, June 2014. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.254 + ], + "angle": 0, + "content": "[2] Praneet C. Bala, Benjamin R. Eisenreich, Seng Bum Michael Yoo, Benjamin Y. Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with OpenMonkeyStudio. Nat Commun, 11(1):4560, Sept. 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[3] Jerrold L Belant, Joshua J Millspaugh, James A Martin, and Robert A Gitzen. Multi-dimensional space use: The final frontier. Front. Ecol. Environ., 10(1):11-12, Feb. 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.47, + 0.352 + ], + "angle": 0, + "content": "[4] Florian Berlinger, Melvin Gauci, and Radhika Nagpal. Implicit coordination for 3D underwater collective behaviors in a fish-inspired robot swarm. Sci. Robot., 6(50):eabd8668, Jan. 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.354, + 0.47, + 0.409 + ], + "angle": 0, + "content": "[5] Stefano Berri, Jordan H. Boyle, Manlio Tassieri, Ian A. Hope, and Netta Cohen. Forward locomotion of the nematode C. elegans is achieved through modulation of a single gait. Hfsp J., 3(3):186-193, June 2009. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.47, + 0.493 + ], + "angle": 0, + "content": "[6] Benjamin Biggs, Oliver Boyne, James Charles, Andrew Fitzgibbon, and Roberto Cipolla. Who left the dogs out? 3d animal reconstruction with expectation maximization in the loop. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 195-211. Springer, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.471, + 0.577 + ], + "angle": 0, + "content": "[7] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures great and small: Recovering the shape and motion of animals from video. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2-6, 2018, Revised Selected Papers, Part V 14, pages 3-19. Springer, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.578, + 0.47, + 0.605 + ], + "angle": 0, + "content": "[8] Richard L. Bishop. There is more than one way to frame a curve. Amer. Math. Monthly, 82(3):246-251, Mar. 1975. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.47, + 0.661 + ], + "angle": 0, + "content": "[9] Thomas J. Cashman and Andrew W. Fitzgibbon. What shape are dolphins? building 3D morphable models from 2D images. IEEE Trans. Pattern Anal. Mach. Intell., 35(1):232-244, Jan. 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.663, + 0.47, + 0.718 + ], + "angle": 0, + "content": "[10] Ching-Hang Chen and Deva Ramanan. 3D human pose estimation = 2D pose estimation + matching. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7035-7043. IEEE, July 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.719, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[11] Nathan W. Cooper, Thomas W. Sherry, and Peter P. Marra. Modeling three-dimensional space use and overlap in birds. *Auk*, 131(4):681–693, Oct. 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.469, + 0.816 + ], + "angle": 0, + "content": "[12] Amael Delaunoy and Marc Pollefeys. Photometric bundle adjustment for dense multi-view 3D modeling. 
In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1486-1493. IEEE, June 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.471, + 0.844 + ], + "angle": 0, + "content": "[13] Olivier Faugeras and Quang-Tuan Luong. The Geometry of Multiple Images. The MIT Press, 2001. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[14] Alessandro Ferrarini, Giuseppe Giglio, Stefania Caterina Pellegrino, Anna Grazia Frassanito, and Marco Gustin. A new methodology for computing birds' 3D home ranges. Avian Res, 9(1):1-6, May 2018. 1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[15] Lise Frézal and Marie-Anne Félix. The natural history of model organisms: C. elegans outside the petri dish. eLife, 4:e05849, Mar. 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[16] Kui Fu, Jiansheng Peng, Qiwen He, and Hanxiao Zhang. Single image 3D object reconstruction based on deep learning: A review. Multimed Tools Appl, 80(1):463-498, Sept. 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[17] Marie-Anne Félix and Christian Braendle. The natural history of caenorhabditis elegans. Curr. Biol., 20(22):R965–R969, Nov. 2010. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[18] P. Georgel, S. Benhimane, and N. Navab. A unified approach combining photometric and geometric information for pose estimation. In Proceedings of the British Machine Vision Conference 2008, pages 1-10. CiteSeer, British Machine Vision Association, 2008. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[19] Riza Alp Guler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7297-7306. IEEE, June 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[20] Zicheng Guo and Richard W. Hall. Parallel thinning with two-subiteration algorithms. Commun. ACM, 32(3):359-373, Mar. 1989. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[21] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, Mar. 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[22] Robert I. Holbrook and Theresa Burt de Perera. Three-dimensional spatial cognition: Information in the vertical dimension overrides information from the horizontal. Anim Cogn, 14(4):613-619, Mar. 2011. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[23] C.T. Huang and O.R. Mitchell. Dynamic camera calibration. In Proceedings of International Symposium on Computer Vision - ISCV, pages 169-174. IEEE, IEEE Comput. Soc. Press, 1995. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.562, + 0.892, + 0.604 + ], + "angle": 0, + "content": "[24] Steven J. Husson, Wagner S. Costa, Cornelia Schmitt, and Alexander Gottschalk. 
Keeping track of worm trackers. WormBook, pages 1-17, Sept. 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.605, + 0.892, + 0.688 + ], + "angle": 0, + "content": "[25] Avelino Javer, Michael Currie, Chee Wai Lee, Jim Hokanson, Kezhi Li, Céline N. Martineau, Eviatar Yemini, Laura J. Grundy, Chris Li, QueeLim Ch'ng, William R. Schafer, Ellen A. A. Nollen, Rex Kerr, and André E. X. Brown. An open-source platform for analyzing and sharing worm-behavior data. Nat Methods, 15(9):645-646, Aug. 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.744 + ], + "angle": 0, + "content": "[26] Le Jiang, Caleb Lee, Divyang Teotia, and Sarah Ostadabbas. Animal pose estimation: A closer look at the state-of-the-art, existing gaps and opportunities. Comput. Vis. Image Und., 222:103483, Sept. 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[27] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131. IEEE, June 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.816, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[28] Angjoo Kanazawa, Shahar Kovalsky, Ronen Basri, and David Jacobs. Learning 3d deformation of animals from 2d images. In Computer Graphics Forum, volume 35, pages 365-374. Wiley Online Library, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[29] Isinsu Katircioglu, Bugra Tekin, Mathieu Salzmann, Vincent Lepetit, and Pascal Fua. Learning latent representations of" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12573" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.12 + ], + "angle": 0, + "content": "3D human pose with deep neural networks. Int J Comput Vis, 126(12):1326-1341, Jan. 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.471, + 0.19 + ], + "angle": 0, + "content": "[30] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. RGBD-dog: Predicting canine pose from RGBD sensors. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8336-8345. IEEE, June 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.471, + 0.233 + ], + "angle": 0, + "content": "[31] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.471, + 0.303 + ], + "angle": 0, + "content": "[32] Nikos Kolotouros, Georgios Pavlakos, Michael Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 2252-2261. IEEE, Oct. 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.471, + 0.359 + ], + "angle": 0, + "content": "[33] Namseop Kwon, Ara B. Hwang, Young-Jai You, Seung-Jae V. Lee, and Jung Ho Je. Dissection of C. elegans behavioral genetics in 3-d environments. Sci Rep, 5(1):1-9, May 2015. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.471, + 0.403 + ], + "angle": 0, + "content": "[34] Namseop Kwon, Jaeyeon Pyo, Seung-Jae Lee, and Jung Ho Je. 3-d worm tracker for freely moving C. elegans. PLoS ONE, 8(2):e57484, Feb. 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.405, + 0.471, + 0.459 + ], + "angle": 0, + "content": "[35] Wu Liu, Qian Bao, Yu Sun, and Tao Mei. Recent advances of monocular 2D and 3D human pose estimation: A deep learning perspective. ACM Comput. Surv., 55(4):1-41, Nov. 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.471, + 0.504 + ], + "angle": 0, + "content": "[36] H. C. Longuet-Higgins. A computer algorithm for reconstructing a scene from two projections. Nature, 293(5828):133-135, Sept. 1981. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.505, + 0.471, + 0.574 + ], + "angle": 0, + "content": "[37] Simone Macri, Daniele Neri, Tommaso Ruberto, Violet Mwaffo, Sachit Butail, and Maurizio Porfiri. Three-dimensional scoring of zebrafish behavior unveils biological phenomena hidden by two-dimensional analyses. Sci Rep, 7(1):1-10, May 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.471, + 0.644 + ], + "angle": 0, + "content": "[38] Julieta Martinez, Rayat Hossain, Javier Romero, and James J. Little. A simple yet effective baseline for 3d human pose estimation. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 2640-2649. IEEE, Oct. 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.646, + 0.471, + 0.715 + ], + "angle": 0, + "content": "[39] Valsamis Ntouskos, Marta Sanzari, Bruno Cafaro, Federico Nardi, Fabrizio Natola, Fiora Pirri, and Manuel Ruiz. Component-wise modeling of articulated objects. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 2327-2335. IEEE, Dec. 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.471, + 0.758 + ], + "angle": 0, + "content": "[40] Onur Özyesil, Vladislav Voroninski, Ronen Basri, and Amit Singer. A survey of structure from motion. Acta Numer., 26:305-364, May 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.471, + 0.801 + ], + "angle": 0, + "content": "[41] Brian L. Partridge, Tony Pitcher, J. Michael Cullen, and John Wilson. The three-dimensional structure of fish schools. *Behav Ecol Sociobiol*, 6(4):277-288, Mar. 1980. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.471, + 0.871 + ], + "angle": 0, + "content": "[42] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G. Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7025-7034. IEEE, July 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[43] Mukta Prasad, Andrew Fitzgibbon, Andrew Zisserman, and Luc Van Gool. Finding nemo: Deformable object class mod-" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "elling using curve matching. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1720-1727. IEEE, IEEE, June 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[44] Daniel Ramot, Brandon E. 
Johnson, Tommie L. Berry, Lucinda Carnell, and Miriam B. Goodman. The parallel worm tracker: A platform for measuring average speed and drug-induced paralysis in nematodes. PLoS ONE, 3(5):e2208, May 2008. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[45] Felix Salfelder, Omer Yuval, Thomas P Ilett, David C Hogg, Thomas Ranner, and Netta Cohen. Markerless 3D spatio-temporal reconstruction of microscopic swimmers from video. In Visual observation and analysis of Vertebrate And Insect Behavior 2020, 2021. 2, 3, 4, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.319 + ], + "angle": 0, + "content": "[46] Hinrich Schulenburg and Marie-Anne Félix. The natural biotic environment of Caenorhabditis elegans. Genetics, 206(1):55-86, May 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.321, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[47] S.M. Seitz, B. Curless, J. Diebel, D. Scharstein, and R. Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Volume 1 (CVPR'06), volume 1, pages 519-528. IEEE, IEEE, 2006. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.405, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[48] William Irvin Sellers and Eishi Hirasaki. Markerless 3D motion capture for animal locomotion studies. *Biology Open*, 3(7):656-668, June 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[49] Michael Shaw, Haoyun Zhan, Muna Elmi, Vijay Pawar, Clara Essmann, and Mandayam A. Srinivasan. Three-dimensional behavioural phenotyping of freely moving C. elegans using quantitative light field microscopy. PLoS ONE, 13(7):e0200108, July 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.519, + 0.892, + 0.586 + ], + "angle": 0, + "content": "[50] Colin A. Simpfendorfer, Esben M. Olsen, Michelle R. Heupel, and Even Moland. Three-dimensional kernel utilization distributions improve estimates of space use in aquatic animals. Can. J. Fish. Aquat. Sci., 69(3):565-572, Mar. 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.59, + 0.892, + 0.658 + ], + "angle": 0, + "content": "[51] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5693-5703. IEEE, June 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.66, + 0.892, + 0.715 + ], + "angle": 0, + "content": "[52] Nicholas A Swierczek, Andrew C Giles, Catharine H Rankin, and Rex A Kerr. High-throughput behavioral analysis in C. elegans. Nat Methods, 8(7):592-598, June 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.717, + 0.892, + 0.773 + ], + "angle": 0, + "content": "[53] Raphael Sznitman, Manaswi Gupta, Gregory D. Hager, Paulo E. Arratia, and Josué Sznitman. Multi-environment model estimation for motility analysis of caenorhabditis elegans. PLoS ONE, 5(7):e11631, July 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[54] Diane Theriault, Zheng Wu, Nickolay I Hristov, Sharon M Swartz, Kenneth S Breuer, Thomas H Kunz, and Margrit Betke. Reconstruction and analysis of 3D trajectories of Brazilian free-tailed bats in flight. In 20th Int. Conf. 
on Pattern Recognition, pages 1-4, 2010. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[55] Alexander Toshev and Christian Szegedy. DeepPose: Human pose estimation via deep neural networks. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1653-1660. IEEE, June 2014. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12574" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[56] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment — a modern synthesis. In Vision Algorithms: Theory and Practice, pages 298–372. Springer Berlin Heidelberg, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.471, + 0.205 + ], + "angle": 0, + "content": "[57] R. Tsai. A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses. IEEE J. Robot. Automat., 3(4):323–344, Aug. 1987. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.471, + 0.261 + ], + "angle": 0, + "content": "[58] Sara Vicente and Lourdes Agapito. Balloon shapes: Reconstructing and deforming objects with volume from images. In 2013 International Conference on 3D Vision, pages 223-230. IEEE, IEEE, June 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.263, + 0.471, + 0.303 + ], + "angle": 0, + "content": "[59] J. Weng, P. Cohen, and M. Herniou. Camera calibration with distortion models and accuracy evaluation. IEEE Trans. Pattern Anal. Machine Intell., 14(10):965-980, 1992. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.305, + 0.471, + 0.374 + ], + "angle": 0, + "content": "[60] Nils Wilhelm, Anna Vögele, Rebeka Zsoldos, Theresia Licka, Björn Krüger, and Jürgen Bernard. FuryExplorer: Visual-interactive exploration of horse motion capture data. In SPIE Proceedings, volume 9397, pages 148–162. SPIE, SPIE, Feb. 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.375, + 0.471, + 0.445 + ], + "angle": 0, + "content": "[61] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10. IEEE, June 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.446, + 0.471, + 0.5 + ], + "angle": 0, + "content": "[62] Eviatar Yemini, Rex A. Kerr, and William R. Schafer. Tracking movement behavior of multiple worms on food. Cold Spring Harb Protoc, 2011(12):pdb.prot067025, Dec. 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.502, + 0.471, + 0.544 + ], + "angle": 0, + "content": "[63] Omer Yuval. The neuromechanical control of Caenorhabditis elegans head motor behaviour in 3D environments. PhD thesis, University of Leeds, 2022. 3, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.546, + 0.471, + 0.613 + ], + "angle": 0, + "content": "[64] Sergey Zagoruyko, Adam Lerer, Tsung-Yi Lin, PedroO. Pinheiro, Sam Gross, Soumith Chintala, and Piotr Dollar. A MultiPath network for object detection. In Proceedings of the British Machine Vision Conference 2016. British Machine Vision Association, 2016. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.616, + 0.471, + 0.669 + ], + "angle": 0, + "content": "[65] Liquun Zhu and Wei Weng. Catadioptric stereo-vision system for the real-time monitoring of 3D behavior in aquatic animals. *Physiology & Behavior*, 91(1):106-119, May 2007. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.672, + 0.471, + 0.741 + ], + "angle": 0, + "content": "[66] Michael Zollhöfer, Patrick Stotko, Andreas Görtlitz, Christian Theobalt, Matthias Nießner, Reinhard Klein, and Andreas Kolb. State of the art on 3D reconstruction with RGB-d cameras. In Computer graphics forum, volume 37, pages 625-652. Wiley Online Library, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.743, + 0.471, + 0.812 + ], + "angle": 0, + "content": "[67] Silvia Zuffi, Angjoo Kanazawa, David W. Jacobs, and Michael J. Black. 3D menagerie: Modeling the 3D shape and pose of animals. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6365-6373. IEEE, July 2017. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.812 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12575" + } + ] +] \ No newline at end of file diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_origin.pdf b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7ab91de79c99bac2e4d8c11b2fe75a0dce32a817 --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/541a37a3-ad08-4ec0-acf7-4ca83662c9c6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:273c66b115a33f7106e063639706f984c0525d7f648feaf6b9718199c0eae94f +size 3045664 diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/full.md b/2023/3D Shape Reconstruction of Semi-Transparent Worms/full.md new file mode 100644 index 0000000000000000000000000000000000000000..eb4db8fe91de9c7154a5b5b6b2116805c9c91b47 --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/full.md @@ -0,0 +1,329 @@ +# 3D shape reconstruction of semi-transparent worms + +Thomas P. Ilett* Omer Yuval* Thomas Ranner* Netta Cohen*† David C. Hogg*† +University of Leeds, Leeds, United Kingdom + +![](images/24e8530d7c90f914cf9b9d1e88d8e21baec40e5fb507bb37bdae070083204e53.jpg) +Figure 1. Posture reconstruction pipeline and imaging setup. + +# Abstract + +3D shape reconstruction typically requires identifying object features or textures in multiple images of a subject. This approach is not viable when the subject is semitransparent and moving in and out of focus. Here we overcome these challenges by rendering a candidate shape with adaptive blurring and transparency for comparison with the images. We use the microscopic nematode Caenorhabditis elegans as a case study as it freely explores a 3D complex fluid with constantly changing optical properties. We model the slender worm as a 3D curve using an intrinsic parametrisation that naturally admits biologically-informed constraints and regularisation. To account for the changing optics we develop a novel differentiable renderer to construct images from 2D projections and compare + +\*T.Ilett, O.Yuval, T.Ranner, N.Cohen, D.C.Hogg}@leeds.ac.uk Funding This work was supported by University of Leeds and EPSRC. 
Author contributions Conceptualisation, Methodology, Formal analysis, Investigation, Software, Visualisation: TPI. Data curation, Validation: TPI, OY. Writing: TPI (original), all (review and editing). Funding acquisition, Supervision: NC, DCH, TR. $\dagger$ Equal contribution. Acknowledgements Additional thanks to Matan Braunstein (for help with Fig. 1), Robert I. Holbrook (data), Felix Salfelder (discussions and data), Lukas Deutz (discussions) and Jen Kruger (proof reading). Data availability Supplementary movies are available here: https://doi.org/10.6084/m9.figshare.22310650. + +against raw images to generate a pixel-wise error to jointly update the curve, camera and renderer parameters using gradient descent. The method is robust to interference such as bubbles and dirt trapped in the fluid, stays consistent through complex sequences of postures, recovers reliable estimates from blurry images and provides a significant improvement on previous attempts to track C. elegans in 3D. Our results demonstrate the potential of direct approaches to shape estimation in complex physical environments in the absence of ground-truth data. + +# 1. Introduction + +Many creatures such as fish, birds and insects move in all directions to search and navigate volumetric environments. Acquiring 3D data of their motion has informed models of locomotion, behaviour and neural and mechanical control [3,22]. While technological advances have made the collection of large quantities of multi-viewpoint visual data more attainable, methods for extracting and modelling 3D information remain largely domain-dependant as few species share common geometric models or exist within the same spatial and temporal scales [4, 11, 14, 26, 37, 41, 50, 54, 65]. Furthermore, while humans and some domesticated animals [30, 60] may act naturally while wearing special markers, marker-less observations of many species makes fea + +ture extraction more challenging and means pose estimation generally lacks ground-truth data [48]. + +As a case study in marker-less 3D shape reconstruction, we consider $C$ elegans, a hair-thick, $\sim 1$ mm long animal with a simple tapered cylinder shape, which can be constructed from a midline "skeleton". In the wild, $C$ elegans can be found in a wide range of complex 3D environments, e.g. decomposing organic matter, with continually changing physical properties [15, 17, 46]. However, to date, experiments have focused nearly exclusively on locomotion on a plane, limiting insight to the constrained, planar behaviours. + +We obtained a large dataset (4 hours 53 minutes $\simeq$ 440,000 frames at $25\mathrm{Hz}$ ) of experimental recordings of individual worms moving freely inside a glass cube filled with a gelatin solution. The cube is positioned between three nearly-orthogonal static cameras fitted with telecentric lenses. Initial pinhole camera model parameter estimates are provided [45] but are imprecise and require continuous adjustment across the course of a recording to account for small vibrations and optical changes to the gel. We aim to simultaneously reconstruct a 3D shape and find corrected camera parameters to match these recordings in a process akin to bundle adjustment [56]. + +3D reconstruction typically involves the identification and triangulation of common features from multiple viewpoints or the synthesis of full images including texture and shading information to match given scenes [16, 21, 47, 66]. 
Imaging animals with length $\sim 1\mathrm{mm}$ requires sufficient magnification, but simultaneously capturing long-term trajectories up to 25 minutes requires a large volume of view (10-20 worm lengths per axis). As the worm explores the cube it frequently appears out of focus in one or more of the cameras. Air bubbles and dirt trapped in the gel along with old tracks are difficult to differentiate from the transparent worm, particularly at the tapered ends. Self occlusion invariably appears in a least one view, where hidden parts darken the foreground while the ordering of fore/backparts is not discernible. As the semi-transparent and self-occluding subject moves in the volume, photometric information in one view bears little relevance to the appearance in the others making feature identification and photometric matching particularly challenging. We found that standard approaches may suffice for limited sub-clips, but lose parts of the object or fail catastrophically for much of the data and the solution requires a degree of adaptation. + +We present an integrated "project-render-score" algorithm to obtain a midline curve for each image-triplet (Fig. 1). Discrete curve vertices are projected through a triplet of pinhole camera models, rendered to produce an image-triplet for direct comparison against the recorded images and scored according to their intersection with worm-like pixels in all three views. The differentiable renderer stacks 2D super-Gaussian blobs at the projected locations + +of each vertex to approximate the transparency along the worm, accounting for the variable focus and providing soft edges that direct the geometric model towards the midline. The scoring allows the detection of incongruities and keeps the curve aligned to the worm in all views. Regularisation terms ensure smoothness along the body and in time. Curve, camera and rendering parameters are jointly optimised using gradient descent to convergence. Once the worm shape has been resolved, it is generally only lost during image degradation or significant self-occlusions that make the posture unresolvable by eye. + +In summary, our main contributions are: + +- A robust pipeline for 3D posture reconstruction of a freely deforming semi-transparent object from noisy images. +- A novel viewpoint renderer to capture optical distortions and transparency. +- A feature-free bundle adjustment algorithm using direct image comparison and gradient descent. + +# 2. Related work + +Bundle adjustment (BA) is a procedure to jointly optimise 3D geometry and camera parameters [21, 56]. BA typically identifies common features of an object from multiple viewpoints in order to minimise a prediction error between projections of the corresponding 3D points and their 2D observations. BA is frequently used in conjunction with other methods to find camera parameters using multiple images of a 3D calibration object with known control points or for fine-tuning results [13, 23, 36, 40, 57, 59]. + +Feature detection converts photometric information into image coordinates. In BA, coordinates of common features are used to solve a geometric optimisation problem. Photometric bundle adjustment methods additionally require objects to have the same appearance in all views [12, 18]. Our method is entirely photometric, as such differing from BA. As our objects appear differently across views, all pixel information is used and the geometry is solved intrinsically. 
+ +Pose estimation Deep network approaches have proved well-suited to 2D human-pose estimation as they are potent feature extractors and large annotated training sets are available [1, 51, 55]. For 3D postures, ground truth multiview datasets are less common. Recent progress [35] relies on end-to-end architectures [19, 27, 29, 32, 42, 61] or splitting the problem into 2D pose estimation and then constructing the 3D pose [10, 38]. Despite similar approaches used for non-human pose estimation, the huge variability in scales and shapes among species introduces a variety of challenges [26]. Motion capture in controlled settings with markers (providing ground truth skeleton and joint angle data for humans, horses and dogs [30, 60]), are not available for most animals. Generalised mesh surfaces may be used, + +but often require multiple views and thousands of parameters, and do not guarantee consistency through time. In contrast, approximating an animal shape using a few-parameter morphable model can be both tractable and robust. Successful examples include swimmers [9, 43], birds [27, 58], mammals [2,6,28,39] and generic quadrupeds [7,67]. However, these methods expect opaque subjects with consistent textural appearances between views. + +C. elegans has a simple geometric shape that can be well reconstructed from a midline skeleton and parametrised by curvature values along the body (see Sec. 3). This is the deformable template we look to fit to the data. Despite the apparent simplicity, each vertex of the discretised curve has two degrees of freedom (two curvature values) and as we use 128 vertices, our model is highly deformable and requires many parameters (although smoothness regularisation simplifies the problem somewhat). In contrast to deep-learning approaches, our model includes only a small number of explainable parameters and direct optimisation avoids lengthy training and dataset requirements. + +C. elegans Numerous freely available software packages are capable of simultaneous tracking and skeletonising single or multiple worms in 2D using inexpensive microscopic imaging [5,25,44,52,53,62] (see [24] for a review). Most of these skeletonisers combine image segmentation to separate the animal from the background with thinning of the mask to some midline pixels and fitting a spline. + +The 3D reconstruction problem has received relatively little attention. Using at first two views [34] and then three, Kwon et al. [33] designed a motorised stage coupled with a real-time tracker to keep a worm in focus under high magnification in a 3D environment while capturing trajectories of up to 3 minutes. Thresholded images are lifted into 3D, intersected in voxel space and thinned [20] to produce a final skeleton. Kwon et al. omit camera modelling and assume perfectly parallel projections – assumptions that result in large errors for the data we use. Shaw et al. [49] employed light field microscopy to generate depth maps alongside images from a single viewpoint. A midline skeleton is generated by fitting a spline to the 3D coordinates of the central voxels. However, self-occlusions cannot be resolved and only relatively planar postures were investigated. + +Salfelder et al. [45] and Yuval [63] both present 3D reconstruction algorithms using the three-camera set up and calibration described in [45]. In Salfelder et al. [45], a neural network is trained to identify 2D midlines from individual camera images before lifting into 3D voxel space. 
To account for changing camera parameters, a relative axial shift $(dx,dy,dz)$ is optimised for each frame-triplet to maximise the voxel intersection before thinning. Remaining voxel coordinates are used as control points to fit a curve using a finite-element formulation. This approach works well when + +the midline is well detected in each of the views, but can fail on occluded postures or low-resolution, blurry images. + +Yuval [63] uses a neural network to track head and tail points in 3D lab coordinates and a curve is fit between these fixed end points using a hill-climbing optimisation algorithm. Scoring is based on curve smoothness and pixel intensities at the projected curve points. This method works well when the head and tail are correctly identified but struggles, or requires manual correction, otherwise. + +In our approach we find that incorporating the camera model parameters into the optimisation results in more robust and accurate results. This extends the idea proposed in Salfelder et al. [45] that adjusting the relative positions of the cameras could result in large gains in accuracy. It is likely that the relative shift adjustments, presented there, account for the changing optical properties. + +# 3. Geometric model + +Nematode shapes can be well approximated by a tapered cylinder and computed from a midline. We construct the midline curve in 3D using an object-centric parametrisation, separating shape from position and orientation to allow us to easily constrain and regularise the shape to stay within biologically-reasonable bounds. We discretise the curve into $N$ equidistant vertices and encode the posture in curvature $K \in \mathbb{R}^{N \times 2}$ and length $l \in \mathbb{R}$ that fully define the shape up to a rigid-body transformation. + +We express the 3D curve using the Bishop frame [8], given by $TM^{1}M^{2}$ where $T$ is the normalised tangent of the curve and $M^1, M^2$ form an orthogonal basis along the midline. At vertex $n$ , the curvature is $K_{n} = (m_{n}^{1}, m_{n}^{2})$ , where $m_{n}^{1}, m_{n}^{2} \in \mathbb{R}$ are the curvature components along $M^1, M^2$ . (The more familiar Frenet frame is less stable as it is undefined at zero-curvature points.) Numerical integration of a system of difference equations from starting point $P_{\mathrm{init}}$ and initial orientation $(T_{\mathrm{init}}, M_{\mathrm{init}}^{1}, M_{\mathrm{init}}^{2})$ yields the curve path $P \in \mathbb{R}^{N \times 3}$ . See supplementary material (SM) for details. + +During optimisation, errors accumulate near the starting point, $P_{\mathrm{init}}$ , resulting in either parts of the curve moving faster than other or kinks developing (even with strong regularisation). To resolve this we sample an initial vertex index $n_0$ from a Gaussian distribution (subject to rounding) centred at the middle index at every optimisation step. Setting the starting point $P_{\mathrm{init}} = P_{n_0}$ has the effect of continually shifting the discontinuity so kinks are never given the opportunity to develop (Fig. 2). Summarising the integration as $F$ , the 3D curve is generated from the parameters: + +$$ +\left(\hat {P}, \hat {T}, \hat {M} ^ {1}\right) = F \left(P _ {n _ {0}}, T _ {n _ {0}}, M _ {n _ {0}} ^ {1}, K, l, n _ {0}\right). \tag {1} +$$ + +Each gradient update adjusts all curvature values $K$ but the position and orientation only at the randomly selected $n_0$ vertex $(P_{n_0}, T_{n_0}, M_{n_0}^1)$ . 
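To make the integration $F$ in Eq. (1) concrete, the sketch below traces a discrete midline from curvature values using a simple first-order Bishop-frame update. It is illustrative only: it integrates from vertex 0 rather than from the random interior vertex $n_0$, omits the head/tail split and the exact difference scheme given in the SM, and all names are hypothetical.

```python
import numpy as np

def curve_from_curvature(K, length, P0, T0, M1_0):
    """Trace a discrete midline from curvature values with a Bishop-style frame.

    Illustrative first-order scheme only (the paper integrates from a random
    interior vertex n0 towards head and tail; see SM). K has shape (N, 2) and
    holds the curvature components along (M1, M2); `length` is the worm length.
    """
    N = K.shape[0]
    h = length / (N - 1)                      # arc-length step between vertices
    P = np.zeros((N, 3))
    P[0] = P0
    T = T0 / np.linalg.norm(T0)
    M1 = M1_0 - (M1_0 @ T) * T                # keep the frame orthonormal
    M1 /= np.linalg.norm(M1)
    M2 = np.cross(T, M1)
    for n in range(N - 1):
        k1, k2 = K[n]
        # Bishop equations: T' = k1*M1 + k2*M2, M1' = -k1*T, M2' = -k2*T.
        T_new = T + h * (k1 * M1 + k2 * M2)
        T_new /= np.linalg.norm(T_new)
        M1 = M1 - h * k1 * T
        M1 -= (M1 @ T_new) * T_new
        M1 /= np.linalg.norm(M1)
        M2 = np.cross(T_new, M1)
        T = T_new
        P[n + 1] = P[n] + h * T               # advance the position along the tangent
    return P

# Example: a gently bent 0.8 mm curve with 128 vertices.
K = np.tile([2.0, 0.5], (128, 1))
P = curve_from_curvature(K, length=0.8, P0=np.zeros(3),
                         T0=np.array([1.0, 0.0, 0.0]),
                         M1_0=np.array([0.0, 1.0, 0.0]))
```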
Updating $(P, T, M^1)$ at only + +![](images/0dcc05d271cd6d8e13bce02ed463b36ccd006aa26f499d492e1cdf599860693c.jpg) +Figure 2. The 3D curve is traced out from initial point $P_{n_0}$ and orientation frame $(T_{n_0}, M_{n_0}^1, M_{n_0}^2)$ . The index $n_0$ of the initial point is drawn from a normal distribution at each iteration to prevent kinks developing through repeated use of the same starting point. The final curve $\hat{P}$ is computed in two parts by integrating the Bishop equations with curvature $K$ towards the head and tail separately. + +this vertex produces a $P$ that is inconsistent with the updated $K$ . Therefore, after applying gradient updates we re-compute the full curve and orientation from $n_0$ and set $(P,T,M^1)$ to the output $(\hat{P},\hat{T},\hat{M}^1)$ . + +Since the curve describes a biological creature, we constrain the length $l$ to $(l_{\min}, l_{\max})$ and limit the curvature by $|K_n| < 2\pi k_{\max}$ . The values of $(l_{\min}, l_{\max})$ we use vary depending on magnification but the bounds do not need to be tight and are in the range $0.5 - 2\mathrm{mm}$ . The curvature constraint $k_{\max}$ is set by considering the number of circle achieved by a constant curvature curve and is fixed at 3. + +# 4. Project, Render, Score + +The core of the optimisation pipeline is separable into three main stages; project, render and score. The 3D curve $\hat{P}$ generated in Eq. (1) is projected through the camera models into 2D points that are rendered into images and then scored against the three views. + +# 4.1. Project + +The cameras are modelled using a triplet of pinhole camera models with tangential and radial distortion that project 3D points into image planes using perspective transformations. Each pinhole camera model offers a simple (15 parameters, $\{\eta_c\}$ ), tractable, approximation to the optical transformation. We also include relative shifts along the local coordinate axes, $\eta^s = (dx, dy, dz)$ , shared between the three models, as proposed by Salfelder et al. [45]. Initial camera coefficients for the triplet-model are provided along with the recordings and typically give root mean squared reprojection errors up to 10 pixels ( $\sim \mathcal{O}(\text{worm radius})$ ). + +Due to the initial calibration errors and changes in optical properties as the gelatin sets and is disturbed by the worms we re-calibrate the cameras at every frame by including the camera parameters in the optimisation step. To avoid an under-determined problem, after we have found a configuration that supports good reconstructions for a recording + +![](images/9949594766b891731be95dfe988f6e9463f57ebc1bfd025d98dad9c6ad941fa2.jpg) +Figure 3. The rendering stage generates super-Gaussian blobs at each vertex position on the image. The shape of the blobs depends on the optimisable parameters: the scale $\sigma$ , the intensity $\iota$ and the exponent used in the Gaussian $\rho$ . $\sigma$ and $\iota$ are tapered down to fixed minimum values at the head and tail. The effects of varying these parameters from a converged solution (blue curves) are shown above (green curves) and below (orange curves) each. + +we fix all but the $\eta^s$ parameters. Interestingly, we still see changes (up to $30\mathrm{px}\sim 0.15\mathrm{mm}$ ) in $\eta^s$ but as this relates to the relative positioning it does not affect the posture reconstruction or long-term trajectories. 
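As a rough illustration of the camera-triplet projection described above, the following sketch pushes the curve points through three simplified pinhole models sharing a relative shift. The radial and tangential distortion terms and the exact shift convention of [45] are omitted, and all names and parameters are hypothetical.

```python
import numpy as np

def project_triplet(P, cams, shift):
    """Project N curve points through three pinhole models (distortion omitted).

    A simplified stand-in for the camera-triplet model: each camera is a dict
    with rotation R (3x3), translation t (3,) and intrinsics fx, fy, cx, cy.
    `shift` = (dx, dy, dz) is the shared relative shift, applied here in camera
    coordinates. Returns Q with shape (3, N, 2).
    """
    Q = np.zeros((3, P.shape[0], 2))
    for c, cam in enumerate(cams):
        X = P @ cam["R"].T + cam["t"] + shift       # world -> (shifted) camera frame
        x, y, z = X[:, 0], X[:, 1], X[:, 2]
        Q[c, :, 0] = cam["fx"] * x / z + cam["cx"]  # perspective division
        Q[c, :, 1] = cam["fy"] * y / z + cam["cy"]
    return Q
```

In the actual pipeline the analogous operation is differentiable, so pixel errors propagate back to both the curve and the camera parameters during the joint gradient-descent updates.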
Projecting the 3D curve $\hat{P}$ through the camera-triplet model $\Gamma$ with parameters $\eta = \{\eta_0, \eta_1, \eta_2, \eta^s\}$ generates 2D image points per view, which we combine as $Q = \Gamma(\hat{P}, \eta) \in \mathbb{R}^{3 \times N \times 2}$.

# 4.2. Render

In order to evaluate the reconstruction directly against the raw data, we render the projected 2D midline points into images using optimisable shape and rendering parameters. Since worm bodies are well approximated by tapered cylinders, in theory we only require maximum and minimum radius values and a tapering function. However, C. elegans are semi-transparent (increasingly so at the head and tail) and their internal anatomy has varying optical properties that diffract and distort the light. These challenges are further exacerbated by the worms often being out of focus in at least one of the views, so even an anatomically accurate model stands little chance of being correctly resolved.

We render realistic images by combining 2D super-Gaussian functions centred on each projected vertex. Crucially, we allow the rendering parameters to differ between cameras since the animal seldom has the same photometric qualities in different views. We optimise three parameters for each camera view $c$: $\sigma_c \in \mathbb{R}$ controls the spread, $\iota_c \in \mathbb{R}$ scales the intensity, and $\rho_c \in \mathbb{R}$ sharpens or softens the edges (Fig. 3). To capture the tapered shape we taper $\sigma_c$ and $\iota_c$ from their optimisable values along the middle $60\%$ of the body down to minimum values $\sigma_{\mathrm{min}}$ and $\iota_{\mathrm{min}}$ at the ends, defining the tapered outputs $\bar{\sigma}_c \in \mathbb{R}^N$ and $\bar{\iota}_c \in \mathbb{R}^N$ (SM). $\sigma_{\mathrm{min}}$ and $\iota_{\mathrm{min}}$ are manually fixed for each recording to account for different magnification factors and worm size variability.

For each camera index $c$ and vertex index $n$ we define the rendered blob $B_{c,n} \in \mathbb{R}^{w \times w}$ (image size $w$) for pixel $(i,j)$ as:

$$
B_{c,n}(i,j) = \bar{\iota}_{c,n} \exp\left[-\left(\frac{(i - Q_{c,n,0})^{2} + (j - Q_{c,n,1})^{2}}{2\bar{\sigma}_{c,n}^{2}}\right)^{\rho_{c}}\right]. \tag{2}
$$

The stacks of blobs are combined to generate the complete renderings $R \in \mathbb{R}^{3 \times w \times w}$ by taking the maximum pixel value across all blobs: for pixel $(i,j)$,

$$
R_{c}(i,j) = \max\left\{B_{c,n}(i,j)\right\}_{n = 0, \dots, N - 1}. \tag{3}
$$

The orientation of the body directly affects the pixel intensity of both raw and rendered images. When pointing directly at a camera the peaks of the blobs cluster closely together and appear as a high-intensity (opaque) circle. Pointing laterally causes the peaks to spread out on the image, revealing more of the lower-intensity tails. In both situations our blob-rendering approach approximates transparency effects in the raw images without the need to model complex intensity-orientation responses. Moreover, super-Gaussian blobs allow sharp outlines to be produced in one view by using a large exponent and flat-top blobs, and blurry images to be produced for another by using low intensity and high variance.
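The rendering of Eqs. (2) and (3) vectorises naturally; a minimal sketch for one view is shown below, assuming the tapering that produces $\bar{\sigma}_c$ and $\bar{\iota}_c$ has already been applied (names and tensor layouts are illustrative, not the paper's implementation).

```python
import torch

def render_view(Q_c, sigma_bar_c, iota_bar_c, rho_c, w):
    """Sketch of Eqs. (2)-(3) for camera c: rasterise a tapered super-Gaussian
    blob at each projected vertex and composite them with a pixel-wise max.
    Q_c: (N, 2) projected vertex coordinates in pixels;
    sigma_bar_c, iota_bar_c: (N,) tapered spread and intensity;
    rho_c: scalar exponent; w: image height/width in pixels."""
    idx = torch.arange(w, dtype=torch.float32)
    grid_i, grid_j = torch.meshgrid(idx, idx, indexing="ij")   # (w, w) pixel grids
    # Squared distance from every pixel to every vertex -> (N, w, w).
    d2 = (grid_i[None] - Q_c[:, 0, None, None]) ** 2 \
       + (grid_j[None] - Q_c[:, 1, None, None]) ** 2
    blobs = iota_bar_c[:, None, None] * torch.exp(
        -(d2 / (2.0 * sigma_bar_c[:, None, None] ** 2)) ** rho_c)  # Eq. (2)
    return blobs.max(dim=0).values                                  # Eq. (3)
```

Because the compositing uses a maximum, each output pixel receives gradient only from the blob that dominates it, which in effect makes each vertex responsible for the patch of the image it covers.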
# 4.3. Score

In order to evaluate how well the curve represents the worm we require a way of distinguishing between worm pixels and non-worm pixels such as dirt, bubbles, old tracks and even other worms. When the animal truly intersects with environmental interference it can be impossible to differentiate between the two, but in the majority of cases there exists a gap between the worm and the noise that is visible in at least one of the views. By ensuring that the curve corresponds to a single contiguous pixel mass in all of the images we are able to safely ignore other artefacts (Fig. 4).

![](images/6ad8ea5c520c974e0bf8a0cbb92b1ae4e9ffec6b645fb5269155c3229f9d1dd1.jpg)
Figure 4. The 3D curve points are scored individually according to how well they match the three views. The triplet of blobs associated with vertex $n$ ($B_{.,n}$) are multiplied with the images $I$ and summed. We take the minimum of the three sums and then taper these values from the midpoint out.

To detect if the curve is bridging a gap, each vertex $\hat{P}_n$ is scored by correlating its corresponding blobs $B_{.,n}$ (Sec. 4.2) with the images $I$. The raw score $S_n \in \mathbb{R}$ is defined:

$$
S_{n} = \min\left\{\frac{\sum_{i,j} B_{c,n} \cdot I_{c}}{\bar{\sigma}_{c,n} \bar{\iota}_{c,n}}\right\}_{c = 0, 1, 2} \tag{4}
$$

where $\cdot$ is element-wise multiplication and the sum is taken over the image dimensions. By taking the minimum we ensure that vertices failing to match pixels in any one of the views will receive low scores regardless of how well they match pixels in the other views.

If the curve is bridging two disjoint groups of pixels that are visible in all three views this will present as two peaks in $S$. Since we are only interested in finding one object we restrict the scores to contain just one peak by tapering $S$ from the middle out to form the intermediate $S'$. Finally we normalise $S'$ to get scores $\hat{S}$ relative to the peak:

$$
S_{n}^{\prime} = \left\{\begin{array}{ll} \min\left\{S_{n}, S_{n+1}^{\prime}\right\} & 0 \leq n < N/2 \\ S_{n} & n = N/2 \\ \min\left\{S_{n}, S_{n-1}^{\prime}\right\} & N/2 < n < N \end{array}\right. \tag{5}
$$

$$
\hat{S} = \frac{S^{\prime}}{\max_{n}\left\{S^{\prime}\right\}}. \tag{6}
$$

![](images/dd9322366813a8574c7ebdaf0f880c1441aa7f65292dad1147b259010c6b000e.jpg)

![](images/6babeae149d596324c3e3ec55073e03281b9b384484f51b3ef81bb8c0b5b6246.jpg)

![](images/b1e9a7ce05932bbf5b4abe3f033da0081e6aea57f174773a46dff8a9c2f1e9f5.jpg)
Figure 5. The noisy input images are cleaned by applying masks that force pixel-errors to be local to the current estimate. The blobs $B$ are scaled by the relative scores $\hat{S}$, combined using the maximum pixel value across blobs and thresholded to form the masks $M$. The masks are applied to the raw input images $I$ to generate the targets $I^{\star}$. Masking ensures only a single contiguous pixel mass is detected. Without it, parts of the reconstruction can "stick" to nearby bubbles and other artefacts, as shown in the lower panels ("Without masking" vs. "With masking").

The final score profile $\hat{S}$ provides insight into how well the curve matches a contiguous pixel mass across all three views and how evenly that mass is distributed.
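A compact sketch of this per-vertex scoring and middle-out tapering (Eqs. (4)–(6)) is given below; shapes and names are assumptions for illustration, and the running minimum is one convenient way to realise the recursion in Eq. (5).

```python
import torch

def score_vertices(B, I, sigma_bar, iota_bar):
    """Illustrative sketch of Eqs. (4)-(6).
    B: (3, N, w, w) per-view blobs; I: (3, w, w) raw images;
    sigma_bar, iota_bar: (3, N) tapered rendering parameters."""
    # Eq. (4): correlate each vertex's blobs with the images; the worst view counts.
    overlap = (B * I[:, None]).sum(dim=(-2, -1)) / (sigma_bar * iota_bar)  # (3, N)
    S = overlap.min(dim=0).values                                          # (N,)

    # Eq. (5): taper from the middle out so that only a single peak can survive,
    # implemented as a running minimum towards each end.
    mid = S.shape[0] // 2
    left = torch.flip(torch.cummin(torch.flip(S[:mid + 1], dims=[0]), dim=0).values, dims=[0])
    right = torch.cummin(S[mid:], dim=0).values
    S_prime = torch.cat([left[:-1], right])

    # Eq. (6): normalise relative to the peak.
    S_hat = S_prime / S_prime.max()
    return S, S_prime, S_hat
```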
Masking From the score profile $\hat{S}$ we identify image areas that are more likely to contain the pixel masses that correspond to the worm. Masks $M\in \mathbb{R}^{3\times w\times w}$ applied to the input, $I^{\star} = M\cdot I$, focus attention (and gradients) on only these areas of interest, consistently across all three views, and exclude interference outside the masks (Fig. 5, see SM). Pixel intensities outside the masks are significantly reduced, but not zeroed, in order to avoid stagnation in case the reconstruction completely misses the worm.

Centre-shifting The scores $\hat{S}$ also indicate the relative positioning of the curve over the target object. As the curve aligns with a pixel mass, vertices with high scores (apparently "converged") tend to lock into place, thus hindering convergence of the rest of the object. For each frame, we use the previous frame solution as the starting point, so the majority of points rapidly converge. However, errors introduced at the tips remain as they are insufficient to generate the collective shift required. The effect can easily be identified from an unbalanced score profile (Fig. 6) and rectified by periodically shifting the curve along its length between gradient descent optimisation steps (see SM).

![](images/1e6f429edb951cc48624b9c2fe48b31b93f95333652758e73949cb2e3952d8e8.jpg)
Figure 6. As the animal moves along the path of its midline the tail may be left behind (left column). This can be identified from an unbalanced score profile $\hat{S}$. By periodically shifting the curve along its length (adding new curvature values at one end and discarding from the other) the centroid index $(\bar{n})$ of the scores can be centred. Gradient descent optimisation then updates the new curvature values so the curve matches the target (right column).

# 5. Optimisation

The main pixel-loss to be minimised is defined as:

$$
\mathcal{L}_{\mathrm{px}} = \frac{1}{3w^{2}} \sum_{c,i,j} \left(R_{c}(i,j) - I_{c}^{\star}(i,j)\right)^{2}. \tag{7}
$$

To improve head and tail detection we also minimise a scores-loss,

$$
\mathcal{L}_{\mathrm{sc}} = \frac{\max\left(S^{\prime}\right) N}{\sum_{n} S_{n}^{\prime\prime}}, \quad \text{where} \tag{8}
$$

$$
S_{n}^{\prime\prime} = S_{n}^{\prime} \left(\frac{2n - (N-1)}{N-1}\right)^{2}, \tag{9}
$$

which is quadratically weighted towards the tips where the scores are naturally lower due to the transparency.

In addition we include a number of regularisation terms. To keep the curve smooth we define

$$
\mathcal{L}_{\mathrm{sm}} = \sum_{n=1}^{N-1} \left|K_{n} - K_{n-1}\right|^{2}, \tag{10}
$$

where $|\cdot|$ is the $l^2$-norm. To ensure all parameters change smoothly between frames we set

$$
\mathcal{L}_{\mathrm{t}} = \sum_{x \in \{l, K, \hat{P}, \eta, \sigma, \iota, \rho\}} |x^{\mathrm{prev}} - x|^{2}, \tag{11}
$$

where $x^{\mathrm{prev}}$ refers to the frozen value of the variable from the previous frame. And to avoid self-intersections, we use

$$
d_{n,m} = \left|\hat{P}_{n} - \hat{P}_{m}\right|, \tag{12}
$$

$$
d_{n,m}^{\prime} = \frac{1}{3}\sum_{c} \bar{\sigma}_{c,n} + \frac{1}{3}\sum_{c} \bar{\sigma}_{c,m}, \quad \text{and} \tag{13}
$$

$$
\mathcal{L}_{\mathrm{i}} = \sum_{n=0}^{N - N/k_{\max} - 1} \; \sum_{m = n + N/k_{\max}}^{N-1} \left\{\begin{array}{ll} \frac{d_{n,m}^{\prime}}{d_{n,m}}, & \text{if } d_{n,m} < d_{n,m}^{\prime} \\ 0, & \text{otherwise.} \end{array}\right. \tag{14}
$$

A loss is incurred, $\mathcal{L}_{\mathrm{i}} > 0$, when two points that are sufficiently far apart ($> N/k_{\max}$ vertices) along the curve come within a distance defined by the sum of their mean rendering variances (since these approximate the worm's radius). Eq. (14) forces the algorithm to find postures that are always feasible, even during self-occlusions and complex manoeuvres.

The losses are combined in a weighted sum to yield the final optimisation target:

$$
\mathcal{L} = \omega_{\mathrm{px}} \mathcal{L}_{\mathrm{px}} + \omega_{\mathrm{sc}} \mathcal{L}_{\mathrm{sc}} + \omega_{\mathrm{sm}} \mathcal{L}_{\mathrm{sm}} + \omega_{\mathrm{t}} \mathcal{L}_{\mathrm{t}} + \omega_{\mathrm{i}} \mathcal{L}_{\mathrm{i}}. \tag{15}
$$

Values of $\omega$ used in our experiments are included in the SM.

To achieve robust reconstructions it is important that the curve parameters learn fastest, then the rendering parameters and finally the camera parameters. Imposing this hierarchy of rates ensures camera model stability and prevents the renderer from over-blurring the edges (as it tries to "reach" the pixels). Thus, movement between frames is primarily captured through curve deformations. We use learning rates $\lambda_{p} = 1\mathrm{e}{-3}$ for the curve parameters $\{P, T, M^1, K, l\}$, $\lambda_r = 1\mathrm{e}{-4}$ for the rendering parameters $\{\sigma, \iota, \rho\}$ and $\lambda_{\eta} = 1\mathrm{e}{-5}$ for the camera parameters $\eta$.

The curve is initialised as a small ($\sim 0.2\mathrm{mm}$), randomly oriented straight line centred in the field of view of all three cameras. We slowly increase the length to $l_{\mathrm{min}}$ over the first 200-500 steps as the curve gets positioned and orientated.

The pipeline is constructed using PyTorch [64] and the loss is minimised using Adam [31] with periodic centre-shifting of the curve vertices. Learning rates are decreased by a factor of 0.8 for every 5 steps taken without improvement in $\mathcal{L}$, down to a minimum of $1\mathrm{e}{-6}$, until convergence is detected. Subsequent frames are instantiated with the solution from the previous frame for efficiency and to maintain consistency through complex sequences of self-occluding postures. Example videos showing the effects of varying some of the options on the optimisation are described in the SM.
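The weighted objective of Eq. (15) and the three-tier learning rates map directly onto Adam parameter groups; a minimal, self-contained sketch is below. The tensors and the `compute_losses` stub are placeholders standing in for the real project-render-score pipeline, the $\omega$ values are illustrative (the real ones are in the SM), and `ReduceLROnPlateau` is just one convenient way to obtain the described decay-on-stall schedule.

```python
import torch

N = 128
# Placeholder parameter tensors standing in for the pipeline state.
K     = torch.zeros(N, 2, requires_grad=True)   # curvature
l     = torch.tensor(0.2, requires_grad=True)   # length (mm), starts small
frame = torch.zeros(3, 3, requires_grad=True)   # P_{n0}, T_{n0}, M1_{n0}
sigma = torch.ones(3, requires_grad=True)       # per-camera spread
iota  = torch.ones(3, requires_grad=True)       # per-camera intensity
rho   = torch.ones(3, requires_grad=True)       # per-camera exponent
eta   = torch.zeros(3, 15, requires_grad=True)  # pinhole coefficients per camera

def compute_losses(*params):
    # Stub for the project-render-score losses of Eqs. (7)-(14); here just a
    # dummy dependence on the parameters so the example runs end to end.
    dummy = sum(p.pow(2).sum() for p in params)
    return {"px": dummy, "sc": dummy, "sm": dummy, "t": dummy, "i": dummy}

omega = {"px": 1.0, "sc": 1.0, "sm": 1.0, "t": 1.0, "i": 1.0}  # placeholder weights

optimiser = torch.optim.Adam([
    {"params": [frame, K, l],      "lr": 1e-3},  # curve parameters learn fastest
    {"params": [sigma, iota, rho], "lr": 1e-4},  # rendering parameters
    {"params": [eta],              "lr": 1e-5},  # camera parameters barely move
])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimiser, factor=0.8, patience=5, min_lr=1e-6)

for step in range(100):
    optimiser.zero_grad()
    losses = compute_losses(frame, K, l, sigma, iota, rho, eta)
    L = sum(omega[k] * losses[k] for k in omega)  # Eq. (15)
    L.backward()
    optimiser.step()
    scheduler.step(L.detach())                    # decay learning rates on stall
```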
![](images/67915852fe48a0b1a909ac42a61baea8c9510dde3148fed59046adac7f48b8e9.jpg)
Figure 7. Validation against 487 manual annotations. At the top we show an example of an annotated frame (left, orange) alongside a projection of our matching 3D midline (right, blue). Below we plot the sample averages $\pm 2\mathrm{std}$. We find our midlines are consistently close to annotated points (blue curve), but annotations typically extend further into the head and tail regions (orange curve).

# 6. Results

Using our method we generate high quality 3D midline reconstructions for 43 of 44 recordings. One fails due to excessive coiling of the worm. Significant occlusions also occur during successful reconstructions and, when combined with loss of focus, can cause the shape to be lost. Video clips of good and poor reconstructions through challenging environmental conditions are described in the SM, along with ablation results showing the benefits of each component.

We compare 2D reprojections of our midlines against 487 manual annotations that were produced from single images in isolation and contain a varying number of unordered points.
We calculate the minimum distance from each annotated point to any reconstructed point and vice versa and find that our midlines consistently come close ($\sim$ 2px) to hand-annotated points (Fig. 7). Annotated points at the ends show an increased distance ($\sim$ 10px) to our midline points. This shows that our curves generally fall short of reaching the very tips of the worm by $\sim \mathcal{O}(\text{worm radius})$.

Our method significantly outperforms previous methods developed using the same dataset [45, 63] when evaluated against the manual annotations (SM), but these only cover a selection of hand-picked examples. For a large-scale comparison we take the 3D midlines and camera parameters found by each method and, using our pipeline, render them to generate comparable images (re-optimising the render parameters for their midlines, see SM). We skip the scoring and masking and calculate $\mathcal{L}_{\mathrm{px}}$. The results (Fig. 8) show our method consistently produces shapes that more closely match the raw images. The biggest advantage over previous approaches is the improvement in robustness; we recover 4 h 37 min (ours) versus 1 h 32 min [45] and 45 min [63].

![](images/6d4533a14537cc613de6396d0b23513ca3d4ef77e19f8377543bddaf9b1160cf.jpg)
Figure 8. A comparison between our Midline Finder (MF), Yuval's Worm-Tracker 3D (WT3D) [63] and Salfelder et al.'s 'reconst' [45] methods across a single trial ($\sim 13$ min). In the majority of cases our method generates midlines that better match the data (lower pixel losses, $\mathcal{L}_{\mathrm{px}}$). We show moving averages over 25 frames ($\sim 1$ s) with shaded areas indicating $\pm 2$ std.

![](images/ef58c27265949ca3d73c2dc14206e8051d5ef4f7d6e02e142cb1168b097d4533.jpg)
Figure 9. The rendering parameters change continually over the course of a recording to capture optical changes. Clear images (e.g. early frames in cameras 0 and 1, switching to late frames in camera 2) are consistent with small values of $\sigma$ and large values of $\rho$. Blurry images (early camera 2, late camera 1) use high $\sigma$ and small $\rho$. We show moving averages over 25 frames ($\sim 1$ s) with shaded areas indicating $\pm 2$ std. Example comparisons between the renders (red) and raw images (grey) are shown on either side.

Fig. 9 shows the rendering parameters during a trial as the worm moves in and out of focus in the different cameras. Clearer images result in smaller values of $\sigma$ and larger values of $\rho$. The fluctuations in intensity $\iota$ are due in part to the posture of the worm in relation to the camera: when it is pointing directly towards the camera we see higher values of $\iota$, capturing the darker image observed, and when the shape is perpendicular to the camera we see lower values of $\iota$ to emulate the worm's transparency. All three parameters work in tandem to produce the final effect.

# 7. Conclusion

We present a robust and reliable framework for the 3D reconstruction of a microscopic, semi-transparent subject moving through a fluid and evaluate it against two other algorithms and manual annotations. The key contribution of our approach, constructing unique differentiable renderings for each view, allows us to solve shape reconstruction and camera parameter optimisation by direct image comparison. This avoids feature extraction and correspondence matching, and hence offers a powerful alternative when those approaches are not well-suited, e.g. due to the variation in appearance between views.
+ +Multi-view microscopic camera calibration, imaging through fluids and parametric model fitting of semitransparent subjects are challenges that have received little attention in the literature. While we have focused here on constructing a curve to fit a microscopic worm from three views, our method could be applied to the 3D reconstruction of arbitrary shape models at any scale using any number of viewpoints. Rendering points with adaptable super-Gaussian functions presents an effective solution to transparency and focal issues, but more generally, our results indicate that our direct optimisation approach may offer an effective alternative to contemporary methods for 3D approximation of generic objects from a limited number of silhouette-like images. + +# References + +[1] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3686-3693. IEEE, June 2014. 2 +[2] Praneet C. Bala, Benjamin R. Eisenreich, Seng Bum Michael Yoo, Benjamin Y. Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with OpenMonkeyStudio. Nat Commun, 11(1):4560, Sept. 2020. 3 +[3] Jerrold L Belant, Joshua J Millspaugh, James A Martin, and Robert A Gitzen. Multi-dimensional space use: The final frontier. Front. Ecol. Environ., 10(1):11-12, Feb. 2012. 1 +[4] Florian Berlinger, Melvin Gauci, and Radhika Nagpal. Implicit coordination for 3D underwater collective behaviors in a fish-inspired robot swarm. Sci. Robot., 6(50):eabd8668, Jan. 2021. 1 +[5] Stefano Berri, Jordan H. Boyle, Manlio Tassieri, Ian A. Hope, and Netta Cohen. Forward locomotion of the nematode C. elegans is achieved through modulation of a single gait. Hfsp J., 3(3):186-193, June 2009. 3 +[6] Benjamin Biggs, Oliver Boyne, James Charles, Andrew Fitzgibbon, and Roberto Cipolla. Who left the dogs out? 3d animal reconstruction with expectation maximization in the loop. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 195-211. Springer, 2020. 3 +[7] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures great and small: Recovering the shape and motion of animals from video. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2-6, 2018, Revised Selected Papers, Part V 14, pages 3-19. Springer, 2019. 3 +[8] Richard L. Bishop. There is more than one way to frame a curve. Amer. Math. Monthly, 82(3):246-251, Mar. 1975. 3 +[9] Thomas J. Cashman and Andrew W. Fitzgibbon. What shape are dolphins? building 3D morphable models from 2D images. IEEE Trans. Pattern Anal. Mach. Intell., 35(1):232-244, Jan. 2013. 3 +[10] Ching-Hang Chen and Deva Ramanan. 3D human pose estimation = 2D pose estimation + matching. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7035-7043. IEEE, July 2017. 2 +[11] Nathan W. Cooper, Thomas W. Sherry, and Peter P. Marra. Modeling three-dimensional space use and overlap in birds. *Auk*, 131(4):681–693, Oct. 2014. 1 +[12] Amael Delaunoy and Marc Pollefeys. Photometric bundle adjustment for dense multi-view 3D modeling. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1486-1493. IEEE, June 2014. 2 +[13] Olivier Faugeras and Quang-Tuan Luong. The Geometry of Multiple Images. The MIT Press, 2001. 
2 +[14] Alessandro Ferrarini, Giuseppe Giglio, Stefania Caterina Pellegrino, Anna Grazia Frassanito, and Marco Gustin. A new methodology for computing birds' 3D home ranges. Avian Res, 9(1):1-6, May 2018. 1 + +[15] Lise Frézal and Marie-Anne Félix. The natural history of model organisms: C. elegans outside the petri dish. eLife, 4:e05849, Mar. 2015. 2 +[16] Kui Fu, Jiansheng Peng, Qiwen He, and Hanxiao Zhang. Single image 3D object reconstruction based on deep learning: A review. Multimed Tools Appl, 80(1):463-498, Sept. 2020. 2 +[17] Marie-Anne Félix and Christian Braendle. The natural history of caenorhabditis elegans. Curr. Biol., 20(22):R965–R969, Nov. 2010. 2 +[18] P. Georgel, S. Benhimane, and N. Navab. A unified approach combining photometric and geometric information for pose estimation. In Proceedings of the British Machine Vision Conference 2008, pages 1-10. CiteSeer, British Machine Vision Association, 2008. 2 +[19] Riza Alp Guler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7297-7306. IEEE, June 2018. 2 +[20] Zicheng Guo and Richard W. Hall. Parallel thinning with two-subiteration algorithms. Commun. ACM, 32(3):359-373, Mar. 1989. 3 +[21] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, Mar. 2004. 2 +[22] Robert I. Holbrook and Theresa Burt de Perera. Three-dimensional spatial cognition: Information in the vertical dimension overrides information from the horizontal. Anim Cogn, 14(4):613-619, Mar. 2011. 1 +[23] C.T. Huang and O.R. Mitchell. Dynamic camera calibration. In Proceedings of International Symposium on Computer Vision - ISCV, pages 169-174. IEEE, IEEE Comput. Soc. Press, 1995. 2 +[24] Steven J. Husson, Wagner S. Costa, Cornelia Schmitt, and Alexander Gottschalk. Keeping track of worm trackers. WormBook, pages 1-17, Sept. 2012. 3 +[25] Avelino Javer, Michael Currie, Chee Wai Lee, Jim Hokanson, Kezhi Li, Céline N. Martineau, Eviatar Yemini, Laura J. Grundy, Chris Li, QueeLim Ch'ng, William R. Schafer, Ellen A. A. Nollen, Rex Kerr, and André E. X. Brown. An open-source platform for analyzing and sharing worm-behavior data. Nat Methods, 15(9):645-646, Aug. 2018. 3 +[26] Le Jiang, Caleb Lee, Divyang Teotia, and Sarah Ostadabbas. Animal pose estimation: A closer look at the state-of-the-art, existing gaps and opportunities. Comput. Vis. Image Und., 222:103483, Sept. 2022. 1, 2 +[27] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131. IEEE, June 2018. 2, 3 +[28] Angjoo Kanazawa, Shahar Kovalsky, Ronen Basri, and David Jacobs. Learning 3d deformation of animals from 2d images. In Computer Graphics Forum, volume 35, pages 365-374. Wiley Online Library, 2016. 3 +[29] Isinsu Katircioglu, Bugra Tekin, Mathieu Salzmann, Vincent Lepetit, and Pascal Fua. Learning latent representations of + +3D human pose with deep neural networks. Int J Comput Vis, 126(12):1326-1341, Jan. 2018. 2 +[30] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. RGBD-dog: Predicting canine pose from RGBD sensors. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8336-8345. IEEE, June 2020. 1, 2 +[31] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 
7 +[32] Nikos Kolotouros, Georgios Pavlakos, Michael Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 2252-2261. IEEE, Oct. 2019. 2 +[33] Namseop Kwon, Ara B. Hwang, Young-Jai You, Seung-Jae V. Lee, and Jung Ho Je. Dissection of C. elegans behavioral genetics in 3-d environments. Sci Rep, 5(1):1-9, May 2015. 3 +[34] Namseop Kwon, Jaeyeon Pyo, Seung-Jae Lee, and Jung Ho Je. 3-d worm tracker for freely moving C. elegans. PLoS ONE, 8(2):e57484, Feb. 2013. 3 +[35] Wu Liu, Qian Bao, Yu Sun, and Tao Mei. Recent advances of monocular 2D and 3D human pose estimation: A deep learning perspective. ACM Comput. Surv., 55(4):1-41, Nov. 2022. 2 +[36] H. C. Longuet-Higgins. A computer algorithm for reconstructing a scene from two projections. Nature, 293(5828):133-135, Sept. 1981. 2 +[37] Simone Macri, Daniele Neri, Tommaso Ruberto, Violet Mwaffo, Sachit Butail, and Maurizio Porfiri. Three-dimensional scoring of zebrafish behavior unveils biological phenomena hidden by two-dimensional analyses. Sci Rep, 7(1):1-10, May 2017. 1 +[38] Julieta Martinez, Rayat Hossain, Javier Romero, and James J. Little. A simple yet effective baseline for 3d human pose estimation. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 2640-2649. IEEE, Oct. 2017. 2 +[39] Valsamis Ntouskos, Marta Sanzari, Bruno Cafaro, Federico Nardi, Fabrizio Natola, Fiora Pirri, and Manuel Ruiz. Component-wise modeling of articulated objects. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 2327-2335. IEEE, Dec. 2015. 3 +[40] Onur Özyesil, Vladislav Voroninski, Ronen Basri, and Amit Singer. A survey of structure from motion. Acta Numer., 26:305-364, May 2017. 2 +[41] Brian L. Partridge, Tony Pitcher, J. Michael Cullen, and John Wilson. The three-dimensional structure of fish schools. *Behav Ecol Sociobiol*, 6(4):277-288, Mar. 1980. 1 +[42] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G. Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7025-7034. IEEE, July 2017. 2 +[43] Mukta Prasad, Andrew Fitzgibbon, Andrew Zisserman, and Luc Van Gool. Finding nemo: Deformable object class mod- + +elling using curve matching. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1720-1727. IEEE, IEEE, June 2010. 3 +[44] Daniel Ramot, Brandon E. Johnson, Tommie L. Berry, Lucinda Carnell, and Miriam B. Goodman. The parallel worm tracker: A platform for measuring average speed and drug-induced paralysis in nematodes. PLoS ONE, 3(5):e2208, May 2008. 3 +[45] Felix Salfelder, Omer Yuval, Thomas P Ilett, David C Hogg, Thomas Ranner, and Netta Cohen. Markerless 3D spatio-temporal reconstruction of microscopic swimmers from video. In Visual observation and analysis of Vertebrate And Insect Behavior 2020, 2021. 2, 3, 4, 7, 8 +[46] Hinrich Schulenburg and Marie-Anne Félix. The natural biotic environment of Caenorhabditis elegans. Genetics, 206(1):55-86, May 2017. 2 +[47] S.M. Seitz, B. Curless, J. Diebel, D. Scharstein, and R. Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Volume 1 (CVPR'06), volume 1, pages 519-528. IEEE, IEEE, 2006. 2 +[48] William Irvin Sellers and Eishi Hirasaki. 
Markerless 3D motion capture for animal locomotion studies. *Biology Open*, 3(7):656-668, June 2014. 2 +[49] Michael Shaw, Haoyun Zhan, Muna Elmi, Vijay Pawar, Clara Essmann, and Mandayam A. Srinivasan. Three-dimensional behavioural phenotyping of freely moving C. elegans using quantitative light field microscopy. PLoS ONE, 13(7):e0200108, July 2018. 3 +[50] Colin A. Simpfendorfer, Esben M. Olsen, Michelle R. Heupel, and Even Moland. Three-dimensional kernel utilization distributions improve estimates of space use in aquatic animals. Can. J. Fish. Aquat. Sci., 69(3):565-572, Mar. 2012. 1 +[51] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5693-5703. IEEE, June 2019. 2 +[52] Nicholas A Swierczek, Andrew C Giles, Catharine H Rankin, and Rex A Kerr. High-throughput behavioral analysis in C. elegans. Nat Methods, 8(7):592-598, June 2011. 3 +[53] Raphael Sznitman, Manaswi Gupta, Gregory D. Hager, Paulo E. Arratia, and Josué Sznitman. Multi-environment model estimation for motility analysis of caenorhabditis elegans. PLoS ONE, 5(7):e11631, July 2010. 3 +[54] Diane Theriault, Zheng Wu, Nickolay I Hristov, Sharon M Swartz, Kenneth S Breuer, Thomas H Kunz, and Margrit Betke. Reconstruction and analysis of 3D trajectories of Brazilian free-tailed bats in flight. In 20th Int. Conf. on Pattern Recognition, pages 1-4, 2010. 1 +[55] Alexander Toshev and Christian Szegedy. DeepPose: Human pose estimation via deep neural networks. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1653-1660. IEEE, June 2014. 2 + +[56] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment — a modern synthesis. In Vision Algorithms: Theory and Practice, pages 298–372. Springer Berlin Heidelberg, 2000. 2 +[57] R. Tsai. A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses. IEEE J. Robot. Automat., 3(4):323–344, Aug. 1987. 2 +[58] Sara Vicente and Lourdes Agapito. Balloon shapes: Reconstructing and deforming objects with volume from images. In 2013 International Conference on 3D Vision, pages 223-230. IEEE, IEEE, June 2013. 3 +[59] J. Weng, P. Cohen, and M. Herniou. Camera calibration with distortion models and accuracy evaluation. IEEE Trans. Pattern Anal. Machine Intell., 14(10):965-980, 1992. 2 +[60] Nils Wilhelm, Anna Vögele, Rebeka Zsoldos, Theresia Licka, Björn Krüger, and Jürgen Bernard. FuryExplorer: Visual-interactive exploration of horse motion capture data. In SPIE Proceedings, volume 9397, pages 148–162. SPIE, SPIE, Feb. 2015. 1, 2 +[61] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10. IEEE, June 2020. 2 +[62] Eviatar Yemini, Rex A. Kerr, and William R. Schafer. Tracking movement behavior of multiple worms on food. Cold Spring Harb Protoc, 2011(12):pdb.prot067025, Dec. 2011. 3 +[63] Omer Yuval. The neuromechanical control of Caenorhabditis elegans head motor behaviour in 3D environments. PhD thesis, University of Leeds, 2022. 3, 7, 8 +[64] Sergey Zagoruyko, Adam Lerer, Tsung-Yi Lin, PedroO. Pinheiro, Sam Gross, Soumith Chintala, and Piotr Dollar. A MultiPath network for object detection. 
In Proceedings of the British Machine Vision Conference 2016. British Machine Vision Association, 2016. 7 +[65] Liquun Zhu and Wei Weng. Catadioptric stereo-vision system for the real-time monitoring of 3D behavior in aquatic animals. *Physiology & Behavior*, 91(1):106-119, May 2007. 1 +[66] Michael Zollhöfer, Patrick Stotko, Andreas Görtlitz, Christian Theobalt, Matthias Nießner, Reinhard Klein, and Andreas Kolb. State of the art on 3D reconstruction with RGB-d cameras. In Computer graphics forum, volume 37, pages 625-652. Wiley Online Library, 2018. 2 +[67] Silvia Zuffi, Angjoo Kanazawa, David W. Jacobs, and Michael J. Black. 3D menagerie: Modeling the 3D shape and pose of animals. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6365-6373. IEEE, July 2017. 3 \ No newline at end of file diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/images.zip b/2023/3D Shape Reconstruction of Semi-Transparent Worms/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..28fea81119d9a281243364665285e92c4b0cd64a --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3cc7862e6f281f06537d08b115ea67c870d485b04d2ab6f53fbf39320d439e4 +size 458146 diff --git a/2023/3D Shape Reconstruction of Semi-Transparent Worms/layout.json b/2023/3D Shape Reconstruction of Semi-Transparent Worms/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..bd63861d5a26e27a9c211023176697b582eafe71 --- /dev/null +++ b/2023/3D Shape Reconstruction of Semi-Transparent Worms/layout.json @@ -0,0 +1,9915 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 136, + 104, + 457, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 104, + 457, + 121 + ], + "spans": [ + { + "bbox": [ + 136, + 104, + 457, + 121 + ], + "type": "text", + "content": "3D shape reconstruction of semi-transparent worms" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 142, + 510, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 142, + 510, + 173 + ], + "spans": [ + { + "bbox": [ + 83, + 142, + 510, + 173 + ], + "type": "text", + "content": "Thomas P. Ilett* Omer Yuval* Thomas Ranner* Netta Cohen*† David C. Hogg*† \nUniversity of Leeds, Leeds, United Kingdom" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 52, + 200, + 545, + 358 + ], + "blocks": [ + { + "bbox": [ + 52, + 200, + 545, + 358 + ], + "lines": [ + { + "bbox": [ + 52, + 200, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 52, + 200, + 545, + 358 + ], + "type": "image", + "image_path": "24e8530d7c90f914cf9b9d1e88d8e21baec40e5fb507bb37bdae070083204e53.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 365, + 406, + 376 + ], + "lines": [ + { + "bbox": [ + 186, + 365, + 406, + 376 + ], + "spans": [ + { + "bbox": [ + 186, + 365, + 406, + 376 + ], + "type": "text", + "content": "Figure 1. Posture reconstruction pipeline and imaging setup." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 388, + 192, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 388, + 192, + 401 + ], + "spans": [ + { + "bbox": [ + 143, + 388, + 192, + 401 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 416, + 288, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 416, + 288, + 583 + ], + "spans": [ + { + "bbox": [ + 46, + 416, + 288, + 583 + ], + "type": "text", + "content": "3D shape reconstruction typically requires identifying object features or textures in multiple images of a subject. This approach is not viable when the subject is semitransparent and moving in and out of focus. Here we overcome these challenges by rendering a candidate shape with adaptive blurring and transparency for comparison with the images. We use the microscopic nematode Caenorhabditis elegans as a case study as it freely explores a 3D complex fluid with constantly changing optical properties. We model the slender worm as a 3D curve using an intrinsic parametrisation that naturally admits biologically-informed constraints and regularisation. To account for the changing optics we develop a novel differentiable renderer to construct images from 2D projections and compare" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 597, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 597, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 597, + 287, + 713 + ], + "type": "text", + "content": "\\*T.Ilett, O.Yuval, T.Ranner, N.Cohen, D.C.Hogg}@leeds.ac.uk Funding This work was supported by University of Leeds and EPSRC. Author contributions Conceptualisation, Methodology, Formal analysis, Investigation, Software, Visualisation: TPI. Data curation, Validation: TPI, OY. Writing: TPI (original), all (review and editing). Funding acquisition, Supervision: NC, DCH, TR. " + }, + { + "bbox": [ + 46, + 597, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 46, + 597, + 287, + 713 + ], + "type": "text", + "content": " Equal contribution. Acknowledgements Additional thanks to Matan Braunstein (for help with Fig. 1), Robert I. Holbrook (data), Felix Salfelder (discussions and data), Lukas Deutz (discussions) and Jen Kruger (proof reading). Data availability Supplementary movies are available here: https://doi.org/10.6084/m9.figshare.22310650." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 389, + 547, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 389, + 547, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 389, + 547, + 510 + ], + "type": "text", + "content": "against raw images to generate a pixel-wise error to jointly update the curve, camera and renderer parameters using gradient descent. The method is robust to interference such as bubbles and dirt trapped in the fluid, stays consistent through complex sequences of postures, recovers reliable estimates from blurry images and provides a significant improvement on previous attempts to track C. elegans in 3D. Our results demonstrate the potential of direct approaches to shape estimation in complex physical environments in the absence of ground-truth data." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 536, + 387, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 387, + 548 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 387, + 548 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 557, + 547, + 713 + ], + "type": "text", + "content": "Many creatures such as fish, birds and insects move in all directions to search and navigate volumetric environments. Acquiring 3D data of their motion has informed models of locomotion, behaviour and neural and mechanical control [3,22]. While technological advances have made the collection of large quantities of multi-viewpoint visual data more attainable, methods for extracting and modelling 3D information remain largely domain-dependant as few species share common geometric models or exist within the same spatial and temporal scales [4, 11, 14, 26, 37, 41, 50, 54, 65]. Furthermore, while humans and some domesticated animals [30, 60] may act naturally while wearing special markers, marker-less observations of many species makes fea" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12565" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "ture extraction more challenging and means pose estimation generally lacks ground-truth data [48]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "text", + "content": "As a case study in marker-less 3D shape reconstruction, we consider " + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "text", + "content": " elegans, a hair-thick, " + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "inline_equation", + "content": "\\sim 1" + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "text", + "content": " mm long animal with a simple tapered cylinder shape, which can be constructed from a midline \"skeleton\". In the wild, " + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 98, + 286, + 205 + ], + "type": "text", + "content": " elegans can be found in a wide range of complex 3D environments, e.g. decomposing organic matter, with continually changing physical properties [15, 17, 46]. However, to date, experiments have focused nearly exclusively on locomotion on a plane, limiting insight to the constrained, planar behaviours." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "spans": [ + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "text", + "content": "We obtained a large dataset (4 hours 53 minutes " + }, + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "inline_equation", + "content": "\\simeq" + }, + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "text", + "content": " 440,000 frames at " + }, + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 46, + 207, + 286, + 351 + ], + "type": "text", + "content": ") of experimental recordings of individual worms moving freely inside a glass cube filled with a gelatin solution. The cube is positioned between three nearly-orthogonal static cameras fitted with telecentric lenses. Initial pinhole camera model parameter estimates are provided [45] but are imprecise and require continuous adjustment across the course of a recording to account for small vibrations and optical changes to the gel. We aim to simultaneously reconstruct a 3D shape and find corrected camera parameters to match these recordings in a process akin to bundle adjustment [56]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 353, + 286, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 286, + 616 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 286, + 616 + ], + "type": "text", + "content": "3D reconstruction typically involves the identification and triangulation of common features from multiple viewpoints or the synthesis of full images including texture and shading information to match given scenes [16, 21, 47, 66]. Imaging animals with length " + }, + { + "bbox": [ + 46, + 353, + 286, + 616 + ], + "type": "inline_equation", + "content": "\\sim 1\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 353, + 286, + 616 + ], + "type": "text", + "content": " requires sufficient magnification, but simultaneously capturing long-term trajectories up to 25 minutes requires a large volume of view (10-20 worm lengths per axis). 
As the worm explores the cube it frequently appears out of focus in one or more of the cameras. Air bubbles and dirt trapped in the gel along with old tracks are difficult to differentiate from the transparent worm, particularly at the tapered ends. Self occlusion invariably appears in a least one view, where hidden parts darken the foreground while the ordering of fore/backparts is not discernible. As the semi-transparent and self-occluding subject moves in the volume, photometric information in one view bears little relevance to the appearance in the others making feature identification and photometric matching particularly challenging. We found that standard approaches may suffice for limited sub-clips, but lose parts of the object or fail catastrophically for much of the data and the solution requires a degree of adaptation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "We present an integrated \"project-render-score\" algorithm to obtain a midline curve for each image-triplet (Fig. 1). Discrete curve vertices are projected through a triplet of pinhole camera models, rendered to produce an image-triplet for direct comparison against the recorded images and scored according to their intersection with worm-like pixels in all three views. The differentiable renderer stacks 2D super-Gaussian blobs at the projected locations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "of each vertex to approximate the transparency along the worm, accounting for the variable focus and providing soft edges that direct the geometric model towards the midline. The scoring allows the detection of incongruities and keeps the curve aligned to the worm in all views. Regularisation terms ensure smoothness along the body and in time. Curve, camera and rendering parameters are jointly optimised using gradient descent to convergence. Once the worm shape has been resolved, it is generally only lost during image degradation or significant self-occlusions that make the posture unresolvable by eye." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 204, + 481, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 204, + 481, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 204, + 481, + 215 + ], + "type": "text", + "content": "In summary, our main contributions are:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 216, + 545, + 299 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 317, + 216, + 545, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 216, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 317, + 216, + 545, + 251 + ], + "type": "text", + "content": "- A robust pipeline for 3D posture reconstruction of a freely deforming semi-transparent object from noisy images." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 251, + 544, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 251, + 544, + 275 + ], + "spans": [ + { + "bbox": [ + 317, + 251, + 544, + 275 + ], + "type": "text", + "content": "- A novel viewpoint renderer to capture optical distortions and transparency." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 275, + 544, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 275, + 544, + 299 + ], + "spans": [ + { + "bbox": [ + 317, + 275, + 544, + 299 + ], + "type": "text", + "content": "- A feature-free bundle adjustment algorithm using direct image comparison and gradient descent." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 308, + 389, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 308, + 389, + 320 + ], + "spans": [ + { + "bbox": [ + 306, + 308, + 389, + 320 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 328, + 545, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 328, + 545, + 436 + ], + "spans": [ + { + "bbox": [ + 304, + 328, + 545, + 436 + ], + "type": "text", + "content": "Bundle adjustment (BA) is a procedure to jointly optimise 3D geometry and camera parameters [21, 56]. BA typically identifies common features of an object from multiple viewpoints in order to minimise a prediction error between projections of the corresponding 3D points and their 2D observations. BA is frequently used in conjunction with other methods to find camera parameters using multiple images of a 3D calibration object with known control points or for fine-tuning results [13, 23, 36, 40, 57, 59]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 437, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 545, + 533 + ], + "type": "text", + "content": "Feature detection converts photometric information into image coordinates. In BA, coordinates of common features are used to solve a geometric optimisation problem. Photometric bundle adjustment methods additionally require objects to have the same appearance in all views [12, 18]. Our method is entirely photometric, as such differing from BA. As our objects appear differently across views, all pixel information is used and the geometry is solved intrinsically." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": "Pose estimation Deep network approaches have proved well-suited to 2D human-pose estimation as they are potent feature extractors and large annotated training sets are available [1, 51, 55]. For 3D postures, ground truth multiview datasets are less common. Recent progress [35] relies on end-to-end architectures [19, 27, 29, 32, 42, 61] or splitting the problem into 2D pose estimation and then constructing the 3D pose [10, 38]. Despite similar approaches used for non-human pose estimation, the huge variability in scales and shapes among species introduces a variety of challenges [26]. 
Motion capture in controlled settings with markers (providing ground truth skeleton and joint angle data for humans, horses and dogs [30, 60]), are not available for most animals. Generalised mesh surfaces may be used," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12566" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "content": "but often require multiple views and thousands of parameters, and do not guarantee consistency through time. In contrast, approximating an animal shape using a few-parameter morphable model can be both tractable and robust. Successful examples include swimmers [9, 43], birds [27, 58], mammals [2,6,28,39] and generic quadrupeds [7,67]. However, these methods expect opaque subjects with consistent textural appearances between views." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 288, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 288, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 288, + 312 + ], + "type": "text", + "content": "C. elegans has a simple geometric shape that can be well reconstructed from a midline skeleton and parametrised by curvature values along the body (see Sec. 3). This is the deformable template we look to fit to the data. Despite the apparent simplicity, each vertex of the discretised curve has two degrees of freedom (two curvature values) and as we use 128 vertices, our model is highly deformable and requires many parameters (although smoothness regularisation simplifies the problem somewhat). In contrast to deep-learning approaches, our model includes only a small number of explainable parameters and direct optimisation avoids lengthy training and dataset requirements." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 329, + 287, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 329, + 287, + 413 + ], + "spans": [ + { + "bbox": [ + 46, + 329, + 287, + 413 + ], + "type": "text", + "content": "C. elegans Numerous freely available software packages are capable of simultaneous tracking and skeletonising single or multiple worms in 2D using inexpensive microscopic imaging [5,25,44,52,53,62] (see [24] for a review). Most of these skeletonisers combine image segmentation to separate the animal from the background with thinning of the mask to some midline pixels and fitting a spline." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 413, + 287, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 413, + 287, + 593 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 287, + 593 + ], + "type": "text", + "content": "The 3D reconstruction problem has received relatively little attention. Using at first two views [34] and then three, Kwon et al. [33] designed a motorised stage coupled with a real-time tracker to keep a worm in focus under high magnification in a 3D environment while capturing trajectories of up to 3 minutes. 
Thresholded images are lifted into 3D, intersected in voxel space and thinned [20] to produce a final skeleton. Kwon et al. omit camera modelling and assume perfectly parallel projections – assumptions that result in large errors for the data we use. Shaw et al. [49] employed light field microscopy to generate depth maps alongside images from a single viewpoint. A midline skeleton is generated by fitting a spline to the 3D coordinates of the central voxels. However, self-occlusions cannot be resolved and only relatively planar postures were investigated." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "content": "Salfelder et al. [45] and Yuval [63] both present 3D reconstruction algorithms using the three-camera set up and calibration described in [45]. In Salfelder et al. [45], a neural network is trained to identify 2D midlines from individual camera images before lifting into 3D voxel space. To account for changing camera parameters, a relative axial shift " + }, + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "inline_equation", + "content": "(dx,dy,dz)" + }, + { + "bbox": [ + 46, + 594, + 287, + 714 + ], + "type": "text", + "content": " is optimised for each frame-triplet to maximise the voxel intersection before thinning. Remaining voxel coordinates are used as control points to fit a curve using a finite-element formulation. This approach works well when" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 96 + ], + "type": "text", + "content": "the midline is well detected in each of the views, but can fail on occluded postures or low-resolution, blurry images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 96, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 96, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 96, + 545, + 180 + ], + "type": "text", + "content": "Yuval [63] uses a neural network to track head and tail points in 3D lab coordinates and a curve is fit between these fixed end points using a hill-climbing optimisation algorithm. Scoring is based on curve smoothness and pixel intensities at the projected curve points. This method works well when the head and tail are correctly identified but struggles, or requires manual correction, otherwise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 180, + 545, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 180, + 545, + 265 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 545, + 265 + ], + "type": "text", + "content": "In our approach we find that incorporating the camera model parameters into the optimisation results in more robust and accurate results. This extends the idea proposed in Salfelder et al. [45] that adjusting the relative positions of the cameras could result in large gains in accuracy. It is likely that the relative shift adjustments, presented there, account for the changing optical properties." 
3. Geometric model

Nematode shapes can be well approximated by a tapered cylinder and computed from a midline. We construct the midline curve in 3D using an object-centric parametrisation, separating shape from position and orientation to allow us to easily constrain and regularise the shape to stay within biologically reasonable bounds. We discretise the curve into $N$ equidistant vertices and encode the posture in curvature $K \in \mathbb{R}^{N \times 2}$ and length $l \in \mathbb{R}$, which fully define the shape up to a rigid-body transformation.

We express the 3D curve using the Bishop frame [8], given by $T M^{1} M^{2}$, where $T$ is the normalised tangent of the curve and $M^{1}, M^{2}$ form an orthogonal basis along the midline. At vertex $n$, the curvature is $K_{n} = (m_{n}^{1}, m_{n}^{2})$, where $m_{n}^{1}, m_{n}^{2} \in \mathbb{R}$ are the curvature components along $M^{1}, M^{2}$. (The more familiar Frenet frame is less stable as it is undefined at zero-curvature points.) Numerical integration of a system of difference equations from starting point $P_{\mathrm{init}}$ and initial orientation $(T_{\mathrm{init}}, M_{\mathrm{init}}^{1}, M_{\mathrm{init}}^{2})$ yields the curve path $P \in \mathbb{R}^{N \times 3}$. See supplementary material (SM) for details.

During optimisation, errors accumulate near the starting point $P_{\mathrm{init}}$, resulting in either parts of the curve moving faster than others or kinks developing (even with strong regularisation). To resolve this, we sample an initial vertex index $n_0$ from a Gaussian distribution (subject to rounding) centred at the middle index at every optimisation step. Setting the starting point $P_{\mathrm{init}} = P_{n_0}$ has the effect of continually shifting the discontinuity, so kinks are never given the opportunity to develop (Fig. 2). Summarising the integration as $F$, the 3D curve is generated from the parameters:

$$\left(\hat{P}, \hat{T}, \hat{M}^{1}\right) = F\left(P_{n_0}, T_{n_0}, M_{n_0}^{1}, K, l, n_0\right). \tag{1}$$

Figure 2. The 3D curve is traced out from initial point $P_{n_0}$ and orientation frame $(T_{n_0}, M_{n_0}^{1}, M_{n_0}^{2})$. The index $n_0$ of the initial point is drawn from a normal distribution at each iteration to prevent kinks developing through repeated use of the same starting point. The final curve $\hat{P}$ is computed in two parts by integrating the Bishop equations with curvature $K$ towards the head and tail separately.

Each gradient update adjusts all curvature values $K$ but the position and orientation only at the randomly selected vertex $n_0$, i.e. $(P_{n_0}, T_{n_0}, M_{n_0}^{1})$. Updating $(P, T, M^{1})$ at only this vertex produces a $P$ that is inconsistent with the updated $K$. Therefore, after applying gradient updates we re-compute the full curve and orientation from $n_0$ and set $(P, T, M^{1})$ to the output $(\hat{P}, \hat{T}, \hat{M}^{1})$.

Since the curve describes a biological creature, we constrain the length $l$ to $(l_{\min}, l_{\max})$ and limit the curvature by $|K_n| < 2\pi k_{\max}$. The values of $(l_{\min}, l_{\max})$ we use vary depending on magnification, but the bounds do not need to be tight and are in the range $0.5$–$2\,\mathrm{mm}$. The curvature constraint $k_{\max}$ is set by considering the number of complete circles traced by a constant-curvature curve and is fixed at 3.
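The integration $F$ can be sketched as follows. This is a minimal PyTorch illustration assuming a simple forward-Euler discretisation of the Bishop frame equations ($\mathrm{d}T/\mathrm{d}s = m^{1} M^{1} + m^{2} M^{2}$, $\mathrm{d}M^{1}/\mathrm{d}s = -m^{1} T$); the exact difference scheme is deferred to the SM, so the step details and the re-orthogonalisation below are illustrative assumptions rather than the authors' implementation.

```python
# Sketch of the Bishop-frame integration F in Eq. (1): trace the midline in two
# parts from vertex n0 towards the tail and the head (cf. Fig. 2). Hypothetical
# forward-Euler scheme; p0, t0, m1_0 are assumed to be unit-consistent inputs.
import torch

def bishop_step(p, t, m1, k, h):
    """Advance position p, tangent t and frame vector m1 by one segment of size h."""
    m2 = torch.cross(t, m1, dim=0)
    t_new = t + h * (k[0] * m1 + k[1] * m2)          # dT/ds = m1*M1 + m2*M2
    t_new = t_new / t_new.norm()
    m1_new = m1 - h * k[0] * t                       # dM1/ds = -m1*T
    m1_new = m1_new - (m1_new @ t_new) * t_new       # re-orthogonalise against the tangent
    m1_new = m1_new / m1_new.norm()
    return p + h * t_new, t_new, m1_new

def integrate_curve(p0, t0, m1_0, K, length, n0):
    """Return the (N, 3) curve path; tangents/frames can be collected the same way."""
    N = K.shape[0]
    h = length / (N - 1)
    fwd = [(p0, t0, m1_0)]
    for n in range(n0, N - 1):                       # integrate towards the tail
        fwd.append(bishop_step(*fwd[-1], K[n], h))
    bwd = [(p0, -t0, m1_0)]
    for n in range(n0, 0, -1):                       # integrate towards the head (reversed tangent)
        bwd.append(bishop_step(*bwd[-1], K[n], h))
    P = torch.stack([b[0] for b in reversed(bwd[1:])] + [f[0] for f in fwd])
    return P
```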
4. Project, Render, Score

The core of the optimisation pipeline is separable into three main stages: project, render and score. The 3D curve $\hat{P}$ generated in Eq. (1) is projected through the camera models into 2D points that are rendered into images and then scored against the three views.

4.1. Project

The cameras are modelled using a triplet of pinhole camera models with tangential and radial distortion that project 3D points into image planes using perspective transformations. Each pinhole camera model offers a simple (15 parameters, $\{\eta_c\}$), tractable approximation to the optical transformation. We also include relative shifts along the local coordinate axes, $\eta^{s} = (dx, dy, dz)$, shared between the three models, as proposed by Salfelder et al. [45]. Initial camera coefficients for the triplet model are provided along with the recordings and typically give root-mean-squared reprojection errors of up to 10 pixels ($\sim \mathcal{O}(\text{worm radius})$).
Due to the initial calibration errors and changes in optical properties as the gelatin sets and is disturbed by the worms, we re-calibrate the cameras at every frame by including the camera parameters in the optimisation step. To avoid an under-determined problem, after we have found a configuration that supports good reconstructions for a recording we fix all but the $\eta^{s}$ parameters. Interestingly, we still see changes (up to $30\,\mathrm{px} \sim 0.15\,\mathrm{mm}$) in $\eta^{s}$, but as this relates to the relative positioning it does not affect the posture reconstruction or long-term trajectories.

Figure 3. The rendering stage generates super-Gaussian blobs at each vertex position on the image. The shape of the blobs depends on the optimisable parameters: the scale $\sigma$, the intensity $\iota$ and the exponent used in the Gaussian, $\rho$. $\sigma$ and $\iota$ are tapered down to fixed minimum values at the head and tail. The effects of varying these parameters from a converged solution (blue curves) are shown above (green curves) and below (orange curves) each.

Projecting the 3D curve $\hat{P}$ through the camera-triplet model $\Gamma$ with parameters $\eta = \{\eta_0, \eta_1, \eta_2, \eta^{s}\}$ generates 2D image points per view, which we combine as $Q = \Gamma(\hat{P}, \eta) \in \mathbb{R}^{3 \times N \times 2}$.
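A sketch of this projection stage is given below, assuming an OpenCV-style pinhole parametrisation per camera (rotation, translation, focal lengths, principal point, radial and tangential distortion) plus the shared shift $\eta^{s}$; the exact layout of the 15 per-camera coefficients $\{\eta_c\}$ is an assumption here, not the paper's definition.

```python
# Hypothetical sketch of Q = Gamma(P_hat, eta) for the three-camera triplet.
import torch

def project_triplet(P, cams, eta_s):
    """P: (N, 3) curve; cams: list of 3 dicts of camera parameters; eta_s: (3,) shared shift.
    Returns Q: (3, N, 2) projected image points, one set per view."""
    Q = []
    for cam in cams:
        X = P @ cam["R"].T + cam["t"] + eta_s           # world -> camera frame, plus shared shift
        x, y = X[:, 0] / X[:, 2], X[:, 1] / X[:, 2]     # perspective divide
        r2 = x ** 2 + y ** 2
        radial = 1 + cam["k1"] * r2 + cam["k2"] * r2 ** 2
        x_d = x * radial + 2 * cam["p1"] * x * y + cam["p2"] * (r2 + 2 * x ** 2)
        y_d = y * radial + cam["p1"] * (r2 + 2 * y ** 2) + 2 * cam["p2"] * x * y
        u = cam["fx"] * x_d + cam["cx"]                 # pixel coordinates
        v = cam["fy"] * y_d + cam["cy"]
        Q.append(torch.stack([u, v], dim=-1))
    return torch.stack(Q)
```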
4.2. Render

In order to evaluate the reconstruction directly against the raw data, we render the projected 2D midline points into images using optimisable shape and rendering parameters. Since worm bodies are well approximated by tapered cylinders, in theory we only require maximum and minimum radius values and a tapering function. However, C. elegans are semi-transparent – increasingly so at the head and tail – and their internal anatomy has varying optical properties that diffract and distort the light. These challenges are further exacerbated by the worms often being out of focus in at least one of the views; therefore, even an anatomically accurate model stands little chance of being correctly resolved.

We render realistic images by combining 2D super-Gaussian functions centred on each projected vertex. Crucially, we allow the rendering parameters to differ between cameras since the animal seldom has the same photometric qualities in different views. We optimise three parameters for each camera view $c$: $\sigma_c \in \mathbb{R}$ controls the spread, $\iota_c \in \mathbb{R}$ scales the intensity, and $\rho_c \in \mathbb{R}$ sharpens or softens the edges (Fig. 3). To capture the tapered shape we weight $\sigma_c$ and $\iota_c$ from their optimisable values along the middle $60\%$ down to minimum values $\sigma_{\mathrm{min}}$ and $\iota_{\mathrm{min}}$ at the ends and define the tapered outputs $\bar{\sigma}_c \in \mathbb{R}^{N}$ and $\bar{\iota}_c \in \mathbb{R}^{N}$ (SM). $\sigma_{\mathrm{min}}$ and $\iota_{\mathrm{min}}$ are manually fixed for each recording to account for different magnification factors and worm size variability.
For each camera index $c$ and vertex index $n$ we define the rendered blob $B_{c,n} \in \mathbb{R}^{w \times w}$ (image size $w$) for pixel $(i,j)$ as:

$$B_{c,n}(i,j) = \bar{\iota}_{c,n} \exp\left[-\left(\frac{(i - Q_{c,n,0})^{2} + (j - Q_{c,n,1})^{2}}{2\,\bar{\sigma}_{c,n}^{2}}\right)^{\rho_{c}}\right]. \tag{2}$$

The stacks of blobs are combined to generate the complete renderings $R \in \mathbb{R}^{3 \times w \times w}$ by taking the maximum pixel value across all blobs: for pixel $(i,j)$,

$$R_{c}(i,j) = \max\left\{B_{c,n}(i,j)\right\}_{n=0,\dots,N-1}. \tag{3}$$
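Equations (2) and (3) vectorise naturally. The sketch below renders one view, assuming the tapered $\bar{\sigma}_c$ and $\bar{\iota}_c$ have already been computed and that pixel coordinates follow a simple row/column convention; it is an illustration of the blob-rendering idea, not the pipeline's code.

```python
# Render one view: one super-Gaussian blob per projected vertex (Eq. 2), combined
# by a per-pixel maximum (Eq. 3).
import torch

def render_view(Q_c, sigma_bar_c, iota_bar_c, rho_c, w):
    """Q_c: (N, 2) projected vertices; sigma_bar_c, iota_bar_c: (N,); rho_c: scalar.
    Returns R_c: (w, w) rendered image."""
    i = torch.arange(w, dtype=torch.float32).view(w, 1, 1)     # pixel rows
    j = torch.arange(w, dtype=torch.float32).view(1, w, 1)     # pixel columns
    d2 = (i - Q_c[:, 0]) ** 2 + (j - Q_c[:, 1]) ** 2           # (w, w, N) squared distances
    B = iota_bar_c * torch.exp(-(d2 / (2 * sigma_bar_c ** 2)) ** rho_c)   # Eq. (2)
    return B.max(dim=-1).values                                # Eq. (3): max over blobs
```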
The orientation of the body directly affects the pixel intensity of both raw and rendered images. When pointing directly at a camera, the peaks of the blobs cluster closely together and appear as a high-intensity (opaque) circle. Pointing laterally causes the peaks to spread out on the image, revealing more of the lower-intensity tails. In both situations our blob-rendering approach approximates transparency effects in the raw images without the need to model complex intensity-orientation responses. Moreover, super-Gaussian blobs allow sharp outlines to be produced in one view by using a large exponent and flat-top blobs, and blurry images to be produced for another, using low intensity and high variance.

4.3. Score

In order to evaluate how well the curve represents the worm we require a way of distinguishing between worm-pixels and non-worm pixels such as dirt, bubbles, old tracks and even other worms. When the animal truly intersects with environmental interference it can be impossible to differentiate between the two, but in the majority of cases there exists a gap between the worm and the noise that is visible in at least one of the views. By ensuring that the curve corresponds to a single contiguous pixel mass in all of the images we are able to safely ignore other artefacts (Fig. 4).

Figure 4. The 3D curve points are scored individually according to how well they match the three views. The triplet of blobs associated with vertex $n$ ($B_{\cdot,n}$) are multiplied with the images $I$ and summed. We take the minimum of the three sums and then taper these values from the midpoint-out.

To detect if the curve is bridging a gap, each vertex $\hat{P}_n$ is scored by correlating its corresponding blobs $B_{\cdot,n}$ (Sec. 4.2) with the images $I$. The raw score $S_n \in \mathbb{R}$ is defined:

$$S_{n} = \min\left\{\frac{\sum_{i,j} B_{c,n} \cdot I_{c}}{\bar{\sigma}_{c,n}\,\bar{\iota}_{c,n}}\right\}_{c=0,1,2} \tag{4}$$

where $\cdot$ is element-wise multiplication and the sum is taken over the image dimensions. By taking the minimum we ensure that vertices failing to match pixels in any one of the views will receive low scores regardless of how well they match pixels in the other views.

If the curve is bridging two disjoint groups of pixels that are visible in all three views, this will present as two peaks in $S$. Since we are only interested in finding one object, we restrict the scores to contain just one peak by tapering $S$ from the middle-out to form the intermediate $S'$. Finally we normalise $S'$ to get scores $\hat{S}$ relative to the peak:

$$S'_{n} = \begin{cases} \min\left\{S_{n}, S'_{n+1}\right\} & 0 \leq n < N/2 \\ S_{n} & n = N/2 \\ \min\left\{S_{n}, S'_{n-1}\right\} & N/2 < n < N \end{cases} \tag{5}$$

$$\hat{S} = \frac{S'}{\max_{n}\left\{S'\right\}}. \tag{6}$$
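A sketch of the per-vertex scoring and middle-out tapering of Eqs. (4)-(6) follows; the tensor layouts (blobs of shape $3 \times N \times w \times w$) are illustrative assumptions.

```python
# Score each vertex against the three views (Eq. 4), taper from the middle out
# (Eq. 5) and normalise relative to the peak (Eq. 6).
import torch

def score_vertices(B, I, sigma_bar, iota_bar):
    """B: (3, N, w, w) blobs; I: (3, w, w) images; sigma_bar, iota_bar: (3, N)."""
    corr = (B * I[:, None]).sum(dim=(-2, -1))                # sum_ij B_{c,n} * I_c -> (3, N)
    S = (corr / (sigma_bar * iota_bar)).min(dim=0).values    # Eq. (4): min over cameras -> (N,)

    N = S.shape[0]
    mid = N // 2
    left = [S[mid]]
    for n in range(mid - 1, -1, -1):                         # Eq. (5), head side: min(S_n, S'_{n+1})
        left.append(torch.minimum(S[n], left[-1]))
    right = [S[mid]]
    for n in range(mid + 1, N):                              # Eq. (5), tail side: min(S_n, S'_{n-1})
        right.append(torch.minimum(S[n], right[-1]))
    S_prime = torch.stack(list(reversed(left)) + right[1:])
    S_hat = S_prime / S_prime.max()                          # Eq. (6)
    return S, S_prime, S_hat
```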
Figure 5. The noisy input images are cleaned by applying masks that force pixel-errors to be local to the current estimate. The blobs $B$ are scaled by the relative scores $\hat{S}$, combined using the maximum pixel value across blobs and thresholded to form the masks $M$. The masks are applied to the raw input images $I$ to generate the targets $I^{\star}$. Masking ensures only a single contiguous pixel mass is detected. Without it, parts of the reconstruction can "stick" to nearby bubbles and other artefacts (panels show the result without and with masking).

The final score profile $\hat{S}$ provides insight into how well the curve matches a contiguous pixel mass across all three views and how evenly that mass is distributed.

Masking From the score profile $\hat{S}$ we identify image areas that are more likely to contain the pixel masses that correspond to the worm. Masks $M \in \mathbb{R}^{3 \times w \times w}$ applied to the input, $I^{\star} = M \cdot I$, focus attention (and gradient) on only these areas of interest, consistently across all three views, and exclude interference outside the masks (Fig. 5, see SM). Pixel intensities outside the masks are significantly reduced, but not zeroed, in order to avoid stagnation in case the reconstruction completely misses the worm.
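The masking step might look roughly as follows; the threshold and the residual intensity left outside the mask are illustrative values, not taken from the paper.

```python
# Build masks from score-weighted blobs and damp (but do not zero) pixels away
# from the current estimate, giving the targets I* = M . I (Fig. 5).
import torch

def mask_targets(B, S_hat, I, threshold=0.5, floor=0.1):
    """B: (3, N, w, w) blobs; S_hat: (N,) relative scores; I: (3, w, w) raw images."""
    weighted = B * S_hat[None, :, None, None]                  # scale each blob by its vertex score
    M = (weighted.max(dim=1).values > threshold).float()       # (3, w, w) thresholded masks
    M = M + floor * (1 - M)                                    # reduced, non-zero intensity outside
    return M * I                                               # targets I*
```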
Centre-shifting The scores $\hat{S}$ also indicate the relative positioning of the curve over the target object. As the curve aligns with a pixel mass, vertices with high scores (apparently "converged") tend to lock into place, thus hindering convergence of the rest of the object. For each frame we use the previous frame's solution as the starting point, so the majority of points rapidly converge. However, errors introduced at the tips remain, as they are insufficient to generate the collective shift required. The effect can easily be identified from an unbalanced score profile (Fig. 6) and rectified by periodically shifting the curve along its length between gradient descent optimisation steps (see SM).

Figure 6. As the animal moves along the path of its midline the tail may be left behind (left column). This can be identified from an unbalanced score profile $\hat{S}$. By periodically shifting the curve along its length (adding new curvature values at one end and discarding from the other) the centroid index $\bar{n}$ of the scores can be centred. Gradient descent optimisation then updates the new curvature values so the curve matches the target (right column).

5. Optimisation

The main pixel-loss to be minimised is defined as:

$$\mathcal{L}_{\mathrm{px}} = \frac{1}{3w^{2}} \sum_{c,i,j} \left(R_{c}(i,j) - I_{c}^{\star}(i,j)\right)^{2}. \tag{7}$$

To improve head and tail detection we also minimise a scores-loss,

$$\mathcal{L}_{\mathrm{sc}} = \frac{\max\left(S'\right) N}{\sum_{n} S''_{n}}, \quad \text{where} \tag{8}$$

$$S''_{n} = S'_{n} \left(\frac{2n - (N-1)}{N-1}\right)^{2}, \tag{9}$$

which is quadratically weighted towards the tips, where the scores are naturally lower due to the transparency.

In addition we include a number of regularisation terms. To keep the curve smooth we define

$$\mathcal{L}_{\mathrm{sm}} = \sum_{n=1}^{N-1} \left|K_{n} - K_{n-1}\right|^{2}, \tag{10}$$

where $|\cdot|$ is the $l^{2}$-norm.
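The pixel, scores and smoothness losses of Eqs. (7)-(10) translate directly; a minimal sketch assuming the shapes used above:

```python
# Pixel loss (Eq. 7), scores loss (Eqs. 8-9) and curvature smoothness loss (Eq. 10).
import torch

def pixel_loss(R, I_star):                        # R, I_star: (3, w, w)
    w = R.shape[-1]
    return ((R - I_star) ** 2).sum() / (3 * w ** 2)

def scores_loss(S_prime):                         # S_prime: (N,) tapered scores
    N = S_prime.shape[0]
    n = torch.arange(N, dtype=S_prime.dtype)
    weights = ((2 * n - (N - 1)) / (N - 1)) ** 2  # quadratic weighting towards the tips
    return S_prime.max() * N / (S_prime * weights).sum()

def smoothness_loss(K):                           # K: (N, 2) curvature components
    return ((K[1:] - K[:-1]) ** 2).sum()
```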
To ensure all parameters change smoothly between frames we set

$$\mathcal{L}_{\mathrm{t}} = \sum_{x \in \{l, K, \hat{P}, \eta, \sigma, \iota, \rho\}} \left|x^{\mathrm{prev}} - x\right|^{2}, \tag{11}$$

where $x^{\mathrm{prev}}$ refers to the frozen value of the variable from the previous frame. And to avoid self-intersections, we use

$$d_{n,m} = \left|\hat{P}_{n} - \hat{P}_{m}\right|, \tag{12}$$

$$d'_{n,m} = \frac{1}{3}\sum_{c}\bar{\sigma}_{c,n} + \frac{1}{3}\sum_{c}\bar{\sigma}_{c,m}, \quad\text{and} \tag{13}$$

$$\mathcal{L}_{\mathrm{i}} = \sum_{n=0}^{N - N/k_{\max} - 1} \;\; \sum_{m=n+N/k_{\max}}^{N-1} \begin{cases} \dfrac{d'_{n,m}}{d_{n,m}}, & \text{if } d_{n,m} < d'_{n,m} \\ 0, & \text{otherwise.} \end{cases} \tag{14}$$

A loss is incurred, $\mathcal{L}_{\mathrm{i}} > 0$, when two points that are sufficiently far apart ($> N/k_{\max}$) along the curve come within a distance defined by the sum of their mean rendering variances (since these approximate the worm's radius). Eq. (14) forces the algorithm to find postures that are always feasible, even during self-occlusions and complex manoeuvres.
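A sketch of the temporal and self-intersection regularisers of Eqs. (11)-(14) is given below; the dictionary of temporally smoothed variables and the pairwise-distance implementation are assumptions layered on top of the definitions above.

```python
# Temporal smoothness over the optimised variables (Eq. 11) and the self-intersection
# penalty over well-separated vertex pairs (Eqs. 12-14).
import torch

def temporal_loss(current, previous):
    """current, previous: dicts over {l, K, P_hat, eta, sigma, iota, rho}; previous frozen."""
    return sum(((previous[k].detach() - v) ** 2).sum() for k, v in current.items())

def intersection_loss(P_hat, sigma_bar, k_max=3):
    """P_hat: (N, 3) curve; sigma_bar: (3, N) tapered spreads; k_max fixed at 3 in the text."""
    N = P_hat.shape[0]
    gap = N // k_max                                 # only penalise pairs at least N/k_max apart
    d = torch.cdist(P_hat, P_hat)                    # Eq. (12): d_{n,m} = |P_n - P_m|
    radii = sigma_bar.mean(dim=0)                    # mean rendering variance per vertex
    d_prime = radii[:, None] + radii[None, :]        # Eq. (13)
    n, m = torch.triu_indices(N, N, offset=gap)      # index pairs with m - n >= N/k_max
    penalty = torch.where(d[n, m] < d_prime[n, m],
                          d_prime[n, m] / d[n, m],
                          torch.zeros_like(d[n, m]))
    return penalty.sum()                             # Eq. (14)
```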
The losses are combined in a weighted sum to yield the final optimisation target:

$$\mathcal{L} = \omega_{\mathrm{px}}\mathcal{L}_{\mathrm{px}} + \omega_{\mathrm{sc}}\mathcal{L}_{\mathrm{sc}} + \omega_{\mathrm{sm}}\mathcal{L}_{\mathrm{sm}} + \omega_{\mathrm{t}}\mathcal{L}_{\mathrm{t}} + \omega_{\mathrm{i}}\mathcal{L}_{\mathrm{i}}. \tag{15}$$

Values of $\omega$ used in our experiments are included in the SM.

To achieve robust reconstructions it is important that the curve parameters learn fastest, then the rendering parameters and finally the camera parameters. Imposing this hierarchy of rates ensures camera model stability and prevents the renderer from over-blurring the edges (as it tries to "reach" the pixels). Thus, movement between frames is primarily captured through curve deformations.
We use learning rates " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\lambda_{p} = 1\\mathrm{e} - 3" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": " for the curve parameters " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\{P,T,M^1,K,l\\}" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\lambda_r = 1\\mathrm{e} - 4" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": " for the rendering parameters " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\{\\sigma ,\\iota ,\\rho \\}" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\lambda_{\\eta} = 1\\mathrm{e} - 5" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": " for the camera parameters " + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 47, + 425, + 286, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "text", + "content": "The curve is initialised as a small " + }, + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "inline_equation", + "content": "(\\sim 0.2\\mathrm{mm})" + }, + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "text", + "content": " , randomly oriented straight line centred in the field of view of all three cameras. We slowly increase the length to " + }, + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "inline_equation", + "content": "l_{\\mathrm{min}}" + }, + { + "bbox": [ + 47, + 545, + 286, + 593 + ], + "type": "text", + "content": " over the first 200-500 steps as the curve gets positioned and orientated." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": "The pipeline is constructed using PyTorch [64] and the loss minimised is using Adam [31] with periodic centre-shifting of the curve vertices. Learning rates are decreased by a factor of 0.8 for every 5 steps taken without improvement in " + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": " to a minimum of " + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "inline_equation", + "content": "1\\mathrm{e} - 6" + }, + { + "bbox": [ + 46, + 594, + 286, + 713 + ], + "type": "text", + "content": " until convergence is detected. Subsequent frames are instantiated with the solution from the previous frame for efficiency and to maintain consistency through complex sequences of self-occluding postures. Example videos showing the effects of varying some of the options on the optimisation are described in SM." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 322, + 71, + 529, + 230 + ], + "blocks": [ + { + "bbox": [ + 322, + 71, + 529, + 230 + ], + "lines": [ + { + "bbox": [ + 322, + 71, + 529, + 230 + ], + "spans": [ + { + "bbox": [ + 322, + 71, + 529, + 230 + ], + "type": "image", + "image_path": "67915852fe48a0b1a909ac42a61baea8c9510dde3148fed59046adac7f48b8e9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 239, + 545, + 306 + ], + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 306 + ], + "type": "text", + "content": "Figure 7. Validation against 487 manual annotations. At the top we show an example of an annotated frame (left, orange) alongside a projection of our matching 3D midline (right, blue). Below we plot the sample averages " + }, + { + "bbox": [ + 305, + 239, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\pm 2\\mathrm{std}" + }, + { + "bbox": [ + 305, + 239, + 545, + 306 + ], + "type": "text", + "content": ". We find our midlines are consistently close to annotated points (blue curve), but annotations typically extend further into the head and tail regions (orange curve)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 329, + 358, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 329, + 358, + 341 + ], + "spans": [ + { + "bbox": [ + 306, + 329, + 358, + 341 + ], + "type": "text", + "content": "6. Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 350, + 545, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 447 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 447 + ], + "type": "text", + "content": "Using our method we generate high quality 3D midline reconstructions for 43 of 44 recordings. One fails due to excessive coiling of the worm. Significant occlusions also occur during successful reconstructions and when combined with loss of focus can cause the shape to be lost. Video clips of good and poor reconstructions through challenging environmental conditions are described in SM along with ablation results to show benefits of each component." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "We compare 2D reprojections of our midlines against 487 manual annotations that were produced from single images in isolation and contain a varying number of unordered points. We calculate the minimum distance from each annotated point to any reconstructed point and vice-versa and find that our midlines consistently come close (" + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "2px) to hand-annotated points (Fig. 7). Annotated points at the ends show an increased distance (" + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "10px) to our midline points. 
This shows that our curves generally fall short of reaching the very tips of the worm by " + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 304, + 449, + 545, + 567 + ], + "type": "text", + "content": "O(worm radius)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "Our method significantly outperforms previous methods developed using the same dataset [45, 63] when evaluated against the manual annotations (SM), but these only cover a selection of hand-picked examples. For a large-scale comparison we take 3D midlines and camera parameters found by each method and, using our pipeline, render them to generate comparable images (re-optimising the render parameters for their midlines, see SM). We skip the scoring and masking and calculate " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{px}}" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": ". The results (Fig. 8) show our method consistently produces shapes that more closely match the raw images. The biggest advantage over previous approaches is the improvement in robustness; we recover" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12571" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 64, + 71, + 532, + 175 + ], + "blocks": [ + { + "bbox": [ + 64, + 71, + 532, + 175 + ], + "lines": [ + { + "bbox": [ + 64, + 71, + 532, + 175 + ], + "spans": [ + { + "bbox": [ + 64, + 71, + 532, + 175 + ], + "type": "image", + "image_path": "6d4533a14537cc613de6396d0b23513ca3d4ef77e19f8377543bddaf9b1160cf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "text", + "content": "Figure 8. A comparison between our Midline Finder (MF), Yuval's Worm-Tracker 3D (WT3D) [63] and Salfelder et al.'s 'reconst' [45] methods across a single trial (" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "inline_equation", + "content": "\\sim 13" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "text", + "content": " min). In the majority of cases our method generates midlines that better match the data (lower pixel losses, " + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{px}}" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "text", + "content": "). 
We show moving averages over 25 frames (" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "inline_equation", + "content": "\\sim 1" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "text", + "content": " s) with shaded areas indicating " + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "inline_equation", + "content": "\\pm 2" + }, + { + "bbox": [ + 46, + 182, + 548, + 218 + ], + "type": "text", + "content": " std." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 230, + 536, + 401 + ], + "blocks": [ + { + "bbox": [ + 59, + 230, + 536, + 401 + ], + "lines": [ + { + "bbox": [ + 59, + 230, + 536, + 401 + ], + "spans": [ + { + "bbox": [ + 59, + 230, + 536, + 401 + ], + "type": "image", + "image_path": "ef58c27265949ca3d73c2dc14206e8051d5ef4f7d6e02e142cb1168b097d4533.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "lines": [ + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "spans": [ + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": "Figure 9. The rendering parameters change continually over the course of a recording to capture optical changes. Clear images (e.g. early frames in cameras 0 and 1, switching to late frames in camera 2) are consistent with small values of " + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": " and large values of " + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": ". Blurry images (early camera 2, late camera 1) use high " + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": " and small " + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": ". We show moving averages over 25 frames (" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\sim 1" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": " s) with shaded areas indicating " + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "inline_equation", + "content": "\\pm 2" + }, + { + "bbox": [ + 46, + 413, + 547, + 460 + ], + "type": "text", + "content": " std. Example comparisons between the renders (red) and raw images (grey) are shown on either side." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 478, + 282, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 478, + 282, + 490 + ], + "spans": [ + { + "bbox": [ + 47, + 478, + 282, + 490 + ], + "type": "text", + "content": "4 h 37 min (ours) versus 1 h 32 min [45] and 45 min [63]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "spans": [ + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": "Fig. 
9 shows the rendering parameters during a trial as the worm moves in and out of focus in the different cameras. Clearer images result in smaller values of " + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": " and larger values of " + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": ". The fluctuations in intensity " + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "inline_equation", + "content": "\\iota" + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": " are due in part to the posture of the worm in relation to the camera; when it is pointing directly towards the camera we see higher values of " + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "inline_equation", + "content": "\\iota" + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": " used to capture the darker image observed and when the shape is perpendicular to the camera we see lower values of " + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "inline_equation", + "content": "\\iota" + }, + { + "bbox": [ + 46, + 491, + 288, + 610 + ], + "type": "text", + "content": " to emulate the worm's transparency. All three parameters work in tandem to produce the final effect." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 621, + 121, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 621, + 121, + 634 + ], + "spans": [ + { + "bbox": [ + 47, + 621, + 121, + 634 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "We present a robust and reliable framework for the 3D reconstruction of a microscopic, semi-transparent subject moving through a fluid and evaluate against two other algorithms and manually annotations. The key contribution of our approach - constructing unique differentiable renderings for each view - allows us to solve shape recon" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 478, + 546, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 478, + 546, + 537 + ], + "spans": [ + { + "bbox": [ + 304, + 478, + 546, + 537 + ], + "type": "text", + "content": "struction and camera parameter optimisation by direct image comparison. This avoids feature extraction and correspondence matching, and hence offers a powerful alternative when those approaches are not well-suited, e.g. due to the variation in appearance between views." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 538, + 547, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 538, + 547, + 706 + ], + "spans": [ + { + "bbox": [ + 304, + 538, + 547, + 706 + ], + "type": "text", + "content": "Multi-view microscopic camera calibration, imaging through fluids and parametric model fitting of semitransparent subjects are challenges that have received little attention in the literature. 
While we have focused here on constructing a curve to fit a microscopic worm from three views, our method could be applied to the 3D reconstruction of arbitrary shape models at any scale using any number of viewpoints. Rendering points with adaptable super-Gaussian functions presents an effective solution to transparency and focal issues, but more generally, our results indicate that our direct optimisation approach may offer an effective alternative to contemporary methods for 3D approximation of generic objects from a limited number of silhouette-like images." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "12572" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2D human pose estimation: New benchmark and state of the art analysis. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 3686-3693. IEEE, June 2014. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "text", + "content": "[2] Praneet C. Bala, Benjamin R. Eisenreich, Seng Bum Michael Yoo, Benjamin Y. Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with OpenMonkeyStudio. Nat Commun, 11(1):4560, Sept. 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "text", + "content": "[3] Jerrold L Belant, Joshua J Millspaugh, James A Martin, and Robert A Gitzen. Multi-dimensional space use: The final frontier. Front. Ecol. Environ., 10(1):11-12, Feb. 2012. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 287, + 278 + ], + "type": "text", + "content": "[4] Florian Berlinger, Melvin Gauci, and Radhika Nagpal. Implicit coordination for 3D underwater collective behaviors in a fish-inspired robot swarm. Sci. Robot., 6(50):eabd8668, Jan. 2021. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "type": "text", + "content": "[5] Stefano Berri, Jordan H. Boyle, Manlio Tassieri, Ian A. Hope, and Netta Cohen. Forward locomotion of the nematode C. elegans is achieved through modulation of a single gait. Hfsp J., 3(3):186-193, June 2009. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 324, + 287, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 287, + 390 + ], + "type": "text", + "content": "[6] Benjamin Biggs, Oliver Boyne, James Charles, Andrew Fitzgibbon, and Roberto Cipolla. Who left the dogs out? 3d animal reconstruction with expectation maximization in the loop. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 195-211. Springer, 2020. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 391, + 288, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 288, + 456 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 288, + 456 + ], + "type": "text", + "content": "[7] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures great and small: Recovering the shape and motion of animals from video. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2-6, 2018, Revised Selected Papers, Part V 14, pages 3-19. Springer, 2019. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 457, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 457, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 457, + 287, + 479 + ], + "type": "text", + "content": "[8] Richard L. Bishop. There is more than one way to frame a curve. Amer. Math. Monthly, 82(3):246-251, Mar. 1975. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 287, + 523 + ], + "type": "text", + "content": "[9] Thomas J. Cashman and Andrew W. Fitzgibbon. What shape are dolphins? building 3D morphable models from 2D images. IEEE Trans. Pattern Anal. Mach. Intell., 35(1):232-244, Jan. 2013. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "text", + "content": "[10] Ching-Hang Chen and Deva Ramanan. 3D human pose estimation = 2D pose estimation + matching. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7035-7043. IEEE, July 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 569, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 569, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 569, + 287, + 601 + ], + "type": "text", + "content": "[11] Nathan W. Cooper, Thomas W. Sherry, and Peter P. Marra. Modeling three-dimensional space use and overlap in birds. *Auk*, 131(4):681–693, Oct. 2014. 
1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 602, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 646 + ], + "type": "text", + "content": "[12] Amael Delaunoy and Marc Pollefeys. Photometric bundle adjustment for dense multi-view 3D modeling. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1486-1493. IEEE, June 2014. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 288, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 668 + ], + "type": "text", + "content": "[13] Olivier Faugeras and Quang-Tuan Luong. The Geometry of Multiple Images. The MIT Press, 2001. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 713 + ], + "type": "text", + "content": "[14] Alessandro Ferrarini, Giuseppe Giglio, Stefania Caterina Pellegrino, Anna Grazia Frassanito, and Marco Gustin. A new methodology for computing birds' 3D home ranges. Avian Res, 9(1):1-6, May 2018. 1" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[15] Lise Frézal and Marie-Anne Félix. The natural history of model organisms: C. elegans outside the petri dish. eLife, 4:e05849, Mar. 2015. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 150 + ], + "type": "text", + "content": "[16] Kui Fu, Jiansheng Peng, Qiwen He, and Hanxiao Zhang. Single image 3D object reconstruction based on deep learning: A review. Multimed Tools Appl, 80(1):463-498, Sept. 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 185 + ], + "type": "text", + "content": "[17] Marie-Anne Félix and Christian Braendle. The natural history of caenorhabditis elegans. Curr. Biol., 20(22):R965–R969, Nov. 2010. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 239 + ], + "type": "text", + "content": "[18] P. Georgel, S. Benhimane, and N. Navab. A unified approach combining photometric and geometric information for pose estimation. In Proceedings of the British Machine Vision Conference 2008, pages 1-10. CiteSeer, British Machine Vision Association, 2008. 
2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 285 + ], + "type": "text", + "content": "[19] Riza Alp Guler, Natalia Neverova, and Iasonas Kokkinos. DensePose: Dense human pose estimation in the wild. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7297-7306. IEEE, June 2018. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 319 + ], + "type": "text", + "content": "[20] Zicheng Guo and Richard W. Hall. Parallel thinning with two-subiteration algorithms. Commun. ACM, 32(3):359-373, Mar. 1989. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "text", + "content": "[21] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, Mar. 2004. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 354, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 398 + ], + "type": "text", + "content": "[22] Robert I. Holbrook and Theresa Burt de Perera. Three-dimensional spatial cognition: Information in the vertical dimension overrides information from the horizontal. Anim Cogn, 14(4):613-619, Mar. 2011. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "type": "text", + "content": "[23] C.T. Huang and O.R. Mitchell. Dynamic camera calibration. In Proceedings of International Symposium on Computer Vision - ISCV, pages 169-174. IEEE, IEEE Comput. Soc. Press, 1995. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 445, + 545, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 445, + 545, + 478 + ], + "spans": [ + { + "bbox": [ + 307, + 445, + 545, + 478 + ], + "type": "text", + "content": "[24] Steven J. Husson, Wagner S. Costa, Cornelia Schmitt, and Alexander Gottschalk. Keeping track of worm trackers. WormBook, pages 1-17, Sept. 2012. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 479, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 479, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 479, + 545, + 544 + ], + "type": "text", + "content": "[25] Avelino Javer, Michael Currie, Chee Wai Lee, Jim Hokanson, Kezhi Li, Céline N. Martineau, Eviatar Yemini, Laura J. Grundy, Chris Li, QueeLim Ch'ng, William R. Schafer, Ellen A. A. Nollen, Rex Kerr, and André E. X. Brown. An open-source platform for analyzing and sharing worm-behavior data. Nat Methods, 15(9):645-646, Aug. 2018. 
3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 589 + ], + "type": "text", + "content": "[26] Le Jiang, Caleb Lee, Divyang Teotia, and Sarah Ostadabbas. Animal pose estimation: A closer look at the state-of-the-art, existing gaps and opportunities. Comput. Vis. Image Und., 222:103483, Sept. 2022. 1, 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 545, + 644 + ], + "type": "text", + "content": "[27] Angjoo Kanazawa, Michael J. Black, David W. Jacobs, and Jitendra Malik. End-to-end recovery of human shape and pose. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7122-7131. IEEE, June 2018. 2, 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 646, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 646, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 646, + 545, + 689 + ], + "type": "text", + "content": "[28] Angjoo Kanazawa, Shahar Kovalsky, Ronen Basri, and David Jacobs. Learning 3d deformation of animals from 2d images. In Computer Graphics Forum, volume 35, pages 365-374. Wiley Online Library, 2016. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 713 + ], + "type": "text", + "content": "[29] Isinsu Katircioglu, Bugra Tekin, Mathieu Salzmann, Vincent Lepetit, and Pascal Fua. Learning latent representations of" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12573" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "3D human pose with deep neural networks. Int J Comput Vis, 126(12):1326-1341, Jan. 2018. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 288, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 288, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 288, + 150 + ], + "type": "text", + "content": "[30] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. RGBD-dog: Predicting canine pose from RGBD sensors. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8336-8345. IEEE, June 2020. 
1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 184 + ], + "type": "text", + "content": "[31] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 288, + 239 + ], + "type": "text", + "content": "[32] Nikos Kolotouros, Georgios Pavlakos, Michael Black, and Kostas Daniilidis. Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In 2019 IEEE/CVF International Conference on Computer Vision (ICCV), pages 2252-2261. IEEE, Oct. 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 288, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 284 + ], + "type": "text", + "content": "[33] Namseop Kwon, Ara B. Hwang, Young-Jai You, Seung-Jae V. Lee, and Jung Ho Je. Dissection of C. elegans behavioral genetics in 3-d environments. Sci Rep, 5(1):1-9, May 2015. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 288, + 319 + ], + "type": "text", + "content": "[34] Namseop Kwon, Jaeyeon Pyo, Seung-Jae Lee, and Jung Ho Je. 3-d worm tracker for freely moving C. elegans. PLoS ONE, 8(2):e57484, Feb. 2013. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 320, + 288, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 320, + 288, + 363 + ], + "spans": [ + { + "bbox": [ + 48, + 320, + 288, + 363 + ], + "type": "text", + "content": "[35] Wu Liu, Qian Bao, Yu Sun, and Tao Mei. Recent advances of monocular 2D and 3D human pose estimation: A deep learning perspective. ACM Comput. Surv., 55(4):1-41, Nov. 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 365, + 288, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 288, + 399 + ], + "type": "text", + "content": "[36] H. C. Longuet-Higgins. A computer algorithm for reconstructing a scene from two projections. Nature, 293(5828):133-135, Sept. 1981. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 288, + 454 + ], + "type": "text", + "content": "[37] Simone Macri, Daniele Neri, Tommaso Ruberto, Violet Mwaffo, Sachit Butail, and Maurizio Porfiri. Three-dimensional scoring of zebrafish behavior unveils biological phenomena hidden by two-dimensional analyses. Sci Rep, 7(1):1-10, May 2017. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 288, + 510 + ], + "type": "text", + "content": "[38] Julieta Martinez, Rayat Hossain, Javier Romero, and James J. Little. 
A simple yet effective baseline for 3d human pose estimation. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 2640-2649. IEEE, Oct. 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "spans": [ + { + "bbox": [ + 48, + 511, + 288, + 566 + ], + "type": "text", + "content": "[39] Valsamis Ntouskos, Marta Sanzari, Bruno Cafaro, Federico Nardi, Fabrizio Natola, Fiora Pirri, and Manuel Ruiz. Component-wise modeling of articulated objects. In 2015 IEEE International Conference on Computer Vision (ICCV), pages 2327-2335. IEEE, Dec. 2015. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 567, + 288, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 288, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 288, + 600 + ], + "type": "text", + "content": "[40] Onur Özyesil, Vladislav Voroninski, Ronen Basri, and Amit Singer. A survey of structure from motion. Acta Numer., 26:305-364, May 2017. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 601, + 288, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 288, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 288, + 634 + ], + "type": "text", + "content": "[41] Brian L. Partridge, Tony Pitcher, J. Michael Cullen, and John Wilson. The three-dimensional structure of fish schools. *Behav Ecol Sociobiol*, 6(4):277-288, Mar. 1980. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 689 + ], + "type": "text", + "content": "[42] Georgios Pavlakos, Xiaowei Zhou, Konstantinos G. Derpanis, and Kostas Daniilidis. Coarse-to-fine volumetric prediction for single-image 3D human pose. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 7025-7034. IEEE, July 2017. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 713 + ], + "type": "text", + "content": "[43] Mukta Prasad, Andrew Fitzgibbon, Andrew Zisserman, and Luc Van Gool. Finding nemo: Deformable object class mod-" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "elling using curve matching. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pages 1720-1727. IEEE, IEEE, June 2010. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 162 + ], + "type": "text", + "content": "[44] Daniel Ramot, Brandon E. Johnson, Tommie L. Berry, Lucinda Carnell, and Miriam B. Goodman. The parallel worm tracker: A platform for measuring average speed and drug-induced paralysis in nematodes. 
PLoS ONE, 3(5):e2208, May 2008. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 217 + ], + "type": "text", + "content": "[45] Felix Salfelder, Omer Yuval, Thomas P Ilett, David C Hogg, Thomas Ranner, and Netta Cohen. Markerless 3D spatio-temporal reconstruction of microscopic swimmers from video. In Visual observation and analysis of Vertebrate And Insect Behavior 2020, 2021. 2, 3, 4, 7, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 252 + ], + "type": "text", + "content": "[46] Hinrich Schulenburg and Marie-Anne Félix. The natural biotic environment of Caenorhabditis elegans. Genetics, 206(1):55-86, May 2017. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 254, + 545, + 319 + ], + "type": "text", + "content": "[47] S.M. Seitz, B. Curless, J. Diebel, D. Scharstein, and R. Szeliski. A comparison and evaluation of multi-view stereo reconstruction algorithms. In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Volume 1 (CVPR'06), volume 1, pages 519-528. IEEE, IEEE, 2006. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 320, + 545, + 353 + ], + "type": "text", + "content": "[48] William Irvin Sellers and Eishi Hirasaki. Markerless 3D motion capture for animal locomotion studies. *Biology Open*, 3(7):656-668, June 2014. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 354, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 409 + ], + "type": "text", + "content": "[49] Michael Shaw, Haoyun Zhan, Muna Elmi, Vijay Pawar, Clara Essmann, and Mandayam A. Srinivasan. Three-dimensional behavioural phenotyping of freely moving C. elegans using quantitative light field microscopy. PLoS ONE, 13(7):e0200108, July 2018. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 411, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 545, + 464 + ], + "type": "text", + "content": "[50] Colin A. Simpfendorfer, Esben M. Olsen, Michelle R. Heupel, and Even Moland. Three-dimensional kernel utilization distributions improve estimates of space use in aquatic animals. Can. J. Fish. Aquat. Sci., 69(3):565-572, Mar. 2012. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 467, + 545, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 467, + 545, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 467, + 545, + 521 + ], + "type": "text", + "content": "[51] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5693-5703. IEEE, June 2019. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 522, + 545, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 522, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 307, + 522, + 545, + 566 + ], + "type": "text", + "content": "[52] Nicholas A Swierczek, Andrew C Giles, Catharine H Rankin, and Rex A Kerr. High-throughput behavioral analysis in C. elegans. Nat Methods, 8(7):592-598, June 2011. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 567, + 545, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 612 + ], + "type": "text", + "content": "[53] Raphael Sznitman, Manaswi Gupta, Gregory D. Hager, Paulo E. Arratia, and Josué Sznitman. Multi-environment model estimation for motility analysis of caenorhabditis elegans. PLoS ONE, 5(7):e11631, July 2010. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 613, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 668 + ], + "type": "text", + "content": "[54] Diane Theriault, Zheng Wu, Nickolay I Hristov, Sharon M Swartz, Kenneth S Breuer, Thomas H Kunz, and Margrit Betke. Reconstruction and analysis of 3D trajectories of Brazilian free-tailed bats in flight. In 20th Int. Conf. on Pattern Recognition, pages 1-4, 2010. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 713 + ], + "type": "text", + "content": "[55] Alexander Toshev and Christian Szegedy. DeepPose: Human pose estimation via deep neural networks. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 1653-1660. IEEE, June 2014. 2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12574" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 643 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[56] Bill Triggs, Philip F. McLauchlan, Richard I. Hartley, and Andrew W. Fitzgibbon. Bundle adjustment — a modern synthesis. In Vision Algorithms: Theory and Practice, pages 298–372. Springer Berlin Heidelberg, 2000. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 288, + 162 + ], + "type": "text", + "content": "[57] R. Tsai. A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses. IEEE J. Robot. Automat., 3(4):323–344, Aug. 1987. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 288, + 206 + ], + "type": "text", + "content": "[58] Sara Vicente and Lourdes Agapito. Balloon shapes: Reconstructing and deforming objects with volume from images. In 2013 International Conference on 3D Vision, pages 223-230. IEEE, IEEE, June 2013. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 208, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 208, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 49, + 208, + 288, + 239 + ], + "type": "text", + "content": "[59] J. Weng, P. Cohen, and M. Herniou. Camera calibration with distortion models and accuracy evaluation. IEEE Trans. Pattern Anal. Machine Intell., 14(10):965-980, 1992. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 241, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 241, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 49, + 241, + 288, + 296 + ], + "type": "text", + "content": "[60] Nils Wilhelm, Anna Vögele, Rebeka Zsoldos, Theresia Licka, Björn Krüger, and Jürgen Bernard. FuryExplorer: Visual-interactive exploration of horse motion capture data. In SPIE Proceedings, volume 9397, pages 148–162. SPIE, SPIE, Feb. 2015. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 297, + 288, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 297, + 288, + 352 + ], + "spans": [ + { + "bbox": [ + 49, + 297, + 288, + 352 + ], + "type": "text", + "content": "[61] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3D objects from images in the wild. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1-10. IEEE, June 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 353, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 353, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 353, + 288, + 396 + ], + "type": "text", + "content": "[62] Eviatar Yemini, Rex A. Kerr, and William R. Schafer. Tracking movement behavior of multiple worms on food. Cold Spring Harb Protoc, 2011(12):pdb.prot067025, Dec. 2011. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 397, + 288, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 397, + 288, + 430 + ], + "spans": [ + { + "bbox": [ + 49, + 397, + 288, + 430 + ], + "type": "text", + "content": "[63] Omer Yuval. The neuromechanical control of Caenorhabditis elegans head motor behaviour in 3D environments. PhD thesis, University of Leeds, 2022. 3, 7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 432, + 288, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 432, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 49, + 432, + 288, + 485 + ], + "type": "text", + "content": "[64] Sergey Zagoruyko, Adam Lerer, Tsung-Yi Lin, PedroO. Pinheiro, Sam Gross, Soumith Chintala, and Piotr Dollar. A MultiPath network for object detection. In Proceedings of the British Machine Vision Conference 2016. British Machine Vision Association, 2016. 
7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "type": "text", + "content": "[65] Liquun Zhu and Wei Weng. Catadioptric stereo-vision system for the real-time monitoring of 3D behavior in aquatic animals. *Physiology & Behavior*, 91(1):106-119, May 2007. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 532, + 288, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 532, + 288, + 586 + ], + "spans": [ + { + "bbox": [ + 49, + 532, + 288, + 586 + ], + "type": "text", + "content": "[66] Michael Zollhöfer, Patrick Stotko, Andreas Görtlitz, Christian Theobalt, Matthias Nießner, Reinhard Klein, and Andreas Kolb. State of the art on 3D reconstruction with RGB-d cameras. In Computer graphics forum, volume 37, pages 625-652. Wiley Online Library, 2018. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 588, + 288, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 588, + 288, + 643 + ], + "spans": [ + { + "bbox": [ + 49, + 588, + 288, + 643 + ], + "type": "text", + "content": "[67] Silvia Zuffi, Angjoo Kanazawa, David W. Jacobs, and Michael J. Black. 3D menagerie: Modeling the 3D shape and pose of animals. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6365-6373. IEEE, July 2017. 3" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12575" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_content_list.json b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6d2ed0f4002800e3bac584b8dafeb10a0f49f006 --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_content_list.json @@ -0,0 +1,1467 @@ +[ + { + "type": "text", + "text": "3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud", + "text_level": 1, + "bbox": [ + 150, + 130, + 818, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mingtao Feng $^{1*}$ Haoran Hou $^{1*}$ Liang Zhang $^{1\\dagger}$ Zijie Wu $^{2\\dagger}$ Yulan Guo $^{3}$ Ajmal Mian $^{4}$ $^{1}$ Xidian University, $^{2}$ Hunan University, $^{3}$ Sun Yat-Sen University, $^{4}$ The University of Western Australia", + "bbox": [ + 84, + 202, + 924, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 273, + 310, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In-depth understanding of a 3D scene not only involves locating/recognizing individual objects, but also requires to infer the relationships and interactions among them. 
However, since 3D scenes contain partially scanned objects with physical connections, dense placement, changing sizes, and a wide variety of challenging relationships, existing methods perform quite poorly with limited training samples. In this work, we find that the inherently hierarchical structures of physical space in 3D scenes aid in the automatic association of semantic and spatial arrangements, specifying clear patterns and leading to less ambiguous predictions. Thus, they well meet the challenges due to the rich variations within scene categories. To achieve this, we explicitly unify these structural cues of 3D physical spaces into deep neural networks to facilitate scene graph prediction. Specifically, we exploit an external knowledge base as a baseline to accumulate both contextualized visual content and textual facts to form a 3D spatial multimodal knowledge graph. Moreover, we propose a knowledge-enabled scene graph prediction module benefiting from the 3D spatial knowledge to effectively regularize semantic space of relationships. Extensive experiments demonstrate the superiority of the proposed method over current state-of-the-art competitors. Our code is available at https://github.com/HHrEtvP/SMKA.", + "bbox": [ + 75, + 287, + 473, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 678, + 209, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, much success has been achieved on 3D point cloud scene understanding such as semantic segmentation [9, 11, 15, 16, 21, 28, 29, 49] and object detection [10, 22, 25, 27, 43]. However, the 3D world is not only defined by objects but also by the relationships between objects. A 3D scene graph can abstract the environment as a graph where nodes represent objects and edges characterize the relationships between object pairs, which has already been recognized in recent seminal works [1, 30, 37, 38, 41, 46]. However, relationship graphs predicted by current methods are far from satisfactory due to the noisy, cluttered and par", + "bbox": [ + 75, + 700, + 470, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8492445031b3f950e3dd7e8bc7d3d1476da3cdd7e5c625d9f8de6c45f71b4669.jpg", + "image_caption": [ + "Figure 1. A brief overview of our method." + ], + "image_footnote": [], + "bbox": [ + 506, + 271, + 880, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tial nature of real 3D scans. Moreover, these data-driven methods treat sophisticated relationships in 3D space independently for classification using the geometric features proximity or fit, and are ignorant of commonsense or other useful 3D spatial cues beyond visual information. 3D objects in real scenes commonly have strongly structured regularities [33,39], whose semantic and spatial arrangements follow clear patterns, but still exhibit rich structural variations even within the scene category.", + "bbox": [ + 496, + 460, + 893, + 597 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The key observation is that 3D scene structures are inherently hierarchical [20]. By definition, an instance can have multiple supports, lamps are standing on a table, chairs are supported by the floor and only the floor does not have any support, and it is unlikely that a pillow is supporting a couch. 
Although relationships themselves cast no light on the human eyes, a growing body of works [14, 31] suggest that even very complex relationship information is reasoned hierarchically and systemically according to the role of the prefrontal cortex. Relationships, such as support, can be extracted rapidly, are hard to ignore, and influence other relationships in the perceptual process. For example, a TV and a sofa are related since they together serve the function of 'watching TV', but these two objects can be far apart in a scene. Relationships of this kind are much more difficult, if not possible, to infer based on geometric analysis alone. The model can relate the table easily which supports the TV and use the table as a bridge to predict the 'front' relationship with sofa, where table and sofa are all supported by the floor and relationships within them is intuitive.", + "bbox": [ + 496, + 597, + 895, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution", + "bbox": [ + 94, + 875, + 202, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author", + "bbox": [ + 96, + 887, + 218, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "9182", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The underlying hierarchical structures in 3D scenes are label free and reliable, and can hence play an essential role in scene understanding at no additional cost. Existing 3D scene graph prediction models [1, 30, 37, 38, 41, 46] are oblivious to the underlying structures in the point cloud scenes. The question is how to take this prior knowledge into consideration to make the 3D scene graph achieve higher accuracy? KISG [47] proposes a graph auto-encoder to learn a closed set and ground truth prior knowledge from relationship triplets in data for 3D scene graph prediction. Although KISG [47] takes note of knowledge, it captures relevant prior knowledge from text-only ground truth labels, which merely contain facts expressed by label descriptions while lacking complex but indispensable multimodal knowledge for 3D scene graph prediction. In addition, noises contained in the manually annotated labels are easily included in the knowledge base and affects the prediction of relationships.", + "bbox": [ + 76, + 90, + 472, + 347 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above problems, we show that the implicit hierarchical structure correlations between object pairs and their relationships can be explicitly represented by a knowledge base. As shown in Fig. 1, we propose a 3D spatial multimodal knowledge accumulation module to explicitly merge the hierarchical structures of 3D scenes into the network to strengthen the 3D scene graph prediction process. Firstly, we filter the external commonsense knowledge base, classify the hierarchical tokens for each node, and add new support edges to form the hierarchical symbolic knowledge graph for 3D scenes. 
Secondly, we retrieve the hierarchical token from the reconstructed symbolic knowledge graph for object instances in 3D scenes to build a visual graph, and extract contextual features for nodes and edges using a region-aware graph network. Finally, to bridge the heterogeneous gap between the symbolic knowledge and visual information, we propose a graph reasoning network to correlate 3D spatial visual contents of scenes with textual facts. Conditioned on the learned vision-relevant 3D spatial multimodal knowledge, we incorporate this network into the relationships prediction stage as extra guidance, which can effectively regularize the distribution of possible relationships of object pairs and thus make the predictions less ambiguous.", + "bbox": [ + 76, + 347, + 472, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are: 1) We are the first to explicitly unify the regular patterns of 3D physical spaces with the deep architecture to facilitate 3D scene graph prediction. 2) We propose a hierarchical symbolic knowledge construction module that exploits extra knowledge as the baseline to admit the hierarchical structure cues of 3D scene. 3) We introduce a knowledge-guided visual context encoding module to construct hierarchical visual graph and learn the contextualized features by a region-aware graph network. 4) We propose a 3D spatial multimodal knowledge accumulation module to regularize the semantic space of relationship prediction. Results show that the learned knowledge and proposed modules consistently boost 3D scene graph prediction performance.", + "bbox": [ + 76, + 696, + 472, + 893 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 498, + 89, + 640, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2D Image-based Scene Graph Generation. Scene graph was first proposed for image retrieval [17], and subsequently received increasing attention in the vision community to produce graphical abstractions of images. Mainstream approaches [5, 36, 42, 44, 45] follow a two-step pipeline that first detects objects followed by classification of the relationship for each object pair. However, research on scene graphs has focused primarily on 2D images, ignoring 3D spatial characteristics such as position and geometry, and with limited spatial coverage. Our proposed method extends 2D scene graphs to 3D spaces, where the scene representation, network architecture and training mechanism all have to be altered in fundamental ways to meet the challenges arising from learning 3D scene structures and relationships. More detailed discussions can be found in the survey [4].", + "bbox": [ + 496, + 108, + 893, + 333 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Knowledge Representation has been extensively studied to incorporate prior knowledge, e.g. DBPedia [2], ConceptNet [35], WordNet [24], VisualGenome [19] and hasPart [3], to aid numerous vision tasks [23]. Gao et al. [12] incorporated commonsense knowledge to learn the internal-external correlations among room and object entities for an agent to take proper decisions at each viewpoint. Zhang et al. [48] addressed the explainability of visual reasoning by introducing the explicit integration of external knowledge. Ding et al. [8] extracted the multimodal knowledge triplet to boost the performance of visual question answering. Chen et al. 
[6] constructed the prior knowledge of statistical correlations between object pairs and their relationships to address the issue of the uneven distribution over different relationships. Although previous studies have taken notice of knowledge in different vision tasks, they only implicitly mine the extra knowledge base or count the frequency of relationship pairs in datasets to strengthen the iterative message propagation between relationships and objects while ignoring the intrinsic properties of the data.", + "bbox": [ + 496, + 334, + 895, + 635 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Scene Graph Prediction in Point Clouds. With the recently proposed 3DSSG datasets containing 3D scene graph annotations [37], the community started to explore semantic relationship prediction in 3D real world data. SGPN [37, 38] is the first work to build a 3D scene graph using both objects and their interrelations as graph nodes. It then performs message propagation using graph convolutional networks. Kimera [30] proposed a 3D dynamic scene graph that captures metric and semantic aspects of a dynamic environment, where nodes represent spatial concepts at different levels of abstraction, and edges represent spatial-temporal relations among the nodes. EdgeGCN [46] exploits multi-dimensional edge features for explicit relationship modeling and explores two associated twinning interaction mechanisms for the independent evolution of scene graph representations. Wu et al. [41] proposed a method to incrementally build semantic scene graphs from a 3D environment given a sequence of", + "bbox": [ + 496, + 636, + 895, + 893 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "9183", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bdfdc2effd33af1603a1a213bab4bd1bcdf4e8dcf99263018eacc7818e8c103f.jpg", + "image_caption": [ + "Figure 2. Method pipeline. (a) A hierarchical symbolic knowledge is firstly reconstructed to exploit external knowledge as the baseline and admit the hierarchical structure cues of 3D scene. (b) We then build a hierarchical visual graph and learn the contextualized features by the region-aware graph network. (c) Finally, a 3D spatial multimodal knowledge is accumulated to strengthen relationship predictions." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 893, + 258 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RGB-D frames. KISG [47] uses the ground truth relationship triplets in the dataset to extract the prior knowledge and then fuses it in the scene graph prediction stage. One limitation of KISG [47] is that its relevant prior knowledge depends on the text-only dataset label while ignoring hierarchical and indispensable structures in the 3D scene for visual understanding. Our method differentiates itself from these related studies by exploring the 3D implicit structure pattern and introducing 3D spatial multimodal knowledge, which enables our model to predict relationships more accurately.", + "bbox": [ + 75, + 308, + 473, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. 
Methodology", + "text_level": 1, + "bbox": [ + 75, + 469, + 212, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Problem Formulation: The goal of 3D scene graph generation is to describe a given 3D point cloud scene $\\mathcal{I}$ with a semantic scene graph $\\mathcal{G} = \\{\\mathcal{V},\\mathcal{R}\\}$ , where $\\mathcal{V}$ and $\\mathcal{R}$ represent instance object nodes and their inner relationship edges respectively. $\\mathcal{G}$ forms a structured representation of the semantic content of the 3D scene. The nodes $\\mathcal{V}$ consist of a set of objects $O = \\{o_1,o_2,\\dots ,o_n\\}$ with object $o_i$ assigned to a certain class label $C$ , a corresponding set of bounding boxes $B = \\{b_{1},b_{2},\\dots ,b_{n}\\}$ with $b_{i}\\in \\mathbb{R}^{6}$ , and a set of relationship edges $\\mathcal{R} = \\{r_1,r_2,\\dots ,r_n\\}$ with each $r_i$ represents a predicate between a pair of objects. Our proposed model can be decomposed as:", + "bbox": [ + 75, + 491, + 473, + 674 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (\\mathcal {G} | \\mathcal {I}) = P \\left(\\mathcal {K} _ {s} | \\mathcal {I}\\right) P \\left(\\mathcal {G} _ {v} \\mid \\mathcal {K} _ {s}, \\mathcal {I}\\right) P \\left(\\mathcal {R}, \\mathcal {K} _ {m} \\mid \\mathcal {G} _ {v}, \\mathcal {K} _ {s}, \\mathcal {I}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 684, + 468, + 714 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this equation, the component $P(\\mathcal{K}_s|\\mathcal{I})$ collects all the symbolic entities from the datasets, filters the extra knowledge bases, and combines the hierarchical structure patterns of 3D scenes to construct the hierarchical symbolic knowledge $\\mathcal{K}_s$ . The component $P(\\mathcal{G}_v|\\mathcal{K}_s,\\mathcal{I})$ builds visual graphs for scenes under the guidance of knowledge $\\mathcal{K}_s$ , where contextual features for each node are extracted. Conditioned on the knowledge $\\mathcal{K}_s$ and visual graph $\\mathcal{G}_v$ , the component $P(\\mathcal{R},\\mathcal{K}_m|\\mathcal{G}_v,\\mathcal{K}_s,\\mathcal{I})$ accumulates the 3D spatial multimodal knowledge by correlating the knowledge $\\mathcal{K}_s$ with visual content and predicts relationships simultaneously. Fig. 2 illustrates the overall pipeline of the proposed model.", + "bbox": [ + 75, + 715, + 473, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Hierarchical Symbolic Knowledge Initialization", + "text_level": 1, + "bbox": [ + 498, + 308, + 890, + 337 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unlike KISG [47], we do not use a closed set or ground truth relationship triplets from labels to learn prior knowledge. Hence, we must make an additional choice of what knowledge sources to use and how to clean them. Prior knowledge of object classes can be reliable predictors of the likelihoods of physical support relationships. For instance, it is unlikely that a cup is supported by a wall while tables are almost always supported by the floor. Therefore, given a set of objects, we can classify each object based on whether it is directly supported by the floor. The result is a three-layer hierarchical structure about objects in the 3D scene. In particular, the first layer only contains the floor since it does not have any support. The second layer contains objects directly supported by the floor, e.g. bed, table, and sofa. 
The third layer contains the remaining objects usually supported by objects in the second layer, e.g. pillow, cup, and cushion.", + "bbox": [ + 496, + 342, + 893, + 584 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To exploit the regular structure patterns in 3D spaces and construct the scene graph hierarchically, we construct a hierarchical symbolic knowledge graph to guide the 3D spatial knowledge reasoning. Knowledge sources, such as ConceptNet [35] and DBPedia [2], are a valuable tool containing commonsense knowledge about the real world. In this work, we use ConceptNet as our external knowledge base which gives us more spatial relationships and common pairwise objects. While ConceptNet contains very useful information, it also includes some knowledge that is irrelevant to our model. To mitigate this issue, we limit the ConceptNet to common object categories in 3D point cloud scenes. We collect object categories from two widely-used 3D point cloud datasets, SUNRGBD [34] and Scannet [7], and then include edges that only include these objects. After filtering, we have a total of about 5,000 edges and 760 nodes.", + "bbox": [ + 496, + 585, + 895, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We denote the external knowledge graph as $\\mathcal{K}_e = \\{\\mathcal{V}_e, \\mathcal{E}_e\\}$ where $\\mathcal{V}_e$ and $\\mathcal{E}_e$ represent nodes and edges respectively. To merge the hierarchical structures in 3D spaces into the external knowledge graph and construct the hierarchical", + "bbox": [ + 498, + 825, + 895, + 887 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "9184", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "symbolic knowledge graph $\\kappa_{s}$ , we first use a pre-trained multi-layer perceptron (MLP) to classify the hierarchical tokens for each node in the external knowledge graph to distinguish the discrepancy among different layers of nodes. The hierarchical token of each node denotes its corresponding layer in the hierarchical structure. Each node is then initialized as the concatenation of its trainable hierarchical token and the word2vec (GloVe [26]) representation of the object category. Since the hierarchical structure of 3D spaces is built based on the physical support relationships between objects, we add additional edges representing support relationships between nodes to the external knowledge graph $\\kappa_{e}$ . Specifically, we define a new edge type: given two nodes $s_i$ and $s_j$ , we connect $s_i$ to $s_j$ using a support edge to represent the physical support relationship between $s_i$ and $s_j$ . By definition, each node in the hierarchical structure is supported by the node in neighboring layers. Therefore, we add a support edge between two correlated nodes in neighboring layers. Each edge is initialized as the trainable GloVe representation of its edge type. Finally, we formulate the updated external knowledge graph as hierarchical symbolic knowledge graph $\\kappa_{s}$ . Additional details can be found in supplementary.", + "bbox": [ + 76, + 90, + 472, + 424 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Knowledge-guided Visual Context Encoding", + "text_level": 1, + "bbox": [ + 76, + 428, + 452, + 444 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 
2, taking a scene point cloud with object instance annotations as input, we build a hierarchical visual graph $\\mathcal{G}_v = \\{\\mathcal{V}_v,\\mathcal{E}_v\\}$ where $\\nu_{v}$ and $\\mathcal{E}_v$ denotes object instances and edges of object pairs respectively, under the guidance of the hierarchical symbolic knowledge graph $\\kappa_{s}$ . Then, a region-aware graph network is employed to propagate node messages through the visual graph $\\mathcal{G}_v$ to learn the contextualized feature representation.", + "bbox": [ + 75, + 446, + 472, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Visual graph construction. We use Point Cloud Transformer [13] to extract spatial-aware visual features $f_{v}$ for each object instance. To encode the spatial features $f_{t}$ of each bounding box, we use an MLP to lift the parameters of each bounding box (i.e., center and size) to feature space. We assign the semantic features $f_{w}$ for each object using an embedding table initialized by GloVe [26]. Each node in the visual graph is initialized as the concatenation of features $f_{v}, f_{t}$ and $f_{w}$ . To capture the implicit structure of the point cloud scene, we route each node in the visual graph $\\mathcal{G}_{v}$ into its corresponding layer according to the hierarchical tokens in hierarchical symbolic knowledge graph $\\kappa_{s}$ . Then, we complete the edge set $\\mathcal{E}_{v}$ of visual graph $\\mathcal{G}_{v}$ by extracting potential physical relationships between nodes in the adjacent layers. Specifically, we add an edge representing physical support relationship between node pair in the visual graph $\\mathcal{G}_{v}$ if a support edge also exists between the corresponding nodes in the hierarchical symbolic knowledge graph $\\kappa_{s}$ . Similar to [46], we model the spatial interactions between node pairs and encode the initial edge embedding for node pairs using an MLP.", + "bbox": [ + 76, + 569, + 472, + 885 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Contextualized features encoding. Objects sharing the", + "bbox": [ + 76, + 885, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "same physical support are correlated since they have similar functional role in the environment and are generally in close proximity to each other. For instance, both pillow and clothes are usually supported by a bed. Therefore, we propose a region-aware graph network to jointly highlight the interrelated regions of each node in the visual graph $\\mathcal{G}_v$ and encode the hierarchical contexts of the input scene.", + "bbox": [ + 496, + 90, + 893, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the initial representations of nodes and edges in the visual graph $\\mathcal{G}_v$ , the region-aware graph network iteratively updates the hidden state $\\mathbf{h}_i^{o,t}$ of each node $v_i$ and $\\mathbf{h}_{ij}^{e,t}$ of each edge $(v_i,v_j)$ at each time step $t$ via message passing. Since the contextual regions around each node in the visual graph $\\mathcal{G}_v$ can be defined as other nodes sharing the same physical support with it, each node first gathers information from nodes within the same contextual region to enrich its current hidden state before propagating messages along the edges in the visual graph $\\mathcal{G}_v$ . 
Specifically, the enriched hidden state $\\tilde{\\mathbf{h}}_i^{o,t}$ of each node is:", + "bbox": [ + 496, + 198, + 893, + 364 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathbf {h}} _ {i} ^ {o, t} = \\mathbf {h} _ {i} ^ {o, t} + \\sum_ {j \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {j} ^ {o, t}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 376, + 893, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$N_{r}(i)$ contains nodes that share the same level support with node $v_{i}$ and $\\psi$ is a feed forward network for non-linear transformation. For edge $(v_{i},v_{j})$ , its enriched hidden state $\\tilde{\\mathbf{h}}_{ij}^{e,t}$ is computed by:", + "bbox": [ + 496, + 420, + 893, + 484 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathbf {h}} _ {i j} ^ {e, t} = \\mathbf {h} _ {i j} ^ {e, t} + \\sum_ {k \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {k} ^ {o, t}\\right) + \\sum_ {s \\in N _ {r} (j)} \\psi \\left(\\mathbf {h} _ {s} ^ {o, t}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 534, + 497, + 893, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After the feature representation enhancements, the message passing of nodes and edges can be formulated as:", + "bbox": [ + 496, + 541, + 890, + 571 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i} ^ {o, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}, \\mathbf {m} _ {i} ^ {o, t}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 579, + 893, + 598 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {h} _ {i j} ^ {e, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 619, + 890, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{m}_i^{o,t}$ and $\\mathbf{m}_{ij}^{e,t}$ are the incoming messages for updating each node and edge. The calculation of the message for each node is:", + "bbox": [ + 496, + 645, + 893, + 691 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {v} (i)} \\left(\\varphi_ {n} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}\\right)\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 699, + 890, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{N}_v(i)$ denotes the neighbor nodes of $v_{i}$ in the visual graph $\\mathcal{G}_v$ , $\\varphi_{n}$ and $\\varphi_{e}$ are two non-linear transformation for associated nodes and edges. 
For each edge, we transform the hidden state of subject and object node by two MLPs before fusing them to obtain the message:", + "bbox": [ + 496, + 742, + 890, + 819 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 827, + 890, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We take the final hidden states of nodes and edges as the contextual feature $\\mathbf{c}_i^o$ for each node $v_{i}\\in \\mathcal{V}_{v}$ and $\\mathbf{c}_{ij}^{e}$ for each edge $(v_{i},v_{j})\\in \\mathcal{E}_{v}$ .", + "bbox": [ + 496, + 854, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "9185", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Spatial Multimodal Knowledge Accumulation", + "text_level": 1, + "bbox": [ + 76, + 90, + 464, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Though our hierarchical symbolic knowledge graph $\\kappa_{s}$ can provide high-quality knowledge about the hierarchical structures of point cloud scene, this information is largely limited to symbolic knowledge that can only be explicitly expressed by text-relevant labels for relationship triplets. Therefore, we propose a novel schema to accumulate 3D spatial multimodal knowledge $\\kappa_{m}$ progressively from the visual context via a graph reasoning network. We then incorporate the learned multimodal knowledge $\\kappa_{m}$ and the contextual features to predict the possible relationships.", + "bbox": [ + 75, + 109, + 468, + 260 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reasoning on knowledge graph. Since the contextual features encode the implicit hierarchical structure patterns in 3D spaces, we design a graph reasoning network which utilizes the visual contextual features and textual facts from the hierarchical symbolic knowledge graph $\\kappa_{s}$ to accumulate 3D spatial multimodal knowledge $\\kappa_{m}$ by aligning the entities in the symbolic knowledge graph with related visual contextual features.", + "bbox": [ + 75, + 261, + 470, + 380 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The graph reasoning network generates context for 3D spatial multimodal knowledge $\\mathcal{K}_m$ , which is in the form of embeddings that capture the regular structure patterns in 3D scenes for each node and edge in the hierarchical symbolic knowledge graph $\\mathcal{K}_s$ . Given the contextual features of nodes and edges in visual graph $\\mathcal{G}_v$ , each node and edge in the graph reasoning network receives three inputs: (1) the trainable node or edge embedding in the hierarchical symbolic knowledge graph $\\mathcal{K}_s$ , (2) a $0/1$ indicator of whether this node or edge appears in the visual graph $\\mathcal{G}_v$ , (3) the contextual feature $\\mathbf{c}_i^o$ and $\\mathbf{c}_{ij}^e$ in the visual graph $\\mathcal{G}_v$ corresponding to this node or edge, missing nodes and edges are padded with zero vectors. The graph reasoning network uses message passing to perform reasoning on hierarchical symbolic knowledge graph $\\mathcal{K}_s$ . 
Specifically, at each time step $t$ , to calculate the hidden states $\\mathbf{d}_i^{o,t}$ for all nodes $s_i \\in \\mathcal{V}_s$ and $\\mathbf{d}_{ij}^{e,t}$ for all edges $(s_i, s_j) \\in \\mathcal{E}_s$ , each node and edge first gather messages from their neighbors through the graph structure then update their hidden states:", + "bbox": [ + 75, + 381, + 470, + 667 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {d} _ {i} ^ {o, t + 1} = G R U \\left(\\mathbf {d} _ {i} ^ {o, t}, \\mathbf {m} _ {i} ^ {o, t}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 676, + 468, + 696 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {d} _ {i j} ^ {e, t + 1} = G R U \\left(\\mathbf {d} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 718, + 468, + 739 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{m}_i^{o,t}$ and $\\mathbf{m}_{ij}^{e,t}$ are the incoming messages for nodes and edges. The incoming message for each node is", + "bbox": [ + 76, + 743, + 468, + 776 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {k} (i)} \\left(\\varphi_ {n} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\mathbf {d} _ {i j} ^ {e, t}\\right)\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 785, + 468, + 819 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $N_{k}(i)$ denotes the neighbor nodes of node $s_i$ in the knowledge graph $\\mathcal{K}_s$ . Similar to Eq. (7), the incoming message for each edge is", + "bbox": [ + 76, + 828, + 468, + 875 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\mathbf {d} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 883, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We take the sum of the stacked hidden states as the 3D spatial multimodal knowledge embedding $\\mathbf{b}_i^o$ for all nodes and $\\mathbf{b}_{ij}^{e}$ for all edges in the symbolic knowledge graph $\\kappa_{s}$ .", + "bbox": [ + 496, + 90, + 890, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Knowledge-enabled Scene Graph Prediction. To incorporate the 3D spatial multimodal knowledge $\\mathcal{K}_m$ into scene graph inference, we propose fusing the multimodal knowledge embedding with the contextual features in the visual graph to facilitate 3D scene graph prediction. Towards this goal, we utilize an MLP as object detection head to predict confident initial class guesses given the contextual node features. We then select the three most confident multimodal knowledge embeddings for each node. For edges in the visual graph, we select the three most confident object categories for the subject and object node based on the initial guesses. We then retrieve the multimodal knowledge embedding using the predicted subject and object categories. Since the multimodal knowledge embedding and the contextual features are in different feature spaces, we transform them by two MLPs $\\varphi_b$ and $\\varphi_c$ respectively before fusing them. 
For each node in the visual graph, we fuse the retrieved multimodal knowledge embedding $\\{\\mathbf{b}^k\\}_{k=1,2,3}$ and the contextual node feature $\\mathbf{c}_i^o$ to obtain the knowledge-enabled contextual feature $\\mathbf{f}_i^o$ :", + "bbox": [ + 496, + 136, + 893, + 441 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {f} _ {i} ^ {o} = \\phi \\left(\\varphi_ {c} \\left(\\mathbf {c} _ {i} ^ {o}\\right) + \\varphi_ {b} \\left(\\sum_ {k = 1} ^ {3} \\mathbf {b} ^ {k}\\right)\\right). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 450, + 890, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For each edge in the visual graph, the multimodal knowledge embedding is fused with its contextual feature in the same way as the node.", + "bbox": [ + 496, + 503, + 890, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Equipped with the 3D spatial multimodal knowledge-enabled contextual features $\\mathbf{f}_i^o$ for nodes and $\\mathbf{f}_{ij}^{e}$ for edges in the visual graph, we generate the scene graph by decoding the contextual features using a standard graph convolution network (GCN) [18]. We assume that each object pair can have a relationship (including none) and fully connect them as a graph where relationships are represented as edges. Each node is initialized by its contextual node feature $\\mathbf{f}_i^o$ , and each edge is initialized either by the contextual edge feature $\\mathbf{f}_{ij}^{e}$ or the contextual features of its subject and object nodes if the edge is not presented in the visual graph. The last part of the GCN consists of two detection heads for object and relationship classification. The object detection head takes the decoded node features as input to predict the object classification possibilities. The relationship prediction head first fuses the decoded subject and object node features with the decoded edge features, then predicts a discrete distribution over all possible relationship classes.", + "bbox": [ + 496, + 547, + 890, + 819 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss Function. We adopt the standard cross entropy loss for object and relationship classification in our model. Since the contextual node feature $\\mathbf{c}_i^o$ is used to predict the initial class guesses, we use a cross entropy loss $\\mathcal{L}_{init}^o$ for the initial detection. For the final prediction, we use two cross entropy", + "bbox": [ + 496, + 820, + 890, + 896 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "9186", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/e80cfd67ae8d81971ea27900b24278d2e23f969be222d50115ec29ab31b8ac10.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsPredClsSGClsSGDet
R@50/100mR@50/100R@50/100mR@50/100R@50/100mR@50/100
3D+IMP [42]48.15 / 48.7221.56 / 21.8517.41 / 17.899.06 / 9.2324.54 / 24.5721.71 / 21.72
3D+MOTIFS [45]52.43 / 53.3724.35 / 24.5218.34 / 18.579.74 / 9.8626.58 / 26.5924.12 / 24.17
3D+VCTree [36]53.12 / 54.3824.75 / 24.9119.93 / 20.2410.34 / 10.5527.58 / 27.6224.92 / 24.94
3D+KERN [6]54.74 / 56.5325.21 / 25.8321.41 / 21.7811.02 / 11.3627.75 / 27.7824.03 / 24.05
3D+Schemata [32]58.13 / 59.1142.11 / 42.8328.72 / 28.9726.72 / 27.0528.12 / 28.1325.29 / 25.30
3D+HetH [40]58.24 / 58.7542.53 / 42.7428.83 / 29.0526.68 / 26.8528.17 / 28.1825.31 / 25.32
Ours68.32 / 69.4966.54 / 66.9231.50 / 31.6430.29 / 30.5629.41 / 29.4425.35 / 25.36
", + "bbox": [ + 156, + 88, + 816, + 219 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/f92ec95e66176bd4201bd39b7cfdd1688b4b6763c74acadaa01bb246b07acf34.jpg", + "table_caption": [ + "Table 1. Comparison with state-of-the-art 2D scene graph prediction methods re-implemented to work on 3DSSG dataset." + ], + "table_footnote": [], + "table_body": "
MethodsPredClsSGClsSGDet
R@50/100mR@50/100R@50/100mR@50/100R@50/100mR@50/100
SGPN [37]57.71 / 58.0538.12 / 38.6728.39 / 28.7422.23 / 22.57- / -- / -
EdgeGCN [46]58.42 / 59.1138.84 / 39.3528.58 / 28.9322.67 / 23.33- / -- / -
KISG [47]64.47 / 64.9363.19 / 63.5229.46 / 29.6528.20 / 28.64- / -- / -
Ours68.32 / 69.4966.54 / 66.9231.50 / 31.6430.29 / 30.5629.41 / 29.4425.35 / 25.36
", + "bbox": [ + 165, + 243, + 803, + 339 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. Comparison with 3D scene graph prediction methods on the 3DSSG dataset.", + "bbox": [ + 230, + 340, + 736, + 356 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "losses $\\mathcal{L}_{final}^{o}$ and $\\mathcal{L}_{final}^{r}$ for the object and relationship classification:", + "bbox": [ + 75, + 361, + 470, + 390 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {f i n a l}} = w _ {o} \\mathcal {L} _ {\\text {f i n a l}} ^ {o} + w _ {r} \\mathcal {L} _ {\\text {f i n a l}} ^ {r} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 398, + 470, + 417 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $w_{o}$ and $w_{r}$ are the weights for object and relation loss. In our experiment, we set $w_{o}$ to 0.75 and $w_{r}$ to 1. Our final loss function can be formulated as:", + "bbox": [ + 75, + 422, + 470, + 468 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\text {i n i t}} ^ {o} + \\mathcal {L} _ {\\text {f i n a l}} \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 477, + 470, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 497, + 210, + 515 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experimental Configuration", + "text_level": 1, + "bbox": [ + 76, + 518, + 331, + 535 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate our model on 3DSSG dataset [37]. Following [47], we select 160 object categories and 27 relationship classes for detection. We compare our model with others in three standard tasks proposed in [42]. (1) Predicate Classification (PredCls): Given the ground truth 3D bounding boxes and their corresponding semantic labels, our model classifies the relationship between each object pair. (2) Scene Graph Classification (SGCls): Given the ground truth 3D bounding boxes, our model predicts the relationships as well as the object categories jointly. (3) Scene Graph Generation (SGDet): Given the raw point cloud, our model detects 3D objects, their semantic information, as well as their relationships in an end-to-end manner. Following existing 2D and 3D scene graph generation works, we adopt the constrained evaluation metric recall@K (R@K) and mean recall@K (mR@K).", + "bbox": [ + 75, + 539, + 470, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our model is implemented in PyTorch, and trained using one NVIDIA GTX TITAN X GPU for 40 epochs with the ADAM optimizer. We use an initial learning rate of 0.0001, weight decay of 0.5, and mini-batch of 4. After 15, 25, and 40 epochs, we multiply the learning rate by 0.1. We adopt VoteNet [27] as the 3D object detection backbone to generate an initial set of 256 object candidates in the SGDet task. The Point Cloud Transformer is pre-trained on the 3DSSG dataset using the same settings in [13].", + "bbox": [ + 75, + 766, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. 
Comparison to State-of-the-Art", + "text_level": 1, + "bbox": [ + 498, + 359, + 777, + 376 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first compare our model with the following state-of-art 2D image scene graph generation models, modified to fit the 3DSSG dataset: IMP [42], MOTIFS [45] and VC-Tree [36] which creatively devise various message passing methods for improving graph representations. KERN [6], Schemata [32], and HetH [40] incorporate statistical priors and learning-based commonsense knowledge into the scene graph prediction. Therefore, we include these models to illustrate the superiority of the 3D spatial multimodal knowledge about the implicit hierarchical structure correlations between object pairs in the 3D scene.", + "bbox": [ + 496, + 380, + 893, + 546 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our results in Tab. 1 lead to a few key observations: (1) Our model consistently outperforms all the existing approaches on all metrics and achieve $3.57\\%$ boost on mR@50 in SGCls task and $10.08\\%$ boost on R@50 in PredCls task. This indicates that leveraging regular patterns of 3D physical spaces is beneficial for scene graph prediction. (2) Our model outperforms traditional message passing model IMP and MOTIFS. Furthermore, our method achieves considerable improvement when compared to VCTree. (3) Compared to Schemata, our model achieves an improvement of $2.78\\%$ and $10.19\\%$ on R@50 in SGCls and PredCls, suggesting that our multimodal knowledge embedding is a better approach compared to the class-level prototypical representations learned from perceptual outputs in Schemata. (4) Compared with KERN and HetH, our proposed hierarchical structure of 3D spaces is superior to the graph structure they adopted to represent the input as our model outperforms them with a significant margin. (5) The performance has been saturated in the SGDet task. This is mainly because object detection performance on this dataset is a bottleneck that limits the performance.", + "bbox": [ + 496, + 547, + 895, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also compare the performance of our model with the state-of-the-art 3D point cloud-based scene graph predic", + "bbox": [ + 498, + 863, + 893, + 893 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "9187", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a3ba697e93c79f947fbd19d8b97f836a4c6fe9331d965fdf1589dd897920098a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsR@50/100mR@50/100
Knowledge K_s
w/o Hierarchical Tokens30.47 / 30.6728.94 / 29.19
w/o Support Edge30.55 / 30.7429.17 / 29.47
w/o Both28.41 / 28.4727.13 / 27.52
Visual Context Encoding
Gv replaced w/ Gfc28.17 / 28.3226.28 / 26.29
w/o RaGN26.43 / 26.5724.23 / 24.36
RaGN replaced w/ GCN31.03 / 31.2129.67 / 29.88
Knowledge K_m
w/o b_i^o and b_ij^e26.27 / 26.3522.93 / 23.18
w/o c_i^o and c_ij^e as input28.14 / 28.3125.05 / 25.31
Ours31.50 / 31.6430.29 / 30.56
", + "bbox": [ + 107, + 88, + 436, + 321 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion models to demonstrate the effectiveness of 3D spatial multimodal knowledge. We include several existing works such as SGPN [37], EdgeGCN [46] and KISG [47] since they all report competitive results. SGPN and EdgeGCN exploit multi-dimensional edge features for explicit relationship modeling whereas KISG learns a group of class-dependent prototypical representations for each semantic class. As shown in Tab. 2, our model dominantly surpasses all methods. Benefiting from the hierarchical structure of 3D spaces, our model is able to reason complex relationship hierarchically and systematically. Compared to SGPN and EdgeGCN, our model improves the R@50 by $2.92\\%$ and $9.90\\%$ in SGCls and PredCls tasks. We can also see that our method outperforms KISG by $2.04\\%$ on R@50 in SGCls. KISG captures class-related priors in the scene from text-only ground truth labels. Such knowledge cannot efficiently represent diverse relationships and complex 3D environments. In contrast, our model extracts indispensable 3D spatial multimodal knowledge which benefits the scene graph prediction.", + "bbox": [ + 75, + 358, + 472, + 660 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 666, + 230, + 683 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We only report the performance results in the Recall and mean Recall metrics on the SGCs task for ablation studies. The results are shown in Tab. 3.", + "bbox": [ + 75, + 686, + 468, + 729 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Hierarchical symbolic knowledge. We first look at the hierarchical symbolic knowledge graph $\\kappa_{s}$ to investigate its effectiveness. Specifically, we find that using ConceptNet without classifying the hierarchical tokens or adding support edges leads to sub-optimal performance. Furthermore, using ConceptNet without any augmentation drops the performance significantly, indicating that both the hierarchical tokens and support edges are crucial elements of the hierarchical structures in 3D scene.", + "bbox": [ + 75, + 731, + 470, + 866 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Knowledge-guided visual context encoding. Next, we analyse the knowledge-guided visual context encoding mod", + "bbox": [ + 76, + 867, + 470, + 898 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3e9b7d32cf9049d67971221692f00c3d6bc227f543ee94fd2c83bf247e003f8f.jpg", + "table_caption": [ + "Table 3. Quantitative results of different module configurations on the SGCls task." + ], + "table_footnote": [], + "table_body": "
VariantsPredClsSGCls
R@50mR@50R@50mR@50
Gr62.7458.2528.1727.28
Gt68.4166.5931.5930.35
Gv(original)68.3266.5431.5030.29
", + "bbox": [ + 532, + 89, + 857, + 170 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e017f8e38feb4e81ee83d1ba928d1da22712e7a7007b78b95a6a95a9ff0f3f13.jpg", + "table_caption": [ + "Table 4. Comparison of different variants of the visual graph." + ], + "table_footnote": [], + "table_body": "
MethodsHeadBodyTail
SGPN [37]39.4223.6413.03
EdgeGCN [46]39.5123.8513.15
KISG [47]40.3624.5613.61
Ours44.2326.2714.73
", + "bbox": [ + 568, + 196, + 821, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. The R@50 metric of biased relationship prediction on the SGCIs task.", + "bbox": [ + 498, + 281, + 890, + 309 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ule. We can see that replacing the hierarchical visual graph $\\mathcal{G}_v$ with a fully-connected graph $\\mathcal{G}_{fc}$ decreases the performance by a margin of $3.33\\%$ on R@50, indicating that the hierarchical structure is superior to a plain fully-connected graph in terms of modeling context. Furthermore, removing the subsequent region-aware graph network (RaGN) and directly fusing the multimodal knowledge embedding with the initial representation of each node and edge in the visual graph negatively impacts the performance on all metrics. Replacing the region-aware graph network with a standard graph convolution network also hurts the performance.", + "bbox": [ + 496, + 315, + 893, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D spatial multimodal knowledge accumulation. Lastly, we examine the accumulated multimodal knowledge $\\mathcal{K}_m$ to learn about how $\\mathcal{K}_m$ and rest of the model interact. We first see how much of the improvement comes from the 3D spatial multimodal knowledge $\\mathcal{K}_m$ . As shown in Tab. 3, the multimodal knowledge embedding significantly improves the R@50 and mR@50 by $5.23\\%$ and $7.36\\%$ respectively. In addition, dropping the contextual feature input $\\mathbf{c}_i^o$ for nodes and $\\mathbf{c}_{ij}^{e}$ for edges in the graph reasoning network decreases the performance by a margin of $3.36\\%$ and $5.24\\%$ on R@50 and mR@50 in SGCls. This drop in performance indicates that the contextual feature plays a pivotal role in bridging the heterogeneous gap between the symbolic knowledge and visual information.", + "bbox": [ + 496, + 481, + 893, + 691 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Further Analysis", + "text_level": 1, + "bbox": [ + 500, + 698, + 668, + 714 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysis on the hierarchical structure of 3D spaces. To validate the potential of the hierarchical visual graph $\\mathcal{G}_v$ in capturing the inherent hierarchical structure of a 3D scene, we design two visual graph variants and compare them to the hierarchical visual $\\mathcal{G}_v$ : (1) Instead of using the hierarchical symbolic knowledge graph $\\kappa_s$ , we build a ground truth graph $\\mathcal{G}_t$ based on the ground truth labels for support relations. In particular, each edge in $\\mathcal{G}_t$ represents the ground truth support relationship of the input scene. (2) We also design a randomly connected graph $\\mathcal{G}_r$ , where we keep all of the nodes the same but randomize the edges that connect them. As shown in Tab. 4, both $\\mathcal{G}_v$ and $\\mathcal{G}_t$ outperform $\\mathcal{G}_r$", + "bbox": [ + 496, + 718, + 893, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "9188", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/91d8452bce554c9b263e50fd3cec6a362c62fd0729918728c32ceb9937a8def1.jpg", + "image_caption": [ + "Figure 3. Comparison of our model and KISG on the SGCs task when trained with noisy labels." + ], + "image_footnote": [], + "bbox": [ + 114, + 88, + 433, + 227 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "with a significant margin on all metrics. 
More importantly, we observe that $\\mathcal{G}_t$ and $\\mathcal{G}_v$ perform mostly similar while $\\mathcal{G}_t$ slightly outperforms $\\mathcal{G}_v$ . The results confirm that the hierarchical visual graph $\\mathcal{G}_v$ is one of the more optimal ways of extracting the hierarchical structure patterns of 3D spaces.", + "bbox": [ + 75, + 260, + 472, + 335 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Robustness of 3D spatial multimodal knowledge. Additionally, we investigate the robustness of the 3D spatial multimodal knowledge $\\mathcal{K}_m$ by training our model with noisy labels. Specifically, we add different proportions of noises into the 3DSSG training set by replacing part of ground truth relationships with the randomly selected wrong relationships for input scenes. The performance of our model and KISG [47] on the SGCs task is reported in Fig. 3. We can see that, the performance of KISG decreases drastically while ours decreases slowly with increasing noise rate. Under the $30\\%$ noise rate condition, our model improves the R@50 metric by about $6.89\\%$ over KISG, which indicates that our model achieves improved robustness over KISG. The main reason is that KISG captures relevant prior knowledge from text-only ground truth labels and noises contained in the labels are easily included in their knowledge base and affects the prediction of relationships. Different with KISG, our model leverages the inherently hierarchical structures of 3D scenes and accumulates multimodal knowledge which is both label free and reliable.", + "bbox": [ + 75, + 335, + 473, + 636 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Long-tail analysis. We also investigate how our model performs on the long-tail part of the dataset. To do this, we order all the relationships based on the frequency of each relationship category occurring in triplets. We select the 5 most common relationship categories as the head, the 5 least common relationship categories as the tail, and the rest of the categories as the body. Tab. 5 reports the R@50 metric on each long-tail category groups of our model. Moreover, our model achieves best performance when evaluating the R@50 metric on the tail relationship categories, which shows that our model has the ability to mitigate the effect of sample imbalance. The main reason is that the hierarchical structures can be extracted accurately which influence other relationships in the prediction process.", + "bbox": [ + 75, + 638, + 473, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5. Qualitative Results", + "text_level": 1, + "bbox": [ + 76, + 849, + 261, + 866 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We visualize intermediate results in Fig. 4(a-c). 
We can see that both the hierarchical visual graph $\\mathcal{G}_v$ and 3D scene", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f391d0b44e1c763cc3b12171ec320e87da05b2d1452fa24e2ed50d9464e6783f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 88, + 751, + 159 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e53a70bc74cbd4d472bf57013b28d150ff1f3b03de0c8bc31ac7df70d5eadc8c.jpg", + "image_caption": [ + "(a) Input scene", + "(b) Hierarchical visual graph" + ], + "image_footnote": [], + "bbox": [ + 501, + 165, + 678, + 308 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0452d5a6262ed068e721491e14b7f1c5bec28cc63e4aa23e55f642c389483f70.jpg", + "image_caption": [ + "(c) 3D scene graph", + "Figure 4. Visualizations of our predicted scene graph on 3DSSG dataset. Red indicates the misclassified objects or relationships." + ], + "image_footnote": [], + "bbox": [ + 678, + 165, + 888, + 310 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "graph $\\mathcal{G}$ are well constructed. However, our model incorrectly classifies the relationship between Window1 and Floor. This is mainly because our model fails to extract discriminative features for Window1 as there are few points within its bounding box. The token of Window1 is classified incorrectly in the second layer while it should be in the third layer. We provide more visualization samples in the supplementary.", + "bbox": [ + 496, + 361, + 893, + 467 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 473, + 619, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We proposed a method for 3D scene graph prediction from raw point clouds. Our method explores the regular patterns of 3D physical spaces into the deep network to facilitate 3D scene graph prediction. Hierarchical symbolic knowledge is first reconstructed via exploiting external knowledge as the baseline to admit the hierarchical structure cues of a 3D scene. A knowledge-guided visual context encoding module then builds a hierarchical visual graph and learns the contextualized features by a region-aware graph network. Finally, a 3D spatial multimodal knowledge accumulation module is proposed to regularize the semantic space of relationship prediction. Extensive experiments on the 3DSSG dataset show that our method outperforms existing state-of-the-art and can mitigate the effect of data imbalance and label noises. In the future, we plan to exploit the attributes of 3D objects to build richer knowledge graphs to improve the prediction performances of attribute-focused relationships, such as same symmetric as and same texture as.", + "bbox": [ + 496, + 494, + 895, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Acknowledgments", + "text_level": 1, + "bbox": [ + 500, + 773, + 679, + 790 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by the National Natural Science Foundation of China under Grant 62003253, Grant 61973106, Grant U2013203, Grant U21A20482 and Grant U20A20185. 
Professor Ajmal Mian is the recipient of an Australian Research Council Future Fellowship Award (project number FT210100268) funded by the Australian Government.", + "bbox": [ + 496, + 795, + 893, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "9189", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5664-5673, 2019. 1, 2", + "[2] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. 2007. 2, 3", + "[3] Sumithra Bhakthavatsalam, Kyle Richardson, Niket Tandon, and Peter Clark. Do dogs have whiskers? a new knowledge base of haspart relations. arXiv preprint arXiv:2006.07510, 2020. 2", + "[4] Xiaojun Chang, Pengzhen Ren, Pengfei Xu, Zhihui Li, Xiaojiang Chen, and Alexander G Hauptmann. A comprehensive survey of scene graphs: Generation and application. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 2", + "[5] Long Chen, Hanwang Zhang, Jun Xiao, Xiangnan He, Shiliang Pu, and Shih-Fu Chang. Counterfactual critic multi-agent training for scene graph generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4613-4623, 2019. 2", + "[6] Tianshui Chen, Weihao Yu, Riquan Chen, and Liang Lin. Knowledge-embedded routing network for scene graph generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6163–6171, 2019. 2, 6", + "[7] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 3", + "[8] Yang Ding, Jing Yu, Bang Liu, Yue Hu, Mingxin Cui, and Qi Wu. Mukea: Multimodal knowledge extraction and accumulation for knowledge-based visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5089-5098, 2022. 2", + "[9] Zijin Du, Hailiang Ye, and Feilong Cao. A novel local-global graph convolutional method for point cloud semantic segmentation. IEEE Transactions on Neural Networks and Learning Systems, 2022. 1", + "[10] Mingtao Feng, Syed Zulqarnain Gilani, Yaonan Wang, Liang Zhang, and Ajmal Mian. Relation graph network for 3d object detection in point clouds. IEEE Transactions on Image Processing, 30:92-107, 2020. 1", + "[11] Mingtao Feng, Liang Zhang, Xuefei Lin, Syed Zulqarnain Gilani, and Ajmal Mian. Point attention network for semantic segmentation of 3d point clouds. Pattern Recognition, 107:107446, 2020. 1", + "[12] Chen Gao, Jinyu Chen, Si Liu, Luting Wang, Qiong Zhang, and Qi Wu. Room-and-object aware knowledge reasoning for remote embodied referring expression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3064–3073, 2021. 
2" + ], + "bbox": [ + 78, + 114, + 470, + 890 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021. 4, 6", + "[14] Alon Hafri and Chaz Firestone. The perception of relations. Trends in Cognitive Sciences, 2021. 1", + "[15] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1", + "[16] Maximilian Jaritz, Tuan-Hung Vu, Raoul De Charette, Emilie Wirbel, and Patrick Pérez. Cross-modal learning for domain adaptation in 3d semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1", + "[17] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3668–3678, 2015. 2", + "[18] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 5", + "[19] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123(1):32-73, 2017. 2", + "[20] Manyi Li, Akshay Gadi Patil, Kai Xu, Siddhartha Chaudhuri, Owais Khan, Ariel Shamir, Changhe Tu, Baoquan Chen, Daniel Cohen-Or, and Hao Zhang. Grains: Generative recursive autoencoders for indoor scenes. ACM Transactions on Graphics (TOG), 38(2):1-16, 2019. 1", + "[21] Mengtian Li, Yuan Xie, Yunhang Shen, Bo Ke, Ruizhi Qiao, Bo Ren, Shaohui Lin, and Lizhuang Ma. Hybridcr: Weakly-supervised 3d point cloud semantic segmentation via hybrid contrastive regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14930-14939, 2022. 1", + "[22] Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2949-2958, 2021. 1", + "[23] Kenneth Marino, Xinlei Chen, Devi Parikh, Abhinav Gupta, and Marcus Rohrbach. Krisp: Integrating implicit and symbolic knowledge for open-domain knowledge-based vqa. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14111-14121, 2021. 2", + "[24] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 2", + "[25] Xuran Pan, Zhuofan Xia, Shiji Song, Li Erran Li, and Gao Huang. 3d object detection with pointformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7463-7472, 2021. 1", + "[26] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. 
In" + ], + "bbox": [ + 501, + 92, + 893, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9190", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014. 4", + "[27] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep hough voting for 3d object detection in point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9277-9286, 2019. 1, 6", + "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1", + "[29] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. Point-net++ deep hierarchical feature learning on point sets in a metric space. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 5105-5114, 2017. 1", + "[30] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From slam to spatial perception with 3d dynamic scene graphs. The International Journal of Robotics Research, 40(12-14):1510-1546, 2021. 1, 2", + "[31] Morteza Sarafyazd and Mehrdad Jazayeri. Hierarchical reasoning by neural circuits in the frontal cortex. Science, 364(6441), 2019. 1", + "[32] Sahand Sharifzadeh, Sina Moayed Baharlou, and Volker Tresp. Classification by attention: Scene graph classification with prior knowledge. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 5025-5033, 2021. 6", + "[33] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In European conference on computer vision, pages 746-760. Springer, 2012. 1", + "[34] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 567-576, 2015. 3", + "[35] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 2, 3", + "[36] Kaihua Tang, Hanwang Zhang, Baoyuan Wu, Wenhan Luo, and Wei Liu. Learning to compose dynamic tree structures for visual contexts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6619-6628, 2019. 2, 6", + "[37] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3961-3970, 2020. 1, 2, 6, 7", + "[38] Johanna Wald, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs with instance embeddings. International Journal of Computer Vision, pages 1-22, 2022. 1, 2" + ], + "bbox": [ + 78, + 90, + 470, + 878 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[39] Kai Wang, Yu-An Lin, Ben Weissmann, Manolis Savva, Angel X Chang, and Daniel Ritchie. Planit: Planning and instantiating indoor scenes with relation graph and spatial prior networks. ACM Transactions on Graphics (TOG), 38(4):1-15, 2019. 
1", + "[40] Wenbin Wang, Ruiping Wang, Shiguang Shan, and Xilin Chen. Sketching image gist: Human-mimetic hierarchical scene graph generation. In European Conference on Computer Vision, pages 222-239. Springer, 2020. 6", + "[41] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. Scenegraphfusion: Incremental 3d scene graph prediction from rgb-d sequences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7515-7525, 2021. 1, 2", + "[42] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5410–5419, 2017. 2, 6", + "[43] Qiangeng Xu, Yiqi Zhong, and Ulrich Neumann. Behind the curtain: Learning occluded shapes for 3d object detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2893-2901, 2022. 1", + "[44] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. In Proceedings of the European conference on computer vision (ECCV), pages 670-685, 2018. 2", + "[45] Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5831-5840, 2018. 2, 6", + "[46] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9705-9715, 2021. 1, 2, 4, 6, 7", + "[47] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6, 7, 8", + "[48] Yifeng Zhang, Ming Jiang, and Qi Zhao. Explicit knowledge incorporation for visual reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1356-1365, 2021. 2", + "[49] Na Zhao, Tat-Seng Chua, and Gim Hee Lee. Few-shot 3d point cloud semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8873-8882, 2021. 1" + ], + "bbox": [ + 501, + 92, + 893, + 768 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "9191", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_model.json b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_model.json new file mode 100644 index 0000000000000000000000000000000000000000..06159f97b76d275e049f20974b9ff0d6f622f306 --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_model.json @@ -0,0 +1,2024 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.151, + 0.131, + 0.82, + 0.175 + ], + "angle": 0, + "content": "3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.203, + 0.926, + 0.242 + ], + "angle": 0, + "content": "Mingtao Feng\\(^{1*}\\) Haoran Hou\\(^{1*}\\) Liang Zhang\\(^{1\\dagger}\\) Zijie Wu\\(^{2\\dagger}\\) Yulan Guo\\(^{3}\\) Ajmal Mian\\(^{4}\\) \n\\(^{1}\\)Xidian University, \\(^{2}\\)Hunan University, \\(^{3}\\)Sun Yat-Sen University, \\(^{4}\\)The University of Western Australia" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.275, + 0.312, + 0.289 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.289, + 0.474, + 0.665 + ], + "angle": 0, + "content": "In-depth understanding of a 3D scene not only involves locating/recognizing individual objects, but also requires to infer the relationships and interactions among them. However, since 3D scenes contain partially scanned objects with physical connections, dense placement, changing sizes, and a wide variety of challenging relationships, existing methods perform quite poorly with limited training samples. In this work, we find that the inherently hierarchical structures of physical space in 3D scenes aid in the automatic association of semantic and spatial arrangements, specifying clear patterns and leading to less ambiguous predictions. Thus, they well meet the challenges due to the rich variations within scene categories. To achieve this, we explicitly unify these structural cues of 3D physical spaces into deep neural networks to facilitate scene graph prediction. Specifically, we exploit an external knowledge base as a baseline to accumulate both contextualized visual content and textual facts to form a 3D spatial multimodal knowledge graph. Moreover, we propose a knowledge-enabled scene graph prediction module benefiting from the 3D spatial knowledge to effectively regularize semantic space of relationships. Extensive experiments demonstrate the superiority of the proposed method over current state-of-the-art competitors. Our code is available at https://github.com/HHrEtvP/SMKA." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.679, + 0.21, + 0.695 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.701, + 0.472, + 0.868 + ], + "angle": 0, + "content": "In recent years, much success has been achieved on 3D point cloud scene understanding such as semantic segmentation [9, 11, 15, 16, 21, 28, 29, 49] and object detection [10, 22, 25, 27, 43]. However, the 3D world is not only defined by objects but also by the relationships between objects. A 3D scene graph can abstract the environment as a graph where nodes represent objects and edges characterize the relationships between object pairs, which has already been recognized in recent seminal works [1, 30, 37, 38, 41, 46]. However, relationship graphs predicted by current methods are far from satisfactory due to the noisy, cluttered and par" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.272, + 0.882, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.443, + 0.825, + 0.457 + ], + "angle": 0, + "content": "Figure 1. A brief overview of our method." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.461, + 0.895, + 0.598 + ], + "angle": 0, + "content": "tial nature of real 3D scans. 
Moreover, these data-driven methods treat sophisticated relationships in 3D space independently for classification using the geometric features proximity or fit, and are ignorant of commonsense or other useful 3D spatial cues beyond visual information. 3D objects in real scenes commonly have strongly structured regularities [33,39], whose semantic and spatial arrangements follow clear patterns, but still exhibit rich structural variations even within the scene category." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.896, + 0.901 + ], + "angle": 0, + "content": "The key observation is that 3D scene structures are inherently hierarchical [20]. By definition, an instance can have multiple supports, lamps are standing on a table, chairs are supported by the floor and only the floor does not have any support, and it is unlikely that a pillow is supporting a couch. Although relationships themselves cast no light on the human eyes, a growing body of works [14, 31] suggest that even very complex relationship information is reasoned hierarchically and systemically according to the role of the prefrontal cortex. Relationships, such as support, can be extracted rapidly, are hard to ignore, and influence other relationships in the perceptual process. For example, a TV and a sofa are related since they together serve the function of 'watching TV', but these two objects can be far apart in a scene. Relationships of this kind are much more difficult, if not possible, to infer based on geometric analysis alone. The model can relate the table easily which supports the TV and use the table as a bridge to predict the 'front' relationship with sofa, where table and sofa are all supported by the floor and relationships within them is intuitive." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.204, + 0.888 + ], + "angle": 0, + "content": "*Equal contribution" + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.888, + 0.22, + 0.9 + ], + "angle": 0, + "content": "† Corresponding author" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.22, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9182" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.348 + ], + "angle": 0, + "content": "The underlying hierarchical structures in 3D scenes are label free and reliable, and can hence play an essential role in scene understanding at no additional cost. Existing 3D scene graph prediction models [1, 30, 37, 38, 41, 46] are oblivious to the underlying structures in the point cloud scenes. The question is how to take this prior knowledge into consideration to make the 3D scene graph achieve higher accuracy? KISG [47] proposes a graph auto-encoder to learn a closed set and ground truth prior knowledge from relationship triplets in data for 3D scene graph prediction. Although KISG [47] takes note of knowledge, it captures relevant prior knowledge from text-only ground truth labels, which merely contain facts expressed by label descriptions while lacking complex but indispensable multimodal knowledge for 3D scene graph prediction. In addition, noises contained in the manually annotated labels are easily included in the knowledge base and affects the prediction of relationships." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.348, + 0.473, + 0.695 + ], + "angle": 0, + "content": "To address the above problems, we show that the implicit hierarchical structure correlations between object pairs and their relationships can be explicitly represented by a knowledge base. As shown in Fig. 1, we propose a 3D spatial multimodal knowledge accumulation module to explicitly merge the hierarchical structures of 3D scenes into the network to strengthen the 3D scene graph prediction process. Firstly, we filter the external commonsense knowledge base, classify the hierarchical tokens for each node, and add new support edges to form the hierarchical symbolic knowledge graph for 3D scenes. Secondly, we retrieve the hierarchical token from the reconstructed symbolic knowledge graph for object instances in 3D scenes to build a visual graph, and extract contextual features for nodes and edges using a region-aware graph network. Finally, to bridge the heterogeneous gap between the symbolic knowledge and visual information, we propose a graph reasoning network to correlate 3D spatial visual contents of scenes with textual facts. Conditioned on the learned vision-relevant 3D spatial multimodal knowledge, we incorporate this network into the relationships prediction stage as extra guidance, which can effectively regularize the distribution of possible relationships of object pairs and thus make the predictions less ambiguous." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.697, + 0.473, + 0.894 + ], + "angle": 0, + "content": "Our main contributions are: 1) We are the first to explicitly unify the regular patterns of 3D physical spaces with the deep architecture to facilitate 3D scene graph prediction. 2) We propose a hierarchical symbolic knowledge construction module that exploits extra knowledge as the baseline to admit the hierarchical structure cues of 3D scene. 3) We introduce a knowledge-guided visual context encoding module to construct hierarchical visual graph and learn the contextualized features by a region-aware graph network. 4) We propose a 3D spatial multimodal knowledge accumulation module to regularize the semantic space of relationship prediction. Results show that the learned knowledge and proposed modules consistently boost 3D scene graph prediction performance." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.09, + 0.642, + 0.105 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.109, + 0.895, + 0.334 + ], + "angle": 0, + "content": "2D Image-based Scene Graph Generation. Scene graph was first proposed for image retrieval [17], and subsequently received increasing attention in the vision community to produce graphical abstractions of images. Mainstream approaches [5, 36, 42, 44, 45] follow a two-step pipeline that first detects objects followed by classification of the relationship for each object pair. However, research on scene graphs has focused primarily on 2D images, ignoring 3D spatial characteristics such as position and geometry, and with limited spatial coverage. Our proposed method extends 2D scene graphs to 3D spaces, where the scene representation, network architecture and training mechanism all have to be altered in fundamental ways to meet the challenges arising from learning 3D scene structures and relationships. More detailed discussions can be found in the survey [4]." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.335, + 0.897, + 0.636 + ], + "angle": 0, + "content": "Knowledge Representation has been extensively studied to incorporate prior knowledge, e.g. DBPedia [2], ConceptNet [35], WordNet [24], VisualGenome [19] and hasPart [3], to aid numerous vision tasks [23]. Gao et al. [12] incorporated commonsense knowledge to learn the internal-external correlations among room and object entities for an agent to take proper decisions at each viewpoint. Zhang et al. [48] addressed the explainability of visual reasoning by introducing the explicit integration of external knowledge. Ding et al. [8] extracted the multimodal knowledge triplet to boost the performance of visual question answering. Chen et al. [6] constructed the prior knowledge of statistical correlations between object pairs and their relationships to address the issue of the uneven distribution over different relationships. Although previous studies have taken notice of knowledge in different vision tasks, they only implicitly mine the extra knowledge base or count the frequency of relationship pairs in datasets to strengthen the iterative message propagation between relationships and objects while ignoring the intrinsic properties of the data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.637, + 0.897, + 0.895 + ], + "angle": 0, + "content": "Scene Graph Prediction in Point Clouds. With the recently proposed 3DSSG datasets containing 3D scene graph annotations [37], the community started to explore semantic relationship prediction in 3D real world data. SGPN [37, 38] is the first work to build a 3D scene graph using both objects and their interrelations as graph nodes. It then performs message propagation using graph convolutional networks. Kimera [30] proposed a 3D dynamic scene graph that captures metric and semantic aspects of a dynamic environment, where nodes represent spatial concepts at different levels of abstraction, and edges represent spatial-temporal relations among the nodes. EdgeGCN [46] exploits multi-dimensional edge features for explicit relationship modeling and explores two associated twinning interaction mechanisms for the independent evolution of scene graph representations. Wu et al. [41] proposed a method to incrementally build semantic scene graphs from a 3D environment given a sequence of" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9183" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.087, + 0.895, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.262, + 0.895, + 0.306 + ], + "angle": 0, + "content": "Figure 2. Method pipeline. (a) A hierarchical symbolic knowledge is firstly reconstructed to exploit external knowledge as the baseline and admit the hierarchical structure cues of 3D scene. (b) We then build a hierarchical visual graph and learn the contextualized features by the region-aware graph network. (c) Finally, a 3D spatial multimodal knowledge is accumulated to strengthen relationship predictions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.309, + 0.474, + 0.462 + ], + "angle": 0, + "content": "RGB-D frames. KISG [47] uses the ground truth relationship triplets in the dataset to extract the prior knowledge and then fuses it in the scene graph prediction stage. 
One limitation of KISG [47] is that its relevant prior knowledge depends on the text-only dataset label while ignoring hierarchical and indispensable structures in the 3D scene for visual understanding. Our method differentiates itself from these related studies by exploring the 3D implicit structure pattern and introducing 3D spatial multimodal knowledge, which enables our model to predict relationships more accurately." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.47, + 0.214, + 0.488 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.474, + 0.675 + ], + "angle": 0, + "content": "Problem Formulation: The goal of 3D scene graph generation is to describe a given 3D point cloud scene \\(\\mathcal{I}\\) with a semantic scene graph \\(\\mathcal{G} = \\{\\mathcal{V},\\mathcal{R}\\}\\), where \\(\\mathcal{V}\\) and \\(\\mathcal{R}\\) represent instance object nodes and their inner relationship edges respectively. \\(\\mathcal{G}\\) forms a structured representation of the semantic content of the 3D scene. The nodes \\(\\mathcal{V}\\) consist of a set of objects \\(O = \\{o_1,o_2,\\dots ,o_n\\}\\) with object \\(o_i\\) assigned to a certain class label \\(C\\), a corresponding set of bounding boxes \\(B = \\{b_{1},b_{2},\\dots ,b_{n}\\}\\) with \\(b_{i}\\in \\mathbb{R}^{6}\\), and a set of relationship edges \\(\\mathcal{R} = \\{r_1,r_2,\\dots ,r_n\\}\\) with each \\(r_i\\) represents a predicate between a pair of objects. Our proposed model can be decomposed as:" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.685, + 0.47, + 0.715 + ], + "angle": 0, + "content": "\\[\nP (\\mathcal {G} | \\mathcal {I}) = P \\left(\\mathcal {K} _ {s} | \\mathcal {I}\\right) P \\left(\\mathcal {G} _ {v} \\mid \\mathcal {K} _ {s}, \\mathcal {I}\\right) P \\left(\\mathcal {R}, \\mathcal {K} _ {m} \\mid \\mathcal {G} _ {v}, \\mathcal {K} _ {s}, \\mathcal {I}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.716, + 0.474, + 0.899 + ], + "angle": 0, + "content": "In this equation, the component \\( P(\\mathcal{K}_s|\\mathcal{I}) \\) collects all the symbolic entities from the datasets, filters the extra knowledge bases, and combines the hierarchical structure patterns of 3D scenes to construct the hierarchical symbolic knowledge \\( \\mathcal{K}_s \\). The component \\( P(\\mathcal{G}_v|\\mathcal{K}_s,\\mathcal{I}) \\) builds visual graphs for scenes under the guidance of knowledge \\( \\mathcal{K}_s \\), where contextual features for each node are extracted. Conditioned on the knowledge \\( \\mathcal{K}_s \\) and visual graph \\( \\mathcal{G}_v \\), the component \\( P(\\mathcal{R},\\mathcal{K}_m|\\mathcal{G}_v,\\mathcal{K}_s,\\mathcal{I}) \\) accumulates the 3D spatial multimodal knowledge by correlating the knowledge \\( \\mathcal{K}_s \\) with visual content and predicts relationships simultaneously. Fig. 2 illustrates the overall pipeline of the proposed model." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.309, + 0.892, + 0.338 + ], + "angle": 0, + "content": "3.1. Hierarchical Symbolic Knowledge Initialization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.343, + 0.895, + 0.585 + ], + "angle": 0, + "content": "Unlike KISG [47], we do not use a closed set or ground truth relationship triplets from labels to learn prior knowledge. Hence, we must make an additional choice of what knowledge sources to use and how to clean them. 
Prior knowledge of object classes can be reliable predictors of the likelihoods of physical support relationships. For instance, it is unlikely that a cup is supported by a wall while tables are almost always supported by the floor. Therefore, given a set of objects, we can classify each object based on whether it is directly supported by the floor. The result is a three-layer hierarchical structure about objects in the 3D scene. In particular, the first layer only contains the floor since it does not have any support. The second layer contains objects directly supported by the floor, e.g. bed, table, and sofa. The third layer contains the remaining objects usually supported by objects in the second layer, e.g. pillow, cup, and cushion." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.586, + 0.896, + 0.827 + ], + "angle": 0, + "content": "To exploit the regular structure patterns in 3D spaces and construct the scene graph hierarchically, we construct a hierarchical symbolic knowledge graph to guide the 3D spatial knowledge reasoning. Knowledge sources, such as ConceptNet [35] and DBPedia [2], are a valuable tool containing commonsense knowledge about the real world. In this work, we use ConceptNet as our external knowledge base which gives us more spatial relationships and common pairwise objects. While ConceptNet contains very useful information, it also includes some knowledge that is irrelevant to our model. To mitigate this issue, we limit the ConceptNet to common object categories in 3D point cloud scenes. We collect object categories from two widely-used 3D point cloud datasets, SUNRGBD [34] and Scannet [7], and then include edges that only include these objects. After filtering, we have a total of about 5,000 edges and 760 nodes." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.827, + 0.897, + 0.888 + ], + "angle": 0, + "content": "We denote the external knowledge graph as \\(\\mathcal{K}_e = \\{\\mathcal{V}_e, \\mathcal{E}_e\\}\\) where \\(\\mathcal{V}_e\\) and \\(\\mathcal{E}_e\\) represent nodes and edges respectively. To merge the hierarchical structures in 3D spaces into the external knowledge graph and construct the hierarchical" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9184" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.425 + ], + "angle": 0, + "content": "symbolic knowledge graph \\(\\kappa_{s}\\), we first use a pre-trained multi-layer perceptron (MLP) to classify the hierarchical tokens for each node in the external knowledge graph to distinguish the discrepancy among different layers of nodes. The hierarchical token of each node denotes its corresponding layer in the hierarchical structure. Each node is then initialized as the concatenation of its trainable hierarchical token and the word2vec (GloVe [26]) representation of the object category. Since the hierarchical structure of 3D spaces is built based on the physical support relationships between objects, we add additional edges representing support relationships between nodes to the external knowledge graph \\(\\kappa_{e}\\). Specifically, we define a new edge type: given two nodes \\(s_i\\) and \\(s_j\\), we connect \\(s_i\\) to \\(s_j\\) using a support edge to represent the physical support relationship between \\(s_i\\) and \\(s_j\\). By definition, each node in the hierarchical structure is supported by the node in neighboring layers. 
Therefore, we add a support edge between two correlated nodes in neighboring layers. Each edge is initialized as the trainable GloVe representation of its edge type. Finally, we formulate the updated external knowledge graph as hierarchical symbolic knowledge graph \\(\\kappa_{s}\\). Additional details can be found in supplementary." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.429, + 0.453, + 0.445 + ], + "angle": 0, + "content": "3.2. Knowledge-guided Visual Context Encoding" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.448, + 0.473, + 0.569 + ], + "angle": 0, + "content": "As shown in Fig. 2, taking a scene point cloud with object instance annotations as input, we build a hierarchical visual graph \\(\\mathcal{G}_v = \\{\\mathcal{V}_v,\\mathcal{E}_v\\}\\) where \\(\\nu_{v}\\) and \\(\\mathcal{E}_v\\) denotes object instances and edges of object pairs respectively, under the guidance of the hierarchical symbolic knowledge graph \\(\\kappa_{s}\\). Then, a region-aware graph network is employed to propagate node messages through the visual graph \\(\\mathcal{G}_v\\) to learn the contextualized feature representation." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.57, + 0.473, + 0.886 + ], + "angle": 0, + "content": "Visual graph construction. We use Point Cloud Transformer [13] to extract spatial-aware visual features \\( f_{v} \\) for each object instance. To encode the spatial features \\( f_{t} \\) of each bounding box, we use an MLP to lift the parameters of each bounding box (i.e., center and size) to feature space. We assign the semantic features \\( f_{w} \\) for each object using an embedding table initialized by GloVe [26]. Each node in the visual graph is initialized as the concatenation of features \\( f_{v}, f_{t} \\) and \\( f_{w} \\). To capture the implicit structure of the point cloud scene, we route each node in the visual graph \\( \\mathcal{G}_{v} \\) into its corresponding layer according to the hierarchical tokens in hierarchical symbolic knowledge graph \\( \\kappa_{s} \\). Then, we complete the edge set \\( \\mathcal{E}_{v} \\) of visual graph \\( \\mathcal{G}_{v} \\) by extracting potential physical relationships between nodes in the adjacent layers. Specifically, we add an edge representing physical support relationship between node pair in the visual graph \\( \\mathcal{G}_{v} \\) if a support edge also exists between the corresponding nodes in the hierarchical symbolic knowledge graph \\( \\kappa_{s} \\). Similar to [46], we model the spatial interactions between node pairs and encode the initial edge embedding for node pairs using an MLP." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Contextualized features encoding. Objects sharing the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.198 + ], + "angle": 0, + "content": "same physical support are correlated since they have similar functional role in the environment and are generally in close proximity to each other. For instance, both pillow and clothes are usually supported by a bed. Therefore, we propose a region-aware graph network to jointly highlight the interrelated regions of each node in the visual graph \\(\\mathcal{G}_v\\) and encode the hierarchical contexts of the input scene." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.199, + 0.895, + 0.366 + ], + "angle": 0, + "content": "Given the initial representations of nodes and edges in the visual graph \\(\\mathcal{G}_v\\), the region-aware graph network iteratively updates the hidden state \\(\\mathbf{h}_i^{o,t}\\) of each node \\(v_i\\) and \\(\\mathbf{h}_{ij}^{e,t}\\) of each edge \\((v_i,v_j)\\) at each time step \\(t\\) via message passing. Since the contextual regions around each node in the visual graph \\(\\mathcal{G}_v\\) can be defined as other nodes sharing the same physical support with it, each node first gathers information from nodes within the same contextual region to enrich its current hidden state before propagating messages along the edges in the visual graph \\(\\mathcal{G}_v\\). Specifically, the enriched hidden state \\(\\tilde{\\mathbf{h}}_i^{o,t}\\) of each node is:" + }, + { + "type": "equation", + "bbox": [ + 0.595, + 0.377, + 0.895, + 0.412 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathbf {h}} _ {i} ^ {o, t} = \\mathbf {h} _ {i} ^ {o, t} + \\sum_ {j \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {j} ^ {o, t}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.421, + 0.895, + 0.485 + ], + "angle": 0, + "content": "\\(N_{r}(i)\\) contains nodes that share the same level support with node \\(v_{i}\\) and \\(\\psi\\) is a feed forward network for non-linear transformation. For edge \\((v_{i},v_{j})\\), its enriched hidden state \\(\\tilde{\\mathbf{h}}_{ij}^{e,t}\\) is computed by:" + }, + { + "type": "equation", + "bbox": [ + 0.535, + 0.498, + 0.895, + 0.533 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathbf {h}} _ {i j} ^ {e, t} = \\mathbf {h} _ {i j} ^ {e, t} + \\sum_ {k \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {k} ^ {o, t}\\right) + \\sum_ {s \\in N _ {r} (j)} \\psi \\left(\\mathbf {h} _ {s} ^ {o, t}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.542, + 0.892, + 0.573 + ], + "angle": 0, + "content": "After the feature representation enhancements, the message passing of nodes and edges can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.58, + 0.895, + 0.599 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i} ^ {o, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}, \\mathbf {m} _ {i} ^ {o, t}\\right) \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.621, + 0.892, + 0.642 + ], + "angle": 0, + "content": "\\[\n\\mathbf {h} _ {i j} ^ {e, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.895, + 0.692 + ], + "angle": 0, + "content": "where \\(\\mathbf{m}_i^{o,t}\\) and \\(\\mathbf{m}_{ij}^{e,t}\\) are the incoming messages for updating each node and edge. 
The calculation of the message for each node is:" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.7, + 0.892, + 0.735 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {v} (i)} \\left(\\varphi_ {n} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}\\right)\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.743, + 0.892, + 0.82 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}_v(i)\\) denotes the neighbor nodes of \\(v_{i}\\) in the visual graph \\(\\mathcal{G}_v\\), \\(\\varphi_{n}\\) and \\(\\varphi_{e}\\) are two non-linear transformation for associated nodes and edges. For each edge, we transform the hidden state of subject and object node by two MLPs before fusing them to obtain the message:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.828, + 0.892, + 0.849 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.903 + ], + "angle": 0, + "content": "We take the final hidden states of nodes and edges as the contextual feature \\(\\mathbf{c}_i^o\\) for each node \\(v_{i}\\in \\mathcal{V}_{v}\\) and \\(\\mathbf{c}_{ij}^{e}\\) for each edge \\((v_{i},v_{j})\\in \\mathcal{E}_{v}\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9185" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.465, + 0.108 + ], + "angle": 0, + "content": "3.3. Spatial Multimodal Knowledge Accumulation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.111, + 0.47, + 0.261 + ], + "angle": 0, + "content": "Though our hierarchical symbolic knowledge graph \\(\\kappa_{s}\\) can provide high-quality knowledge about the hierarchical structures of point cloud scene, this information is largely limited to symbolic knowledge that can only be explicitly expressed by text-relevant labels for relationship triplets. Therefore, we propose a novel schema to accumulate 3D spatial multimodal knowledge \\(\\kappa_{m}\\) progressively from the visual context via a graph reasoning network. We then incorporate the learned multimodal knowledge \\(\\kappa_{m}\\) and the contextual features to predict the possible relationships." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.262, + 0.471, + 0.381 + ], + "angle": 0, + "content": "Reasoning on knowledge graph. Since the contextual features encode the implicit hierarchical structure patterns in 3D spaces, we design a graph reasoning network which utilizes the visual contextual features and textual facts from the hierarchical symbolic knowledge graph \\(\\kappa_{s}\\) to accumulate 3D spatial multimodal knowledge \\(\\kappa_{m}\\) by aligning the entities in the symbolic knowledge graph with related visual contextual features." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.382, + 0.471, + 0.669 + ], + "angle": 0, + "content": "The graph reasoning network generates context for 3D spatial multimodal knowledge \\(\\mathcal{K}_m\\), which is in the form of embeddings that capture the regular structure patterns in 3D scenes for each node and edge in the hierarchical symbolic knowledge graph \\(\\mathcal{K}_s\\). 
Given the contextual features of nodes and edges in visual graph \\(\\mathcal{G}_v\\), each node and edge in the graph reasoning network receives three inputs: (1) the trainable node or edge embedding in the hierarchical symbolic knowledge graph \\(\\mathcal{K}_s\\), (2) a \\(0/1\\) indicator of whether this node or edge appears in the visual graph \\(\\mathcal{G}_v\\), (3) the contextual feature \\(\\mathbf{c}_i^o\\) and \\(\\mathbf{c}_{ij}^e\\) in the visual graph \\(\\mathcal{G}_v\\) corresponding to this node or edge, missing nodes and edges are padded with zero vectors. The graph reasoning network uses message passing to perform reasoning on hierarchical symbolic knowledge graph \\(\\mathcal{K}_s\\). Specifically, at each time step \\(t\\), to calculate the hidden states \\(\\mathbf{d}_i^{o,t}\\) for all nodes \\(s_i \\in \\mathcal{V}_s\\) and \\(\\mathbf{d}_{ij}^{e,t}\\) for all edges \\((s_i, s_j) \\in \\mathcal{E}_s\\), each node and edge first gather messages from their neighbors through the graph structure then update their hidden states:" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.678, + 0.47, + 0.697 + ], + "angle": 0, + "content": "\\[\n\\mathbf {d} _ {i} ^ {o, t + 1} = G R U \\left(\\mathbf {d} _ {i} ^ {o, t}, \\mathbf {m} _ {i} ^ {o, t}\\right), \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.719, + 0.47, + 0.74 + ], + "angle": 0, + "content": "\\[\n\\mathbf {d} _ {i j} ^ {e, t + 1} = G R U \\left(\\mathbf {d} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.744, + 0.469, + 0.777 + ], + "angle": 0, + "content": "where \\(\\mathbf{m}_i^{o,t}\\) and \\(\\mathbf{m}_{ij}^{e,t}\\) are the incoming messages for nodes and edges. The incoming message for each node is" + }, + { + "type": "equation", + "bbox": [ + 0.144, + 0.786, + 0.47, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {k} (i)} \\left(\\varphi_ {n} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\mathbf {d} _ {i j} ^ {e, t}\\right)\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.829, + 0.47, + 0.876 + ], + "angle": 0, + "content": "where \\(N_{k}(i)\\) denotes the neighbor nodes of node \\(s_i\\) in the knowledge graph \\(\\mathcal{K}_s\\). Similar to Eq. (7), the incoming message for each edge is" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.884, + 0.47, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\mathbf {d} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "We take the sum of the stacked hidden states as the 3D spatial multimodal knowledge embedding \\(\\mathbf{b}_i^o\\) for all nodes and \\(\\mathbf{b}_{ij}^{e}\\) for all edges in the symbolic knowledge graph \\(\\kappa_{s}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.137, + 0.895, + 0.442 + ], + "angle": 0, + "content": "Knowledge-enabled Scene Graph Prediction. To incorporate the 3D spatial multimodal knowledge \\(\\mathcal{K}_m\\) into scene graph inference, we propose fusing the multimodal knowledge embedding with the contextual features in the visual graph to facilitate 3D scene graph prediction. 
Towards this goal, we utilize an MLP as object detection head to predict confident initial class guesses given the contextual node features. We then select the three most confident multimodal knowledge embeddings for each node. For edges in the visual graph, we select the three most confident object categories for the subject and object node based on the initial guesses. We then retrieve the multimodal knowledge embedding using the predicted subject and object categories. Since the multimodal knowledge embedding and the contextual features are in different feature spaces, we transform them by two MLPs \\(\\varphi_b\\) and \\(\\varphi_c\\) respectively before fusing them. For each node in the visual graph, we fuse the retrieved multimodal knowledge embedding \\(\\{\\mathbf{b}^k\\}_{k=1,2,3}\\) and the contextual node feature \\(\\mathbf{c}_i^o\\) to obtain the knowledge-enabled contextual feature \\(\\mathbf{f}_i^o\\):" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.452, + 0.892, + 0.493 + ], + "angle": 0, + "content": "\\[\n\\mathbf {f} _ {i} ^ {o} = \\phi \\left(\\varphi_ {c} \\left(\\mathbf {c} _ {i} ^ {o}\\right) + \\varphi_ {b} \\left(\\sum_ {k = 1} ^ {3} \\mathbf {b} ^ {k}\\right)\\right). \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.891, + 0.548 + ], + "angle": 0, + "content": "For each edge in the visual graph, the multimodal knowledge embedding is fused with its contextual feature in the same way as the node." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.549, + 0.892, + 0.82 + ], + "angle": 0, + "content": "Equipped with the 3D spatial multimodal knowledge-enabled contextual features \\(\\mathbf{f}_i^o\\) for nodes and \\(\\mathbf{f}_{ij}^{e}\\) for edges in the visual graph, we generate the scene graph by decoding the contextual features using a standard graph convolution network (GCN) [18]. We assume that each object pair can have a relationship (including none) and fully connect them as a graph where relationships are represented as edges. Each node is initialized by its contextual node feature \\(\\mathbf{f}_i^o\\), and each edge is initialized either by the contextual edge feature \\(\\mathbf{f}_{ij}^{e}\\) or the contextual features of its subject and object nodes if the edge is not presented in the visual graph. The last part of the GCN consists of two detection heads for object and relationship classification. The object detection head takes the decoded node features as input to predict the object classification possibilities. The relationship prediction head first fuses the decoded subject and object node features with the decoded edge features, then predicts a discrete distribution over all possible relationship classes." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.821, + 0.892, + 0.897 + ], + "angle": 0, + "content": "Loss Function. We adopt the standard cross entropy loss for object and relationship classification in our model. Since the contextual node feature \\(\\mathbf{c}_i^o\\) is used to predict the initial class guesses, we use a cross entropy loss \\(\\mathcal{L}_{init}^o\\) for the initial detection. For the final prediction, we use two cross entropy" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "9186" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.158, + 0.089, + 0.817, + 0.22 + ], + "angle": 0, + "content": "
Methods | PredCls R@50/100 | PredCls mR@50/100 | SGCls R@50/100 | SGCls mR@50/100 | SGDet R@50/100 | SGDet mR@50/100
3D+IMP [42] | 48.15 / 48.72 | 21.56 / 21.85 | 17.41 / 17.89 | 9.06 / 9.23 | 24.54 / 24.57 | 21.71 / 21.72
3D+MOTIFS [45] | 52.43 / 53.37 | 24.35 / 24.52 | 18.34 / 18.57 | 9.74 / 9.86 | 26.58 / 26.59 | 24.12 / 24.17
3D+VCTree [36] | 53.12 / 54.38 | 24.75 / 24.91 | 19.93 / 20.24 | 10.34 / 10.55 | 27.58 / 27.62 | 24.92 / 24.94
3D+KERN [6] | 54.74 / 56.53 | 25.21 / 25.83 | 21.41 / 21.78 | 11.02 / 11.36 | 27.75 / 27.78 | 24.03 / 24.05
3D+Schemata [32] | 58.13 / 59.11 | 42.11 / 42.83 | 28.72 / 28.97 | 26.72 / 27.05 | 28.12 / 28.13 | 25.29 / 25.30
3D+HetH [40] | 58.24 / 58.75 | 42.53 / 42.74 | 28.83 / 29.05 | 26.68 / 26.85 | 28.17 / 28.18 | 25.31 / 25.32
Ours | 68.32 / 69.49 | 66.54 / 66.92 | 31.50 / 31.64 | 30.29 / 30.56 | 29.41 / 29.44 | 25.35 / 25.36
" + }, + { + "type": "table_caption", + "bbox": [ + 0.125, + 0.223, + 0.844, + 0.239 + ], + "angle": 0, + "content": "Table 1. Comparison with state-of-the-art 2D scene graph prediction methods re-implemented to work on 3DSSG dataset." + }, + { + "type": "table", + "bbox": [ + 0.166, + 0.244, + 0.805, + 0.34 + ], + "angle": 0, + "content": "
Methods | PredCls R@50/100 | PredCls mR@50/100 | SGCls R@50/100 | SGCls mR@50/100 | SGDet R@50/100 | SGDet mR@50/100
SGPN [37] | 57.71 / 58.05 | 38.12 / 38.67 | 28.39 / 28.74 | 22.23 / 22.57 | - / - | - / -
EdgeGCN [46] | 58.42 / 59.11 | 38.84 / 39.35 | 28.58 / 28.93 | 22.67 / 23.33 | - / - | - / -
KISG [47] | 64.47 / 64.93 | 63.19 / 63.52 | 29.46 / 29.65 | 28.20 / 28.64 | - / - | - / -
Ours | 68.32 / 69.49 | 66.54 / 66.92 | 31.50 / 31.64 | 30.29 / 30.56 | 29.41 / 29.44 | 25.35 / 25.36
" + }, + { + "type": "table_caption", + "bbox": [ + 0.231, + 0.342, + 0.738, + 0.357 + ], + "angle": 0, + "content": "Table 2. Comparison with 3D scene graph prediction methods on the 3DSSG dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.362, + 0.471, + 0.391 + ], + "angle": 0, + "content": "losses \\(\\mathcal{L}_{final}^{o}\\) and \\(\\mathcal{L}_{final}^{r}\\) for the object and relationship classification:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.4, + 0.471, + 0.419 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f i n a l}} = w _ {o} \\mathcal {L} _ {\\text {f i n a l}} ^ {o} + w _ {r} \\mathcal {L} _ {\\text {f i n a l}} ^ {r} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.424, + 0.472, + 0.469 + ], + "angle": 0, + "content": "where \\( w_{o} \\) and \\( w_{r} \\) are the weights for object and relation loss. In our experiment, we set \\( w_{o} \\) to 0.75 and \\( w_{r} \\) to 1. Our final loss function can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.478, + 0.471, + 0.495 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {i n i t}} ^ {o} + \\mathcal {L} _ {\\text {f i n a l}} \\tag {14}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.498, + 0.211, + 0.516 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.52, + 0.332, + 0.536 + ], + "angle": 0, + "content": "4.1. Experimental Configuration" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.54, + 0.472, + 0.766 + ], + "angle": 0, + "content": "We evaluate our model on 3DSSG dataset [37]. Following [47], we select 160 object categories and 27 relationship classes for detection. We compare our model with others in three standard tasks proposed in [42]. (1) Predicate Classification (PredCls): Given the ground truth 3D bounding boxes and their corresponding semantic labels, our model classifies the relationship between each object pair. (2) Scene Graph Classification (SGCls): Given the ground truth 3D bounding boxes, our model predicts the relationships as well as the object categories jointly. (3) Scene Graph Generation (SGDet): Given the raw point cloud, our model detects 3D objects, their semantic information, as well as their relationships in an end-to-end manner. Following existing 2D and 3D scene graph generation works, we adopt the constrained evaluation metric recall@K (R@K) and mean recall@K (mR@K)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.767, + 0.471, + 0.904 + ], + "angle": 0, + "content": "Our model is implemented in PyTorch, and trained using one NVIDIA GTX TITAN X GPU for 40 epochs with the ADAM optimizer. We use an initial learning rate of 0.0001, weight decay of 0.5, and mini-batch of 4. After 15, 25, and 40 epochs, we multiply the learning rate by 0.1. We adopt VoteNet [27] as the 3D object detection backbone to generate an initial set of 256 object candidates in the SGDet task. The Point Cloud Transformer is pre-trained on the 3DSSG dataset using the same settings in [13]." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.361, + 0.779, + 0.377 + ], + "angle": 0, + "content": "4.2. 
Comparison to State-of-the-Art" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.381, + 0.895, + 0.547 + ], + "angle": 0, + "content": "We first compare our model with the following state-of-art 2D image scene graph generation models, modified to fit the 3DSSG dataset: IMP [42], MOTIFS [45] and VC-Tree [36] which creatively devise various message passing methods for improving graph representations. KERN [6], Schemata [32], and HetH [40] incorporate statistical priors and learning-based commonsense knowledge into the scene graph prediction. Therefore, we include these models to illustrate the superiority of the 3D spatial multimodal knowledge about the implicit hierarchical structure correlations between object pairs in the 3D scene." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.548, + 0.896, + 0.864 + ], + "angle": 0, + "content": "Our results in Tab. 1 lead to a few key observations: (1) Our model consistently outperforms all the existing approaches on all metrics and achieve \\(3.57\\%\\) boost on mR@50 in SGCls task and \\(10.08\\%\\) boost on R@50 in PredCls task. This indicates that leveraging regular patterns of 3D physical spaces is beneficial for scene graph prediction. (2) Our model outperforms traditional message passing model IMP and MOTIFS. Furthermore, our method achieves considerable improvement when compared to VCTree. (3) Compared to Schemata, our model achieves an improvement of \\(2.78\\%\\) and \\(10.19\\%\\) on R@50 in SGCls and PredCls, suggesting that our multimodal knowledge embedding is a better approach compared to the class-level prototypical representations learned from perceptual outputs in Schemata. (4) Compared with KERN and HetH, our proposed hierarchical structure of 3D spaces is superior to the graph structure they adopted to represent the input as our model outperforms them with a significant margin. (5) The performance has been saturated in the SGDet task. This is mainly because object detection performance on this dataset is a bottleneck that limits the performance." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.864, + 0.895, + 0.895 + ], + "angle": 0, + "content": "We also compare the performance of our model with the state-of-the-art 3D point cloud-based scene graph predic" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9187" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.108, + 0.089, + 0.437, + 0.322 + ], + "angle": 0, + "content": "
Methods | R@50/100 | mR@50/100
Knowledge K_s
w/o Hierarchical Tokens | 30.47 / 30.67 | 28.94 / 29.19
w/o Support Edge | 30.55 / 30.74 | 29.17 / 29.47
w/o Both | 28.41 / 28.47 | 27.13 / 27.52
Visual Context Encoding
G_v replaced w/ G_fc | 28.17 / 28.32 | 26.28 / 26.29
w/o RaGN | 26.43 / 26.57 | 24.23 / 24.36
RaGN replaced w/ GCN | 31.03 / 31.21 | 29.67 / 29.88
Knowledge K_m
w/o b_i^o and b_ij^e | 26.27 / 26.35 | 22.93 / 23.18
w/o c_i^o and c_ij^e as input | 28.14 / 28.31 | 25.05 / 25.31
Ours | 31.50 / 31.64 | 30.29 / 30.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.326, + 0.47, + 0.354 + ], + "angle": 0, + "content": "Table 3. Quantitative results of different module configurations on the SGCls task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.359, + 0.473, + 0.661 + ], + "angle": 0, + "content": "tion models to demonstrate the effectiveness of 3D spatial multimodal knowledge. We include several existing works such as SGPN [37], EdgeGCN [46] and KISG [47] since they all report competitive results. SGPN and EdgeGCN exploit multi-dimensional edge features for explicit relationship modeling whereas KISG learns a group of class-dependent prototypical representations for each semantic class. As shown in Tab. 2, our model dominantly surpasses all methods. Benefiting from the hierarchical structure of 3D spaces, our model is able to reason complex relationship hierarchically and systematically. Compared to SGPN and EdgeGCN, our model improves the R@50 by \\(2.92\\%\\) and \\(9.90\\%\\) in SGCls and PredCls tasks. We can also see that our method outperforms KISG by \\(2.04\\%\\) on R@50 in SGCls. KISG captures class-related priors in the scene from text-only ground truth labels. Such knowledge cannot efficiently represent diverse relationships and complex 3D environments. In contrast, our model extracts indispensable 3D spatial multimodal knowledge which benefits the scene graph prediction." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.667, + 0.232, + 0.684 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.687, + 0.47, + 0.731 + ], + "angle": 0, + "content": "We only report the performance results in the Recall and mean Recall metrics on the SGCs task for ablation studies. The results are shown in Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.732, + 0.471, + 0.867 + ], + "angle": 0, + "content": "Hierarchical symbolic knowledge. We first look at the hierarchical symbolic knowledge graph \\(\\kappa_{s}\\) to investigate its effectiveness. Specifically, we find that using ConceptNet without classifying the hierarchical tokens or adding support edges leads to sub-optimal performance. Furthermore, using ConceptNet without any augmentation drops the performance significantly, indicating that both the hierarchical tokens and support edges are crucial elements of the hierarchical structures in 3D scene." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.868, + 0.472, + 0.899 + ], + "angle": 0, + "content": "Knowledge-guided visual context encoding. Next, we analyse the knowledge-guided visual context encoding mod" + }, + { + "type": "table", + "bbox": [ + 0.534, + 0.09, + 0.858, + 0.171 + ], + "angle": 0, + "content": "
Variants | PredCls R@50 | PredCls mR@50 | SGCls R@50 | SGCls mR@50
G_r | 62.74 | 58.25 | 28.17 | 27.28
G_t | 68.41 | 66.59 | 31.59 | 30.35
G_v (original) | 68.32 | 66.54 | 31.50 | 30.29
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.175, + 0.877, + 0.189 + ], + "angle": 0, + "content": "Table 4. Comparison of different variants of the visual graph." + }, + { + "type": "table", + "bbox": [ + 0.57, + 0.197, + 0.822, + 0.279 + ], + "angle": 0, + "content": "
Methods | Head | Body | Tail
SGPN [37] | 39.42 | 23.64 | 13.03
EdgeGCN [46] | 39.51 | 23.85 | 13.15
KISG [47] | 40.36 | 24.56 | 13.61
Ours | 44.23 | 26.27 | 14.73
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.282, + 0.892, + 0.31 + ], + "angle": 0, + "content": "Table 5. The R@50 metric of biased relationship prediction on the SGCIs task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.316, + 0.895, + 0.481 + ], + "angle": 0, + "content": "ule. We can see that replacing the hierarchical visual graph \\(\\mathcal{G}_v\\) with a fully-connected graph \\(\\mathcal{G}_{fc}\\) decreases the performance by a margin of \\(3.33\\%\\) on R@50, indicating that the hierarchical structure is superior to a plain fully-connected graph in terms of modeling context. Furthermore, removing the subsequent region-aware graph network (RaGN) and directly fusing the multimodal knowledge embedding with the initial representation of each node and edge in the visual graph negatively impacts the performance on all metrics. Replacing the region-aware graph network with a standard graph convolution network also hurts the performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.482, + 0.895, + 0.692 + ], + "angle": 0, + "content": "3D spatial multimodal knowledge accumulation. Lastly, we examine the accumulated multimodal knowledge \\(\\mathcal{K}_m\\) to learn about how \\(\\mathcal{K}_m\\) and rest of the model interact. We first see how much of the improvement comes from the 3D spatial multimodal knowledge \\(\\mathcal{K}_m\\). As shown in Tab. 3, the multimodal knowledge embedding significantly improves the R@50 and mR@50 by \\(5.23\\%\\) and \\(7.36\\%\\) respectively. In addition, dropping the contextual feature input \\(\\mathbf{c}_i^o\\) for nodes and \\(\\mathbf{c}_{ij}^{e}\\) for edges in the graph reasoning network decreases the performance by a margin of \\(3.36\\%\\) and \\(5.24\\%\\) on R@50 and mR@50 in SGCls. This drop in performance indicates that the contextual feature plays a pivotal role in bridging the heterogeneous gap between the symbolic knowledge and visual information." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.699, + 0.669, + 0.715 + ], + "angle": 0, + "content": "4.4. Further Analysis" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.895, + 0.9 + ], + "angle": 0, + "content": "Analysis on the hierarchical structure of 3D spaces. To validate the potential of the hierarchical visual graph \\(\\mathcal{G}_v\\) in capturing the inherent hierarchical structure of a 3D scene, we design two visual graph variants and compare them to the hierarchical visual \\(\\mathcal{G}_v\\): (1) Instead of using the hierarchical symbolic knowledge graph \\(\\kappa_s\\), we build a ground truth graph \\(\\mathcal{G}_t\\) based on the ground truth labels for support relations. In particular, each edge in \\(\\mathcal{G}_t\\) represents the ground truth support relationship of the input scene. (2) We also design a randomly connected graph \\(\\mathcal{G}_r\\), where we keep all of the nodes the same but randomize the edges that connect them. As shown in Tab. 4, both \\(\\mathcal{G}_v\\) and \\(\\mathcal{G}_t\\) outperform \\(\\mathcal{G}_r\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9188" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.089, + 0.434, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.229, + 0.47, + 0.257 + ], + "angle": 0, + "content": "Figure 3. Comparison of our model and KISG on the SGCs task when trained with noisy labels." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.261, + 0.473, + 0.337 + ], + "angle": 0, + "content": "with a significant margin on all metrics. More importantly, we observe that \\(\\mathcal{G}_t\\) and \\(\\mathcal{G}_v\\) perform mostly similar while \\(\\mathcal{G}_t\\) slightly outperforms \\(\\mathcal{G}_v\\). The results confirm that the hierarchical visual graph \\(\\mathcal{G}_v\\) is one of the more optimal ways of extracting the hierarchical structure patterns of 3D spaces." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.337, + 0.474, + 0.637 + ], + "angle": 0, + "content": "Robustness of 3D spatial multimodal knowledge. Additionally, we investigate the robustness of the 3D spatial multimodal knowledge \\(\\mathcal{K}_m\\) by training our model with noisy labels. Specifically, we add different proportions of noises into the 3DSSG training set by replacing part of ground truth relationships with the randomly selected wrong relationships for input scenes. The performance of our model and KISG [47] on the SGCs task is reported in Fig. 3. We can see that, the performance of KISG decreases drastically while ours decreases slowly with increasing noise rate. Under the \\(30\\%\\) noise rate condition, our model improves the R@50 metric by about \\(6.89\\%\\) over KISG, which indicates that our model achieves improved robustness over KISG. The main reason is that KISG captures relevant prior knowledge from text-only ground truth labels and noises contained in the labels are easily included in their knowledge base and affects the prediction of relationships. Different with KISG, our model leverages the inherently hierarchical structures of 3D scenes and accumulates multimodal knowledge which is both label free and reliable." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.639, + 0.474, + 0.85 + ], + "angle": 0, + "content": "Long-tail analysis. We also investigate how our model performs on the long-tail part of the dataset. To do this, we order all the relationships based on the frequency of each relationship category occurring in triplets. We select the 5 most common relationship categories as the head, the 5 least common relationship categories as the tail, and the rest of the categories as the body. Tab. 5 reports the R@50 metric on each long-tail category groups of our model. Moreover, our model achieves best performance when evaluating the R@50 metric on the tail relationship categories, which shows that our model has the ability to mitigate the effect of sample imbalance. The main reason is that the hierarchical structures can be extracted accurately which influence other relationships in the prediction process." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.851, + 0.262, + 0.867 + ], + "angle": 0, + "content": "4.5. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We visualize intermediate results in Fig. 4(a-c). 
We can see that both the hierarchical visual graph \\(\\mathcal{G}_v\\) and 3D scene" + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.089, + 0.752, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.647, + 0.16, + 0.718, + 0.169 + ], + "angle": 0, + "content": "(a) Input scene" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.166, + 0.679, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.313, + 0.67, + 0.324 + ], + "angle": 0, + "content": "(b) Hierarchical visual graph" + }, + { + "type": "image", + "bbox": [ + 0.679, + 0.166, + 0.89, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.738, + 0.313, + 0.826, + 0.324 + ], + "angle": 0, + "content": "(c) 3D scene graph" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.328, + 0.894, + 0.357 + ], + "angle": 0, + "content": "Figure 4. Visualizations of our predicted scene graph on 3DSSG dataset. Red indicates the misclassified objects or relationships." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.362, + 0.895, + 0.468 + ], + "angle": 0, + "content": "graph \\(\\mathcal{G}\\) are well constructed. However, our model incorrectly classifies the relationship between Window1 and Floor. This is mainly because our model fails to extract discriminative features for Window1 as there are few points within its bounding box. The token of Window1 is classified incorrectly in the second layer while it should be in the third layer. We provide more visualization samples in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.474, + 0.62, + 0.489 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.495, + 0.896, + 0.768 + ], + "angle": 0, + "content": "We proposed a method for 3D scene graph prediction from raw point clouds. Our method explores the regular patterns of 3D physical spaces into the deep network to facilitate 3D scene graph prediction. Hierarchical symbolic knowledge is first reconstructed via exploiting external knowledge as the baseline to admit the hierarchical structure cues of a 3D scene. A knowledge-guided visual context encoding module then builds a hierarchical visual graph and learns the contextualized features by a region-aware graph network. Finally, a 3D spatial multimodal knowledge accumulation module is proposed to regularize the semantic space of relationship prediction. Extensive experiments on the 3DSSG dataset show that our method outperforms existing state-of-the-art and can mitigate the effect of data imbalance and label noises. In the future, we plan to exploit the attributes of 3D objects to build richer knowledge graphs to improve the prediction performances of attribute-focused relationships, such as same symmetric as and same texture as." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.774, + 0.681, + 0.791 + ], + "angle": 0, + "content": "6. Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.895, + 0.9 + ], + "angle": 0, + "content": "This work was supported in part by the National Natural Science Foundation of China under Grant 62003253, Grant 61973106, Grant U2013203, Grant U21A20482 and Grant U20A20185. Professor Ajmal Mian is the recipient of an Australian Research Council Future Fellowship Award (project number FT210100268) funded by the Australian Government." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9189" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.472, + 0.184 + ], + "angle": 0, + "content": "[1] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5664-5673, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.186, + 0.472, + 0.24 + ], + "angle": 0, + "content": "[2] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. 2007. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.243, + 0.472, + 0.297 + ], + "angle": 0, + "content": "[3] Sumithra Bhakthavatsalam, Kyle Richardson, Niket Tandon, and Peter Clark. Do dogs have whiskers? a new knowledge base of haspart relations. arXiv preprint arXiv:2006.07510, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.3, + 0.472, + 0.367 + ], + "angle": 0, + "content": "[4] Xiaojun Chang, Pengzhen Ren, Pengfei Xu, Zhihui Li, Xiaojiang Chen, and Alexander G Hauptmann. A comprehensive survey of scene graphs: Generation and application. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.37, + 0.472, + 0.438 + ], + "angle": 0, + "content": "[5] Long Chen, Hanwang Zhang, Jun Xiao, Xiangnan He, Shiliang Pu, and Shih-Fu Chang. Counterfactual critic multi-agent training for scene graph generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4613-4623, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.44, + 0.472, + 0.508 + ], + "angle": 0, + "content": "[6] Tianshui Chen, Weihao Yu, Riquan Chen, and Liang Lin. Knowledge-embedded routing network for scene graph generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6163–6171, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.511, + 0.472, + 0.58 + ], + "angle": 0, + "content": "[7] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.581, + 0.472, + 0.65 + ], + "angle": 0, + "content": "[8] Yang Ding, Jing Yu, Bang Liu, Yue Hu, Mingxin Cui, and Qi Wu. Mukea: Multimodal knowledge extraction and accumulation for knowledge-based visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5089-5098, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.652, + 0.472, + 0.706 + ], + "angle": 0, + "content": "[9] Zijin Du, Hailiang Ye, and Feilong Cao. A novel local-global graph convolutional method for point cloud semantic segmentation. IEEE Transactions on Neural Networks and Learning Systems, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.709, + 0.472, + 0.763 + ], + "angle": 0, + "content": "[10] Mingtao Feng, Syed Zulqarnain Gilani, Yaonan Wang, Liang Zhang, and Ajmal Mian. Relation graph network for 3d object detection in point clouds. IEEE Transactions on Image Processing, 30:92-107, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.765, + 0.472, + 0.82 + ], + "angle": 0, + "content": "[11] Mingtao Feng, Liang Zhang, Xuefei Lin, Syed Zulqarnain Gilani, and Ajmal Mian. Point attention network for semantic segmentation of 3d point clouds. Pattern Recognition, 107:107446, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.821, + 0.472, + 0.891 + ], + "angle": 0, + "content": "[12] Chen Gao, Jinyu Chen, Si Liu, Luting Wang, Qiong Zhang, and Qi Wu. Room-and-object aware knowledge reasoning for remote embodied referring expression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3064–3073, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.891 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[13] Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[14] Alon Hafri and Chaz Firestone. The perception of relations. Trends in Cognitive Sciences, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.178, + 0.894, + 0.259 + ], + "angle": 0, + "content": "[15] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.893, + 0.315 + ], + "angle": 0, + "content": "[16] Maximilian Jaritz, Tuan-Hung Vu, Raoul De Charette, Emilie Wirbel, and Patrick Pérez. Cross-modal learning for domain adaptation in 3d semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.893, + 0.384 + ], + "angle": 0, + "content": "[17] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3668–3678, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.893, + 0.426 + ], + "angle": 0, + "content": "[18] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.894, + 0.509 + ], + "angle": 0, + "content": "[19] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123(1):32-73, 2017. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.894, + 0.579 + ], + "angle": 0, + "content": "[20] Manyi Li, Akshay Gadi Patil, Kai Xu, Siddhartha Chaudhuri, Owais Khan, Ariel Shamir, Changhe Tu, Baoquan Chen, Daniel Cohen-Or, and Hao Zhang. Grains: Generative recursive autoencoders for indoor scenes. ACM Transactions on Graphics (TOG), 38(2):1-16, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.894, + 0.663 + ], + "angle": 0, + "content": "[21] Mengtian Li, Yuan Xie, Yunhang Shen, Bo Ke, Ruizhi Qiao, Bo Ren, Shaohui Lin, and Lizhuang Ma. Hybridcr: Weakly-supervised 3d point cloud semantic segmentation via hybrid contrastive regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14930-14939, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.665, + 0.894, + 0.719 + ], + "angle": 0, + "content": "[22] Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2949-2958, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.721, + 0.894, + 0.789 + ], + "angle": 0, + "content": "[23] Kenneth Marino, Xinlei Chen, Devi Parikh, Abhinav Gupta, and Marcus Rohrbach. Krisp: Integrating implicit and symbolic knowledge for open-domain knowledge-based vqa. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14111-14121, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.894, + 0.816 + ], + "angle": 0, + "content": "[24] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.818, + 0.893, + 0.872 + ], + "angle": 0, + "content": "[25] Xuran Pan, Zhuofan Xia, Shiji Song, Li Erran Li, and Gao Huang. 3d object detection with pointformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7463-7472, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.894, + 0.901 + ], + "angle": 0, + "content": "[26] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "9190" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.471, + 0.133 + ], + "angle": 0, + "content": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.471, + 0.19 + ], + "angle": 0, + "content": "[27] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep hough voting for 3d object detection in point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9277-9286, 2019. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.192, + 0.471, + 0.26 + ], + "angle": 0, + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.471, + 0.331 + ], + "angle": 0, + "content": "[29] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. Point-net++ deep hierarchical feature learning on point sets in a metric space. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 5105-5114, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.471, + 0.402 + ], + "angle": 0, + "content": "[30] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From slam to spatial perception with 3d dynamic scene graphs. The International Journal of Robotics Research, 40(12-14):1510-1546, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.404, + 0.471, + 0.444 + ], + "angle": 0, + "content": "[31] Morteza Sarafyazd and Mehrdad Jazayeri. Hierarchical reasoning by neural circuits in the frontal cortex. Science, 364(6441), 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.446, + 0.471, + 0.514 + ], + "angle": 0, + "content": "[32] Sahand Sharifzadeh, Sina Moayed Baharlou, and Volker Tresp. Classification by attention: Scene graph classification with prior knowledge. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 5025-5033, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.517, + 0.471, + 0.572 + ], + "angle": 0, + "content": "[33] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In European conference on computer vision, pages 746-760. Springer, 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.574, + 0.471, + 0.628 + ], + "angle": 0, + "content": "[34] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 567-576, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.63, + 0.471, + 0.684 + ], + "angle": 0, + "content": "[35] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.687, + 0.471, + 0.755 + ], + "angle": 0, + "content": "[36] Kaihua Tang, Hanwang Zhang, Baoyuan Wu, Wenhan Luo, and Wei Liu. Learning to compose dynamic tree structures for visual contexts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6619-6628, 2019. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.757, + 0.471, + 0.825 + ], + "angle": 0, + "content": "[37] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3961-3970, 2020. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.828, + 0.471, + 0.88 + ], + "angle": 0, + "content": "[38] Johanna Wald, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs with instance embeddings. International Journal of Computer Vision, pages 1-22, 2022. 
1, 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.16 + ], + "angle": 0, + "content": "[39] Kai Wang, Yu-An Lin, Ben Weissmann, Manolis Savva, Angel X Chang, and Daniel Ritchie. Planit: Planning and instantiating indoor scenes with relation graph and spatial prior networks. ACM Transactions on Graphics (TOG), 38(4):1-15, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.894, + 0.219 + ], + "angle": 0, + "content": "[40] Wenbin Wang, Ruiping Wang, Shiguang Shan, and Xilin Chen. Sketching image gist: Human-mimetic hierarchical scene graph generation. In European Conference on Computer Vision, pages 222-239. Springer, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.894, + 0.3 + ], + "angle": 0, + "content": "[41] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. Scenegraphfusion: Incremental 3d scene graph prediction from rgb-d sequences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7515-7525, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.304, + 0.894, + 0.359 + ], + "angle": 0, + "content": "[42] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5410–5419, 2017. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.361, + 0.894, + 0.415 + ], + "angle": 0, + "content": "[43] Qiangeng Xu, Yiqi Zhong, and Ulrich Neumann. Behind the curtain: Learning occluded shapes for 3d object detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2893-2901, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.418, + 0.894, + 0.472 + ], + "angle": 0, + "content": "[44] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. In Proceedings of the European conference on computer vision (ECCV), pages 670-685, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.474, + 0.894, + 0.529 + ], + "angle": 0, + "content": "[45] Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5831-5840, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.531, + 0.894, + 0.598 + ], + "angle": 0, + "content": "[46] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9705-9715, 2021. 1, 2, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.601, + 0.894, + 0.655 + ], + "angle": 0, + "content": "[47] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.657, + 0.894, + 0.713 + ], + "angle": 0, + "content": "[48] Yifeng Zhang, Ming Jiang, and Qi Zhao. Explicit knowledge incorporation for visual reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1356-1365, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.715, + 0.894, + 0.77 + ], + "angle": 0, + "content": "[49] Na Zhao, Tat-Seng Chua, and Gim Hee Lee. Few-shot 3d point cloud semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8873-8882, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.77 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "9191" + } + ] +] \ No newline at end of file diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_origin.pdf b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8bd1c5b8e338e088a855272d31246557e147dbc9 --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/8b57cee0-fdf2-4526-9ea1-36db5e008e92_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1e3f890b9fc7a399e91622aa2bfa22b2ac48d51ae5028deabd5b2ae4c23ec73 +size 8234822 diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/full.md b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/full.md new file mode 100644 index 0000000000000000000000000000000000000000..27a565e9ae9a06533b4cb5ab09e2c1efc4bc7cfc --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/full.md @@ -0,0 +1,300 @@ +# 3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud + +Mingtao Feng $^{1*}$ Haoran Hou $^{1*}$ Liang Zhang $^{1\dagger}$ Zijie Wu $^{2\dagger}$ Yulan Guo $^{3}$ Ajmal Mian $^{4}$ $^{1}$ Xidian University, $^{2}$ Hunan University, $^{3}$ Sun Yat-Sen University, $^{4}$ The University of Western Australia + +# Abstract + +In-depth understanding of a 3D scene not only involves locating/recognizing individual objects, but also requires to infer the relationships and interactions among them. However, since 3D scenes contain partially scanned objects with physical connections, dense placement, changing sizes, and a wide variety of challenging relationships, existing methods perform quite poorly with limited training samples. In this work, we find that the inherently hierarchical structures of physical space in 3D scenes aid in the automatic association of semantic and spatial arrangements, specifying clear patterns and leading to less ambiguous predictions. Thus, they well meet the challenges due to the rich variations within scene categories. To achieve this, we explicitly unify these structural cues of 3D physical spaces into deep neural networks to facilitate scene graph prediction. Specifically, we exploit an external knowledge base as a baseline to accumulate both contextualized visual content and textual facts to form a 3D spatial multimodal knowledge graph. Moreover, we propose a knowledge-enabled scene graph prediction module benefiting from the 3D spatial knowledge to effectively regularize semantic space of relationships. Extensive experiments demonstrate the superiority of the proposed method over current state-of-the-art competitors. Our code is available at https://github.com/HHrEtvP/SMKA. + +# 1. 
Introduction + +In recent years, much success has been achieved on 3D point cloud scene understanding such as semantic segmentation [9, 11, 15, 16, 21, 28, 29, 49] and object detection [10, 22, 25, 27, 43]. However, the 3D world is not only defined by objects but also by the relationships between objects. A 3D scene graph can abstract the environment as a graph where nodes represent objects and edges characterize the relationships between object pairs, which has already been recognized in recent seminal works [1, 30, 37, 38, 41, 46]. However, relationship graphs predicted by current methods are far from satisfactory due to the noisy, cluttered and par + +![](images/8492445031b3f950e3dd7e8bc7d3d1476da3cdd7e5c625d9f8de6c45f71b4669.jpg) +Figure 1. A brief overview of our method. + +tial nature of real 3D scans. Moreover, these data-driven methods treat sophisticated relationships in 3D space independently for classification using the geometric features proximity or fit, and are ignorant of commonsense or other useful 3D spatial cues beyond visual information. 3D objects in real scenes commonly have strongly structured regularities [33,39], whose semantic and spatial arrangements follow clear patterns, but still exhibit rich structural variations even within the scene category. + +The key observation is that 3D scene structures are inherently hierarchical [20]. By definition, an instance can have multiple supports, lamps are standing on a table, chairs are supported by the floor and only the floor does not have any support, and it is unlikely that a pillow is supporting a couch. Although relationships themselves cast no light on the human eyes, a growing body of works [14, 31] suggest that even very complex relationship information is reasoned hierarchically and systemically according to the role of the prefrontal cortex. Relationships, such as support, can be extracted rapidly, are hard to ignore, and influence other relationships in the perceptual process. For example, a TV and a sofa are related since they together serve the function of 'watching TV', but these two objects can be far apart in a scene. Relationships of this kind are much more difficult, if not possible, to infer based on geometric analysis alone. The model can relate the table easily which supports the TV and use the table as a bridge to predict the 'front' relationship with sofa, where table and sofa are all supported by the floor and relationships within them is intuitive. + +The underlying hierarchical structures in 3D scenes are label free and reliable, and can hence play an essential role in scene understanding at no additional cost. Existing 3D scene graph prediction models [1, 30, 37, 38, 41, 46] are oblivious to the underlying structures in the point cloud scenes. The question is how to take this prior knowledge into consideration to make the 3D scene graph achieve higher accuracy? KISG [47] proposes a graph auto-encoder to learn a closed set and ground truth prior knowledge from relationship triplets in data for 3D scene graph prediction. Although KISG [47] takes note of knowledge, it captures relevant prior knowledge from text-only ground truth labels, which merely contain facts expressed by label descriptions while lacking complex but indispensable multimodal knowledge for 3D scene graph prediction. In addition, noises contained in the manually annotated labels are easily included in the knowledge base and affects the prediction of relationships. 
+ +To address the above problems, we show that the implicit hierarchical structure correlations between object pairs and their relationships can be explicitly represented by a knowledge base. As shown in Fig. 1, we propose a 3D spatial multimodal knowledge accumulation module to explicitly merge the hierarchical structures of 3D scenes into the network to strengthen the 3D scene graph prediction process. Firstly, we filter the external commonsense knowledge base, classify the hierarchical tokens for each node, and add new support edges to form the hierarchical symbolic knowledge graph for 3D scenes. Secondly, we retrieve the hierarchical token from the reconstructed symbolic knowledge graph for object instances in 3D scenes to build a visual graph, and extract contextual features for nodes and edges using a region-aware graph network. Finally, to bridge the heterogeneous gap between the symbolic knowledge and visual information, we propose a graph reasoning network to correlate 3D spatial visual contents of scenes with textual facts. Conditioned on the learned vision-relevant 3D spatial multimodal knowledge, we incorporate this network into the relationships prediction stage as extra guidance, which can effectively regularize the distribution of possible relationships of object pairs and thus make the predictions less ambiguous. + +Our main contributions are: 1) We are the first to explicitly unify the regular patterns of 3D physical spaces with the deep architecture to facilitate 3D scene graph prediction. 2) We propose a hierarchical symbolic knowledge construction module that exploits extra knowledge as the baseline to admit the hierarchical structure cues of 3D scene. 3) We introduce a knowledge-guided visual context encoding module to construct hierarchical visual graph and learn the contextualized features by a region-aware graph network. 4) We propose a 3D spatial multimodal knowledge accumulation module to regularize the semantic space of relationship prediction. Results show that the learned knowledge and proposed modules consistently boost 3D scene graph prediction performance. + +# 2. Related Work + +2D Image-based Scene Graph Generation. Scene graph was first proposed for image retrieval [17], and subsequently received increasing attention in the vision community to produce graphical abstractions of images. Mainstream approaches [5, 36, 42, 44, 45] follow a two-step pipeline that first detects objects followed by classification of the relationship for each object pair. However, research on scene graphs has focused primarily on 2D images, ignoring 3D spatial characteristics such as position and geometry, and with limited spatial coverage. Our proposed method extends 2D scene graphs to 3D spaces, where the scene representation, network architecture and training mechanism all have to be altered in fundamental ways to meet the challenges arising from learning 3D scene structures and relationships. More detailed discussions can be found in the survey [4]. + +Knowledge Representation has been extensively studied to incorporate prior knowledge, e.g. DBPedia [2], ConceptNet [35], WordNet [24], VisualGenome [19] and hasPart [3], to aid numerous vision tasks [23]. Gao et al. [12] incorporated commonsense knowledge to learn the internal-external correlations among room and object entities for an agent to take proper decisions at each viewpoint. Zhang et al. [48] addressed the explainability of visual reasoning by introducing the explicit integration of external knowledge. Ding et al. 
[8] extracted the multimodal knowledge triplet to boost the performance of visual question answering. Chen et al. [6] constructed the prior knowledge of statistical correlations between object pairs and their relationships to address the issue of the uneven distribution over different relationships. Although previous studies have taken notice of knowledge in different vision tasks, they only implicitly mine the extra knowledge base or count the frequency of relationship pairs in datasets to strengthen the iterative message propagation between relationships and objects while ignoring the intrinsic properties of the data. + +Scene Graph Prediction in Point Clouds. With the recently proposed 3DSSG datasets containing 3D scene graph annotations [37], the community started to explore semantic relationship prediction in 3D real world data. SGPN [37, 38] is the first work to build a 3D scene graph using both objects and their interrelations as graph nodes. It then performs message propagation using graph convolutional networks. Kimera [30] proposed a 3D dynamic scene graph that captures metric and semantic aspects of a dynamic environment, where nodes represent spatial concepts at different levels of abstraction, and edges represent spatial-temporal relations among the nodes. EdgeGCN [46] exploits multi-dimensional edge features for explicit relationship modeling and explores two associated twinning interaction mechanisms for the independent evolution of scene graph representations. Wu et al. [41] proposed a method to incrementally build semantic scene graphs from a 3D environment given a sequence of + +![](images/bdfdc2effd33af1603a1a213bab4bd1bcdf4e8dcf99263018eacc7818e8c103f.jpg) +Figure 2. Method pipeline. (a) A hierarchical symbolic knowledge is firstly reconstructed to exploit external knowledge as the baseline and admit the hierarchical structure cues of 3D scene. (b) We then build a hierarchical visual graph and learn the contextualized features by the region-aware graph network. (c) Finally, a 3D spatial multimodal knowledge is accumulated to strengthen relationship predictions. + +RGB-D frames. KISG [47] uses the ground truth relationship triplets in the dataset to extract the prior knowledge and then fuses it in the scene graph prediction stage. One limitation of KISG [47] is that its relevant prior knowledge depends on the text-only dataset label while ignoring hierarchical and indispensable structures in the 3D scene for visual understanding. Our method differentiates itself from these related studies by exploring the 3D implicit structure pattern and introducing 3D spatial multimodal knowledge, which enables our model to predict relationships more accurately. + +# 3. Methodology + +Problem Formulation: The goal of 3D scene graph generation is to describe a given 3D point cloud scene $\mathcal{I}$ with a semantic scene graph $\mathcal{G} = \{\mathcal{V},\mathcal{R}\}$ , where $\mathcal{V}$ and $\mathcal{R}$ represent instance object nodes and their inner relationship edges respectively. $\mathcal{G}$ forms a structured representation of the semantic content of the 3D scene. The nodes $\mathcal{V}$ consist of a set of objects $O = \{o_1,o_2,\dots ,o_n\}$ with object $o_i$ assigned to a certain class label $C$ , a corresponding set of bounding boxes $B = \{b_{1},b_{2},\dots ,b_{n}\}$ with $b_{i}\in \mathbb{R}^{6}$ , and a set of relationship edges $\mathcal{R} = \{r_1,r_2,\dots ,r_n\}$ with each $r_i$ represents a predicate between a pair of objects. 
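To make the formulation concrete, the scene graph $\mathcal{G} = \{\mathcal{V}, \mathcal{R}\}$ can be viewed as a small data container holding object nodes with their boxes and predicate edges. The sketch below is illustrative only: the field names are ours rather than part of the released code, and the example labels are drawn from the object/relationship vocabulary used in Sec. 4.1.

```python
# Minimal, illustrative container for a 3D semantic scene graph: object nodes
# carry a category and an axis-aligned box in R^6, edges carry a predicate.
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class ObjectNode:
    index: int                # instance id within the scene
    category: str             # one of the object classes (160 classes in 3DSSG)
    bbox: Tuple[float, ...]   # (cx, cy, cz, dx, dy, dz), i.e. b_i in R^6

@dataclass
class RelationshipEdge:
    subject: int              # index of the subject node
    obj: int                  # index of the object node
    predicate: str            # one of the relationship classes (27 in 3DSSG)

@dataclass
class SceneGraph:
    nodes: List[ObjectNode] = field(default_factory=list)
    edges: List[RelationshipEdge] = field(default_factory=list)

# Toy scene: a lamp standing on a table, the table supported by the floor.
g = SceneGraph(
    nodes=[ObjectNode(0, "floor", (0.0, 0.0, 0.0, 5.0, 5.0, 0.1)),
           ObjectNode(1, "table", (1.0, 1.0, 0.4, 1.0, 1.0, 0.8)),
           ObjectNode(2, "lamp",  (1.0, 1.0, 1.0, 0.2, 0.2, 0.4))],
    edges=[RelationshipEdge(1, 0, "supported by"),
           RelationshipEdge(2, 1, "standing on")],
)
```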
Our proposed model can be decomposed as: + +$$ +P (\mathcal {G} | \mathcal {I}) = P \left(\mathcal {K} _ {s} | \mathcal {I}\right) P \left(\mathcal {G} _ {v} \mid \mathcal {K} _ {s}, \mathcal {I}\right) P \left(\mathcal {R}, \mathcal {K} _ {m} \mid \mathcal {G} _ {v}, \mathcal {K} _ {s}, \mathcal {I}\right) \tag {1} +$$ + +In this equation, the component $P(\mathcal{K}_s|\mathcal{I})$ collects all the symbolic entities from the datasets, filters the extra knowledge bases, and combines the hierarchical structure patterns of 3D scenes to construct the hierarchical symbolic knowledge $\mathcal{K}_s$ . The component $P(\mathcal{G}_v|\mathcal{K}_s,\mathcal{I})$ builds visual graphs for scenes under the guidance of knowledge $\mathcal{K}_s$ , where contextual features for each node are extracted. Conditioned on the knowledge $\mathcal{K}_s$ and visual graph $\mathcal{G}_v$ , the component $P(\mathcal{R},\mathcal{K}_m|\mathcal{G}_v,\mathcal{K}_s,\mathcal{I})$ accumulates the 3D spatial multimodal knowledge by correlating the knowledge $\mathcal{K}_s$ with visual content and predicts relationships simultaneously. Fig. 2 illustrates the overall pipeline of the proposed model. + +# 3.1. Hierarchical Symbolic Knowledge Initialization + +Unlike KISG [47], we do not use a closed set or ground truth relationship triplets from labels to learn prior knowledge. Hence, we must make an additional choice of what knowledge sources to use and how to clean them. Prior knowledge of object classes can be reliable predictors of the likelihoods of physical support relationships. For instance, it is unlikely that a cup is supported by a wall while tables are almost always supported by the floor. Therefore, given a set of objects, we can classify each object based on whether it is directly supported by the floor. The result is a three-layer hierarchical structure about objects in the 3D scene. In particular, the first layer only contains the floor since it does not have any support. The second layer contains objects directly supported by the floor, e.g. bed, table, and sofa. The third layer contains the remaining objects usually supported by objects in the second layer, e.g. pillow, cup, and cushion. + +To exploit the regular structure patterns in 3D spaces and construct the scene graph hierarchically, we construct a hierarchical symbolic knowledge graph to guide the 3D spatial knowledge reasoning. Knowledge sources, such as ConceptNet [35] and DBPedia [2], are a valuable tool containing commonsense knowledge about the real world. In this work, we use ConceptNet as our external knowledge base which gives us more spatial relationships and common pairwise objects. While ConceptNet contains very useful information, it also includes some knowledge that is irrelevant to our model. To mitigate this issue, we limit the ConceptNet to common object categories in 3D point cloud scenes. We collect object categories from two widely-used 3D point cloud datasets, SUNRGBD [34] and Scannet [7], and then include edges that only include these objects. After filtering, we have a total of about 5,000 edges and 760 nodes. + +We denote the external knowledge graph as $\mathcal{K}_e = \{\mathcal{V}_e, \mathcal{E}_e\}$ where $\mathcal{V}_e$ and $\mathcal{E}_e$ represent nodes and edges respectively. 
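As a rough illustration of the knowledge-base filtering described above and of the three-layer support hierarchy, the sketch below assumes a plain (head, relation, tail) edge-list view of the external knowledge base and hand-picked category sets; the actual model instead predicts the hierarchical token of each node with a pre-trained MLP, as described next.

```python
# Hedged sketch of knowledge filtering and layer grouping; the edge-list format
# and the category sets are illustrative stand-ins, not the released pipeline.
from typing import List, Set, Tuple

def filter_knowledge_edges(edges: List[Tuple[str, str, str]],
                           categories: Set[str]) -> List[Tuple[str, str, str]]:
    """Keep only (head, relation, tail) triples whose endpoints are both common
    3D object categories (collected from e.g. SUNRGBD and ScanNet)."""
    return [(h, r, t) for h, r, t in edges if h in categories and t in categories]

def assign_layer_token(category: str, floor_supported: Set[str]) -> int:
    """Three-layer hierarchy: 1 = floor (no support), 2 = directly supported by
    the floor, 3 = everything else (supported by layer-2 objects)."""
    if category == "floor":
        return 1
    if category in floor_supported:   # e.g. {"table", "bed", "sofa", "chair"}
        return 2
    return 3

# Support edges are then added between correlated nodes in neighbouring layers,
# e.g. ("lamp", "support", "table") and ("table", "support", "floor").
categories = {"floor", "table", "lamp", "pillow", "wall"}
edges = [("lamp", "atlocation", "table"), ("cup", "usedfor", "drinking")]
print(filter_knowledge_edges(edges, categories))   # [('lamp', 'atlocation', 'table')]
print(assign_layer_token("table", {"table", "bed", "sofa", "chair"}))  # 2
```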
To merge the hierarchical structures in 3D spaces into the external knowledge graph and construct the hierarchical + +symbolic knowledge graph $\kappa_{s}$ , we first use a pre-trained multi-layer perceptron (MLP) to classify the hierarchical tokens for each node in the external knowledge graph to distinguish the discrepancy among different layers of nodes. The hierarchical token of each node denotes its corresponding layer in the hierarchical structure. Each node is then initialized as the concatenation of its trainable hierarchical token and the word2vec (GloVe [26]) representation of the object category. Since the hierarchical structure of 3D spaces is built based on the physical support relationships between objects, we add additional edges representing support relationships between nodes to the external knowledge graph $\kappa_{e}$ . Specifically, we define a new edge type: given two nodes $s_i$ and $s_j$ , we connect $s_i$ to $s_j$ using a support edge to represent the physical support relationship between $s_i$ and $s_j$ . By definition, each node in the hierarchical structure is supported by the node in neighboring layers. Therefore, we add a support edge between two correlated nodes in neighboring layers. Each edge is initialized as the trainable GloVe representation of its edge type. Finally, we formulate the updated external knowledge graph as hierarchical symbolic knowledge graph $\kappa_{s}$ . Additional details can be found in supplementary. + +# 3.2. Knowledge-guided Visual Context Encoding + +As shown in Fig. 2, taking a scene point cloud with object instance annotations as input, we build a hierarchical visual graph $\mathcal{G}_v = \{\mathcal{V}_v,\mathcal{E}_v\}$ where $\nu_{v}$ and $\mathcal{E}_v$ denotes object instances and edges of object pairs respectively, under the guidance of the hierarchical symbolic knowledge graph $\kappa_{s}$ . Then, a region-aware graph network is employed to propagate node messages through the visual graph $\mathcal{G}_v$ to learn the contextualized feature representation. + +Visual graph construction. We use Point Cloud Transformer [13] to extract spatial-aware visual features $f_{v}$ for each object instance. To encode the spatial features $f_{t}$ of each bounding box, we use an MLP to lift the parameters of each bounding box (i.e., center and size) to feature space. We assign the semantic features $f_{w}$ for each object using an embedding table initialized by GloVe [26]. Each node in the visual graph is initialized as the concatenation of features $f_{v}, f_{t}$ and $f_{w}$ . To capture the implicit structure of the point cloud scene, we route each node in the visual graph $\mathcal{G}_{v}$ into its corresponding layer according to the hierarchical tokens in hierarchical symbolic knowledge graph $\kappa_{s}$ . Then, we complete the edge set $\mathcal{E}_{v}$ of visual graph $\mathcal{G}_{v}$ by extracting potential physical relationships between nodes in the adjacent layers. Specifically, we add an edge representing physical support relationship between node pair in the visual graph $\mathcal{G}_{v}$ if a support edge also exists between the corresponding nodes in the hierarchical symbolic knowledge graph $\kappa_{s}$ . Similar to [46], we model the spatial interactions between node pairs and encode the initial edge embedding for node pairs using an MLP. + +Contextualized features encoding. 
Objects sharing the + +same physical support are correlated since they have similar functional role in the environment and are generally in close proximity to each other. For instance, both pillow and clothes are usually supported by a bed. Therefore, we propose a region-aware graph network to jointly highlight the interrelated regions of each node in the visual graph $\mathcal{G}_v$ and encode the hierarchical contexts of the input scene. + +Given the initial representations of nodes and edges in the visual graph $\mathcal{G}_v$ , the region-aware graph network iteratively updates the hidden state $\mathbf{h}_i^{o,t}$ of each node $v_i$ and $\mathbf{h}_{ij}^{e,t}$ of each edge $(v_i,v_j)$ at each time step $t$ via message passing. Since the contextual regions around each node in the visual graph $\mathcal{G}_v$ can be defined as other nodes sharing the same physical support with it, each node first gathers information from nodes within the same contextual region to enrich its current hidden state before propagating messages along the edges in the visual graph $\mathcal{G}_v$ . Specifically, the enriched hidden state $\tilde{\mathbf{h}}_i^{o,t}$ of each node is: + +$$ +\tilde {\mathbf {h}} _ {i} ^ {o, t} = \mathbf {h} _ {i} ^ {o, t} + \sum_ {j \in N _ {r} (i)} \psi \left(\mathbf {h} _ {j} ^ {o, t}\right) \tag {2} +$$ + +$N_{r}(i)$ contains nodes that share the same level support with node $v_{i}$ and $\psi$ is a feed forward network for non-linear transformation. For edge $(v_{i},v_{j})$ , its enriched hidden state $\tilde{\mathbf{h}}_{ij}^{e,t}$ is computed by: + +$$ +\tilde {\mathbf {h}} _ {i j} ^ {e, t} = \mathbf {h} _ {i j} ^ {e, t} + \sum_ {k \in N _ {r} (i)} \psi \left(\mathbf {h} _ {k} ^ {o, t}\right) + \sum_ {s \in N _ {r} (j)} \psi \left(\mathbf {h} _ {s} ^ {o, t}\right) \tag {3} +$$ + +After the feature representation enhancements, the message passing of nodes and edges can be formulated as: + +$$ +\mathbf {h} _ {i} ^ {o, t + 1} = G R U \left(\tilde {\mathbf {h}} _ {i} ^ {o, t}, \mathbf {m} _ {i} ^ {o, t}\right) \tag {4} +$$ + +$$ +\mathbf {h} _ {i j} ^ {e, t + 1} = G R U \left(\tilde {\mathbf {h}} _ {i j} ^ {e, t}, \mathbf {m} _ {i j} ^ {e, t}\right) \tag {5} +$$ + +where $\mathbf{m}_i^{o,t}$ and $\mathbf{m}_{ij}^{e,t}$ are the incoming messages for updating each node and edge. The calculation of the message for each node is: + +$$ +\mathbf {m} _ {i} ^ {o, t} = \sum_ {j \in N _ {v} (i)} \left(\varphi_ {n} \left(\tilde {\mathbf {h}} _ {j} ^ {o, t}\right) + \varphi_ {e} \left(\tilde {\mathbf {h}} _ {i j} ^ {e, t}\right)\right) \tag {6} +$$ + +where $\mathcal{N}_v(i)$ denotes the neighbor nodes of $v_{i}$ in the visual graph $\mathcal{G}_v$ , $\varphi_{n}$ and $\varphi_{e}$ are two non-linear transformation for associated nodes and edges. For each edge, we transform the hidden state of subject and object node by two MLPs before fusing them to obtain the message: + +$$ +\mathbf {m} _ {i j} ^ {e, t} = \varphi_ {s} \left(\tilde {\mathbf {h}} _ {i} ^ {o, t}\right) + \varphi_ {o} \left(\tilde {\mathbf {h}} _ {j} ^ {o, t}\right) \tag {7} +$$ + +We take the final hidden states of nodes and edges as the contextual feature $\mathbf{c}_i^o$ for each node $v_{i}\in \mathcal{V}_{v}$ and $\mathbf{c}_{ij}^{e}$ for each edge $(v_{i},v_{j})\in \mathcal{E}_{v}$ . + +# 3.3. 
Spatial Multimodal Knowledge Accumulation + +Though our hierarchical symbolic knowledge graph $\kappa_{s}$ can provide high-quality knowledge about the hierarchical structures of point cloud scene, this information is largely limited to symbolic knowledge that can only be explicitly expressed by text-relevant labels for relationship triplets. Therefore, we propose a novel schema to accumulate 3D spatial multimodal knowledge $\kappa_{m}$ progressively from the visual context via a graph reasoning network. We then incorporate the learned multimodal knowledge $\kappa_{m}$ and the contextual features to predict the possible relationships. + +Reasoning on knowledge graph. Since the contextual features encode the implicit hierarchical structure patterns in 3D spaces, we design a graph reasoning network which utilizes the visual contextual features and textual facts from the hierarchical symbolic knowledge graph $\kappa_{s}$ to accumulate 3D spatial multimodal knowledge $\kappa_{m}$ by aligning the entities in the symbolic knowledge graph with related visual contextual features. + +The graph reasoning network generates context for 3D spatial multimodal knowledge $\mathcal{K}_m$ , which is in the form of embeddings that capture the regular structure patterns in 3D scenes for each node and edge in the hierarchical symbolic knowledge graph $\mathcal{K}_s$ . Given the contextual features of nodes and edges in visual graph $\mathcal{G}_v$ , each node and edge in the graph reasoning network receives three inputs: (1) the trainable node or edge embedding in the hierarchical symbolic knowledge graph $\mathcal{K}_s$ , (2) a $0/1$ indicator of whether this node or edge appears in the visual graph $\mathcal{G}_v$ , (3) the contextual feature $\mathbf{c}_i^o$ and $\mathbf{c}_{ij}^e$ in the visual graph $\mathcal{G}_v$ corresponding to this node or edge, missing nodes and edges are padded with zero vectors. The graph reasoning network uses message passing to perform reasoning on hierarchical symbolic knowledge graph $\mathcal{K}_s$ . Specifically, at each time step $t$ , to calculate the hidden states $\mathbf{d}_i^{o,t}$ for all nodes $s_i \in \mathcal{V}_s$ and $\mathbf{d}_{ij}^{e,t}$ for all edges $(s_i, s_j) \in \mathcal{E}_s$ , each node and edge first gather messages from their neighbors through the graph structure then update their hidden states: + +$$ +\mathbf {d} _ {i} ^ {o, t + 1} = G R U \left(\mathbf {d} _ {i} ^ {o, t}, \mathbf {m} _ {i} ^ {o, t}\right), \tag {8} +$$ + +$$ +\mathbf {d} _ {i j} ^ {e, t + 1} = G R U \left(\mathbf {d} _ {i j} ^ {e, t}, \mathbf {m} _ {i j} ^ {e, t}\right), \tag {9} +$$ + +where $\mathbf{m}_i^{o,t}$ and $\mathbf{m}_{ij}^{e,t}$ are the incoming messages for nodes and edges. The incoming message for each node is + +$$ +\mathbf {m} _ {i} ^ {o, t} = \sum_ {j \in N _ {k} (i)} \left(\varphi_ {n} \left(\mathbf {d} _ {j} ^ {o, t}\right) + \varphi_ {e} \left(\mathbf {d} _ {i j} ^ {e, t}\right)\right), \tag {10} +$$ + +where $N_{k}(i)$ denotes the neighbor nodes of node $s_i$ in the knowledge graph $\mathcal{K}_s$ . Similar to Eq. (7), the incoming message for each edge is + +$$ +\mathbf {m} _ {i j} ^ {e, t} = \varphi_ {s} \left(\mathbf {d} _ {i} ^ {o, t}\right) + \varphi_ {o} \left(\mathbf {d} _ {j} ^ {o, t}\right). \tag {11} +$$ + +We take the sum of the stacked hidden states as the 3D spatial multimodal knowledge embedding $\mathbf{b}_i^o$ for all nodes and $\mathbf{b}_{ij}^{e}$ for all edges in the symbolic knowledge graph $\kappa_{s}$ . 
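A minimal PyTorch sketch of one round of this GRU-based message passing (Eqs. (8)-(11)) is shown below. The module names, feature dimension and the simplification to directed, incoming-only messages are illustrative assumptions rather than the released implementation; the node and edge updates of Eqs. (2)-(7) follow the same pattern with the additional region-aware enrichment terms.

```python
# Illustrative one-step graph reasoning update: nodes and edges gather messages
# from their neighbourhood and are refreshed by GRU cells, as in Eqs. (8)-(11).
import torch
import torch.nn as nn

class GraphReasoningStep(nn.Module):
    def __init__(self, dim: int = 128):
        super().__init__()
        self.phi_n = nn.Linear(dim, dim)   # transform of neighbouring node states
        self.phi_e = nn.Linear(dim, dim)   # transform of incident edge states
        self.phi_s = nn.Linear(dim, dim)   # transform of subject-node states
        self.phi_o = nn.Linear(dim, dim)   # transform of object-node states
        self.node_gru = nn.GRUCell(dim, dim)
        self.edge_gru = nn.GRUCell(dim, dim)

    def forward(self, d_node, d_edge, edge_index):
        # d_node: (N, dim) node states, d_edge: (E, dim) edge states,
        # edge_index: (E, 2) long tensor of (subject, object) node indices.
        src, dst = edge_index[:, 0], edge_index[:, 1]

        # Simplified Eq. (10): each node sums transformed states of its incoming
        # neighbour nodes and of the corresponding incident edges.
        msg = self.phi_n(d_node[src]) + self.phi_e(d_edge)
        m_node = torch.zeros_like(d_node).index_add_(0, dst, msg)

        # Eq. (11): each edge receives messages from its subject and object node.
        m_edge = self.phi_s(d_node[src]) + self.phi_o(d_node[dst])

        # Eqs. (8)-(9): GRU updates of node and edge hidden states.
        return self.node_gru(m_node, d_node), self.edge_gru(m_edge, d_edge)

# Toy usage: 3 nodes, 2 directed edges (1 -> 0 and 2 -> 1), a few iterations.
step = GraphReasoningStep(dim=16)
nodes, edges = torch.randn(3, 16), torch.randn(2, 16)
edge_index = torch.tensor([[1, 0], [2, 1]])
for _ in range(3):
    nodes, edges = step(nodes, edges, edge_index)
```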
+ +Knowledge-enabled Scene Graph Prediction. To incorporate the 3D spatial multimodal knowledge $\mathcal{K}_m$ into scene graph inference, we propose fusing the multimodal knowledge embedding with the contextual features in the visual graph to facilitate 3D scene graph prediction. Towards this goal, we utilize an MLP as object detection head to predict confident initial class guesses given the contextual node features. We then select the three most confident multimodal knowledge embeddings for each node. For edges in the visual graph, we select the three most confident object categories for the subject and object node based on the initial guesses. We then retrieve the multimodal knowledge embedding using the predicted subject and object categories. Since the multimodal knowledge embedding and the contextual features are in different feature spaces, we transform them by two MLPs $\varphi_b$ and $\varphi_c$ respectively before fusing them. For each node in the visual graph, we fuse the retrieved multimodal knowledge embedding $\{\mathbf{b}^k\}_{k=1,2,3}$ and the contextual node feature $\mathbf{c}_i^o$ to obtain the knowledge-enabled contextual feature $\mathbf{f}_i^o$ : + +$$ +\mathbf {f} _ {i} ^ {o} = \phi \left(\varphi_ {c} \left(\mathbf {c} _ {i} ^ {o}\right) + \varphi_ {b} \left(\sum_ {k = 1} ^ {3} \mathbf {b} ^ {k}\right)\right). \tag {12} +$$ + +For each edge in the visual graph, the multimodal knowledge embedding is fused with its contextual feature in the same way as the node. + +Equipped with the 3D spatial multimodal knowledge-enabled contextual features $\mathbf{f}_i^o$ for nodes and $\mathbf{f}_{ij}^{e}$ for edges in the visual graph, we generate the scene graph by decoding the contextual features using a standard graph convolution network (GCN) [18]. We assume that each object pair can have a relationship (including none) and fully connect them as a graph where relationships are represented as edges. Each node is initialized by its contextual node feature $\mathbf{f}_i^o$ , and each edge is initialized either by the contextual edge feature $\mathbf{f}_{ij}^{e}$ or the contextual features of its subject and object nodes if the edge is not presented in the visual graph. The last part of the GCN consists of two detection heads for object and relationship classification. The object detection head takes the decoded node features as input to predict the object classification possibilities. The relationship prediction head first fuses the decoded subject and object node features with the decoded edge features, then predicts a discrete distribution over all possible relationship classes. + +Loss Function. We adopt the standard cross entropy loss for object and relationship classification in our model. Since the contextual node feature $\mathbf{c}_i^o$ is used to predict the initial class guesses, we use a cross entropy loss $\mathcal{L}_{init}^o$ for the initial detection. For the final prediction, we use two cross entropy + +
| Methods | PredCls R@50/100 | PredCls mR@50/100 | SGCls R@50/100 | SGCls mR@50/100 | SGDet R@50/100 | SGDet mR@50/100 |
| --- | --- | --- | --- | --- | --- | --- |
| 3D+IMP [42] | 48.15 / 48.72 | 21.56 / 21.85 | 17.41 / 17.89 | 9.06 / 9.23 | 24.54 / 24.57 | 21.71 / 21.72 |
| 3D+MOTIFS [45] | 52.43 / 53.37 | 24.35 / 24.52 | 18.34 / 18.57 | 9.74 / 9.86 | 26.58 / 26.59 | 24.12 / 24.17 |
| 3D+VCTree [36] | 53.12 / 54.38 | 24.75 / 24.91 | 19.93 / 20.24 | 10.34 / 10.55 | 27.58 / 27.62 | 24.92 / 24.94 |
| 3D+KERN [6] | 54.74 / 56.53 | 25.21 / 25.83 | 21.41 / 21.78 | 11.02 / 11.36 | 27.75 / 27.78 | 24.03 / 24.05 |
| 3D+Schemata [32] | 58.13 / 59.11 | 42.11 / 42.83 | 28.72 / 28.97 | 26.72 / 27.05 | 28.12 / 28.13 | 25.29 / 25.30 |
| 3D+HetH [40] | 58.24 / 58.75 | 42.53 / 42.74 | 28.83 / 29.05 | 26.68 / 26.85 | 28.17 / 28.18 | 25.31 / 25.32 |
| Ours | 68.32 / 69.49 | 66.54 / 66.92 | 31.50 / 31.64 | 30.29 / 30.56 | 29.41 / 29.44 | 25.35 / 25.36 |
+ +Table 1. Comparison with state-of-the-art 2D scene graph prediction methods re-implemented to work on 3DSSG dataset. + +
| Methods | PredCls R@50/100 | PredCls mR@50/100 | SGCls R@50/100 | SGCls mR@50/100 | SGDet R@50/100 | SGDet mR@50/100 |
| --- | --- | --- | --- | --- | --- | --- |
| SGPN [37] | 57.71 / 58.05 | 38.12 / 38.67 | 28.39 / 28.74 | 22.23 / 22.57 | - / - | - / - |
| EdgeGCN [46] | 58.42 / 59.11 | 38.84 / 39.35 | 28.58 / 28.93 | 22.67 / 23.33 | - / - | - / - |
| KISG [47] | 64.47 / 64.93 | 63.19 / 63.52 | 29.46 / 29.65 | 28.20 / 28.64 | - / - | - / - |
| Ours | 68.32 / 69.49 | 66.54 / 66.92 | 31.50 / 31.64 | 30.29 / 30.56 | 29.41 / 29.44 | 25.35 / 25.36 |
+ +Table 2. Comparison with 3D scene graph prediction methods on the 3DSSG dataset. + +losses $\mathcal{L}_{final}^{o}$ and $\mathcal{L}_{final}^{r}$ for the object and relationship classification: + +$$ +\mathcal {L} _ {\text {f i n a l}} = w _ {o} \mathcal {L} _ {\text {f i n a l}} ^ {o} + w _ {r} \mathcal {L} _ {\text {f i n a l}} ^ {r} \tag {13} +$$ + +where $w_{o}$ and $w_{r}$ are the weights for object and relation loss. In our experiment, we set $w_{o}$ to 0.75 and $w_{r}$ to 1. Our final loss function can be formulated as: + +$$ +\mathcal {L} = \mathcal {L} _ {\text {i n i t}} ^ {o} + \mathcal {L} _ {\text {f i n a l}} \tag {14} +$$ + +# 4. Experiments + +# 4.1. Experimental Configuration + +We evaluate our model on 3DSSG dataset [37]. Following [47], we select 160 object categories and 27 relationship classes for detection. We compare our model with others in three standard tasks proposed in [42]. (1) Predicate Classification (PredCls): Given the ground truth 3D bounding boxes and their corresponding semantic labels, our model classifies the relationship between each object pair. (2) Scene Graph Classification (SGCls): Given the ground truth 3D bounding boxes, our model predicts the relationships as well as the object categories jointly. (3) Scene Graph Generation (SGDet): Given the raw point cloud, our model detects 3D objects, their semantic information, as well as their relationships in an end-to-end manner. Following existing 2D and 3D scene graph generation works, we adopt the constrained evaluation metric recall@K (R@K) and mean recall@K (mR@K). + +Our model is implemented in PyTorch, and trained using one NVIDIA GTX TITAN X GPU for 40 epochs with the ADAM optimizer. We use an initial learning rate of 0.0001, weight decay of 0.5, and mini-batch of 4. After 15, 25, and 40 epochs, we multiply the learning rate by 0.1. We adopt VoteNet [27] as the 3D object detection backbone to generate an initial set of 256 object candidates in the SGDet task. The Point Cloud Transformer is pre-trained on the 3DSSG dataset using the same settings in [13]. + +# 4.2. Comparison to State-of-the-Art + +We first compare our model with the following state-of-art 2D image scene graph generation models, modified to fit the 3DSSG dataset: IMP [42], MOTIFS [45] and VC-Tree [36] which creatively devise various message passing methods for improving graph representations. KERN [6], Schemata [32], and HetH [40] incorporate statistical priors and learning-based commonsense knowledge into the scene graph prediction. Therefore, we include these models to illustrate the superiority of the 3D spatial multimodal knowledge about the implicit hierarchical structure correlations between object pairs in the 3D scene. + +Our results in Tab. 1 lead to a few key observations: (1) Our model consistently outperforms all the existing approaches on all metrics and achieve $3.57\%$ boost on mR@50 in SGCls task and $10.08\%$ boost on R@50 in PredCls task. This indicates that leveraging regular patterns of 3D physical spaces is beneficial for scene graph prediction. (2) Our model outperforms traditional message passing model IMP and MOTIFS. Furthermore, our method achieves considerable improvement when compared to VCTree. (3) Compared to Schemata, our model achieves an improvement of $2.78\%$ and $10.19\%$ on R@50 in SGCls and PredCls, suggesting that our multimodal knowledge embedding is a better approach compared to the class-level prototypical representations learned from perceptual outputs in Schemata. 
(4) Compared with KERN and HetH, our model outperforms both by a significant margin, indicating that our proposed hierarchical structure of 3D spaces is superior to the graph structures they adopt to represent the input. (5) Performance is saturated in the SGDet task, mainly because object detection performance on this dataset is a bottleneck that limits the overall results.
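To make the combined training objective of Eqs. (13)-(14) and the optimization schedule of Sec. 4.1 concrete, the following is a minimal, illustrative PyTorch sketch. It is not the authors' released code: the parameter container and the three loss terms are hypothetical placeholders standing in for the actual object and relationship classification heads.

```python
import torch

# Dummy stand-in for the scene graph network's parameters (the real model is not shown here).
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=1e-4, weight_decay=0.5)
# Learning rate is multiplied by 0.1 after epochs 15, 25, and 40 (Sec. 4.1).
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15, 25, 40], gamma=0.1)

w_o, w_r = 0.75, 1.0  # loss weights from Eq. (13)

def combined_loss(loss_obj_init, loss_obj_final, loss_rel_final):
    """Eqs. (13)-(14): weighted final object/relation losses plus the initial object loss."""
    loss_final = w_o * loss_obj_final + w_r * loss_rel_final  # Eq. (13)
    return loss_obj_init + loss_final                         # Eq. (14)

for epoch in range(40):
    # In the real pipeline, each mini-batch (size 4) would produce the three loss
    # terms from the object and relationship heads; dummy scalars are used here.
    loss = combined_loss(params[0].sum(), params[0].sum(), params[0].sum())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()
```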
| Methods | R@50 / 100 | mR@50 / 100 |
| --- | --- | --- |
| **Knowledge $\mathcal{K}_s$** | | |
| w/o Hierarchical Tokens | 30.47 / 30.67 | 28.94 / 29.19 |
| w/o Support Edge | 30.55 / 30.74 | 29.17 / 29.47 |
| w/o Both | 28.41 / 28.47 | 27.13 / 27.52 |
| **Visual Context Encoding** | | |
| $\mathcal{G}_v$ replaced w/ $\mathcal{G}_{fc}$ | 28.17 / 28.32 | 26.28 / 26.29 |
| w/o RaGN | 26.43 / 26.57 | 24.23 / 24.36 |
| RaGN replaced w/ GCN | 31.03 / 31.21 | 29.67 / 29.88 |
| **Knowledge $\mathcal{K}_m$** | | |
| w/o $b_{j}^{o}$ and $b_{ij}^{ce}$ | 26.27 / 26.35 | 22.93 / 23.18 |
| w/o $c_{j}^{o}$ and $c_{ij}^{ce}$ as input | 28.14 / 28.31 | 25.05 / 25.31 |
| Ours | 31.50 / 31.64 | 30.29 / 30.56 |

Table 3. Quantitative results of different module configurations on the SGCls task.
+ +We also compare the performance of our model with state-of-the-art 3D point cloud-based scene graph prediction models to demonstrate the effectiveness of 3D spatial multimodal knowledge. We include several existing works, namely SGPN [37], EdgeGCN [46] and KISG [47], since they all report competitive results. SGPN and EdgeGCN exploit multi-dimensional edge features for explicit relationship modeling, whereas KISG learns a group of class-dependent prototypical representations for each semantic class. As shown in Tab. 2, our model clearly surpasses all of these methods. Benefiting from the hierarchical structure of 3D spaces, our model is able to reason about complex relationships hierarchically and systematically. Compared to SGPN and EdgeGCN, our model improves the R@50 by $2.92\%$ and $9.90\%$ in the SGCls and PredCls tasks. Our method also outperforms KISG by $2.04\%$ on R@50 in SGCls. KISG captures class-related priors in the scene from text-only ground truth labels; such knowledge cannot efficiently represent diverse relationships and complex 3D environments. In contrast, our model extracts indispensable 3D spatial multimodal knowledge that benefits scene graph prediction. + +# 4.3. Ablation Study + +For the ablation studies, we report results only in the Recall and mean Recall metrics on the SGCls task. The results are shown in Tab. 3. + +Hierarchical symbolic knowledge. We first look at the hierarchical symbolic knowledge graph $\kappa_{s}$ to investigate its effectiveness. We find that using ConceptNet without classifying the hierarchical tokens, or without adding support edges, leads to sub-optimal performance. Furthermore, using ConceptNet without either augmentation drops the performance significantly, indicating that both the hierarchical tokens and the support edges are crucial elements of the hierarchical structure of a 3D scene.
| Variants | PredCls R@50 | PredCls mR@50 | SGCls R@50 | SGCls mR@50 |
| --- | --- | --- | --- | --- |
| $\mathcal{G}_r$ | 62.74 | 58.25 | 28.17 | 27.28 |
| $\mathcal{G}_t$ | 68.41 | 66.59 | 31.59 | 30.35 |
| $\mathcal{G}_v$ (original) | 68.32 | 66.54 | 31.50 | 30.29 |
+ +Table 4. Comparison of different variants of the visual graph. + +
| Methods | Head | Body | Tail |
| --- | --- | --- | --- |
| SGPN [37] | 39.42 | 23.64 | 13.03 |
| EdgeGCN [46] | 39.51 | 23.85 | 13.15 |
| KISG [47] | 40.36 | 24.56 | 13.61 |
| Ours | 44.23 | 26.27 | 14.73 |
Table 5. The R@50 metric of biased relationship prediction on the SGCls task. + +Knowledge-guided visual context encoding. Next, we analyse the knowledge-guided visual context encoding module. We can see that replacing the hierarchical visual graph $\mathcal{G}_v$ with a fully-connected graph $\mathcal{G}_{fc}$ decreases the performance by a margin of $3.33\%$ on R@50, indicating that the hierarchical structure is superior to a plain fully-connected graph for modeling context. Furthermore, removing the subsequent region-aware graph network (RaGN) and directly fusing the multimodal knowledge embedding with the initial representation of each node and edge in the visual graph degrades the performance on all metrics. Replacing the region-aware graph network with a standard graph convolutional network also hurts the performance. + +3D spatial multimodal knowledge accumulation. Lastly, we examine the accumulated multimodal knowledge $\mathcal{K}_m$ to understand how $\mathcal{K}_m$ and the rest of the model interact. We first measure how much of the improvement comes from the 3D spatial multimodal knowledge $\mathcal{K}_m$. As shown in Tab. 3, the multimodal knowledge embedding significantly improves the R@50 and mR@50 by $5.23\%$ and $7.36\%$, respectively. In addition, dropping the contextual feature inputs $\mathbf{c}_i^o$ for nodes and $\mathbf{c}_{ij}^{e}$ for edges in the graph reasoning network decreases the performance by margins of $3.36\%$ and $5.24\%$ on R@50 and mR@50 in SGCls. This drop indicates that the contextual features play a pivotal role in bridging the heterogeneous gap between the symbolic knowledge and the visual information. + +# 4.4. Further Analysis + +![](images/91d8452bce554c9b263e50fd3cec6a362c62fd0729918728c32ceb9937a8def1.jpg) +Figure 3. Comparison of our model and KISG on the SGCls task when trained with noisy labels. + +Analysis on the hierarchical structure of 3D spaces. To validate the potential of the hierarchical visual graph $\mathcal{G}_v$ in capturing the inherent hierarchical structure of a 3D scene, we design two visual graph variants and compare them to $\mathcal{G}_v$: (1) Instead of using the hierarchical symbolic knowledge graph $\kappa_s$, we build a ground truth graph $\mathcal{G}_t$ based on the ground truth labels for support relations; each edge in $\mathcal{G}_t$ represents a ground truth support relationship of the input scene. (2) We also design a randomly connected graph $\mathcal{G}_r$, where we keep all of the nodes the same but randomize the edges that connect them (a toy construction of this variant is sketched below). As shown in Tab. 4, both $\mathcal{G}_v$ and $\mathcal{G}_t$ outperform $\mathcal{G}_r$ by a significant margin on all metrics. More importantly, we observe that $\mathcal{G}_t$ and $\mathcal{G}_v$ perform very similarly, with $\mathcal{G}_t$ only slightly ahead. These results confirm that the hierarchical visual graph $\mathcal{G}_v$ is an effective way of extracting the hierarchical structure patterns of 3D spaces.
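As a concrete illustration of the randomized control $\mathcal{G}_r$ described above, the following is a minimal sketch of its construction. This is our own toy version, not the authors' code, and it assumes the node set stays intact while the original number of edges is preserved, each edge being re-drawn between a uniformly sampled pair of distinct nodes.

```python
import random

def build_random_graph(nodes, edges, seed=0):
    """Toy G_r construction: keep the node set, but replace every edge of the
    hierarchical visual graph with an edge between a random pair of nodes."""
    rng = random.Random(seed)
    random_edges = set()
    while len(random_edges) < len(edges):
        i, j = rng.sample(range(len(nodes)), 2)  # two distinct node indices
        random_edges.add((i, j))
    return nodes, sorted(random_edges)

# Example with hypothetical object nodes and support-style edges.
nodes = ["floor", "table", "cup", "chair"]
edges = [(1, 0), (2, 1), (3, 0)]
print(build_random_graph(nodes, edges))
```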
Robustness of 3D spatial multimodal knowledge. Additionally, we investigate the robustness of the 3D spatial multimodal knowledge $\mathcal{K}_m$ by training our model with noisy labels. Specifically, we add different proportions of noise to the 3DSSG training set by replacing part of the ground truth relationships of the input scenes with randomly selected incorrect relationships. The performance of our model and KISG [47] on the SGCls task is reported in Fig. 3. We can see that the performance of KISG decreases drastically while ours decreases slowly as the noise rate increases. Under the $30\%$ noise rate, our model improves the R@50 metric by about $6.89\%$ over KISG, indicating that our model is considerably more robust. The main reason is that KISG captures its prior knowledge from text-only ground truth labels, so noise contained in the labels is easily absorbed into its knowledge base and affects relationship prediction. Unlike KISG, our model leverages the inherently hierarchical structures of 3D scenes and accumulates multimodal knowledge that is both label-free and reliable. + +Long-tail analysis. We also investigate how our model performs on the long-tail part of the dataset. To do this, we order all relationships by the frequency with which each relationship category occurs in triplets. We select the 5 most common relationship categories as the head, the 5 least common as the tail, and the remaining categories as the body. Tab. 5 reports the R@50 metric of our model on each category group. Our model achieves the best R@50 on the tail relationship categories among the compared methods, which shows its ability to mitigate the effect of sample imbalance. The main reason is that the hierarchical structures can be extracted accurately and, in turn, influence the prediction of other relationships. + +# 4.5. Qualitative Results + +We visualize intermediate results in Fig. 4(a-c). We can see that both the hierarchical visual graph $\mathcal{G}_v$ and the 3D scene graph $\mathcal{G}$ are well constructed. However, our model incorrectly classifies the relationship between Window1 and Floor, mainly because it fails to extract discriminative features for Window1, as there are few points within its bounding box; the hierarchical token of Window1 is assigned to the second layer while it should be in the third layer. We provide more visualization samples in the supplementary material. + +![](images/f391d0b44e1c763cc3b12171ec320e87da05b2d1452fa24e2ed50d9464e6783f.jpg) +(a) Input scene + +![](images/e53a70bc74cbd4d472bf57013b28d150ff1f3b03de0c8bc31ac7df70d5eadc8c.jpg) +(b) Hierarchical visual graph + +![](images/0452d5a6262ed068e721491e14b7f1c5bec28cc63e4aa23e55f642c389483f70.jpg) +(c) 3D scene graph +Figure 4. Visualizations of our predicted scene graph on the 3DSSG dataset. Red indicates misclassified objects or relationships. + +# 5. Conclusion + +We proposed a method for 3D scene graph prediction from raw point clouds. Our method injects the regular patterns of 3D physical spaces into a deep network to facilitate 3D scene graph prediction. Hierarchical symbolic knowledge is first constructed by exploiting external knowledge as the baseline to capture the hierarchical structure cues of a 3D scene. A knowledge-guided visual context encoding module then builds a hierarchical visual graph and learns contextualized features with a region-aware graph network. Finally, a 3D spatial multimodal knowledge accumulation module is proposed to regularize the semantic space of relationship prediction. Extensive experiments on the 3DSSG dataset show that our method outperforms existing state-of-the-art methods and can mitigate the effects of data imbalance and label noise.
In the future, we plan to exploit the attributes of 3D objects to build richer knowledge graphs and improve the prediction performance on attribute-focused relationships, such as *same symmetric as* and *same texture as*. + +# 6. Acknowledgments + +This work was supported in part by the National Natural Science Foundation of China under Grant 62003253, Grant 61973106, Grant U2013203, Grant U21A20482 and Grant U20A20185. Professor Ajmal Mian is the recipient of an Australian Research Council Future Fellowship Award (project number FT210100268) funded by the Australian Government. + +# References + +[1] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5664-5673, 2019. 1, 2 +[2] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. 2007. 2, 3 +[3] Sumithra Bhakthavatsalam, Kyle Richardson, Niket Tandon, and Peter Clark. Do dogs have whiskers? a new knowledge base of haspart relations. arXiv preprint arXiv:2006.07510, 2020. 2 +[4] Xiaojun Chang, Pengzhen Ren, Pengfei Xu, Zhihui Li, Xiaojiang Chen, and Alexander G Hauptmann. A comprehensive survey of scene graphs: Generation and application. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 2 +[5] Long Chen, Hanwang Zhang, Jun Xiao, Xiangnan He, Shiliang Pu, and Shih-Fu Chang. Counterfactual critic multi-agent training for scene graph generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4613-4623, 2019. 2 +[6] Tianshui Chen, Weihao Yu, Riquan Chen, and Liang Lin. Knowledge-embedded routing network for scene graph generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6163-6171, 2019. 2, 6 +[7] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 3 +[8] Yang Ding, Jing Yu, Bang Liu, Yue Hu, Mingxin Cui, and Qi Wu. Mukea: Multimodal knowledge extraction and accumulation for knowledge-based visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5089-5098, 2022. 2 +[9] Zijin Du, Hailiang Ye, and Feilong Cao. A novel local-global graph convolutional method for point cloud semantic segmentation. IEEE Transactions on Neural Networks and Learning Systems, 2022. 1 +[10] Mingtao Feng, Syed Zulqarnain Gilani, Yaonan Wang, Liang Zhang, and Ajmal Mian. Relation graph network for 3d object detection in point clouds. IEEE Transactions on Image Processing, 30:92-107, 2020. 1 +[11] Mingtao Feng, Liang Zhang, Xuefei Lin, Syed Zulqarnain Gilani, and Ajmal Mian. Point attention network for semantic segmentation of 3d point clouds. Pattern Recognition, 107:107446, 2020. 1 +[12] Chen Gao, Jinyu Chen, Si Liu, Luting Wang, Qiong Zhang, and Qi Wu. Room-and-object aware knowledge reasoning for remote embodied referring expression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3064-3073, 2021. 2 + +[13] Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer.
Computational Visual Media, 7(2):187-199, 2021. 4, 6 +[14] Alon Hafri and Chaz Firestone. The perception of relations. Trends in Cognitive Sciences, 2021. 1 +[15] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1 +[16] Maximilian Jaritz, Tuan-Hung Vu, Raoul De Charette, Emilie Wirbel, and Patrick Pérez. Cross-modal learning for domain adaptation in 3d semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1 +[17] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3668–3678, 2015. 2 +[18] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 5 +[19] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123(1):32-73, 2017. 2 +[20] Manyi Li, Akshay Gadi Patil, Kai Xu, Siddhartha Chaudhuri, Owais Khan, Ariel Shamir, Changhe Tu, Baoquan Chen, Daniel Cohen-Or, and Hao Zhang. Grains: Generative recursive autoencoders for indoor scenes. ACM Transactions on Graphics (TOG), 38(2):1-16, 2019. 1 +[21] Mengtian Li, Yuan Xie, Yunhang Shen, Bo Ke, Ruizhi Qiao, Bo Ren, Shaohui Lin, and Lizhuang Ma. Hybridcr: Weakly-supervised 3d point cloud semantic segmentation via hybrid contrastive regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14930-14939, 2022. 1 +[22] Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2949-2958, 2021. 1 +[23] Kenneth Marino, Xinlei Chen, Devi Parikh, Abhinav Gupta, and Marcus Rohrbach. Krisp: Integrating implicit and symbolic knowledge for open-domain knowledge-based vqa. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14111-14121, 2021. 2 +[24] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 2 +[25] Xuran Pan, Zhuofan Xia, Shiji Song, Li Erran Li, and Gao Huang. 3d object detection with pointformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7463-7472, 2021. 1 +[26] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In + +Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014. 4 +[27] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep hough voting for 3d object detection in point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9277-9286, 2019. 1, 6 +[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 
1 +[29] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. Point-net++ deep hierarchical feature learning on point sets in a metric space. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 5105-5114, 2017. 1 +[30] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From slam to spatial perception with 3d dynamic scene graphs. The International Journal of Robotics Research, 40(12-14):1510-1546, 2021. 1, 2 +[31] Morteza Sarafyazd and Mehrdad Jazayeri. Hierarchical reasoning by neural circuits in the frontal cortex. Science, 364(6441), 2019. 1 +[32] Sahand Sharifzadeh, Sina Moayed Baharlou, and Volker Tresp. Classification by attention: Scene graph classification with prior knowledge. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 5025-5033, 2021. 6 +[33] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In European conference on computer vision, pages 746-760. Springer, 2012. 1 +[34] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 567-576, 2015. 3 +[35] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 2, 3 +[36] Kaihua Tang, Hanwang Zhang, Baoyuan Wu, Wenhan Luo, and Wei Liu. Learning to compose dynamic tree structures for visual contexts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6619-6628, 2019. 2, 6 +[37] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3961-3970, 2020. 1, 2, 6, 7 +[38] Johanna Wald, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs with instance embeddings. International Journal of Computer Vision, pages 1-22, 2022. 1, 2 + +[39] Kai Wang, Yu-An Lin, Ben Weissmann, Manolis Savva, Angel X Chang, and Daniel Ritchie. Planit: Planning and instantiating indoor scenes with relation graph and spatial prior networks. ACM Transactions on Graphics (TOG), 38(4):1-15, 2019. 1 +[40] Wenbin Wang, Ruiping Wang, Shiguang Shan, and Xilin Chen. Sketching image gist: Human-mimetic hierarchical scene graph generation. In European Conference on Computer Vision, pages 222-239. Springer, 2020. 6 +[41] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. Scenegraphfusion: Incremental 3d scene graph prediction from rgb-d sequences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7515-7525, 2021. 1, 2 +[42] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5410–5419, 2017. 2, 6 +[43] Qiangeng Xu, Yiqi Zhong, and Ulrich Neumann. Behind the curtain: Learning occluded shapes for 3d object detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2893-2901, 2022. 1 +[44] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. 
In Proceedings of the European conference on computer vision (ECCV), pages 670-685, 2018. 2 +[45] Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5831-5840, 2018. 2, 6 +[46] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9705-9715, 2021. 1, 2, 4, 6, 7 +[47] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6, 7, 8 +[48] Yifeng Zhang, Ming Jiang, and Qi Zhao. Explicit knowledge incorporation for visual reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1356-1365, 2021. 2 +[49] Na Zhao, Tat-Seng Chua, and Gim Hee Lee. Few-shot 3d point cloud semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8873-8882, 2021. 1 \ No newline at end of file diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/images.zip b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..8699d6fcad3a77fae8170488cf30b99ae0414311 --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c24d50d2718ac1eaf56a8f3688b8d41535aed94de889c1daa77eb08aeef98266 +size 459655 diff --git a/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/layout.json b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1f80e1cc83384159964030b6c010c10023ced0fe --- /dev/null +++ b/2023/3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud/layout.json @@ -0,0 +1,9534 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 92, + 103, + 501, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 103, + 501, + 138 + ], + "spans": [ + { + "bbox": [ + 92, + 103, + 501, + 138 + ], + "type": "text", + "content": "3D Spatial Multimodal Knowledge Accumulation for Scene Graph Prediction in Point Cloud" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "spans": [ + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": "Mingtao Feng" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": " Haoran Hou" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": " Liang Zhang" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": " Zijie Wu" + }, + { + "bbox": 
[ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": " Yulan Guo" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": " Ajmal Mian" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": "Xidian University, " + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": "Hunan University, " + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": "Sun Yat-Sen University, " + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 52, + 160, + 566, + 191 + ], + "type": "text", + "content": "The University of Western Australia" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 217, + 190, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 217, + 190, + 228 + ], + "spans": [ + { + "bbox": [ + 143, + 217, + 190, + 228 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 228, + 290, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 228, + 290, + 526 + ], + "spans": [ + { + "bbox": [ + 46, + 228, + 290, + 526 + ], + "type": "text", + "content": "In-depth understanding of a 3D scene not only involves locating/recognizing individual objects, but also requires to infer the relationships and interactions among them. However, since 3D scenes contain partially scanned objects with physical connections, dense placement, changing sizes, and a wide variety of challenging relationships, existing methods perform quite poorly with limited training samples. In this work, we find that the inherently hierarchical structures of physical space in 3D scenes aid in the automatic association of semantic and spatial arrangements, specifying clear patterns and leading to less ambiguous predictions. Thus, they well meet the challenges due to the rich variations within scene categories. To achieve this, we explicitly unify these structural cues of 3D physical spaces into deep neural networks to facilitate scene graph prediction. Specifically, we exploit an external knowledge base as a baseline to accumulate both contextualized visual content and textual facts to form a 3D spatial multimodal knowledge graph. Moreover, we propose a knowledge-enabled scene graph prediction module benefiting from the 3D spatial knowledge to effectively regularize semantic space of relationships. Extensive experiments demonstrate the superiority of the proposed method over current state-of-the-art competitors. Our code is available at https://github.com/HHrEtvP/SMKA." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 537, + 128, + 550 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 555, + 288, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 288, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 288, + 687 + ], + "type": "text", + "content": "In recent years, much success has been achieved on 3D point cloud scene understanding such as semantic segmentation [9, 11, 15, 16, 21, 28, 29, 49] and object detection [10, 22, 25, 27, 43]. However, the 3D world is not only defined by objects but also by the relationships between objects. A 3D scene graph can abstract the environment as a graph where nodes represent objects and edges characterize the relationships between object pairs, which has already been recognized in recent seminal works [1, 30, 37, 38, 41, 46]. However, relationship graphs predicted by current methods are far from satisfactory due to the noisy, cluttered and par" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 215, + 539, + 350 + ], + "blocks": [ + { + "bbox": [ + 310, + 215, + 539, + 350 + ], + "lines": [ + { + "bbox": [ + 310, + 215, + 539, + 350 + ], + "spans": [ + { + "bbox": [ + 310, + 215, + 539, + 350 + ], + "type": "image", + "image_path": "8492445031b3f950e3dd7e8bc7d3d1476da3cdd7e5c625d9f8de6c45f71b4669.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 350, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 347, + 350, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 347, + 350, + 504, + 361 + ], + "type": "text", + "content": "Figure 1. A brief overview of our method." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 365, + 547, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 365, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 365, + 547, + 473 + ], + "type": "text", + "content": "tial nature of real 3D scans. Moreover, these data-driven methods treat sophisticated relationships in 3D space independently for classification using the geometric features proximity or fit, and are ignorant of commonsense or other useful 3D spatial cues beyond visual information. 3D objects in real scenes commonly have strongly structured regularities [33,39], whose semantic and spatial arrangements follow clear patterns, but still exhibit rich structural variations even within the scene category." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 473, + 548, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 548, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 548, + 713 + ], + "type": "text", + "content": "The key observation is that 3D scene structures are inherently hierarchical [20]. By definition, an instance can have multiple supports, lamps are standing on a table, chairs are supported by the floor and only the floor does not have any support, and it is unlikely that a pillow is supporting a couch. Although relationships themselves cast no light on the human eyes, a growing body of works [14, 31] suggest that even very complex relationship information is reasoned hierarchically and systemically according to the role of the prefrontal cortex. Relationships, such as support, can be extracted rapidly, are hard to ignore, and influence other relationships in the perceptual process. 
For example, a TV and a sofa are related since they together serve the function of 'watching TV', but these two objects can be far apart in a scene. Relationships of this kind are much more difficult, if not possible, to infer based on geometric analysis alone. The model can relate the table easily which supports the TV and use the table as a bridge to predict the 'front' relationship with sofa, where table and sofa are all supported by the floor and relationships within them is intuitive." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 124, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 124, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 124, + 703 + ], + "type": "text", + "content": "*Equal contribution" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 59, + 703, + 134, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 134, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 134, + 712 + ], + "type": "text", + "content": "† Corresponding author" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9182" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 275 + ], + "type": "text", + "content": "The underlying hierarchical structures in 3D scenes are label free and reliable, and can hence play an essential role in scene understanding at no additional cost. Existing 3D scene graph prediction models [1, 30, 37, 38, 41, 46] are oblivious to the underlying structures in the point cloud scenes. The question is how to take this prior knowledge into consideration to make the 3D scene graph achieve higher accuracy? KISG [47] proposes a graph auto-encoder to learn a closed set and ground truth prior knowledge from relationship triplets in data for 3D scene graph prediction. Although KISG [47] takes note of knowledge, it captures relevant prior knowledge from text-only ground truth labels, which merely contain facts expressed by label descriptions while lacking complex but indispensable multimodal knowledge for 3D scene graph prediction. In addition, noises contained in the manually annotated labels are easily included in the knowledge base and affects the prediction of relationships." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 275, + 289, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 275, + 289, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 275, + 289, + 550 + ], + "type": "text", + "content": "To address the above problems, we show that the implicit hierarchical structure correlations between object pairs and their relationships can be explicitly represented by a knowledge base. As shown in Fig. 1, we propose a 3D spatial multimodal knowledge accumulation module to explicitly merge the hierarchical structures of 3D scenes into the network to strengthen the 3D scene graph prediction process. Firstly, we filter the external commonsense knowledge base, classify the hierarchical tokens for each node, and add new support edges to form the hierarchical symbolic knowledge graph for 3D scenes. Secondly, we retrieve the hierarchical token from the reconstructed symbolic knowledge graph for object instances in 3D scenes to build a visual graph, and extract contextual features for nodes and edges using a region-aware graph network. Finally, to bridge the heterogeneous gap between the symbolic knowledge and visual information, we propose a graph reasoning network to correlate 3D spatial visual contents of scenes with textual facts. Conditioned on the learned vision-relevant 3D spatial multimodal knowledge, we incorporate this network into the relationships prediction stage as extra guidance, which can effectively regularize the distribution of possible relationships of object pairs and thus make the predictions less ambiguous." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 552, + 289, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 552, + 289, + 708 + ], + "spans": [ + { + "bbox": [ + 47, + 552, + 289, + 708 + ], + "type": "text", + "content": "Our main contributions are: 1) We are the first to explicitly unify the regular patterns of 3D physical spaces with the deep architecture to facilitate 3D scene graph prediction. 2) We propose a hierarchical symbolic knowledge construction module that exploits extra knowledge as the baseline to admit the hierarchical structure cues of 3D scene. 3) We introduce a knowledge-guided visual context encoding module to construct hierarchical visual graph and learn the contextualized features by a region-aware graph network. 4) We propose a 3D spatial multimodal knowledge accumulation module to regularize the semantic space of relationship prediction. Results show that the learned knowledge and proposed modules consistently boost 3D scene graph prediction performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 305, + 71, + 392, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 392, + 83 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 392, + 83 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 86, + 547, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 86, + 547, + 264 + ], + "spans": [ + { + "bbox": [ + 304, + 86, + 547, + 264 + ], + "type": "text", + "content": "2D Image-based Scene Graph Generation. Scene graph was first proposed for image retrieval [17], and subsequently received increasing attention in the vision community to produce graphical abstractions of images. 
Mainstream approaches [5, 36, 42, 44, 45] follow a two-step pipeline that first detects objects followed by classification of the relationship for each object pair. However, research on scene graphs has focused primarily on 2D images, ignoring 3D spatial characteristics such as position and geometry, and with limited spatial coverage. Our proposed method extends 2D scene graphs to 3D spaces, where the scene representation, network architecture and training mechanism all have to be altered in fundamental ways to meet the challenges arising from learning 3D scene structures and relationships. More detailed discussions can be found in the survey [4]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 265, + 548, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 265, + 548, + 503 + ], + "spans": [ + { + "bbox": [ + 304, + 265, + 548, + 503 + ], + "type": "text", + "content": "Knowledge Representation has been extensively studied to incorporate prior knowledge, e.g. DBPedia [2], ConceptNet [35], WordNet [24], VisualGenome [19] and hasPart [3], to aid numerous vision tasks [23]. Gao et al. [12] incorporated commonsense knowledge to learn the internal-external correlations among room and object entities for an agent to take proper decisions at each viewpoint. Zhang et al. [48] addressed the explainability of visual reasoning by introducing the explicit integration of external knowledge. Ding et al. [8] extracted the multimodal knowledge triplet to boost the performance of visual question answering. Chen et al. [6] constructed the prior knowledge of statistical correlations between object pairs and their relationships to address the issue of the uneven distribution over different relationships. Although previous studies have taken notice of knowledge in different vision tasks, they only implicitly mine the extra knowledge base or count the frequency of relationship pairs in datasets to strengthen the iterative message propagation between relationships and objects while ignoring the intrinsic properties of the data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 504, + 548, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 504, + 548, + 708 + ], + "spans": [ + { + "bbox": [ + 304, + 504, + 548, + 708 + ], + "type": "text", + "content": "Scene Graph Prediction in Point Clouds. With the recently proposed 3DSSG datasets containing 3D scene graph annotations [37], the community started to explore semantic relationship prediction in 3D real world data. SGPN [37, 38] is the first work to build a 3D scene graph using both objects and their interrelations as graph nodes. It then performs message propagation using graph convolutional networks. Kimera [30] proposed a 3D dynamic scene graph that captures metric and semantic aspects of a dynamic environment, where nodes represent spatial concepts at different levels of abstraction, and edges represent spatial-temporal relations among the nodes. EdgeGCN [46] exploits multi-dimensional edge features for explicit relationship modeling and explores two associated twinning interaction mechanisms for the independent evolution of scene graph representations. Wu et al. 
[41] proposed a method to incrementally build semantic scene graphs from a 3D environment given a sequence of" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "9183" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 547, + 205 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 547, + 205 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 547, + 205 + ], + "type": "image", + "image_path": "bdfdc2effd33af1603a1a213bab4bd1bcdf4e8dcf99263018eacc7818e8c103f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 207, + 547, + 242 + ], + "lines": [ + { + "bbox": [ + 45, + 207, + 547, + 242 + ], + "spans": [ + { + "bbox": [ + 45, + 207, + 547, + 242 + ], + "type": "text", + "content": "Figure 2. Method pipeline. (a) A hierarchical symbolic knowledge is firstly reconstructed to exploit external knowledge as the baseline and admit the hierarchical structure cues of 3D scene. (b) We then build a hierarchical visual graph and learn the contextualized features by the region-aware graph network. (c) Finally, a 3D spatial multimodal knowledge is accumulated to strengthen relationship predictions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 244, + 290, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 244, + 290, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 244, + 290, + 365 + ], + "type": "text", + "content": "RGB-D frames. KISG [47] uses the ground truth relationship triplets in the dataset to extract the prior knowledge and then fuses it in the scene graph prediction stage. One limitation of KISG [47] is that its relevant prior knowledge depends on the text-only dataset label while ignoring hierarchical and indispensable structures in the 3D scene for visual understanding. Our method differentiates itself from these related studies by exploring the 3D implicit structure pattern and introducing 3D spatial multimodal knowledge, which enables our model to predict relationships more accurately." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 372, + 130, + 386 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 372, + 130, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 372, + 130, + 386 + ], + "type": "text", + "content": "3. 
Methodology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": "Problem Formulation: The goal of 3D scene graph generation is to describe a given 3D point cloud scene " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " with a semantic scene graph " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{G} = \\{\\mathcal{V},\\mathcal{R}\\}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " represent instance object nodes and their inner relationship edges respectively. " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " forms a structured representation of the semantic content of the 3D scene. The nodes " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " consist of a set of objects " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "O = \\{o_1,o_2,\\dots ,o_n\\}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " with object " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " assigned to a certain class label " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": ", a corresponding set of bounding boxes " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "B = \\{b_{1},b_{2},\\dots ,b_{n}\\}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "b_{i}\\in \\mathbb{R}^{6}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": ", and a set of relationship edges " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{R} = \\{r_1,r_2,\\dots ,r_n\\}" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " with each " + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 46, + 389, + 290, + 534 + ], + "type": "text", + "content": " represents a predicate between a pair of objects. 
Our proposed model can be decomposed as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 542, + 287, + 566 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 542, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 58, + 542, + 287, + 566 + ], + "type": "interline_equation", + "content": "P (\\mathcal {G} | \\mathcal {I}) = P \\left(\\mathcal {K} _ {s} | \\mathcal {I}\\right) P \\left(\\mathcal {G} _ {v} \\mid \\mathcal {K} _ {s}, \\mathcal {I}\\right) P \\left(\\mathcal {R}, \\mathcal {K} _ {m} \\mid \\mathcal {G} _ {v}, \\mathcal {K} _ {s}, \\mathcal {I}\\right) \\tag {1}", + "image_path": "cbef86a0bff1f762529a702a0afac90ebf5283931d02254281f2c5a9fcb23818.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": "In this equation, the component " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "P(\\mathcal{K}_s|\\mathcal{I})" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": " collects all the symbolic entities from the datasets, filters the extra knowledge bases, and combines the hierarchical structure patterns of 3D scenes to construct the hierarchical symbolic knowledge " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": ". The component " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "P(\\mathcal{G}_v|\\mathcal{K}_s,\\mathcal{I})" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": " builds visual graphs for scenes under the guidance of knowledge " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": ", where contextual features for each node are extracted. Conditioned on the knowledge " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": " and visual graph " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": ", the component " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "P(\\mathcal{R},\\mathcal{K}_m|\\mathcal{G}_v,\\mathcal{K}_s,\\mathcal{I})" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": " accumulates the 3D spatial multimodal knowledge by correlating the knowledge " + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 567, + 290, + 712 + ], + "type": "text", + "content": " with visual content and predicts relationships simultaneously. Fig. 2 illustrates the overall pipeline of the proposed model." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 244, + 545, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 244, + 545, + 267 + ], + "spans": [ + { + "bbox": [ + 305, + 244, + 545, + 267 + ], + "type": "text", + "content": "3.1. Hierarchical Symbolic Knowledge Initialization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 271, + 547, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 271, + 547, + 463 + ], + "spans": [ + { + "bbox": [ + 304, + 271, + 547, + 463 + ], + "type": "text", + "content": "Unlike KISG [47], we do not use a closed set or ground truth relationship triplets from labels to learn prior knowledge. Hence, we must make an additional choice of what knowledge sources to use and how to clean them. Prior knowledge of object classes can be reliable predictors of the likelihoods of physical support relationships. For instance, it is unlikely that a cup is supported by a wall while tables are almost always supported by the floor. Therefore, given a set of objects, we can classify each object based on whether it is directly supported by the floor. The result is a three-layer hierarchical structure about objects in the 3D scene. In particular, the first layer only contains the floor since it does not have any support. The second layer contains objects directly supported by the floor, e.g. bed, table, and sofa. The third layer contains the remaining objects usually supported by objects in the second layer, e.g. pillow, cup, and cushion." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 464, + 548, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 464, + 548, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 464, + 548, + 654 + ], + "type": "text", + "content": "To exploit the regular structure patterns in 3D spaces and construct the scene graph hierarchically, we construct a hierarchical symbolic knowledge graph to guide the 3D spatial knowledge reasoning. Knowledge sources, such as ConceptNet [35] and DBPedia [2], are a valuable tool containing commonsense knowledge about the real world. In this work, we use ConceptNet as our external knowledge base which gives us more spatial relationships and common pairwise objects. While ConceptNet contains very useful information, it also includes some knowledge that is irrelevant to our model. To mitigate this issue, we limit the ConceptNet to common object categories in 3D point cloud scenes. We collect object categories from two widely-used 3D point cloud datasets, SUNRGBD [34] and Scannet [7], and then include edges that only include these objects. After filtering, we have a total of about 5,000 edges and 760 nodes." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "text", + "content": "We denote the external knowledge graph as " + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_e = \\{\\mathcal{V}_e, \\mathcal{E}_e\\}" + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "inline_equation", + "content": "\\mathcal{V}_e" + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_e" + }, + { + "bbox": [ + 305, + 654, + 548, + 703 + ], + "type": "text", + "content": " represent nodes and edges respectively. To merge the hierarchical structures in 3D spaces into the external knowledge graph and construct the hierarchical" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9184" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": "symbolic knowledge graph " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": ", we first use a pre-trained multi-layer perceptron (MLP) to classify the hierarchical tokens for each node in the external knowledge graph to distinguish the discrepancy among different layers of nodes. The hierarchical token of each node denotes its corresponding layer in the hierarchical structure. Each node is then initialized as the concatenation of its trainable hierarchical token and the word2vec (GloVe [26]) representation of the object category. Since the hierarchical structure of 3D spaces is built based on the physical support relationships between objects, we add additional edges representing support relationships between nodes to the external knowledge graph " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "\\kappa_{e}" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": ". 
Specifically, we define a new edge type: given two nodes " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_j" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": ", we connect " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_j" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": " using a support edge to represent the physical support relationship between " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "s_j" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": ". By definition, each node in the hierarchical structure is supported by the node in neighboring layers. Therefore, we add a support edge between two correlated nodes in neighboring layers. Each edge is initialized as the trainable GloVe representation of its edge type. Finally, we formulate the updated external knowledge graph as hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 47, + 72, + 289, + 336 + ], + "type": "text", + "content": ". Additional details can be found in supplementary." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 339, + 277, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 277, + 352 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 277, + 352 + ], + "type": "text", + "content": "3.2. Knowledge-guided Visual Context Encoding" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": "As shown in Fig. 2, taking a scene point cloud with object instance annotations as input, we build a hierarchical visual graph " + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v = \\{\\mathcal{V}_v,\\mathcal{E}_v\\}" + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "inline_equation", + "content": "\\nu_{v}" + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_v" + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": " denotes object instances and edges of object pairs respectively, under the guidance of the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": ". 
Then, a region-aware graph network is employed to propagate node messages through the visual graph " + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 354, + 289, + 450 + ], + "type": "text", + "content": " to learn the contextualized feature representation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "spans": [ + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": "Visual graph construction. We use Point Cloud Transformer [13] to extract spatial-aware visual features " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "f_{v}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " for each object instance. To encode the spatial features " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " of each bounding box, we use an MLP to lift the parameters of each bounding box (i.e., center and size) to feature space. We assign the semantic features " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "f_{w}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " for each object using an embedding table initialized by GloVe [26]. Each node in the visual graph is initialized as the concatenation of features " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "f_{v}, f_{t}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "f_{w}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": ". To capture the implicit structure of the point cloud scene, we route each node in the visual graph " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{v}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " into its corresponding layer according to the hierarchical tokens in hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": ". Then, we complete the edge set " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{v}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " of visual graph " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{v}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " by extracting potential physical relationships between nodes in the adjacent layers. 
Specifically, we add an edge representing physical support relationship between node pair in the visual graph " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{v}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": " if a support edge also exists between the corresponding nodes in the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 47, + 451, + 289, + 701 + ], + "type": "text", + "content": ". Similar to [46], we model the spatial interactions between node pairs and encode the initial edge embedding for node pairs using an MLP." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "content": "Contextualized features encoding. Objects sharing the" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "content": "same physical support are correlated since they have similar functional role in the environment and are generally in close proximity to each other. For instance, both pillow and clothes are usually supported by a bed. Therefore, we propose a region-aware graph network to jointly highlight the interrelated regions of each node in the visual graph " + }, + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 72, + 547, + 156 + ], + "type": "text", + "content": " and encode the hierarchical contexts of the input scene." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": "Given the initial representations of nodes and edges in the visual graph " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": ", the region-aware graph network iteratively updates the hidden state " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_i^{o,t}" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " of each node " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_{ij}^{e,t}" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " of each edge " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "(v_i,v_j)" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " at each time step " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " via message passing. Since the contextual regions around each node in the visual graph " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " can be defined as other nodes sharing the same physical support with it, each node first gathers information from nodes within the same contextual region to enrich its current hidden state before propagating messages along the edges in the visual graph " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": ". 
Specifically, the enriched hidden state " + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{h}}_i^{o,t}" + }, + { + "bbox": [ + 304, + 157, + 547, + 289 + ], + "type": "text", + "content": " of each node is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 364, + 298, + 547, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 298, + 547, + 326 + ], + "spans": [ + { + "bbox": [ + 364, + 298, + 547, + 326 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {h}} _ {i} ^ {o, t} = \\mathbf {h} _ {i} ^ {o, t} + \\sum_ {j \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {j} ^ {o, t}\\right) \\tag {2}", + "image_path": "ce5a77dcd119c5f6dd4bdcf81244a8dc5c78e390c1861a4fe35be206be8a01d4.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "inline_equation", + "content": "N_{r}(i)" + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "content": " contains nodes that share the same level support with node " + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "content": " is a feed forward network for non-linear transformation. For edge " + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "inline_equation", + "content": "(v_{i},v_{j})" + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "content": ", its enriched hidden state " + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{h}}_{ij}^{e,t}" + }, + { + "bbox": [ + 304, + 333, + 547, + 384 + ], + "type": "text", + "content": " is computed by:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 327, + 394, + 547, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 394, + 547, + 422 + ], + "spans": [ + { + "bbox": [ + 327, + 394, + 547, + 422 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {h}} _ {i j} ^ {e, t} = \\mathbf {h} _ {i j} ^ {e, t} + \\sum_ {k \\in N _ {r} (i)} \\psi \\left(\\mathbf {h} _ {k} ^ {o, t}\\right) + \\sum_ {s \\in N _ {r} (j)} \\psi \\left(\\mathbf {h} _ {s} ^ {o, t}\\right) \\tag {3}", + "image_path": "2d2c4b562b865ebb8731f0731b4017caeb819255a8d5e9490790beb4152acf5a.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 429, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 429, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 304, + 429, + 545, + 453 + ], + "type": "text", + "content": "After the feature representation enhancements, the message passing of nodes and edges can be formulated as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 368, + 459, + 547, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 459, + 547, + 474 + ], + "spans": [ + { + "bbox": [ + 368, + 459, + 547, + 474 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i} ^ {o, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}, 
\\mathbf {m} _ {i} ^ {o, t}\\right) \\tag {4}", + "image_path": "1d7f16f0492138affdfbc6b09dbd1e9f31b489cba2cb99e258ac1a222d411000.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 369, + 491, + 545, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 491, + 545, + 508 + ], + "spans": [ + { + "bbox": [ + 369, + 491, + 545, + 508 + ], + "type": "interline_equation", + "content": "\\mathbf {h} _ {i j} ^ {e, t + 1} = G R U \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right) \\tag {5}", + "image_path": "be5139ada07a6ee3540d9b51f069a3d82e580f51462b74326ac4fdabc0322704.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_i^{o,t}" + }, + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{ij}^{e,t}" + }, + { + "bbox": [ + 304, + 511, + 547, + 548 + ], + "type": "text", + "content": " are the incoming messages for updating each node and edge. The calculation of the message for each node is:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 347, + 554, + 545, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 554, + 545, + 582 + ], + "spans": [ + { + "bbox": [ + 347, + 554, + 545, + 582 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {v} (i)} \\left(\\varphi_ {n} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\tilde {\\mathbf {h}} _ {i j} ^ {e, t}\\right)\\right) \\tag {6}", + "image_path": "ec69fb6cfe37081c52c9a71379caff12fea2fe601b1280787e19c0b21d2b5fe8.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{N}_v(i)" + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": " denotes the neighbor nodes of " + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": " in the visual graph " + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\varphi_{n}" + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\varphi_{e}" + }, + { + "bbox": [ + 304, + 588, + 545, + 649 + ], + "type": "text", + "content": " are two non-linear transformation for associated nodes and edges. 
For each edge, we transform the hidden state of subject and object node by two MLPs before fusing them to obtain the message:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 366, + 655, + 545, + 672 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 655, + 545, + 672 + ], + "spans": [ + { + "bbox": [ + 366, + 655, + 545, + 672 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\tilde {\\mathbf {h}} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\tilde {\\mathbf {h}} _ {j} ^ {o, t}\\right) \\tag {7}", + "image_path": "c050672d08fcd9677c352b647b3f8cccb8a74d510f037dd2591c0b899a1b65d2.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": "We take the final hidden states of nodes and edges as the contextual feature " + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i^o" + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": " for each node " + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "inline_equation", + "content": "v_{i}\\in \\mathcal{V}_{v}" + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ij}^{e}" + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": " for each edge " + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "inline_equation", + "content": "(v_{i},v_{j})\\in \\mathcal{E}_{v}" + }, + { + "bbox": [ + 304, + 677, + 545, + 715 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9185" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 284, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 284, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 284, + 85 + ], + "type": "text", + "content": "3.3. Spatial Multimodal Knowledge Accumulation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "spans": [ + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "text", + "content": "Though our hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "text", + "content": " can provide high-quality knowledge about the hierarchical structures of point cloud scene, this information is largely limited to symbolic knowledge that can only be explicitly expressed by text-relevant labels for relationship triplets. 
Therefore, we propose a novel schema to accumulate 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "inline_equation", + "content": "\\kappa_{m}" + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "text", + "content": " progressively from the visual context via a graph reasoning network. We then incorporate the learned multimodal knowledge " + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "inline_equation", + "content": "\\kappa_{m}" + }, + { + "bbox": [ + 46, + 87, + 287, + 206 + ], + "type": "text", + "content": " and the contextual features to predict the possible relationships." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "spans": [ + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "text", + "content": "Reasoning on knowledge graph. Since the contextual features encode the implicit hierarchical structure patterns in 3D spaces, we design a graph reasoning network which utilizes the visual contextual features and textual facts from the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "text", + "content": " to accumulate 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "inline_equation", + "content": "\\kappa_{m}" + }, + { + "bbox": [ + 46, + 207, + 288, + 301 + ], + "type": "text", + "content": " by aligning the entities in the symbolic knowledge graph with related visual contextual features." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": "The graph reasoning network generates context for 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", which is in the form of embeddings that capture the regular structure patterns in 3D scenes for each node and edge in the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ". 
Given the contextual features of nodes and edges in visual graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", each node and edge in the graph reasoning network receives three inputs: (1) the trainable node or edge embedding in the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", (2) a " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "0/1" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " indicator of whether this node or edge appears in the visual graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", (3) the contextual feature " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i^o" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ij}^e" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " in the visual graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " corresponding to this node or edge, missing nodes and edges are padded with zero vectors. The graph reasoning network uses message passing to perform reasoning on hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ". 
Specifically, at each time step " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", to calculate the hidden states " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{d}_i^{o,t}" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " for all nodes " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "s_i \\in \\mathcal{V}_s" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{d}_{ij}^{e,t}" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": " for all edges " + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "inline_equation", + "content": "(s_i, s_j) \\in \\mathcal{E}_s" + }, + { + "bbox": [ + 46, + 302, + 288, + 529 + ], + "type": "text", + "content": ", each node and edge first gather messages from their neighbors through the graph structure then update their hidden states:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 536, + 287, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 536, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 108, + 536, + 287, + 552 + ], + "type": "interline_equation", + "content": "\\mathbf {d} _ {i} ^ {o, t + 1} = G R U \\left(\\mathbf {d} _ {i} ^ {o, t}, \\mathbf {m} _ {i} ^ {o, t}\\right), \\tag {8}", + "image_path": "38a6a2bb574c1e8c2a69cc16a99228950294f188ba633cf63d17002cdb473b4e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 569, + 287, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 569, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 108, + 569, + 287, + 586 + ], + "type": "interline_equation", + "content": "\\mathbf {d} _ {i j} ^ {e, t + 1} = G R U \\left(\\mathbf {d} _ {i j} ^ {e, t}, \\mathbf {m} _ {i j} ^ {e, t}\\right), \\tag {9}", + "image_path": "43e619045fe8fb6a95ee130563db6b4abd4b38b174333104da68520d9d98354a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_i^{o,t}" + }, + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_{ij}^{e,t}" + }, + { + "bbox": [ + 47, + 589, + 287, + 615 + ], + "type": "text", + "content": " are the incoming messages for nodes and edges. 
The incoming message for each node is" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 622, + 287, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 622, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 88, + 622, + 287, + 649 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {i} ^ {o, t} = \\sum_ {j \\in N _ {k} (i)} \\left(\\varphi_ {n} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right) + \\varphi_ {e} \\left(\\mathbf {d} _ {i j} ^ {e, t}\\right)\\right), \\tag {10}", + "image_path": "5af8ba6b39a7c45cbc86192de69647c528d4b13718a95207cc42307510a8b0ce.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "spans": [ + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "inline_equation", + "content": "N_{k}(i)" + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "text", + "content": " denotes the neighbor nodes of node " + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "text", + "content": " in the knowledge graph " + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_s" + }, + { + "bbox": [ + 47, + 656, + 287, + 693 + ], + "type": "text", + "content": ". Similar to Eq. (7), the incoming message for each edge is" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 700, + 287, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 287, + 715 + ], + "type": "interline_equation", + "content": "\\mathbf {m} _ {i j} ^ {e, t} = \\varphi_ {s} \\left(\\mathbf {d} _ {i} ^ {o, t}\\right) + \\varphi_ {o} \\left(\\mathbf {d} _ {j} ^ {o, t}\\right). \\tag {11}", + "image_path": "b0d1b88d233ee2f43c5cc2c1d8456b8e39f0fba375e3b1ed2283f746bbd232f3.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "We take the sum of the stacked hidden states as the 3D spatial multimodal knowledge embedding " + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_i^o" + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": " for all nodes and " + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_{ij}^{e}" + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": " for all edges in the symbolic knowledge graph " + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 304, + 72, + 545, + 108 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": "Knowledge-enabled Scene Graph Prediction. 
To incorporate the 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": " into scene graph inference, we propose fusing the multimodal knowledge embedding with the contextual features in the visual graph to facilitate 3D scene graph prediction. Towards this goal, we utilize an MLP as object detection head to predict confident initial class guesses given the contextual node features. We then select the three most confident multimodal knowledge embeddings for each node. For edges in the visual graph, we select the three most confident object categories for the subject and object node based on the initial guesses. We then retrieve the multimodal knowledge embedding using the predicted subject and object categories. Since the multimodal knowledge embedding and the contextual features are in different feature spaces, we transform them by two MLPs " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\varphi_b" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\varphi_c" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": " respectively before fusing them. For each node in the visual graph, we fuse the retrieved multimodal knowledge embedding " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{b}^k\\}_{k=1,2,3}" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": " and the contextual node feature " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i^o" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": " to obtain the knowledge-enabled contextual feature " + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^o" + }, + { + "bbox": [ + 304, + 108, + 547, + 350 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 361, + 357, + 545, + 390 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 357, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 361, + 357, + 545, + 390 + ], + "type": "interline_equation", + "content": "\\mathbf {f} _ {i} ^ {o} = \\phi \\left(\\varphi_ {c} \\left(\\mathbf {c} _ {i} ^ {o}\\right) + \\varphi_ {b} \\left(\\sum_ {k = 1} ^ {3} \\mathbf {b} ^ {k}\\right)\\right). \\tag {12}", + "image_path": "6c3ee0f7cd343b8b4dabeecf15137c241fba265d416109f590053b8b7842d373.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 399, + 545, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 545, + 434 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 545, + 434 + ], + "type": "text", + "content": "For each edge in the visual graph, the multimodal knowledge embedding is fused with its contextual feature in the same way as the node." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "content": "Equipped with the 3D spatial multimodal knowledge-enabled contextual features " + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^o" + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "content": " for nodes and " + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{ij}^{e}" + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "content": " for edges in the visual graph, we generate the scene graph by decoding the contextual features using a standard graph convolution network (GCN) [18]. We assume that each object pair can have a relationship (including none) and fully connect them as a graph where relationships are represented as edges. Each node is initialized by its contextual node feature " + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_i^o" + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "content": ", and each edge is initialized either by the contextual edge feature " + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{ij}^{e}" + }, + { + "bbox": [ + 304, + 434, + 545, + 649 + ], + "type": "text", + "content": " or the contextual features of its subject and object nodes if the edge is not presented in the visual graph. The last part of the GCN consists of two detection heads for object and relationship classification. The object detection head takes the decoded node features as input to predict the object classification possibilities. The relationship prediction head first fuses the decoded subject and object node features with the decoded edge features, then predicts a discrete distribution over all possible relationship classes." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "spans": [ + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "text", + "content": "Loss Function. We adopt the standard cross entropy loss for object and relationship classification in our model. Since the contextual node feature " + }, + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i^o" + }, + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "text", + "content": " is used to predict the initial class guesses, we use a cross entropy loss " + }, + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{init}^o" + }, + { + "bbox": [ + 304, + 650, + 545, + 710 + ], + "type": "text", + "content": " for the initial detection. 
For the final prediction, we use two cross entropy" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9186" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 96, + 70, + 500, + 174 + ], + "blocks": [ + { + "bbox": [ + 96, + 70, + 500, + 174 + ], + "lines": [ + { + "bbox": [ + 96, + 70, + 500, + 174 + ], + "spans": [ + { + "bbox": [ + 96, + 70, + 500, + 174 + ], + "type": "table", + "html": "
MethodsPredClsSGClsSGDet
R@50/100mR@50/100R@50/100mR@50/100R@50/100mR@50/100
3D+IMP [42]48.15 / 48.7221.56 / 21.8517.41 / 17.899.06 / 9.2324.54 / 24.5721.71 / 21.72
3D+MOTIFS [45]52.43 / 53.3724.35 / 24.5218.34 / 18.579.74 / 9.8626.58 / 26.5924.12 / 24.17
3D+VCTree [36]53.12 / 54.3824.75 / 24.9119.93 / 20.2410.34 / 10.5527.58 / 27.6224.92 / 24.94
3D+KERN [6]54.74 / 56.5325.21 / 25.8321.41 / 21.7811.02 / 11.3627.75 / 27.7824.03 / 24.05
3D+Schemata [32]58.13 / 59.1142.11 / 42.8328.72 / 28.9726.72 / 27.0528.12 / 28.1325.29 / 25.30
3D+HetH [40]58.24 / 58.7542.53 / 42.7428.83 / 29.0526.68 / 26.8528.17 / 28.1825.31 / 25.32
Ours68.32 / 69.4966.54 / 66.9231.50 / 31.6430.29 / 30.5629.41 / 29.4425.35 / 25.36
", + "image_path": "e80cfd67ae8d81971ea27900b24278d2e23f969be222d50115ec29ab31b8ac10.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 101, + 193, + 492, + 269 + ], + "blocks": [ + { + "bbox": [ + 76, + 176, + 516, + 189 + ], + "lines": [ + { + "bbox": [ + 76, + 176, + 516, + 189 + ], + "spans": [ + { + "bbox": [ + 76, + 176, + 516, + 189 + ], + "type": "text", + "content": "Table 1. Comparison with state-of-the-art 2D scene graph prediction methods re-implemented to work on 3DSSG dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 101, + 193, + 492, + 269 + ], + "lines": [ + { + "bbox": [ + 101, + 193, + 492, + 269 + ], + "spans": [ + { + "bbox": [ + 101, + 193, + 492, + 269 + ], + "type": "table", + "html": "
MethodsPredClsSGClsSGDet
R@50/100mR@50/100R@50/100mR@50/100R@50/100mR@50/100
SGPN [37]57.71 / 58.0538.12 / 38.6728.39 / 28.7422.23 / 22.57- / -- / -
EdgeGCN [46]58.42 / 59.1138.84 / 39.3528.58 / 28.9322.67 / 23.33- / -- / -
KISG [47]64.47 / 64.9363.19 / 63.5229.46 / 29.6528.20 / 28.64- / -- / -
Ours68.32 / 69.4966.54 / 66.9231.50 / 31.6430.29 / 30.5629.41 / 29.4425.35 / 25.36
", + "image_path": "f92ec95e66176bd4201bd39b7cfdd1688b4b6763c74acadaa01bb246b07acf34.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 141, + 270, + 451, + 282 + ], + "lines": [ + { + "bbox": [ + 141, + 270, + 451, + 282 + ], + "spans": [ + { + "bbox": [ + 141, + 270, + 451, + 282 + ], + "type": "text", + "content": "Table 2. Comparison with 3D scene graph prediction methods on the 3DSSG dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "spans": [ + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "text", + "content": "losses " + }, + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{final}^{o}" + }, + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{final}^{r}" + }, + { + "bbox": [ + 46, + 286, + 288, + 309 + ], + "type": "text", + "content": " for the object and relationship classification:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 100, + 316, + 288, + 331 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 316, + 288, + 331 + ], + "spans": [ + { + "bbox": [ + 100, + 316, + 288, + 331 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f i n a l}} = w _ {o} \\mathcal {L} _ {\\text {f i n a l}} ^ {o} + w _ {r} \\mathcal {L} _ {\\text {f i n a l}} ^ {r} \\tag {13}", + "image_path": "b98b47eaf0207943c3011df7ac90f2ecc50f3e4596fb535bb2968c20b4f10c29.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "inline_equation", + "content": "w_{o}" + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "inline_equation", + "content": "w_{r}" + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "content": " are the weights for object and relation loss. In our experiment, we set " + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "inline_equation", + "content": "w_{o}" + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "content": " to 0.75 and " + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "inline_equation", + "content": "w_{r}" + }, + { + "bbox": [ + 46, + 335, + 288, + 371 + ], + "type": "text", + "content": " to 1. 
Our final loss function can be formulated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 125, + 378, + 288, + 392 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 378, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 125, + 378, + 288, + 392 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {i n i t}} ^ {o} + \\mathcal {L} _ {\\text {f i n a l}} \\tag {14}", + "image_path": "d9255dc9fafec7e3182ac3be5a7374e061225df752a2d78e475faf8f4ab40708.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 394, + 129, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 394, + 129, + 408 + ], + "spans": [ + { + "bbox": [ + 47, + 394, + 129, + 408 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 411, + 203, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 411, + 203, + 424 + ], + "spans": [ + { + "bbox": [ + 47, + 411, + 203, + 424 + ], + "type": "text", + "content": "4.1. Experimental Configuration" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 427, + 288, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 427, + 288, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 427, + 288, + 606 + ], + "type": "text", + "content": "We evaluate our model on 3DSSG dataset [37]. Following [47], we select 160 object categories and 27 relationship classes for detection. We compare our model with others in three standard tasks proposed in [42]. (1) Predicate Classification (PredCls): Given the ground truth 3D bounding boxes and their corresponding semantic labels, our model classifies the relationship between each object pair. (2) Scene Graph Classification (SGCls): Given the ground truth 3D bounding boxes, our model predicts the relationships as well as the object categories jointly. (3) Scene Graph Generation (SGDet): Given the raw point cloud, our model detects 3D objects, their semantic information, as well as their relationships in an end-to-end manner. Following existing 2D and 3D scene graph generation works, we adopt the constrained evaluation metric recall@K (R@K) and mean recall@K (mR@K)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 607, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 607, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 607, + 288, + 715 + ], + "type": "text", + "content": "Our model is implemented in PyTorch, and trained using one NVIDIA GTX TITAN X GPU for 40 epochs with the ADAM optimizer. We use an initial learning rate of 0.0001, weight decay of 0.5, and mini-batch of 4. After 15, 25, and 40 epochs, we multiply the learning rate by 0.1. We adopt VoteNet [27] as the 3D object detection backbone to generate an initial set of 256 object candidates in the SGDet task. The Point Cloud Transformer is pre-trained on the 3DSSG dataset using the same settings in [13]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 285, + 476, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 285, + 476, + 298 + ], + "spans": [ + { + "bbox": [ + 305, + 285, + 476, + 298 + ], + "type": "text", + "content": "4.2. 
Comparison to State-of-the-Art" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 301, + 547, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 301, + 547, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 301, + 547, + 433 + ], + "type": "text", + "content": "We first compare our model with the following state-of-art 2D image scene graph generation models, modified to fit the 3DSSG dataset: IMP [42], MOTIFS [45] and VC-Tree [36] which creatively devise various message passing methods for improving graph representations. KERN [6], Schemata [32], and HetH [40] incorporate statistical priors and learning-based commonsense knowledge into the scene graph prediction. Therefore, we include these models to illustrate the superiority of the 3D spatial multimodal knowledge about the implicit hierarchical structure correlations between object pairs in the 3D scene." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "content": "Our results in Tab. 1 lead to a few key observations: (1) Our model consistently outperforms all the existing approaches on all metrics and achieve " + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "inline_equation", + "content": "3.57\\%" + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "content": " boost on mR@50 in SGCls task and " + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "inline_equation", + "content": "10.08\\%" + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "content": " boost on R@50 in PredCls task. This indicates that leveraging regular patterns of 3D physical spaces is beneficial for scene graph prediction. (2) Our model outperforms traditional message passing model IMP and MOTIFS. Furthermore, our method achieves considerable improvement when compared to VCTree. (3) Compared to Schemata, our model achieves an improvement of " + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "inline_equation", + "content": "2.78\\%" + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "inline_equation", + "content": "10.19\\%" + }, + { + "bbox": [ + 304, + 434, + 548, + 684 + ], + "type": "text", + "content": " on R@50 in SGCls and PredCls, suggesting that our multimodal knowledge embedding is a better approach compared to the class-level prototypical representations learned from perceptual outputs in Schemata. (4) Compared with KERN and HetH, our proposed hierarchical structure of 3D spaces is superior to the graph structure they adopted to represent the input as our model outperforms them with a significant margin. (5) The performance has been saturated in the SGDet task. This is mainly because object detection performance on this dataset is a bottleneck that limits the performance." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 684, + 547, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 684, + 547, + 708 + ], + "spans": [ + { + "bbox": [ + 305, + 684, + 547, + 708 + ], + "type": "text", + "content": "We also compare the performance of our model with the state-of-the-art 3D point cloud-based scene graph predic" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9187" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 66, + 70, + 267, + 255 + ], + "blocks": [ + { + "bbox": [ + 66, + 70, + 267, + 255 + ], + "lines": [ + { + "bbox": [ + 66, + 70, + 267, + 255 + ], + "spans": [ + { + "bbox": [ + 66, + 70, + 267, + 255 + ], + "type": "table", + "html": "
MethodsR@50/100mR@50/100
Knowledge Ks
w/o Hierarchical Tokens30.47 / 30.6728.94 / 29.19
w/o Support Edge30.55 / 30.7429.17 / 29.47
w/o Both28.41 / 28.4727.13 / 27.52
Visual Context Encoding
Gv replaced w/ Gfc28.17 / 28.3226.28 / 26.29
w/o RaGN26.43 / 26.5724.23 / 24.36
RaGN replaced w/ GCN31.03 / 31.2129.67 / 29.88
Knowledge Km
w/o bjo and bceij26.27 / 26.3522.93 / 23.18
w/o cjo and cceij as input28.14 / 28.3125.05 / 25.31
Ours31.50 / 31.6430.29 / 30.56
", + "image_path": "a3ba697e93c79f947fbd19d8b97f836a4c6fe9331d965fdf1589dd897920098a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "spans": [ + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "text", + "content": "tion models to demonstrate the effectiveness of 3D spatial multimodal knowledge. We include several existing works such as SGPN [37], EdgeGCN [46] and KISG [47] since they all report competitive results. SGPN and EdgeGCN exploit multi-dimensional edge features for explicit relationship modeling whereas KISG learns a group of class-dependent prototypical representations for each semantic class. As shown in Tab. 2, our model dominantly surpasses all methods. Benefiting from the hierarchical structure of 3D spaces, our model is able to reason complex relationship hierarchically and systematically. Compared to SGPN and EdgeGCN, our model improves the R@50 by " + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "inline_equation", + "content": "2.92\\%" + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "inline_equation", + "content": "9.90\\%" + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "text", + "content": " in SGCls and PredCls tasks. We can also see that our method outperforms KISG by " + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "inline_equation", + "content": "2.04\\%" + }, + { + "bbox": [ + 46, + 284, + 289, + 523 + ], + "type": "text", + "content": " on R@50 in SGCls. KISG captures class-related priors in the scene from text-only ground truth labels. Such knowledge cannot efficiently represent diverse relationships and complex 3D environments. In contrast, our model extracts indispensable 3D spatial multimodal knowledge which benefits the scene graph prediction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 528, + 141, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 528, + 141, + 541 + ], + "spans": [ + { + "bbox": [ + 47, + 528, + 141, + 541 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 544, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 578 + ], + "type": "text", + "content": "We only report the performance results in the Recall and mean Recall metrics on the SGCs task for ablation studies. The results are shown in Tab. 3." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 579, + 288, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 579, + 288, + 686 + ], + "spans": [ + { + "bbox": [ + 46, + 579, + 288, + 686 + ], + "type": "text", + "content": "Hierarchical symbolic knowledge. We first look at the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 46, + 579, + 288, + 686 + ], + "type": "inline_equation", + "content": "\\kappa_{s}" + }, + { + "bbox": [ + 46, + 579, + 288, + 686 + ], + "type": "text", + "content": " to investigate its effectiveness. Specifically, we find that using ConceptNet without classifying the hierarchical tokens or adding support edges leads to sub-optimal performance. 
Furthermore, using ConceptNet without any augmentation drops the performance significantly, indicating that both the hierarchical tokens and support edges are crucial elements of the hierarchical structures in 3D scene." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 687, + 288, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 687, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 687, + 288, + 712 + ], + "type": "text", + "content": "Knowledge-guided visual context encoding. Next, we analyse the knowledge-guided visual context encoding mod" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 326, + 71, + 525, + 135 + ], + "blocks": [ + { + "bbox": [ + 47, + 258, + 287, + 280 + ], + "lines": [ + { + "bbox": [ + 47, + 258, + 287, + 280 + ], + "spans": [ + { + "bbox": [ + 47, + 258, + 287, + 280 + ], + "type": "text", + "content": "Table 3. Quantitative results of different module configurations on the SGCls task." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 326, + 71, + 525, + 135 + ], + "lines": [ + { + "bbox": [ + 326, + 71, + 525, + 135 + ], + "spans": [ + { + "bbox": [ + 326, + 71, + 525, + 135 + ], + "type": "table", + "html": "
VariantsPredCISSGCIS
R@50mR@50R@50mR@50
Gr62.7458.2528.1727.28
Gt68.4166.5931.5930.35
Gv(original)68.3266.5431.5030.29
", + "image_path": "3e9b7d32cf9049d67971221692f00c3d6bc227f543ee94fd2c83bf247e003f8f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 348, + 156, + 503, + 220 + ], + "blocks": [ + { + "bbox": [ + 314, + 138, + 536, + 149 + ], + "lines": [ + { + "bbox": [ + 314, + 138, + 536, + 149 + ], + "spans": [ + { + "bbox": [ + 314, + 138, + 536, + 149 + ], + "type": "text", + "content": "Table 4. Comparison of different variants of the visual graph." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 348, + 156, + 503, + 220 + ], + "lines": [ + { + "bbox": [ + 348, + 156, + 503, + 220 + ], + "spans": [ + { + "bbox": [ + 348, + 156, + 503, + 220 + ], + "type": "table", + "html": "
MethodsHeadBodyTail
SGPN [37]39.4223.6413.03
EdgeGCN [46]39.5123.8513.15
KISG [37]40.3624.5613.61
Ours44.2326.2714.73
", + "image_path": "e017f8e38feb4e81ee83d1ba928d1da22712e7a7007b78b95a6a95a9ff0f3f13.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 223, + 545, + 245 + ], + "lines": [ + { + "bbox": [ + 305, + 223, + 545, + 245 + ], + "spans": [ + { + "bbox": [ + 305, + 223, + 545, + 245 + ], + "type": "text", + "content": "Table 5. The R@50 metric of biased relationship prediction on the SGCIs task." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "text", + "content": "ule. We can see that replacing the hierarchical visual graph " + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "text", + "content": " with a fully-connected graph " + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{fc}" + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "text", + "content": " decreases the performance by a margin of " + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "inline_equation", + "content": "3.33\\%" + }, + { + "bbox": [ + 304, + 250, + 547, + 380 + ], + "type": "text", + "content": " on R@50, indicating that the hierarchical structure is superior to a plain fully-connected graph in terms of modeling context. Furthermore, removing the subsequent region-aware graph network (RaGN) and directly fusing the multimodal knowledge embedding with the initial representation of each node and edge in the visual graph negatively impacts the performance on all metrics. Replacing the region-aware graph network with a standard graph convolution network also hurts the performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "spans": [ + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": "3D spatial multimodal knowledge accumulation. Lastly, we examine the accumulated multimodal knowledge " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " to learn about how " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " and rest of the model interact. We first see how much of the improvement comes from the 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": ". As shown in Tab. 
3, the multimodal knowledge embedding significantly improves the R@50 and mR@50 by " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "5.23\\%" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "7.36\\%" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " respectively. In addition, dropping the contextual feature input " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i^o" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " for nodes and " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_{ij}^{e}" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " for edges in the graph reasoning network decreases the performance by a margin of " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "3.36\\%" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "inline_equation", + "content": "5.24\\%" + }, + { + "bbox": [ + 304, + 381, + 547, + 548 + ], + "type": "text", + "content": " on R@50 and mR@50 in SGCls. This drop in performance indicates that the contextual feature plays a pivotal role in bridging the heterogeneous gap between the symbolic knowledge and visual information." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 553, + 409, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 553, + 409, + 566 + ], + "spans": [ + { + "bbox": [ + 306, + 553, + 409, + 566 + ], + "type": "text", + "content": "4.4. Further Analysis" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": "Analysis on the hierarchical structure of 3D spaces. To validate the potential of the hierarchical visual graph " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": " in capturing the inherent hierarchical structure of a 3D scene, we design two visual graph variants and compare them to the hierarchical visual " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": ": (1) Instead of using the hierarchical symbolic knowledge graph " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\kappa_s" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": ", we build a ground truth graph " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_t" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": " based on the ground truth labels for support relations. 
In particular, each edge in " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_t" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": " represents the ground truth support relationship of the input scene. (2) We also design a randomly connected graph " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_r" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": ", where we keep all of the nodes the same but randomize the edges that connect them. As shown in Tab. 4, both " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_t" + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "text", + "content": " outperform " + }, + { + "bbox": [ + 304, + 569, + 547, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_r" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9188" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 70, + 265, + 180 + ], + "blocks": [ + { + "bbox": [ + 70, + 70, + 265, + 180 + ], + "lines": [ + { + "bbox": [ + 70, + 70, + 265, + 180 + ], + "spans": [ + { + "bbox": [ + 70, + 70, + 265, + 180 + ], + "type": "image", + "image_path": "91d8452bce554c9b263e50fd3cec6a362c62fd0729918728c32ceb9937a8def1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 181, + 287, + 203 + ], + "lines": [ + { + "bbox": [ + 46, + 181, + 287, + 203 + ], + "spans": [ + { + "bbox": [ + 46, + 181, + 287, + 203 + ], + "type": "text", + "content": "Figure 3. Comparison of our model and KISG on the SGCs task when trained with noisy labels." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "spans": [ + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": "with a significant margin on all metrics. 
More importantly, we observe that " + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_t" + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": " perform mostly similar while " + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_t" + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": " slightly outperforms " + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": ". The results confirm that the hierarchical visual graph " + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 46, + 206, + 289, + 266 + ], + "type": "text", + "content": " is one of the more optimal ways of extracting the hierarchical structure patterns of 3D spaces." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "text", + "content": "Robustness of 3D spatial multimodal knowledge. Additionally, we investigate the robustness of the 3D spatial multimodal knowledge " + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_m" + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "text", + "content": " by training our model with noisy labels. Specifically, we add different proportions of noises into the 3DSSG training set by replacing part of ground truth relationships with the randomly selected wrong relationships for input scenes. The performance of our model and KISG [47] on the SGCs task is reported in Fig. 3. We can see that, the performance of KISG decreases drastically while ours decreases slowly with increasing noise rate. Under the " + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "text", + "content": " noise rate condition, our model improves the R@50 metric by about " + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "inline_equation", + "content": "6.89\\%" + }, + { + "bbox": [ + 46, + 266, + 290, + 504 + ], + "type": "text", + "content": " over KISG, which indicates that our model achieves improved robustness over KISG. The main reason is that KISG captures relevant prior knowledge from text-only ground truth labels and noises contained in the labels are easily included in their knowledge base and affects the prediction of relationships. Different with KISG, our model leverages the inherently hierarchical structures of 3D scenes and accumulates multimodal knowledge which is both label free and reliable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 506, + 290, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 290, + 673 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 290, + 673 + ], + "type": "text", + "content": "Long-tail analysis. 
We also investigate how our model performs on the long-tail part of the dataset. To do this, we order all the relationships based on the frequency of each relationship category occurring in triplets. We select the 5 most common relationship categories as the head, the 5 least common relationship categories as the tail, and the rest of the categories as the body. Tab. 5 reports the R@50 metric on each long-tail category groups of our model. Moreover, our model achieves best performance when evaluating the R@50 metric on the tail relationship categories, which shows that our model has the ability to mitigate the effect of sample imbalance. The main reason is that the hierarchical structures can be extracted accurately which influence other relationships in the prediction process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 673, + 160, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 673, + 160, + 686 + ], + "spans": [ + { + "bbox": [ + 47, + 673, + 160, + 686 + ], + "type": "text", + "content": "4.5. Qualitative Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "We visualize intermediate results in Fig. 4(a-c). We can see that both the hierarchical visual graph " + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_v" + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": " and 3D scene" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 377, + 70, + 460, + 126 + ], + "blocks": [ + { + "bbox": [ + 377, + 70, + 460, + 126 + ], + "lines": [ + { + "bbox": [ + 377, + 70, + 460, + 126 + ], + "spans": [ + { + "bbox": [ + 377, + 70, + 460, + 126 + ], + "type": "image", + "image_path": "f391d0b44e1c763cc3b12171ec320e87da05b2d1452fa24e2ed50d9464e6783f.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 131, + 415, + 244 + ], + "blocks": [ + { + "bbox": [ + 395, + 126, + 439, + 133 + ], + "lines": [ + { + "bbox": [ + 395, + 126, + 439, + 133 + ], + "spans": [ + { + "bbox": [ + 395, + 126, + 439, + 133 + ], + "type": "text", + "content": "(a) Input scene" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 131, + 415, + 244 + ], + "lines": [ + { + "bbox": [ + 307, + 131, + 415, + 244 + ], + "spans": [ + { + "bbox": [ + 307, + 131, + 415, + 244 + ], + "type": "image", + "image_path": "e53a70bc74cbd4d472bf57013b28d150ff1f3b03de0c8bc31ac7df70d5eadc8c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 247, + 410, + 256 + ], + "lines": [ + { + "bbox": [ + 325, + 247, + 410, + 256 + ], + "spans": [ + { + "bbox": [ + 325, + 247, + 410, + 256 + ], + "type": "text", + "content": "(b) Hierarchical visual graph" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 415, + 131, + 544, + 246 + ], + "blocks": [ + { + "bbox": [ + 415, + 131, + 544, + 246 + ], + "lines": [ + { + "bbox": [ + 415, + 131, + 544, + 246 + ], + "spans": [ + { + "bbox": [ + 415, + 131, + 544, + 246 + ], + "type": "image", + "image_path": 
"0452d5a6262ed068e721491e14b7f1c5bec28cc63e4aa23e55f642c389483f70.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 247, + 505, + 256 + ], + "lines": [ + { + "bbox": [ + 451, + 247, + 505, + 256 + ], + "spans": [ + { + "bbox": [ + 451, + 247, + 505, + 256 + ], + "type": "text", + "content": "(c) 3D scene graph" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 259, + 547, + 282 + ], + "lines": [ + { + "bbox": [ + 305, + 259, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 305, + 259, + 547, + 282 + ], + "type": "text", + "content": "Figure 4. Visualizations of our predicted scene graph on 3DSSG dataset. Red indicates the misclassified objects or relationships." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 286, + 547, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 286, + 547, + 370 + ], + "spans": [ + { + "bbox": [ + 304, + 286, + 547, + 370 + ], + "type": "text", + "content": "graph " + }, + { + "bbox": [ + 304, + 286, + 547, + 370 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 286, + 547, + 370 + ], + "type": "text", + "content": " are well constructed. However, our model incorrectly classifies the relationship between Window1 and Floor. This is mainly because our model fails to extract discriminative features for Window1 as there are few points within its bounding box. The token of Window1 is classified incorrectly in the second layer while it should be in the third layer. We provide more visualization samples in the supplementary." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 375, + 379, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 375, + 379, + 387 + ], + "spans": [ + { + "bbox": [ + 306, + 375, + 379, + 387 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 392, + 548, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 392, + 548, + 608 + ], + "spans": [ + { + "bbox": [ + 304, + 392, + 548, + 608 + ], + "type": "text", + "content": "We proposed a method for 3D scene graph prediction from raw point clouds. Our method explores the regular patterns of 3D physical spaces into the deep network to facilitate 3D scene graph prediction. Hierarchical symbolic knowledge is first reconstructed via exploiting external knowledge as the baseline to admit the hierarchical structure cues of a 3D scene. A knowledge-guided visual context encoding module then builds a hierarchical visual graph and learns the contextualized features by a region-aware graph network. Finally, a 3D spatial multimodal knowledge accumulation module is proposed to regularize the semantic space of relationship prediction. Extensive experiments on the 3DSSG dataset show that our method outperforms existing state-of-the-art and can mitigate the effect of data imbalance and label noises. In the future, we plan to exploit the attributes of 3D objects to build richer knowledge graphs to improve the prediction performances of attribute-focused relationships, such as same symmetric as and same texture as." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 613, + 416, + 626 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 613, + 416, + 626 + ], + "spans": [ + { + "bbox": [ + 306, + 613, + 416, + 626 + ], + "type": "text", + "content": "6. Acknowledgments" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 630, + 547, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 547, + 712 + ], + "type": "text", + "content": "This work was supported in part by the National Natural Science Foundation of China under Grant 62003253, Grant 61973106, Grant U2013203, Grant U21A20482 and Grant U20A20185. Professor Ajmal Mian is the recipient of an Australian Research Council Future Fellowship Award (project number FT210100268) funded by the Australian Government." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "9189" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 705 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 288, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 288, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 288, + 145 + ], + "type": "text", + "content": "[1] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5664-5673, 2019. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 147, + 288, + 190 + ], + "type": "text", + "content": "[2] Soren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. Dbpedia: A nucleus for a web of open data. In The semantic web, pages 722-735. 2007. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 288, + 235 + ], + "type": "text", + "content": "[3] Sumithra Bhakthavatsalam, Kyle Richardson, Niket Tandon, and Peter Clark. Do dogs have whiskers? a new knowledge base of haspart relations. arXiv preprint arXiv:2006.07510, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 237, + 288, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 237, + 288, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 237, + 288, + 290 + ], + "type": "text", + "content": "[4] Xiaojun Chang, Pengzhen Ren, Pengfei Xu, Zhihui Li, Xiaojiang Chen, and Alexander G Hauptmann. 
A comprehensive survey of scene graphs: Generation and application. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 293, + 288, + 346 + ], + "type": "text", + "content": "[5] Long Chen, Hanwang Zhang, Jun Xiao, Xiangnan He, Shiliang Pu, and Shih-Fu Chang. Counterfactual critic multi-agent training for scene graph generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4613-4623, 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 348, + 288, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 348, + 288, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 348, + 288, + 402 + ], + "type": "text", + "content": "[6] Tianshui Chen, Weihao Yu, Riquan Chen, and Liang Lin. Knowledge-embedded routing network for scene graph generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6163–6171, 2019. 2, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 404, + 288, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 404, + 288, + 459 + ], + "spans": [ + { + "bbox": [ + 53, + 404, + 288, + 459 + ], + "type": "text", + "content": "[7] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 460, + 288, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 460, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 53, + 460, + 288, + 514 + ], + "type": "text", + "content": "[8] Yang Ding, Jing Yu, Bang Liu, Yue Hu, Mingxin Cui, and Qi Wu. Mukea: Multimodal knowledge extraction and accumulation for knowledge-based visual question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5089-5098, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 516, + 288, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 516, + 288, + 559 + ], + "spans": [ + { + "bbox": [ + 53, + 516, + 288, + 559 + ], + "type": "text", + "content": "[9] Zijin Du, Hailiang Ye, and Feilong Cao. A novel local-global graph convolutional method for point cloud semantic segmentation. IEEE Transactions on Neural Networks and Learning Systems, 2022. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 561, + 288, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 561, + 288, + 604 + ], + "spans": [ + { + "bbox": [ + 48, + 561, + 288, + 604 + ], + "type": "text", + "content": "[10] Mingtao Feng, Syed Zulqarnain Gilani, Yaonan Wang, Liang Zhang, and Ajmal Mian. Relation graph network for 3d object detection in point clouds. IEEE Transactions on Image Processing, 30:92-107, 2020. 
1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 605, + 288, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 605, + 288, + 649 + ], + "spans": [ + { + "bbox": [ + 48, + 605, + 288, + 649 + ], + "type": "text", + "content": "[11] Mingtao Feng, Liang Zhang, Xuefei Lin, Syed Zulqarnain Gilani, and Ajmal Mian. Point attention network for semantic segmentation of 3d point clouds. Pattern Recognition, 107:107446, 2020. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 650, + 288, + 705 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 650, + 288, + 705 + ], + "spans": [ + { + "bbox": [ + 48, + 650, + 288, + 705 + ], + "type": "text", + "content": "[12] Chen Gao, Jinyu Chen, Si Liu, Luting Wang, Qiong Zhang, and Qi Wu. Room-and-object aware knowledge reasoning for remote embodied referring expression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3064–3073, 2021. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[13] Meng-Hao Guo, Jun-Xiong Cai, Zheng-Ning Liu, Tai-Jiang Mu, Ralph R Martin, and Shi-Min Hu. Pct: Point cloud transformer. Computational Visual Media, 7(2):187-199, 2021. 4, 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 118, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 547, + 139 + ], + "type": "text", + "content": "[14] Alon Hafri and Chaz Firestone. The perception of relations. Trends in Cognitive Sciences, 2021. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 140, + 547, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 547, + 205 + ], + "type": "text", + "content": "[15] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11108-11117, 2020. 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 206, + 546, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 546, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 546, + 249 + ], + "type": "text", + "content": "[16] Maximilian Jaritz, Tuan-Hung Vu, Raoul De Charette, Emilie Wirbel, and Patrick Pérez. Cross-modal learning for domain adaptation in 3d semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 251, + 546, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 546, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 546, + 304 + ], + "type": "text", + "content": "[17] Justin Johnson, Ranjay Krishna, Michael Stark, Li-Jia Li, David Shamma, Michael Bernstein, and Li Fei-Fei. Image retrieval using scene graphs. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3668–3678, 2015. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 305, + 546, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 546, + 337 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 546, + 337 + ], + "type": "text", + "content": "[18] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 338, + 547, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 547, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 547, + 403 + ], + "type": "text", + "content": "[19] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International journal of computer vision, 123(1):32-73, 2017. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 547, + 458 + ], + "type": "text", + "content": "[20] Manyi Li, Akshay Gadi Patil, Kai Xu, Siddhartha Chaudhuri, Owais Khan, Ariel Shamir, Changhe Tu, Baoquan Chen, Daniel Cohen-Or, and Hao Zhang. Grains: Generative recursive autoencoders for indoor scenes. ACM Transactions on Graphics (TOG), 38(2):1-16, 2019. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 460, + 547, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 547, + 525 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 547, + 525 + ], + "type": "text", + "content": "[21] Mengtian Li, Yuan Xie, Yunhang Shen, Bo Ke, Ruizhi Qiao, Bo Ren, Shaohui Lin, and Lizhuang Ma. Hybridcr: Weakly-supervised 3d point cloud semantic segmentation via hybrid contrastive regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14930-14939, 2022. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 526, + 547, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 526, + 547, + 569 + ], + "spans": [ + { + "bbox": [ + 308, + 526, + 547, + 569 + ], + "type": "text", + "content": "[22] Ze Liu, Zheng Zhang, Yue Cao, Han Hu, and Xin Tong. Group-free 3d object detection via transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2949-2958, 2021. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 571, + 547, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 571, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 571, + 547, + 624 + ], + "type": "text", + "content": "[23] Kenneth Marino, Xinlei Chen, Devi Parikh, Abhinav Gupta, and Marcus Rohrbach. Krisp: Integrating implicit and symbolic knowledge for open-domain knowledge-based vqa. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14111-14121, 2021. 
2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 625, + 547, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 547, + 646 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 547, + 646 + ], + "type": "text", + "content": "[24] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 647, + 546, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 546, + 690 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 546, + 690 + ], + "type": "text", + "content": "[25] Xuran Pan, Zhuofan Xia, Shiji Song, Li Erran Li, and Gao Huang. 3d object detection with pointformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7463-7472, 2021. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 547, + 713 + ], + "type": "text", + "content": "[26] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "9190" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 696 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 288, + 105 + ], + "type": "text", + "content": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 288, + 150 + ], + "type": "text", + "content": "[27] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep hough voting for 3d object detection in point clouds. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9277-9286, 2019. 1, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 288, + 205 + ], + "type": "text", + "content": "[28] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 652-660, 2017. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "type": "text", + "content": "[29] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. 
Point-net++ deep hierarchical feature learning on point sets in a metric space. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 5105-5114, 2017. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 288, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 288, + 318 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 288, + 318 + ], + "type": "text", + "content": "[30] Antoni Rosinol, Andrew Violette, Marcus Abate, Nathan Hughes, Yun Chang, Jingnan Shi, Arjun Gupta, and Luca Carlone. Kimera: From slam to spatial perception with 3d dynamic scene graphs. The International Journal of Robotics Research, 40(12-14):1510-1546, 2021. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 288, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 288, + 351 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 288, + 351 + ], + "type": "text", + "content": "[31] Morteza Sarafyazd and Mehrdad Jazayeri. Hierarchical reasoning by neural circuits in the frontal cortex. Science, 364(6441), 2019. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 353, + 288, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 353, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 48, + 353, + 288, + 407 + ], + "type": "text", + "content": "[32] Sahand Sharifzadeh, Sina Moayed Baharlou, and Volker Tresp. Classification by attention: Scene graph classification with prior knowledge. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 5025-5033, 2021. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 409, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 409, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 409, + 288, + 453 + ], + "type": "text", + "content": "[33] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In European conference on computer vision, pages 746-760. Springer, 2012. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 454, + 288, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 454, + 288, + 497 + ], + "spans": [ + { + "bbox": [ + 48, + 454, + 288, + 497 + ], + "type": "text", + "content": "[34] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 567-576, 2015. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 498, + 288, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 498, + 288, + 541 + ], + "spans": [ + { + "bbox": [ + 48, + 498, + 288, + 541 + ], + "type": "text", + "content": "[35] Robyn Speer, Joshua Chin, and Catherine Havasi. Conceptnet 5.5: An open multilingual graph of general knowledge. In Thirty-first AAAI conference on artificial intelligence, 2017. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 288, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 288, + 597 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 288, + 597 + ], + "type": "text", + "content": "[36] Kaihua Tang, Hanwang Zhang, Baoyuan Wu, Wenhan Luo, and Wei Liu. Learning to compose dynamic tree structures for visual contexts. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6619-6628, 2019. 2, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 599, + 288, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 599, + 288, + 653 + ], + "spans": [ + { + "bbox": [ + 48, + 599, + 288, + 653 + ], + "type": "text", + "content": "[37] Johanna Wald, Helisa Dhamo, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs from 3d indoor reconstructions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3961-3970, 2020. 1, 2, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 655, + 288, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 655, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 48, + 655, + 288, + 696 + ], + "type": "text", + "content": "[38] Johanna Wald, Nassir Navab, and Federico Tombari. Learning 3d semantic scene graphs with instance embeddings. International Journal of Computer Vision, pages 1-22, 2022. 1, 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 609 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 126 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 126 + ], + "type": "text", + "content": "[39] Kai Wang, Yu-An Lin, Ben Weissmann, Manolis Savva, Angel X Chang, and Daniel Ritchie. Planit: Planning and instantiating indoor scenes with relation graph and spatial prior networks. ACM Transactions on Graphics (TOG), 38(4):1-15, 2019. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 129, + 547, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 547, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 547, + 173 + ], + "type": "text", + "content": "[40] Wenbin Wang, Ruiping Wang, Shiguang Shan, and Xilin Chen. Sketching image gist: Human-mimetic hierarchical scene graph generation. In European Conference on Computer Vision, pages 222-239. Springer, 2020. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 175, + 547, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 547, + 237 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 547, + 237 + ], + "type": "text", + "content": "[41] Shun-Cheng Wu, Johanna Wald, Keisuke Tateno, Nassir Navab, and Federico Tombari. Scenegraphfusion: Incremental 3d scene graph prediction from rgb-d sequences. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7515-7525, 2021. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 240, + 547, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 240, + 547, + 284 + ], + "spans": [ + { + "bbox": [ + 307, + 240, + 547, + 284 + ], + "type": "text", + "content": "[42] Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei-Fei. Scene graph generation by iterative message passing. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5410–5419, 2017. 
2, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 285, + 547, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 285, + 547, + 328 + ], + "spans": [ + { + "bbox": [ + 307, + 285, + 547, + 328 + ], + "type": "text", + "content": "[43] Qiangeng Xu, Yiqi Zhong, and Ulrich Neumann. Behind the curtain: Learning occluded shapes for 3d object detection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2893-2901, 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 331, + 547, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 331, + 547, + 373 + ], + "spans": [ + { + "bbox": [ + 307, + 331, + 547, + 373 + ], + "type": "text", + "content": "[44] Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. Graph r-cnn for scene graph generation. In Proceedings of the European conference on computer vision (ECCV), pages 670-685, 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 375, + 547, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 375, + 547, + 418 + ], + "spans": [ + { + "bbox": [ + 307, + 375, + 547, + 418 + ], + "type": "text", + "content": "[45] Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5831-5840, 2018. 2, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 420, + 547, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 420, + 547, + 473 + ], + "spans": [ + { + "bbox": [ + 307, + 420, + 547, + 473 + ], + "type": "text", + "content": "[46] Chaoyi Zhang, Jianhui Yu, Yang Song, and Weidong Cai. Exploiting edge-oriented reasoning for 3d point-based scene graph analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9705-9715, 2021. 1, 2, 4, 6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "spans": [ + { + "bbox": [ + 307, + 475, + 547, + 518 + ], + "type": "text", + "content": "[47] Shoulong Zhang, Aimin Hao, Hong Qin, et al. Knowledge-inspired 3d scene graph prediction in point cloud. Advances in Neural Information Processing Systems, 34, 2021. 2, 3, 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 520, + 547, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 520, + 547, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 547, + 564 + ], + "type": "text", + "content": "[48] Yifeng Zhang, Ming Jiang, and Qi Zhao. Explicit knowledge incorporation for visual reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1356-1365, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 566, + 547, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 566, + 547, + 609 + ], + "spans": [ + { + "bbox": [ + 307, + 566, + 547, + 609 + ], + "type": "text", + "content": "[49] Na Zhao, Tat-Seng Chua, and Gim Hee Lee. Few-shot 3d point cloud semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8873-8882, 2021. 
1" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "9191" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_content_list.json b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..84833e5d6bfc3c465a976718375bd015176b24de --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_content_list.json @@ -0,0 +1,1683 @@ +[ + { + "type": "text", + "text": "3D Video Loops from Asynchronous Input", + "text_level": 1, + "bbox": [ + 269, + 130, + 700, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Li Ma $^{1}$ Xiaoyu Li $^{2}$ Jing Liao $^{3}$ Pedro V. Sander $^{1}$", + "bbox": [ + 233, + 179, + 730, + 200 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1The Hong Kong University of Science and Technology", + "bbox": [ + 263, + 204, + 705, + 222 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Tencent AI Lab $^{3}$ City University of Hong Kong", + "bbox": [ + 272, + 223, + 696, + 241 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/424e31edcd5e4876067e0ffeb5cfc2dc81f2b55db2c708dfe3f76429a986a527.jpg", + "image_caption": [ + "(a) Reconstructed 3D Video Representation" + ], + "image_footnote": [], + "bbox": [ + 78, + 257, + 200, + 396 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/334ba8de867f95f76c793daee3e2e74e1651be2f42cdcc97559d83bfa050e684.jpg", + "image_caption": [ + "Figure 1. Given a set of asynchronous multi-view videos, we propose a pipeline to construct a novel 3D looping video representation (a), which consists of a static texture atlas, a dynamic texture atlas, and multiple tiles as the geometry proxy. The 3D video loops allow both view and time control (b), and can be rendered in real time even on mobile devices (c). We strongly recommend readers refer to the supplementary material for video results." 
+ ], + "image_footnote": [], + "bbox": [ + 200, + 258, + 447, + 395 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e836695320aa370be43d5ab5b71de4f138330987fd71f48e9f3453c8d7f36342.jpg", + "image_caption": [ + "(b) View and Time Control" + ], + "image_footnote": [], + "bbox": [ + 450, + 258, + 607, + 398 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c340f0555bb3260ada99a0da15cbb2506246e6a7cadd2f6602d5775f23e90bc7.jpg", + "image_caption": [ + "(c) Real Time Demo" + ], + "image_footnote": [], + "bbox": [ + 609, + 260, + 766, + 398 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/754cf507d69b28aff5d38f66bb3b92b8b6a10df930f5e4922d00136f0ddf75dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 772, + 262, + 890, + 396 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 494, + 313, + 508 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Looping videos are short video clips that can be looped endlessly without visible seams or artifacts. They provide a very attractive way to capture the dynamism of natural scenes. Existing methods have been mostly limited to 2D representations. In this paper, we take a step forward and propose a practical solution that enables an immersive experience on dynamic 3D looping scenes. The key challenge is to consider the per-view looping conditions from asynchronous input while maintaining view consistency for the 3D representation. We propose a novel sparse 3D video representation, namely Multi-Tile Video (MTV), which not only provides a view-consistent prior, but also greatly reduces memory usage, making the optimization of a 4D volume tractable. Then, we introduce a two-stage pipeline to construct the 3D looping MTV from completely asynchronous multi-view videos with no time overlap. A novel looping loss based on video temporal retargeting algorithms is adopted during the optimization to loop the 3D scene. Experiments of our framework have shown promise in successfully generating and rendering photorealistic 3D looping videos in real time even on mobile devices. The code, dataset, and live demos are available in https://limacv.github.io/VideoLoop3D_web/.", + "bbox": [ + 73, + 536, + 473, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 493, + 630, + 508 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Endless looping videos are fascinating ways to record special moments. These video loops are compact in terms of storage and provide a much richer experience for scenes that exhibit looping behavior. One successful commercial use of this technique is the live photo [19] feature in the Apple iPhone, which tries to find an optimal looping period and fade in/out short video clips to create looping videos. There have been several works on automatically constructing 2D looping videos from non-looping short video clips. Liao et al. [24] first propose to create 2D video loops from videos captured with static cameras. They solve for the optimal starting frame and looping period for each pixel in the input video to composite the final video. Later on, several methods are proposed to improve the computation speed [23], or extend to panoramas [1, 36], and gigapixel videos [16]. However, few attempts have been made to extend video loops to a 3D representation. 
One existing work that shares a similar setting as ours is VBR [46], which generates plausible video loops in novel views. However, it comes with some limitations: It builds on top of ULR [5], which can produce ghosting artifacts due to inaccurate mesh reconstruction, as shown in [30]. Besides, VBR generates looping videos and reduces the inconsistency from asynchronous input by adaptively blending in different frequency domains, which tends to blur away details.", + "bbox": [ + 496, + 523, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "310", + "bbox": [ + 485, + 925, + 511, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To allow free-view observation of the looping videos, a proper 3D representation needs to be employed. Recently, tremendous progress has been made in novel view synthesis based on 3D scene representations such as triangle meshes [37, 38, 45], Multi-plane Image (MPI) [9, 56], and Neural Radiance Field (NeRF) [7, 31, 32], which could be reconstructed given only sparse observations of real scenes and render photo-realistic images in novel views. Much effort has been made to adapt these methods to dynamic scenes, which allows for both viewing space and time controls [2, 6, 27, 28, 34, 35, 52, 57]. Therefore, a straightforward solution to generate a 3D looping video is to employ the 2D looping algorithms for each view and lift the results to 3D using these methods. However, we find it hard to get satisfactory results since the 2D looping algorithms do not consider view consistency, which is even more challenging for the asynchronous multi-view videos that we use as input.", + "bbox": [ + 76, + 90, + 472, + 349 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we develop a practical solution for these problems by using the captured video input of the dynamic 3D scene with only one commodity camera. We automatically construct a 3D looping video representation from completely asynchronous multi-view input videos with no time overlap. To get promising 3D video loop results, two main issues need to be addressed. First, we need to solve for a view-consistent looping pattern from inconsistent multi-view videos, from which we need to identify spatio-temporal 3D patches that are as consistent as possible. Second, the 3D video potentially requires a memory-intensive 4D volume for storage. Therefore, we need to develop a 3D video representation that is both efficient in rendering and compact in memory usage to make the optimization of the 4D volume tractable.", + "bbox": [ + 76, + 352, + 472, + 580 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we develop an analysis-by-synthesis approach that trains for a view-consistent 3D video representation by optimizing multi-view looping targets. We propose an efficient 3D video representation based on Multi-plane Images (MPIs), namely Multi-tile Videos (MTVs), by exploiting the spatial and temporal sparsity of the 3D scene. As shown in Fig. 
2, instead of densely storing large planes, MTVs store static or dynamic texture tiles that are sparsely scattered in the view frustum. This greatly reduces the memory requirement for rendering compared with other 3D video representations, making the optimization of the 3D looping video feasible in a single GPU. The sparsity of MTVs also serves as a view-consistent prior when optimizing the 3D looping video. To optimize the representation for looping, we formulate the looping generation for each view as a temporal video retargeting problem and develop a novel looping loss based on this formulation. We propose a two-stage pipeline to generate a looping MTV, and the experiments show that our method can produce photorealistic 3D video loops that maintain similar dynamism from the input, and enable real-time rendering even in mobile devices.", + "bbox": [ + 76, + 583, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions can be summarized as follows:", + "bbox": [ + 500, + 90, + 823, + 106 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose Multi-tile Videos (MTVs), a novel dynamic 3D scene representation that is efficient in rendering and compact in memory usage.", + "- We propose a novel looping loss by formulating the 3D video looping construction as a temporal retargeting problem.", + "- We propose a two-stage pipeline that constructs MTVs from completely asynchronous multi-view videos." + ], + "bbox": [ + 517, + 111, + 893, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 257, + 640, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our work lies at the confluence of two research topics: looping video construction and novel view synthesis. We will review each of them in this section.", + "bbox": [ + 498, + 282, + 890, + 327 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Video Loops. Several works have been proposed to synthesize looping videos from short video clips. Schödl et al. [40] create video loops by finding similar video frames and jumping between them. Audio [33] can also be leveraged for further refinement. Liao et al. [24] formulate the looping as a combinatorial optimization problem that tries to find the optimal start frame and looping period for each pixel. It seeks to maximize spatio-temporal consistency in the output looping videos. This formulation is further developed and accelerated by Liao et al. [23], and extended to gigapixel looping videos [16] by stitching multiple looping videos. Panorama video loops can also be created by taking a video with a panning camera [1, 36]. VBR [46] generates loops by fading in/out temporal Laplacian pyramids, and extends video loops to 3D using ULR [5]. Another line of work tries to create video loops from still images and strokes provided by users as rough guidelines of the looping motion. Endless Loops [15] tries to find self-similarities from the image and solve for the optical flow field, which is then used to warp and composite the frames of the looping video. This process can also be replaced by data-driven approaches [18, 29], or physics-based simulation [8]. Despite the progress in creating various forms of looping videos, extending looping videos to 3D is still an unexplored direction.", + "bbox": [ + 496, + 335, + 893, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Novel View Synthesis of Dynamic Scenes. 
Novel View Synthesis (NVS) aims at interpolating views given only a set of sparse input views. For dynamic scenes, NVS requires the construction of a 4D representation that allows for both space and time control. Some methods use synchronized multi-view videos as input, which are often only available in a studio setting [27, 28, 57], or using specially designed camera arrays [4, 9, 21, 25]. To ease hardware requirements, Open4D [3] uses unconstrained multi-view input, but still requires multiple observations at the same timestamp. With the development of neural rendering, it is possible to use only monocular input. However, this is", + "bbox": [ + 496, + 719, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "311", + "bbox": [ + 485, + 924, + 509, + 936 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "a highly ill-posed problem since the camera and scene elements are moving simultaneously. Some methods use extra sensors such as a depth sensor [2, 6], while some use a data-driven prior to help construct the scene geometry [12, 50]. Others use a hand-crafted motion prior to regularize the scene motion [22, 34, 35, 47], which usually can only handle simple motions. In our setting, we take asynchronous multi-view videos with no time overlap, which is a setting that has not been addressed before.", + "bbox": [ + 75, + 90, + 472, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Scene Representations. A critical issue in NVS is the underlying scene representation. A triangle mesh is the most commonly used scene representation in commercial 3D software. Some methods use meshes as their representation [37, 38, 46]. However, reconstructing an accurate, temporally consistent mesh is still an open problem, being particularly challenging for complex in-the-wild scenes [28]. A volumetric representation is another option to express the 3D world by storing scene parameters in a dense 3D grid [11, 27, 49, 54]. One benefit is that it trivially supports differentiable rendering, which greatly improves the reconstruction quality. The Multi-plane Image (MPI) [9, 10, 30, 44, 48, 56] is an adapted volumetric representation that represents a scene using multiple RGBA planes in the camera frustum. Volume representations can model complex geometry, but at the cost of higher memory usage. Another rapidly developing representation is Neural Radiance Field (NeRF) [31], which models scenes as continuous functions and parameterizes the function as an implicit neural network. It achieves photorealistic rendering results at the expense of long training and rendering times, especially for dynamic scenes.", + "bbox": [ + 75, + 231, + 472, + 563 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 579, + 169, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 604, + 186, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to reconstruct a view-consistent 3D video representation that can be looped infinitely using completely asynchronous multi-view 2D videos. We start by introducing a novel 3D video representation, namely Multitile Videos (MTVs), which improves efficiency by exploiting sparsity. Then we propose a two-stage pipeline as shown in Fig. 3 to construct a 3D looping MTV. 
In the first stage, we initialize the MTV by optimizing a static Multiplane Image (MPI) and a 3D loopable mask using long exposure images and 2D loopable masks derived from the input videos. We then construct an MTV through a tile culling process. In the second stage, we train the MTV using an analysis-by-synthesis approach in a coarse-to-fine manner. The key enabler for this process is a novel looping loss based on video retargeting algorithms, which encourages a video to simultaneously loop and preserve similarity to the input. The remainder of this section describes the details of this proposed approach.", + "bbox": [ + 75, + 628, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a085c62d5448702bb9e1610535da2958477c0dca0b4f04fc95e4755d3d097f74.jpg", + "image_caption": [ + "Figure 2. Comparison between the Multi-plane Video representation and the Multi-tile Video representation." + ], + "image_footnote": [], + "bbox": [ + 498, + 87, + 893, + 218 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Data Preparation", + "text_level": 1, + "bbox": [ + 498, + 270, + 671, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The input to our system are multiple asynchronous videos of the same scene from different views. Each video $\\mathbf{V} \\in \\mathbb{R}^{F \\times H \\times W \\times 3}$ is a short clip with $F$ frames and a resolution of $H \\times W$ . Video lengths may differ for each view. Each video is expected to have a fixed camera pose, which can be achieved using tripods or existing video stabilization tools during post-process. Since we allow videos to be asynchronous, we could capture each view sequentially using a single commodity camera.", + "bbox": [ + 496, + 294, + 890, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given the precondition that the captured scene contains mostly repetitive content, we assume the long exposure images for each view to be view-consistent. Therefore, we compute an average image for each video $\\mathbf{V}$ , and then register a pinhole camera model for each video using COLMAP [41, 42]. We also compute a binary loopable mask for each input video similar to Liao et al. [23], where 1 indicates pixel with the potential to form a loop and 0 otherwise.", + "bbox": [ + 496, + 430, + 890, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Multi-tile Video (MTV) Representation", + "text_level": 1, + "bbox": [ + 498, + 560, + 836, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Before introducing our proposed MTV representation, we first briefly review the MPI representation [56]. An MPI represents the scene using $D$ fronto-parallel RGBA planes in the frustum of a reference camera, with each plane arranged at fixed depths [48]. To render an MPI from novel views, we first need to warp each plane based on the depth of the plane and the viewing camera, and then iteratively blend each warped plane from back to front. A straightforward dynamic extension of MPI, namely Multi-plane Video (MPV), is to store a sequence of RGBA maps for each plane. For a video with $T$ frames, this results in a 4D volume in $\\mathbb{R}^{D\\times T\\times H\\times W\\times 4}$ , which is very memory consuming. Inspired by recent work on sparse volume representation [17,26,53], we propose Multi-tile Videos, which reduce the memory requirements by exploiting the spatio-temporal sparsity of the scene. Specifically, we subdivide each plane into a regular grid of tiny tiles. 
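To make the MPI rendering step described above concrete, the following is a minimal NumPy sketch of the back-to-front "over" compositing of D warped RGBA planes. The homography warping into the novel view is omitted, and all function and variable names are illustrative rather than taken from the authors' code.

```python
import numpy as np

def composite_mpi(planes_rgba):
    """Composite D fronto-parallel RGBA planes back to front.

    planes_rgba: (D, H, W, 4) array, index 0 = farthest plane, already
    warped into the target view, values in [0, 1]. Returns (H, W, 3).
    """
    out = np.zeros(planes_rgba.shape[1:3] + (3,), dtype=np.float32)
    for rgba in planes_rgba:                      # far -> near
        rgb, alpha = rgba[..., :3], rgba[..., 3:4]
        out = rgb * alpha + out * (1.0 - alpha)   # standard "over" operator
    return out

# toy usage: 32 planes rendered at 360 x 640
planes = np.random.rand(32, 360, 640, 4).astype(np.float32)
image = composite_mpi(planes)
print(image.shape)  # (360, 640, 3)
```

The same compositing applies to an MTV, except that only the non-empty tiles of each plane contribute color and alpha.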
Each tile $\\mathbf{T}\\in \\mathbb{R}^{F\\times H_s\\times W_s\\times 4}$ stores a small RGBA patch sequence with spatial resolution $H_{s}\\times W_{s}$ . For each tile, we assign a label $l$ by identifying whether it contains looping content $l_{loop}$ , a static scene $l_{static}$ , or is simply empty $l_{empty}$ . We could then store a", + "bbox": [ + 496, + 583, + 890, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "312", + "bbox": [ + 485, + 926, + 511, + 936 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a19264169a7d76cbe52657564e1662fc754b3e13b4e8db8d8ac4b9b7668b1f53.jpg", + "image_caption": [ + "Figure 3. The two-stage pipeline to generate the MTV representation from multi-view videos." + ], + "image_footnote": [], + "bbox": [ + 83, + 85, + 885, + 270 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "single RGBA patch for $l_{\\text{static}}$ , and discard tiles that are empty. Fig. 1 visualizes a reconstructed MTV representation, where the RGBA patches are packed into static and dynamic texture atlas. Fig. 2 shows the difference between MPVs and MTVs.", + "bbox": [ + 75, + 297, + 470, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Stage 1: MTV Initialization", + "text_level": 1, + "bbox": [ + 76, + 381, + 326, + 397 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We find that optimizing a dense MTV directly from scratch results in the approach being easily trapped in local minima, which yields view-inconsistent results. To address this, we use a two-stage pipeline shown in Fig. 3. In the first stage, we start by constructing a \"long exposure\" MPI. Then we initialize the sparse MTV by tile culling process that removes unnecessary tiles. By reducing the number of parameters, the initialized MTV provides a view-consistent prior and leads to a high-quality 3D video representation.", + "bbox": [ + 75, + 404, + 470, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training a looping-aware MPI. We start by training a dense MPI $\\mathbf{M} \\in \\mathbb{R}^{D \\times H \\times W \\times 4}$ , as well as a 3D loopable mask $\\mathbf{L} \\in \\mathbb{R}^{D \\times H \\times W}$ , using the average image and the 2D loopable mask, respectively. We randomly initialize $\\mathbf{M}$ and $\\mathbf{L}$ , and in each iteration, we randomly sample a patch in a random view, and render an RGB patch $\\hat{\\mathbf{p}}_c \\in \\mathbb{R}^{h \\times w \\times 3}$ and a loopable mask patch $\\hat{\\mathbf{p}}_l \\in \\mathbb{R}^{h \\times w}$ using the standard MPI rendering method. Note that the $\\alpha$ channel is shared between $\\mathbf{M}$ and $\\mathbf{L}$ during rendering. We supervise the MPI $\\mathbf{M}$ by minimizing the Mean Square Error (MSE) between the rendering results and the corresponding patch $\\mathbf{p}_c$ from the average image. 
We supervise the loopable mask $\\mathbf{L}$ by minimizing the Binary Cross Entropy (BCE) between the rendered 2D mask $\\hat{\\mathbf{p}}_l$ and the corresponding patch $\\mathbf{p}_l$ from the 2D loopable mask:", + "bbox": [ + 75, + 558, + 468, + 785 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {m s e} = \\frac {1}{h w} \\| \\mathbf {p} _ {c} - \\hat {\\mathbf {p}} _ {c} \\| _ {2} ^ {2}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 789, + 468, + 819 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {b c d} = \\frac {1}{h w} \\| - (\\mathbf {p} _ {l} \\log (\\hat {\\mathbf {p}} _ {l}) + (1 - \\mathbf {p} _ {l}) \\log (1 - \\hat {\\mathbf {p}} _ {l})) \\| _ {1}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 821, + 468, + 862 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\| \\mathbf{p}\\| _1$ and $\\| \\mathbf{p}\\| _2$ are the L1 and L2 norm of a flattened patch $\\mathbf{p}$ . The log is computed for every element of a patch.", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since the rendering of the MPI is differentiable, we optimize $\\mathbf{M}$ and $\\mathbf{L}$ using the Adam optimizer [20]. Optimizing all the parameters freely causes noisy artifacts, therefore, we apply total variation (TV) regularization [39] to $\\mathbf{M}$ :", + "bbox": [ + 496, + 297, + 890, + 359 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {t v} = \\frac {1}{H W} \\left(\\| \\Delta_ {x} \\mathbf {M} \\| _ {1} + \\| \\Delta_ {y} \\mathbf {M} \\| _ {1}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 364, + 890, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\| \\Delta_x\\mathbf{M}\\| _1$ is shorthand for the L1 norm of the gradient of each pixel in the MPI $\\mathbf{M}$ along $x$ direction, and analogously for $\\| \\Delta_y\\mathbf{M}\\| _1$ . We also adopt a sparsity loss to further encourage sparsity to the $\\alpha$ channel of the MPI $\\mathbf{M}_{\\alpha}$ as in Broxton et al. [4]. Specifically, we collect $D$ alpha values in each pixel location of $\\mathbf{M}_{\\alpha}$ into a vector $\\beta$ , where $D$ is the number of planes. Then the sparsity loss is computed as:", + "bbox": [ + 496, + 398, + 892, + 517 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {s p a} = \\frac {1}{H W} \\sum_ {\\text {p i x e l}} \\frac {\\| \\beta \\| _ {1}}{\\| \\beta \\| _ {2}}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 517, + 890, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final loss in the first stage is a weighted sum of the four losses:", + "bbox": [ + 496, + 556, + 890, + 585 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {m s e} + \\mathcal {L} _ {b c d} + \\lambda_ {t v} \\mathcal {L} _ {t v} + \\lambda_ {s p a} \\mathcal {L} _ {s p a}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 595, + 890, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tile Culling. After training, we reconstruct a static MPI M as well as a 3D loopable mask $\\mathbf{L}$ . We subdivide each plane into a regular grid of tiles. In the experiments, we subdivide the plane so that each tile has a resolution of $H_{s} = W_{s} = 16$ . 
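A compact PyTorch sketch of the first-stage objective in Eqs. (1)-(5) might look as follows, assuming the rendered RGB patch, the rendered loopable-mask patch, their targets, and the dense MPI tensor are already available. Normalization is folded into means, so the terms differ from the 1/hw and 1/HW factors only by constant scale; the weights lambda_tv = 0.5 and lambda_spa = 0.004 follow the values reported in the experiments section, while all names and shape conventions are my own.

```python
import torch
import torch.nn.functional as F

def stage1_loss(pred_rgb, target_rgb, pred_mask, target_mask,
                mpi_rgba, lambda_tv=0.5, lambda_spa=0.004, eps=1e-6):
    """Sketch of Eqs. (1)-(5). pred_rgb/target_rgb: (h, w, 3) patches,
    pred_mask/target_mask: (h, w) in [0, 1], mpi_rgba: (D, H, W, 4)."""
    l_mse = F.mse_loss(pred_rgb, target_rgb)                        # Eq. (1)
    l_bce = F.binary_cross_entropy(pred_mask.clamp(eps, 1 - eps),
                                   target_mask)                     # Eq. (2)

    # Eq. (3): total variation of the MPI along the x and y directions.
    l_tv = ((mpi_rgba[:, :, 1:] - mpi_rgba[:, :, :-1]).abs().mean() +
            (mpi_rgba[:, 1:, :] - mpi_rgba[:, :-1, :]).abs().mean())

    # Eq. (4): per-pixel L1/L2 ratio of the alpha values along depth.
    beta = mpi_rgba[..., 3]                                         # (D, H, W)
    l_spa = (beta.abs().sum(0) / (beta.pow(2).sum(0).sqrt() + eps)).mean()

    return l_mse + l_bce + lambda_tv * l_tv + lambda_spa * l_spa    # Eq. (5)
```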
We denote $\\{T_c\\}, \\{T_\\alpha\\}, \\{T_l\\}$ to be the set of RGB color, alpha value, and loopable mask of a tile, respectively. We then assign label $l \\in \\{l_{empty}, l_{static}, l_{loop}\\}$ based on the $\\{T_\\alpha\\}$ and $\\{T_l\\}$ for each tile:", + "bbox": [ + 496, + 627, + 890, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nl = \\left\\{ \\begin{array}{l l} l _ {\\text {e m p t y}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} \\leq \\tau_ {\\alpha}, \\\\ l _ {\\text {s t a t i c}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} > \\tau_ {\\alpha} \\text {a n d} \\max \\left\\{T _ {l} \\right\\} < \\tau_ {l}, \\\\ l _ {\\text {l o o p}} & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 756, + 890, + 824 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We set the threshold of culling to be $\\tau_{\\alpha} = 0.05$ and $\\tau_{l} = 0.5$ . We cull the tiles with $l = l_{empty}$ . For tiles with $l = l_{loop}$ , we lift the static 2D RGBA patch into a patch sequence by copying the patch $T$ times, where $T$ is the number of frames that we would like the MTV to have. We add", + "bbox": [ + 496, + 825, + 892, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "313", + "bbox": [ + 485, + 925, + 509, + 936 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/cf3722ee028e5c341f276593a1a9d440a9e1dc9213ebf70a4e41497da9f0972d.jpg", + "image_caption": [ + "Figure 4. Visualization of looping loss. We first pad frames and extract 3D patches along the time axis for each pixel location, then we compute a normalized similarity score for each patch pair. Finally, the looping loss is computed by averaging errors between patches with minimum scores." + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 472, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a small random noise to the lifted patch video to prevent the straightforward static solution. For tiles with $l = l_{\\text{static}}$ , we simply keep it unchanged. This culling process greatly reduces the memory requirement for optimizing the 4D volume.", + "bbox": [ + 75, + 267, + 468, + 342 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Stage 2: MTV Optimization", + "text_level": 1, + "bbox": [ + 76, + 351, + 330, + 368 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After initializing the MTV representation, we then seek to optimize the final looping MTV.", + "bbox": [ + 75, + 375, + 468, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Looping Loss.** The main supervision of the optimization process is a novel looping loss, which is inspired by the recent progress in image [13] and video [14] retargeting algorithm. Specifically, in each iteration, we randomly sample a view and a rectangle window of size $h \\times w$ , and render the video $\\hat{\\mathbf{V}}_o \\in \\mathbb{R}^{T \\times h \\times w \\times 3}$ from MTV. We denote the corresponding input video as $\\mathbf{V}_p \\in \\mathbb{R}^{F \\times h \\times w \\times 3}$ . 
Our goal is to optimize the MTV such that $\\hat{\\mathbf{V}}_o$ forms a looping video $\\mathbf{V}_{\\infty}$ :", + "bbox": [ + 75, + 422, + 468, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {V} _ {\\infty} (t) = \\hat {\\mathbf {V}} _ {o} (t \\bmod T), t \\in [ 1, + \\infty), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 570, + 468, + 588 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{V}(t)$ means $t$ -th frame of the video and mod is the modulus operation. We define the looping loss to encourage the $\\mathbf{V}_{\\infty}$ to be a temporal retargeting result of $\\mathbf{V}_p$ . A visualization of the process is shown in Fig. 4.", + "bbox": [ + 75, + 598, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We start by extracting 3D patch sets $\\{\\mathbf{Q}_i; i = 1, \\dots, n\\}$ and $\\{\\mathbf{K}_j; j = 1, \\dots, m\\}$ from $\\mathbf{V}_{\\infty}$ and $\\mathbf{V}_p$ , respectively, along temporal axis. $\\{\\mathbf{Q}_i\\}$ and $\\{\\mathbf{K}_j\\}$ are all centered at the same pixel location and we repeat the same process for every pixel. Note that although there are infinitely many patches from the looping video, the extracted patch set of the looping video is equivalent to a finite set of patches, which are extracted from the rendered video by circularly padding the first $p = s - d$ frames of the rendered video $\\hat{\\mathbf{V}}_o$ at the end of itself, where $s$ and $d$ are the size and stride of the patches in the time axis. Fig. 5 demonstrates a toy example with 5 frames. By optimizing both the patches inside the video range and patches crossing the temporal boundary, we optimize a video that is both spatio-temporally consistent with the target and seamlessly looping. We then try to minimize the bidirectional similarity (BDS) [43] between", + "bbox": [ + 75, + 659, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1279fc8e50dbdc8e079d03fffa99c71d97d51702029111fb71956ded814f04d3.jpg", + "image_caption": [ + "Figure 5. For patches of size 3 and stride 1, the patch set extracted from the video that endlessly repeats 5 frames is the same as the patch set extracted from the padded video that circularly pads 2 frames." + ], + "image_footnote": [], + "bbox": [ + 506, + 87, + 887, + 208 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the two sets of patches. Intuitively, this means every patch in $\\{\\mathbf{Q}_i\\}$ appears in $\\{\\mathbf{K}_j\\}$ (for coherence) and every patch in $\\{\\mathbf{K}_j\\}$ appears in $\\{\\mathbf{Q}_i\\}$ (for completeness).", + "bbox": [ + 498, + 282, + 890, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To minimize the BDS between the two patch sets, we use the Patch Nearest Neighbor (PNN) algorithm [13] that first computes a 2D table of normalized similarity scores (NSSs) $s_{ij}$ for every possible pair of $\\mathbf{Q}_i$ and $\\mathbf{K}_j$ . 
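The following NumPy sketch illustrates, for a single pixel location, the circular padding and temporal 3D patch extraction of Fig. 5, together with a plain nearest-neighbour matching between rendered and input patches. It deliberately omits the completeness-weighted normalized similarity score introduced next, so it corresponds to the rho -> infinity behaviour; names and the toy shapes are mine.

```python
import numpy as np

def temporal_patches(seq, size=3, stride=1, circular=True):
    """seq: (T, C) sequence at one pixel. Returns (N, size * C) patches.
    With circular=True the first (size - stride) frames are appended at the
    end, so patches that cross the loop boundary are included (cf. Fig. 5)."""
    if circular:
        seq = np.concatenate([seq, seq[: size - stride]], axis=0)
    starts = range(0, seq.shape[0] - size + 1, stride)
    return np.stack([seq[i : i + size].reshape(-1) for i in starts])

def match_patches(Q, K):
    """Pick, for every rendered patch Q_i, the closest input patch K_{f(i)}
    under squared L2 distance and return the mean squared error between the
    matched pairs (plain nearest neighbours, no completeness weighting)."""
    d2 = ((Q[:, None, :] - K[None, :, :]) ** 2).sum(-1)  # (n, m) distance table
    f = d2.argmin(axis=1)
    return f, ((Q - K[f]) ** 2).mean()

# toy example: one pixel of a 50-frame rendered loop vs. a 300-frame input
Q = temporal_patches(np.random.rand(50, 3))                   # 50 patches
K = temporal_patches(np.random.rand(300, 3), circular=False)  # 298 patches
f, loss = match_patches(Q, K)
```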
Then for each patch $\\mathbf{Q}_i$ , we select a target patch $\\mathbf{K}_{f(i)} \\in \\{\\mathbf{K}_j\\}$ that has minimal NSS, where $f(i)$ is a selection function:", + "bbox": [ + 498, + 330, + 890, + 420 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nf (i) = \\arg \\min _ {k} s _ {i, k}, \\text {w h e r e} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 544, + 428, + 890, + 450 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ns _ {i j} = \\frac {1}{\\rho + \\min _ {k} \\| \\mathbf {Q} _ {k} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 553, + 452, + 890, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here $\\rho$ is a hyperparameter that controls the degree of completeness. Intuitively, when $\\rho \\rightarrow \\infty$ , Eq. 9 degenerates to $s_{ij} \\sim D(\\mathbf{Q}_i, \\mathbf{K}_j)$ , so we simply select $\\mathbf{K}_j$ that is most similar to $\\mathbf{Q}_i$ . And if $\\rho = 0$ , the denominator $\\min_k D(\\mathbf{Q}_k, \\mathbf{K}_j)$ penalizes the score if there are already some $\\mathbf{Q}_i$ that is closest to $\\mathbf{K}_j$ . Thus, the selection will prefer patches that have not yet been selected.", + "bbox": [ + 498, + 491, + 890, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Using the PNN algorithm, we get the set of patches $\\{\\mathbf{K}_{f(i)}\\}$ that is coherent to the target patch set $\\{\\mathbf{K}_j\\}$ , and the completeness is controlled by $\\rho$ . The looping loss is then defined as the MSE loss between $\\mathbf{Q}_i$ and $\\mathbf{K}_{f(i)}$ :", + "bbox": [ + 500, + 597, + 890, + 659 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {l o o p}} = \\frac {1}{n h w} \\sum_ {\\text {p i x e l}} \\sum_ {i = 1} ^ {n} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {f (i)} \\| _ {2} ^ {2}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 667, + 890, + 708 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sum_{pixel}$ indicates that the term is summed over all the pixel locations of the rendered video.", + "bbox": [ + 498, + 715, + 890, + 747 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pyramid Training. In the implementation, we adopt a pyramid training scheme. In the coarse level, we downsample both the input video and the MTV. The downsampling of the MTV is conducted by downsampling the tiles. We start from the coarsest level with downsample factor 0.24 and train the MTV representation for 50 epochs. We then upsample each tile by $1.4 \\times$ and repeat the training step. We show that the pyramid training scheme can improve the generation results.", + "bbox": [ + 496, + 763, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "314", + "bbox": [ + 485, + 926, + 511, + 936 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7900b94c79fbad5437d514bed3a1ad9b73168d71f4e5378a8702b602fae6b7fb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
| | VLPIPS↓ | STDerr↓ | Com.↓ | Coh.↓ | LoopQ↓ | # Params.↓ | Render Spd↑ |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 | 33M-184M | 140fps |
| VBR | 0.2074 | 82.36 | 12.98 | 11.42 | 11.49 | 300M | 20fps |
| loop2D + MTV | 0.2447 | 118.9 | 11.83 | 9.919 | 9.927 | 33M-184M | 140fps |
| loop2D + MPV | 0.2546 | 117.5 | 11.82 | 9.817 | 9.840 | 2123M | 110fps |
| loop2D + DyNeRF | 0.2282 | 123.7 | 11.93 | 10.23 | 10.27 | 2M | 0.1fps |
", + "bbox": [ + 133, + 88, + 834, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. Quantitative comparison of reconstruction quality and efficiency. $\\downarrow$ (↑) indicates lower (higher) is better. Our method produces the best quality and strikes a good balance between the number of parameters and rendering speed.", + "bbox": [ + 75, + 186, + 892, + 214 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8049ebae6245ab91d94956eea9b54e801123a2a18d9e0ad5dc1086ef05aa07eb.jpg", + "image_caption": [ + "Figure 6. Qualitative comparison with other baselines. Our method produces the sharpest results." + ], + "image_footnote": [], + "bbox": [ + 78, + 231, + 468, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 455, + 209, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 479, + 294, + 496 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We captured 16 scenes for quantitative and qualitative studies. For each scene, we captured 8-10 views in a faceforward manner using a Sony $\\alpha 9$ II camera. We captured each view at 25 fps for 10-20 seconds. We downsample each video to a resolution of $640 \\times 360$ . Finally, we randomly select one view for evaluation. The others are used for constructing MTVs using the two-stage pipeline. In the first stage, we empirically set $\\lambda_{tv} = 0.5$ and $\\lambda_{spa} = 0.004$ . We construct MPI with $D = 32$ layers. In the second stage, we let the hyperparameter $\\rho = 0$ to guarantee maximum completeness. We extract 3D patches with spatial dimension 11 and temporal dimension 3. We construct MTVs with approximately 50 frames, i.e., 2 seconds. We set the rendering window in each iteration to $h = 180$ , $w = 320$ for both stages.", + "bbox": [ + 75, + 503, + 468, + 731 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Metrics", + "text_level": 1, + "bbox": [ + 76, + 739, + 171, + 755 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For our quantitative study, we synthesize looping videos in test views using the reconstructed 3D video representation and compare the synthetic results with captured target videos. However, we do not have paired ground truth videos since we generate 3D videos with completely asynchronous inputs. Therefore, we adopt several intuitive metrics to evaluate the results in spatial and temporal aspects.", + "bbox": [ + 75, + 763, + 468, + 869 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Spatial Quality. We evaluate the spatial quality of a synthetic frame by computing the LPIPS value [55] between", + "bbox": [ + 75, + 869, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the synthetic frame with the frame in the target video that is most similar in terms of LPIPS. We average the values among all the 50 synthetic frames, which we denote as VLPIPS.", + "bbox": [ + 496, + 233, + 890, + 292 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Temporal Quality. Given two videos that have similar dynamism, they should have similar color distribution in each pixel location. We measure the temporal quality of the synthetic videos by first computing the standard deviation (STD) of the RGB color at each pixel location of the synthetic video and the target video, resulting in two STD maps of dimension $H \\times W \\times 3$ . 
We then compute $STDerr$ by measuring the MSE between the two maps.", + "bbox": [ + 496, + 292, + 892, + 414 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Spatio-temporal Quality. We evaluate the spatio-temporal similarity between the synthetic and target videos following the bidirectional similarity (BDS) [43]. We individually report Completeness and Coherence scores (abbreviated as Com. and Coh., respectively) by extracting and finding nearest neighbor 3D patches in two directions. Specifically, for each patch in the target video, we find the closest patches in the synthetic video for Com. and vice-versa. We measure the distance of two 3D patches using MSE, and the final scores are the averages of multiple different patch configurations of size and stride. We present the details of the patch configurations in the supplementary material.", + "bbox": [ + 496, + 415, + 892, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In addition, we use a metric similar to Coh. to measure the loop quality (LoopQ), which reflects the coherence of the looping video when switching from the last frame back to the first frame. This is achieved by extracting the 3D patches that overlap with the first and last frame, as shown by the blue rectangles in Fig. 5. Other steps remain the same as the Coh. score.", + "bbox": [ + 496, + 597, + 890, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Comparisons", + "text_level": 1, + "bbox": [ + 500, + 710, + 638, + 727 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first compare with VBR [46] by implementing it based on the descriptions in the paper since the code and data are not publicly available. We also compare with straightforward solutions that lift classical 2D looping algorithms to 3D. Specifically, we first generate a 2D looping video for each of the input videos using the method of Liao et al. [23]. And then we construct various scene representations using the 2D looping video and synthesize novel views. We compare with our sparse MTV representation $(loop2D + MTV)$ , the Multi-plane Video representation $(loop2D + MPV)$ and the dynamic NeRF representation", + "bbox": [ + 496, + 734, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "315", + "bbox": [ + 486, + 926, + 509, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b8157b026dcfd215d567c22117981b60e70bf19ab52e5e7ebfdf59a28e481ac2.jpg", + "image_caption": [ + "Figure 7. We visualize the pixel-wise $STDerr$ value for each method. Our method has a lower error, indicating that our approach best retains the dynamism of the scene. We recommend readers watch the supplemental video, where the difference is more noticeable." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 207, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e1c41d8094f406775c2bb0fe6970b93d5b99309f1b0489f3728ed8fd7802d3c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 210, + 88, + 336, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a8eb22b2c555e44e6a1936c240e6bd2115a0bc45c960857cb61571a79b0e50fb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 88, + 465, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0780bac44544fbf7c4e2fed1fa9e83d9eff794d06191c95011430f827d10eba0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 465, + 88, + 594, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cb70da44993d28a0c8df9973d533f696d373a89d0b2ad418c74f2354af9b5a60.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 594, + 88, + 720, + 213 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7c2cb70165326bd67f67ea5e012759250e4e23e9608043d1cedf2bd421722bf2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 720, + 88, + 851, + 213 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/02b6e4a1ec281e20334e62c474d4eeaaa79bdceb686fb502ad327100f725db21.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 851, + 88, + 890, + 212 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5bbb4bbb138a6b2e9e49a95237ef2ae04c302befb1654652d9f799bab0ac7301.jpg", + "image_caption": [ + "Figure 8. Results of our ablations. Our full model produces the fewest artifacts." + ], + "image_footnote": [], + "bbox": [ + 80, + 247, + 238, + 386 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/980b12aa297fe88048109321f0fe2c3648ef6ebc5fa46e2128a654c40139c243.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 240, + 247, + 401, + 386 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1f0220714d2ab30b1c0d3c941fafb7155bd9f8586a5b7e56d981ac0d1dbc04eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 247, + 563, + 386 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1c7688eb3fd9070a3ebeb3a0efaef2a5a23506e1b747c8f7c335e5492f73d1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 565, + 247, + 727, + 386 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d637ce61efeaabd210355e441c0a0ef05dc7c39dabf64e85440f76d827176425.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 247, + 890, + 386 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d11a84cfe949f2052d3081a991a003696db3849208a0e54fd59e9206a5b679e6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
| | VLPIPS↓ | STDerr↓ | Com.↓ | Coh.↓ | LoopQ↓ |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 |
| w/o pad | 0.1387 | 55.67 | 10.66 | 9.273 | 9.395 |
| w/o 2stage | 0.1755 | 67.99 | 11.69 | 9.982 | 10.13 |
| w/o pyr | 0.1412 | 57.41 | 10.86 | 9.555 | 9.465 |
| w/o tv | 0.1530 | 56.51 | 11.12 | 9.766 | 9.689 |
", + "bbox": [ + 78, + 406, + 467, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Ablations of our method. $\\downarrow \\left( \\uparrow \\right)$ indicates lower (higher) is better. (best in bold, and second best underlined)", + "bbox": [ + 75, + 501, + 467, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion [21] $(loop2D + DyNeRF)$", + "bbox": [ + 76, + 537, + 282, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare our method with the four baselines on our captured dataset. We synthesize novel view videos and report VLPIPS, STDerr, Com., Coh. and LoopQ metrics in Tab. 1. Our method outperforms other baselines in terms of visual quality, scene dynamism preservation, spatio-temporal consistency, and loop quality. We show the qualitative comparison in Fig. 6. We also visualize the STDerr value for each pixel in Fig. 7, which reflects the difference in dynamism between the synthetic results and the reference. We recommend that readers also see the video results included in the supplementary material. Note that our method produces the sharpest results, while best retaining the dynamism of the scene. VBR directly blends inconsistent videos from multiple input views. and the 2D looping baselines fail to consider multi-view information and produce view-inconsistent looping videos. As a result, they tend to blur out spatial and temporal details to compensate for view inconsistencies. We observe that loop2D+DyNeRF also generates sharper results compared with the other two baselines. This is because DyNeRF conditions on the view direction and tolerates the view inconsistency. However, it performs poorly in maintaining the dynamism of the scene.", + "bbox": [ + 75, + 553, + 468, + 885 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we measure the efficiency of the scene rep", + "bbox": [ + 96, + 886, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "resentations using several metrics. We first show the number of parameters (# Params.) of the model to represent a dynamic 3D volume of 50 frames. We evaluate rendering speed (Render Spd) at a $360 \\times 640$ resolution on a laptop equipped with an RTX 2060 GPU. We present the metrics in Tab. 1. Since the MTV representation varies with different scenes, we report the maximum and minimum values when evaluated in our dataset. We can see that our method surpasses VBR in # Params. and Render Spd. Compared with MPV that densely stores the scene parameters in a 4D volume, our sparse MTV representation can reduce the number of parameters by up to $98\\%$ , resulting in a slightly faster rendering speed and much smaller memory and disk usage. On the other hand, despite the surprisingly small number of parameters, the NeRF representation has extremely slow rendering speed. In other words, our MTV representation achieves the best trade-off between the number of parameters and rendering efficiency.", + "bbox": [ + 496, + 407, + 890, + 679 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 694, + 663, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conducted extensive ablation studies of our method to test the effectiveness of several design decisions in our pipeline by individually removing each component and constructing 3D looping videos from our dataset. We experimented on the following components: the frame padding operation as illustrated in Fig. 
5 when computing $\\mathcal{L}_{loop}$ (w/o pad), the two-stage training pipeline (w/o 2stage), the coarse-to-fine training strategy (w/o pyr), and the TV regularization (w/o tv). The numerical results are shown in Tab. 2, and qualitative results are presented in Fig. 8 and Fig. 9. We also experimented with different values of $\\lambda_{spa}$ and $\\rho$ to understand the resulting effect.", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "316", + "bbox": [ + 485, + 926, + 511, + 936 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ed4c4cb65b00f5088b02f8b20aa53e2d9284cd7302d26f94922e0c434140d8ce.jpg", + "image_caption": [ + "Figure 9. Ablations for the padding operation. In the second row, we visualize the temporal coherence by flattening the pixels in the green line along the time axis and repeating 3 times. Red rectangles highlight the discontinuity produced without the padding operation. We encourage readers to refer to the video results for a clearer demonstration." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 468, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Padding Operation. As shown in Tab. 2, without the padding operation, our method can still produce competitive results in terms of spatial quality and spatio-temporal consistency. It even has better temporal quality. This is because the padding operation adds extra boundary conditions to the optimization, making the optimization more difficult. However, as highlighted in the red rectangles in Fig. 9, without padding, our method is less prone to generate a properly looping video since it can not guarantee a smooth transition from the last frame to the first frame, leading to a lower loop quality score.", + "bbox": [ + 75, + 321, + 468, + 487 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Two-stage Pipeline. It can be seen from Tab. 2 that the two-stage pipeline plays an important role in generating high-quality results. Without the two-stage pipeline, where we directly optimize a dense MPV representation using the looping loss, the MPV easily gets trapped into view-inconsistent results, leading to significant drop in every metric evaluated.", + "bbox": [ + 75, + 489, + 468, + 593 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Coarse-to-fine Training. Results also show that the coarse-to-fine training scheme produces slightly better spatial and temporal quality than optimizing only on the finest level. This is because the patch-based optimization has a wider perceptual field at the coarse level, leading to a better global solution. Therefore, our full model tends to produce fewer artifacts compared with the w/o pyr model.", + "bbox": [ + 75, + 595, + 468, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "TV Regularization. We find it necessary to apply TV regularization, since the pipeline tends to generate MTVs with holes without this regularization, as shown in Fig. 8, which greatly affects the visual quality.", + "bbox": [ + 75, + 703, + 468, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Weight for $\\mathcal{L}_{spa}$ . We experimented on different values of $\\lambda_{spa}$ on one scene. We plot the relationship between Coh. scores and # Params. with respect to $\\lambda_{spa}$ . We can see that when $\\lambda_{spa} = 0$ , the reconstructed MTV is less sparse, which degenerates to a dense representation. This makes it harder to optimize and leads to a worse Coh. 
score. Then # Params. and Coh. drop rapidly as $\\lambda_{spa}$ grow. However, if $\\lambda_{spa}$ is larger than a threshold, Coh. increases again, while the improvement on # Params. is less substantial. This", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/43c7a6437d3c6af60f5749ab57b00f6bb1c0d6778ac7cdcb418744afcb9a15d3.jpg", + "image_caption": [ + "Figure 10. The trend of Coh. score and # Params. under different $\\lambda_{spa}$ . The green line is the value we use in all other experiments." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 885, + 169 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c8dd751d2c77b8519db812a661e676dca324465a61917667751f219647f255ce.jpg", + "image_caption": [ + "Figure 11. Controlling the dynamism by changing $\\rho$ ." + ], + "image_footnote": [], + "bbox": [ + 501, + 215, + 890, + 292 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "is because the excessive sparseness causes the tile-culling process to over-cull necessary tiles, resulting in holes in the rendering results. Therefore, we chose $\\lambda_{spa} = 0.004$ (green line in Fig. 10) in other experiments.", + "bbox": [ + 496, + 325, + 890, + 387 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Value of $\\rho$ . In the experiments, we use $\\rho = 0$ to ensure maximum completeness with respect to the input video. However, we find that by controlling the hyperparameter $\\rho$ , we could control the degree of dynamism of the reconstructed 3D video. One example is shown in Fig. 11.", + "bbox": [ + 496, + 392, + 890, + 468 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 500, + 497, + 746, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations and Future Work. Our method comes with some limitations. First, since the MTV representation does not condition on view direction, it fails to model complex view-dependent effects, such as non-planar specular. One possible way to improve the representation is by introducing view-dependency, such as spherical harmonics [53] or neural basis function [51]. Another limitation is that we assume the scene to possess a looping pattern, which works best for natural scenes like flowing water and waving trees. However, if the scene is not loopable, our method tends to fail because each view has a completely unique content. This leads to a highly ill-posed problem in constructing a looping video from the asynchronous input videos.", + "bbox": [ + 496, + 527, + 890, + 724 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conclusion. In this paper, we propose a practical solution for constructing a 3D looping video representation given completely asynchronous multi-view videos. Experiments verify the effectiveness of our pipeline and demonstrate significant improvement in quality and efficiency over several baselines. We hope that this work will further motivate research into dynamic 3D scene reconstruction.", + "bbox": [ + 496, + 729, + 890, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. The authors from HKUST were partially supported by the Hong Kong Research Grants Council (RGC). The author from CityU was partially supported by an ECS grant from the RGC (Project No. 
CityU 21209119).", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "317", + "bbox": [ + 485, + 926, + 509, + 936 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Aseem Agarwala, Ke Colin Zheng, Chris Pal, Maneesh Agrawala, Michael Cohen, Brian Curless, David Salesin, and Richard Szeliski. Panoramic video textures. ACM Trans. Graph., 24(3):821-827, jul 2005. 1, 2", + "[2] Benjamin Attal, Eliot Laidlaw, Aaron Gokaslan, Changil Kim, Christian Richardt, James Tompkin, and Matthew O'Toole. Törf: Time-of-flight radiance fields for dynamic scene view synthesis. Advances in neural information processing systems, 34:26289-26301, 2021. 2, 3", + "[3] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5366-5375, 2020. 2", + "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2, 4", + "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 1, 2", + "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. arXiv preprint arXiv:2206.15258, 2022. 2, 3", + "[7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII, pages 333-350. Springer, 2022. 2", + "[8] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2", + "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2, 3", + "[10] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5515-5524, 2016. 3", + "[11] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3", + "[12] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 3" + ], + "bbox": [ + 78, + 114, + 472, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] Niv Granot, Ben Feinstein, Assaf Shocher, Shai Bagon, and Michal Irani. 
Drop the gan: In defense of patches nearest neighbors as single image generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13460-13469, June 2022. 5", + "[14] Niv Haim, Ben Feinstein, Niv Granot, Assaf Shocher, Shai Bagon, Tali Dekel, and Michal Irani. Diverse generation from a single video made possible. arXiv preprint arXiv:2109.08591, 2021. 5", + "[15] Tavi Halperin, Hanit Hakim, Orestis Vantzos, Gershon Hochman, Netai Benaim, Lior Sassy, Michael Kupchik, Ofir Bibi, and Ohad Fried. Endless loops: detecting and animating periodic patterns in still images. ACM Transactions on Graphics (TOG), 40(4):1-12, 2021. 2", + "[16] Mingming He, Jing Liao, Pedro V Sander, and Hugues Hoppe. Gigapixel panorama video loops. ACM Transactions on Graphics (TOG), 37(1):1-15, 2017. 1, 2", + "[17] Peter Hedman, Pratul P Srinivasan, Ben Mildenhall, Jonathan T Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5875-5884, 2021. 3", + "[18] Aleksander Holynski, Brian L Curless, Steven M Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5810-5819, 2021. 2", + "[19] Apple Inc. Take and edit live photos, Oct 2021. 1", + "[20] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 4", + "[21] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 7", + "[22] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 3", + "[23] Jing Liao, Mark Finch, and Hugues Hoppe. Fast computation of seamless video loops. ACM Transactions on Graphics (TOG), 34(6):1-10, 2015. 1, 2, 3, 6", + "[24] Zicheng Liao, Neel Joshi, and Hugues Hoppe. Automated video looping with progressive dynamism. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013. 1, 2", + "[25] Kai-En Lin, Lei Xiao, Feng Liu, Guowei Yang, and Ravi Ramamoorthi. Deep 3d mask volume for view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1749–1758, 2021. 2", + "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. Advances" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "318", + "bbox": [ + 486, + 926, + 511, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "in Neural Information Processing Systems, 33:15651-15663, 2020. 3", + "[27] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 
2, 3", + "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2, 3", + "[29] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3667-3676, 2022. 2", + "[30] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 1, 3", + "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3", + "[32] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, July 2022. 2", + "[33] Medhini Narasimhan, Shiry Ginosar, Andrew Owens, Alexei A. Efros, and Trevor Darrell. Strumming to the beat: Audio-conditioned contrastive video textures. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3761-3770, January 2022. 2", + "[34] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3", + "[35] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 3", + "[36] Alex Rav-Acha, Yael Pritch, Dani Lischinski, and Shmuel Peleg. Dynamosaics: Video mosaics with non-chronological time. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 58-65. IEEE, 2005. 1, 2", + "[37] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, 2020. 2, 3", + "[38] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2, 3", + "[39] Leonid I Rudin and Stanley Osher. Total variation based image restoration with free local constraints. In Proceedings of" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1st international conference on image processing, volume 1, pages 31-35. IEEE, 1994. 4", + "[40] Arno Schödl, Richard Szeliski, David H Salesin, and Irfan Essa. Video textures. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 489-498, 2000. 2", + "[41] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 3", + "[42] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 
3", + "[43] Denis Simakov, Yaron Caspi, Eli Shechtman, and Michal Irani. Summarizing visual data using bidirectional similarity. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 5, 6", + "[44] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 3", + "[45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2", + "[46] Théo Thonat, Yagiz Aksoy, Miika Aittala, Sylvain Paris, Frédo Durand, and George Drettakis. Video-based rendering of dynamic stationary environments from unsynchronized inputs. In Computer Graphics Forum, volume 40, pages 73-86. Wiley Online Library, 2021. 1, 2, 3, 6", + "[47] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12959-12970, 2021. 3", + "[48] Richard Tucker and Noah Snively. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 551-560, 2020. 3", + "[49] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3", + "[50] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3906-3915, 2022. 3", + "[51] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 8" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "319", + "bbox": [ + 486, + 926, + 511, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5336-5345, 2020. 2", + "[53] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoptrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 3, 8", + "[54] Jiakai Zhang, Liao Wang, Xinhang Liu, Fuqiang Zhao, Minzhang Li, Haizhao Dai, Boyuan Zhang, Wei Yang, Lan Xu, and Jingyi Yu. Neuvv: Neural volumetric videos with immersive rendering and editing. arXiv preprint arXiv:2202.06088, 2022.3", + "[55] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. 
In CVPR, 2018. 6", + "[56] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images, 2018. 2, 3", + "[57] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 2" + ], + "bbox": [ + 78, + 90, + 470, + 445 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "320", + "bbox": [ + 486, + 926, + 511, + 936 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_model.json b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2ffef8fd4eca83167443d8dee4eec0c36987588c --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_model.json @@ -0,0 +1,2356 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.27, + 0.131, + 0.702, + 0.154 + ], + "angle": 0, + "content": "3D Video Loops from Asynchronous Input" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.18, + 0.731, + 0.201 + ], + "angle": 0, + "content": "Li Ma\\(^{1}\\) Xiaoyu Li\\(^{2}\\) Jing Liao\\(^{3}\\) Pedro V. Sander\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.264, + 0.205, + 0.706, + 0.223 + ], + "angle": 0, + "content": "1The Hong Kong University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.224, + 0.697, + 0.242 + ], + "angle": 0, + "content": "\\(^{2}\\)Tencent AI Lab \\(^{3}\\)City University of Hong Kong" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.258, + 0.201, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.137, + 0.402, + 0.387, + 0.415 + ], + "angle": 0, + "content": "(a) Reconstructed 3D Video Representation" + }, + { + "type": "image", + "bbox": [ + 0.202, + 0.259, + 0.449, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.26, + 0.609, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.402, + 0.687, + 0.415 + ], + "angle": 0, + "content": "(b) View and Time Control" + }, + { + "type": "image", + "bbox": [ + 0.611, + 0.261, + 0.767, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.263, + 0.892, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.769, + 0.402, + 0.888, + 0.415 + ], + "angle": 0, + "content": "(c) Real Time Demo" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.426, + 0.895, + 0.482 + ], + "angle": 0, + "content": "Figure 1. Given a set of asynchronous multi-view videos, we propose a pipeline to construct a novel 3D looping video representation (a), which consists of a static texture atlas, a dynamic texture atlas, and multiple tiles as the geometry proxy. 
The 3D video loops allow both view and time control (b), and can be rendered in real time even on mobile devices (c). We strongly recommend readers refer to the supplementary material for video results." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.495, + 0.314, + 0.51 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.537, + 0.474, + 0.884 + ], + "angle": 0, + "content": "- Looping videos are short video clips that can be looped endlessly without visible seams or artifacts. They provide a very attractive way to capture the dynamism of natural scenes. Existing methods have been mostly limited to 2D representations. In this paper, we take a step forward and propose a practical solution that enables an immersive experience on dynamic 3D looping scenes. The key challenge is to consider the per-view looping conditions from asynchronous input while maintaining view consistency for the 3D representation. We propose a novel sparse 3D video representation, namely Multi-Tile Video (MTV), which not only provides a view-consistent prior, but also greatly reduces memory usage, making the optimization of a 4D volume tractable. Then, we introduce a two-stage pipeline to construct the 3D looping MTV from completely asynchronous multi-view videos with no time overlap. A novel looping loss based on video temporal retargeting algorithms is adopted during the optimization to loop the 3D scene. Experiments of our framework have shown promise in successfully generating and rendering photorealistic 3D looping videos in real time even on mobile devices. The code, dataset, and live demos are available in https://limacv.github.io/VideoLoop3D_web/." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.494, + 0.631, + 0.51 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Endless looping videos are fascinating ways to record special moments. These video loops are compact in terms of storage and provide a much richer experience for scenes that exhibit looping behavior. One successful commercial use of this technique is the live photo [19] feature in the Apple iPhone, which tries to find an optimal looping period and fade in/out short video clips to create looping videos. There have been several works on automatically constructing 2D looping videos from non-looping short video clips. Liao et al. [24] first propose to create 2D video loops from videos captured with static cameras. They solve for the optimal starting frame and looping period for each pixel in the input video to composite the final video. Later on, several methods are proposed to improve the computation speed [23], or extend to panoramas [1, 36], and gigapixel videos [16]. However, few attempts have been made to extend video loops to a 3D representation. One existing work that shares a similar setting as ours is VBR [46], which generates plausible video loops in novel views. However, it comes with some limitations: It builds on top of ULR [5], which can produce ghosting artifacts due to inaccurate mesh reconstruction, as shown in [30]. Besides, VBR generates looping videos and reduces the inconsistency from asynchronous input by adaptively blending in different frequency domains, which tends to blur away details." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.926, + 0.512, + 0.937 + ], + "angle": 0, + "content": "310" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.35 + ], + "angle": 0, + "content": "To allow free-view observation of the looping videos, a proper 3D representation needs to be employed. Recently, tremendous progress has been made in novel view synthesis based on 3D scene representations such as triangle meshes [37, 38, 45], Multi-plane Image (MPI) [9, 56], and Neural Radiance Field (NeRF) [7, 31, 32], which could be reconstructed given only sparse observations of real scenes and render photo-realistic images in novel views. Much effort has been made to adapt these methods to dynamic scenes, which allows for both viewing space and time controls [2, 6, 27, 28, 34, 35, 52, 57]. Therefore, a straightforward solution to generate a 3D looping video is to employ the 2D looping algorithms for each view and lift the results to 3D using these methods. However, we find it hard to get satisfactory results since the 2D looping algorithms do not consider view consistency, which is even more challenging for the asynchronous multi-view videos that we use as input." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.353, + 0.473, + 0.581 + ], + "angle": 0, + "content": "In this work, we develop a practical solution for these problems by using the captured video input of the dynamic 3D scene with only one commodity camera. We automatically construct a 3D looping video representation from completely asynchronous multi-view input videos with no time overlap. To get promising 3D video loop results, two main issues need to be addressed. First, we need to solve for a view-consistent looping pattern from inconsistent multi-view videos, from which we need to identify spatio-temporal 3D patches that are as consistent as possible. Second, the 3D video potentially requires a memory-intensive 4D volume for storage. Therefore, we need to develop a 3D video representation that is both efficient in rendering and compact in memory usage to make the optimization of the 4D volume tractable." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.584, + 0.473, + 0.903 + ], + "angle": 0, + "content": "To this end, we develop an analysis-by-synthesis approach that trains for a view-consistent 3D video representation by optimizing multi-view looping targets. We propose an efficient 3D video representation based on Multi-plane Images (MPIs), namely Multi-tile Videos (MTVs), by exploiting the spatial and temporal sparsity of the 3D scene. As shown in Fig. 2, instead of densely storing large planes, MTVs store static or dynamic texture tiles that are sparsely scattered in the view frustum. This greatly reduces the memory requirement for rendering compared with other 3D video representations, making the optimization of the 3D looping video feasible in a single GPU. The sparsity of MTVs also serves as a view-consistent prior when optimizing the 3D looping video. To optimize the representation for looping, we formulate the looping generation for each view as a temporal video retargeting problem and develop a novel looping loss based on this formulation. We propose a two-stage pipeline to generate a looping MTV, and the experiments show that our method can produce photorealistic 3D video loops that maintain similar dynamism from the input, and enable real-time rendering even in mobile devices." 
+ }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.825, + 0.107 + ], + "angle": 0, + "content": "Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.112, + 0.892, + 0.158 + ], + "angle": 0, + "content": "- We propose Multi-tile Videos (MTVs), a novel dynamic 3D scene representation that is efficient in rendering and compact in memory usage." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.163, + 0.894, + 0.209 + ], + "angle": 0, + "content": "- We propose a novel looping loss by formulating the 3D video looping construction as a temporal retargeting problem." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.215, + 0.892, + 0.245 + ], + "angle": 0, + "content": "- We propose a two-stage pipeline that constructs MTVs from completely asynchronous multi-view videos." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.112, + 0.894, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.258, + 0.642, + 0.273 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.283, + 0.892, + 0.328 + ], + "angle": 0, + "content": "Our work lies at the confluence of two research topics: looping video construction and novel view synthesis. We will review each of them in this section." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.336, + 0.895, + 0.713 + ], + "angle": 0, + "content": "Video Loops. Several works have been proposed to synthesize looping videos from short video clips. Schödl et al. [40] create video loops by finding similar video frames and jumping between them. Audio [33] can also be leveraged for further refinement. Liao et al. [24] formulate the looping as a combinatorial optimization problem that tries to find the optimal start frame and looping period for each pixel. It seeks to maximize spatio-temporal consistency in the output looping videos. This formulation is further developed and accelerated by Liao et al. [23], and extended to gigapixel looping videos [16] by stitching multiple looping videos. Panorama video loops can also be created by taking a video with a panning camera [1, 36]. VBR [46] generates loops by fading in/out temporal Laplacian pyramids, and extends video loops to 3D using ULR [5]. Another line of work tries to create video loops from still images and strokes provided by users as rough guidelines of the looping motion. Endless Loops [15] tries to find self-similarities from the image and solve for the optical flow field, which is then used to warp and composite the frames of the looping video. This process can also be replaced by data-driven approaches [18, 29], or physics-based simulation [8]. Despite the progress in creating various forms of looping videos, extending looping videos to 3D is still an unexplored direction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Novel View Synthesis of Dynamic Scenes. Novel View Synthesis (NVS) aims at interpolating views given only a set of sparse input views. For dynamic scenes, NVS requires the construction of a 4D representation that allows for both space and time control. Some methods use synchronized multi-view videos as input, which are often only available in a studio setting [27, 28, 57], or using specially designed camera arrays [4, 9, 21, 25]. To ease hardware requirements, Open4D [3] uses unconstrained multi-view input, but still requires multiple observations at the same timestamp. 
With the development of neural rendering, it is possible to use only monocular input. However, this is" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.925, + 0.511, + 0.938 + ], + "angle": 0, + "content": "311" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.226 + ], + "angle": 0, + "content": "a highly ill-posed problem since the camera and scene elements are moving simultaneously. Some methods use extra sensors such as a depth sensor [2, 6], while some use a data-driven prior to help construct the scene geometry [12, 50]. Others use a hand-crafted motion prior to regularize the scene motion [22, 34, 35, 47], which usually can only handle simple motions. In our setting, we take asynchronous multi-view videos with no time overlap, which is a setting that has not been addressed before." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.232, + 0.473, + 0.564 + ], + "angle": 0, + "content": "3D Scene Representations. A critical issue in NVS is the underlying scene representation. A triangle mesh is the most commonly used scene representation in commercial 3D software. Some methods use meshes as their representation [37, 38, 46]. However, reconstructing an accurate, temporally consistent mesh is still an open problem, being particularly challenging for complex in-the-wild scenes [28]. A volumetric representation is another option to express the 3D world by storing scene parameters in a dense 3D grid [11, 27, 49, 54]. One benefit is that it trivially supports differentiable rendering, which greatly improves the reconstruction quality. The Multi-plane Image (MPI) [9, 10, 30, 44, 48, 56] is an adapted volumetric representation that represents a scene using multiple RGBA planes in the camera frustum. Volume representations can model complex geometry, but at the cost of higher memory usage. Another rapidly developing representation is Neural Radiance Field (NeRF) [31], which models scenes as continuous functions and parameterizes the function as an implicit neural network. It achieves photorealistic rendering results at the expense of long training and rendering times, especially for dynamic scenes." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.58, + 0.17, + 0.595 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.606, + 0.187, + 0.62 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.629, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Our goal is to reconstruct a view-consistent 3D video representation that can be looped infinitely using completely asynchronous multi-view 2D videos. We start by introducing a novel 3D video representation, namely Multitile Videos (MTVs), which improves efficiency by exploiting sparsity. Then we propose a two-stage pipeline as shown in Fig. 3 to construct a 3D looping MTV. In the first stage, we initialize the MTV by optimizing a static Multiplane Image (MPI) and a 3D loopable mask using long exposure images and 2D loopable masks derived from the input videos. We then construct an MTV through a tile culling process. In the second stage, we train the MTV using an analysis-by-synthesis approach in a coarse-to-fine manner. The key enabler for this process is a novel looping loss based on video retargeting algorithms, which encourages a video to simultaneously loop and preserve similarity to the input. The remainder of this section describes the details of this proposed approach." 
+ }, + { + "type": "image", + "bbox": [ + 0.499, + 0.088, + 0.895, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.228, + 0.892, + 0.256 + ], + "angle": 0, + "content": "Figure 2. Comparison between the Multi-plane Video representation and the Multi-tile Video representation." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.271, + 0.673, + 0.287 + ], + "angle": 0, + "content": "3.2. Data Preparation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.295, + 0.892, + 0.43 + ], + "angle": 0, + "content": "The input to our system are multiple asynchronous videos of the same scene from different views. Each video \\(\\mathbf{V} \\in \\mathbb{R}^{F \\times H \\times W \\times 3}\\) is a short clip with \\(F\\) frames and a resolution of \\(H \\times W\\). Video lengths may differ for each view. Each video is expected to have a fixed camera pose, which can be achieved using tripods or existing video stabilization tools during post-process. Since we allow videos to be asynchronous, we could capture each view sequentially using a single commodity camera." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.431, + 0.892, + 0.552 + ], + "angle": 0, + "content": "Given the precondition that the captured scene contains mostly repetitive content, we assume the long exposure images for each view to be view-consistent. Therefore, we compute an average image for each video \\(\\mathbf{V}\\), and then register a pinhole camera model for each video using COLMAP [41, 42]. We also compute a binary loopable mask for each input video similar to Liao et al. [23], where 1 indicates pixel with the potential to form a loop and 0 otherwise." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.561, + 0.838, + 0.577 + ], + "angle": 0, + "content": "3.3. Multi-tile Video (MTV) Representation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.584, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Before introducing our proposed MTV representation, we first briefly review the MPI representation [56]. An MPI represents the scene using \\(D\\) fronto-parallel RGBA planes in the frustum of a reference camera, with each plane arranged at fixed depths [48]. To render an MPI from novel views, we first need to warp each plane based on the depth of the plane and the viewing camera, and then iteratively blend each warped plane from back to front. A straightforward dynamic extension of MPI, namely Multi-plane Video (MPV), is to store a sequence of RGBA maps for each plane. For a video with \\(T\\) frames, this results in a 4D volume in \\(\\mathbb{R}^{D\\times T\\times H\\times W\\times 4}\\), which is very memory consuming. Inspired by recent work on sparse volume representation [17,26,53], we propose Multi-tile Videos, which reduce the memory requirements by exploiting the spatio-temporal sparsity of the scene. Specifically, we subdivide each plane into a regular grid of tiny tiles. Each tile \\(\\mathbf{T}\\in \\mathbb{R}^{F\\times H_s\\times W_s\\times 4}\\) stores a small RGBA patch sequence with spatial resolution \\(H_{s}\\times W_{s}\\). For each tile, we assign a label \\(l\\) by identifying whether it contains looping content \\(l_{loop}\\), a static scene \\(l_{static}\\), or is simply empty \\(l_{empty}\\). 
We could then store a" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "312" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.086, + 0.887, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.206, + 0.273, + 0.761, + 0.287 + ], + "angle": 0, + "content": "Figure 3. The two-stage pipeline to generate the MTV representation from multi-view videos." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.299, + 0.471, + 0.375 + ], + "angle": 0, + "content": "single RGBA patch for \\( l_{\\text{static}} \\), and discard tiles that are empty. Fig. 1 visualizes a reconstructed MTV representation, where the RGBA patches are packed into static and dynamic texture atlas. Fig. 2 shows the difference between MPVs and MTVs." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.382, + 0.327, + 0.398 + ], + "angle": 0, + "content": "3.4. Stage 1: MTV Initialization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.405, + 0.471, + 0.542 + ], + "angle": 0, + "content": "We find that optimizing a dense MTV directly from scratch results in the approach being easily trapped in local minima, which yields view-inconsistent results. To address this, we use a two-stage pipeline shown in Fig. 3. In the first stage, we start by constructing a \"long exposure\" MPI. Then we initialize the sparse MTV by tile culling process that removes unnecessary tiles. By reducing the number of parameters, the initialized MTV provides a view-consistent prior and leads to a high-quality 3D video representation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.559, + 0.47, + 0.786 + ], + "angle": 0, + "content": "Training a looping-aware MPI. We start by training a dense MPI \\(\\mathbf{M} \\in \\mathbb{R}^{D \\times H \\times W \\times 4}\\), as well as a 3D loopable mask \\(\\mathbf{L} \\in \\mathbb{R}^{D \\times H \\times W}\\), using the average image and the 2D loopable mask, respectively. We randomly initialize \\(\\mathbf{M}\\) and \\(\\mathbf{L}\\), and in each iteration, we randomly sample a patch in a random view, and render an RGB patch \\(\\hat{\\mathbf{p}}_c \\in \\mathbb{R}^{h \\times w \\times 3}\\) and a loopable mask patch \\(\\hat{\\mathbf{p}}_l \\in \\mathbb{R}^{h \\times w}\\) using the standard MPI rendering method. Note that the \\(\\alpha\\) channel is shared between \\(\\mathbf{M}\\) and \\(\\mathbf{L}\\) during rendering. We supervise the MPI \\(\\mathbf{M}\\) by minimizing the Mean Square Error (MSE) between the rendering results and the corresponding patch \\(\\mathbf{p}_c\\) from the average image. 
We supervise the loopable mask \\(\\mathbf{L}\\) by minimizing the Binary Cross Entropy (BCE) between the rendered 2D mask \\(\\hat{\\mathbf{p}}_l\\) and the corresponding patch \\(\\mathbf{p}_l\\) from the 2D loopable mask:" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.79, + 0.469, + 0.82 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {m s e} = \\frac {1}{h w} \\| \\mathbf {p} _ {c} - \\hat {\\mathbf {p}} _ {c} \\| _ {2} ^ {2}, \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.096, + 0.822, + 0.469, + 0.863 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {b c d} = \\frac {1}{h w} \\| - (\\mathbf {p} _ {l} \\log (\\hat {\\mathbf {p}} _ {l}) + (1 - \\mathbf {p} _ {l}) \\log (1 - \\hat {\\mathbf {p}} _ {l})) \\| _ {1}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.87, + 0.469, + 0.902 + ], + "angle": 0, + "content": "where \\(\\| \\mathbf{p}\\| _1\\) and \\(\\| \\mathbf{p}\\| _2\\) are the L1 and L2 norm of a flattened patch \\(\\mathbf{p}\\). The log is computed for every element of a patch." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.299, + 0.892, + 0.361 + ], + "angle": 0, + "content": "Since the rendering of the MPI is differentiable, we optimize \\(\\mathbf{M}\\) and \\(\\mathbf{L}\\) using the Adam optimizer [20]. Optimizing all the parameters freely causes noisy artifacts, therefore, we apply total variation (TV) regularization [39] to \\(\\mathbf{M}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.366, + 0.892, + 0.395 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {t v} = \\frac {1}{H W} \\left(\\| \\Delta_ {x} \\mathbf {M} \\| _ {1} + \\| \\Delta_ {y} \\mathbf {M} \\| _ {1}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.4, + 0.893, + 0.518 + ], + "angle": 0, + "content": "where \\(\\| \\Delta_x\\mathbf{M}\\| _1\\) is shorthand for the L1 norm of the gradient of each pixel in the MPI \\(\\mathbf{M}\\) along \\(x\\) direction, and analogously for \\(\\| \\Delta_y\\mathbf{M}\\| _1\\). We also adopt a sparsity loss to further encourage sparsity to the \\(\\alpha\\) channel of the MPI \\(\\mathbf{M}_{\\alpha}\\) as in Broxton et al. [4]. Specifically, we collect \\(D\\) alpha values in each pixel location of \\(\\mathbf{M}_{\\alpha}\\) into a vector \\(\\beta\\), where \\(D\\) is the number of planes. Then the sparsity loss is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.518, + 0.892, + 0.555 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {s p a} = \\frac {1}{H W} \\sum_ {\\text {p i x e l}} \\frac {\\| \\beta \\| _ {1}}{\\| \\beta \\| _ {2}}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.558, + 0.892, + 0.587 + ], + "angle": 0, + "content": "The final loss in the first stage is a weighted sum of the four losses:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.596, + 0.891, + 0.613 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {m s e} + \\mathcal {L} _ {b c d} + \\lambda_ {t v} \\mathcal {L} _ {t v} + \\lambda_ {s p a} \\mathcal {L} _ {s p a}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.628, + 0.892, + 0.75 + ], + "angle": 0, + "content": "Tile Culling. After training, we reconstruct a static MPI M as well as a 3D loopable mask \\(\\mathbf{L}\\). We subdivide each plane into a regular grid of tiles. In the experiments, we subdivide the plane so that each tile has a resolution of \\(H_{s} = W_{s} = 16\\). 
We denote \\(\\{T_c\\}, \\{T_\\alpha\\}, \\{T_l\\}\\) to be the set of RGB color, alpha value, and loopable mask of a tile, respectively. We then assign label \\(l \\in \\{l_{empty}, l_{static}, l_{loop}\\}\\) based on the \\(\\{T_\\alpha\\}\\) and \\(\\{T_l\\}\\) for each tile:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.757, + 0.892, + 0.825 + ], + "angle": 0, + "content": "\\[\nl = \\left\\{ \\begin{array}{l l} l _ {\\text {e m p t y}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} \\leq \\tau_ {\\alpha}, \\\\ l _ {\\text {s t a t i c}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} > \\tau_ {\\alpha} \\text {a n d} \\max \\left\\{T _ {l} \\right\\} < \\tau_ {l}, \\\\ l _ {\\text {l o o p}} & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.893, + 0.9 + ], + "angle": 0, + "content": "We set the threshold of culling to be \\(\\tau_{\\alpha} = 0.05\\) and \\(\\tau_{l} = 0.5\\). We cull the tiles with \\(l = l_{empty}\\). For tiles with \\(l = l_{loop}\\), we lift the static 2D RGBA patch into a patch sequence by copying the patch \\(T\\) times, where \\(T\\) is the number of frames that we would like the MTV to have. We add" + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.926, + 0.511, + 0.937 + ], + "angle": 0, + "content": "313" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.089, + 0.473, + 0.177 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.187, + 0.47, + 0.258 + ], + "angle": 0, + "content": "Figure 4. Visualization of looping loss. We first pad frames and extract 3D patches along the time axis for each pixel location, then we compute a normalized similarity score for each patch pair. Finally, the looping loss is computed by averaging errors between patches with minimum scores." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.268, + 0.469, + 0.343 + ], + "angle": 0, + "content": "a small random noise to the lifted patch video to prevent the straightforward static solution. For tiles with \\( l = l_{\\text{static}} \\), we simply keep it unchanged. This culling process greatly reduces the memory requirement for optimizing the 4D volume." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.352, + 0.331, + 0.369 + ], + "angle": 0, + "content": "3.5. Stage 2: MTV Optimization" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.376, + 0.469, + 0.406 + ], + "angle": 0, + "content": "After initializing the MTV representation, we then seek to optimize the final looping MTV." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.424, + 0.469, + 0.56 + ], + "angle": 0, + "content": "**Looping Loss.** The main supervision of the optimization process is a novel looping loss, which is inspired by the recent progress in image [13] and video [14] retargeting algorithm. Specifically, in each iteration, we randomly sample a view and a rectangle window of size \\( h \\times w \\), and render the video \\( \\hat{\\mathbf{V}}_o \\in \\mathbb{R}^{T \\times h \\times w \\times 3} \\) from MTV. We denote the corresponding input video as \\( \\mathbf{V}_p \\in \\mathbb{R}^{F \\times h \\times w \\times 3} \\). 
Our goal is to optimize the MTV such that \\( \\hat{\\mathbf{V}}_o \\) forms a looping video \\( \\mathbf{V}_{\\infty} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.571, + 0.469, + 0.589 + ], + "angle": 0, + "content": "\\[\n\\mathbf {V} _ {\\infty} (t) = \\hat {\\mathbf {V}} _ {o} (t \\bmod T), t \\in [ 1, + \\infty), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.469, + 0.659 + ], + "angle": 0, + "content": "where \\(\\mathbf{V}(t)\\) means \\(t\\)-th frame of the video and mod is the modulus operation. We define the looping loss to encourage the \\(\\mathbf{V}_{\\infty}\\) to be a temporal retargeting result of \\(\\mathbf{V}_p\\). A visualization of the process is shown in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We start by extracting 3D patch sets \\(\\{\\mathbf{Q}_i; i = 1, \\dots, n\\}\\) and \\(\\{\\mathbf{K}_j; j = 1, \\dots, m\\}\\) from \\(\\mathbf{V}_{\\infty}\\) and \\(\\mathbf{V}_p\\), respectively, along temporal axis. \\(\\{\\mathbf{Q}_i\\}\\) and \\(\\{\\mathbf{K}_j\\}\\) are all centered at the same pixel location and we repeat the same process for every pixel. Note that although there are infinitely many patches from the looping video, the extracted patch set of the looping video is equivalent to a finite set of patches, which are extracted from the rendered video by circularly padding the first \\(p = s - d\\) frames of the rendered video \\(\\hat{\\mathbf{V}}_o\\) at the end of itself, where \\(s\\) and \\(d\\) are the size and stride of the patches in the time axis. Fig. 5 demonstrates a toy example with 5 frames. By optimizing both the patches inside the video range and patches crossing the temporal boundary, we optimize a video that is both spatio-temporally consistent with the target and seamlessly looping. We then try to minimize the bidirectional similarity (BDS) [43] between" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.088, + 0.888, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.219, + 0.892, + 0.273 + ], + "angle": 0, + "content": "Figure 5. For patches of size 3 and stride 1, the patch set extracted from the video that endlessly repeats 5 frames is the same as the patch set extracted from the padded video that circularly pads 2 frames." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.284, + 0.892, + 0.331 + ], + "angle": 0, + "content": "the two sets of patches. Intuitively, this means every patch in \\(\\{\\mathbf{Q}_i\\}\\) appears in \\(\\{\\mathbf{K}_j\\}\\) (for coherence) and every patch in \\(\\{\\mathbf{K}_j\\}\\) appears in \\(\\{\\mathbf{Q}_i\\}\\) (for completeness)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.331, + 0.892, + 0.421 + ], + "angle": 0, + "content": "To minimize the BDS between the two patch sets, we use the Patch Nearest Neighbor (PNN) algorithm [13] that first computes a 2D table of normalized similarity scores (NSSs) \\( s_{ij} \\) for every possible pair of \\( \\mathbf{Q}_i \\) and \\( \\mathbf{K}_j \\). 
Then for each patch \\( \\mathbf{Q}_i \\), we select a target patch \\( \\mathbf{K}_{f(i)} \\in \\{\\mathbf{K}_j\\} \\) that has minimal NSS, where \\( f(i) \\) is a selection function:" + }, + { + "type": "equation", + "bbox": [ + 0.545, + 0.429, + 0.891, + 0.451 + ], + "angle": 0, + "content": "\\[\nf (i) = \\arg \\min _ {k} s _ {i, k}, \\text {w h e r e} \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.554, + 0.453, + 0.891, + 0.486 + ], + "angle": 0, + "content": "\\[\ns _ {i j} = \\frac {1}{\\rho + \\min _ {k} \\| \\mathbf {Q} _ {k} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.492, + 0.892, + 0.597 + ], + "angle": 0, + "content": "Here \\(\\rho\\) is a hyperparameter that controls the degree of completeness. Intuitively, when \\(\\rho \\rightarrow \\infty\\), Eq. 9 degenerates to \\(s_{ij} \\sim D(\\mathbf{Q}_i, \\mathbf{K}_j)\\), so we simply select \\(\\mathbf{K}_j\\) that is most similar to \\(\\mathbf{Q}_i\\). And if \\(\\rho = 0\\), the denominator \\(\\min_k D(\\mathbf{Q}_k, \\mathbf{K}_j)\\) penalizes the score if there are already some \\(\\mathbf{Q}_i\\) that is closest to \\(\\mathbf{K}_j\\). Thus, the selection will prefer patches that have not yet been selected." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.598, + 0.892, + 0.66 + ], + "angle": 0, + "content": "Using the PNN algorithm, we get the set of patches \\(\\{\\mathbf{K}_{f(i)}\\}\\) that is coherent to the target patch set \\(\\{\\mathbf{K}_j\\}\\), and the completeness is controlled by \\(\\rho\\). The looping loss is then defined as the MSE loss between \\(\\mathbf{Q}_i\\) and \\(\\mathbf{K}_{f(i)}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.564, + 0.668, + 0.891, + 0.709 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {l o o p}} = \\frac {1}{n h w} \\sum_ {\\text {p i x e l}} \\sum_ {i = 1} ^ {n} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {f (i)} \\| _ {2} ^ {2}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.717, + 0.891, + 0.748 + ], + "angle": 0, + "content": "where \\(\\sum_{pixel}\\) indicates that the term is summed over all the pixel locations of the rendered video." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Pyramid Training. In the implementation, we adopt a pyramid training scheme. In the coarse level, we downsample both the input video and the MTV. The downsampling of the MTV is conducted by downsampling the tiles. We start from the coarsest level with downsample factor 0.24 and train the MTV representation for 50 epochs. We then upsample each tile by \\(1.4 \\times\\) and repeat the training step. We show that the pyramid training scheme can improve the generation results." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "314" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.134, + 0.089, + 0.835, + 0.183 + ], + "angle": 0, + "content": "
|  | VLPIPS↓ | STDerr↓ | Com.↓ | Coh.↓ | LoopQ↓ | # Params.↓ | Render Spd↑ |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 | 33M-184M | 140fps |
| VBR | 0.2074 | 82.36 | 12.98 | 11.42 | 11.49 | 300M | 20fps |
| loop2D + MTV | 0.2447 | 118.9 | 11.83 | 9.919 | 9.927 | 33M-184M | 140fps |
| loop2D + MPV | 0.2546 | 117.5 | 11.82 | 9.817 | 9.840 | 2123M | 110fps |
| loop2D + DyNeRF | 0.2282 | 123.7 | 11.93 | 10.23 | 10.27 | 2M | 0.1fps |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.187, + 0.893, + 0.215 + ], + "angle": 0, + "content": "Table 1. Quantitative comparison of reconstruction quality and efficiency. \\( \\downarrow \\) (↑) indicates lower (higher) is better. Our method produces the best quality and strikes a good balance between the number of parameters and rendering speed." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.232, + 0.47, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.417, + 0.471, + 0.446 + ], + "angle": 0, + "content": "Figure 6. Qualitative comparison with other baselines. Our method produces the sharpest results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.456, + 0.21, + 0.472 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.481, + 0.295, + 0.497 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.505, + 0.47, + 0.732 + ], + "angle": 0, + "content": "We captured 16 scenes for quantitative and qualitative studies. For each scene, we captured 8-10 views in a faceforward manner using a Sony \\(\\alpha 9\\) II camera. We captured each view at 25 fps for 10-20 seconds. We downsample each video to a resolution of \\(640 \\times 360\\). Finally, we randomly select one view for evaluation. The others are used for constructing MTVs using the two-stage pipeline. In the first stage, we empirically set \\(\\lambda_{tv} = 0.5\\) and \\(\\lambda_{spa} = 0.004\\). We construct MPI with \\(D = 32\\) layers. In the second stage, we let the hyperparameter \\(\\rho = 0\\) to guarantee maximum completeness. We extract 3D patches with spatial dimension 11 and temporal dimension 3. We construct MTVs with approximately 50 frames, i.e., 2 seconds. We set the rendering window in each iteration to \\(h = 180\\), \\(w = 320\\) for both stages." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.741, + 0.173, + 0.756 + ], + "angle": 0, + "content": "4.2. Metrics" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.469, + 0.87 + ], + "angle": 0, + "content": "For our quantitative study, we synthesize looping videos in test views using the reconstructed 3D video representation and compare the synthetic results with captured target videos. However, we do not have paired ground truth videos since we generate 3D videos with completely asynchronous inputs. Therefore, we adopt several intuitive metrics to evaluate the results in spatial and temporal aspects." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Spatial Quality. We evaluate the spatial quality of a synthetic frame by computing the LPIPS value [55] between" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.234, + 0.892, + 0.293 + ], + "angle": 0, + "content": "the synthetic frame with the frame in the target video that is most similar in terms of LPIPS. We average the values among all the 50 synthetic frames, which we denote as VLPIPS." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.294, + 0.893, + 0.415 + ], + "angle": 0, + "content": "Temporal Quality. Given two videos that have similar dynamism, they should have similar color distribution in each pixel location. 
We measure the temporal quality of the synthetic videos by first computing the standard deviation (STD) of the RGB color at each pixel location of the synthetic video and the target video, resulting in two STD maps of dimension \\( H \\times W \\times 3 \\). We then compute \\( STDerr \\) by measuring the MSE between the two maps." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.416, + 0.893, + 0.597 + ], + "angle": 0, + "content": "Spatio-temporal Quality. We evaluate the spatio-temporal similarity between the synthetic and target videos following the bidirectional similarity (BDS) [43]. We individually report Completeness and Coherence scores (abbreviated as Com. and Coh., respectively) by extracting and finding nearest neighbor 3D patches in two directions. Specifically, for each patch in the target video, we find the closest patches in the synthetic video for Com. and vice-versa. We measure the distance of two 3D patches using MSE, and the final scores are the averages of multiple different patch configurations of size and stride. We present the details of the patch configurations in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.598, + 0.892, + 0.702 + ], + "angle": 0, + "content": "In addition, we use a metric similar to Coh. to measure the loop quality (LoopQ), which reflects the coherence of the looping video when switching from the last frame back to the first frame. This is achieved by extracting the 3D patches that overlap with the first and last frame, as shown by the blue rectangles in Fig. 5. Other steps remain the same as the Coh. score." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.712, + 0.64, + 0.728 + ], + "angle": 0, + "content": "4.3. Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We first compare with VBR [46] by implementing it based on the descriptions in the paper since the code and data are not publicly available. We also compare with straightforward solutions that lift classical 2D looping algorithms to 3D. Specifically, we first generate a 2D looping video for each of the input videos using the method of Liao et al. [23]. And then we construct various scene representations using the 2D looping video and synthesize novel views. We compare with our sparse MTV representation \\((loop2D + MTV)\\), the Multi-plane Video representation \\((loop2D + MPV)\\) and the dynamic NeRF representation" + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.927, + 0.511, + 0.938 + ], + "angle": 0, + "content": "315" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.209, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.211, + 0.089, + 0.338, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.089, + 0.466, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.466, + 0.089, + 0.595, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.595, + 0.089, + 0.722, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.089, + 0.852, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.852, + 0.089, + 0.892, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.217, + 0.892, + 0.243 + ], + "angle": 0, + "content": "Figure 7. 
We visualize the pixel-wise \\( STDerr \\) value for each method. Our method has a lower error, indicating that our approach best retains the dynamism of the scene. We recommend readers watch the supplemental video, where the difference is more noticeable." + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.248, + 0.24, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.248, + 0.403, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.248, + 0.565, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.566, + 0.248, + 0.728, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.729, + 0.248, + 0.891, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.39, + 0.72, + 0.403 + ], + "angle": 0, + "content": "Figure 8. Results of our ablations. Our full model produces the fewest artifacts." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.407, + 0.468, + 0.5 + ], + "angle": 0, + "content": "
|  | VLPIPS ↓ | STDerr ↓ | Com. ↓ | Coh. ↓ | LoopQ ↓ |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 |
| w/o pad | 0.1387 | 55.67 | 10.66 | 9.273 | 9.395 |
| w/o 2stage | 0.1755 | 67.99 | 11.69 | 9.982 | 10.13 |
| w/o pyr | 0.1412 | 57.41 | 10.86 | 9.555 | 9.465 |
| w/o tv | 0.1530 | 56.51 | 11.12 | 9.766 | 9.689 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.502, + 0.468, + 0.529 + ], + "angle": 0, + "content": "Table 2. Ablations of our method. \\( \\downarrow \\left( \\uparrow \\right) \\) indicates lower (higher) is better. (best in bold, and second best underlined)" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.539, + 0.283, + 0.553 + ], + "angle": 0, + "content": "tion [21] \\((loop2D + DyNeRF)\\)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.47, + 0.886 + ], + "angle": 0, + "content": "We compare our method with the four baselines on our captured dataset. We synthesize novel view videos and report VLPIPS, STDerr, Com., Coh. and LoopQ metrics in Tab. 1. Our method outperforms other baselines in terms of visual quality, scene dynamism preservation, spatio-temporal consistency, and loop quality. We show the qualitative comparison in Fig. 6. We also visualize the STDerr value for each pixel in Fig. 7, which reflects the difference in dynamism between the synthetic results and the reference. We recommend that readers also see the video results included in the supplementary material. Note that our method produces the sharpest results, while best retaining the dynamism of the scene. VBR directly blends inconsistent videos from multiple input views. and the 2D looping baselines fail to consider multi-view information and produce view-inconsistent looping videos. As a result, they tend to blur out spatial and temporal details to compensate for view inconsistencies. We observe that loop2D+DyNeRF also generates sharper results compared with the other two baselines. This is because DyNeRF conditions on the view direction and tolerates the view inconsistency. However, it performs poorly in maintaining the dynamism of the scene." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.887, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Additionally, we measure the efficiency of the scene rep" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.409, + 0.892, + 0.68 + ], + "angle": 0, + "content": "resentations using several metrics. We first show the number of parameters (# Params.) of the model to represent a dynamic 3D volume of 50 frames. We evaluate rendering speed (Render Spd) at a \\(360 \\times 640\\) resolution on a laptop equipped with an RTX 2060 GPU. We present the metrics in Tab. 1. Since the MTV representation varies with different scenes, we report the maximum and minimum values when evaluated in our dataset. We can see that our method surpasses VBR in # Params. and Render Spd. Compared with MPV that densely stores the scene parameters in a 4D volume, our sparse MTV representation can reduce the number of parameters by up to \\(98\\%\\), resulting in a slightly faster rendering speed and much smaller memory and disk usage. On the other hand, despite the surprisingly small number of parameters, the NeRF representation has extremely slow rendering speed. In other words, our MTV representation achieves the best trade-off between the number of parameters and rendering efficiency." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.695, + 0.664, + 0.71 + ], + "angle": 0, + "content": "4.4. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We conducted extensive ablation studies of our method to test the effectiveness of several design decisions in our pipeline by individually removing each component and constructing 3D looping videos from our dataset. 
We experimented on the following components: the frame padding operation as illustrated in Fig. 5 when computing \\(\\mathcal{L}_{loop}\\) (w/o pad), the two-stage training pipeline (w/o 2stage), the coarse-to-fine training strategy (w/o pyr), and the TV regularization (w/o tv). The numerical results are shown in Tab. 2, and qualitative results are presented in Fig. 8 and Fig. 9. We also experimented with different values of \\(\\lambda_{spa}\\) and \\(\\rho\\) to understand the resulting effect." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "316" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.47, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.231, + 0.47, + 0.314 + ], + "angle": 0, + "content": "Figure 9. Ablations for the padding operation. In the second row, we visualize the temporal coherence by flattening the pixels in the green line along the time axis and repeating 3 times. Red rectangles highlight the discontinuity produced without the padding operation. We encourage readers to refer to the video results for a clearer demonstration." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.469, + 0.488 + ], + "angle": 0, + "content": "Padding Operation. As shown in Tab. 2, without the padding operation, our method can still produce competitive results in terms of spatial quality and spatio-temporal consistency. It even has better temporal quality. This is because the padding operation adds extra boundary conditions to the optimization, making the optimization more difficult. However, as highlighted in the red rectangles in Fig. 9, without padding, our method is less prone to generate a properly looping video since it can not guarantee a smooth transition from the last frame to the first frame, leading to a lower loop quality score." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.49, + 0.469, + 0.594 + ], + "angle": 0, + "content": "Two-stage Pipeline. It can be seen from Tab. 2 that the two-stage pipeline plays an important role in generating high-quality results. Without the two-stage pipeline, where we directly optimize a dense MPV representation using the looping loss, the MPV easily gets trapped into view-inconsistent results, leading to significant drop in every metric evaluated." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.597, + 0.469, + 0.702 + ], + "angle": 0, + "content": "Coarse-to-fine Training. Results also show that the coarse-to-fine training scheme produces slightly better spatial and temporal quality than optimizing only on the finest level. This is because the patch-based optimization has a wider perceptual field at the coarse level, leading to a better global solution. Therefore, our full model tends to produce fewer artifacts compared with the w/o pyr model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.469, + 0.764 + ], + "angle": 0, + "content": "TV Regularization. We find it necessary to apply TV regularization, since the pipeline tends to generate MTVs with holes without this regularization, as shown in Fig. 8, which greatly affects the visual quality." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Weight for \\(\\mathcal{L}_{spa}\\). We experimented on different values of \\(\\lambda_{spa}\\) on one scene. We plot the relationship between Coh. scores and # Params. with respect to \\(\\lambda_{spa}\\). 
We can see that when \\(\\lambda_{spa} = 0\\), the reconstructed MTV is less sparse, which degenerates to a dense representation. This makes it harder to optimize and leads to a worse Coh. score. Then # Params. and Coh. drop rapidly as \\(\\lambda_{spa}\\) grow. However, if \\(\\lambda_{spa}\\) is larger than a threshold, Coh. increases again, while the improvement on # Params. is less substantial. This" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.887, + 0.17 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.172, + 0.892, + 0.201 + ], + "angle": 0, + "content": "Figure 10. The trend of Coh. score and # Params. under different \\(\\lambda_{spa}\\). The green line is the value we use in all other experiments." + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.217, + 0.892, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.538, + 0.298, + 0.853, + 0.312 + ], + "angle": 0, + "content": "Figure 11. Controlling the dynamism by changing \\(\\rho\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.892, + 0.388 + ], + "angle": 0, + "content": "is because the excessive sparseness causes the tile-culling process to over-cull necessary tiles, resulting in holes in the rendering results. Therefore, we chose \\(\\lambda_{spa} = 0.004\\) (green line in Fig. 10) in other experiments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.393, + 0.892, + 0.469 + ], + "angle": 0, + "content": "Value of \\(\\rho\\). In the experiments, we use \\(\\rho = 0\\) to ensure maximum completeness with respect to the input video. However, we find that by controlling the hyperparameter \\(\\rho\\), we could control the degree of dynamism of the reconstructed 3D video. One example is shown in Fig. 11." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.498, + 0.747, + 0.513 + ], + "angle": 0, + "content": "5. Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.528, + 0.892, + 0.725 + ], + "angle": 0, + "content": "Limitations and Future Work. Our method comes with some limitations. First, since the MTV representation does not condition on view direction, it fails to model complex view-dependent effects, such as non-planar specular. One possible way to improve the representation is by introducing view-dependency, such as spherical harmonics [53] or neural basis function [51]. Another limitation is that we assume the scene to possess a looping pattern, which works best for natural scenes like flowing water and waving trees. However, if the scene is not loopable, our method tends to fail because each view has a completely unique content. This leads to a highly ill-posed problem in constructing a looping video from the asynchronous input videos." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.73, + 0.892, + 0.835 + ], + "angle": 0, + "content": "Conclusion. In this paper, we propose a practical solution for constructing a 3D looping video representation given completely asynchronous multi-view videos. Experiments verify the effectiveness of our pipeline and demonstrate significant improvement in quality and efficiency over several baselines. We hope that this work will further motivate research into dynamic 3D scene reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. The authors from HKUST were partially supported by the Hong Kong Research Grants Council (RGC). 
The author from CityU was partially supported by an ECS grant from the RGC (Project No. CityU 21209119)." + }, + { + "type": "page_number", + "bbox": [ + 0.486, + 0.927, + 0.511, + 0.938 + ], + "angle": 0, + "content": "317" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Aseem Agarwala, Ke Colin Zheng, Chris Pal, Maneesh Agrawala, Michael Cohen, Brian Curless, David Salesin, and Richard Szeliski. Panoramic video textures. ACM Trans. Graph., 24(3):821-827, jul 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.472, + 0.242 + ], + "angle": 0, + "content": "[2] Benjamin Attal, Eliot Laidlaw, Aaron Gokaslan, Changil Kim, Christian Richardt, James Tompkin, and Matthew O'Toole. Törf: Time-of-flight radiance fields for dynamic scene view synthesis. Advances in neural information processing systems, 34:26289-26301, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.473, + 0.312 + ], + "angle": 0, + "content": "[3] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5366-5375, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.472, + 0.383 + ], + "angle": 0, + "content": "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.472, + 0.452 + ], + "angle": 0, + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.452, + 0.472, + 0.508 + ], + "angle": 0, + "content": "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. arXiv preprint arXiv:2206.15258, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.509, + 0.472, + 0.579 + ], + "angle": 0, + "content": "[7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII, pages 333-350. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.579, + 0.472, + 0.621 + ], + "angle": 0, + "content": "[8] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.621, + 0.472, + 0.704 + ], + "angle": 0, + "content": "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 
2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.705, + 0.47, + 0.775 + ], + "angle": 0, + "content": "[10] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5515-5524, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.472, + 0.845 + ], + "angle": 0, + "content": "[11] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[12] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.473, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.175 + ], + "angle": 0, + "content": "[13] Niv Granot, Ben Feinstein, Assaf Shocher, Shai Bagon, and Michal Irani. Drop the gan: In defense of patches nearest neighbors as single image generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13460-13469, June 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.176, + 0.894, + 0.232 + ], + "angle": 0, + "content": "[14] Niv Haim, Ben Feinstein, Niv Granot, Assaf Shocher, Shai Bagon, Tali Dekel, and Michal Irani. Diverse generation from a single video made possible. arXiv preprint arXiv:2109.08591, 2021. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.232, + 0.894, + 0.302 + ], + "angle": 0, + "content": "[15] Tavi Halperin, Hanit Hakim, Orestis Vantzos, Gershon Hochman, Netai Benaim, Lior Sassy, Michael Kupchik, Ofir Bibi, and Ohad Fried. Endless loops: detecting and animating periodic patterns in still images. ACM Transactions on Graphics (TOG), 40(4):1-12, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.894, + 0.344 + ], + "angle": 0, + "content": "[16] Mingming He, Jing Liao, Pedro V Sander, and Hugues Hoppe. Gigapixel panorama video loops. ACM Transactions on Graphics (TOG), 37(1):1-15, 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.344, + 0.894, + 0.413 + ], + "angle": 0, + "content": "[17] Peter Hedman, Pratul P Srinivasan, Ben Mildenhall, Jonathan T Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5875-5884, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.413, + 0.894, + 0.482 + ], + "angle": 0, + "content": "[18] Aleksander Holynski, Brian L Curless, Steven M Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5810-5819, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.483, + 0.83, + 0.497 + ], + "angle": 0, + "content": "[19] Apple Inc. Take and edit live photos, Oct 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.497, + 0.894, + 0.567 + ], + "angle": 0, + "content": "[20] Diederik P. 
Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.567, + 0.894, + 0.663 + ], + "angle": 0, + "content": "[21] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.664, + 0.894, + 0.734 + ], + "angle": 0, + "content": "[22] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.734, + 0.894, + 0.775 + ], + "angle": 0, + "content": "[23] Jing Liao, Mark Finch, and Hugues Hoppe. Fast computation of seamless video loops. ACM Transactions on Graphics (TOG), 34(6):1-10, 2015. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.776, + 0.894, + 0.817 + ], + "angle": 0, + "content": "[24] Zicheng Liao, Neel Joshi, and Hugues Hoppe. Automated video looping with progressive dynamism. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.818, + 0.894, + 0.874 + ], + "angle": 0, + "content": "[25] Kai-En Lin, Lei Xiao, Feng Liu, Guowei Yang, and Ravi Ramamoorthi. Deep 3d mask volume for view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1749–1758, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.874, + 0.894, + 0.902 + ], + "angle": 0, + "content": "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. Advances" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "318" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.468, + 0.119 + ], + "angle": 0, + "content": "in Neural Information Processing Systems, 33:15651-15663, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[27] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.25, + 0.47, + 0.305 + ], + "angle": 0, + "content": "[29] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3667-3676, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.307, + 0.469, + 0.375 + ], + "angle": 0, + "content": "[30] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.378, + 0.469, + 0.445 + ], + "angle": 0, + "content": "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.448, + 0.469, + 0.503 + ], + "angle": 0, + "content": "[32] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, July 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.505, + 0.47, + 0.586 + ], + "angle": 0, + "content": "[33] Medhini Narasimhan, Shiry Ginosar, Andrew Owens, Alexei A. Efros, and Trevor Darrell. Strumming to the beat: Audio-conditioned contrastive video textures. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3761-3770, January 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.589, + 0.469, + 0.658 + ], + "angle": 0, + "content": "[34] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.66, + 0.469, + 0.729 + ], + "angle": 0, + "content": "[35] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.469, + 0.8 + ], + "angle": 0, + "content": "[36] Alex Rav-Acha, Yael Pritch, Dani Lischinski, and Shmuel Peleg. Dynamosaics: Video mosaics with non-chronological time. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 58-65. IEEE, 2005. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.802, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[37] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.831, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[38] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[39] Leonid I Rudin and Stanley Osher. Total variation based image restoration with free local constraints. 
In Proceedings of" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "1st international conference on image processing, volume 1, pages 31-35. IEEE, 1994. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[40] Arno Schödl, Richard Szeliski, David H Salesin, and Irfan Essa. Video textures. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 489-498, 2000. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.221 + ], + "angle": 0, + "content": "[41] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.223, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[42] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.28, + 0.892, + 0.334 + ], + "angle": 0, + "content": "[43] Denis Simakov, Yaron Caspi, Eli Shechtman, and Michal Irani. Summarizing visual data using bidirectional similarity. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.337, + 0.892, + 0.405 + ], + "angle": 0, + "content": "[44] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.408, + 0.892, + 0.461 + ], + "angle": 0, + "content": "[45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.464, + 0.892, + 0.533 + ], + "angle": 0, + "content": "[46] Théo Thonat, Yagiz Aksoy, Miika Aittala, Sylvain Paris, Frédo Durand, and George Drettakis. Video-based rendering of dynamic stationary environments from unsynchronized inputs. In Computer Graphics Forum, volume 40, pages 73-86. Wiley Online Library, 2021. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.535, + 0.892, + 0.618 + ], + "angle": 0, + "content": "[47] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12959-12970, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.62, + 0.892, + 0.674 + ], + "angle": 0, + "content": "[48] Richard Tucker and Noah Snively. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 551-560, 2020. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.677, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[49] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.761, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[50] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3906-3915, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[51] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 8" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "319" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[52] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5336-5345, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[53] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoptrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.472, + 0.302 + ], + "angle": 0, + "content": "[54] Jiakai Zhang, Liao Wang, Xinhang Liu, Fuqiang Zhao, Minzhang Li, Haizhao Dai, Boyuan Zhang, Wei Yang, Lan Xu, and Jingyi Yu. Neuvv: Neural volumetric videos with immersive rendering and editing. arXiv preprint arXiv:2202.06088, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.304, + 0.472, + 0.346 + ], + "angle": 0, + "content": "[55] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.347, + 0.472, + 0.389 + ], + "angle": 0, + "content": "[56] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images, 2018. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.39, + 0.472, + 0.446 + ], + "angle": 0, + "content": "[57] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.487, + 0.927, + 0.512, + 0.938 + ], + "angle": 0, + "content": "320" + } + ] +] \ No newline at end of file diff --git a/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_origin.pdf b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..507ee1192782ba1a42c39a57413877e268a0b432 --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/7bb72ce9-0dd3-422a-99d9-0bd1bcda48bf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:716103cf049706a510bf1f8923be6b619ddc42c594cf3c8734d8d3bb0f9013f8 +size 7502759 diff --git a/2023/3D Video Loops From Asynchronous Input/full.md b/2023/3D Video Loops From Asynchronous Input/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d5577746373b91c068e72d10a385d6dac8284142 --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/full.md @@ -0,0 +1,335 @@ +# 3D Video Loops from Asynchronous Input + +Li Ma $^{1}$ Xiaoyu Li $^{2}$ Jing Liao $^{3}$ Pedro V. Sander $^{1}$ + +1The Hong Kong University of Science and Technology + +$^{2}$ Tencent AI Lab $^{3}$ City University of Hong Kong + +![](images/424e31edcd5e4876067e0ffeb5cfc2dc81f2b55db2c708dfe3f76429a986a527.jpg) +(a) Reconstructed 3D Video Representation + +![](images/334ba8de867f95f76c793daee3e2e74e1651be2f42cdcc97559d83bfa050e684.jpg) +Figure 1. Given a set of asynchronous multi-view videos, we propose a pipeline to construct a novel 3D looping video representation (a), which consists of a static texture atlas, a dynamic texture atlas, and multiple tiles as the geometry proxy. The 3D video loops allow both view and time control (b), and can be rendered in real time even on mobile devices (c). We strongly recommend readers refer to the supplementary material for video results. + +![](images/e836695320aa370be43d5ab5b71de4f138330987fd71f48e9f3453c8d7f36342.jpg) +(b) View and Time Control + +![](images/c340f0555bb3260ada99a0da15cbb2506246e6a7cadd2f6602d5775f23e90bc7.jpg) +(c) Real Time Demo + +![](images/754cf507d69b28aff5d38f66bb3b92b8b6a10df930f5e4922d00136f0ddf75dc.jpg) + +# Abstract + +- Looping videos are short video clips that can be looped endlessly without visible seams or artifacts. They provide a very attractive way to capture the dynamism of natural scenes. Existing methods have been mostly limited to 2D representations. In this paper, we take a step forward and propose a practical solution that enables an immersive experience on dynamic 3D looping scenes. The key challenge is to consider the per-view looping conditions from asynchronous input while maintaining view consistency for the 3D representation. We propose a novel sparse 3D video representation, namely Multi-Tile Video (MTV), which not only provides a view-consistent prior, but also greatly reduces memory usage, making the optimization of a 4D volume tractable. Then, we introduce a two-stage pipeline to construct the 3D looping MTV from completely asynchronous multi-view videos with no time overlap. A novel looping loss based on video temporal retargeting algorithms is adopted during the optimization to loop the 3D scene. Experiments of our framework have shown promise in successfully generating and rendering photorealistic 3D looping videos in real time even on mobile devices. 
The code, dataset, and live demos are available in https://limacv.github.io/VideoLoop3D_web/. + +# 1. Introduction + +Endless looping videos are fascinating ways to record special moments. These video loops are compact in terms of storage and provide a much richer experience for scenes that exhibit looping behavior. One successful commercial use of this technique is the live photo [19] feature in the Apple iPhone, which tries to find an optimal looping period and fade in/out short video clips to create looping videos. There have been several works on automatically constructing 2D looping videos from non-looping short video clips. Liao et al. [24] first propose to create 2D video loops from videos captured with static cameras. They solve for the optimal starting frame and looping period for each pixel in the input video to composite the final video. Later on, several methods are proposed to improve the computation speed [23], or extend to panoramas [1, 36], and gigapixel videos [16]. However, few attempts have been made to extend video loops to a 3D representation. One existing work that shares a similar setting as ours is VBR [46], which generates plausible video loops in novel views. However, it comes with some limitations: It builds on top of ULR [5], which can produce ghosting artifacts due to inaccurate mesh reconstruction, as shown in [30]. Besides, VBR generates looping videos and reduces the inconsistency from asynchronous input by adaptively blending in different frequency domains, which tends to blur away details. + +To allow free-view observation of the looping videos, a proper 3D representation needs to be employed. Recently, tremendous progress has been made in novel view synthesis based on 3D scene representations such as triangle meshes [37, 38, 45], Multi-plane Image (MPI) [9, 56], and Neural Radiance Field (NeRF) [7, 31, 32], which could be reconstructed given only sparse observations of real scenes and render photo-realistic images in novel views. Much effort has been made to adapt these methods to dynamic scenes, which allows for both viewing space and time controls [2, 6, 27, 28, 34, 35, 52, 57]. Therefore, a straightforward solution to generate a 3D looping video is to employ the 2D looping algorithms for each view and lift the results to 3D using these methods. However, we find it hard to get satisfactory results since the 2D looping algorithms do not consider view consistency, which is even more challenging for the asynchronous multi-view videos that we use as input. + +In this work, we develop a practical solution for these problems by using the captured video input of the dynamic 3D scene with only one commodity camera. We automatically construct a 3D looping video representation from completely asynchronous multi-view input videos with no time overlap. To get promising 3D video loop results, two main issues need to be addressed. First, we need to solve for a view-consistent looping pattern from inconsistent multi-view videos, from which we need to identify spatio-temporal 3D patches that are as consistent as possible. Second, the 3D video potentially requires a memory-intensive 4D volume for storage. Therefore, we need to develop a 3D video representation that is both efficient in rendering and compact in memory usage to make the optimization of the 4D volume tractable. + +To this end, we develop an analysis-by-synthesis approach that trains for a view-consistent 3D video representation by optimizing multi-view looping targets. 
We propose an efficient 3D video representation based on Multi-plane Images (MPIs), namely Multi-tile Videos (MTVs), by exploiting the spatial and temporal sparsity of the 3D scene. As shown in Fig. 2, instead of densely storing large planes, MTVs store static or dynamic texture tiles that are sparsely scattered in the view frustum. This greatly reduces the memory requirement for rendering compared with other 3D video representations, making the optimization of the 3D looping video feasible in a single GPU. The sparsity of MTVs also serves as a view-consistent prior when optimizing the 3D looping video. To optimize the representation for looping, we formulate the looping generation for each view as a temporal video retargeting problem and develop a novel looping loss based on this formulation. We propose a two-stage pipeline to generate a looping MTV, and the experiments show that our method can produce photorealistic 3D video loops that maintain similar dynamism from the input, and enable real-time rendering even in mobile devices. + +Our contributions can be summarized as follows: + +- We propose Multi-tile Videos (MTVs), a novel dynamic 3D scene representation that is efficient in rendering and compact in memory usage. +- We propose a novel looping loss by formulating the 3D video looping construction as a temporal retargeting problem. +- We propose a two-stage pipeline that constructs MTVs from completely asynchronous multi-view videos. + +# 2. Related Work + +Our work lies at the confluence of two research topics: looping video construction and novel view synthesis. We will review each of them in this section. + +Video Loops. Several works have been proposed to synthesize looping videos from short video clips. Schödl et al. [40] create video loops by finding similar video frames and jumping between them. Audio [33] can also be leveraged for further refinement. Liao et al. [24] formulate the looping as a combinatorial optimization problem that tries to find the optimal start frame and looping period for each pixel. It seeks to maximize spatio-temporal consistency in the output looping videos. This formulation is further developed and accelerated by Liao et al. [23], and extended to gigapixel looping videos [16] by stitching multiple looping videos. Panorama video loops can also be created by taking a video with a panning camera [1, 36]. VBR [46] generates loops by fading in/out temporal Laplacian pyramids, and extends video loops to 3D using ULR [5]. Another line of work tries to create video loops from still images and strokes provided by users as rough guidelines of the looping motion. Endless Loops [15] tries to find self-similarities from the image and solve for the optical flow field, which is then used to warp and composite the frames of the looping video. This process can also be replaced by data-driven approaches [18, 29], or physics-based simulation [8]. Despite the progress in creating various forms of looping videos, extending looping videos to 3D is still an unexplored direction. + +Novel View Synthesis of Dynamic Scenes. Novel View Synthesis (NVS) aims at interpolating views given only a set of sparse input views. For dynamic scenes, NVS requires the construction of a 4D representation that allows for both space and time control. Some methods use synchronized multi-view videos as input, which are often only available in a studio setting [27, 28, 57], or using specially designed camera arrays [4, 9, 21, 25]. 
To ease hardware requirements, Open4D [3] uses unconstrained multi-view input, but still requires multiple observations at the same timestamp. With the development of neural rendering, it is possible to use only monocular input. However, this is + +a highly ill-posed problem since the camera and scene elements are moving simultaneously. Some methods use extra sensors such as a depth sensor [2, 6], while some use a data-driven prior to help construct the scene geometry [12, 50]. Others use a hand-crafted motion prior to regularize the scene motion [22, 34, 35, 47], which usually can only handle simple motions. In our setting, we take asynchronous multi-view videos with no time overlap, which is a setting that has not been addressed before. + +3D Scene Representations. A critical issue in NVS is the underlying scene representation. A triangle mesh is the most commonly used scene representation in commercial 3D software. Some methods use meshes as their representation [37, 38, 46]. However, reconstructing an accurate, temporally consistent mesh is still an open problem, being particularly challenging for complex in-the-wild scenes [28]. A volumetric representation is another option to express the 3D world by storing scene parameters in a dense 3D grid [11, 27, 49, 54]. One benefit is that it trivially supports differentiable rendering, which greatly improves the reconstruction quality. The Multi-plane Image (MPI) [9, 10, 30, 44, 48, 56] is an adapted volumetric representation that represents a scene using multiple RGBA planes in the camera frustum. Volume representations can model complex geometry, but at the cost of higher memory usage. Another rapidly developing representation is Neural Radiance Field (NeRF) [31], which models scenes as continuous functions and parameterizes the function as an implicit neural network. It achieves photorealistic rendering results at the expense of long training and rendering times, especially for dynamic scenes. + +# 3. Method + +# 3.1. Overview + +Our goal is to reconstruct a view-consistent 3D video representation that can be looped infinitely using completely asynchronous multi-view 2D videos. We start by introducing a novel 3D video representation, namely Multitile Videos (MTVs), which improves efficiency by exploiting sparsity. Then we propose a two-stage pipeline as shown in Fig. 3 to construct a 3D looping MTV. In the first stage, we initialize the MTV by optimizing a static Multiplane Image (MPI) and a 3D loopable mask using long exposure images and 2D loopable masks derived from the input videos. We then construct an MTV through a tile culling process. In the second stage, we train the MTV using an analysis-by-synthesis approach in a coarse-to-fine manner. The key enabler for this process is a novel looping loss based on video retargeting algorithms, which encourages a video to simultaneously loop and preserve similarity to the input. The remainder of this section describes the details of this proposed approach. + +![](images/a085c62d5448702bb9e1610535da2958477c0dca0b4f04fc95e4755d3d097f74.jpg) +Figure 2. Comparison between the Multi-plane Video representation and the Multi-tile Video representation. + +# 3.2. Data Preparation + +The input to our system are multiple asynchronous videos of the same scene from different views. Each video $\mathbf{V} \in \mathbb{R}^{F \times H \times W \times 3}$ is a short clip with $F$ frames and a resolution of $H \times W$ . Video lengths may differ for each view. 
Each video is expected to have a fixed camera pose, which can be achieved using tripods or existing video stabilization tools during post-process. Since we allow videos to be asynchronous, we could capture each view sequentially using a single commodity camera. + +Given the precondition that the captured scene contains mostly repetitive content, we assume the long exposure images for each view to be view-consistent. Therefore, we compute an average image for each video $\mathbf{V}$ , and then register a pinhole camera model for each video using COLMAP [41, 42]. We also compute a binary loopable mask for each input video similar to Liao et al. [23], where 1 indicates pixel with the potential to form a loop and 0 otherwise. + +# 3.3. Multi-tile Video (MTV) Representation + +Before introducing our proposed MTV representation, we first briefly review the MPI representation [56]. An MPI represents the scene using $D$ fronto-parallel RGBA planes in the frustum of a reference camera, with each plane arranged at fixed depths [48]. To render an MPI from novel views, we first need to warp each plane based on the depth of the plane and the viewing camera, and then iteratively blend each warped plane from back to front. A straightforward dynamic extension of MPI, namely Multi-plane Video (MPV), is to store a sequence of RGBA maps for each plane. For a video with $T$ frames, this results in a 4D volume in $\mathbb{R}^{D\times T\times H\times W\times 4}$ , which is very memory consuming. Inspired by recent work on sparse volume representation [17,26,53], we propose Multi-tile Videos, which reduce the memory requirements by exploiting the spatio-temporal sparsity of the scene. Specifically, we subdivide each plane into a regular grid of tiny tiles. Each tile $\mathbf{T}\in \mathbb{R}^{F\times H_s\times W_s\times 4}$ stores a small RGBA patch sequence with spatial resolution $H_{s}\times W_{s}$ . For each tile, we assign a label $l$ by identifying whether it contains looping content $l_{loop}$ , a static scene $l_{static}$ , or is simply empty $l_{empty}$ . We could then store a + +![](images/a19264169a7d76cbe52657564e1662fc754b3e13b4e8db8d8ac4b9b7668b1f53.jpg) +Figure 3. The two-stage pipeline to generate the MTV representation from multi-view videos. + +single RGBA patch for $l_{\text{static}}$ , and discard tiles that are empty. Fig. 1 visualizes a reconstructed MTV representation, where the RGBA patches are packed into static and dynamic texture atlas. Fig. 2 shows the difference between MPVs and MTVs. + +# 3.4. Stage 1: MTV Initialization + +We find that optimizing a dense MTV directly from scratch results in the approach being easily trapped in local minima, which yields view-inconsistent results. To address this, we use a two-stage pipeline shown in Fig. 3. In the first stage, we start by constructing a "long exposure" MPI. Then we initialize the sparse MTV by tile culling process that removes unnecessary tiles. By reducing the number of parameters, the initialized MTV provides a view-consistent prior and leads to a high-quality 3D video representation. + +Training a looping-aware MPI. We start by training a dense MPI $\mathbf{M} \in \mathbb{R}^{D \times H \times W \times 4}$ , as well as a 3D loopable mask $\mathbf{L} \in \mathbb{R}^{D \times H \times W}$ , using the average image and the 2D loopable mask, respectively. 
We randomly initialize $\mathbf{M}$ and $\mathbf{L}$ , and in each iteration, we randomly sample a patch in a random view, and render an RGB patch $\hat{\mathbf{p}}_c \in \mathbb{R}^{h \times w \times 3}$ and a loopable mask patch $\hat{\mathbf{p}}_l \in \mathbb{R}^{h \times w}$ using the standard MPI rendering method. Note that the $\alpha$ channel is shared between $\mathbf{M}$ and $\mathbf{L}$ during rendering. We supervise the MPI $\mathbf{M}$ by minimizing the Mean Square Error (MSE) between the rendering results and the corresponding patch $\mathbf{p}_c$ from the average image. We supervise the loopable mask $\mathbf{L}$ by minimizing the Binary Cross Entropy (BCE) between the rendered 2D mask $\hat{\mathbf{p}}_l$ and the corresponding patch $\mathbf{p}_l$ from the 2D loopable mask: + +$$ +\mathcal {L} _ {m s e} = \frac {1}{h w} \| \mathbf {p} _ {c} - \hat {\mathbf {p}} _ {c} \| _ {2} ^ {2}, \tag {1} +$$ + +$$ +\mathcal {L} _ {b c d} = \frac {1}{h w} \| - (\mathbf {p} _ {l} \log (\hat {\mathbf {p}} _ {l}) + (1 - \mathbf {p} _ {l}) \log (1 - \hat {\mathbf {p}} _ {l})) \| _ {1}, \tag {2} +$$ + +where $\| \mathbf{p}\| _1$ and $\| \mathbf{p}\| _2$ are the L1 and L2 norm of a flattened patch $\mathbf{p}$ . The log is computed for every element of a patch. + +Since the rendering of the MPI is differentiable, we optimize $\mathbf{M}$ and $\mathbf{L}$ using the Adam optimizer [20]. Optimizing all the parameters freely causes noisy artifacts, therefore, we apply total variation (TV) regularization [39] to $\mathbf{M}$ : + +$$ +\mathcal {L} _ {t v} = \frac {1}{H W} \left(\| \Delta_ {x} \mathbf {M} \| _ {1} + \| \Delta_ {y} \mathbf {M} \| _ {1}\right), \tag {3} +$$ + +where $\| \Delta_x\mathbf{M}\| _1$ is shorthand for the L1 norm of the gradient of each pixel in the MPI $\mathbf{M}$ along $x$ direction, and analogously for $\| \Delta_y\mathbf{M}\| _1$ . We also adopt a sparsity loss to further encourage sparsity to the $\alpha$ channel of the MPI $\mathbf{M}_{\alpha}$ as in Broxton et al. [4]. Specifically, we collect $D$ alpha values in each pixel location of $\mathbf{M}_{\alpha}$ into a vector $\beta$ , where $D$ is the number of planes. Then the sparsity loss is computed as: + +$$ +\mathcal {L} _ {s p a} = \frac {1}{H W} \sum_ {\text {p i x e l}} \frac {\| \beta \| _ {1}}{\| \beta \| _ {2}}. \tag {4} +$$ + +The final loss in the first stage is a weighted sum of the four losses: + +$$ +\mathcal {L} = \mathcal {L} _ {m s e} + \mathcal {L} _ {b c d} + \lambda_ {t v} \mathcal {L} _ {t v} + \lambda_ {s p a} \mathcal {L} _ {s p a}. \tag {5} +$$ + +Tile Culling. After training, we reconstruct a static MPI M as well as a 3D loopable mask $\mathbf{L}$ . We subdivide each plane into a regular grid of tiles. In the experiments, we subdivide the plane so that each tile has a resolution of $H_{s} = W_{s} = 16$ . We denote $\{T_c\}, \{T_\alpha\}, \{T_l\}$ to be the set of RGB color, alpha value, and loopable mask of a tile, respectively. We then assign label $l \in \{l_{empty}, l_{static}, l_{loop}\}$ based on the $\{T_\alpha\}$ and $\{T_l\}$ for each tile: + +$$ +l = \left\{ \begin{array}{l l} l _ {\text {e m p t y}} & \text {i f} \max \left\{T _ {\alpha} \right\} \leq \tau_ {\alpha}, \\ l _ {\text {s t a t i c}} & \text {i f} \max \left\{T _ {\alpha} \right\} > \tau_ {\alpha} \text {a n d} \max \left\{T _ {l} \right\} < \tau_ {l}, \\ l _ {\text {l o o p}} & \text {o t h e r w i s e .} \end{array} \right. \tag {6} +$$ + +We set the threshold of culling to be $\tau_{\alpha} = 0.05$ and $\tau_{l} = 0.5$ . 
We cull the tiles with $l = l_{empty}$ . For tiles with $l = l_{loop}$ , we lift the static 2D RGBA patch into a patch sequence by copying the patch $T$ times, where $T$ is the number of frames that we would like the MTV to have. We add + +![](images/cf3722ee028e5c341f276593a1a9d440a9e1dc9213ebf70a4e41497da9f0972d.jpg) +Figure 4. Visualization of looping loss. We first pad frames and extract 3D patches along the time axis for each pixel location, then we compute a normalized similarity score for each patch pair. Finally, the looping loss is computed by averaging errors between patches with minimum scores. + +a small random noise to the lifted patch video to prevent the straightforward static solution. For tiles with $l = l_{\text{static}}$ , we simply keep it unchanged. This culling process greatly reduces the memory requirement for optimizing the 4D volume. + +# 3.5. Stage 2: MTV Optimization + +After initializing the MTV representation, we then seek to optimize the final looping MTV. + +**Looping Loss.** The main supervision of the optimization process is a novel looping loss, which is inspired by the recent progress in image [13] and video [14] retargeting algorithm. Specifically, in each iteration, we randomly sample a view and a rectangle window of size $h \times w$ , and render the video $\hat{\mathbf{V}}_o \in \mathbb{R}^{T \times h \times w \times 3}$ from MTV. We denote the corresponding input video as $\mathbf{V}_p \in \mathbb{R}^{F \times h \times w \times 3}$ . Our goal is to optimize the MTV such that $\hat{\mathbf{V}}_o$ forms a looping video $\mathbf{V}_{\infty}$ : + +$$ +\mathbf {V} _ {\infty} (t) = \hat {\mathbf {V}} _ {o} (t \bmod T), t \in [ 1, + \infty), \tag {7} +$$ + +where $\mathbf{V}(t)$ means $t$ -th frame of the video and mod is the modulus operation. We define the looping loss to encourage the $\mathbf{V}_{\infty}$ to be a temporal retargeting result of $\mathbf{V}_p$ . A visualization of the process is shown in Fig. 4. + +We start by extracting 3D patch sets $\{\mathbf{Q}_i; i = 1, \dots, n\}$ and $\{\mathbf{K}_j; j = 1, \dots, m\}$ from $\mathbf{V}_{\infty}$ and $\mathbf{V}_p$ , respectively, along temporal axis. $\{\mathbf{Q}_i\}$ and $\{\mathbf{K}_j\}$ are all centered at the same pixel location and we repeat the same process for every pixel. Note that although there are infinitely many patches from the looping video, the extracted patch set of the looping video is equivalent to a finite set of patches, which are extracted from the rendered video by circularly padding the first $p = s - d$ frames of the rendered video $\hat{\mathbf{V}}_o$ at the end of itself, where $s$ and $d$ are the size and stride of the patches in the time axis. Fig. 5 demonstrates a toy example with 5 frames. By optimizing both the patches inside the video range and patches crossing the temporal boundary, we optimize a video that is both spatio-temporally consistent with the target and seamlessly looping. We then try to minimize the bidirectional similarity (BDS) [43] between + +![](images/1279fc8e50dbdc8e079d03fffa99c71d97d51702029111fb71956ded814f04d3.jpg) +Figure 5. For patches of size 3 and stride 1, the patch set extracted from the video that endlessly repeats 5 frames is the same as the patch set extracted from the padded video that circularly pads 2 frames. + +the two sets of patches. Intuitively, this means every patch in $\{\mathbf{Q}_i\}$ appears in $\{\mathbf{K}_j\}$ (for coherence) and every patch in $\{\mathbf{K}_j\}$ appears in $\{\mathbf{Q}_i\}$ (for completeness). 
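Before turning to how the two patch sets are matched, the following is a minimal PyTorch sketch of the circular padding and temporal patch extraction described above; the function name `extract_loop_patches` and the `(T, H, W, 3)` tensor layout are illustrative assumptions rather than the authors' released code, and the spatial cropping of the 3D patches is omitted for brevity.

```python
import torch

def extract_loop_patches(video, s=3, d=1, loop=True):
    """Extract temporal patches from a rendered video of shape (T, H, W, 3).

    With loop=True, the first p = s - d frames are circularly padded at the
    end (cf. Fig. 5), so this finite patch set equals the patch set of the
    endlessly repeating video V_inf(t) = V(t mod T) in Eq. 7.
    """
    if loop:
        p = s - d  # number of wrap-around frames
        video = torch.cat([video, video[:p]], dim=0)
    T = video.shape[0]
    # One patch per valid temporal window; the spatial cropping of the 3D
    # patches used in the paper is left out to keep the example short.
    patches = [video[t:t + s] for t in range(0, T - s + 1, d)]
    return torch.stack(patches, dim=0)  # (N, s, H, W, 3)
```

For the toy example of Fig. 5 (five frames, patch size 3, stride 1), this yields five patches, two of which straddle the loop boundary.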
+ +To minimize the BDS between the two patch sets, we use the Patch Nearest Neighbor (PNN) algorithm [13] that first computes a 2D table of normalized similarity scores (NSSs) $s_{ij}$ for every possible pair of $\mathbf{Q}_i$ and $\mathbf{K}_j$ . Then for each patch $\mathbf{Q}_i$ , we select a target patch $\mathbf{K}_{f(i)} \in \{\mathbf{K}_j\}$ that has minimal NSS, where $f(i)$ is a selection function: + +$$ +f (i) = \arg \min _ {k} s _ {i, k}, \text {w h e r e} \tag {8} +$$ + +$$ +s _ {i j} = \frac {1}{\rho + \min _ {k} \| \mathbf {Q} _ {k} - \mathbf {K} _ {j} \| _ {2} ^ {2}} \| \mathbf {Q} _ {i} - \mathbf {K} _ {j} \| _ {2} ^ {2}. \tag {9} +$$ + +Here $\rho$ is a hyperparameter that controls the degree of completeness. Intuitively, when $\rho \rightarrow \infty$ , Eq. 9 degenerates to $s_{ij} \sim D(\mathbf{Q}_i, \mathbf{K}_j)$ , so we simply select $\mathbf{K}_j$ that is most similar to $\mathbf{Q}_i$ . And if $\rho = 0$ , the denominator $\min_k D(\mathbf{Q}_k, \mathbf{K}_j)$ penalizes the score if there are already some $\mathbf{Q}_i$ that is closest to $\mathbf{K}_j$ . Thus, the selection will prefer patches that have not yet been selected. + +Using the PNN algorithm, we get the set of patches $\{\mathbf{K}_{f(i)}\}$ that is coherent to the target patch set $\{\mathbf{K}_j\}$ , and the completeness is controlled by $\rho$ . The looping loss is then defined as the MSE loss between $\mathbf{Q}_i$ and $\mathbf{K}_{f(i)}$ : + +$$ +\mathcal {L} _ {\text {l o o p}} = \frac {1}{n h w} \sum_ {\text {p i x e l}} \sum_ {i = 1} ^ {n} \| \mathbf {Q} _ {i} - \mathbf {K} _ {f (i)} \| _ {2} ^ {2}, \tag {10} +$$ + +where $\sum_{pixel}$ indicates that the term is summed over all the pixel locations of the rendered video. + +Pyramid Training. In the implementation, we adopt a pyramid training scheme. In the coarse level, we downsample both the input video and the MTV. The downsampling of the MTV is conducted by downsampling the tiles. We start from the coarsest level with downsample factor 0.24 and train the MTV representation for 50 epochs. We then upsample each tile by $1.4 \times$ and repeat the training step. We show that the pyramid training scheme can improve the generation results. + +
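As a rough illustration of Eqs. 8-10, the sketch below computes the normalized similarity scores, the selection $f(i)$, and the resulting loss for the patches of a single pixel location; the names `Q`, `K`, `rho`, and `looping_loss` are illustrative only, and this is a simplified sketch under those assumptions, not the authors' implementation.

```python
import torch

def looping_loss(Q, K, rho=0.0, eps=1e-8):
    """Sketch of Eqs. 8-10 for the patches of one pixel location.

    Q: (n, P) flattened patches from the rendered (padded) looping video.
    K: (m, P) flattened patches from the captured input video.
    rho controls completeness: a large rho reduces to plain nearest
    neighbors, while rho = 0 discourages reusing an already-matched target.
    """
    # Pairwise squared distances D_ij = ||Q_i - K_j||^2, shape (n, m).
    D = ((Q[:, None, :] - K[None, :, :]) ** 2).sum(dim=-1)
    # Per-target normalizer min_k ||Q_k - K_j||^2, shape (m,).
    denom = rho + D.min(dim=0).values
    # Normalized similarity scores s_ij (Eq. 9) and selection f(i) (Eq. 8).
    s = D / (denom + eps)
    f = s.argmin(dim=1)
    # Looping loss (Eq. 10): MSE between each Q_i and its selected target;
    # gradients flow only through Q, since K comes from the fixed input video.
    return ((Q - K[f]) ** 2).mean()
```

In the full pipeline this term is averaged over all pixel locations of the rendered window and minimized under the coarse-to-fine schedule described above.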
| Method | VLPIPS ↓ | STDerr ↓ | Com. ↓ | Coh. ↓ | LoopQ ↓ | # Params. ↓ | Render Spd ↑ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 | 33M-184M | 140 fps |
| VBR | 0.2074 | 82.36 | 12.98 | 11.42 | 11.49 | 300M | 20 fps |
| loop2D + MTV | 0.2447 | 118.9 | 11.83 | 9.919 | 9.927 | 33M-184M | 140 fps |
| loop2D + MPV | 0.2546 | 117.5 | 11.82 | 9.817 | 9.840 | 2123M | 110 fps |
| loop2D + DyNeRF | 0.2282 | 123.7 | 11.93 | 10.23 | 10.27 | 2M | 0.1 fps |

Table 1. Quantitative comparison of reconstruction quality and efficiency. $\downarrow$ ($\uparrow$) indicates lower (higher) is better. Our method produces the best quality and strikes a good balance between the number of parameters and rendering speed.

![](images/8049ebae6245ab91d94956eea9b54e801123a2a18d9e0ad5dc1086ef05aa07eb.jpg)
Figure 6. Qualitative comparison with other baselines. Our method produces the sharpest results.

# 4. Experiments

# 4.1. Implementation Details

We captured 16 scenes for quantitative and qualitative studies. For each scene, we captured 8-10 views in a face-forward manner using a Sony $\alpha 9$ II camera. We captured each view at 25 fps for 10-20 seconds. We downsample each video to a resolution of $640 \times 360$. Finally, we randomly select one view for evaluation. The others are used for constructing MTVs using the two-stage pipeline. In the first stage, we empirically set $\lambda_{tv} = 0.5$ and $\lambda_{spa} = 0.004$. We construct the MPI with $D = 32$ layers. In the second stage, we let the hyperparameter $\rho = 0$ to guarantee maximum completeness. We extract 3D patches with spatial dimension 11 and temporal dimension 3. We construct MTVs with approximately 50 frames, i.e., 2 seconds. We set the rendering window in each iteration to $h = 180$, $w = 320$ for both stages.

# 4.2. Metrics

For our quantitative study, we synthesize looping videos in test views using the reconstructed 3D video representation and compare the synthetic results with captured target videos. However, we do not have paired ground-truth videos since we generate 3D videos from completely asynchronous inputs. Therefore, we adopt several intuitive metrics to evaluate the results in spatial and temporal aspects.

Spatial Quality. We evaluate the spatial quality of a synthetic frame by computing the LPIPS value [55] between the synthetic frame and the frame in the target video that is most similar in terms of LPIPS. We average the values over all 50 synthetic frames, which we denote as VLPIPS.

Temporal Quality. Given two videos that have similar dynamism, they should have similar color distributions at each pixel location. We measure the temporal quality of the synthetic videos by first computing the standard deviation (STD) of the RGB color at each pixel location of the synthetic video and the target video, resulting in two STD maps of dimension $H \times W \times 3$. We then compute $STDerr$ by measuring the MSE between the two maps.

Spatio-temporal Quality. We evaluate the spatio-temporal similarity between the synthetic and target videos following the bidirectional similarity (BDS) [43]. We individually report Completeness and Coherence scores (abbreviated as Com. and Coh., respectively) by extracting and finding nearest-neighbor 3D patches in two directions. Specifically, for each patch in the target video, we find the closest patch in the synthetic video for Com., and vice versa for Coh. We measure the distance between two 3D patches using MSE, and the final scores are averages over multiple patch configurations of size and stride. We present the details of the patch configurations in the supplementary material.

In addition, we use a metric similar to Coh. to measure the loop quality (LoopQ), which reflects the coherence of the looping video when switching from the last frame back to the first frame. This is achieved by extracting the 3D patches that overlap with the first and last frames, as shown by the blue rectangles in Fig. 5.
Other steps remain the same as the Coh. score. + +# 4.3. Comparisons + +We first compare with VBR [46] by implementing it based on the descriptions in the paper since the code and data are not publicly available. We also compare with straightforward solutions that lift classical 2D looping algorithms to 3D. Specifically, we first generate a 2D looping video for each of the input videos using the method of Liao et al. [23]. And then we construct various scene representations using the 2D looping video and synthesize novel views. We compare with our sparse MTV representation $(loop2D + MTV)$ , the Multi-plane Video representation $(loop2D + MPV)$ and the dynamic NeRF representation + +![](images/b8157b026dcfd215d567c22117981b60e70bf19ab52e5e7ebfdf59a28e481ac2.jpg) +Figure 7. We visualize the pixel-wise $STDerr$ value for each method. Our method has a lower error, indicating that our approach best retains the dynamism of the scene. We recommend readers watch the supplemental video, where the difference is more noticeable. + +![](images/e1c41d8094f406775c2bb0fe6970b93d5b99309f1b0489f3728ed8fd7802d3c1.jpg) + +![](images/a8eb22b2c555e44e6a1936c240e6bd2115a0bc45c960857cb61571a79b0e50fb.jpg) + +![](images/0780bac44544fbf7c4e2fed1fa9e83d9eff794d06191c95011430f827d10eba0.jpg) + +![](images/cb70da44993d28a0c8df9973d533f696d373a89d0b2ad418c74f2354af9b5a60.jpg) + +![](images/7c2cb70165326bd67f67ea5e012759250e4e23e9608043d1cedf2bd421722bf2.jpg) + +![](images/02b6e4a1ec281e20334e62c474d4eeaaa79bdceb686fb502ad327100f725db21.jpg) + +![](images/5bbb4bbb138a6b2e9e49a95237ef2ae04c302befb1654652d9f799bab0ac7301.jpg) +Figure 8. Results of our ablations. Our full model produces the fewest artifacts. + +![](images/980b12aa297fe88048109321f0fe2c3648ef6ebc5fa46e2128a654c40139c243.jpg) + +![](images/1f0220714d2ab30b1c0d3c941fafb7155bd9f8586a5b7e56d981ac0d1dbc04eb.jpg) + +![](images/b1c7688eb3fd9070a3ebeb3a0efaef2a5a23506e1b747c8f7c335e5492f73d1a.jpg) + +![](images/d637ce61efeaabd210355e441c0a0ef05dc7c39dabf64e85440f76d827176425.jpg) + +
| Method | VLPIPS ↓ | STDerr ↓ | Com. ↓ | Coh. ↓ | LoopQ ↓ |
| --- | --- | --- | --- | --- | --- |
| Ours | 0.1392 | 56.02 | 10.65 | 9.269 | 9.263 |
| w/o pad | 0.1387 | 55.67 | 10.66 | 9.273 | 9.395 |
| w/o 2stage | 0.1755 | 67.99 | 11.69 | 9.982 | 10.13 |
| w/o pyr | 0.1412 | 57.41 | 10.86 | 9.555 | 9.465 |
| w/o tv | 0.1530 | 56.51 | 11.12 | 9.766 | 9.689 |

Table 2. Ablations of our method. $\downarrow$ ($\uparrow$) indicates lower (higher) is better. (Best in bold, second best underlined.)

[21] $(loop2D + DyNeRF)$.

We compare our method with the four baselines on our captured dataset. We synthesize novel-view videos and report the VLPIPS, STDerr, Com., Coh., and LoopQ metrics in Tab. 1. Our method outperforms the other baselines in terms of visual quality, scene dynamism preservation, spatio-temporal consistency, and loop quality. We show the qualitative comparison in Fig. 6. We also visualize the STDerr value for each pixel in Fig. 7, which reflects the difference in dynamism between the synthetic results and the reference. We recommend that readers also see the video results included in the supplementary material. Note that our method produces the sharpest results, while best retaining the dynamism of the scene. VBR directly blends inconsistent videos from multiple input views, and the 2D looping baselines fail to consider multi-view information and produce view-inconsistent looping videos. As a result, they tend to blur out spatial and temporal details to compensate for view inconsistencies. We observe that loop2D+DyNeRF also generates sharper results compared with the other two baselines. This is because DyNeRF conditions on the view direction and tolerates the view inconsistency. However, it performs poorly in maintaining the dynamism of the scene.

Additionally, we measure the efficiency of the scene representations using several metrics. We first show the number of parameters (# Params.) of the model to represent a dynamic 3D volume of 50 frames. We evaluate rendering speed (Render Spd) at a $360 \times 640$ resolution on a laptop equipped with an RTX 2060 GPU. We present the metrics in Tab. 1. Since the MTV representation varies with different scenes, we report the maximum and minimum values when evaluated on our dataset. We can see that our method surpasses VBR in # Params. and Render Spd. Compared with MPV, which densely stores the scene parameters in a 4D volume, our sparse MTV representation can reduce the number of parameters by up to $98\%$, resulting in a slightly faster rendering speed and much smaller memory and disk usage. On the other hand, despite the surprisingly small number of parameters, the NeRF representation has an extremely slow rendering speed. In other words, our MTV representation achieves the best trade-off between the number of parameters and rendering efficiency.

# 4.4. Ablation Studies

We conducted extensive ablation studies of our method to test the effectiveness of several design decisions in our pipeline by individually removing each component and constructing 3D looping videos from our dataset. We experimented on the following components: the frame padding operation as illustrated in Fig. 5 when computing $\mathcal{L}_{loop}$ (w/o pad), the two-stage training pipeline (w/o 2stage), the coarse-to-fine training strategy (w/o pyr), and the TV regularization (w/o tv). The numerical results are shown in Tab. 2, and qualitative results are presented in Fig. 8 and Fig. 9. We also experimented with different values of $\lambda_{spa}$ and $\rho$ to understand the resulting effect.

![](images/ed4c4cb65b00f5088b02f8b20aa53e2d9284cd7302d26f94922e0c434140d8ce.jpg)
Figure 9. Ablations for the padding operation. In the second row, we visualize the temporal coherence by flattening the pixels in the green line along the time axis and repeating 3 times.
Red rectangles highlight the discontinuity produced without the padding operation. We encourage readers to refer to the video results for a clearer demonstration. + +Padding Operation. As shown in Tab. 2, without the padding operation, our method can still produce competitive results in terms of spatial quality and spatio-temporal consistency. It even has better temporal quality. This is because the padding operation adds extra boundary conditions to the optimization, making the optimization more difficult. However, as highlighted in the red rectangles in Fig. 9, without padding, our method is less prone to generate a properly looping video since it can not guarantee a smooth transition from the last frame to the first frame, leading to a lower loop quality score. + +Two-stage Pipeline. It can be seen from Tab. 2 that the two-stage pipeline plays an important role in generating high-quality results. Without the two-stage pipeline, where we directly optimize a dense MPV representation using the looping loss, the MPV easily gets trapped into view-inconsistent results, leading to significant drop in every metric evaluated. + +Coarse-to-fine Training. Results also show that the coarse-to-fine training scheme produces slightly better spatial and temporal quality than optimizing only on the finest level. This is because the patch-based optimization has a wider perceptual field at the coarse level, leading to a better global solution. Therefore, our full model tends to produce fewer artifacts compared with the w/o pyr model. + +TV Regularization. We find it necessary to apply TV regularization, since the pipeline tends to generate MTVs with holes without this regularization, as shown in Fig. 8, which greatly affects the visual quality. + +Weight for $\mathcal{L}_{spa}$ . We experimented on different values of $\lambda_{spa}$ on one scene. We plot the relationship between Coh. scores and # Params. with respect to $\lambda_{spa}$ . We can see that when $\lambda_{spa} = 0$ , the reconstructed MTV is less sparse, which degenerates to a dense representation. This makes it harder to optimize and leads to a worse Coh. score. Then # Params. and Coh. drop rapidly as $\lambda_{spa}$ grow. However, if $\lambda_{spa}$ is larger than a threshold, Coh. increases again, while the improvement on # Params. is less substantial. This + +![](images/43c7a6437d3c6af60f5749ab57b00f6bb1c0d6778ac7cdcb418744afcb9a15d3.jpg) +Figure 10. The trend of Coh. score and # Params. under different $\lambda_{spa}$ . The green line is the value we use in all other experiments. + +![](images/c8dd751d2c77b8519db812a661e676dca324465a61917667751f219647f255ce.jpg) +Figure 11. Controlling the dynamism by changing $\rho$ . + +is because the excessive sparseness causes the tile-culling process to over-cull necessary tiles, resulting in holes in the rendering results. Therefore, we chose $\lambda_{spa} = 0.004$ (green line in Fig. 10) in other experiments. + +Value of $\rho$ . In the experiments, we use $\rho = 0$ to ensure maximum completeness with respect to the input video. However, we find that by controlling the hyperparameter $\rho$ , we could control the degree of dynamism of the reconstructed 3D video. One example is shown in Fig. 11. + +# 5. Discussion and Conclusion + +Limitations and Future Work. Our method comes with some limitations. First, since the MTV representation does not condition on view direction, it fails to model complex view-dependent effects, such as non-planar specular. 
One possible way to improve the representation is by introducing view-dependency, such as spherical harmonics [53] or neural basis function [51]. Another limitation is that we assume the scene to possess a looping pattern, which works best for natural scenes like flowing water and waving trees. However, if the scene is not loopable, our method tends to fail because each view has a completely unique content. This leads to a highly ill-posed problem in constructing a looping video from the asynchronous input videos. + +Conclusion. In this paper, we propose a practical solution for constructing a 3D looping video representation given completely asynchronous multi-view videos. Experiments verify the effectiveness of our pipeline and demonstrate significant improvement in quality and efficiency over several baselines. We hope that this work will further motivate research into dynamic 3D scene reconstruction. + +Acknowledgements. The authors from HKUST were partially supported by the Hong Kong Research Grants Council (RGC). The author from CityU was partially supported by an ECS grant from the RGC (Project No. CityU 21209119). + +# References + +[1] Aseem Agarwala, Ke Colin Zheng, Chris Pal, Maneesh Agrawala, Michael Cohen, Brian Curless, David Salesin, and Richard Szeliski. Panoramic video textures. ACM Trans. Graph., 24(3):821-827, jul 2005. 1, 2 +[2] Benjamin Attal, Eliot Laidlaw, Aaron Gokaslan, Changil Kim, Christian Richardt, James Tompkin, and Matthew O'Toole. Törf: Time-of-flight radiance fields for dynamic scene view synthesis. Advances in neural information processing systems, 34:26289-26301, 2021. 2, 3 +[3] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5366-5375, 2020. 2 +[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2, 4 +[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 1, 2 +[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. arXiv preprint arXiv:2206.15258, 2022. 2, 3 +[7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII, pages 333-350. Springer, 2022. 2 +[8] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2 +[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snively, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2, 3 +[10] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5515-5524, 2016. 3 +[11] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3 +[12] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 3 + +[13] Niv Granot, Ben Feinstein, Assaf Shocher, Shai Bagon, and Michal Irani. Drop the gan: In defense of patches nearest neighbors as single image generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13460-13469, June 2022. 5 +[14] Niv Haim, Ben Feinstein, Niv Granot, Assaf Shocher, Shai Bagon, Tali Dekel, and Michal Irani. Diverse generation from a single video made possible. arXiv preprint arXiv:2109.08591, 2021. 5 +[15] Tavi Halperin, Hanit Hakim, Orestis Vantzos, Gershon Hochman, Netai Benaim, Lior Sassy, Michael Kupchik, Ofir Bibi, and Ohad Fried. Endless loops: detecting and animating periodic patterns in still images. ACM Transactions on Graphics (TOG), 40(4):1-12, 2021. 2 +[16] Mingming He, Jing Liao, Pedro V Sander, and Hugues Hoppe. Gigapixel panorama video loops. ACM Transactions on Graphics (TOG), 37(1):1-15, 2017. 1, 2 +[17] Peter Hedman, Pratul P Srinivasan, Ben Mildenhall, Jonathan T Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5875-5884, 2021. 3 +[18] Aleksander Holynski, Brian L Curless, Steven M Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5810-5819, 2021. 2 +[19] Apple Inc. Take and edit live photos, Oct 2021. 1 +[20] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 4 +[21] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 7 +[22] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 3 +[23] Jing Liao, Mark Finch, and Hugues Hoppe. Fast computation of seamless video loops. ACM Transactions on Graphics (TOG), 34(6):1-10, 2015. 1, 2, 3, 6 +[24] Zicheng Liao, Neel Joshi, and Hugues Hoppe. Automated video looping with progressive dynamism. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013. 1, 2 +[25] Kai-En Lin, Lei Xiao, Feng Liu, Guowei Yang, and Ravi Ramamoorthi. Deep 3d mask volume for view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1749–1758, 2021. 2 +[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. 
Neural sparse voxel fields. Advances + +in Neural Information Processing Systems, 33:15651-15663, 2020. 3 +[27] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2, 3 +[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2, 3 +[29] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3667-3676, 2022. 2 +[30] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 1, 3 +[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3 +[32] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, July 2022. 2 +[33] Medhini Narasimhan, Shiry Ginosar, Andrew Owens, Alexei A. Efros, and Trevor Darrell. Strumming to the beat: Audio-conditioned contrastive video textures. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3761-3770, January 2022. 2 +[34] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 2, 3 +[35] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 3 +[36] Alex Rav-Acha, Yael Pritch, Dani Lischinski, and Shmuel Peleg. Dynamosaics: Video mosaics with non-chronological time. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 58-65. IEEE, 2005. 1, 2 +[37] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, 2020. 2, 3 +[38] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2, 3 +[39] Leonid I Rudin and Stanley Osher. Total variation based image restoration with free local constraints. In Proceedings of + +1st international conference on image processing, volume 1, pages 31-35. IEEE, 1994. 4 +[40] Arno Schödl, Richard Szeliski, David H Salesin, and Irfan Essa. Video textures. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 489-498, 2000. 2 +[41] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 3 +[42] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. 
Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 3 +[43] Denis Simakov, Yaron Caspi, Eli Shechtman, and Michal Irani. Summarizing visual data using bidirectional similarity. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 5, 6 +[44] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 3 +[45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2 +[46] Théo Thonat, Yagiz Aksoy, Miika Aittala, Sylvain Paris, Frédo Durand, and George Drettakis. Video-based rendering of dynamic stationary environments from unsynchronized inputs. In Computer Graphics Forum, volume 40, pages 73-86. Wiley Online Library, 2021. 1, 2, 3, 6 +[47] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12959-12970, 2021. 3 +[48] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 551-560, 2020. 3 +[49] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3 +[50] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3906-3915, 2022. 3 +[51] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 8 + +[52] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5336-5345, 2020. 2 +[53] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 3, 8 +[54] Jiakai Zhang, Liao Wang, Xinhang Liu, Fuqiang Zhao, Minzhang Li, Haizhao Dai, Boyuan Zhang, Wei Yang, Lan Xu, and Jingyi Yu. Neuvv: Neural volumetric videos with immersive rendering and editing. arXiv preprint arXiv:2202.06088, 2022. 3 +[55] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 6 +[56] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images, 2018.
2, 3 +[57] C Lawrence Zitnick, Sing Bing Kang, Matthew Uytendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 2 \ No newline at end of file diff --git a/2023/3D Video Loops From Asynchronous Input/images.zip b/2023/3D Video Loops From Asynchronous Input/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4d344a875a1b445d1e2d3eeb87995d640b8106d3 --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4b79aadc6cce26522382ee28ba5b80167db06d055952b11735b0d9cd6cdc2d5 +size 711352 diff --git a/2023/3D Video Loops From Asynchronous Input/layout.json b/2023/3D Video Loops From Asynchronous Input/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e104e14228ae14fc0a30bc8b34ae40711ad5c632 --- /dev/null +++ b/2023/3D Video Loops From Asynchronous Input/layout.json @@ -0,0 +1,10135 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 165, + 103, + 429, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 103, + 429, + 121 + ], + "spans": [ + { + "bbox": [ + 165, + 103, + 429, + 121 + ], + "type": "text", + "content": "3D Video Loops from Asynchronous Input" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "spans": [ + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "text", + "content": "Li Ma" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "text", + "content": " Xiaoyu Li" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "text", + "content": " Jing Liao" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "text", + "content": " Pedro V. 
Sander" + }, + { + "bbox": [ + 143, + 142, + 447, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 161, + 162, + 432, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 162, + 432, + 176 + ], + "spans": [ + { + "bbox": [ + 161, + 162, + 432, + 176 + ], + "type": "text", + "content": "1The Hong Kong University of Science and Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "spans": [ + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "type": "text", + "content": "Tencent AI Lab " + }, + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 167, + 177, + 426, + 191 + ], + "type": "text", + "content": "City University of Hong Kong" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 48, + 204, + 123, + 314 + ], + "blocks": [ + { + "bbox": [ + 48, + 204, + 123, + 314 + ], + "lines": [ + { + "bbox": [ + 48, + 204, + 123, + 314 + ], + "spans": [ + { + "bbox": [ + 48, + 204, + 123, + 314 + ], + "type": "image", + "image_path": "424e31edcd5e4876067e0ffeb5cfc2dc81f2b55db2c708dfe3f76429a986a527.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 318, + 236, + 328 + ], + "lines": [ + { + "bbox": [ + 83, + 318, + 236, + 328 + ], + "spans": [ + { + "bbox": [ + 83, + 318, + 236, + 328 + ], + "type": "text", + "content": "(a) Reconstructed 3D Video Representation" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 123, + 205, + 274, + 313 + ], + "blocks": [ + { + "bbox": [ + 123, + 205, + 274, + 313 + ], + "lines": [ + { + "bbox": [ + 123, + 205, + 274, + 313 + ], + "spans": [ + { + "bbox": [ + 123, + 205, + 274, + 313 + ], + "type": "image", + "image_path": "334ba8de867f95f76c793daee3e2e74e1651be2f42cdcc97559d83bfa050e684.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 337, + 547, + 381 + ], + "lines": [ + { + "bbox": [ + 46, + 337, + 547, + 381 + ], + "spans": [ + { + "bbox": [ + 46, + 337, + 547, + 381 + ], + "type": "text", + "content": "Figure 1. Given a set of asynchronous multi-view videos, we propose a pipeline to construct a novel 3D looping video representation (a), which consists of a static texture atlas, a dynamic texture atlas, and multiple tiles as the geometry proxy. The 3D video loops allow both view and time control (b), and can be rendered in real time even on mobile devices (c). We strongly recommend readers refer to the supplementary material for video results." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 276, + 205, + 372, + 316 + ], + "blocks": [ + { + "bbox": [ + 276, + 205, + 372, + 316 + ], + "lines": [ + { + "bbox": [ + 276, + 205, + 372, + 316 + ], + "spans": [ + { + "bbox": [ + 276, + 205, + 372, + 316 + ], + "type": "image", + "image_path": "e836695320aa370be43d5ab5b71de4f138330987fd71f48e9f3453c8d7f36342.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 318, + 420, + 328 + ], + "lines": [ + { + "bbox": [ + 324, + 318, + 420, + 328 + ], + "spans": [ + { + "bbox": [ + 324, + 318, + 420, + 328 + ], + "type": "text", + "content": "(b) View and Time Control" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 373, + 206, + 469, + 316 + ], + "blocks": [ + { + "bbox": [ + 373, + 206, + 469, + 316 + ], + "lines": [ + { + "bbox": [ + 373, + 206, + 469, + 316 + ], + "spans": [ + { + "bbox": [ + 373, + 206, + 469, + 316 + ], + "type": "image", + "image_path": "c340f0555bb3260ada99a0da15cbb2506246e6a7cadd2f6602d5775f23e90bc7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 470, + 318, + 543, + 328 + ], + "lines": [ + { + "bbox": [ + 470, + 318, + 543, + 328 + ], + "spans": [ + { + "bbox": [ + 470, + 318, + 543, + 328 + ], + "type": "text", + "content": "(c) Real Time Demo" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 473, + 208, + 545, + 314 + ], + "blocks": [ + { + "bbox": [ + 473, + 208, + 545, + 314 + ], + "lines": [ + { + "bbox": [ + 473, + 208, + 545, + 314 + ], + "spans": [ + { + "bbox": [ + 473, + 208, + 545, + 314 + ], + "type": "image", + "image_path": "754cf507d69b28aff5d38f66bb3b92b8b6a10df930f5e4922d00136f0ddf75dc.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 143, + 392, + 192, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 392, + 192, + 403 + ], + "spans": [ + { + "bbox": [ + 143, + 392, + 192, + 403 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 45, + 425, + 290, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 425, + 290, + 700 + ], + "spans": [ + { + "bbox": [ + 45, + 425, + 290, + 700 + ], + "type": "text", + "content": "- Looping videos are short video clips that can be looped endlessly without visible seams or artifacts. They provide a very attractive way to capture the dynamism of natural scenes. Existing methods have been mostly limited to 2D representations. In this paper, we take a step forward and propose a practical solution that enables an immersive experience on dynamic 3D looping scenes. The key challenge is to consider the per-view looping conditions from asynchronous input while maintaining view consistency for the 3D representation. We propose a novel sparse 3D video representation, namely Multi-Tile Video (MTV), which not only provides a view-consistent prior, but also greatly reduces memory usage, making the optimization of a 4D volume tractable. Then, we introduce a two-stage pipeline to construct the 3D looping MTV from completely asynchronous multi-view videos with no time overlap. 
A novel looping loss based on video temporal retargeting algorithms is adopted during the optimization to loop the 3D scene. Experiments of our framework have shown promise in successfully generating and rendering photorealistic 3D looping videos in real time even on mobile devices. The code, dataset, and live demos are available in https://limacv.github.io/VideoLoop3D_web/." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 391, + 386, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 391, + 386, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 391, + 386, + 403 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 715 + ], + "type": "text", + "content": "Endless looping videos are fascinating ways to record special moments. These video loops are compact in terms of storage and provide a much richer experience for scenes that exhibit looping behavior. One successful commercial use of this technique is the live photo [19] feature in the Apple iPhone, which tries to find an optimal looping period and fade in/out short video clips to create looping videos. There have been several works on automatically constructing 2D looping videos from non-looping short video clips. Liao et al. [24] first propose to create 2D video loops from videos captured with static cameras. They solve for the optimal starting frame and looping period for each pixel in the input video to composite the final video. Later on, several methods are proposed to improve the computation speed [23], or extend to panoramas [1, 36], and gigapixel videos [16]. However, few attempts have been made to extend video loops to a 3D representation. One existing work that shares a similar setting as ours is VBR [46], which generates plausible video loops in novel views. However, it comes with some limitations: It builds on top of ULR [5], which can produce ghosting artifacts due to inaccurate mesh reconstruction, as shown in [30]. Besides, VBR generates looping videos and reduces the inconsistency from asynchronous input by adaptively blending in different frequency domains, which tends to blur away details." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 297, + 733, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 733, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 733, + 313, + 742 + ], + "type": "text", + "content": "310" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 277 + ], + "type": "text", + "content": "To allow free-view observation of the looping videos, a proper 3D representation needs to be employed. Recently, tremendous progress has been made in novel view synthesis based on 3D scene representations such as triangle meshes [37, 38, 45], Multi-plane Image (MPI) [9, 56], and Neural Radiance Field (NeRF) [7, 31, 32], which could be reconstructed given only sparse observations of real scenes and render photo-realistic images in novel views. Much effort has been made to adapt these methods to dynamic scenes, which allows for both viewing space and time controls [2, 6, 27, 28, 34, 35, 52, 57]. Therefore, a straightforward solution to generate a 3D looping video is to employ the 2D looping algorithms for each view and lift the results to 3D using these methods. However, we find it hard to get satisfactory results since the 2D looping algorithms do not consider view consistency, which is even more challenging for the asynchronous multi-view videos that we use as input." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 279, + 289, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 279, + 289, + 460 + ], + "spans": [ + { + "bbox": [ + 47, + 279, + 289, + 460 + ], + "type": "text", + "content": "In this work, we develop a practical solution for these problems by using the captured video input of the dynamic 3D scene with only one commodity camera. We automatically construct a 3D looping video representation from completely asynchronous multi-view input videos with no time overlap. To get promising 3D video loop results, two main issues need to be addressed. First, we need to solve for a view-consistent looping pattern from inconsistent multi-view videos, from which we need to identify spatio-temporal 3D patches that are as consistent as possible. Second, the 3D video potentially requires a memory-intensive 4D volume for storage. Therefore, we need to develop a 3D video representation that is both efficient in rendering and compact in memory usage to make the optimization of the 4D volume tractable." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 462, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 462, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 462, + 289, + 715 + ], + "type": "text", + "content": "To this end, we develop an analysis-by-synthesis approach that trains for a view-consistent 3D video representation by optimizing multi-view looping targets. We propose an efficient 3D video representation based on Multi-plane Images (MPIs), namely Multi-tile Videos (MTVs), by exploiting the spatial and temporal sparsity of the 3D scene. As shown in Fig. 2, instead of densely storing large planes, MTVs store static or dynamic texture tiles that are sparsely scattered in the view frustum. 
This greatly reduces the memory requirement for rendering compared with other 3D video representations, making the optimization of the 3D looping video feasible in a single GPU. The sparsity of MTVs also serves as a view-consistent prior when optimizing the 3D looping video. To optimize the representation for looping, we formulate the looping generation for each view as a temporal video retargeting problem and develop a novel looping loss based on this formulation. We propose a two-stage pipeline to generate a looping MTV, and the experiments show that our method can produce photorealistic 3D video loops that maintain similar dynamism from the input, and enable real-time rendering even in mobile devices." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 306, + 72, + 504, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 504, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 504, + 84 + ], + "type": "text", + "content": "Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 317, + 88, + 547, + 194 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 317, + 88, + 545, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 88, + 545, + 125 + ], + "spans": [ + { + "bbox": [ + 317, + 88, + 545, + 125 + ], + "type": "text", + "content": "- We propose Multi-tile Videos (MTVs), a novel dynamic 3D scene representation that is efficient in rendering and compact in memory usage." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 129, + 547, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 547, + 165 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 547, + 165 + ], + "type": "text", + "content": "- We propose a novel looping loss by formulating the 3D video looping construction as a temporal retargeting problem." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 170, + 545, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 170, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 317, + 170, + 545, + 194 + ], + "type": "text", + "content": "- We propose a two-stage pipeline that constructs MTVs from completely asynchronous multi-view videos." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 204, + 392, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 204, + 392, + 216 + ], + "spans": [ + { + "bbox": [ + 306, + 204, + 392, + 216 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 224, + 545, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 224, + 545, + 259 + ], + "spans": [ + { + "bbox": [ + 305, + 224, + 545, + 259 + ], + "type": "text", + "content": "Our work lies at the confluence of two research topics: looping video construction and novel view synthesis. We will review each of them in this section." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 266, + 547, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 547, + 564 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 547, + 564 + ], + "type": "text", + "content": "Video Loops. Several works have been proposed to synthesize looping videos from short video clips. Schödl et al. [40] create video loops by finding similar video frames and jumping between them. 
Audio [33] can also be leveraged for further refinement. Liao et al. [24] formulate the looping as a combinatorial optimization problem that tries to find the optimal start frame and looping period for each pixel. It seeks to maximize spatio-temporal consistency in the output looping videos. This formulation is further developed and accelerated by Liao et al. [23], and extended to gigapixel looping videos [16] by stitching multiple looping videos. Panorama video loops can also be created by taking a video with a panning camera [1, 36]. VBR [46] generates loops by fading in/out temporal Laplacian pyramids, and extends video loops to 3D using ULR [5]. Another line of work tries to create video loops from still images and strokes provided by users as rough guidelines of the looping motion. Endless Loops [15] tries to find self-similarities from the image and solve for the optical flow field, which is then used to warp and composite the frames of the looping video. This process can also be replaced by data-driven approaches [18, 29], or physics-based simulation [8]. Despite the progress in creating various forms of looping videos, extending looping videos to 3D is still an unexplored direction." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 715 + ], + "type": "text", + "content": "Novel View Synthesis of Dynamic Scenes. Novel View Synthesis (NVS) aims at interpolating views given only a set of sparse input views. For dynamic scenes, NVS requires the construction of a 4D representation that allows for both space and time control. Some methods use synchronized multi-view videos as input, which are often only available in a studio setting [27, 28, 57], or using specially designed camera arrays [4, 9, 21, 25]. To ease hardware requirements, Open4D [3] uses unconstrained multi-view input, but still requires multiple observations at the same timestamp. With the development of neural rendering, it is possible to use only monocular input. However, this is" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 732, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 732, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 732, + 312, + 742 + ], + "type": "text", + "content": "311" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 178 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 178 + ], + "type": "text", + "content": "a highly ill-posed problem since the camera and scene elements are moving simultaneously. Some methods use extra sensors such as a depth sensor [2, 6], while some use a data-driven prior to help construct the scene geometry [12, 50]. Others use a hand-crafted motion prior to regularize the scene motion [22, 34, 35, 47], which usually can only handle simple motions. In our setting, we take asynchronous multi-view videos with no time overlap, which is a setting that has not been addressed before." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 183, + 289, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 183, + 289, + 446 + ], + "spans": [ + { + "bbox": [ + 46, + 183, + 289, + 446 + ], + "type": "text", + "content": "3D Scene Representations. A critical issue in NVS is the underlying scene representation. A triangle mesh is the most commonly used scene representation in commercial 3D software. Some methods use meshes as their representation [37, 38, 46]. However, reconstructing an accurate, temporally consistent mesh is still an open problem, being particularly challenging for complex in-the-wild scenes [28]. A volumetric representation is another option to express the 3D world by storing scene parameters in a dense 3D grid [11, 27, 49, 54]. One benefit is that it trivially supports differentiable rendering, which greatly improves the reconstruction quality. The Multi-plane Image (MPI) [9, 10, 30, 44, 48, 56] is an adapted volumetric representation that represents a scene using multiple RGBA planes in the camera frustum. Volume representations can model complex geometry, but at the cost of higher memory usage. Another rapidly developing representation is Neural Radiance Field (NeRF) [31], which models scenes as continuous functions and parameterizes the function as an implicit neural network. It achieves photorealistic rendering results at the expense of long training and rendering times, especially for dynamic scenes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 104, + 471 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 479, + 114, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 114, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 114, + 491 + ], + "type": "text", + "content": "3.1. Overview" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 289, + 715 + ], + "type": "text", + "content": "Our goal is to reconstruct a view-consistent 3D video representation that can be looped infinitely using completely asynchronous multi-view 2D videos. We start by introducing a novel 3D video representation, namely Multitile Videos (MTVs), which improves efficiency by exploiting sparsity. Then we propose a two-stage pipeline as shown in Fig. 3 to construct a 3D looping MTV. In the first stage, we initialize the MTV by optimizing a static Multiplane Image (MPI) and a 3D loopable mask using long exposure images and 2D loopable masks derived from the input videos. We then construct an MTV through a tile culling process. In the second stage, we train the MTV using an analysis-by-synthesis approach in a coarse-to-fine manner. The key enabler for this process is a novel looping loss based on video retargeting algorithms, which encourages a video to simultaneously loop and preserve similarity to the input. The remainder of this section describes the details of this proposed approach." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 305, + 69, + 547, + 173 + ], + "blocks": [ + { + "bbox": [ + 305, + 69, + 547, + 173 + ], + "lines": [ + { + "bbox": [ + 305, + 69, + 547, + 173 + ], + "spans": [ + { + "bbox": [ + 305, + 69, + 547, + 173 + ], + "type": "image", + "image_path": "a085c62d5448702bb9e1610535da2958477c0dca0b4f04fc95e4755d3d097f74.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 180, + 545, + 202 + ], + "lines": [ + { + "bbox": [ + 304, + 180, + 545, + 202 + ], + "spans": [ + { + "bbox": [ + 304, + 180, + 545, + 202 + ], + "type": "text", + "content": "Figure 2. Comparison between the Multi-plane Video representation and the Multi-tile Video representation." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 214, + 411, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 214, + 411, + 227 + ], + "spans": [ + { + "bbox": [ + 305, + 214, + 411, + 227 + ], + "type": "text", + "content": "3.2. Data Preparation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "text", + "content": "The input to our system are multiple asynchronous videos of the same scene from different views. Each video " + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "inline_equation", + "content": "\\mathbf{V} \\in \\mathbb{R}^{F \\times H \\times W \\times 3}" + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "text", + "content": " is a short clip with " + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "text", + "content": " frames and a resolution of " + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 304, + 233, + 545, + 340 + ], + "type": "text", + "content": ". Video lengths may differ for each view. Each video is expected to have a fixed camera pose, which can be achieved using tripods or existing video stabilization tools during post-process. Since we allow videos to be asynchronous, we could capture each view sequentially using a single commodity camera." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 341, + 545, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 545, + 437 + ], + "type": "text", + "content": "Given the precondition that the captured scene contains mostly repetitive content, we assume the long exposure images for each view to be view-consistent. Therefore, we compute an average image for each video " + }, + { + "bbox": [ + 304, + 341, + 545, + 437 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 304, + 341, + 545, + 437 + ], + "type": "text", + "content": ", and then register a pinhole camera model for each video using COLMAP [41, 42]. We also compute a binary loopable mask for each input video similar to Liao et al. [23], where 1 indicates pixel with the potential to form a loop and 0 otherwise." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 444, + 512, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 444, + 512, + 456 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 512, + 456 + ], + "type": "text", + "content": "3.3. Multi-tile Video (MTV) Representation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": "Before introducing our proposed MTV representation, we first briefly review the MPI representation [56]. An MPI represents the scene using " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": " fronto-parallel RGBA planes in the frustum of a reference camera, with each plane arranged at fixed depths [48]. To render an MPI from novel views, we first need to warp each plane based on the depth of the plane and the viewing camera, and then iteratively blend each warped plane from back to front. A straightforward dynamic extension of MPI, namely Multi-plane Video (MPV), is to store a sequence of RGBA maps for each plane. For a video with " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": " frames, this results in a 4D volume in " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{D\\times T\\times H\\times W\\times 4}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": ", which is very memory consuming. Inspired by recent work on sparse volume representation [17,26,53], we propose Multi-tile Videos, which reduce the memory requirements by exploiting the spatio-temporal sparsity of the scene. Specifically, we subdivide each plane into a regular grid of tiny tiles. Each tile " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{T}\\in \\mathbb{R}^{F\\times H_s\\times W_s\\times 4}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": " stores a small RGBA patch sequence with spatial resolution " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "H_{s}\\times W_{s}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": ". For each tile, we assign a label " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": " by identifying whether it contains looping content " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "l_{loop}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": ", a static scene " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "l_{static}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": ", or is simply empty " + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "inline_equation", + "content": "l_{empty}" + }, + { + "bbox": [ + 304, + 462, + 545, + 715 + ], + "type": "text", + "content": ". 
We could then store a" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "text", + "content": "312" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 68, + 542, + 214 + ], + "blocks": [ + { + "bbox": [ + 51, + 68, + 542, + 214 + ], + "lines": [ + { + "bbox": [ + 51, + 68, + 542, + 214 + ], + "spans": [ + { + "bbox": [ + 51, + 68, + 542, + 214 + ], + "type": "image", + "image_path": "a19264169a7d76cbe52657564e1662fc754b3e13b4e8db8d8ac4b9b7668b1f53.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 216, + 465, + 227 + ], + "lines": [ + { + "bbox": [ + 126, + 216, + 465, + 227 + ], + "spans": [ + { + "bbox": [ + 126, + 216, + 465, + 227 + ], + "type": "text", + "content": "Figure 3. The two-stage pipeline to generate the MTV representation from multi-view videos." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 236, + 288, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 236, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 288, + 297 + ], + "type": "text", + "content": "single RGBA patch for " + }, + { + "bbox": [ + 46, + 236, + 288, + 297 + ], + "type": "inline_equation", + "content": "l_{\\text{static}}" + }, + { + "bbox": [ + 46, + 236, + 288, + 297 + ], + "type": "text", + "content": ", and discard tiles that are empty. Fig. 1 visualizes a reconstructed MTV representation, where the RGBA patches are packed into static and dynamic texture atlas. Fig. 2 shows the difference between MPVs and MTVs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 302, + 200, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 302, + 200, + 315 + ], + "spans": [ + { + "bbox": [ + 47, + 302, + 200, + 315 + ], + "type": "text", + "content": "3.4. Stage 1: MTV Initialization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 320, + 288, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 320, + 288, + 429 + ], + "spans": [ + { + "bbox": [ + 46, + 320, + 288, + 429 + ], + "type": "text", + "content": "We find that optimizing a dense MTV directly from scratch results in the approach being easily trapped in local minima, which yields view-inconsistent results. To address this, we use a two-stage pipeline shown in Fig. 3. In the first stage, we start by constructing a \"long exposure\" MPI. Then we initialize the sparse MTV by tile culling process that removes unnecessary tiles. By reducing the number of parameters, the initialized MTV provides a view-consistent prior and leads to a high-quality 3D video representation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": "Training a looping-aware MPI. 
We start by training a dense MPI " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{D \\times H \\times W \\times 4}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": ", as well as a 3D loopable mask " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{L} \\in \\mathbb{R}^{D \\times H \\times W}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": ", using the average image and the 2D loopable mask, respectively. We randomly initialize " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": ", and in each iteration, we randomly sample a patch in a random view, and render an RGB patch " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{p}}_c \\in \\mathbb{R}^{h \\times w \\times 3}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " and a loopable mask patch " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{p}}_l \\in \\mathbb{R}^{h \\times w}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " using the standard MPI rendering method. Note that the " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " channel is shared between " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " during rendering. We supervise the MPI " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " by minimizing the Mean Square Error (MSE) between the rendering results and the corresponding patch " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_c" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " from the average image. 
We supervise the loopable mask " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " by minimizing the Binary Cross Entropy (BCE) between the rendered 2D mask " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{p}}_l" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " and the corresponding patch " + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_l" + }, + { + "bbox": [ + 46, + 442, + 287, + 622 + ], + "type": "text", + "content": " from the 2D loopable mask:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 625, + 287, + 649 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 625, + 287, + 649 + ], + "spans": [ + { + "bbox": [ + 55, + 625, + 287, + 649 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {m s e} = \\frac {1}{h w} \\| \\mathbf {p} _ {c} - \\hat {\\mathbf {p}} _ {c} \\| _ {2} ^ {2}, \\tag {1}", + "image_path": "3b0653c09281c05a3702b39c866ac5060feb58cf1024735ec9b3dfacead85655.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 651, + 287, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 651, + 287, + 683 + ], + "spans": [ + { + "bbox": [ + 58, + 651, + 287, + 683 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {b c d} = \\frac {1}{h w} \\| - (\\mathbf {p} _ {l} \\log (\\hat {\\mathbf {p}} _ {l}) + (1 - \\mathbf {p} _ {l}) \\log (1 - \\hat {\\mathbf {p}} _ {l})) \\| _ {1}, \\tag {2}", + "image_path": "5921c16a3d862f54c73946c27680abc5dee74d66c3390f5e690cfed5035210a7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\| \\mathbf{p}\\| _1" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\| \\mathbf{p}\\| _2" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " are the L1 and L2 norm of a flattened patch " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": ". The log is computed for every element of a patch." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "text", + "content": "Since the rendering of the MPI is differentiable, we optimize " + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "text", + "content": " using the Adam optimizer [20]. Optimizing all the parameters freely causes noisy artifacts, therefore, we apply total variation (TV) regularization [39] to " + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 236, + 545, + 285 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 348, + 289, + 545, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 289, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 348, + 289, + 545, + 312 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {t v} = \\frac {1}{H W} \\left(\\| \\Delta_ {x} \\mathbf {M} \\| _ {1} + \\| \\Delta_ {y} \\mathbf {M} \\| _ {1}\\right), \\tag {3}", + "image_path": "aa953053b10bf8d31baf9594ebb18f54f488269dae486dc1253ca2497c3853ee.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\| \\Delta_x\\mathbf{M}\\| _1" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " is shorthand for the L1 norm of the gradient of each pixel in the MPI " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " along " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " direction, and analogously for " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\| \\Delta_y\\mathbf{M}\\| _1" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": ". We also adopt a sparsity loss to further encourage sparsity to the " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " channel of the MPI " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{\\alpha}" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " as in Broxton et al. [4]. 
Specifically, we collect " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " alpha values in each pixel location of " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{\\alpha}" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " into a vector " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 316, + 546, + 410 + ], + "type": "text", + "content": " is the number of planes. Then the sparsity loss is computed as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 373, + 410, + 545, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 410, + 545, + 439 + ], + "spans": [ + { + "bbox": [ + 373, + 410, + 545, + 439 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {s p a} = \\frac {1}{H W} \\sum_ {\\text {p i x e l}} \\frac {\\| \\beta \\| _ {1}}{\\| \\beta \\| _ {2}}. \\tag {4}", + "image_path": "0eb48c5d56c18196ec578e331fcf54b287dfad046ec33009d245dd603a3179de.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 441, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 441, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 441, + 545, + 464 + ], + "type": "text", + "content": "The final loss in the first stage is a weighted sum of the four losses:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 341, + 472, + 545, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 472, + 545, + 485 + ], + "spans": [ + { + "bbox": [ + 341, + 472, + 545, + 485 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {m s e} + \\mathcal {L} _ {b c d} + \\lambda_ {t v} \\mathcal {L} _ {t v} + \\lambda_ {s p a} \\mathcal {L} _ {s p a}. \\tag {5}", + "image_path": "5609d81ff97bd8cfb00664991fecd6ea420ad9b719d3d8c9936dfd3af10d9661.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": "Tile Culling. After training, we reconstruct a static MPI M as well as a 3D loopable mask " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": ". We subdivide each plane into a regular grid of tiles. In the experiments, we subdivide the plane so that each tile has a resolution of " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "H_{s} = W_{s} = 16" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": ". We denote " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\{T_c\\}, \\{T_\\alpha\\}, \\{T_l\\}" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": " to be the set of RGB color, alpha value, and loopable mask of a tile, respectively. 
We then assign label " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "l \\in \\{l_{empty}, l_{static}, l_{loop}\\}" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": " based on the " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\{T_\\alpha\\}" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "inline_equation", + "content": "\\{T_l\\}" + }, + { + "bbox": [ + 304, + 497, + 545, + 594 + ], + "type": "text", + "content": " for each tile:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 599, + 545, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 599, + 545, + 653 + ], + "spans": [ + { + "bbox": [ + 315, + 599, + 545, + 653 + ], + "type": "interline_equation", + "content": "l = \\left\\{ \\begin{array}{l l} l _ {\\text {e m p t y}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} \\leq \\tau_ {\\alpha}, \\\\ l _ {\\text {s t a t i c}} & \\text {i f} \\max \\left\\{T _ {\\alpha} \\right\\} > \\tau_ {\\alpha} \\text {a n d} \\max \\left\\{T _ {l} \\right\\} < \\tau_ {l}, \\\\ l _ {\\text {l o o p}} & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {6}", + "image_path": "81d5a9f90be75b71143c769377c411410430e357f48953f4b2ed55fca1ff7e10.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": "We set the threshold of culling to be " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "\\tau_{\\alpha} = 0.05" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "\\tau_{l} = 0.5" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": ". We cull the tiles with " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "l = l_{empty}" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": ". For tiles with " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "l = l_{loop}" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": ", we lift the static 2D RGBA patch into a patch sequence by copying the patch " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": " times, where " + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 654, + 546, + 712 + ], + "type": "text", + "content": " is the number of frames that we would like the MTV to have. 
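The tile-labelling rule of Eq. 6, with the thresholds quoted above (tau_alpha = 0.05, tau_l = 0.5), can be sketched per tile as follows; the tile tensors and function name are illustrative, not from the authors' code.

```python
import torch

TAU_ALPHA = 0.05  # alpha threshold from the paper
TAU_LOOP = 0.5    # loopable-mask threshold from the paper

def label_tile(tile_alpha: torch.Tensor, tile_loop: torch.Tensor) -> str:
    """Assign a label to one tile following Eq. 6.

    tile_alpha: alpha values {T_alpha} of the tile (e.g. a 16x16 patch).
    tile_loop:  loopable-mask values {T_l} of the same tile.
    """
    if tile_alpha.max() <= TAU_ALPHA:
        return "empty"   # fully transparent -> culled
    if tile_loop.max() < TAU_LOOP:
        return "static"  # visible but not looping -> kept as a single frame
    return "loop"        # lifted to a patch video of T frames
```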
We add" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 733, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 733, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 733, + 312, + 742 + ], + "type": "text", + "content": "313" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 289, + 140 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 289, + 140 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 289, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 289, + 140 + ], + "type": "image", + "image_path": "cf3722ee028e5c341f276593a1a9d440a9e1dc9213ebf70a4e41497da9f0972d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 148, + 287, + 204 + ], + "lines": [ + { + "bbox": [ + 46, + 148, + 287, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 148, + 287, + 204 + ], + "type": "text", + "content": "Figure 4. Visualization of looping loss. We first pad frames and extract 3D patches along the time axis for each pixel location, then we compute a normalized similarity score for each patch pair. Finally, the looping loss is computed by averaging errors between patches with minimum scores." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 212, + 287, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 212, + 287, + 271 + ], + "spans": [ + { + "bbox": [ + 46, + 212, + 287, + 271 + ], + "type": "text", + "content": "a small random noise to the lifted patch video to prevent the straightforward static solution. For tiles with " + }, + { + "bbox": [ + 46, + 212, + 287, + 271 + ], + "type": "inline_equation", + "content": "l = l_{\\text{static}}" + }, + { + "bbox": [ + 46, + 212, + 287, + 271 + ], + "type": "text", + "content": ", we simply keep it unchanged. This culling process greatly reduces the memory requirement for optimizing the 4D volume." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 278, + 202, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 202, + 292 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 202, + 292 + ], + "type": "text", + "content": "3.5. Stage 2: MTV Optimization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 297, + 287, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 287, + 321 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 287, + 321 + ], + "type": "text", + "content": "After initializing the MTV representation, we then seek to optimize the final looping MTV." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": "**Looping Loss.** The main supervision of the optimization process is a novel looping loss, which is inspired by the recent progress in image [13] and video [14] retargeting algorithm. 
Specifically, in each iteration, we randomly sample a view and a rectangle window of size " + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "inline_equation", + "content": "h \\times w" + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": ", and render the video " + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{V}}_o \\in \\mathbb{R}^{T \\times h \\times w \\times 3}" + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": " from MTV. We denote the corresponding input video as " + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_p \\in \\mathbb{R}^{F \\times h \\times w \\times 3}" + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": ". Our goal is to optimize the MTV such that " + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{V}}_o" + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": " forms a looping video " + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\infty}" + }, + { + "bbox": [ + 46, + 335, + 287, + 443 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 452, + 287, + 466 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 452, + 287, + 466 + ], + "spans": [ + { + "bbox": [ + 88, + 452, + 287, + 466 + ], + "type": "interline_equation", + "content": "\\mathbf {V} _ {\\infty} (t) = \\hat {\\mathbf {V}} _ {o} (t \\bmod T), t \\in [ 1, + \\infty), \\tag {7}", + "image_path": "fad51380c15dfc52cb9b7f467ef0720977170cd38be0bc21aa727ec54c1f2334.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{V}(t)" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": "-th frame of the video and mod is the modulus operation. We define the looping loss to encourage the " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\infty}" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": " to be a temporal retargeting result of " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_p" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": ". A visualization of the process is shown in Fig. 4." 
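A tiny sketch of the looping-video indexing in Eq. 7: reading any frame of V_infinity only ever touches the T rendered frames. Variable names and the 0-based indexing convention are placeholders.

```python
import torch

def loop_frame(v_o: torch.Tensor, t: int) -> torch.Tensor:
    """Frame t of the infinite looping video V_inf defined in Eq. 7.

    v_o: rendered MTV clip of shape (T, h, w, 3).
    t:   1-based frame index in [1, +inf).
    """
    T = v_o.shape[0]
    return v_o[(t - 1) % T]  # wrap the index back into the T rendered frames
```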
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": "We start by extracting 3D patch sets " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_i; i = 1, \\dots, n\\}" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_j; j = 1, \\dots, m\\}" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\infty}" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_p" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": ", respectively, along temporal axis. " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_i\\}" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_j\\}" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " are all centered at the same pixel location and we repeat the same process for every pixel. Note that although there are infinitely many patches from the looping video, the extracted patch set of the looping video is equivalent to a finite set of patches, which are extracted from the rendered video by circularly padding the first " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "p = s - d" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " frames of the rendered video " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{V}}_o" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " at the end of itself, where " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 522, + 287, + 713 + ], + "type": "text", + "content": " are the size and stride of the patches in the time axis. Fig. 5 demonstrates a toy example with 5 frames. By optimizing both the patches inside the video range and patches crossing the temporal boundary, we optimize a video that is both spatio-temporally consistent with the target and seamlessly looping. 
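A sketch of the circular padding and temporal patch extraction described above (cf. Fig. 5), assuming the rendered video is a (T, h, w, 3) tensor; using `unfold` here is one possible way to gather patches along time, not necessarily the authors' choice.

```python
import torch

def pad_for_looping(v_o: torch.Tensor, size: int, stride: int) -> torch.Tensor:
    """Circularly pad the rendered video with its first p = size - stride
    frames so that patches crossing the loop boundary are included.

    v_o:    rendered video, shape (T, h, w, 3).
    size:   temporal patch size s.
    stride: temporal patch stride d.
    """
    p = size - stride
    return torch.cat([v_o, v_o[:p]], dim=0)

def temporal_patches(video: torch.Tensor, size: int, stride: int) -> torch.Tensor:
    """Extract all 3D patches along the time axis (returns (N, h, w, 3, size))."""
    return video.unfold(0, size, stride)
```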
We then try to minimize the bidirectional similarity (BDS) [43] between" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 69, + 543, + 165 + ], + "blocks": [ + { + "bbox": [ + 310, + 69, + 543, + 165 + ], + "lines": [ + { + "bbox": [ + 310, + 69, + 543, + 165 + ], + "spans": [ + { + "bbox": [ + 310, + 69, + 543, + 165 + ], + "type": "image", + "image_path": "1279fc8e50dbdc8e079d03fffa99c71d97d51702029111fb71956ded814f04d3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 173, + 545, + 216 + ], + "lines": [ + { + "bbox": [ + 305, + 173, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 305, + 173, + 545, + 216 + ], + "type": "text", + "content": "Figure 5. For patches of size 3 and stride 1, the patch set extracted from the video that endlessly repeats 5 frames is the same as the patch set extracted from the padded video that circularly pads 2 frames." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "content": "the two sets of patches. Intuitively, this means every patch in " + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_i\\}" + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "content": " appears in " + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_j\\}" + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "content": " (for coherence) and every patch in " + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_j\\}" + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "content": " appears in " + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{Q}_i\\}" + }, + { + "bbox": [ + 305, + 224, + 545, + 262 + ], + "type": "text", + "content": " (for completeness)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "spans": [ + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": "To minimize the BDS between the two patch sets, we use the Patch Nearest Neighbor (PNN) algorithm [13] that first computes a 2D table of normalized similarity scores (NSSs) " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "s_{ij}" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": " for every possible pair of " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_j" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": ". 
Then for each patch " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": ", we select a target patch " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{f(i)} \\in \\{\\mathbf{K}_j\\}" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": " that has minimal NSS, where " + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "inline_equation", + "content": "f(i)" + }, + { + "bbox": [ + 305, + 262, + 545, + 333 + ], + "type": "text", + "content": " is a selection function:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 333, + 339, + 545, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 339, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 333, + 339, + 545, + 357 + ], + "type": "interline_equation", + "content": "f (i) = \\arg \\min _ {k} s _ {i, k}, \\text {w h e r e} \\tag {8}", + "image_path": "d26b35d99192a869c9234cc1e726ab40fa93bcc69071767244085bc779953be4.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 339, + 358, + 545, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 358, + 545, + 384 + ], + "spans": [ + { + "bbox": [ + 339, + 358, + 545, + 384 + ], + "type": "interline_equation", + "content": "s _ {i j} = \\frac {1}{\\rho + \\min _ {k} \\| \\mathbf {Q} _ {k} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {j} \\| _ {2} ^ {2}. \\tag {9}", + "image_path": "9cc9279c9c9c1ce473ae7f3d5dacfcc8f5d1d59128068ea07d68b45fdea606bb.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": " is a hyperparameter that controls the degree of completeness. Intuitively, when " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\rho \\rightarrow \\infty" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": ", Eq. 9 degenerates to " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "s_{ij} \\sim D(\\mathbf{Q}_i, \\mathbf{K}_j)" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": ", so we simply select " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_j" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": " that is most similar to " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": ". 
And if " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\rho = 0" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": ", the denominator " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\min_k D(\\mathbf{Q}_k, \\mathbf{K}_j)" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": " penalizes the score if there are already some " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": " that is closest to " + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_j" + }, + { + "bbox": [ + 305, + 389, + 545, + 472 + ], + "type": "text", + "content": ". Thus, the selection will prefer patches that have not yet been selected." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "spans": [ + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": "Using the PNN algorithm, we get the set of patches " + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_{f(i)}\\}" + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": " that is coherent to the target patch set " + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{K}_j\\}" + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": ", and the completeness is controlled by " + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": ". 
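A compact sketch of the normalized similarity scores of Eq. 9 and the selection function of Eq. 8 for a single pixel location, assuming the patches have been flattened into the rows of Q and K; all names are illustrative.

```python
import torch

def select_targets(Q: torch.Tensor, K: torch.Tensor, rho: float = 0.0) -> torch.Tensor:
    """Patch nearest-neighbor selection of Eqs. 8-9 for one pixel location.

    Q: (n, d) flattened patches from the looping video V_inf.
    K: (m, d) flattened patches from the input video V_p.
    Returns f(i) for every i, i.e. the index of the selected target patch.
    """
    d2 = torch.cdist(Q, K).pow(2)          # (n, m) squared L2 distances
    denom = rho + d2.min(dim=0).values     # (m,) per-target normalizer
    s = d2 / denom                         # normalized similarity scores s_ij
    return s.argmin(dim=1)                 # f(i) = argmin_k s_{i,k}
```

With ρ = 0, a K_j that already has a very close match becomes expensive for every other query, so the selection spreads over targets that have not been used yet; letting ρ grow large reduces s_ij to a plain squared distance and the selection to an ordinary nearest neighbor, as the text explains.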
The looping loss is then defined as the MSE loss between " + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_i" + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{f(i)}" + }, + { + "bbox": [ + 306, + 473, + 545, + 522 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 345, + 529, + 545, + 561 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 529, + 545, + 561 + ], + "spans": [ + { + "bbox": [ + 345, + 529, + 545, + 561 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {l o o p}} = \\frac {1}{n h w} \\sum_ {\\text {p i x e l}} \\sum_ {i = 1} ^ {n} \\| \\mathbf {Q} _ {i} - \\mathbf {K} _ {f (i)} \\| _ {2} ^ {2}, \\tag {10}", + "image_path": "0c764cbfa2f58b8a8ce5048c406cb17bebf53e4824623f0fd43e0f3ad81fbdfd.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 567, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 567, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 305, + 567, + 545, + 592 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 567, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\sum_{pixel}" + }, + { + "bbox": [ + 305, + 567, + 545, + 592 + ], + "type": "text", + "content": " indicates that the term is summed over all the pixel locations of the rendered video." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "content": "Pyramid Training. In the implementation, we adopt a pyramid training scheme. In the coarse level, we downsample both the input video and the MTV. The downsampling of the MTV is conducted by downsampling the tiles. We start from the coarsest level with downsample factor 0.24 and train the MTV representation for 50 epochs. We then upsample each tile by " + }, + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "inline_equation", + "content": "1.4 \\times" + }, + { + "bbox": [ + 304, + 605, + 545, + 712 + ], + "type": "text", + "content": " and repeat the training step. We show that the pyramid training scheme can improve the generation results." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "text", + "content": "314" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 82, + 70, + 511, + 144 + ], + "blocks": [ + { + "bbox": [ + 82, + 70, + 511, + 144 + ], + "lines": [ + { + "bbox": [ + 82, + 70, + 511, + 144 + ], + "spans": [ + { + "bbox": [ + 82, + 70, + 511, + 144 + ], + "type": "table", + "html": "
<table><tr><th></th><th>VLPIPS↓</th><th>STDerr↓</th><th>Com.↓</th><th>Coh.↓</th><th>LoopQ↓</th><th># Params.↓</th><th>Render Spd↑</th></tr>
<tr><td>Ours</td><td>0.1392</td><td>56.02</td><td>10.65</td><td>9.269</td><td>9.263</td><td>33M-184M</td><td>140fps</td></tr>
<tr><td>VBR</td><td>0.2074</td><td>82.36</td><td>12.98</td><td>11.42</td><td>11.49</td><td>300M</td><td>20fps</td></tr>
<tr><td>loop2D + MTV</td><td>0.2447</td><td>118.9</td><td>11.83</td><td>9.919</td><td>9.927</td><td>33M-184M</td><td>140fps</td></tr>
<tr><td>loop2D + MPV</td><td>0.2546</td><td>117.5</td><td>11.82</td><td>9.817</td><td>9.840</td><td>2123M</td><td>110fps</td></tr>
<tr><td>loop2D + DyNeRF</td><td>0.2282</td><td>123.7</td><td>11.93</td><td>10.23</td><td>10.27</td><td>2M</td><td>0.1fps</td></tr></table>
", + "image_path": "7900b94c79fbad5437d514bed3a1ad9b73168d71f4e5378a8702b602fae6b7fb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 148, + 546, + 170 + ], + "lines": [ + { + "bbox": [ + 46, + 148, + 546, + 170 + ], + "spans": [ + { + "bbox": [ + 46, + 148, + 546, + 170 + ], + "type": "text", + "content": "Table 1. Quantitative comparison of reconstruction quality and efficiency. " + }, + { + "bbox": [ + 46, + 148, + 546, + 170 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 46, + 148, + 546, + 170 + ], + "type": "text", + "content": " (↑) indicates lower (higher) is better. Our method produces the best quality and strikes a good balance between the number of parameters and rendering speed." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 48, + 183, + 287, + 323 + ], + "blocks": [ + { + "bbox": [ + 48, + 183, + 287, + 323 + ], + "lines": [ + { + "bbox": [ + 48, + 183, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 48, + 183, + 287, + 323 + ], + "type": "image", + "image_path": "8049ebae6245ab91d94956eea9b54e801123a2a18d9e0ad5dc1086ef05aa07eb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 330, + 288, + 353 + ], + "lines": [ + { + "bbox": [ + 46, + 330, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 330, + 288, + 353 + ], + "type": "text", + "content": "Figure 6. Qualitative comparison with other baselines. Our method produces the sharpest results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 361, + 128, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 361, + 128, + 373 + ], + "spans": [ + { + "bbox": [ + 47, + 361, + 128, + 373 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 380, + 180, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 180, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 180, + 393 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": "We captured 16 scenes for quantitative and qualitative studies. For each scene, we captured 8-10 views in a faceforward manner using a Sony " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\alpha 9" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": " II camera. We captured each view at 25 fps for 10-20 seconds. We downsample each video to a resolution of " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "640 \\times 360" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": ". Finally, we randomly select one view for evaluation. The others are used for constructing MTVs using the two-stage pipeline. 
In the first stage, we empirically set " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\lambda_{tv} = 0.5" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\lambda_{spa} = 0.004" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": ". We construct MPI with " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "D = 32" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": " layers. In the second stage, we let the hyperparameter " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "\\rho = 0" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": " to guarantee maximum completeness. We extract 3D patches with spatial dimension 11 and temporal dimension 3. We construct MTVs with approximately 50 frames, i.e., 2 seconds. We set the rendering window in each iteration to " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "h = 180" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "inline_equation", + "content": "w = 320" + }, + { + "bbox": [ + 46, + 399, + 287, + 579 + ], + "type": "text", + "content": " for both stages." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 586, + 105, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 105, + 598 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 105, + 598 + ], + "type": "text", + "content": "4.2. Metrics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 689 + ], + "type": "text", + "content": "For our quantitative study, we synthesize looping videos in test views using the reconstructed 3D video representation and compare the synthetic results with captured target videos. However, we do not have paired ground truth videos since we generate 3D videos with completely asynchronous inputs. Therefore, we adopt several intuitive metrics to evaluate the results in spatial and temporal aspects." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 287, + 713 + ], + "type": "text", + "content": "Spatial Quality. We evaluate the spatial quality of a synthetic frame by computing the LPIPS value [55] between" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 185, + 545, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 185, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 304, + 185, + 545, + 232 + ], + "type": "text", + "content": "the synthetic frame with the frame in the target video that is most similar in terms of LPIPS. We average the values among all the 50 synthetic frames, which we denote as VLPIPS." 
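A sketch of the VLPIPS computation just described, assuming some callable `lpips_fn(a, b)` that returns the LPIPS distance between two image batches (e.g. from the `lpips` PyPI package); the frame layout and names are assumptions.

```python
import torch

def vlpips(synth: torch.Tensor, target: torch.Tensor, lpips_fn) -> torch.Tensor:
    """VLPIPS: for each synthetic frame take the LPIPS distance to its
    best-matching target frame, then average over all synthetic frames.

    synth:    (T_s, 3, H, W) synthetic looping video (T_s = 50 in the paper).
    target:   (T_t, 3, H, W) captured target video.
    lpips_fn: callable returning the LPIPS distance between two frames.
    """
    per_frame = []
    for f in synth:
        dists = torch.stack([lpips_fn(f.unsqueeze(0), g.unsqueeze(0)).squeeze()
                             for g in target])
        per_frame.append(dists.min())   # closest target frame for this frame
    return torch.stack(per_frame).mean()
```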
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "text", + "content": "Temporal Quality. Given two videos that have similar dynamism, they should have similar color distribution in each pixel location. We measure the temporal quality of the synthetic videos by first computing the standard deviation (STD) of the RGB color at each pixel location of the synthetic video and the target video, resulting in two STD maps of dimension " + }, + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "inline_equation", + "content": "H \\times W \\times 3" + }, + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "text", + "content": ". We then compute " + }, + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "inline_equation", + "content": "STDerr" + }, + { + "bbox": [ + 304, + 232, + 546, + 328 + ], + "type": "text", + "content": " by measuring the MSE between the two maps." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 329, + 546, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 329, + 546, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 329, + 546, + 472 + ], + "type": "text", + "content": "Spatio-temporal Quality. We evaluate the spatio-temporal similarity between the synthetic and target videos following the bidirectional similarity (BDS) [43]. We individually report Completeness and Coherence scores (abbreviated as Com. and Coh., respectively) by extracting and finding nearest neighbor 3D patches in two directions. Specifically, for each patch in the target video, we find the closest patches in the synthetic video for Com. and vice-versa. We measure the distance of two 3D patches using MSE, and the final scores are the averages of multiple different patch configurations of size and stride. We present the details of the patch configurations in the supplementary material." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 473, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 473, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 473, + 545, + 555 + ], + "type": "text", + "content": "In addition, we use a metric similar to Coh. to measure the loop quality (LoopQ), which reflects the coherence of the looping video when switching from the last frame back to the first frame. This is achieved by extracting the 3D patches that overlap with the first and last frame, as shown by the blue rectangles in Fig. 5. Other steps remain the same as the Coh. score." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 563, + 391, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 391, + 576 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 391, + 576 + ], + "type": "text", + "content": "4.3. Comparisons" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "content": "We first compare with VBR [46] by implementing it based on the descriptions in the paper since the code and data are not publicly available. We also compare with straightforward solutions that lift classical 2D looping algorithms to 3D. 
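The temporal-quality metric (STDerr) defined in Sec. 4.2 translates directly into a few tensor operations; video shapes here are assumptions.

```python
import torch

def std_err(synth: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """STDerr: MSE between per-pixel RGB standard-deviation maps.

    synth, target: videos of shape (T, H, W, 3). Each STD map has shape
    (H, W, 3); the mean squared difference between the two maps is returned.
    """
    std_s = synth.float().std(dim=0)
    std_t = target.float().std(dim=0)
    return ((std_s - std_t) ** 2).mean()
```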
Specifically, we first generate a 2D looping video for each of the input videos using the method of Liao et al. [23]. And then we construct various scene representations using the 2D looping video and synthesize novel views. We compare with our sparse MTV representation " + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "inline_equation", + "content": "(loop2D + MTV)" + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "content": ", the Multi-plane Video representation " + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "inline_equation", + "content": "(loop2D + MPV)" + }, + { + "bbox": [ + 304, + 582, + 546, + 713 + ], + "type": "text", + "content": " and the dynamic NeRF representation" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 734, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 734, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 298, + 734, + 312, + 742 + ], + "type": "text", + "content": "315" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 127, + 168 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 127, + 168 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 127, + 168 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 127, + 168 + ], + "type": "image", + "image_path": "b8157b026dcfd215d567c22117981b60e70bf19ab52e5e7ebfdf59a28e481ac2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 171, + 545, + 192 + ], + "lines": [ + { + "bbox": [ + 46, + 171, + 545, + 192 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 545, + 192 + ], + "type": "text", + "content": "Figure 7. We visualize the pixel-wise " + }, + { + "bbox": [ + 46, + 171, + 545, + 192 + ], + "type": "inline_equation", + "content": "STDerr" + }, + { + "bbox": [ + 46, + 171, + 545, + 192 + ], + "type": "text", + "content": " value for each method. Our method has a lower error, indicating that our approach best retains the dynamism of the scene. We recommend readers watch the supplemental video, where the difference is more noticeable." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 129, + 70, + 206, + 168 + ], + "blocks": [ + { + "bbox": [ + 129, + 70, + 206, + 168 + ], + "lines": [ + { + "bbox": [ + 129, + 70, + 206, + 168 + ], + "spans": [ + { + "bbox": [ + 129, + 70, + 206, + 168 + ], + "type": "image", + "image_path": "e1c41d8094f406775c2bb0fe6970b93d5b99309f1b0489f3728ed8fd7802d3c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 206, + 70, + 285, + 168 + ], + "blocks": [ + { + "bbox": [ + 206, + 70, + 285, + 168 + ], + "lines": [ + { + "bbox": [ + 206, + 70, + 285, + 168 + ], + "spans": [ + { + "bbox": [ + 206, + 70, + 285, + 168 + ], + "type": "image", + "image_path": "a8eb22b2c555e44e6a1936c240e6bd2115a0bc45c960857cb61571a79b0e50fb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 285, + 70, + 364, + 168 + ], + "blocks": [ + { + "bbox": [ + 285, + 70, + 364, + 168 + ], + "lines": [ + { + "bbox": [ + 285, + 70, + 364, + 168 + ], + "spans": [ + { + "bbox": [ + 285, + 70, + 364, + 168 + ], + "type": "image", + "image_path": "0780bac44544fbf7c4e2fed1fa9e83d9eff794d06191c95011430f827d10eba0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 364, + 70, + 441, + 169 + ], + "blocks": [ + { + "bbox": [ + 364, + 70, + 441, + 169 + ], + "lines": [ + { + "bbox": [ + 364, + 70, + 441, + 169 + ], + "spans": [ + { + "bbox": [ + 364, + 70, + 441, + 169 + ], + "type": "image", + "image_path": "cb70da44993d28a0c8df9973d533f696d373a89d0b2ad418c74f2354af9b5a60.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 441, + 70, + 521, + 169 + ], + "blocks": [ + { + "bbox": [ + 441, + 70, + 521, + 169 + ], + "lines": [ + { + "bbox": [ + 441, + 70, + 521, + 169 + ], + "spans": [ + { + "bbox": [ + 441, + 70, + 521, + 169 + ], + "type": "image", + "image_path": "7c2cb70165326bd67f67ea5e012759250e4e23e9608043d1cedf2bd421722bf2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 521, + 70, + 545, + 168 + ], + "blocks": [ + { + "bbox": [ + 521, + 70, + 545, + 168 + ], + "lines": [ + { + "bbox": [ + 521, + 70, + 545, + 168 + ], + "spans": [ + { + "bbox": [ + 521, + 70, + 545, + 168 + ], + "type": "image", + "image_path": "02b6e4a1ec281e20334e62c474d4eeaaa79bdceb686fb502ad327100f725db21.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 49, + 196, + 146, + 306 + ], + "blocks": [ + { + "bbox": [ + 49, + 196, + 146, + 306 + ], + "lines": [ + { + "bbox": [ + 49, + 196, + 146, + 306 + ], + "spans": [ + { + "bbox": [ + 49, + 196, + 146, + 306 + ], + "type": "image", + "image_path": "5bbb4bbb138a6b2e9e49a95237ef2ae04c302befb1654652d9f799bab0ac7301.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 308, + 440, + 319 + ], + "lines": [ + { + "bbox": [ + 151, + 308, + 440, + 319 + ], + "spans": [ + { + "bbox": [ + 151, + 308, + 440, + 319 + ], + "type": "text", + "content": "Figure 8. Results of our ablations. Our full model produces the fewest artifacts." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 147, + 196, + 246, + 306 + ], + "blocks": [ + { + "bbox": [ + 147, + 196, + 246, + 306 + ], + "lines": [ + { + "bbox": [ + 147, + 196, + 246, + 306 + ], + "spans": [ + { + "bbox": [ + 147, + 196, + 246, + 306 + ], + "type": "image", + "image_path": "980b12aa297fe88048109321f0fe2c3648ef6ebc5fa46e2128a654c40139c243.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 247, + 196, + 345, + 306 + ], + "blocks": [ + { + "bbox": [ + 247, + 196, + 345, + 306 + ], + "lines": [ + { + "bbox": [ + 247, + 196, + 345, + 306 + ], + "spans": [ + { + "bbox": [ + 247, + 196, + 345, + 306 + ], + "type": "image", + "image_path": "1f0220714d2ab30b1c0d3c941fafb7155bd9f8586a5b7e56d981ac0d1dbc04eb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 346, + 196, + 445, + 306 + ], + "blocks": [ + { + "bbox": [ + 346, + 196, + 445, + 306 + ], + "lines": [ + { + "bbox": [ + 346, + 196, + 445, + 306 + ], + "spans": [ + { + "bbox": [ + 346, + 196, + 445, + 306 + ], + "type": "image", + "image_path": "b1c7688eb3fd9070a3ebeb3a0efaef2a5a23506e1b747c8f7c335e5492f73d1a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 446, + 196, + 545, + 306 + ], + "blocks": [ + { + "bbox": [ + 446, + 196, + 545, + 306 + ], + "lines": [ + { + "bbox": [ + 446, + 196, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 446, + 196, + 545, + 306 + ], + "type": "image", + "image_path": "d637ce61efeaabd210355e441c0a0ef05dc7c39dabf64e85440f76d827176425.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 48, + 322, + 286, + 396 + ], + "blocks": [ + { + "bbox": [ + 48, + 322, + 286, + 396 + ], + "lines": [ + { + "bbox": [ + 48, + 322, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 322, + 286, + 396 + ], + "type": "table", + "html": "
<table><tr><th></th><th>VLPIPS ↓</th><th>STDerr ↓</th><th>Com. ↓</th><th>Coh. ↓</th><th>LoopQ ↓</th></tr>
<tr><td>Ours</td><td>0.1392</td><td>56.02</td><td>10.65</td><td>9.269</td><td>9.263</td></tr>
<tr><td>w/o pad</td><td>0.1387</td><td>55.67</td><td>10.66</td><td>9.273</td><td>9.395</td></tr>
<tr><td>w/o 2stage</td><td>0.1755</td><td>67.99</td><td>11.69</td><td>9.982</td><td>10.13</td></tr>
<tr><td>w/o pyr</td><td>0.1412</td><td>57.41</td><td>10.86</td><td>9.555</td><td>9.465</td></tr>
<tr><td>w/o tv</td><td>0.1530</td><td>56.51</td><td>11.12</td><td>9.766</td><td>9.689</td></tr></table>
", + "image_path": "d11a84cfe949f2052d3081a991a003696db3849208a0e54fd59e9206a5b679e6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 397, + 286, + 418 + ], + "lines": [ + { + "bbox": [ + 46, + 397, + 286, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 397, + 286, + 418 + ], + "type": "text", + "content": "Table 2. Ablations of our method. " + }, + { + "bbox": [ + 46, + 397, + 286, + 418 + ], + "type": "inline_equation", + "content": "\\downarrow \\left( \\uparrow \\right)" + }, + { + "bbox": [ + 46, + 397, + 286, + 418 + ], + "type": "text", + "content": " indicates lower (higher) is better. (best in bold, and second best underlined)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 426, + 173, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 173, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 173, + 437 + ], + "type": "text", + "content": "tion [21] " + }, + { + "bbox": [ + 47, + 426, + 173, + 437 + ], + "type": "inline_equation", + "content": "(loop2D + DyNeRF)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 438, + 287, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 287, + 701 + ], + "type": "text", + "content": "We compare our method with the four baselines on our captured dataset. We synthesize novel view videos and report VLPIPS, STDerr, Com., Coh. and LoopQ metrics in Tab. 1. Our method outperforms other baselines in terms of visual quality, scene dynamism preservation, spatio-temporal consistency, and loop quality. We show the qualitative comparison in Fig. 6. We also visualize the STDerr value for each pixel in Fig. 7, which reflects the difference in dynamism between the synthetic results and the reference. We recommend that readers also see the video results included in the supplementary material. Note that our method produces the sharpest results, while best retaining the dynamism of the scene. VBR directly blends inconsistent videos from multiple input views. and the 2D looping baselines fail to consider multi-view information and produce view-inconsistent looping videos. As a result, they tend to blur out spatial and temporal details to compensate for view inconsistencies. We observe that loop2D+DyNeRF also generates sharper results compared with the other two baselines. This is because DyNeRF conditions on the view direction and tolerates the view inconsistency. However, it performs poorly in maintaining the dynamism of the scene." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 59, + 702, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 702, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 702, + 286, + 713 + ], + "type": "text", + "content": "Additionally, we measure the efficiency of the scene rep" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "spans": [ + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "text", + "content": "resentations using several metrics. We first show the number of parameters (# Params.) of the model to represent a dynamic 3D volume of 50 frames. 
We evaluate rendering speed (Render Spd) at a " + }, + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "inline_equation", + "content": "360 \\times 640" + }, + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "text", + "content": " resolution on a laptop equipped with an RTX 2060 GPU. We present the metrics in Tab. 1. Since the MTV representation varies with different scenes, we report the maximum and minimum values when evaluated in our dataset. We can see that our method surpasses VBR in # Params. and Render Spd. Compared with MPV that densely stores the scene parameters in a 4D volume, our sparse MTV representation can reduce the number of parameters by up to " + }, + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 304, + 323, + 545, + 538 + ], + "type": "text", + "content": ", resulting in a slightly faster rendering speed and much smaller memory and disk usage. On the other hand, despite the surprisingly small number of parameters, the NeRF representation has extremely slow rendering speed. In other words, our MTV representation achieves the best trade-off between the number of parameters and rendering efficiency." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 550, + 406, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 550, + 406, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 550, + 406, + 562 + ], + "type": "text", + "content": "4.4. Ablation Studies" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "We conducted extensive ablation studies of our method to test the effectiveness of several design decisions in our pipeline by individually removing each component and constructing 3D looping videos from our dataset. We experimented on the following components: the frame padding operation as illustrated in Fig. 5 when computing " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{loop}" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " (w/o pad), the two-stage training pipeline (w/o 2stage), the coarse-to-fine training strategy (w/o pyr), and the TV regularization (w/o tv). The numerical results are shown in Tab. 2, and qualitative results are presented in Fig. 8 and Fig. 9. We also experimented with different values of " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " to understand the resulting effect." 
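For reference, the coarse-to-fine (pyramid) schedule ablated here as w/o pyr follows the factors given in Sec. 3.5 (start at a 0.24 resolution factor, enlarge tiles by 1.4x per level, 50 epochs each); every name in this rough sketch is a placeholder, and the per-level optimizer is left abstract.

```python
def train_pyramid(mtv, video, optimize_level, start=0.24, growth=1.4, epochs=50):
    """Coarse-to-fine optimization: run the looping loss at increasing resolutions.

    mtv, video:      the tiled representation and the input videos (placeholders).
    optimize_level:  callable that runs `epochs` epochs at the given resolution
                     factor, downsampling the video and tiles internally.
    """
    factor = start
    while factor <= 1.0:
        optimize_level(mtv, video, factor, epochs)
        if factor == 1.0:
            break
        factor = min(factor * growth, 1.0)  # upsample tiles by 1.4x, capped at full res
```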
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 734, + 313, + 742 + ], + "type": "text", + "content": "316" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 287, + 178 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 287, + 178 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 287, + 178 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 287, + 178 + ], + "type": "image", + "image_path": "ed4c4cb65b00f5088b02f8b20aa53e2d9284cd7302d26f94922e0c434140d8ce.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 182, + 287, + 248 + ], + "lines": [ + { + "bbox": [ + 46, + 182, + 287, + 248 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 287, + 248 + ], + "type": "text", + "content": "Figure 9. Ablations for the padding operation. In the second row, we visualize the temporal coherence by flattening the pixels in the green line along the time axis and repeating 3 times. Red rectangles highlight the discontinuity produced without the padding operation. We encourage readers to refer to the video results for a clearer demonstration." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 255, + 287, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 287, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 287, + 386 + ], + "type": "text", + "content": "Padding Operation. As shown in Tab. 2, without the padding operation, our method can still produce competitive results in terms of spatial quality and spatio-temporal consistency. It even has better temporal quality. This is because the padding operation adds extra boundary conditions to the optimization, making the optimization more difficult. However, as highlighted in the red rectangles in Fig. 9, without padding, our method is less prone to generate a properly looping video since it can not guarantee a smooth transition from the last frame to the first frame, leading to a lower loop quality score." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 388, + 287, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 287, + 470 + ], + "type": "text", + "content": "Two-stage Pipeline. It can be seen from Tab. 2 that the two-stage pipeline plays an important role in generating high-quality results. Without the two-stage pipeline, where we directly optimize a dense MPV representation using the looping loss, the MPV easily gets trapped into view-inconsistent results, leading to significant drop in every metric evaluated." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 555 + ], + "type": "text", + "content": "Coarse-to-fine Training. Results also show that the coarse-to-fine training scheme produces slightly better spatial and temporal quality than optimizing only on the finest level. This is because the patch-based optimization has a wider perceptual field at the coarse level, leading to a better global solution. 
Therefore, our full model tends to produce fewer artifacts compared with the w/o pyr model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 557, + 287, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 287, + 605 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 287, + 605 + ], + "type": "text", + "content": "TV Regularization. We find it necessary to apply TV regularization, since the pipeline tends to generate MTVs with holes without this regularization, as shown in Fig. 8, which greatly affects the visual quality." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Weight for " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{spa}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ". We experimented on different values of " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " on one scene. We plot the relationship between Coh. scores and # Params. with respect to " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ". We can see that when " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa} = 0" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": ", the reconstructed MTV is less sparse, which degenerates to a dense representation. This makes it harder to optimize and leads to a worse Coh. score. Then # Params. and Coh. drop rapidly as " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " grow. However, if " + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": " is larger than a threshold, Coh. increases again, while the improvement on # Params. is less substantial. This" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 542, + 134 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 542, + 134 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 542, + 134 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 542, + 134 + ], + "type": "image", + "image_path": "43c7a6437d3c6af60f5749ab57b00f6bb1c0d6778ac7cdcb418744afcb9a15d3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 136, + 545, + 159 + ], + "lines": [ + { + "bbox": [ + 306, + 136, + 545, + 159 + ], + "spans": [ + { + "bbox": [ + 306, + 136, + 545, + 159 + ], + "type": "text", + "content": "Figure 10. The trend of Coh. score and # Params. under different " + }, + { + "bbox": [ + 306, + 136, + 545, + 159 + ], + "type": "inline_equation", + "content": "\\lambda_{spa}" + }, + { + "bbox": [ + 306, + 136, + 545, + 159 + ], + "type": "text", + "content": ". The green line is the value we use in all other experiments." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 171, + 545, + 232 + ], + "blocks": [ + { + "bbox": [ + 307, + 171, + 545, + 232 + ], + "lines": [ + { + "bbox": [ + 307, + 171, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 307, + 171, + 545, + 232 + ], + "type": "image", + "image_path": "c8dd751d2c77b8519db812a661e676dca324465a61917667751f219647f255ce.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 236, + 522, + 247 + ], + "lines": [ + { + "bbox": [ + 329, + 236, + 522, + 247 + ], + "spans": [ + { + "bbox": [ + 329, + 236, + 522, + 247 + ], + "type": "text", + "content": "Figure 11. Controlling the dynamism by changing " + }, + { + "bbox": [ + 329, + 236, + 522, + 247 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 329, + 236, + 522, + 247 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 258, + 545, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 307 + ], + "type": "text", + "content": "is because the excessive sparseness causes the tile-culling process to over-cull necessary tiles, resulting in holes in the rendering results. Therefore, we chose " + }, + { + "bbox": [ + 304, + 258, + 545, + 307 + ], + "type": "inline_equation", + "content": "\\lambda_{spa} = 0.004" + }, + { + "bbox": [ + 304, + 258, + 545, + 307 + ], + "type": "text", + "content": " (green line in Fig. 10) in other experiments." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "text", + "content": "Value of " + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "text", + "content": ". In the experiments, we use " + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "inline_equation", + "content": "\\rho = 0" + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "text", + "content": " to ensure maximum completeness with respect to the input video. However, we find that by controlling the hyperparameter " + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 311, + 545, + 371 + ], + "type": "text", + "content": ", we could control the degree of dynamism of the reconstructed 3D video. One example is shown in Fig. 11." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 394, + 457, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 394, + 457, + 406 + ], + "spans": [ + { + "bbox": [ + 306, + 394, + 457, + 406 + ], + "type": "text", + "content": "5. Discussion and Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 418, + 545, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 418, + 545, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 418, + 545, + 574 + ], + "type": "text", + "content": "Limitations and Future Work. Our method comes with some limitations. 
First, since the MTV representation does not condition on view direction, it fails to model complex view-dependent effects, such as non-planar specular. One possible way to improve the representation is by introducing view-dependency, such as spherical harmonics [53] or neural basis function [51]. Another limitation is that we assume the scene to possess a looping pattern, which works best for natural scenes like flowing water and waving trees. However, if the scene is not loopable, our method tends to fail because each view has a completely unique content. This leads to a highly ill-posed problem in constructing a looping video from the asynchronous input videos." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 578, + 545, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 578, + 545, + 661 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 545, + 661 + ], + "type": "text", + "content": "Conclusion. In this paper, we propose a practical solution for constructing a 3D looping video representation given completely asynchronous multi-view videos. Experiments verify the effectiveness of our pipeline and demonstrate significant improvement in quality and efficiency over several baselines. We hope that this work will further motivate research into dynamic 3D scene reconstruction." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Acknowledgements. The authors from HKUST were partially supported by the Hong Kong Research Grants Council (RGC). The author from CityU was partially supported by an ECS grant from the RGC (Project No. CityU 21209119)." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 297, + 734, + 312, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 734, + 312, + 742 + ], + "spans": [ + { + "bbox": [ + 297, + 734, + 312, + 742 + ], + "type": "text", + "content": "317" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 289, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Aseem Agarwala, Ke Colin Zheng, Chris Pal, Maneesh Agrawala, Michael Cohen, Brian Curless, David Salesin, and Richard Szeliski. Panoramic video textures. ACM Trans. Graph., 24(3):821-827, jul 2005. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 288, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 288, + 191 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 288, + 191 + ], + "type": "text", + "content": "[2] Benjamin Attal, Eliot Laidlaw, Aaron Gokaslan, Changil Kim, Christian Richardt, James Tompkin, and Matthew O'Toole. Törf: Time-of-flight radiance fields for dynamic scene view synthesis. 
Advances in neural information processing systems, 34:26289-26301, 2021. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 289, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 289, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 289, + 247 + ], + "type": "text", + "content": "[3] Aayush Bansal, Minh Vo, Yaser Sheikh, Deva Ramanan, and Srinivasa Narasimhan. 4d visualization of dynamic events from unconstrained multi-view videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5366-5375, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 288, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 303 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 303 + ], + "type": "text", + "content": "[4] Michael Broxton, John Flynn, Ryan Overbeck, Daniel Erickson, Peter Hedman, Matthew Duvall, Jason Dourgarian, Jay Busch, Matt Whalen, and Paul Debevec. Immersive light field video with a layered mesh representation. ACM Transactions on Graphics (TOG), 39(4):86-1, 2020. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 302, + 288, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 288, + 357 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 288, + 357 + ], + "type": "text", + "content": "[5] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 357, + 288, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 357, + 288, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 357, + 288, + 402 + ], + "type": "text", + "content": "[6] Hongrui Cai, Wanquan Feng, Xuetao Feng, Yan Wang, and Juyong Zhang. Neural surface reconstruction of dynamic scenes with monocular rgb-d camera. arXiv preprint arXiv:2206.15258, 2022. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 403, + 288, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 403, + 288, + 458 + ], + "spans": [ + { + "bbox": [ + 53, + 403, + 288, + 458 + ], + "type": "text", + "content": "[7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII, pages 333-350. Springer, 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 458, + 288, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 458, + 288, + 491 + ], + "spans": [ + { + "bbox": [ + 53, + 458, + 288, + 491 + ], + "type": "text", + "content": "[8] Siming Fan, Jingtan Piao, Chen Qian, Kwan-Yee Lin, and Hongsheng Li. Simulating fluids in real-world still images. arXiv preprint arXiv:2204.11335, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 53, + 491, + 288, + 557 + ], + "type": "text", + "content": "[9] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 558, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 613 + ], + "type": "text", + "content": "[10] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5515-5524, 2016. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 288, + 669 + ], + "type": "text", + "content": "[11] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 670, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 714 + ], + "type": "text", + "content": "[12] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5712-5721, 2021. 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 138 + ], + "type": "text", + "content": "[13] Niv Granot, Ben Feinstein, Assaf Shocher, Shai Bagon, and Michal Irani. Drop the gan: In defense of patches nearest neighbors as single image generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13460-13469, June 2022. 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 139, + 547, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 139, + 547, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 139, + 547, + 183 + ], + "type": "text", + "content": "[14] Niv Haim, Ben Feinstein, Niv Granot, Assaf Shocher, Shai Bagon, Tali Dekel, and Michal Irani. Diverse generation from a single video made possible. arXiv preprint arXiv:2109.08591, 2021. 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 183, + 547, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 183, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 183, + 547, + 239 + ], + "type": "text", + "content": "[15] Tavi Halperin, Hanit Hakim, Orestis Vantzos, Gershon Hochman, Netai Benaim, Lior Sassy, Michael Kupchik, Ofir Bibi, and Ohad Fried. Endless loops: detecting and animating periodic patterns in still images. ACM Transactions on Graphics (TOG), 40(4):1-12, 2021. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 239, + 547, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 547, + 272 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 547, + 272 + ], + "type": "text", + "content": "[16] Mingming He, Jing Liao, Pedro V Sander, and Hugues Hoppe. Gigapixel panorama video loops. ACM Transactions on Graphics (TOG), 37(1):1-15, 2017. 1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 272, + 547, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 272, + 547, + 327 + ], + "spans": [ + { + "bbox": [ + 307, + 272, + 547, + 327 + ], + "type": "text", + "content": "[17] Peter Hedman, Pratul P Srinivasan, Ben Mildenhall, Jonathan T Barron, and Paul Debevec. Baking neural radiance fields for real-time view synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5875-5884, 2021. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 327, + 547, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 547, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 547, + 381 + ], + "type": "text", + "content": "[18] Aleksander Holynski, Brian L Curless, Steven M Seitz, and Richard Szeliski. Animating pictures with eulerian motion fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5810-5819, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 382, + 507, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 382, + 507, + 393 + ], + "spans": [ + { + "bbox": [ + 307, + 382, + 507, + 393 + ], + "type": "text", + "content": "[19] Apple Inc. Take and edit live photos, Oct 2021. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 393, + 547, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 393, + 547, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 393, + 547, + 449 + ], + "type": "text", + "content": "[20] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 449, + 547, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 547, + 525 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 547, + 525 + ], + "type": "text", + "content": "[21] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022. 2, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 525, + 547, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 525, + 547, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 525, + 547, + 581 + ], + "type": "text", + "content": "[22] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6498-6508, 2021. 
3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 581, + 547, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 581, + 547, + 613 + ], + "spans": [ + { + "bbox": [ + 307, + 581, + 547, + 613 + ], + "type": "text", + "content": "[23] Jing Liao, Mark Finch, and Hugues Hoppe. Fast computation of seamless video loops. ACM Transactions on Graphics (TOG), 34(6):1-10, 2015. 1, 2, 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 614, + 547, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 614, + 547, + 647 + ], + "spans": [ + { + "bbox": [ + 307, + 614, + 547, + 647 + ], + "type": "text", + "content": "[24] Zicheng Liao, Neel Joshi, and Hugues Hoppe. Automated video looping with progressive dynamism. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013. 1, 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 647, + 547, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 547, + 692 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 547, + 692 + ], + "type": "text", + "content": "[25] Kai-En Lin, Lei Xiao, Feng Liu, Guowei Yang, and Ravi Ramamoorthi. Deep 3d mask volume for view synthesis of dynamic scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1749–1758, 2021. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 692, + 547, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 692, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 692, + 547, + 714 + ], + "type": "text", + "content": "[26] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. Advances" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "text", + "content": "318" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 286, + 94 + ], + "type": "text", + "content": "in Neural Information Processing Systems, 33:15651-15663, 2020. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[27] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019. 2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 195 + ], + "type": "text", + "content": "[28] Stephen Lombardi, Tomas Simon, Gabriel Schwartz, Michael Zollhoefer, Yaser Sheikh, and Jason Saragih. Mixture of volumetric primitives for efficient neural rendering. 
ACM Transactions on Graphics (TOG), 40(4):1-13, 2021. 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 198, + 287, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 198, + 287, + 241 + ], + "spans": [ + { + "bbox": [ + 48, + 198, + 287, + 241 + ], + "type": "text", + "content": "[29] Aniruddha Mahapatra and Kuldeep Kulkarni. Controllable animation of fluid elements in still images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3667-3676, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 243, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 243, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 243, + 287, + 297 + ], + "type": "text", + "content": "[30] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019. 1, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 299, + 287, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 299, + 287, + 352 + ], + "spans": [ + { + "bbox": [ + 48, + 299, + 287, + 352 + ], + "type": "text", + "content": "[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 2, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 354, + 287, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 287, + 398 + ], + "type": "text", + "content": "[32] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, July 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 399, + 287, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 287, + 464 + ], + "type": "text", + "content": "[33] Medhini Narasimhan, Shiry Ginosar, Andrew Owens, Alexei A. Efros, and Trevor Darrell. Strumming to the beat: Audio-conditioned contrastive video textures. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 3761-3770, January 2022. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 466, + 287, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 466, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 48, + 466, + 287, + 521 + ], + "type": "text", + "content": "[34] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5865-5874, 2021. 
2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 48, + 522, + 287, + 577 + ], + "type": "text", + "content": "[35] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Ricardo MartinBrualla, and Steven M Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. arXiv preprint arXiv:2106.13228, 2021. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 633 + ], + "type": "text", + "content": "[36] Alex Rav-Acha, Yael Pritch, Dani Lischinski, and Shmuel Peleg. Dynamosaics: Video mosaics with non-chronological time. In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), volume 1, pages 58-65. IEEE, 2005. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 635, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 287, + 656 + ], + "type": "text", + "content": "[37] Gernot Riegler and Vladlen Koltun. Free view synthesis. In European Conference on Computer Vision, 2020. 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 658, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 287, + 689 + ], + "type": "text", + "content": "[38] Gernot Riegler and Vladlen Koltun. Stable view synthesis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021. 2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 713 + ], + "type": "text", + "content": "[39] Leonid I Rudin and Stanley Osher. Total variation based image restoration with free local constraints. In Proceedings of" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "1st international conference on image processing, volume 1, pages 31-35. IEEE, 1994. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 140 + ], + "type": "text", + "content": "[40] Arno Schödl, Richard Szeliski, David H Salesin, and Irfan Essa. Video textures. In Proceedings of the 27th annual conference on Computer graphics and interactive techniques, pages 489-498, 2000. 
2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 175 + ], + "type": "text", + "content": "[41] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 176, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 176, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 307, + 176, + 545, + 219 + ], + "type": "text", + "content": "[42] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In European Conference on Computer Vision (ECCV), 2016. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 221, + 545, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 221, + 545, + 264 + ], + "spans": [ + { + "bbox": [ + 308, + 221, + 545, + 264 + ], + "type": "text", + "content": "[43] Denis Simakov, Yaron Caspi, Eli Shechtman, and Michal Irani. Summarizing visual data using bidirectional similarity. In 2008 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2008. 5, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 266, + 545, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 266, + 545, + 320 + ], + "spans": [ + { + "bbox": [ + 308, + 266, + 545, + 320 + ], + "type": "text", + "content": "[44] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 323, + 545, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 323, + 545, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 323, + 545, + 365 + ], + "type": "text", + "content": "[45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. ACM Transactions on Graphics (TOG), 38(4):1-12, 2019. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 367, + 545, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 367, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 308, + 367, + 545, + 422 + ], + "type": "text", + "content": "[46] Théo Thonat, Yagiz Aksoy, Miika Aittala, Sylvain Paris, Frédo Durand, and George Drettakis. Video-based rendering of dynamic stationary environments from unsynchronized inputs. In Computer Graphics Forum, volume 40, pages 73-86. Wiley Online Library, 2021. 1, 2, 3, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 423, + 545, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 423, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 308, + 423, + 545, + 489 + ], + "type": "text", + "content": "[47] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12959-12970, 2021. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 491, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 491, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 491, + 545, + 533 + ], + "type": "text", + "content": "[48] Richard Tucker and Noah Snavely. Single-view view synthesis with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 551-560, 2020. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 536, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 536, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 308, + 536, + 545, + 600 + ], + "type": "text", + "content": "[49] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "type": "text", + "content": "[50] Qianqian Wang, Zhengqi Li, David Salesin, Noah Snavely, Brian Curless, and Janne Kontkanen. 3d moments from near-duplicate photos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3906-3915, 2022. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "type": "text", + "content": "[51] Suttisak Wizadwongsa, Pakkapon Phongthawee, Jiraphon Yenphraphai, and Supasorn Suwajanakorn. Nex: Real-time view synthesis with neural basis expansion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8534-8543, 2021. 8" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "text", + "content": "319" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 353 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[52] Jae Shin Yoon, Kihwan Kim, Orazio Gallo, Hyun Soo Park, and Jan Kautz. Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5336-5345, 2020. 
2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "text", + "content": "[53] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5752-5761, 2021. 3, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 288, + 239 + ], + "type": "text", + "content": "[54] Jiakai Zhang, Liao Wang, Xinhang Liu, Fuqiang Zhao, Minzhang Li, Haizhao Dai, Boyuan Zhang, Wei Yang, Lan Xu, and Jingyi Yu. Neuvv: Neural volumetric videos with immersive rendering and editing. arXiv preprint arXiv:2202.06088, 2022. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 240, + 288, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 240, + 288, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 240, + 288, + 274 + ], + "type": "text", + "content": "[55] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 274, + 288, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 274, + 288, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 274, + 288, + 308 + ], + "type": "text", + "content": "[56] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images, 2018. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 308, + 288, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 308, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 308, + 288, + 353 + ], + "type": "text", + "content": "[57] C Lawrence Zitnick, Sing Bing Kang, Matthew Uyttendaele, Simon Winder, and Richard Szeliski. High-quality video view interpolation using a layered representation. ACM transactions on graphics (TOG), 23(3):600-608, 2004. 
2" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "spans": [ + { + "bbox": [ + 298, + 734, + 313, + 742 + ], + "type": "text", + "content": "320" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_content_list.json b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9464259d237c2a132959c69280942960abfb6fe9 --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_content_list.json @@ -0,0 +1,1655 @@ +[ + { + "type": "text", + "text": "3D Video Object Detection with Learnable Object-Centric Global Optimization", + "text_level": 1, + "bbox": [ + 84, + 130, + 883, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiawei He $^{1,2}$ Yuntao Chen $^{3}$ Naiyan Wang $^{4}$ Zhaoxiang Zhang $^{1,2,3}$ $^{1}$ CRIPAC, Institute of Automation, Chinese Academy of Sciences (CASIA) \n $^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences (UCA) \n $^{3}$ Centre for Artificial Intelligence and Robotics, HKISI_CAS ${}^{4}$ TuSimple {hejiawei2019, zhaoxiang.zhang}@ia.ac.cn {chenyuntao08, winsty}@gmail.com", + "bbox": [ + 135, + 179, + 803, + 271 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 304, + 312, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We explore long-term temporal visual correspondence-based optimization for 3D video object detection in this work. Visual correspondence refers to one-to-one mappings for pixels across multiple images. Correspondence-based optimization is the cornerstone for 3D scene reconstruction but is less studied in 3D video object detection, because moving objects violate multi-view geometry constraints and are treated as outliers during scene reconstruction. We address this issue by treating objects as first-class citizens during correspondence-based optimization. In this work, we propose BA-Det, an end-to-end estimable object detector with object-centric temporal correspondence learning and feature metric object bundle adjustment. Empirically, we verify the effectiveness and efficiency of BA-Det for multiple baseline 3D detectors under various setups. Our BA-Det achieves SOTA performance on the large-scale Waymo Open Dataset (WOD) with only marginal computation cost. Our code is available at https://github.com/jiaweihe1996/BA-Det.", + "bbox": [ + 75, + 335, + 473, + 623 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 648, + 209, + 664 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D object detection is an important perception task, especially for indoor robots and autonomous-driving vehicles. Recently, image-only 3D object detection [23,52] has been proven practical and made great progress. 
In real-world applications, cameras capture video streams instead of unrelated frames, which suggests abundant temporal information is readily available for 3D object detection. In single-frame methods, despite simply relying on the prediction power of deep learning, finding correspondences play an important role in estimating per-pixel depth and the object pose in the camera frame. Popular correspondences include Perspective-n-Point (PnP) between pre-defined 3D keypoints [22, 52] and their 2D projections in monocular 3D object detection, and Epipolar Geometry [6,12] in multiview 3D object detection. However, unlike the single-frame", + "bbox": [ + 75, + 674, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "case, temporal visual correspondence has not been explored much in 3D video object detection.", + "bbox": [ + 498, + 305, + 890, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As summarized in Fig. 1, existing methods in 3D video object detection can be divided into three categories while each has its own limitations. Fig. 1a shows methods with object tracking [3], especially using a 3D Kalman Filter to smooth the trajectory of each detected object. This approach is detector-agnostic and thus widely adopted, but it is just an output-level smoothing process without any feature learning. As a result, the potential of video is underexploited. Fig. 1b illustrates the temporal BEV (Bird's-Eye View) approaches [14, 23, 26] for 3D video object detection. They introduce the multi-frame temporal crossattention or concatenation for BEV features in an end-to-end fusion manner. As for utilizing temporal information, temporal BEV methods rely solely on feature fusion while ignoring explicit temporal correspondence. Fig. 1c depicts stereo-from-video methods [46, 47]. These methods explicitly construct a pseudo-stereo view using ego-motion and then utilize the correspondence on the epipolar line of two frames for depth estimation. However, the use of explicit correspondence in these methods is restricted to only two frames, thereby limiting its potential to utilize more temporal information. Moreover, another inevitable defect of these methods is that moving objects break the epipolar constraints, which cannot be well handled, so monocular depth estimation has to be reused.", + "bbox": [ + 496, + 338, + 892, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Considering the aforementioned shortcomings, we seek a new method that can handle both static and moving objects, and utilize long-term temporal correspondences. Firstly, in order to handle both static and moving objects, we draw experience from the object-centric global optimization with reprojection constraints in Simultaneous Localization and Mapping (SLAM) [21, 48]. Instead of directly estimating the depth for each pixel from temporal cues, we utilize them to construct useful temporal constraints to refine the object pose prediction from network prediction. Specifically, we construct a non-linear least-square optimization problem with the temporal correspondence constraint in an", + "bbox": [ + 496, + 719, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5106", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2cc27a219910578f4d25292cc16f5ef97904971da87ec69b8f310e5d18f12cb8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 137, + 102, + 828, + 130 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6e822a8cc35d7e2583b7483aa9711927eac18464f585bd700a1f2f3e948b68bc.jpg", + "image_caption": [ + "(a) Temporal Filtering" + ], + "image_footnote": [], + "bbox": [ + 86, + 140, + 251, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/517fea5a971ba8f9e67c909eae73f58a281b03e329341deb4e0025f889f8ea8b.jpg", + "image_caption": [ + "(b) Temporal BEV" + ], + "image_footnote": [], + "bbox": [ + 290, + 143, + 468, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/874fca725128ad06d0955a7d09274b22692e3fd314fec64283fa9efd18bee26f.jpg", + "image_caption": [ + "(c) Stereo from Video", + "Figure 1. Illustration of how to leverage temporal information in different 3D video object detection paradigms." + ], + "image_footnote": [], + "bbox": [ + 501, + 146, + 676, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e0b64a5a75ddc04a0f3a61c7ac1830cd84242b58f0c8e354e7d9d55ed48364d1.jpg", + "image_caption": [ + "(d) BA-Det (Ours)" + ], + "image_footnote": [], + "bbox": [ + 710, + 156, + 890, + 229 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "object-centric manner to optimize the pose of objects no matter whether they are moving or not. Secondly, for long-term temporal correspondence learning, hand-crafted descriptors like SIFT [27] or ORB [35] are no longer suitable for our end-to-end object detector. Besides, the long-term temporal correspondence needs to be robust to viewpoint changes and severe occlusions, where these traditional sparse descriptors are incompetent. So, we expect to learn a dense temporal correspondence for all available frames.", + "bbox": [ + 75, + 303, + 468, + 438 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, as shown in Fig. 1d, we propose a 3D video object detection paradigm with learnable long-term temporal visual correspondence, called BA-Det. Specifically, the detector has two stages. In the first stage, a CenterNet-style monocular 3D object detector is applied for single-frame object detection. After associating the same objects in the video, the second stage detector extracts RoI features for the objects in the tracklet and matches dense local features on the object among multi-frames, called the object-centric temporal correspondence learning (OTCL) module. To make traditional object bundle adjustment (OBA) learnable, we formulate feature metric OBA. In the training time, with feature metric OBA loss, the object detection and temporal feature correspondence are learned jointly. During inference, we use the 3D object estimation from the first stage as the initial pose and associate the objects with 3D Kalman Filter. The object-centric bundle adjustment refines the pose and 3D box size of the object in each frame at the tracklet level, taking the initial object pose and temporal feature correspondence from OTCL as the input. 
Experiment results on the large-scale Waymo Open Dataset (WOD) show that our BA-Det could achieve state-of-the-art performance compared with other single-frame and multi-frame object detectors. We also conduct extensive ablation studies to demonstrate the effectiveness and efficiency of each component in our method.", + "bbox": [ + 75, + 439, + 470, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our work has the following contributions:", + "bbox": [ + 94, + 832, + 457, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We present a novel object-centric 3D video object detection approach $BA-Det$ by learning object detection and temporal correspondence jointly.", + "bbox": [ + 76, + 854, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We design the second-stage object-centric temporal correspondence learning module and the featuremetric object bundle adjustment loss.", + "- We achieve state-of-the-art performance on the largescale WOD. The ablation study and comparisons show the effectiveness and efficiency of our BA-Det." + ], + "bbox": [ + 500, + 303, + 890, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 414, + 640, + 429 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 3D Video Object Detection", + "text_level": 1, + "bbox": [ + 500, + 438, + 741, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For 3D video object detection, LiDAR-based methods [4, 8, 49] usually align point clouds from consecutive frames by compensating ego-motion and simply accumulate them to alleviate the sparsity of point clouds. Object-level methods [5, 9, 33, 50], handling the multi-frame point clouds of the tracked object, become a new trend. 3D object detection from the monocular video has not received enough attention from researchers. Kinematic3D [3] is a pioneer work decomposing kinematic information into ego-motion and target object motion. However, they only apply 3D Kalman Filter [17] based motion model for kinematic modeling and only consider the short-term temporal association (4 frames). Recently, BEVFormer [23] proposes an attentional transformer method to model the spatial and temporal relationship in the bird's-eye-view (BEV). A concurrent work, DfM [46], inspired by Multi-view Geometry, considers two frames as stereo and applies the cost volume in stereo to estimate depth. However, how to solve the moving objects is not well handled in this paradigm.", + "bbox": [ + 496, + 462, + 890, + 750 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Geometry in Videos", + "text_level": 1, + "bbox": [ + 500, + 756, + 689, + 772 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Many researchers utilize 3D geometry in videos to reconstruct the scene and estimate the camera pose, which is a classic topic of computer vision. Structure from Motion (SfM) [37] and Multi-view Stereo (MVS) [38] are two paradigms to estimate the sparse and dense depth from multi-view images respectively. In robotics, 3D geometry theory is applied for Simultaneous Localization and Mapping (SLAM) [30]. 
To globally optimize the 3D position of", + "bbox": [ + 496, + 779, + 890, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5107", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the feature points and the camera pose at each time, bundle adjustment algorithm [42] is widely applied. However, most of them can only handle static regions in the scene.", + "bbox": [ + 75, + 90, + 468, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the deep learning era, with the development of object detection, object-level semantic SLAM [21, 31, 48] is rising, aiming to reconstruct the objects instead of the whole scene. These methods can handle dynamic scenes and help the object localization in the video. Besides, feature correspondence learning [36, 39] has received extensive attention in recent years. Deep learning has greatly changed the pipeline of feature matching. Differentiable bundle adjustment, like BANet [41] and NRE [11], makes the whole 3D geometry system end-to-end learnable. Unlike these works, we focus on the representation of the 3D object and integrate feature correspondence learning into 3D object detection. Utilizing the learned temporal feature correspondence, the proposed BA-Det optimizes the object pose of a tracklet in each frame.", + "bbox": [ + 75, + 137, + 470, + 361 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Preliminary: Bundle Adjustment", + "text_level": 1, + "bbox": [ + 76, + 375, + 379, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Bundle Adjustment [42] is a widely used globally temporal optimization technology in 3D reconstruction, which means optimally adjusting bundles of light rays from a given 3D global position to the camera center among multiframes. Specifically, we use $\\mathbf{P}_i = [x_i,y_i,z_i]^\\top$ to denote the $i$ -th 3D point coordinates in the global reference frame. According to the perspective camera model, the image coordinates of the projected 3D point at time $t$ is", + "bbox": [ + 75, + 398, + 468, + 518 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\Pi \\left(\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}\\right) = \\frac {1}{z _ {i} ^ {t}} \\mathbf {K} \\left(\\mathbf {R} _ {c g} ^ {t} \\mathbf {P} _ {i} + \\mathbf {t} _ {c g} ^ {t}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 523, + 468, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\Pi$ is the perspective projection transformation, $\\mathbf{T}_{cg}^{t} = [\\mathbf{R}_{cg}^{t}|\\mathbf{t}_{cg}^{t}]$ is the camera extrinsic matrix at time $t$ . $\\mathbf{R}_{cg}^{t}$ and $\\mathbf{t}_{cg}^{t}$ are the rotation and the translation components of $\\mathbf{T}_{cg}^{t}$ , respectively. 
$\\mathbf{K}$ is the camera intrinsic matrix, and $z_{i}^{t}$ is the depth of the $i$ -th 3D point in the camera frame at time $t$ .", + "bbox": [ + 76, + 561, + 468, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Bundle adjustment is a nonlinear least-square problem to minimize the reprojection error as:", + "bbox": [ + 76, + 651, + 468, + 681 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\{\\bar {\\mathbf {T}} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\bar {\\mathbf {P}} _ {i} \\} _ {i = 1} ^ {m} = \\\\ \\underset {\\{\\mathbf {T} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\mathbf {P} _ {i} \\} _ {i = 1} ^ {m}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | \\mathbf {p} _ {i} ^ {t} - \\Pi (\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}) | | ^ {2}, \\tag {2} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 686, + 468, + 750 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{p}_i^t$ is the observed image coordinates of 3D point $\\mathbf{P}_i$ on frame $t$ . Bundle adjustment can be solved by Gauss-Newton or Levenberg-Marquardt algorithm effectively [1, 20].", + "bbox": [ + 75, + 755, + 468, + 816 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. BA-Det: Object-centric Global Optimizable Detector", + "text_level": 1, + "bbox": [ + 76, + 827, + 468, + 859 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we introduce the framework of our BA-Det (Fig. 2), a learnable object-centric global optimization", + "bbox": [ + 76, + 869, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "network. The pipeline consists of three parts: (1) First-stage single frame 3D object detection; (2) Second-stage object-centric temporal correspondence learning (OTCL) module; (3) Featuremetric object bundle adjustment loss for temporal feature correspondence learning.", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. Single-frame 3D Object Detection", + "text_level": 1, + "bbox": [ + 498, + 174, + 795, + 191 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a video clip with consecutive frames $\\mathcal{V} = \\{I_1, I_2, \\dots, I_T\\}$ , 3D video object detection is to predict the class and the 3D bounding box of each object in each frame. Let $\\mathcal{O}_k^t$ be the $k$ -th object in frame $t$ . For the 3D bounding box $\\mathbf{B}_k^t$ , we estimate the size of the bounding box $\\mathbf{s}_t^k = [w, h, l]^\\top$ and the object pose ${}^k\\mathbf{T}_{co}^t$ in the camera frame, including translation ${}^k\\mathbf{t}_{co}^t = [x_c, y_c, z_c]^\\top$ and rotation ${}^k\\mathbf{r}_{co}^t = [r_x, r_y, r_z]^\\top$ . In most 3D object detection datasets, with the flat ground assumption, only yaw rotation $r_y$ is considered.", + "bbox": [ + 496, + 198, + 890, + 348 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We basically adopt MonoFlex [52] as our first-stage 3D object detector, which is a simple and widely-used baseline method. Different from the standard MonoFlex, we make some modifications for simplicity and adaptation. (1) Instead of ensemble the depth from keypoints and regression, we only used the regressed depth directly. (2) The edge fusion module in MonoFlex is removed for simplicity and better performance. The output of the first-stage object detector should be kept for the second stage. 
The predicted 2D bounding box $\\mathbf{b}_k^t$ for each object is used for the object-centric feature extraction in the second stage. The 3D estimations should be the initial pose estimation and be associated between frames. We follow ImmortalTracker [44] to associate the 3D box prediction outputs with a 3D Kalman Filter frame by frame. For convenience and clarity, we use the same index $k$ to denote the objects belonging to the same tracklet in the video from now on.", + "bbox": [ + 496, + 349, + 890, + 604 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.2. Object-Centric Temporal Correspondence Learning", + "text_level": 1, + "bbox": [ + 498, + 614, + 890, + 646 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Based on the predictions from the first-stage detector, we propose an object-centric temporal correspondence learning (OTCL) module, which plays an indispensable role in the learnable optimization. Specifically, the OTCL module is designed to learn the correspondence of the dense features for the same object among all available frames. Given a video $\\{I_1,I_2,\\dots ,I_T\\}$ and image features $\\{\\mathbf{F}^1,\\mathbf{F}^2,\\dots ,\\mathbf{F}^T\\}$ from the backbone in the first stage, we extract the RoI features $^k\\mathbf{F}^t\\in \\mathbb{R}^{H\\times W\\times C}$ of the object $\\mathcal{O}_k^t$ by the RoIAign operation [13],", + "bbox": [ + 496, + 652, + 890, + 804 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n^ k \\mathbf {F} ^ {t} = \\operatorname {R o I A l i g n} \\left(\\mathbf {F} ^ {t}, \\mathbf {b} _ {k} ^ {t}\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 602, + 811, + 890, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We apply $L$ layers of cross- and self-attention operations before calculating the correspondence map to aggregate and enhance the spatial and temporal information for RoI features. Note that the object tracklet is available with the", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5108", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/74cd65fd2b29cc070de67106dd04cc4555be34afa559af4aac47122d124093c3.jpg", + "image_caption": [ + "Figure 2. A overview of the proposed BA-Det framework. The left part of the framework is the first-stage object detector to predict the 3D object and its 2D bounding box. The second stage is called OTCL module. In the OTCL module, we extract the RoI features $^k\\mathbf{F}^t$ by RoIAlign, aggregate the RoI features and learn object-centric temporal correspondence using feature metric object bundle adjustment loss." + ], + "image_footnote": [], + "bbox": [ + 145, + 90, + 831, + 345 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "aforementioned tracker, so the cross-attention is applied between the objects in different frames for the same tracklet. 
For each layer of attention operations between two adjacent frames $t$ and $t'$ :", + "bbox": [ + 75, + 422, + 470, + 482 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} ^ {k} \\widetilde {\\mathbf {F}} ^ {t} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}\\right), \\\\ ^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}\\right), \\\\ ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {T}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {T}} \\left(^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}\\right), \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 489, + 468, + 558 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where ${}^k\\hat{\\mathbf{F}}^t\\in \\mathbb{R}^{HW\\times C}$ is the flattened RoI feature, AttS is the spatial self-attention, AttT is the temporal crossattention.", + "bbox": [ + 75, + 558, + 468, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We then define the spatial correspondence map between two flattened RoI features after the attention operations. In frame pair $(t, t')$ , we use $^k\\mathbf{f}_i$ to denote $i$ -th local feature in $^k\\hat{\\mathbf{F}}^{(L)}$ ( $i \\in \\{1, 2, \\dots, HW\\}$ ). The correspondence map $^k\\mathbf{C}_t^{t'} \\in \\mathbb{R}^{HW \\times HW}$ in two frames is defined as the inner product of two features in two frames:", + "bbox": [ + 75, + 604, + 468, + 696 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = ^ {k} \\mathbf {f} _ {i} ^ {t} * ^ {k} \\mathbf {f} _ {i ^ {\\prime}} ^ {t ^ {\\prime}}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 192, + 705, + 468, + 726 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To normalize the correspondence map, we perform softmax over all spatial locations $i'$ ,", + "bbox": [ + 75, + 734, + 468, + 765 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = \\operatorname {s o f t m a x} \\left(^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ]\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 773, + 468, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. Featuremetric Object Bundle Adjustment Loss", + "text_level": 1, + "bbox": [ + 75, + 801, + 468, + 818 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this subsection, we present that how to adapt and integrate the Object-centric Bundle Adjustment (OBA) into our learnable BA-Det framework, based on the obtained correspondence map. Generally speaking, we formulate the featuremetric OBA loss to supervise the temporal feature", + "bbox": [ + 75, + 825, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "correspondence learning. 
Note that here we only derive the tracklet-level OBA loss for the same object, and for the final supervision we will sum all the tracklet-level loss in the video.", + "bbox": [ + 496, + 422, + 890, + 482 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "First, we revisit the object-centric bundle adjustment, as shown in Fig. 3a. As proposed in Object SLAM [21, 48], OBA assumes that the object can only have rigid motion relative to the camera. For the object $\\mathcal{O}_k$ , we denote the 3D points as $\\mathcal{P}_k = \\{^k\\mathbf{P}_i\\}_{i=1}^m$ in the object frame, 2D points as $\\{^k\\mathbf{p}_i^t\\}_{i=1}^m$ , 2D features at position $^k\\mathbf{p}_i^t$ as $\\{\\mathbf{f}[^k\\mathbf{p}_i^t]\\}_{i=1}^m$ , and the camera pose in the object reference frame as $\\mathcal{T}_k = \\{^k\\mathbf{T}_{co}^t\\}_{t=1}^T$ , OBA can be casted as:", + "bbox": [ + 496, + 488, + 892, + 611 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | ^ {k} \\mathbf {p} _ {i} ^ {t} - \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) | | _ {2} ^ {2}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 628, + 890, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To make the OBA layer end-to-end learnable, we formulate featuremetric [25] OBA:", + "bbox": [ + 496, + 707, + 890, + 738 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\\\ \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) \\right] \\right| \\right| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 756, + 890, + 833 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{f}[\\mathbf{p}]$ denotes the feature vector in pixel coordinates $\\mathbf{p}$ . Representing the 3D point ${}^k\\mathbf{P}_i$ in Eq. 8 with 2D points in each frame, the feature metric reprojection error of frame", + "bbox": [ + 496, + 854, + 892, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5109", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ef98e7ea62dc61502455000a265fa8cd48e2e184d3cdc7ef4f53dede49c8590c.jpg", + "image_caption": [ + "(a) Object-centric Bundle Adjustment (OBA)." + ], + "image_footnote": [], + "bbox": [ + 127, + 93, + 405, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/78b3ef0e982b6c9bdb7883b4e810b53b5e392e729a3254cf5b50a4a6844eb517.jpg", + "image_caption": [ + "(b) The computation of the featuremetric OBA loss.", + "Figure 3. Illustration of featuremetric object bundle adjustment." 
+ ], + "image_footnote": [], + "bbox": [ + 119, + 252, + 421, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$t$ could be derived as", + "bbox": [ + 76, + 440, + 217, + 453 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} ^ {k} e _ {i} ^ {t} = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t ^ {\\prime}} \\right] (9) \\\\ = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, \\Pi^ {- 1} \\left(^ {k} \\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {p} _ {i} ^ {t}, \\mathbf {K}, z _ {i} ^ {t}\\right), \\mathbf {K}\\right) \\right], (10) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 458, + 473, + 558 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Pi^{-1}(\\cdot)$ is the inverse projection function to lift the 2D point on the image to 3D in the object frame. $z_{i}^{t}$ is the ground-truth depth of ${}^{k}\\mathbf{p}_{i}^{t}$ (from LiDAR point clouds only for training). In the training time, we learn the feature correspondence, given the ground-truth pose of the object $\\mathcal{O}_k$ , denoted as ${}^{k}\\mathbf{T}_{co}^{t}$ and ${}^{k}\\mathbf{T}_{co}^{t'}$ in frame $t$ and frame $t'$ , respectively. Considering the feature metric reprojection loss in all frames and all points, the overall loss term for object $k$ can be formulated as", + "bbox": [ + 75, + 566, + 468, + 702 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {r e p}} ^ {k} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\left| \\left| ^ {k} e _ {i} ^ {t} \\right| \\right| _ {2} ^ {2} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| ^ {k} \\mathbf {f} _ {i} ^ {t} - ^ {k} \\mathbf {f} _ {i} ^ {t ^ {\\prime}} \\right| \\right| _ {2} ^ {2} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 705, + 468, + 748 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we replace the $L2$ norm in Eq. 11 with the cosine distance to measure the featuremetric reprojection error. Thus we bring the normalized correspondence map $\\widetilde{\\mathbf{C}}$ in Sec. 4.2 into the loss term. With log-likelihood formulation, we formulate the featuremetric OBA loss to supervise the object-centric temporal correspondence learning:", + "bbox": [ + 75, + 758, + 468, + 851 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {O B A}} ^ {k} = - \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\log \\left(^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} \\left[ ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t}, ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t ^ {\\prime}} \\right]\\right). \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 109, + 856, + 468, + 897 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\left(^k\\bar{\\mathbf{p}}_i^t,\\bar{}^k\\bar{\\mathbf{p}}_i^{t'}\\right)$ are the ground-truth corresponding pair of the $i$ -th local feature. The illustration of the loss computation is in Fig. 3b.", + "bbox": [ + 498, + 89, + 890, + 137 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.4. 
Inference", + "text_level": 1, + "bbox": [ + 500, + 146, + 609, + 161 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After introducing the training loss design, we present the inference process of BA-Det as follows.", + "bbox": [ + 498, + 170, + 890, + 200 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First-stage 3D object detection and association. The first-stage detector makes the prediction of classification scores and 2D / 3D bounding boxes. The 3D bounding boxes are associated across the frames by ImmortalTracker [44]. The following process is on the tracklet level.", + "bbox": [ + 498, + 200, + 890, + 276 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dense feature matching. To optimize the object pose, we need to obtain the feature correspondence in each frame for the same object. As mentioned in Sec. 4.2, the OTCL module is trained to generate a dense correspondence map in all frames. During inference, we match all $H \\times W$ dense local features in RoI between adjacent two frames and between the first frame and last frame of the time window $[t, t + \\tau]$ . We use the RANSAC algorithm [10] to filter the feature correspondence outliers.", + "bbox": [ + 498, + 277, + 890, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Feature tracking. To form a long-term keypoint tracklet from the obtained correspondence, we leverage a graph-based algorithm. First, the matched feature pairs are constructed into a graph $\\mathcal{G}$ . The features are on the vertices. If the features are matched, an edge is connected in the graph. Then we track the feature for the object in all available frames. We use the association method mainly following [7]. The graph partitioning method is applied to $\\mathcal{G}$ to make each connected subgraph have at most one vertex per frame. The graph cut is based on the similarity of the matched features.", + "bbox": [ + 496, + 412, + 890, + 578 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Object-centric bundle adjustment. In the inference stage, given the initial pose estimation and the temporal feature correspondence, we solve the object-centric bundle adjustment by Levenberg-Marquardt algorithm, and the object pose in each frame and the 3D position of the keypoints can be globally optimized between frames.", + "bbox": [ + 496, + 580, + 890, + 670 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Post-processing. We also apply some common post-processing in video object detection techniques like tracklet rescoring [18] and bounding box temporal interpolation.", + "bbox": [ + 498, + 671, + 890, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 729, + 632, + 748 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Datasets and metrics", + "text_level": 1, + "bbox": [ + 500, + 756, + 696, + 771 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We conduct our experiments on the large autonomous driving dataset, Waymo Open Dataset (WOD) [40]. The WOD has different versions with different annotations and metrics. To keep the fairness of the comparisons, we report the results both on WOD v1.2 and WOD v1.3.1. The annotations on v1.2 are based on LiDAR and the official metrics are mAP IoU@0.7 and mAP IoU@0.5. 
Recently, v1.3.1 is released to support multi-camera 3D object detec", + "bbox": [ + 496, + 780, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5110", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c79583cdea79cc55b411f20ff66ad1dbc4ce685243397f881f2d2f7c808a8d79.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method                   |              LEVEL_1               |              LEVEL_2
                         | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50 | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
M3D-RPN [2]              | 0.35 | 0.34 | 3.79 | 3.63 | 0.33 | 0.33 | 3.61 | 3.46
PatchNet [29]            | 0.39 | 0.37 | 2.92 | 2.74 | 0.38 | 0.36 | 2.42 | 2.28
PCT [43]                 | 0.89 | 0.88 | 4.20 | 4.15 | 0.66 | 0.66 | 4.03 | 3.99
MonoJSG [24]             | 0.97 | 0.95 | 5.65 | 5.47 | 0.91 | 0.89 | 5.34 | 5.17
GUPNet [28]              | 2.28 | 2.27 | 10.02 | 9.94 | 2.14 | 2.12 | 9.39 | 9.31
DEVIANT [19]             | 2.69 | 2.67 | 10.98 | 10.89 | 2.52 | 2.50 | 10.29 | 10.20
CaDDN [34]               | 5.03 | 4.99 | 17.54 | 17.31 | 4.49 | 4.45 | 16.51 | 16.28
DID-M3D [32]             | - | - | 20.66 | 20.47 | - | - | 19.37 | 19.19
BEVFormer [23]†          | - | 7.70 | - | 30.80 | - | 6.90 | - | 27.70
DCD [22]                 | 12.57 | 12.50 | 33.44 | 33.24 | 11.78 | 11.72 | 31.43 | 31.25
MonoFlex [52] (Baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12
BA-Det (Ours)†           | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12
", + "bbox": [ + 122, + 88, + 854, + 289 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9bc8c1e610c110feff67ca906a04d20d0d390da94ddc1c45023f293a563e17cf.jpg", + "image_caption": [ + "(a) Frame 8." + ], + "image_footnote": [], + "bbox": [ + 161, + 349, + 220, + 486 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/22ef3b70ba0a8ea6e8ccb2a1340a098e22ddf7a939afbb041f4a28033a0ae807.jpg", + "image_caption": [ + "(b) Frame 22." + ], + "image_footnote": [], + "bbox": [ + 310, + 349, + 366, + 489 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/03675c24d7595c5dd5b492c1edab946d262e3e2a0304c93bdad36e3c5c8d811d.jpg", + "image_caption": [ + "(c) Frame 36." + ], + "image_footnote": [], + "bbox": [ + 457, + 359, + 531, + 494 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/42ee884163f0f7960fc9376f4173a67b305cac23dc0d4de2f89dbebbf2fa10c8.jpg", + "image_caption": [ + "(d) Frame 50." + ], + "image_footnote": [], + "bbox": [ + 606, + 354, + 689, + 491 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/b624b040eb17806ab75ffc531723fde36d14b5d44f797b9305f63ae77a47871f.jpg", + "image_caption": [ + "(e) Frame 57.", + "Figure 4. Qualitative results from the BEV in different frames. We use blue and red boxes to denote initial predictions and optimized predictions of the object we highlight. The green and black boxes denote the other box predictions and the ground truth boxes. The ego vehicle lies at the bottom of each figure." + ], + "image_footnote": [], + "bbox": [ + 723, + 361, + 833, + 497 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0a3559907cc5781e2f3b6b935510c2af0f9b6b24efcbe21f14e1a1ba04ff5b68.jpg", + "table_caption": [ + "Table 1. The results on WODv1.2 [40] val set. $\\mathrm{AP}_{70}$ denotes AP with IoU threshold at 0.7. $\\mathrm{AP}_{50}$ denotes AP IoU@0.5. $\\dagger$ denotes the method utilizing temporal information." + ], + "table_footnote": [], + "table_body": "
Method                | LET-APL | LET-AP | LET-APH | 3D AP70 | 3D AP50
MV-FCOS3D++ [45]†     | 58.11 | 74.68 | 73.50 | 14.66 | 36.02
BA-DetFCOS3D (Ours)†  | 58.47 | 74.85 | 73.66 | 15.02 | 36.89
", + "bbox": [ + 78, + 595, + 468, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. The multi-camera results on WODv1.3.1 [16] val set. Besides the official LET-IoU-based metrics, we also report the metrics with standard 3D IoU. All metrics are reported for the LEVEL_2 difficulty.†: use temporal information.", + "bbox": [ + 75, + 648, + 468, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tion, and the annotations are camera-synced boxes. On the v1.3.1 dataset, a series of new LET-IoU-based metrics [16] are introduced to slightly tolerate the localization error from the worse sensor, camera, than LiDAR. Early work mainly reports the results on the v1.2 dataset, and we only compare our methods with the ones from WOD Challenge 2022 using the v1.3.1 dataset. Because we mainly focus on rigid objects, we report the results of the VEHICLE class.", + "bbox": [ + 75, + 733, + 468, + 853 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LET-3D-AP and LET-3D-APL are the new metrics, relying on the Longitudinal Error Tolerant IoU (LET-IoU). LET-IoU is the 3D IoU calculated between the target ground", + "bbox": [ + 75, + 854, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "truth box and the prediction box aligned with ground truth along the depth that has minimum depth error. LET-3D-AP and LET-3D-APL are calculated from the average precision and the longitudinal affinity weighted average precision of the PR curve. For more details, please refer to [16].", + "bbox": [ + 498, + 599, + 892, + 675 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 691, + 718, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The first stage network architecture of BA-Det is the same as MonoFlex, with DLA-34 [51] backbone, the output feature map is with the stride of 8. In the second stage, the shape of the RoI feature is $60 \\times 80$ . The spatial and temporal attention module is stacked with 4 layers. The implementation is based on the PyTorch framework. We train our model on 8 NVIDIA RTX 3090 GPUs for 14 epochs. Adam optimizer is applied with $\\beta_{1} = 0.9$ and $\\beta_{2} = 0.999$ . The initial learning rate is $5 \\times 10^{-4}$ and weight decay is $10^{-5}$ . The learning rate scheduler is one-cycle. We use the Levenberg-Marquardt algorithm, implemented by DeepLM [15], to solve object-centric bundle adjustment. The maximum it", + "bbox": [ + 496, + 719, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5111", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ea3c2ef05c85396eaac7310bfd25a78b73ac66ec992532dc101c268b478a6725.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
   | Method         |       3D AP70        |       3D APH70       |       3D AP50        |       3D APH50
   |                | 0-30 | 30-50 | 50-∞  | 0-30 | 30-50 | 50-∞  | 0-30 | 30-50 | 50-∞  | 0-30 | 30-50 | 50-∞
L1 | DCD [22]       | 32.47 | 5.94 | 1.24 | 32.30 | 5.91 | 1.23 | 62.70 | 26.35 | 10.16 | 62.35 | 26.21 | 10.09
L1 | MonoFlex [52]  | 30.64 | 5.29 | 1.05 | 30.48 | 5.27 | 1.04 | 61.13 | 25.85 | 9.03 | 60.75 | 25.71 | 8.95
L1 | BA-Det (Ours)† | 37.74 | 11.04 | 3.86 | 37.46 | 10.95 | 3.79 | 71.07 | 37.15 | 14.89 | 70.46 | 36.79 | 14.61
L2 | DCD [22]       | 32.30 | 5.76 | 1.08 | 32.19 | 5.73 | 1.08 | 62.48 | 25.60 | 8.92 | 62.13 | 25.46 | 8.86
L2 | MonoFlex [52]  | 30.54 | 5.14 | 0.91 | 30.37 | 5.11 | 0.91 | 60.91 | 25.11 | 7.92 | 60.54 | 24.97 | 7.85
L2 | BA-Det (Ours)† | 37.61 | 10.72 | 3.37 | 37.33 | 10.63 | 3.31 | 70.83 | 36.14 | 13.62 | 70.23 | 35.79 | 13.37
", + "bbox": [ + 114, + 88, + 859, + 213 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ff9669b32a03cf2f6f7259c0b702aa6e0915d6ffadd62d0c60962f844e7f577a.jpg", + "table_caption": [ + "Table 3. The object depth range conditioned result on WODv1.2 [40] val set. L1 and L2 denote LEVEL_1 and LEVEL_2 difficulty, respectively. †: use temporal information." + ], + "table_footnote": [], + "table_body": "
Method                          |              LEVEL_1               |              LEVEL_2
                                | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50 | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
MonoFlex (baseline)             | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12
Our first-stage prediction      | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32
+ 3D Tracking [44]              | 14.01 | 13.93 | 35.19 | 34.92 | 13.13 | 13.05 | 33.03 | 32.78
+ Learnable global optimization | 15.85 | 15.75 | 38.06 | 37.76 | 14.87 | 14.77 | 35.72 | 35.44
+ Tracklet rescoring            | 16.43 | 16.30 | 40.07 | 39.70 | 15.41 | 15.29 | 37.66 | 37.31
+ Bbox interpolation            | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12
", + "bbox": [ + 89, + 265, + 883, + 393 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4. Ablation study of each component in BA-Det.", + "bbox": [ + 320, + 405, + 647, + 419 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "eration of the LM algorithm is 200. For the object that appears less than 10 frames or the average keypoint number is less than 5, we do not optimize it.", + "bbox": [ + 75, + 446, + 468, + 492 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.3. Comparisons with State-of-the-art Methods", + "text_level": 1, + "bbox": [ + 76, + 511, + 447, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare our BA-Det with other state-of-the-art methods under two different settings. WODv1.2 is for the front view camera and WODv1.3.1 has the official evaluator for all 5 cameras. As shown in Table 1, using the FRONT camera, we outperform the SOTA method DCD [22] for about 4AP and 4APH ( $\\sim 30\\%$ improvement) under the 0.7 IoU threshold. Compared with the only temporal method BEVFormer [23], we have double points of 3D $\\mathrm{AP}_{70}$ and 3D $\\mathrm{APH}_{70}$ . To validate the effectiveness, we also report the multi-camera results on the newly released WODv1.3.1, as shown in Table 2. No published work reports the results on WODv1.3.1. So, we only compare with the open-source MV-FCOS3D++ [45], the second-place winner of WOD 2022 challenge. We design the variant of BA-Det, called BA-DetFCOS3D, to adapt to the multi-camera setting. BA-DetFCOS3D is also a two-stage object detector. The first stage is the same as MV-FCOS3D++, but with the output of 2D bounding boxes. The second stage is OTCL module supervised with featuremetric object bundle adjustment loss. Although there are overlaps between 5 cameras, to simplify the framework, we ignore the object BA optimization across cameras and only conduct temporal optimization. BA-DetFCOS3D outperforms MV-FCOS3D++ under main metrics and traditional 3D IoU-based metrics.", + "bbox": [ + 75, + 537, + 472, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Qualitative Results", + "text_level": 1, + "bbox": [ + 500, + 445, + 683, + 460 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Fig. 4, we show the object-level qualitative results of the first-stage and second-stage predictions in different frames. For a tracklet, we can refine the bounding box predictions with the help of better measurements in other frames, even if there is a long time interval between them.", + "bbox": [ + 496, + 469, + 890, + 544 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.5. Distance Conditioned Results", + "text_level": 1, + "bbox": [ + 500, + 556, + 763, + 570 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We report the results with the different depth ranges in Table 3. The results indicate that the single frame methods, like DCD and MonoFlex, are seriously affected by object depth. When the object is farther away from the ego vehicle, the detection performance drops sharply. Compared with these methods, BA-Det, has the gain almost from the object far away from the ego-vehicle. The 3D $\\mathrm{AP}_{70}$ and 3D $\\mathrm{APH}_{70}$ are $3\\times$ compared with the baseline when the object is located in $[50\\mathrm{m},\\infty)$ , $2\\times$ in $[30\\mathrm{m},50\\mathrm{m})$ and $1.2\\times$ in $[0\\mathrm{m},30\\mathrm{m})$ . This is because we utilize the long-term temporal information for each object. 
In a tracklet, the predictions near the ego-vehicle can help to refine the object far away.", + "bbox": [ + 496, + 579, + 890, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.6. Ablation study", + "text_level": 1, + "bbox": [ + 500, + 771, + 650, + 787 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We ablate each component of BA-Det. The results are shown in Table 4. The first stage detector is slightly better than the MonoFlex baseline mainly because we remove the edge fusion module, which is harmful to the truncated objects in WOD. 3D KF associates the objects and smooths the object's trajectory. This part of improvement can be regarded as similar to Kinematic3D [3]. The core of BA-Det", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5112", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e4536cf05376adb51572032374ac8cbfb516d66e82b1d7c2d7a9ee56f8d948ea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Method              |              LEVEL_1               |              LEVEL_2
                    | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50 | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
MonoFlex (baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12
Initial prediction  | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32
Static BA           | 14.73 | 14.62 | 37.89 | 37.56 | 13.82 | 13.72 | 35.65 | 35.34
Ours                | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12
", + "bbox": [ + 122, + 88, + 851, + 189 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c9c01441ac8482a600609db2eddb9b6e340fed9c473095b15860d2644a98de34.jpg", + "table_caption": [ + "Table 5. Comparison between object-centric BA-Det and the traditional scene-level bundle adjustment (Static BA). Initial prediction denotes the predictions in the first stage." + ], + "table_footnote": [], + "table_body": "
\\(\\bar{L}_{t}\\)LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
MonoFlex (baseline)-11.7011.6432.2632.0610.9610.9030.3130.12
BA-Det+ORB feature [35]2.614.0513.9635.2134.9513.1713.0833.0532.81
BA-Det+Our feature1016.6016.4540.9340.5115.5715.4438.5338.12
", + "bbox": [ + 89, + 241, + 883, + 329 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "is the learnable global optimization module, which obtains the largest gain in all modules. The tracklet rescoring and temporal interpolation modules are also useful.", + "bbox": [ + 75, + 368, + 468, + 414 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.7. Further Discussions", + "text_level": 1, + "bbox": [ + 76, + 422, + 266, + 438 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "BA vs. Object BA. We conduct experiments to discuss whether the object-centric manner is important in temporal optimization. We modify our pipeline and optimize the whole scene in the global frame instead of optimizing the object pose in the object frame, called Static BA in Table 5. Static BA ignores dynamic objects and treats them the same as static objects. The inability to handle dynamic objects causes decreases by about 2 AP compared with BA-Det.", + "bbox": [ + 75, + 446, + 468, + 568 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Temporal feature correspondence. As shown in Table 6, we ablate the features used for object-centric bundle adjustment. Compared with traditional ORB feature [35], widely used in SLAM, our feature learning module predicts denser and better correspondence. We find the average object tracklet length is 19.6 frames, and the average feature tracklet in our method is about 10 frames, which means we can keep a long feature dependency and better utilize long-range temporal information. However, the $\\bar{L}_t$ of the ORB feature is only 2.6 frames. The results show the short keypoint tracklet can not refine the long-term object pose well.", + "bbox": [ + 75, + 568, + 470, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Inference latency of each step in BA-Det. The inference latency of each step in BA-Det is shown in Table 7. The most time-consuming part is the first-stage object detector, more than $130\\mathrm{ms}$ per image, which is the same as the MonoFlex baseline. Our BA-Det only takes an additional $50\\mathrm{ms}$ latency per image, compared with the single-frame detector MonoFlex. Besides, although the dense feature correspondence is calculated, thanks to the shared backbone with the first stage detector and parallel processing for the objects, the feature correspondence module is not very time-consuming.", + "bbox": [ + 75, + 734, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/24f1691bac36a60a80fc6060a0ca602c6dc34140df4120463e9244e9ceee851c.jpg", + "table_caption": [ + "Table 6. Ablation study about different feature corresponding methods. ${\\bar{L}}_{t}$ denotes the average keypoint tracklet length for each object." + ], + "table_footnote": [], + "table_body": "
Total latency            | 181.5 ms
First-stage detector     | 132.6 ms
Object tracking          | 6.6 ms
Feature correspondence   | 23.0 ms
Object bundle adjustment | 19.3 ms
", + "bbox": [ + 581, + 364, + 815, + 446 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Inference latency of each step in BA-Det per image.", + "bbox": [ + 513, + 455, + 875, + 472 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Limitations and Future Work", + "text_level": 1, + "bbox": [ + 498, + 482, + 772, + 498 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In the current version of this paper, we only focus on the objects, such as cars, trucks, and trailers. The performance of non-rigid objects such as pedestrians has not been investigated. However, with mesh-based and skeleton-based 3D human models, we believe that a unified keypoint temporal alignment module can be designed in the future. So, we will explore the extension of BA-Det for non-rigid objects.", + "bbox": [ + 496, + 507, + 890, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 625, + 619, + 641 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we propose a 3D video object detection paradigm with long-term temporal visual correspondence, called BA-Det. BA-Det is a two-stage object detector that can jointly learn object detection and temporal feature correspondence with proposed Featuremetric OBA loss. Object-centric bundle adjustment optimizes the first-stage object estimation globally in each frame. BA-Det achieves state-of-the-art performance on WOD.", + "bbox": [ + 496, + 651, + 890, + 772 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 784, + 666, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported in part by the Major Project for New Generation of AI (No.2018AAA0100400), the National Natural Science Foundation of China (No. 61836014, No. U21B2042, No. 62072457, No. 62006231) and the InnoHK program. The authors thank Lue Fan and Yuqi Wang for their valuable suggestions.", + "bbox": [ + 496, + 809, + 890, + 901 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "5113", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Sameer Agarwal, Keir Mierle, and The Ceres Solver Team. Ceres Solver. https://github.com/ceres-solver/ceres-solver, 2022.3", + "[2] Garrick Brazil and Xiaoming Liu. M3d-rpn: Monocular 3d region proposal network for object detection. In ICCV, 2019. 6", + "[3] Garrick Brazil, Gerard Pons-Moll, Xiaoming Liu, and Bernt Schiele. Kinematic 3d object detection in monocular video. In ECCV, 2020. 1, 2, 7", + "[4] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 2", + "[5] Xuesong Chen, Shaoshuai Shi, Benjamin Zhu, Ka Chun Cheung, Hang Xu, and Hongsheng Li. Mppnet: Multi-frame feature intertwining with proxy points for 3d temporal object detection. In ECCV, 2022. 2", + "[6] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Dsgn: Deep stereo geometry network for 3d object detection. In CVPR, 2020. 1", + "[7] Mihai Dusmanu, Johannes L Schonberger, and Marc Pollefeys. Multi-view optimization of local feature geometry. In ECCV, 2020. 
5", + "[8] Lue Fan, Ziqi Pang, Tianyuan Zhang, Yu-Xiong Wang, Hang Zhao, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Embracing single stride 3d object detector with sparse transformer. In CVPR, 2022. 2", + "[9] Lue Fan, Yuxue Yang, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Super sparse 3d object detection. arXiv preprint arXiv:2301.02562, 2023. 2", + "[10] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 5", + "[11] Hugo Germain, Vincent Lepetit, and Guillaume Bourmaud. Neural reprojection error: Merging feature learning and camera pose estimation. In CVPR, 2021. 3", + "[12] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In ICCV, 2021. 1", + "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In ICCV, 2017. 3", + "[14] Junjie Huang and Guan Huang. Bevdet4d: Exploit temporal cues in multi-camera 3d object detection. arXiv preprint arXiv:2203.17054, 2022. 1", + "[15] Jingwei Huang, Shan Huang, and Mingwei Sun. Deeplm: Large-scale nonlinear least squares on deep learning frameworks using stochastic domain decomposition. In CVPR, 2021. 6", + "[16] Wei-Chih Hung, Henrik Kretzschmar, Vincent Casser, Jyh-Jing Hwang, and Dragomir Anguelov. Let-3d-ap: Longitudinal error tolerant 3d average precision for camera-only 3d detection. arXiv preprint arXiv:2206.07705, 2022. 6", + "[17] Rudolph Emil Kalman. A new approach to linear filtering and prediction problems. 1960. 2" + ], + "bbox": [ + 78, + 114, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[18] Kai Kang, Hongsheng Li, Junjie Yan, Xingyu Zeng, Bin Yang, Tong Xiao, Cong Zhang, Zhe Wang, Ruohui Wang, Xiaogang Wang, et al. T-cnn: Tubelets with convolutional neural networks for object detection from videos. IEEE Transactions on Circuits and Systems for Video Technology, 28(10):2896-2907, 2017. 5", + "[19] Abhinav Kumar, Garrick Brazil, Enrique Corona, Armin Parchami, and Xiaoming Liu. Deviant: Depth equivariant network for monocular 3d object detection. In ECCV, 2022. 6", + "[20] Rainer Kummerle, Giorgio Grisetti, Hauke Strasdat, Kurt Konolige, and Wolfram Burgard. g2o: A general framework for graph optimization. In ICRA, 2011. 3", + "[21] Peiliang Li, Tong Qin, et al. Stereo vision-based semantic 3d object and ego-motion tracking for autonomous driving. In ECCV, 2018. 1, 3, 4", + "[22] Yingyan Li, Yuntao Chen, Jiawei He, and Zhaoxiang Zhang. Densely constrained depth estimator for monocular 3d object detection. In ECCV, 2022. 1, 6, 7", + "[23] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In ECCV, 2022. 1, 2, 6, 7", + "[24] Qing Lian, Peiliang Li, and Xiaozhi Chen. Monojsg: Joint semantic and geometric cost volume for monocular 3d object detection. In CVPR, 2022. 6", + "[25] Philipp Lindenberger, Paul-Edouard Sarlin, Viktor Larsson, and Marc Pollefeys. Pixel-perfect structure-from-motion with featuremetric refinement. In ICCV, 2021. 4", + "[26] Yingfei Liu, Junjie Yan, Fan Jia, Shuai Lin Li, Qi Gao, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petrv2: A unified framework for 3d perception from multi-camera images. 
arXiv preprint arXiv:2206.01256, 2022. 1", + "[27] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2):91-110, 2004. 2", + "[28] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In ICCV, 2021. 6", + "[29] Xinzhu Ma, Shinan Liu, Zhiyi Xia, Hongwen Zhang, Xingyu Zeng, and Wanli Ouyang. Rethinking pseudo-lidar representation. In ECCV, 2020. 6", + "[30] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics, 31(5):1147-1163, 2015. 2", + "[31] Lachlan Nicholson, Michael Milford, and Niko Sünderhauf. Quadricslam: Dual quadrics from object detections as landmarks in object-oriented slam. IEEE Robotics and Automation Letters, 4(1):1-8, 2018. 3", + "[32] Liang Peng, Xiaopei Wu, Zheng Yang, Haifeng Liu, and Deng Cai. Did-m3d: Decoupling instance depth for monocular 3d object detection. In ECCV, 2022. 6", + "[33] Charles R Qi, Yin Zhou, Mahyar Najibi, Pei Sun, Khoa Vo, Boyang Deng, and Dragomir Anguelov. Offboard 3d object detection from point cloud sequences. In CVPR, 2021. 2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5114", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[34] Cody Reading, Ali Harakeh, Julia Chae, and Steven L Waslander. Categorical depth distribution network for monocular 3d object detection. In CVPR, 2021. 6", + "[35] Ethan Rublee, Vincent Rabaud, Kurt Konolige, and Gary Bradski. Orb: An efficient alternative to sift or surf. In ICCV, 2011. 2, 8", + "[36] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 3", + "[37] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2", + "[38] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 2", + "[39] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 3", + "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 5, 6, 7", + "[41] Chengzhou Tang and Ping Tan. BA-net: Dense bundle adjustment networks. In ICLR, 2019. 3", + "[42] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In ICCV Workshops, 1999. 3", + "[43] Li Wang, Li Zhang, Yi Zhu, Zhi Zhang, Tong He, Mu Li, and Xiangyang Xue. Progressive coordinate transforms for monocular 3d object detection. NeurIPS, 2021. 6", + "[44] Qitai Wang, Yuntao Chen, Ziqi Pang, Naiyan Wang, and Zhaoxiang Zhang. Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672, 2021. 3, 5, 7", + "[45] Tai Wang, Qing Lian, Chenming Zhu, Xinge Zhu, and Wenwei Zhang. MV-FCOS3D++: Multi-View camera-only 4d object detection with pretrained monocular backbones. arXiv preprint arXiv:2207.12716, 2022. 6, 7", + "[46] Tai Wang, Jiangmiao Pang, and Dahua Lin. 
Monocular 3d object detection with depth from motion. In ECCV, 2022. 1, 2", + "[47] Zengran Wang, Chen Min, Zheng Ge, Yinhao Li, Zeming Li, Hongyu Yang, and Di Huang. Sts: Surround-view temporal stereo for multi-view 3d detection. arXiv preprint arXiv:2208.10145, 2022. 1", + "[48] Shichao Yang and Sebastian Scherer. Cubeslam: Monocular 3-d object slam. IEEE Transactions on Robotics, 35(4):925-938, 2019. 1, 3, 4", + "[49] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In CVPR, 2021. 2", + "[50] Yurong You, Katie Z Luo, Xiangyu Chen, Junan Chen, WeiLun Chao, Wen Sun, Bharath Hariharan, Mark Campbell, and Kilian Q Weinberger. Hindsight is 20/20: Leveraging past traversals to aid 3d perception. In ICLR, 2022. 2", + "[51] Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In CVPR, 2018. 6", + "[52] Yunpeng Zhang, Jiwen Lu, and Jie Zhou. Objects are different: Flexible monocular 3d object detection. In CVPR, 2021, 1, 3, 6, 7" + ], + "bbox": [ + 78, + 90, + 468, + 895 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5115", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_model.json b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fd616dfa5c0793a36e749708eecdb8c2096ed76e --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_model.json @@ -0,0 +1,2299 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.131, + 0.885, + 0.153 + ], + "angle": 0, + "content": "3D Video Object Detection with Learnable Object-Centric Global Optimization" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.18, + 0.805, + 0.272 + ], + "angle": 0, + "content": "Jiawei He\\(^{1,2}\\) Yuntao Chen\\(^{3}\\) Naiyan Wang\\(^{4}\\) Zhaoxiang Zhang\\(^{1,2,3}\\) \n\\(^{1}\\) CRIPAC, Institute of Automation, Chinese Academy of Sciences (CASIA) \n\\(^{2}\\) School of Artificial Intelligence, University of Chinese Academy of Sciences (UCA) \n\\(^{3}\\) Centre for Artificial Intelligence and Robotics, HKISI_CAS \\({}^{4}\\) TuSimple {hejiawei2019, zhaoxiang.zhang}@ia.ac.cn {chenyuntao08, winsty}@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.305, + 0.313, + 0.32 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.336, + 0.474, + 0.624 + ], + "angle": 0, + "content": "We explore long-term temporal visual correspondence-based optimization for 3D video object detection in this work. Visual correspondence refers to one-to-one mappings for pixels across multiple images. 
Correspondence-based optimization is the cornerstone for 3D scene reconstruction but is less studied in 3D video object detection, because moving objects violate multi-view geometry constraints and are treated as outliers during scene reconstruction. We address this issue by treating objects as first-class citizens during correspondence-based optimization. In this work, we propose BA-Det, an end-to-end estimable object detector with object-centric temporal correspondence learning and feature metric object bundle adjustment. Empirically, we verify the effectiveness and efficiency of BA-Det for multiple baseline 3D detectors under various setups. Our BA-Det achieves SOTA performance on the large-scale Waymo Open Dataset (WOD) with only marginal computation cost. Our code is available at https://github.com/jiaweihe1996/BA-Det." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.649, + 0.21, + 0.665 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.675, + 0.471, + 0.902 + ], + "angle": 0, + "content": "3D object detection is an important perception task, especially for indoor robots and autonomous-driving vehicles. Recently, image-only 3D object detection [23,52] has been proven practical and made great progress. In real-world applications, cameras capture video streams instead of unrelated frames, which suggests abundant temporal information is readily available for 3D object detection. In single-frame methods, despite simply relying on the prediction power of deep learning, finding correspondences play an important role in estimating per-pixel depth and the object pose in the camera frame. Popular correspondences include Perspective-n-Point (PnP) between pre-defined 3D keypoints [22, 52] and their 2D projections in monocular 3D object detection, and Epipolar Geometry [6,12] in multiview 3D object detection. However, unlike the single-frame" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.306, + 0.892, + 0.336 + ], + "angle": 0, + "content": "case, temporal visual correspondence has not been explored much in 3D video object detection." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.339, + 0.893, + 0.716 + ], + "angle": 0, + "content": "As summarized in Fig. 1, existing methods in 3D video object detection can be divided into three categories while each has its own limitations. Fig. 1a shows methods with object tracking [3], especially using a 3D Kalman Filter to smooth the trajectory of each detected object. This approach is detector-agnostic and thus widely adopted, but it is just an output-level smoothing process without any feature learning. As a result, the potential of video is underexploited. Fig. 1b illustrates the temporal BEV (Bird's-Eye View) approaches [14, 23, 26] for 3D video object detection. They introduce the multi-frame temporal crossattention or concatenation for BEV features in an end-to-end fusion manner. As for utilizing temporal information, temporal BEV methods rely solely on feature fusion while ignoring explicit temporal correspondence. Fig. 1c depicts stereo-from-video methods [46, 47]. These methods explicitly construct a pseudo-stereo view using ego-motion and then utilize the correspondence on the epipolar line of two frames for depth estimation. However, the use of explicit correspondence in these methods is restricted to only two frames, thereby limiting its potential to utilize more temporal information. 
Moreover, another inevitable defect of these methods is that moving objects break the epipolar constraints, which cannot be well handled, so monocular depth estimation has to be reused." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Considering the aforementioned shortcomings, we seek a new method that can handle both static and moving objects, and utilize long-term temporal correspondences. Firstly, in order to handle both static and moving objects, we draw experience from the object-centric global optimization with reprojection constraints in Simultaneous Localization and Mapping (SLAM) [21, 48]. Instead of directly estimating the depth for each pixel from temporal cues, we utilize them to construct useful temporal constraints to refine the object pose prediction from network prediction. Specifically, we construct a non-linear least-square optimization problem with the temporal correspondence constraint in an" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5106" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.138, + 0.103, + 0.83, + 0.131 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.141, + 0.252, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.11, + 0.238, + 0.23, + 0.251 + ], + "angle": 0, + "content": "(a) Temporal Filtering" + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.144, + 0.47, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.238, + 0.431, + 0.251 + ], + "angle": 0, + "content": "(b) Temporal BEV" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.147, + 0.678, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.238, + 0.65, + 0.251 + ], + "angle": 0, + "content": "(c) Stereo from Video" + }, + { + "type": "image", + "bbox": [ + 0.711, + 0.157, + 0.891, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.751, + 0.238, + 0.851, + 0.251 + ], + "angle": 0, + "content": "(d) BA-Det (Ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.153, + 0.263, + 0.816, + 0.278 + ], + "angle": 0, + "content": "Figure 1. Illustration of how to leverage temporal information in different 3D video object detection paradigms." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.304, + 0.47, + 0.439 + ], + "angle": 0, + "content": "object-centric manner to optimize the pose of objects no matter whether they are moving or not. Secondly, for long-term temporal correspondence learning, hand-crafted descriptors like SIFT [27] or ORB [35] are no longer suitable for our end-to-end object detector. Besides, the long-term temporal correspondence needs to be robust to viewpoint changes and severe occlusions, where these traditional sparse descriptors are incompetent. So, we expect to learn a dense temporal correspondence for all available frames." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.44, + 0.471, + 0.832 + ], + "angle": 0, + "content": "In this paper, as shown in Fig. 1d, we propose a 3D video object detection paradigm with learnable long-term temporal visual correspondence, called BA-Det. Specifically, the detector has two stages. In the first stage, a CenterNet-style monocular 3D object detector is applied for single-frame object detection. 
After associating the same objects in the video, the second stage detector extracts RoI features for the objects in the tracklet and matches dense local features on the object among multi-frames, called the object-centric temporal correspondence learning (OTCL) module. To make traditional object bundle adjustment (OBA) learnable, we formulate feature metric OBA. In the training time, with feature metric OBA loss, the object detection and temporal feature correspondence are learned jointly. During inference, we use the 3D object estimation from the first stage as the initial pose and associate the objects with 3D Kalman Filter. The object-centric bundle adjustment refines the pose and 3D box size of the object in each frame at the tracklet level, taking the initial object pose and temporal feature correspondence from OTCL as the input. Experiment results on the large-scale Waymo Open Dataset (WOD) show that our BA-Det could achieve state-of-the-art performance compared with other single-frame and multi-frame object detectors. We also conduct extensive ablation studies to demonstrate the effectiveness and efficiency of each component in our method." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.833, + 0.458, + 0.847 + ], + "angle": 0, + "content": "In summary, our work has the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.856, + 0.471, + 0.902 + ], + "angle": 0, + "content": "- We present a novel object-centric 3D video object detection approach \\( BA-Det \\) by learning object detection and temporal correspondence jointly." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.304, + 0.892, + 0.349 + ], + "angle": 0, + "content": "- We design the second-stage object-centric temporal correspondence learning module and the featuremetric object bundle adjustment loss." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.357, + 0.892, + 0.403 + ], + "angle": 0, + "content": "- We achieve state-of-the-art performance on the largescale WOD. The ablation study and comparisons show the effectiveness and efficiency of our BA-Det." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.304, + 0.892, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.415, + 0.642, + 0.43 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.439, + 0.742, + 0.456 + ], + "angle": 0, + "content": "2.1. 3D Video Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.75 + ], + "angle": 0, + "content": "For 3D video object detection, LiDAR-based methods [4, 8, 49] usually align point clouds from consecutive frames by compensating ego-motion and simply accumulate them to alleviate the sparsity of point clouds. Object-level methods [5, 9, 33, 50], handling the multi-frame point clouds of the tracked object, become a new trend. 3D object detection from the monocular video has not received enough attention from researchers. Kinematic3D [3] is a pioneer work decomposing kinematic information into ego-motion and target object motion. However, they only apply 3D Kalman Filter [17] based motion model for kinematic modeling and only consider the short-term temporal association (4 frames). Recently, BEVFormer [23] proposes an attentional transformer method to model the spatial and temporal relationship in the bird's-eye-view (BEV). A concurrent work, DfM [46], inspired by Multi-view Geometry, considers two frames as stereo and applies the cost volume in stereo to estimate depth. 
However, how to solve the moving objects is not well handled in this paradigm." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.757, + 0.691, + 0.773 + ], + "angle": 0, + "content": "2.2. Geometry in Videos" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Many researchers utilize 3D geometry in videos to reconstruct the scene and estimate the camera pose, which is a classic topic of computer vision. Structure from Motion (SfM) [37] and Multi-view Stereo (MVS) [38] are two paradigms to estimate the sparse and dense depth from multi-view images respectively. In robotics, 3D geometry theory is applied for Simultaneous Localization and Mapping (SLAM) [30]. To globally optimize the 3D position of" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5107" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.137 + ], + "angle": 0, + "content": "the feature points and the camera pose at each time, bundle adjustment algorithm [42] is widely applied. However, most of them can only handle static regions in the scene." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.138, + 0.471, + 0.362 + ], + "angle": 0, + "content": "In the deep learning era, with the development of object detection, object-level semantic SLAM [21, 31, 48] is rising, aiming to reconstruct the objects instead of the whole scene. These methods can handle dynamic scenes and help the object localization in the video. Besides, feature correspondence learning [36, 39] has received extensive attention in recent years. Deep learning has greatly changed the pipeline of feature matching. Differentiable bundle adjustment, like BANet [41] and NRE [11], makes the whole 3D geometry system end-to-end learnable. Unlike these works, we focus on the representation of the 3D object and integrate feature correspondence learning into 3D object detection. Utilizing the learned temporal feature correspondence, the proposed BA-Det optimizes the object pose of a tracklet in each frame." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.375, + 0.38, + 0.392 + ], + "angle": 0, + "content": "3. Preliminary: Bundle Adjustment" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.4, + 0.47, + 0.52 + ], + "angle": 0, + "content": "Bundle Adjustment [42] is a widely used globally temporal optimization technology in 3D reconstruction, which means optimally adjusting bundles of light rays from a given 3D global position to the camera center among multiframes. Specifically, we use \\(\\mathbf{P}_i = [x_i,y_i,z_i]^\\top\\) to denote the \\(i\\)-th 3D point coordinates in the global reference frame. According to the perspective camera model, the image coordinates of the projected 3D point at time \\(t\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.525, + 0.47, + 0.557 + ], + "angle": 0, + "content": "\\[\n\\Pi \\left(\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}\\right) = \\frac {1}{z _ {i} ^ {t}} \\mathbf {K} \\left(\\mathbf {R} _ {c g} ^ {t} \\mathbf {P} _ {i} + \\mathbf {t} _ {c g} ^ {t}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.562, + 0.47, + 0.65 + ], + "angle": 0, + "content": "where \\(\\Pi\\) is the perspective projection transformation, \\(\\mathbf{T}_{cg}^{t} = [\\mathbf{R}_{cg}^{t}|\\mathbf{t}_{cg}^{t}]\\) is the camera extrinsic matrix at time \\(t\\). 
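For readers who prefer code to notation, a minimal PyTorch sketch of the projection in Eq. (1), together with the reprojection residual that bundle adjustment (Eq. (2) below) accumulates over points and frames, might look as follows; the intrinsics, pose, 3D points, and 2D observations are synthetic placeholders, and a full Gauss-Newton or Levenberg-Marquardt solver is deliberately omitted.

```python
import torch

def project(P, R_cg, t_cg, K):
    """Eq. (1): project global 3D points P (m, 3) into the image plane of one frame."""
    P_cam = P @ R_cg.T + t_cg          # rotate/translate into the camera frame
    uvw = P_cam @ K.T                  # apply the intrinsics
    return uvw[:, :2] / uvw[:, 2:3]    # perspective division by the depth z_i^t

# Synthetic placeholders: identity rotation, a small translation, pinhole intrinsics.
K = torch.tensor([[2000.0, 0.0, 960.0],
                  [0.0, 2000.0, 640.0],
                  [0.0, 0.0, 1.0]])
R_cg = torch.eye(3)
t_cg = torch.tensor([0.0, 0.0, 0.5])
P = torch.tensor([[1.0, 0.5, 10.0], [-2.0, 0.2, 20.0]])   # two 3D points
p_obs = torch.tensor([[1170.0, 745.0], [760.0, 665.0]])   # their (noisy) 2D observations

# Reprojection residual for one frame; Eq. (2) sums such squared residuals over
# all points i and all frames t while jointly adjusting the poses and the points.
residual = p_obs - project(P, R_cg, t_cg, K)
cost = 0.5 * (residual ** 2).sum()
print(float(cost))
```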
\\(\\mathbf{R}_{cg}^{t}\\) and \\(\\mathbf{t}_{cg}^{t}\\) are the rotation and the translation components of \\(\\mathbf{T}_{cg}^{t}\\), respectively. \\(\\mathbf{K}\\) is the camera intrinsic matrix, and \\(z_{i}^{t}\\) is the depth of the \\(i\\)-th 3D point in the camera frame at time \\(t\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.652, + 0.47, + 0.682 + ], + "angle": 0, + "content": "Bundle adjustment is a nonlinear least-square problem to minimize the reprojection error as:" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.687, + 0.469, + 0.751 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\{\\bar {\\mathbf {T}} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\bar {\\mathbf {P}} _ {i} \\} _ {i = 1} ^ {m} = \\\\ \\underset {\\{\\mathbf {T} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\mathbf {P} _ {i} \\} _ {i = 1} ^ {m}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | \\mathbf {p} _ {i} ^ {t} - \\Pi (\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}) | | ^ {2}, \\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.756, + 0.469, + 0.817 + ], + "angle": 0, + "content": "where \\(\\mathbf{p}_i^t\\) is the observed image coordinates of 3D point \\(\\mathbf{P}_i\\) on frame \\(t\\). Bundle adjustment can be solved by Gauss-Newton or Levenberg-Marquardt algorithm effectively [1, 20]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.828, + 0.469, + 0.861 + ], + "angle": 0, + "content": "4. BA-Det: Object-centric Global Optimizable Detector" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.901 + ], + "angle": 0, + "content": "In this section, we introduce the framework of our BA-Det (Fig. 2), a learnable object-centric global optimization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "network. The pipeline consists of three parts: (1) First-stage single frame 3D object detection; (2) Second-stage object-centric temporal correspondence learning (OTCL) module; (3) Featuremetric object bundle adjustment loss for temporal feature correspondence learning." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.175, + 0.796, + 0.192 + ], + "angle": 0, + "content": "4.1. Single-frame 3D Object Detection" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.199, + 0.892, + 0.349 + ], + "angle": 0, + "content": "Given a video clip with consecutive frames \\(\\mathcal{V} = \\{I_1, I_2, \\dots, I_T\\}\\), 3D video object detection is to predict the class and the 3D bounding box of each object in each frame. Let \\(\\mathcal{O}_k^t\\) be the \\(k\\)-th object in frame \\(t\\). For the 3D bounding box \\(\\mathbf{B}_k^t\\), we estimate the size of the bounding box \\(\\mathbf{s}_t^k = [w, h, l]^\\top\\) and the object pose \\({}^k\\mathbf{T}_{co}^t\\) in the camera frame, including translation \\({}^k\\mathbf{t}_{co}^t = [x_c, y_c, z_c]^\\top\\) and rotation \\({}^k\\mathbf{r}_{co}^t = [r_x, r_y, r_z]^\\top\\). In most 3D object detection datasets, with the flat ground assumption, only yaw rotation \\(r_y\\) is considered." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.35, + 0.892, + 0.606 + ], + "angle": 0, + "content": "We basically adopt MonoFlex [52] as our first-stage 3D object detector, which is a simple and widely-used baseline method. Different from the standard MonoFlex, we make some modifications for simplicity and adaptation. 
(1) Instead of ensemble the depth from keypoints and regression, we only used the regressed depth directly. (2) The edge fusion module in MonoFlex is removed for simplicity and better performance. The output of the first-stage object detector should be kept for the second stage. The predicted 2D bounding box \\(\\mathbf{b}_k^t\\) for each object is used for the object-centric feature extraction in the second stage. The 3D estimations should be the initial pose estimation and be associated between frames. We follow ImmortalTracker [44] to associate the 3D box prediction outputs with a 3D Kalman Filter frame by frame. For convenience and clarity, we use the same index \\(k\\) to denote the objects belonging to the same tracklet in the video from now on." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.615, + 0.892, + 0.647 + ], + "angle": 0, + "content": "4.2. Object-Centric Temporal Correspondence Learning" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.654, + 0.892, + 0.805 + ], + "angle": 0, + "content": "Based on the predictions from the first-stage detector, we propose an object-centric temporal correspondence learning (OTCL) module, which plays an indispensable role in the learnable optimization. Specifically, the OTCL module is designed to learn the correspondence of the dense features for the same object among all available frames. Given a video \\(\\{I_1,I_2,\\dots ,I_T\\}\\) and image features \\(\\{\\mathbf{F}^1,\\mathbf{F}^2,\\dots ,\\mathbf{F}^T\\}\\) from the backbone in the first stage, we extract the RoI features \\(^k\\mathbf{F}^t\\in \\mathbb{R}^{H\\times W\\times C}\\) of the object \\(\\mathcal{O}_k^t\\) by the RoIAign operation [13]," + }, + { + "type": "equation", + "bbox": [ + 0.604, + 0.813, + 0.891, + 0.832 + ], + "angle": 0, + "content": "\\[\n^ k \\mathbf {F} ^ {t} = \\operatorname {R o I A l i g n} \\left(\\mathbf {F} ^ {t}, \\mathbf {b} _ {k} ^ {t}\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We apply \\(L\\) layers of cross- and self-attention operations before calculating the correspondence map to aggregate and enhance the spatial and temporal information for RoI features. Note that the object tracklet is available with the" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5108" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.146, + 0.091, + 0.833, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.355, + 0.895, + 0.399 + ], + "angle": 0, + "content": "Figure 2. A overview of the proposed BA-Det framework. The left part of the framework is the first-stage object detector to predict the 3D object and its 2D bounding box. The second stage is called OTCL module. In the OTCL module, we extract the RoI features \\(^k\\mathbf{F}^t\\) by RoIAlign, aggregate the RoI features and learn object-centric temporal correspondence using feature metric object bundle adjustment loss." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.424, + 0.471, + 0.483 + ], + "angle": 0, + "content": "aforementioned tracker, so the cross-attention is applied between the objects in different frames for the same tracklet. 
For each layer of attention operations between two adjacent frames \\( t \\) and \\( t' \\):" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.49, + 0.469, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} ^ {k} \\widetilde {\\mathbf {F}} ^ {t} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}\\right), \\\\ ^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}\\right), \\\\ ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {T}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {T}} \\left(^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}\\right), \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.559, + 0.47, + 0.605 + ], + "angle": 0, + "content": "where \\({}^k\\hat{\\mathbf{F}}^t\\in \\mathbb{R}^{HW\\times C}\\) is the flattened RoI feature, AttS is the spatial self-attention, AttT is the temporal crossattention." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.606, + 0.47, + 0.698 + ], + "angle": 0, + "content": "We then define the spatial correspondence map between two flattened RoI features after the attention operations. In frame pair \\((t, t')\\), we use \\(^k\\mathbf{f}_i\\) to denote \\(i\\)-th local feature in \\(^k\\hat{\\mathbf{F}}^{(L)}\\) (\\(i \\in \\{1, 2, \\dots, HW\\}\\)). The correspondence map \\(^k\\mathbf{C}_t^{t'} \\in \\mathbb{R}^{HW \\times HW}\\) in two frames is defined as the inner product of two features in two frames:" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.706, + 0.47, + 0.727 + ], + "angle": 0, + "content": "\\[\n^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = ^ {k} \\mathbf {f} _ {i} ^ {t} * ^ {k} \\mathbf {f} _ {i ^ {\\prime}} ^ {t ^ {\\prime}}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.766 + ], + "angle": 0, + "content": "To normalize the correspondence map, we perform softmax over all spatial locations \\( i' \\)," + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.774, + 0.47, + 0.794 + ], + "angle": 0, + "content": "\\[\n^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = \\operatorname {s o f t m a x} \\left(^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ]\\right). \\tag {6}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.802, + 0.47, + 0.819 + ], + "angle": 0, + "content": "4.3. Featuremetric Object Bundle Adjustment Loss" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.471, + 0.902 + ], + "angle": 0, + "content": "In this subsection, we present that how to adapt and integrate the Object-centric Bundle Adjustment (OBA) into our learnable BA-Det framework, based on the obtained correspondence map. Generally speaking, we formulate the featuremetric OBA loss to supervise the temporal feature" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.424, + 0.892, + 0.483 + ], + "angle": 0, + "content": "correspondence learning. 
Note that here we only derive the tracklet-level OBA loss for the same object, and for the final supervision we will sum all the tracklet-level loss in the video." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.489, + 0.893, + 0.612 + ], + "angle": 0, + "content": "First, we revisit the object-centric bundle adjustment, as shown in Fig. 3a. As proposed in Object SLAM [21, 48], OBA assumes that the object can only have rigid motion relative to the camera. For the object \\(\\mathcal{O}_k\\), we denote the 3D points as \\(\\mathcal{P}_k = \\{^k\\mathbf{P}_i\\}_{i=1}^m\\) in the object frame, 2D points as \\(\\{^k\\mathbf{p}_i^t\\}_{i=1}^m\\), 2D features at position \\(^k\\mathbf{p}_i^t\\) as \\(\\{\\mathbf{f}[^k\\mathbf{p}_i^t]\\}_{i=1}^m\\), and the camera pose in the object reference frame as \\(\\mathcal{T}_k = \\{^k\\mathbf{T}_{co}^t\\}_{t=1}^T\\), OBA can be casted as:" + }, + { + "type": "equation", + "bbox": [ + 0.505, + 0.63, + 0.892, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | ^ {k} \\mathbf {p} _ {i} ^ {t} - \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) | | _ {2} ^ {2}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.708, + 0.892, + 0.739 + ], + "angle": 0, + "content": "To make the OBA layer end-to-end learnable, we formulate featuremetric [25] OBA:" + }, + { + "type": "equation", + "bbox": [ + 0.506, + 0.757, + 0.892, + 0.834 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\\\ \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) \\right] \\right| \\right| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.902 + ], + "angle": 0, + "content": "where \\(\\mathbf{f}[\\mathbf{p}]\\) denotes the feature vector in pixel coordinates \\(\\mathbf{p}\\). Representing the 3D point \\({}^k\\mathbf{P}_i\\) in Eq. 8 with 2D points in each frame, the feature metric reprojection error of frame" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5109" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.094, + 0.406, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.234, + 0.396, + 0.247 + ], + "angle": 0, + "content": "(a) Object-centric Bundle Adjustment (OBA)." + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.253, + 0.422, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.391, + 0.411, + 0.404 + ], + "angle": 0, + "content": "(b) The computation of the featuremetric OBA loss." + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.416, + 0.462, + 0.43 + ], + "angle": 0, + "content": "Figure 3. Illustration of featuremetric object bundle adjustment." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.441, + 0.218, + 0.454 + ], + "angle": 0, + "content": "\\(t\\) could be derived as" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.459, + 0.474, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} ^ {k} e _ {i} ^ {t} = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t ^ {\\prime}} \\right] (9) \\\\ = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, \\Pi^ {- 1} \\left(^ {k} \\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {p} _ {i} ^ {t}, \\mathbf {K}, z _ {i} ^ {t}\\right), \\mathbf {K}\\right) \\right], (10) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.469, + 0.703 + ], + "angle": 0, + "content": "where \\(\\Pi^{-1}(\\cdot)\\) is the inverse projection function to lift the 2D point on the image to 3D in the object frame. \\(z_{i}^{t}\\) is the ground-truth depth of \\({}^{k}\\mathbf{p}_{i}^{t}\\) (from LiDAR point clouds only for training). In the training time, we learn the feature correspondence, given the ground-truth pose of the object \\(\\mathcal{O}_k\\), denoted as \\({}^{k}\\mathbf{T}_{co}^{t}\\) and \\({}^{k}\\mathbf{T}_{co}^{t'}\\) in frame \\(t\\) and frame \\(t'\\), respectively. Considering the feature metric reprojection loss in all frames and all points, the overall loss term for object \\(k\\) can be formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.707, + 0.47, + 0.749 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {r e p}} ^ {k} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\left| \\left| ^ {k} e _ {i} ^ {t} \\right| \\right| _ {2} ^ {2} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| ^ {k} \\mathbf {f} _ {i} ^ {t} - ^ {k} \\mathbf {f} _ {i} ^ {t ^ {\\prime}} \\right| \\right| _ {2} ^ {2} \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.76, + 0.47, + 0.852 + ], + "angle": 0, + "content": "Finally, we replace the \\(L2\\) norm in Eq. 11 with the cosine distance to measure the featuremetric reprojection error. Thus we bring the normalized correspondence map \\(\\widetilde{\\mathbf{C}}\\) in Sec. 4.2 into the loss term. With log-likelihood formulation, we formulate the featuremetric OBA loss to supervise the object-centric temporal correspondence learning:" + }, + { + "type": "equation", + "bbox": [ + 0.11, + 0.857, + 0.47, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {O B A}} ^ {k} = - \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\log \\left(^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} \\left[ ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t}, ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t ^ {\\prime}} \\right]\\right). \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.09, + 0.892, + 0.138 + ], + "angle": 0, + "content": "where \\(\\left(^k\\bar{\\mathbf{p}}_i^t,\\bar{}^k\\bar{\\mathbf{p}}_i^{t'}\\right)\\) are the ground-truth corresponding pair of the \\(i\\)-th local feature. The illustration of the loss computation is in Fig. 3b." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.147, + 0.61, + 0.162 + ], + "angle": 0, + "content": "4.4. 
Inference" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.171, + 0.892, + 0.201 + ], + "angle": 0, + "content": "After introducing the training loss design, we present the inference process of BA-Det as follows." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.202, + 0.892, + 0.277 + ], + "angle": 0, + "content": "First-stage 3D object detection and association. The first-stage detector makes the prediction of classification scores and 2D / 3D bounding boxes. The 3D bounding boxes are associated across the frames by ImmortalTracker [44]. The following process is on the tracklet level." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.278, + 0.892, + 0.413 + ], + "angle": 0, + "content": "Dense feature matching. To optimize the object pose, we need to obtain the feature correspondence in each frame for the same object. As mentioned in Sec. 4.2, the OTCL module is trained to generate a dense correspondence map in all frames. During inference, we match all \\( H \\times W \\) dense local features in RoI between adjacent two frames and between the first frame and last frame of the time window \\( [t, t + \\tau] \\). We use the RANSAC algorithm [10] to filter the feature correspondence outliers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.414, + 0.892, + 0.579 + ], + "angle": 0, + "content": "Feature tracking. To form a long-term keypoint tracklet from the obtained correspondence, we leverage a graph-based algorithm. First, the matched feature pairs are constructed into a graph \\(\\mathcal{G}\\). The features are on the vertices. If the features are matched, an edge is connected in the graph. Then we track the feature for the object in all available frames. We use the association method mainly following [7]. The graph partitioning method is applied to \\(\\mathcal{G}\\) to make each connected subgraph have at most one vertex per frame. The graph cut is based on the similarity of the matched features." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.581, + 0.892, + 0.671 + ], + "angle": 0, + "content": "Object-centric bundle adjustment. In the inference stage, given the initial pose estimation and the temporal feature correspondence, we solve the object-centric bundle adjustment by Levenberg-Marquardt algorithm, and the object pose in each frame and the 3D position of the keypoints can be globally optimized between frames." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.672, + 0.892, + 0.717 + ], + "angle": 0, + "content": "Post-processing. We also apply some common post-processing in video object detection techniques like tracklet rescoring [18] and bounding box temporal interpolation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.731, + 0.633, + 0.749 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.757, + 0.697, + 0.772 + ], + "angle": 0, + "content": "5.1. Datasets and metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.781, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We conduct our experiments on the large autonomous driving dataset, Waymo Open Dataset (WOD) [40]. The WOD has different versions with different annotations and metrics. To keep the fairness of the comparisons, we report the results both on WOD v1.2 and WOD v1.3.1. The annotations on v1.2 are based on LiDAR and the official metrics are mAP IoU@0.7 and mAP IoU@0.5. 
Recently, v1.3.1 is released to support multi-camera 3D object detec" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5110" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.123, + 0.089, + 0.855, + 0.29 + ], + "angle": 0, + "content": "
LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
M3D-RPN [2]0.350.343.793.630.330.333.613.46
PatchNet [29]0.390.372.922.740.380.362.422.28
PCT [43]0.890.884.204.150.660.664.033.99
MonoJSG [24]0.970.955.655.470.910.895.345.17
GUPNet [28]2.282.2710.029.942.142.129.399.31
DEVIANT [19]2.692.6710.9810.892.522.5010.2910.20
CaDDN [34]5.034.9917.5417.314.494.4516.5116.28
DID-M3D [32]--20.6620.47--19.3719.19
BEVFormer [23]†-7.70-30.80-6.90-27.70
DCD [22]12.5712.5033.4433.2411.7811.7231.4331.25
MonoFlex [52] (Baseline)11.7011.6432.2632.0610.9610.9030.3130.12
BA-Det (Ours)†16.6016.4540.9340.5115.5715.4438.5338.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.3, + 0.893, + 0.329 + ], + "angle": 0, + "content": "Table 1. The results on WODv1.2 [40] val set. \\(\\mathrm{AP}_{70}\\) denotes AP with IoU threshold at 0.7. \\(\\mathrm{AP}_{50}\\) denotes AP IoU@0.5.\\(\\dagger\\) denotes the method utilizing temporal information." + }, + { + "type": "image", + "bbox": [ + 0.163, + 0.351, + 0.222, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.159, + 0.507, + 0.222, + 0.519 + ], + "angle": 0, + "content": "(a) Frame 8." + }, + { + "type": "image", + "bbox": [ + 0.311, + 0.351, + 0.367, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.305, + 0.507, + 0.374, + 0.519 + ], + "angle": 0, + "content": "(b) Frame 22." + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.361, + 0.532, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.453, + 0.507, + 0.522, + 0.519 + ], + "angle": 0, + "content": "(c) Frame 36." + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.355, + 0.691, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.602, + 0.507, + 0.669, + 0.519 + ], + "angle": 0, + "content": "(d) Frame 50." + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.362, + 0.834, + 0.498 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.75, + 0.507, + 0.817, + 0.519 + ], + "angle": 0, + "content": "(e) Frame 57." + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.531, + 0.895, + 0.575 + ], + "angle": 0, + "content": "Figure 4. Qualitative results from the BEV in different frames. We use blue and red boxes to denote initial predictions and optimized predictions of the object we highlight. The green and black boxes denote the other box predictions and the ground truth boxes. The ego vehicle lies at the bottom of each figure." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.596, + 0.47, + 0.64 + ], + "angle": 0, + "content": "
MethodLET-APLLET-APLET-APH3D AP703D AP50
MV-FCOS3D++ [45]†58.1174.6873.5014.6636.02
BA-DetFCOS3D(Ours)†58.4774.8573.6615.0236.89
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.65, + 0.47, + 0.707 + ], + "angle": 0, + "content": "Table 2. The multi-camera results on WODv1.3.1 [16] val set. Besides the official LET-IoU-based metrics, we also report the metrics with standard 3D IoU. All metrics are reported for the LEVEL_2 difficulty.†: use temporal information." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.734, + 0.469, + 0.854 + ], + "angle": 0, + "content": "tion, and the annotations are camera-synced boxes. On the v1.3.1 dataset, a series of new LET-IoU-based metrics [16] are introduced to slightly tolerate the localization error from the worse sensor, camera, than LiDAR. Early work mainly reports the results on the v1.2 dataset, and we only compare our methods with the ones from WOD Challenge 2022 using the v1.3.1 dataset. Because we mainly focus on rigid objects, we report the results of the VEHICLE class." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.902 + ], + "angle": 0, + "content": "LET-3D-AP and LET-3D-APL are the new metrics, relying on the Longitudinal Error Tolerant IoU (LET-IoU). LET-IoU is the 3D IoU calculated between the target ground" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.6, + 0.893, + 0.676 + ], + "angle": 0, + "content": "truth box and the prediction box aligned with ground truth along the depth that has minimum depth error. LET-3D-AP and LET-3D-APL are calculated from the average precision and the longitudinal affinity weighted average precision of the PR curve. For more details, please refer to [16]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.693, + 0.719, + 0.71 + ], + "angle": 0, + "content": "5.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.894, + 0.901 + ], + "angle": 0, + "content": "The first stage network architecture of BA-Det is the same as MonoFlex, with DLA-34 [51] backbone, the output feature map is with the stride of 8. In the second stage, the shape of the RoI feature is \\(60 \\times 80\\). The spatial and temporal attention module is stacked with 4 layers. The implementation is based on the PyTorch framework. We train our model on 8 NVIDIA RTX 3090 GPUs for 14 epochs. Adam optimizer is applied with \\(\\beta_{1} = 0.9\\) and \\(\\beta_{2} = 0.999\\). The initial learning rate is \\(5 \\times 10^{-4}\\) and weight decay is \\(10^{-5}\\). The learning rate scheduler is one-cycle. We use the Levenberg-Marquardt algorithm, implemented by DeepLM [15], to solve object-centric bundle adjustment. The maximum it" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "5111" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.089, + 0.861, + 0.214 + ], + "angle": 0, + "content": "
Method3D AP703D APH703D AP503D APH50
0-3030-5050-∞0-3030-5050-∞0-3030-5050-∞0-3030-5050-∞
L1DCD [22]32.475.941.2432.305.911.2362.7026.3510.1662.3526.2110.09
MonoFlex [52]30.645.291.0530.485.271.0461.1325.859.0360.7525.718.95
BA-Det (Ours)†37.7411.043.8637.4610.953.7971.0737.1514.8970.4636.7914.61
L2DCD [22]32.305.761.0832.195.731.0862.4825.608.9262.1325.468.86
MonoFlex [52]30.545.140.9130.375.110.9160.9125.117.9260.5424.977.85
BA-Det (Ours)†37.6110.723.3737.3310.633.3170.8336.1413.6270.2335.7913.37
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.224, + 0.894, + 0.253 + ], + "angle": 0, + "content": "Table 3. The object depth range conditioned result on WODv1.2 [40] val set. L1 and L2 denote LEVEL_1 and LEVEL_2 difficulty, respectively. †: use temporal information." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.266, + 0.885, + 0.395 + ], + "angle": 0, + "content": "
LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
MonoFlex (baseline)11.7011.6432.2632.0610.9610.9030.3130.12
Our first-stage prediction13.5713.4834.7034.4312.7212.6432.5632.32
+ 3D Tracking [44]14.0113.9335.1934.9213.1313.0533.0332.78
+ Learnable global optimization15.8515.7538.0637.7614.8714.7735.7235.44
+ Tracklet rescoring16.4316.3040.0739.7015.4115.2937.6637.31
+ Bbox interpolation16.6016.4540.9340.5115.5715.4438.5338.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.321, + 0.406, + 0.648, + 0.42 + ], + "angle": 0, + "content": "Table 4. Ablation study of each component in BA-Det." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.447, + 0.47, + 0.493 + ], + "angle": 0, + "content": "eration of the LM algorithm is 200. For the object that appears less than 10 frames or the average keypoint number is less than 5, we do not optimize it." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.512, + 0.449, + 0.529 + ], + "angle": 0, + "content": "5.3. Comparisons with State-of-the-art Methods" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.539, + 0.473, + 0.901 + ], + "angle": 0, + "content": "We compare our BA-Det with other state-of-the-art methods under two different settings. WODv1.2 is for the front view camera and WODv1.3.1 has the official evaluator for all 5 cameras. As shown in Table 1, using the FRONT camera, we outperform the SOTA method DCD [22] for about 4AP and 4APH (\\(\\sim 30\\%\\) improvement) under the 0.7 IoU threshold. Compared with the only temporal method BEVFormer [23], we have double points of 3D \\(\\mathrm{AP}_{70}\\) and 3D \\(\\mathrm{APH}_{70}\\). To validate the effectiveness, we also report the multi-camera results on the newly released WODv1.3.1, as shown in Table 2. No published work reports the results on WODv1.3.1. So, we only compare with the open-source MV-FCOS3D++ [45], the second-place winner of WOD 2022 challenge. We design the variant of BA-Det, called BA-DetFCOS3D, to adapt to the multi-camera setting. BA-DetFCOS3D is also a two-stage object detector. The first stage is the same as MV-FCOS3D++, but with the output of 2D bounding boxes. The second stage is OTCL module supervised with featuremetric object bundle adjustment loss. Although there are overlaps between 5 cameras, to simplify the framework, we ignore the object BA optimization across cameras and only conduct temporal optimization. BA-DetFCOS3D outperforms MV-FCOS3D++ under main metrics and traditional 3D IoU-based metrics." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.446, + 0.684, + 0.461 + ], + "angle": 0, + "content": "5.4. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.47, + 0.892, + 0.545 + ], + "angle": 0, + "content": "In Fig. 4, we show the object-level qualitative results of the first-stage and second-stage predictions in different frames. For a tracklet, we can refine the bounding box predictions with the help of better measurements in other frames, even if there is a long time interval between them." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.557, + 0.764, + 0.571 + ], + "angle": 0, + "content": "5.5. Distance Conditioned Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.58, + 0.892, + 0.763 + ], + "angle": 0, + "content": "We report the results with the different depth ranges in Table 3. The results indicate that the single frame methods, like DCD and MonoFlex, are seriously affected by object depth. When the object is farther away from the ego vehicle, the detection performance drops sharply. Compared with these methods, BA-Det, has the gain almost from the object far away from the ego-vehicle. The 3D \\(\\mathrm{AP}_{70}\\) and 3D \\(\\mathrm{APH}_{70}\\) are \\(3\\times\\) compared with the baseline when the object is located in \\([50\\mathrm{m},\\infty)\\), \\(2\\times\\) in \\([30\\mathrm{m},50\\mathrm{m})\\) and \\(1.2\\times\\) in \\([0\\mathrm{m},30\\mathrm{m})\\). 
This is because we utilize the long-term temporal information for each object. In a tracklet, the predictions near the ego-vehicle can help to refine the object far away." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.651, + 0.789 + ], + "angle": 0, + "content": "5.6. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We ablate each component of BA-Det. The results are shown in Table 4. The first stage detector is slightly better than the MonoFlex baseline mainly because we remove the edge fusion module, which is harmful to the truncated objects in WOD. 3D KF associates the objects and smooths the object's trajectory. This part of improvement can be regarded as similar to Kinematic3D [3]. The core of BA-Det" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5112" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.123, + 0.089, + 0.852, + 0.19 + ], + "angle": 0, + "content": "
LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
MonoFlex (baseline)11.7011.6432.2632.0610.9610.9030.3130.12
Initial prediction13.5713.4834.7034.4312.7212.6432.5632.32
Static BA14.7314.6237.8937.5613.8213.7235.6535.34
Ours16.6016.4540.9340.5115.5715.4438.5338.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.2, + 0.894, + 0.23 + ], + "angle": 0, + "content": "Table 5. Comparison between object-centric BA-Det and the traditional scene-level bundle adjustment (Static BA). Initial prediction denotes the predictions in the first stage." + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.242, + 0.885, + 0.33 + ], + "angle": 0, + "content": "
\\(\\bar{L}_{t}\\)LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
MonoFlex (baseline)-11.7011.6432.2632.0610.9610.9030.3130.12
BA-Det+ORB feature [35]2.614.0513.9635.2134.9513.1713.0833.0532.81
BA-Det+Our feature1016.6016.4540.9340.5115.5715.4438.5338.12
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.34, + 0.882, + 0.355 + ], + "angle": 0, + "content": "Table 6. Ablation study about different feature corresponding methods. \\( {\\bar{L}}_{t} \\) denotes the average keypoint tracklet length for each object." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.369, + 0.47, + 0.415 + ], + "angle": 0, + "content": "is the learnable global optimization module, which obtains the largest gain in all modules. The tracklet rescoring and temporal interpolation modules are also useful." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.424, + 0.267, + 0.439 + ], + "angle": 0, + "content": "5.7. Further Discussions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.448, + 0.47, + 0.569 + ], + "angle": 0, + "content": "BA vs. Object BA. We conduct experiments to discuss whether the object-centric manner is important in temporal optimization. We modify our pipeline and optimize the whole scene in the global frame instead of optimizing the object pose in the object frame, called Static BA in Table 5. Static BA ignores dynamic objects and treats them the same as static objects. The inability to handle dynamic objects causes decreases by about 2 AP compared with BA-Det." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.569, + 0.471, + 0.735 + ], + "angle": 0, + "content": "Temporal feature correspondence. As shown in Table 6, we ablate the features used for object-centric bundle adjustment. Compared with traditional ORB feature [35], widely used in SLAM, our feature learning module predicts denser and better correspondence. We find the average object tracklet length is 19.6 frames, and the average feature tracklet in our method is about 10 frames, which means we can keep a long feature dependency and better utilize long-range temporal information. However, the \\(\\bar{L}_t\\) of the ORB feature is only 2.6 frames. The results show the short keypoint tracklet can not refine the long-term object pose well." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Inference latency of each step in BA-Det. The inference latency of each step in BA-Det is shown in Table 7. The most time-consuming part is the first-stage object detector, more than \\(130\\mathrm{ms}\\) per image, which is the same as the MonoFlex baseline. Our BA-Det only takes an additional \\(50\\mathrm{ms}\\) latency per image, compared with the single-frame detector MonoFlex. Besides, although the dense feature correspondence is calculated, thanks to the shared backbone with the first stage detector and parallel processing for the objects, the feature correspondence module is not very time-consuming." + }, + { + "type": "table", + "bbox": [ + 0.582, + 0.366, + 0.816, + 0.448 + ], + "angle": 0, + "content": "
Total latency181.5ms
First-stage detector132.6ms
Object tracking6.6ms
Feature correspondence23.0ms
Object bundle adjustment19.3ms
" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.457, + 0.876, + 0.473 + ], + "angle": 0, + "content": "Table 7. Inference latency of each step in BA-Det per image." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.483, + 0.774, + 0.499 + ], + "angle": 0, + "content": "6. Limitations and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.508, + 0.892, + 0.614 + ], + "angle": 0, + "content": "In the current version of this paper, we only focus on the objects, such as cars, trucks, and trailers. The performance of non-rigid objects such as pedestrians has not been investigated. However, with mesh-based and skeleton-based 3D human models, we believe that a unified keypoint temporal alignment module can be designed in the future. So, we will explore the extension of BA-Det for non-rigid objects." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.626, + 0.62, + 0.642 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.652, + 0.892, + 0.773 + ], + "angle": 0, + "content": "In this paper, we propose a 3D video object detection paradigm with long-term temporal visual correspondence, called BA-Det. BA-Det is a two-stage object detector that can jointly learn object detection and temporal feature correspondence with proposed Featuremetric OBA loss. Object-centric bundle adjustment optimizes the first-stage object estimation globally in each frame. BA-Det achieves state-of-the-art performance on WOD." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.785, + 0.668, + 0.802 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.902 + ], + "angle": 0, + "content": "This work was supported in part by the Major Project for New Generation of AI (No.2018AAA0100400), the National Natural Science Foundation of China (No. 61836014, No. U21B2042, No. 62072457, No. 62006231) and the InnoHK program. The authors thank Lue Fan and Yuqi Wang for their valuable suggestions." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5113" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.156 + ], + "angle": 0, + "content": "[1] Sameer Agarwal, Keir Mierle, and The Ceres Solver Team. Ceres Solver. https://github.com/ceres-solver/ceres-solver, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.47, + 0.199 + ], + "angle": 0, + "content": "[2] Garrick Brazil and Xiaoming Liu. M3d-rpn: Monocular 3d region proposal network for object detection. In ICCV, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.468, + 0.242 + ], + "angle": 0, + "content": "[3] Garrick Brazil, Gerard Pons-Moll, Xiaoming Liu, and Bernt Schiele. Kinematic 3d object detection in monocular video. In ECCV, 2020. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.468, + 0.299 + ], + "angle": 0, + "content": "[4] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.468, + 0.356 + ], + "angle": 0, + "content": "[5] Xuesong Chen, Shaoshuai Shi, Benjamin Zhu, Ka Chun Cheung, Hang Xu, and Hongsheng Li. 
Mppnet: Multi-frame feature intertwining with proxy points for 3d temporal object detection. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.359, + 0.468, + 0.399 + ], + "angle": 0, + "content": "[6] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Dsgn: Deep stereo geometry network for 3d object detection. In CVPR, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.468, + 0.441 + ], + "angle": 0, + "content": "[7] Mihai Dusmanu, Johannes L Schonberger, and Marc Pollefeys. Multi-view optimization of local feature geometry. In ECCV, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.445, + 0.468, + 0.499 + ], + "angle": 0, + "content": "[8] Lue Fan, Ziqi Pang, Tianyuan Zhang, Yu-Xiong Wang, Hang Zhao, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Embracing single stride 3d object detector with sparse transformer. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.502, + 0.468, + 0.543 + ], + "angle": 0, + "content": "[9] Lue Fan, Yuxue Yang, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Super sparse 3d object detection. arXiv preprint arXiv:2301.02562, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.545, + 0.468, + 0.599 + ], + "angle": 0, + "content": "[10] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.602, + 0.468, + 0.642 + ], + "angle": 0, + "content": "[11] Hugo Germain, Vincent Lepetit, and Guillaume Bourmaud. Neural reprojection error: Merging feature learning and camera pose estimation. In CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.645, + 0.468, + 0.685 + ], + "angle": 0, + "content": "[12] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In ICCV, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.468, + 0.714 + ], + "angle": 0, + "content": "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In ICCV, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.717, + 0.468, + 0.757 + ], + "angle": 0, + "content": "[14] Junjie Huang and Guan Huang. Bevdet4d: Exploit temporal cues in multi-camera 3d object detection. arXiv preprint arXiv:2203.17054, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.468, + 0.813 + ], + "angle": 0, + "content": "[15] Jingwei Huang, Shan Huang, and Mingwei Sun. Deeplm: Large-scale nonlinear least squares on deep learning frameworks using stochastic domain decomposition. In CVPR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.468, + 0.871 + ], + "angle": 0, + "content": "[16] Wei-Chih Hung, Henrik Kretzschmar, Vincent Casser, Jyh-Jing Hwang, and Dragomir Anguelov. Let-3d-ap: Longitudinal error tolerant 3d average precision for camera-only 3d detection. arXiv preprint arXiv:2206.07705, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[17] Rudolph Emil Kalman. A new approach to linear filtering and prediction problems. 1960. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[18] Kai Kang, Hongsheng Li, Junjie Yan, Xingyu Zeng, Bin Yang, Tong Xiao, Cong Zhang, Zhe Wang, Ruohui Wang, Xiaogang Wang, et al. T-cnn: Tubelets with convolutional neural networks for object detection from videos. IEEE Transactions on Circuits and Systems for Video Technology, 28(10):2896-2907, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.177, + 0.892, + 0.23 + ], + "angle": 0, + "content": "[19] Abhinav Kumar, Garrick Brazil, Enrique Corona, Armin Parchami, and Xiaoming Liu. Deviant: Depth equivariant network for monocular 3d object detection. In ECCV, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.233, + 0.892, + 0.273 + ], + "angle": 0, + "content": "[20] Rainer Kummerle, Giorgio Grisetti, Hauke Strasdat, Kurt Konolige, and Wolfram Burgard. g2o: A general framework for graph optimization. In ICRA, 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.275, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[21] Peiliang Li, Tong Qin, et al. Stereo vision-based semantic 3d object and ego-motion tracking for autonomous driving. In ECCV, 2018. 1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.317, + 0.892, + 0.356 + ], + "angle": 0, + "content": "[22] Yingyan Li, Yuntao Chen, Jiawei He, and Zhaoxiang Zhang. Densely constrained depth estimator for monocular 3d object detection. In ECCV, 2022. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[23] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In ECCV, 2022. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.428, + 0.892, + 0.467 + ], + "angle": 0, + "content": "[24] Qing Lian, Peiliang Li, and Xiaozhi Chen. Monojsg: Joint semantic and geometric cost volume for monocular 3d object detection. In CVPR, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.509 + ], + "angle": 0, + "content": "[25] Philipp Lindenberger, Paul-Edouard Sarlin, Viktor Larsson, and Marc Pollefeys. Pixel-perfect structure-from-motion with featuremetric refinement. In ICCV, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.511, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[26] Yingfei Liu, Junjie Yan, Fan Jia, Shuai Lin Li, Qi Gao, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petrv2: A unified framework for 3d perception from multi-camera images. arXiv preprint arXiv:2206.01256, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.568, + 0.892, + 0.607 + ], + "angle": 0, + "content": "[27] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2):91-110, 2004. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.609, + 0.892, + 0.662 + ], + "angle": 0, + "content": "[28] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In ICCV, 2021. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.665, + 0.892, + 0.704 + ], + "angle": 0, + "content": "[29] Xinzhu Ma, Shinan Liu, Zhiyi Xia, Hongwen Zhang, Xingyu Zeng, and Wanli Ouyang. Rethinking pseudo-lidar representation. In ECCV, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.707, + 0.892, + 0.76 + ], + "angle": 0, + "content": "[30] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics, 31(5):1147-1163, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.762, + 0.892, + 0.816 + ], + "angle": 0, + "content": "[31] Lachlan Nicholson, Michael Milford, and Niko Sünderhauf. Quadricslam: Dual quadrics from object detections as landmarks in object-oriented slam. IEEE Robotics and Automation Letters, 4(1):1-8, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.818, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[32] Liang Peng, Xiaopei Wu, Zheng Yang, Haifeng Liu, and Deng Cai. Did-m3d: Decoupling instance depth for monocular 3d object detection. In ECCV, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[33] Charles R Qi, Yin Zhou, Mahyar Najibi, Pei Sun, Khoa Vo, Boyang Deng, and Dragomir Anguelov. Offboard 3d object detection from point cloud sequences. In CVPR, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5114" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "[34] Cody Reading, Ali Harakeh, Julia Chae, and Steven L Waslander. Categorical depth distribution network for monocular 3d object detection. In CVPR, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.47, + 0.174 + ], + "angle": 0, + "content": "[35] Ethan Rublee, Vincent Rabaud, Kurt Konolige, and Gary Bradski. Orb: An efficient alternative to sift or surf. In ICCV, 2011. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.176, + 0.469, + 0.217 + ], + "angle": 0, + "content": "[36] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.218, + 0.469, + 0.244 + ], + "angle": 0, + "content": "[37] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.245, + 0.469, + 0.285 + ], + "angle": 0, + "content": "[38] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.287, + 0.469, + 0.327 + ], + "angle": 0, + "content": "[39] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.328, + 0.469, + 0.396 + ], + "angle": 0, + "content": "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 
5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.398, + 0.469, + 0.424 + ], + "angle": 0, + "content": "[41] Chengzhou Tang and Ping Tan. BA-net: Dense bundle adjustment networks. In ICLR, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.426, + 0.469, + 0.466 + ], + "angle": 0, + "content": "[42] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In ICCV Workshops, 1999. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.467, + 0.469, + 0.508 + ], + "angle": 0, + "content": "[43] Li Wang, Li Zhang, Yi Zhu, Zhi Zhang, Tong He, Mu Li, and Xiangyang Xue. Progressive coordinate transforms for monocular 3d object detection. NeurIPS, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.509, + 0.469, + 0.55 + ], + "angle": 0, + "content": "[44] Qitai Wang, Yuntao Chen, Ziqi Pang, Naiyan Wang, and Zhaoxiang Zhang. Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672, 2021. 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.551, + 0.469, + 0.605 + ], + "angle": 0, + "content": "[45] Tai Wang, Qing Lian, Chenming Zhu, Xinge Zhu, and Wenwei Zhang. MV-FCOS3D++: Multi-View camera-only 4d object detection with pretrained monocular backbones. arXiv preprint arXiv:2207.12716, 2022. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.606, + 0.469, + 0.645 + ], + "angle": 0, + "content": "[46] Tai Wang, Jiangmiao Pang, and Dahua Lin. Monocular 3d object detection with depth from motion. In ECCV, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.647, + 0.469, + 0.701 + ], + "angle": 0, + "content": "[47] Zengran Wang, Chen Min, Zheng Ge, Yinhao Li, Zeming Li, Hongyu Yang, and Di Huang. Sts: Surround-view temporal stereo for multi-view 3d detection. arXiv preprint arXiv:2208.10145, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.702, + 0.469, + 0.743 + ], + "angle": 0, + "content": "[48] Shichao Yang and Sebastian Scherer. Cubeslam: Monocular 3-d object slam. IEEE Transactions on Robotics, 35(4):925-938, 2019. 1, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.744, + 0.469, + 0.772 + ], + "angle": 0, + "content": "[49] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.772, + 0.469, + 0.827 + ], + "angle": 0, + "content": "[50] Yurong You, Katie Z Luo, Xiangyu Chen, Junan Chen, WeiLun Chao, Wen Sun, Bharath Hariharan, Mark Campbell, and Kilian Q Weinberger. Hindsight is 20/20: Leveraging past traversals to aid 3d perception. In ICLR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.828, + 0.469, + 0.855 + ], + "angle": 0, + "content": "[51] Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In CVPR, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.856, + 0.469, + 0.896 + ], + "angle": 0, + "content": "[52] Yunpeng Zhang, Jiwen Lu, and Jie Zhou. Objects are different: Flexible monocular 3d object detection. 
In CVPR, 2021, 1, 3, 6, 7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5115" + } + ] +] \ No newline at end of file diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_origin.pdf b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b18d594ca160a1d9a3cf60ae7536ef61957f1515 --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/2347d966-1e20-4c7d-aef9-82586306a3eb_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f2bc8f456b756832fcff4bd271565099bfb2aa42a586cb535eca70f65f4775 +size 1706065 diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/full.md b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0f7efb91e0f73b8207eafb26e4b9b21bd0b9a295 --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/full.md @@ -0,0 +1,340 @@ +# 3D Video Object Detection with Learnable Object-Centric Global Optimization + +Jiawei He $^{1,2}$ Yuntao Chen $^{3}$ Naiyan Wang $^{4}$ Zhaoxiang Zhang $^{1,2,3}$ $^{1}$ CRIPAC, Institute of Automation, Chinese Academy of Sciences (CASIA) + $^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences (UCA) + $^{3}$ Centre for Artificial Intelligence and Robotics, HKISI_CAS ${}^{4}$ TuSimple {hejiawei2019, zhaoxiang.zhang}@ia.ac.cn {chenyuntao08, winsty}@gmail.com + +# Abstract + +We explore long-term temporal visual correspondence-based optimization for 3D video object detection in this work. Visual correspondence refers to one-to-one mappings for pixels across multiple images. Correspondence-based optimization is the cornerstone for 3D scene reconstruction but is less studied in 3D video object detection, because moving objects violate multi-view geometry constraints and are treated as outliers during scene reconstruction. We address this issue by treating objects as first-class citizens during correspondence-based optimization. In this work, we propose BA-Det, an end-to-end estimable object detector with object-centric temporal correspondence learning and feature metric object bundle adjustment. Empirically, we verify the effectiveness and efficiency of BA-Det for multiple baseline 3D detectors under various setups. Our BA-Det achieves SOTA performance on the large-scale Waymo Open Dataset (WOD) with only marginal computation cost. Our code is available at https://github.com/jiaweihe1996/BA-Det. + +# 1. Introduction + +3D object detection is an important perception task, especially for indoor robots and autonomous-driving vehicles. Recently, image-only 3D object detection [23,52] has been proven practical and made great progress. In real-world applications, cameras capture video streams instead of unrelated frames, which suggests abundant temporal information is readily available for 3D object detection. 
In single-frame methods, despite simply relying on the prediction power of deep learning, finding correspondences play an important role in estimating per-pixel depth and the object pose in the camera frame. Popular correspondences include Perspective-n-Point (PnP) between pre-defined 3D keypoints [22, 52] and their 2D projections in monocular 3D object detection, and Epipolar Geometry [6,12] in multiview 3D object detection. However, unlike the single-frame + +case, temporal visual correspondence has not been explored much in 3D video object detection. + +As summarized in Fig. 1, existing methods in 3D video object detection can be divided into three categories while each has its own limitations. Fig. 1a shows methods with object tracking [3], especially using a 3D Kalman Filter to smooth the trajectory of each detected object. This approach is detector-agnostic and thus widely adopted, but it is just an output-level smoothing process without any feature learning. As a result, the potential of video is underexploited. Fig. 1b illustrates the temporal BEV (Bird's-Eye View) approaches [14, 23, 26] for 3D video object detection. They introduce the multi-frame temporal crossattention or concatenation for BEV features in an end-to-end fusion manner. As for utilizing temporal information, temporal BEV methods rely solely on feature fusion while ignoring explicit temporal correspondence. Fig. 1c depicts stereo-from-video methods [46, 47]. These methods explicitly construct a pseudo-stereo view using ego-motion and then utilize the correspondence on the epipolar line of two frames for depth estimation. However, the use of explicit correspondence in these methods is restricted to only two frames, thereby limiting its potential to utilize more temporal information. Moreover, another inevitable defect of these methods is that moving objects break the epipolar constraints, which cannot be well handled, so monocular depth estimation has to be reused. + +Considering the aforementioned shortcomings, we seek a new method that can handle both static and moving objects, and utilize long-term temporal correspondences. Firstly, in order to handle both static and moving objects, we draw experience from the object-centric global optimization with reprojection constraints in Simultaneous Localization and Mapping (SLAM) [21, 48]. Instead of directly estimating the depth for each pixel from temporal cues, we utilize them to construct useful temporal constraints to refine the object pose prediction from network prediction. Specifically, we construct a non-linear least-square optimization problem with the temporal correspondence constraint in an + +![](images/2cc27a219910578f4d25292cc16f5ef97904971da87ec69b8f310e5d18f12cb8.jpg) + +![](images/6e822a8cc35d7e2583b7483aa9711927eac18464f585bd700a1f2f3e948b68bc.jpg) +(a) Temporal Filtering + +![](images/517fea5a971ba8f9e67c909eae73f58a281b03e329341deb4e0025f889f8ea8b.jpg) +(b) Temporal BEV + +![](images/874fca725128ad06d0955a7d09274b22692e3fd314fec64283fa9efd18bee26f.jpg) +(c) Stereo from Video +Figure 1. Illustration of how to leverage temporal information in different 3D video object detection paradigms. + +![](images/e0b64a5a75ddc04a0f3a61c7ac1830cd84242b58f0c8e354e7d9d55ed48364d1.jpg) +(d) BA-Det (Ours) + +object-centric manner to optimize the pose of objects no matter whether they are moving or not. Secondly, for long-term temporal correspondence learning, hand-crafted descriptors like SIFT [27] or ORB [35] are no longer suitable for our end-to-end object detector. 
Besides, the long-term temporal correspondence needs to be robust to viewpoint changes and severe occlusions, where these traditional sparse descriptors are incompetent. So, we expect to learn a dense temporal correspondence for all available frames. + +In this paper, as shown in Fig. 1d, we propose a 3D video object detection paradigm with learnable long-term temporal visual correspondence, called BA-Det. Specifically, the detector has two stages. In the first stage, a CenterNet-style monocular 3D object detector is applied for single-frame object detection. After associating the same objects in the video, the second stage detector extracts RoI features for the objects in the tracklet and matches dense local features on the object among multi-frames, called the object-centric temporal correspondence learning (OTCL) module. To make traditional object bundle adjustment (OBA) learnable, we formulate feature metric OBA. In the training time, with feature metric OBA loss, the object detection and temporal feature correspondence are learned jointly. During inference, we use the 3D object estimation from the first stage as the initial pose and associate the objects with 3D Kalman Filter. The object-centric bundle adjustment refines the pose and 3D box size of the object in each frame at the tracklet level, taking the initial object pose and temporal feature correspondence from OTCL as the input. Experiment results on the large-scale Waymo Open Dataset (WOD) show that our BA-Det could achieve state-of-the-art performance compared with other single-frame and multi-frame object detectors. We also conduct extensive ablation studies to demonstrate the effectiveness and efficiency of each component in our method. + +In summary, our work has the following contributions: + +- We present a novel object-centric 3D video object detection approach $BA-Det$ by learning object detection and temporal correspondence jointly. + +- We design the second-stage object-centric temporal correspondence learning module and the featuremetric object bundle adjustment loss. +- We achieve state-of-the-art performance on the largescale WOD. The ablation study and comparisons show the effectiveness and efficiency of our BA-Det. + +# 2. Related Work + +# 2.1. 3D Video Object Detection + +For 3D video object detection, LiDAR-based methods [4, 8, 49] usually align point clouds from consecutive frames by compensating ego-motion and simply accumulate them to alleviate the sparsity of point clouds. Object-level methods [5, 9, 33, 50], handling the multi-frame point clouds of the tracked object, become a new trend. 3D object detection from the monocular video has not received enough attention from researchers. Kinematic3D [3] is a pioneer work decomposing kinematic information into ego-motion and target object motion. However, they only apply 3D Kalman Filter [17] based motion model for kinematic modeling and only consider the short-term temporal association (4 frames). Recently, BEVFormer [23] proposes an attentional transformer method to model the spatial and temporal relationship in the bird's-eye-view (BEV). A concurrent work, DfM [46], inspired by Multi-view Geometry, considers two frames as stereo and applies the cost volume in stereo to estimate depth. However, how to solve the moving objects is not well handled in this paradigm. + +# 2.2. Geometry in Videos + +Many researchers utilize 3D geometry in videos to reconstruct the scene and estimate the camera pose, which is a classic topic of computer vision. 
Structure from Motion (SfM) [37] and Multi-View Stereo (MVS) [38] are two paradigms for estimating sparse and dense depth from multi-view images, respectively. In robotics, 3D geometry theory is applied to Simultaneous Localization and Mapping (SLAM) [30]. To globally optimize the 3D positions of the feature points and the camera pose at each time step, the bundle adjustment algorithm [42] is widely applied. However, most of these methods can only handle static regions of the scene.

In the deep learning era, with the development of object detection, object-level semantic SLAM [21, 31, 48] is on the rise, aiming to reconstruct objects instead of the whole scene. These methods can handle dynamic scenes and help localize objects in the video. Besides, feature correspondence learning [36, 39] has received extensive attention in recent years, and deep learning has greatly changed the feature matching pipeline. Differentiable bundle adjustment, as in BA-Net [41] and NRE [11], makes the whole 3D geometry system end-to-end learnable. Unlike these works, we focus on the representation of the 3D object and integrate feature correspondence learning into 3D object detection. Utilizing the learned temporal feature correspondence, the proposed BA-Det optimizes the object pose of a tracklet in each frame.

# 3. Preliminary: Bundle Adjustment

Bundle adjustment [42] is a widely used global temporal optimization technique in 3D reconstruction; it optimally adjusts the bundles of light rays from given 3D global positions to the camera centers across multiple frames. Specifically, we use $\mathbf{P}_i = [x_i, y_i, z_i]^\top$ to denote the $i$-th 3D point coordinates in the global reference frame. According to the perspective camera model, the image coordinates of the projected 3D point at time $t$ are

$$
\Pi\left(\mathbf{T}_{cg}^{t}, \mathbf{P}_{i}, \mathbf{K}\right) = \frac{1}{z_{i}^{t}} \mathbf{K}\left(\mathbf{R}_{cg}^{t} \mathbf{P}_{i} + \mathbf{t}_{cg}^{t}\right), \tag{1}
$$

where $\Pi$ is the perspective projection transformation and $\mathbf{T}_{cg}^{t} = [\mathbf{R}_{cg}^{t}|\mathbf{t}_{cg}^{t}]$ is the camera extrinsic matrix at time $t$. $\mathbf{R}_{cg}^{t}$ and $\mathbf{t}_{cg}^{t}$ are the rotation and translation components of $\mathbf{T}_{cg}^{t}$, respectively. $\mathbf{K}$ is the camera intrinsic matrix, and $z_{i}^{t}$ is the depth of the $i$-th 3D point in the camera frame at time $t$.

Bundle adjustment is a nonlinear least-squares problem that minimizes the reprojection error:

$$
\{\bar{\mathbf{T}}_{cg}^{t}\}_{t=1}^{T}, \{\bar{\mathbf{P}}_{i}\}_{i=1}^{m} = \underset{\{\mathbf{T}_{cg}^{t}\}_{t=1}^{T}, \{\mathbf{P}_{i}\}_{i=1}^{m}}{\arg\min} \frac{1}{2} \sum_{i=1}^{m} \sum_{t=1}^{T} \left\|\mathbf{p}_{i}^{t} - \Pi(\mathbf{T}_{cg}^{t}, \mathbf{P}_{i}, \mathbf{K})\right\|^{2}, \tag{2}
$$

where $\mathbf{p}_i^t$ denotes the observed image coordinates of the 3D point $\mathbf{P}_i$ in frame $t$. Bundle adjustment can be solved efficiently with the Gauss-Newton or Levenberg-Marquardt algorithm [1, 20].

# 4. BA-Det: Object-centric Global Optimizable Detector

In this section, we introduce the framework of our BA-Det (Fig. 2), a learnable object-centric global optimization network.
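Before detailing the pipeline, it may help to see the classical machinery of Sec. 3 in code. The following is a minimal, self-contained sketch of the reprojection residual and least-squares solve of Eq. (2). It is only an illustration, not the implementation used in this paper (our object bundle adjustment is solved with DeepLM, see Sec. 5.2); it assumes, for brevity, that every point is observed in every frame and uses SciPy's Levenberg-Marquardt solver as a stand-in.

```python
# Illustrative sketch of Eq. (2): minimize reprojection error over camera
# poses and 3D points. Not the paper's implementation; for intuition only.
import numpy as np
from scipy.optimize import least_squares
from scipy.spatial.transform import Rotation


def project(rvec, tvec, P, K):
    """Eq. (1): perspective projection of world points P into one camera."""
    P_cam = Rotation.from_rotvec(rvec).apply(P) + tvec   # R_cg P + t_cg
    uv = (K @ P_cam.T).T                                 # K (R P + t)
    return uv[:, :2] / uv[:, 2:3]                        # divide by depth z


def reprojection_residuals(params, n_frames, n_points, obs, K):
    """Stack p_i^t - Pi(T_cg^t, P_i, K) over all frames t and points i."""
    poses = params[: 6 * n_frames].reshape(n_frames, 6)   # (rvec, tvec) per frame
    points = params[6 * n_frames:].reshape(n_points, 3)   # 3D points P_i
    res = []
    for t in range(n_frames):
        pred = project(poses[t, :3], poses[t, 3:], points, K)
        res.append((obs[t] - pred).ravel())               # observed minus projected
    return np.concatenate(res)


def bundle_adjust(init_poses, init_points, obs, K):
    """obs: (n_frames, n_points, 2) observed pixel coordinates p_i^t."""
    n_frames, n_points = obs.shape[:2]
    x0 = np.concatenate([init_poses.ravel(), init_points.ravel()])
    sol = least_squares(reprojection_residuals, x0, method="lm",
                        args=(n_frames, n_points, obs, K))
    return (sol.x[: 6 * n_frames].reshape(n_frames, 6),
            sol.x[6 * n_frames:].reshape(n_points, 3))
```

In practice the Jacobian of Eq. (2) is sparse, and dedicated solvers such as Ceres [1] or g2o [20] exploit that structure rather than treating the problem densely as above.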
The pipeline consists of three parts: (1) first-stage single-frame 3D object detection; (2) the second-stage object-centric temporal correspondence learning (OTCL) module; and (3) the featuremetric object bundle adjustment loss for temporal feature correspondence learning.

![](images/74cd65fd2b29cc070de67106dd04cc4555be34afa559af4aac47122d124093c3.jpg)
Figure 2. An overview of the proposed BA-Det framework. The left part of the framework is the first-stage object detector, which predicts the 3D object and its 2D bounding box. The second stage is the OTCL module, in which we extract the RoI features $^k\mathbf{F}^t$ by RoIAlign, aggregate them, and learn object-centric temporal correspondence using the featuremetric object bundle adjustment loss.

# 4.1. Single-frame 3D Object Detection

Given a video clip with consecutive frames $\mathcal{V} = \{I_1, I_2, \dots, I_T\}$, the goal of 3D video object detection is to predict the class and the 3D bounding box of each object in each frame. Let $\mathcal{O}_k^t$ be the $k$-th object in frame $t$. For the 3D bounding box $\mathbf{B}_k^t$, we estimate the size of the bounding box $\mathbf{s}_t^k = [w, h, l]^\top$ and the object pose ${}^k\mathbf{T}_{co}^t$ in the camera frame, consisting of the translation ${}^k\mathbf{t}_{co}^t = [x_c, y_c, z_c]^\top$ and the rotation ${}^k\mathbf{r}_{co}^t = [r_x, r_y, r_z]^\top$. In most 3D object detection datasets, under the flat-ground assumption, only the yaw rotation $r_y$ is considered.

We adopt MonoFlex [52], a simple and widely used baseline, as our first-stage 3D object detector. Different from the standard MonoFlex, we make two modifications for simplicity and adaptation. (1) Instead of ensembling the depth estimated from keypoints with the regressed depth, we directly use the regressed depth. (2) The edge fusion module in MonoFlex is removed, which simplifies the model and improves performance. The outputs of the first-stage detector are kept for the second stage. The predicted 2D bounding box $\mathbf{b}_k^t$ of each object is used for object-centric feature extraction in the second stage, and the 3D estimates serve as the initial poses and are associated across frames. We follow ImmortalTracker [44] and associate the predicted 3D boxes with a 3D Kalman Filter frame by frame. For convenience and clarity, from now on we use the same index $k$ to denote the objects belonging to the same tracklet in the video.

# 4.2. Object-Centric Temporal Correspondence Learning

Based on the predictions from the first-stage detector, we propose an object-centric temporal correspondence learning (OTCL) module, which plays an indispensable role in the learnable optimization. Specifically, the OTCL module is designed to learn the correspondence of dense features for the same object across all available frames. Given a video $\{I_1, I_2, \dots, I_T\}$ and image features $\{\mathbf{F}^1, \mathbf{F}^2, \dots, \mathbf{F}^T\}$ from the backbone of the first stage, we extract the RoI features $^k\mathbf{F}^t \in \mathbb{R}^{H\times W\times C}$ of the object $\mathcal{O}_k^t$ by the RoIAlign operation [13],

$$
^k\mathbf{F}^{t} = \operatorname{RoIAlign}\left(\mathbf{F}^{t}, \mathbf{b}_{k}^{t}\right). \tag{3}
$$

We apply $L$ layers of cross- and self-attention operations before calculating the correspondence map to aggregate and enhance the spatial and temporal information of the RoI features. Note that the object tracklet is available with the
aforementioned tracker, so the cross-attention is applied between the objects in different frames of the same tracklet. For each layer of attention operations between two adjacent frames $t$ and $t'$:

$$
\left\{
\begin{array}{l}
{}^{k}\widetilde{\mathbf{F}}^{t} = \operatorname{Att}_{\mathbf{S}}(Q, K, V) = \operatorname{Att}_{\mathbf{S}}\left({}^{k}\hat{\mathbf{F}}^{t}, {}^{k}\hat{\mathbf{F}}^{t}, {}^{k}\hat{\mathbf{F}}^{t}\right), \\
{}^{k}\widetilde{\mathbf{F}}^{t'} = \operatorname{Att}_{\mathbf{S}}(Q, K, V) = \operatorname{Att}_{\mathbf{S}}\left({}^{k}\hat{\mathbf{F}}^{t'}, {}^{k}\hat{\mathbf{F}}^{t'}, {}^{k}\hat{\mathbf{F}}^{t'}\right), \\
{}^{k}\hat{\mathbf{F}}^{t'} = \operatorname{Att}_{\mathbf{T}}(Q, K, V) = \operatorname{Att}_{\mathbf{T}}\left({}^{k}\widetilde{\mathbf{F}}^{t'}, {}^{k}\widetilde{\mathbf{F}}^{t}, {}^{k}\widetilde{\mathbf{F}}^{t}\right),
\end{array}
\right. \tag{4}
$$

where ${}^k\hat{\mathbf{F}}^t \in \mathbb{R}^{HW\times C}$ is the flattened RoI feature, $\operatorname{Att}_{\mathbf{S}}$ is the spatial self-attention, and $\operatorname{Att}_{\mathbf{T}}$ is the temporal cross-attention.

We then define the spatial correspondence map between two flattened RoI features after the attention operations. For the frame pair $(t, t')$, we use ${}^k\mathbf{f}_i$ to denote the $i$-th local feature of ${}^k\hat{\mathbf{F}}^{(L)}$ ($i \in \{1, 2, \dots, HW\}$). The correspondence map ${}^k\mathbf{C}_t^{t'} \in \mathbb{R}^{HW \times HW}$ between the two frames is defined as the inner product of the features in the two frames:

$$
{}^{k}\mathbf{C}_{t}^{t'}[i, i'] = {}^{k}\mathbf{f}_{i}^{t} \cdot {}^{k}\mathbf{f}_{i'}^{t'}. \tag{5}
$$

To normalize the correspondence map, we apply a softmax over all spatial locations $i'$,

$$
{}^{k}\widetilde{\mathbf{C}}_{t}^{t'}[i, i'] = \operatorname{softmax}\left({}^{k}\mathbf{C}_{t}^{t'}[i, i']\right). \tag{6}
$$

# 4.3. Featuremetric Object Bundle Adjustment Loss

In this subsection, we present how to adapt and integrate object-centric bundle adjustment (OBA) into our learnable BA-Det framework based on the obtained correspondence map. In short, we formulate the featuremetric OBA loss to supervise temporal feature correspondence learning. Note that here we only derive the tracklet-level OBA loss for a single object; for the final supervision, we sum the tracklet-level losses over all tracklets in the video.

First, we revisit object-centric bundle adjustment, as shown in Fig. 3a. As proposed in object SLAM [21, 48], OBA assumes that the object can only undergo rigid motion relative to the camera. For the object $\mathcal{O}_k$, we denote the 3D points in the object frame as $\mathcal{P}_k = \{{}^k\mathbf{P}_i\}_{i=1}^m$, the 2D points as $\{{}^k\mathbf{p}_i^t\}_{i=1}^m$, the 2D features at positions ${}^k\mathbf{p}_i^t$ as $\{\mathbf{f}[{}^k\mathbf{p}_i^t]\}_{i=1}^m$, and the relative poses between the object and the camera as $\mathcal{T}_k = \{{}^k\mathbf{T}_{co}^t\}_{t=1}^T$. OBA can then be cast as:

$$
\bar{\mathcal{T}}_{k}, \bar{\mathcal{P}}_{k} = \underset{\mathcal{T}_{k}, \mathcal{P}_{k}}{\arg\min} \frac{1}{2} \sum_{i=1}^{m} \sum_{t=1}^{T} \left\|{}^{k}\mathbf{p}_{i}^{t} - \Pi\left({}^{k}\mathbf{T}_{co}^{t}, {}^{k}\mathbf{P}_{i}, \mathbf{K}\right)\right\|_{2}^{2}. \tag{7}
$$
To make the OBA layer end-to-end learnable, we formulate a featuremetric [25] OBA:

$$
\bar{\mathcal{T}}_{k}, \bar{\mathcal{P}}_{k} = \underset{\mathcal{T}_{k}, \mathcal{P}_{k}}{\arg\min} \frac{1}{2} \sum_{i=1}^{m} \sum_{t=1}^{T} \sum_{t'=1}^{T} \left\|\mathbf{f}\left[{}^{k}\mathbf{p}_{i}^{t}\right] - \mathbf{f}\left[\Pi\left({}^{k}\mathbf{T}_{co}^{t'}, {}^{k}\mathbf{P}_{i}, \mathbf{K}\right)\right]\right\|_{2}^{2}, \tag{8}
$$

where $\mathbf{f}[\mathbf{p}]$ denotes the feature vector at pixel coordinates $\mathbf{p}$.

![](images/ef98e7ea62dc61502455000a265fa8cd48e2e184d3cdc7ef4f53dede49c8590c.jpg)
(a) Object-centric Bundle Adjustment (OBA).

![](images/78b3ef0e982b6c9bdb7883b4e810b53b5e392e729a3254cf5b50a4a6844eb517.jpg)
(b) The computation of the featuremetric OBA loss.

Figure 3. Illustration of featuremetric object bundle adjustment.

Representing the 3D point ${}^k\mathbf{P}_i$ in Eq. 8 with the 2D points in each frame, the featuremetric reprojection error of frame $t$ can be derived as

$$
{}^{k}e_{i}^{t} = \sum_{t'=1}^{T}\left(\mathbf{f}\left[{}^{k}\mathbf{p}_{i}^{t}\right] - \mathbf{f}\left[{}^{k}\mathbf{p}_{i}^{t'}\right]\right) \tag{9}
$$

$$
= \sum_{t'=1}^{T}\left(\mathbf{f}\left[{}^{k}\mathbf{p}_{i}^{t}\right] - \mathbf{f}\left[\Pi\left({}^{k}\mathbf{T}_{co}^{t'}, \Pi^{-1}\left({}^{k}\mathbf{T}_{co}^{t}, {}^{k}\mathbf{p}_{i}^{t}, \mathbf{K}, z_{i}^{t}\right), \mathbf{K}\right)\right]\right), \tag{10}
$$

where $\Pi^{-1}(\cdot)$ is the inverse projection function that lifts a 2D point on the image to 3D in the object frame, and $z_{i}^{t}$ is the ground-truth depth of ${}^{k}\mathbf{p}_{i}^{t}$ (obtained from LiDAR point clouds and used only for training). At training time, we learn the feature correspondence given the ground-truth poses of the object $\mathcal{O}_k$, denoted ${}^{k}\mathbf{T}_{co}^{t}$ and ${}^{k}\mathbf{T}_{co}^{t'}$ in frames $t$ and $t'$, respectively. Considering the featuremetric reprojection loss over all frames and all points, the overall loss term for object $k$ can be formulated as

$$
\mathcal{L}_{\mathrm{rep}}^{k} = \sum_{i=1}^{m} \sum_{t=1}^{T} \left\|{}^{k}e_{i}^{t}\right\|_{2}^{2} = \sum_{i=1}^{m} \sum_{t=1}^{T} \sum_{t'=1}^{T} \left\|{}^{k}\mathbf{f}_{i}^{t} - {}^{k}\mathbf{f}_{i}^{t'}\right\|_{2}^{2}. \tag{11}
$$

Finally, we replace the $L_2$ norm in Eq. 11 with the cosine distance to measure the featuremetric reprojection error. This brings the normalized correspondence map $\widetilde{\mathbf{C}}$ from Sec. 4.2 into the loss term. With a log-likelihood formulation, the featuremetric OBA loss that supervises the object-centric temporal correspondence learning is

$$
\mathcal{L}_{\mathrm{OBA}}^{k} = -\sum_{i=1}^{m} \sum_{t=1}^{T} \sum_{t'=1}^{T} \log\left({}^{k}\widetilde{\mathbf{C}}_{t}^{t'}\left[{}^{k}\bar{\mathbf{p}}_{i}^{t}, {}^{k}\bar{\mathbf{p}}_{i}^{t'}\right]\right), \tag{12}
$$

where $\left({}^{k}\bar{\mathbf{p}}_{i}^{t}, {}^{k}\bar{\mathbf{p}}_{i}^{t'}\right)$ is the ground-truth corresponding pair of the $i$-th local feature. The loss computation is illustrated in Fig. 3b.
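To make the loss concrete, the following PyTorch-style sketch evaluates Eqs. (5), (6), and (12) for a single object and a single frame pair. Tensor names, shapes, and the function signature are illustrative assumptions rather than the released implementation.

```python
# Sketch of the correspondence map (Eqs. 5-6) and the featuremetric OBA loss
# (Eq. 12) for one object and one frame pair. Illustrative only.
import torch
import torch.nn.functional as F

def featuremetric_oba_loss(feat_t, feat_tp, idx_t, idx_tp):
    """
    feat_t, feat_tp: flattened RoI features of the same object in frames t and t',
                     each of shape (HW, C), after the L attention layers of Eq. (4).
    idx_t, idx_tp:   (N,) long tensors of ground-truth corresponding locations
                     (the pairs \bar{p}_i^t, \bar{p}_i^{t'} obtained by reprojecting
                     LiDAR depths with the ground-truth object poses).
    """
    # Eq. (5): correspondence map as inner products between all location pairs.
    corr = feat_t @ feat_tp.t()                # (HW, HW)
    # Eq. (6): normalize over the locations i' of frame t' (log-space for the loss).
    log_corr = F.log_softmax(corr, dim=1)      # log of normalized map C~_t^{t'}
    # Eq. (12): negative log-likelihood at the ground-truth matching pairs.
    # The full loss sums this term over all frame pairs (t, t') of a tracklet
    # and over all tracklets in the video.
    return -log_corr[idx_t, idx_tp].sum()
```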
# 4.4. Inference

Having introduced the training loss design, we now present the inference process of BA-Det.

First-stage 3D object detection and association. The first-stage detector predicts classification scores and 2D/3D bounding boxes. The 3D bounding boxes are associated across frames by ImmortalTracker [44]. The subsequent steps operate at the tracklet level.

Dense feature matching. To optimize the object pose, we need the feature correspondence of the same object in each frame. As described in Sec. 4.2, the OTCL module is trained to generate a dense correspondence map over all frames. During inference, we match all $H \times W$ dense local features in the RoI between two adjacent frames and between the first and last frames of the time window $[t, t + \tau]$. We use the RANSAC algorithm [10] to filter out correspondence outliers.

Feature tracking. To form long-term keypoint tracklets from the obtained correspondences, we leverage a graph-based algorithm. First, the matched feature pairs are organized into a graph $\mathcal{G}$, where features are vertices and an edge connects two features if they are matched. Then we track each feature of the object across all available frames, mainly following the association method of [7]. Graph partitioning is applied to $\mathcal{G}$ so that each connected subgraph has at most one vertex per frame; the graph cut is based on the similarity of the matched features.

Object-centric bundle adjustment. At inference time, given the initial pose estimates and the temporal feature correspondence, we solve the object-centric bundle adjustment with the Levenberg-Marquardt algorithm, so that the object pose in each frame and the 3D positions of the keypoints are globally optimized across frames.

Post-processing. We also apply common video object detection post-processing techniques, such as tracklet rescoring [18] and temporal bounding box interpolation.

# 5. Experiments

# 5.1. Datasets and Metrics

We conduct our experiments on the large-scale autonomous driving dataset Waymo Open Dataset (WOD) [40]. The WOD has different versions with different annotations and metrics. For fair comparison, we report results on both WOD v1.2 and WOD v1.3.1. The annotations in v1.2 are LiDAR-based, and the official metrics are mAP IoU@0.7 and mAP IoU@0.5. Recently, v1.3.1 was released to support multi-camera 3D object detec

| Method | 3D AP70 (L1) | 3D APH70 (L1) | 3D AP50 (L1) | 3D APH50 (L1) | 3D AP70 (L2) | 3D APH70 (L2) | 3D AP50 (L2) | 3D APH50 (L2) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| M3D-RPN [2] | 0.35 | 0.34 | 3.79 | 3.63 | 0.33 | 0.33 | 3.61 | 3.46 |
| PatchNet [29] | 0.39 | 0.37 | 2.92 | 2.74 | 0.38 | 0.36 | 2.42 | 2.28 |
| PCT [43] | 0.89 | 0.88 | 4.20 | 4.15 | 0.66 | 0.66 | 4.03 | 3.99 |
| MonoJSG [24] | 0.97 | 0.95 | 5.65 | 5.47 | 0.91 | 0.89 | 5.34 | 5.17 |
| GUPNet [28] | 2.28 | 2.27 | 10.02 | 9.94 | 2.14 | 2.12 | 9.39 | 9.31 |
| DEVIANT [19] | 2.69 | 2.67 | 10.98 | 10.89 | 2.52 | 2.50 | 10.29 | 10.20 |
| CaDDN [34] | 5.03 | 4.99 | 17.54 | 17.31 | 4.49 | 4.45 | 16.51 | 16.28 |
| DID-M3D [32] | - | - | 20.66 | 20.47 | - | - | 19.37 | 19.19 |
| BEVFormer [23]† | - | 7.70 | - | 30.80 | - | 6.90 | - | 27.70 |
| DCD [22] | 12.57 | 12.50 | 33.44 | 33.24 | 11.78 | 11.72 | 31.43 | 31.25 |
| MonoFlex [52] (Baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12 |
| BA-Det (Ours)† | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12 |

+ +![](images/9bc8c1e610c110feff67ca906a04d20d0d390da94ddc1c45023f293a563e17cf.jpg) +(a) Frame 8. + +![](images/22ef3b70ba0a8ea6e8ccb2a1340a098e22ddf7a939afbb041f4a28033a0ae807.jpg) +(b) Frame 22. + +![](images/03675c24d7595c5dd5b492c1edab946d262e3e2a0304c93bdad36e3c5c8d811d.jpg) +(c) Frame 36. + +![](images/42ee884163f0f7960fc9376f4173a67b305cac23dc0d4de2f89dbebbf2fa10c8.jpg) +(d) Frame 50. + +![](images/b624b040eb17806ab75ffc531723fde36d14b5d44f797b9305f63ae77a47871f.jpg) +(e) Frame 57. +Figure 4. Qualitative results from the BEV in different frames. We use blue and red boxes to denote initial predictions and optimized predictions of the object we highlight. The green and black boxes denote the other box predictions and the ground truth boxes. The ego vehicle lies at the bottom of each figure. + +Table 1. The results on WODv1.2 [40] val set. $\mathrm{AP}_{70}$ denotes AP with IoU threshold at 0.7. $\mathrm{AP}_{50}$ denotes AP IoU@0.5. $\dagger$ denotes the method utilizing temporal information. + +

| Method | LET-APL | LET-AP | LET-APH | 3D AP70 | 3D AP50 |
| --- | --- | --- | --- | --- | --- |
| MV-FCOS3D++ [45]† | 58.11 | 74.68 | 73.50 | 14.66 | 36.02 |
| BA-DetFCOS3D (Ours)† | 58.47 | 74.85 | 73.66 | 15.02 | 36.89 |

Table 2. The multi-camera results on the WODv1.3.1 [16] val set. Besides the official LET-IoU-based metrics, we also report metrics with the standard 3D IoU. All metrics are reported for the LEVEL_2 difficulty. †: uses temporal information.

tion, and the annotations are camera-synced boxes. On the v1.3.1 dataset, a series of new LET-IoU-based metrics [16] is introduced to partially tolerate the localization error of the camera, which is a weaker sensor for depth than LiDAR. Earlier work mainly reports results on the v1.2 dataset, so on the v1.3.1 dataset we compare only with the entries from the WOD Challenge 2022. Because we mainly focus on rigid objects, we report results for the VEHICLE class.

LET-3D-AP and LET-3D-APL are the new metrics, relying on the Longitudinal Error Tolerant IoU (LET-IoU). LET-IoU is the 3D IoU between the target ground-truth box and the predicted box after the prediction is aligned with the ground truth along the depth direction so as to minimize the depth error. LET-3D-AP and LET-3D-APL are then computed from the PR curve as the average precision and the longitudinal-affinity-weighted average precision, respectively. For more details, please refer to [16].

# 5.2. Implementation Details

The first-stage network architecture of BA-Det is the same as MonoFlex, with a DLA-34 [51] backbone; the output feature map has a stride of 8. In the second stage, the shape of the RoI feature is $60 \times 80$. The spatial and temporal attention module is stacked for 4 layers. The implementation is based on the PyTorch framework. We train our model on 8 NVIDIA RTX 3090 GPUs for 14 epochs. The Adam optimizer is used with $\beta_{1} = 0.9$ and $\beta_{2} = 0.999$. The initial learning rate is $5 \times 10^{-4}$ and the weight decay is $10^{-5}$. The learning rate scheduler is one-cycle. We use the Levenberg-Marquardt algorithm, implemented by DeepLM [15], to solve the object-centric bundle adjustment. The maximum it

| Difficulty | Method | AP70 (0–30 m) | AP70 (30–50 m) | AP70 (50 m–∞) | APH70 (0–30 m) | APH70 (30–50 m) | APH70 (50 m–∞) | AP50 (0–30 m) | AP50 (30–50 m) | AP50 (50 m–∞) | APH50 (0–30 m) | APH50 (30–50 m) | APH50 (50 m–∞) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| L1 | DCD [22] | 32.47 | 5.94 | 1.24 | 32.30 | 5.91 | 1.23 | 62.70 | 26.35 | 10.16 | 62.35 | 26.21 | 10.09 |
| L1 | MonoFlex [52] | 30.64 | 5.29 | 1.05 | 30.48 | 5.27 | 1.04 | 61.13 | 25.85 | 9.03 | 60.75 | 25.71 | 8.95 |
| L1 | BA-Det (Ours)† | 37.74 | 11.04 | 3.86 | 37.46 | 10.95 | 3.79 | 71.07 | 37.15 | 14.89 | 70.46 | 36.79 | 14.61 |
| L2 | DCD [22] | 32.30 | 5.76 | 1.08 | 32.19 | 5.73 | 1.08 | 62.48 | 25.60 | 8.92 | 62.13 | 25.46 | 8.86 |
| L2 | MonoFlex [52] | 30.54 | 5.14 | 0.91 | 30.37 | 5.11 | 0.91 | 60.91 | 25.11 | 7.92 | 60.54 | 24.97 | 7.85 |
| L2 | BA-Det (Ours)† | 37.61 | 10.72 | 3.37 | 37.33 | 10.63 | 3.31 | 70.83 | 36.14 | 13.62 | 70.23 | 35.79 | 13.37 |

Table 3. Results conditioned on the object depth range on the WODv1.2 [40] val set. L1 and L2 denote the LEVEL_1 and LEVEL_2 difficulty, respectively. †: uses temporal information.

| Method | 3D AP70 (L1) | 3D APH70 (L1) | 3D AP50 (L1) | 3D APH50 (L1) | 3D AP70 (L2) | 3D APH70 (L2) | 3D AP50 (L2) | 3D APH50 (L2) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MonoFlex (baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12 |
| Our first-stage prediction | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32 |
| + 3D Tracking [44] | 14.01 | 13.93 | 35.19 | 34.92 | 13.13 | 13.05 | 33.03 | 32.78 |
| + Learnable global optimization | 15.85 | 15.75 | 38.06 | 37.76 | 14.87 | 14.77 | 35.72 | 35.44 |
| + Tracklet rescoring | 16.43 | 16.30 | 40.07 | 39.70 | 15.41 | 15.29 | 37.66 | 37.31 |
| + Bbox interpolation | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12 |

Table 4. Ablation study of each component in BA-Det.

eration count of the LM algorithm is 200. For objects that appear in fewer than 10 frames or have fewer than 5 keypoints on average, we skip the optimization.

# 5.3. Comparisons with State-of-the-art Methods

We compare our BA-Det with other state-of-the-art methods under two different settings: WODv1.2 covers the front-view camera, and WODv1.3.1 provides the official evaluator for all 5 cameras. As shown in Table 1, using the FRONT camera, we outperform the SOTA method DCD [22] by about 4 AP and 4 APH (a $\sim 30\%$ relative improvement) under the 0.7 IoU threshold. Compared with BEVFormer [23], the only temporal method, our 3D $\mathrm{AP}_{70}$ and 3D $\mathrm{APH}_{70}$ are roughly twice as high. To further validate the effectiveness, we also report multi-camera results on the newly released WODv1.3.1, as shown in Table 2. Since no published work reports results on WODv1.3.1, we only compare with the open-source MV-FCOS3D++ [45], the second-place winner of the WOD Challenge 2022. We design a variant of BA-Det, called BA-DetFCOS3D, to adapt to the multi-camera setting. BA-DetFCOS3D is also a two-stage object detector: the first stage is the same as MV-FCOS3D++ but additionally outputs 2D bounding boxes, and the second stage is the OTCL module supervised with the featuremetric object bundle adjustment loss. Although the fields of view of the 5 cameras overlap, we ignore object BA across cameras and only conduct temporal optimization to simplify the framework. BA-DetFCOS3D outperforms MV-FCOS3D++ on both the main metrics and the traditional 3D IoU-based metrics.

# 5.4. Qualitative Results

In Fig. 4, we show object-level qualitative results of the first-stage and second-stage predictions in different frames. For a tracklet, we can refine the bounding box predictions with the help of better measurements in other frames, even if there is a long time interval between them.

# 5.5. Distance Conditioned Results

We report the results for different depth ranges in Table 3. The results indicate that single-frame methods, such as DCD and MonoFlex, are severely affected by object depth: when an object is farther away from the ego vehicle, detection performance drops sharply. In contrast, the gains of BA-Det come mostly from objects far away from the ego vehicle. The 3D $\mathrm{AP}_{70}$ and 3D $\mathrm{APH}_{70}$ are about $3\times$ the baseline for objects located in $[50\mathrm{m},\infty)$, $2\times$ in $[30\mathrm{m},50\mathrm{m})$, and $1.2\times$ in $[0\mathrm{m},30\mathrm{m})$. This is because we utilize long-term temporal information for each object: within a tracklet, the predictions made near the ego vehicle help refine the same object when it is far away.

# 5.6. Ablation Study

We ablate each component of BA-Det, with results shown in Table 4. The first-stage detector is slightly better than the MonoFlex baseline, mainly because we remove the edge fusion module, which is harmful for truncated objects in WOD. The 3D Kalman Filter associates the objects and smooths each object's trajectory; this part of the improvement is comparable to Kinematic3D [3]. The core of BA-Det

| Method | 3D AP70 (L1) | 3D APH70 (L1) | 3D AP50 (L1) | 3D APH50 (L1) | 3D AP70 (L2) | 3D APH70 (L2) | 3D AP50 (L2) | 3D APH50 (L2) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MonoFlex (baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12 |
| Initial prediction | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32 |
| Static BA | 14.73 | 14.62 | 37.89 | 37.56 | 13.82 | 13.72 | 35.65 | 35.34 |
| Ours | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12 |

Table 5. Comparison between the object-centric BA-Det and traditional scene-level bundle adjustment (Static BA). Initial prediction denotes the first-stage predictions.

| Method | $\bar{L}_t$ | 3D AP70 (L1) | 3D APH70 (L1) | 3D AP50 (L1) | 3D APH50 (L1) | 3D AP70 (L2) | 3D APH70 (L2) | 3D AP50 (L2) | 3D APH50 (L2) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MonoFlex (baseline) | - | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12 |
| BA-Det + ORB feature [35] | 2.6 | 14.05 | 13.96 | 35.21 | 34.95 | 13.17 | 13.08 | 33.05 | 32.81 |
| BA-Det + Our feature | 10 | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12 |

is the learnable global optimization module, which brings the largest gain among all components. The tracklet rescoring and temporal interpolation modules are also useful.

# 5.7. Further Discussions

BA vs. Object BA. We conduct experiments to examine whether the object-centric formulation is important for temporal optimization. We modify our pipeline to optimize the whole scene in the global frame instead of optimizing the object poses in the object frame; this variant is called Static BA in Table 5. Static BA ignores dynamic objects and treats them the same as static ones. The inability to handle dynamic objects causes a drop of about 2 AP compared with BA-Det.

Temporal feature correspondence. As shown in Table 6, we ablate the features used for object-centric bundle adjustment. Compared with the traditional ORB feature [35], which is widely used in SLAM, our feature learning module predicts denser and better correspondences. We find that the average object tracklet length is 19.6 frames and the average feature tracklet length of our method is about 10 frames, which means we maintain long feature dependencies and better utilize long-range temporal information. In contrast, the $\bar{L}_t$ of the ORB feature is only 2.6 frames; such short keypoint tracklets cannot refine the long-term object poses well.

Inference latency of each step in BA-Det. The inference latency of each step in BA-Det is shown in Table 7. The most time-consuming part is the first-stage object detector, at more than 130 ms per image, which is the same as the MonoFlex baseline. Overall, BA-Det adds only about 50 ms of latency per image compared with the single-frame detector MonoFlex. Moreover, although dense feature correspondence is computed, the feature correspondence module is not very time-consuming, thanks to the backbone shared with the first-stage detector and parallel processing over objects.

Table 6. Ablation study on different feature correspondence methods. $\bar{L}_{t}$ denotes the average keypoint tracklet length for each object.

| Step | Latency |
| --- | --- |
| Total latency | 181.5 ms |
| First-stage detector | 132.6 ms |
| Object tracking | 6.6 ms |
| Feature correspondence | 23.0 ms |
| Object bundle adjustment | 19.3 ms |

Table 7. Inference latency of each step in BA-Det per image.

# 6. Limitations and Future Work

In the current version of this paper, we focus only on rigid objects, such as cars, trucks, and trailers. The performance on non-rigid objects such as pedestrians has not been investigated. However, with mesh-based and skeleton-based 3D human models, we believe a unified keypoint temporal alignment module can be designed in the future. Therefore, we will explore extending BA-Det to non-rigid objects.

# 7. Conclusion

In this paper, we propose a 3D video object detection paradigm with long-term temporal visual correspondence, called BA-Det. BA-Det is a two-stage object detector that jointly learns object detection and temporal feature correspondence with the proposed featuremetric OBA loss. Object-centric bundle adjustment globally optimizes the first-stage object estimates in each frame at the tracklet level. BA-Det achieves state-of-the-art performance on WOD.

# Acknowledgements

This work was supported in part by the Major Project for New Generation of AI (No. 2018AAA0100400), the National Natural Science Foundation of China (No. 61836014, No. U21B2042, No. 62072457, No. 62006231), and the InnoHK program. The authors thank Lue Fan and Yuqi Wang for their valuable suggestions.

# References

[1] Sameer Agarwal, Keir Mierle, and The Ceres Solver Team. Ceres Solver. https://github.com/ceres-solver/ceres-solver, 2022. 3
[2] Garrick Brazil and Xiaoming Liu. M3d-rpn: Monocular 3d region proposal network for object detection. In ICCV, 2019. 6
[3] Garrick Brazil, Gerard Pons-Moll, Xiaoming Liu, and Bernt Schiele. Kinematic 3d object detection in monocular video. In ECCV, 2020. 1, 2, 7
[4] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 2
[5] Xuesong Chen, Shaoshuai Shi, Benjamin Zhu, Ka Chun Cheung, Hang Xu, and Hongsheng Li. Mppnet: Multi-frame feature intertwining with proxy points for 3d temporal object detection. In ECCV, 2022. 2
[6] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Dsgn: Deep stereo geometry network for 3d object detection. In CVPR, 2020. 1
[7] Mihai Dusmanu, Johannes L Schonberger, and Marc Pollefeys. Multi-view optimization of local feature geometry. In ECCV, 2020. 5
[8] Lue Fan, Ziqi Pang, Tianyuan Zhang, Yu-Xiong Wang, Hang Zhao, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Embracing single stride 3d object detector with sparse transformer. In CVPR, 2022. 2
[9] Lue Fan, Yuxue Yang, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Super sparse 3d object detection. arXiv preprint arXiv:2301.02562, 2023. 2
[10] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 5
[11] Hugo Germain, Vincent Lepetit, and Guillaume Bourmaud. Neural reprojection error: Merging feature learning and camera pose estimation. In CVPR, 2021. 3
[12] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In ICCV, 2021. 1
[13] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV, 2017. 3
[14] Junjie Huang and Guan Huang. Bevdet4d: Exploit temporal cues in multi-camera 3d object detection. arXiv preprint arXiv:2203.17054, 2022.
1 +[15] Jingwei Huang, Shan Huang, and Mingwei Sun. Deeplm: Large-scale nonlinear least squares on deep learning frameworks using stochastic domain decomposition. In CVPR, 2021. 6 +[16] Wei-Chih Hung, Henrik Kretzschmar, Vincent Casser, Jyh-Jing Hwang, and Dragomir Anguelov. Let-3d-ap: Longitudinal error tolerant 3d average precision for camera-only 3d detection. arXiv preprint arXiv:2206.07705, 2022. 6 +[17] Rudolph Emil Kalman. A new approach to linear filtering and prediction problems. 1960. 2 + +[18] Kai Kang, Hongsheng Li, Junjie Yan, Xingyu Zeng, Bin Yang, Tong Xiao, Cong Zhang, Zhe Wang, Ruohui Wang, Xiaogang Wang, et al. T-cnn: Tubelets with convolutional neural networks for object detection from videos. IEEE Transactions on Circuits and Systems for Video Technology, 28(10):2896-2907, 2017. 5 +[19] Abhinav Kumar, Garrick Brazil, Enrique Corona, Armin Parchami, and Xiaoming Liu. Deviant: Depth equivariant network for monocular 3d object detection. In ECCV, 2022. 6 +[20] Rainer Kummerle, Giorgio Grisetti, Hauke Strasdat, Kurt Konolige, and Wolfram Burgard. g2o: A general framework for graph optimization. In ICRA, 2011. 3 +[21] Peiliang Li, Tong Qin, et al. Stereo vision-based semantic 3d object and ego-motion tracking for autonomous driving. In ECCV, 2018. 1, 3, 4 +[22] Yingyan Li, Yuntao Chen, Jiawei He, and Zhaoxiang Zhang. Densely constrained depth estimator for monocular 3d object detection. In ECCV, 2022. 1, 6, 7 +[23] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In ECCV, 2022. 1, 2, 6, 7 +[24] Qing Lian, Peiliang Li, and Xiaozhi Chen. Monojsg: Joint semantic and geometric cost volume for monocular 3d object detection. In CVPR, 2022. 6 +[25] Philipp Lindenberger, Paul-Edouard Sarlin, Viktor Larsson, and Marc Pollefeys. Pixel-perfect structure-from-motion with featuremetric refinement. In ICCV, 2021. 4 +[26] Yingfei Liu, Junjie Yan, Fan Jia, Shuai Lin Li, Qi Gao, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petrv2: A unified framework for 3d perception from multi-camera images. arXiv preprint arXiv:2206.01256, 2022. 1 +[27] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2):91-110, 2004. 2 +[28] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In ICCV, 2021. 6 +[29] Xinzhu Ma, Shinan Liu, Zhiyi Xia, Hongwen Zhang, Xingyu Zeng, and Wanli Ouyang. Rethinking pseudo-lidar representation. In ECCV, 2020. 6 +[30] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics, 31(5):1147-1163, 2015. 2 +[31] Lachlan Nicholson, Michael Milford, and Niko Sünderhauf. Quadricslam: Dual quadrics from object detections as landmarks in object-oriented slam. IEEE Robotics and Automation Letters, 4(1):1-8, 2018. 3 +[32] Liang Peng, Xiaopei Wu, Zheng Yang, Haifeng Liu, and Deng Cai. Did-m3d: Decoupling instance depth for monocular 3d object detection. In ECCV, 2022. 6 +[33] Charles R Qi, Yin Zhou, Mahyar Najibi, Pei Sun, Khoa Vo, Boyang Deng, and Dragomir Anguelov. Offboard 3d object detection from point cloud sequences. In CVPR, 2021. 2 + +[34] Cody Reading, Ali Harakeh, Julia Chae, and Steven L Waslander. 
Categorical depth distribution network for monocular 3d object detection. In CVPR, 2021. 6 +[35] Ethan Rublee, Vincent Rabaud, Kurt Konolige, and Gary Bradski. Orb: An efficient alternative to sift or surf. In ICCV, 2011. 2, 8 +[36] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 3 +[37] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2 +[38] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 2 +[39] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 3 +[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 5, 6, 7 +[41] Chengzhou Tang and Ping Tan. BA-net: Dense bundle adjustment networks. In ICLR, 2019. 3 +[42] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In ICCV Workshops, 1999. 3 +[43] Li Wang, Li Zhang, Yi Zhu, Zhi Zhang, Tong He, Mu Li, and Xiangyang Xue. Progressive coordinate transforms for monocular 3d object detection. NeurIPS, 2021. 6 +[44] Qitai Wang, Yuntao Chen, Ziqi Pang, Naiyan Wang, and Zhaoxiang Zhang. Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672, 2021. 3, 5, 7 +[45] Tai Wang, Qing Lian, Chenming Zhu, Xinge Zhu, and Wenwei Zhang. MV-FCOS3D++: Multi-View camera-only 4d object detection with pretrained monocular backbones. arXiv preprint arXiv:2207.12716, 2022. 6, 7 +[46] Tai Wang, Jiangmiao Pang, and Dahua Lin. Monocular 3d object detection with depth from motion. In ECCV, 2022. 1, 2 +[47] Zengran Wang, Chen Min, Zheng Ge, Yinhao Li, Zeming Li, Hongyu Yang, and Di Huang. Sts: Surround-view temporal stereo for multi-view 3d detection. arXiv preprint arXiv:2208.10145, 2022. 1 +[48] Shichao Yang and Sebastian Scherer. Cubeslam: Monocular 3-d object slam. IEEE Transactions on Robotics, 35(4):925-938, 2019. 1, 3, 4 +[49] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In CVPR, 2021. 2 +[50] Yurong You, Katie Z Luo, Xiangyu Chen, Junan Chen, WeiLun Chao, Wen Sun, Bharath Hariharan, Mark Campbell, and Kilian Q Weinberger. Hindsight is 20/20: Leveraging past traversals to aid 3d perception. In ICLR, 2022. 2 +[51] Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In CVPR, 2018. 6 +[52] Yunpeng Zhang, Jiwen Lu, and Jie Zhou. Objects are different: Flexible monocular 3d object detection. 
In CVPR, 2021, 1, 3, 6, 7 \ No newline at end of file diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/images.zip b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ae76a05f069de830e13408ae761c22a819628688 --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:628e6ec4c53a5cc5a54a36f9f5fdd3e9e6ebdbaf2a596bd1c8dc6cafd31b5dd7 +size 577776 diff --git a/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/layout.json b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..901fa8af1e6d4cd12d5b60a189b65872e054db41 --- /dev/null +++ b/2023/3D Video Object Detection With Learnable Object-Centric Global Optimization/layout.json @@ -0,0 +1,9169 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 52, + 103, + 541, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 103, + 541, + 121 + ], + "spans": [ + { + "bbox": [ + 52, + 103, + 541, + 121 + ], + "type": "text", + "content": "3D Video Object Detection with Learnable Object-Centric Global Optimization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "spans": [ + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": "Jiawei He" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " Yuntao Chen" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " Naiyan Wang" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " Zhaoxiang Zhang" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " CRIPAC, Institute of Automation, Chinese Academy of Sciences (CASIA) \n" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " School of Artificial Intelligence, University of Chinese Academy of Sciences (UCA) \n" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " Centre for Artificial Intelligence and Robotics, HKISI_CAS " + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "inline_equation", + "content": "{}^{4}" + }, + { + "bbox": [ + 83, + 142, + 492, + 215 + ], + "type": "text", + "content": " TuSimple {hejiawei2019, zhaoxiang.zhang}@ia.ac.cn {chenyuntao08, winsty}@gmail.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "spans": [ + { + "bbox": [ + 143, + 241, + 191, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 266, + 290, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 266, + 290, + 494 + ], + "spans": [ + { + "bbox": [ + 46, + 266, + 290, + 494 + ], + "type": "text", + "content": "We explore long-term temporal visual correspondence-based optimization for 3D video object detection in this work. Visual correspondence refers to one-to-one mappings for pixels across multiple images. Correspondence-based optimization is the cornerstone for 3D scene reconstruction but is less studied in 3D video object detection, because moving objects violate multi-view geometry constraints and are treated as outliers during scene reconstruction. We address this issue by treating objects as first-class citizens during correspondence-based optimization. In this work, we propose BA-Det, an end-to-end estimable object detector with object-centric temporal correspondence learning and feature metric object bundle adjustment. Empirically, we verify the effectiveness and efficiency of BA-Det for multiple baseline 3D detectors under various setups. Our BA-Det achieves SOTA performance on the large-scale Waymo Open Dataset (WOD) with only marginal computation cost. Our code is available at https://github.com/jiaweihe1996/BA-Det." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 128, + 526 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 534, + 288, + 714 + ], + "type": "text", + "content": "3D object detection is an important perception task, especially for indoor robots and autonomous-driving vehicles. Recently, image-only 3D object detection [23,52] has been proven practical and made great progress. In real-world applications, cameras capture video streams instead of unrelated frames, which suggests abundant temporal information is readily available for 3D object detection. In single-frame methods, despite simply relying on the prediction power of deep learning, finding correspondences play an important role in estimating per-pixel depth and the object pose in the camera frame. Popular correspondences include Perspective-n-Point (PnP) between pre-defined 3D keypoints [22, 52] and their 2D projections in monocular 3D object detection, and Epipolar Geometry [6,12] in multiview 3D object detection. However, unlike the single-frame" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 242, + 545, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 242, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 305, + 242, + 545, + 266 + ], + "type": "text", + "content": "case, temporal visual correspondence has not been explored much in 3D video object detection." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 268, + 546, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 268, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 268, + 546, + 567 + ], + "type": "text", + "content": "As summarized in Fig. 
1, existing methods in 3D video object detection can be divided into three categories while each has its own limitations. Fig. 1a shows methods with object tracking [3], especially using a 3D Kalman Filter to smooth the trajectory of each detected object. This approach is detector-agnostic and thus widely adopted, but it is just an output-level smoothing process without any feature learning. As a result, the potential of video is underexploited. Fig. 1b illustrates the temporal BEV (Bird's-Eye View) approaches [14, 23, 26] for 3D video object detection. They introduce the multi-frame temporal crossattention or concatenation for BEV features in an end-to-end fusion manner. As for utilizing temporal information, temporal BEV methods rely solely on feature fusion while ignoring explicit temporal correspondence. Fig. 1c depicts stereo-from-video methods [46, 47]. These methods explicitly construct a pseudo-stereo view using ego-motion and then utilize the correspondence on the epipolar line of two frames for depth estimation. However, the use of explicit correspondence in these methods is restricted to only two frames, thereby limiting its potential to utilize more temporal information. Moreover, another inevitable defect of these methods is that moving objects break the epipolar constraints, which cannot be well handled, so monocular depth estimation has to be reused." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "content": "Considering the aforementioned shortcomings, we seek a new method that can handle both static and moving objects, and utilize long-term temporal correspondences. Firstly, in order to handle both static and moving objects, we draw experience from the object-centric global optimization with reprojection constraints in Simultaneous Localization and Mapping (SLAM) [21, 48]. Instead of directly estimating the depth for each pixel from temporal cues, we utilize them to construct useful temporal constraints to refine the object pose prediction from network prediction. Specifically, we construct a non-linear least-square optimization problem with the temporal correspondence constraint in an" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5106" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 84, + 81, + 507, + 103 + ], + "blocks": [ + { + "bbox": [ + 84, + 81, + 507, + 103 + ], + "lines": [ + { + "bbox": [ + 84, + 81, + 507, + 103 + ], + "spans": [ + { + "bbox": [ + 84, + 81, + 507, + 103 + ], + "type": "image", + "image_path": "2cc27a219910578f4d25292cc16f5ef97904971da87ec69b8f310e5d18f12cb8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 53, + 111, + 154, + 182 + ], + "blocks": [ + { + "bbox": [ + 53, + 111, + 154, + 182 + ], + "lines": [ + { + "bbox": [ + 53, + 111, + 154, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 111, + 154, + 182 + ], + "type": "image", + "image_path": "6e822a8cc35d7e2583b7483aa9711927eac18464f585bd700a1f2f3e948b68bc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 188, + 140, + 198 + ], + "lines": [ + { + "bbox": [ + 67, + 188, + 140, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 188, + 140, + 198 + ], + "type": "text", + "content": "(a) Temporal Filtering" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 178, + 114, + 287, + 182 + ], + "blocks": [ + { + "bbox": [ + 178, + 114, + 287, + 182 + ], + "lines": [ + { + "bbox": [ + 178, + 114, + 287, + 182 + ], + "spans": [ + { + "bbox": [ + 178, + 114, + 287, + 182 + ], + "type": "image", + "image_path": "517fea5a971ba8f9e67c909eae73f58a281b03e329341deb4e0025f889f8ea8b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 188, + 263, + 198 + ], + "lines": [ + { + "bbox": [ + 201, + 188, + 263, + 198 + ], + "spans": [ + { + "bbox": [ + 201, + 188, + 263, + 198 + ], + "type": "text", + "content": "(b) Temporal BEV" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 116, + 414, + 182 + ], + "blocks": [ + { + "bbox": [ + 307, + 116, + 414, + 182 + ], + "lines": [ + { + "bbox": [ + 307, + 116, + 414, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 116, + 414, + 182 + ], + "type": "image", + "image_path": "874fca725128ad06d0955a7d09274b22692e3fd314fec64283fa9efd18bee26f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 188, + 397, + 198 + ], + "lines": [ + { + "bbox": [ + 325, + 188, + 397, + 198 + ], + "spans": [ + { + "bbox": [ + 325, + 188, + 397, + 198 + ], + "type": "text", + "content": "(c) Stereo from Video" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 93, + 208, + 499, + 220 + ], + "lines": [ + { + "bbox": [ + 93, + 208, + 499, + 220 + ], + "spans": [ + { + "bbox": [ + 93, + 208, + 499, + 220 + ], + "type": "text", + "content": "Figure 1. Illustration of how to leverage temporal information in different 3D video object detection paradigms." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 435, + 124, + 545, + 182 + ], + "blocks": [ + { + "bbox": [ + 435, + 124, + 545, + 182 + ], + "lines": [ + { + "bbox": [ + 435, + 124, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 435, + 124, + 545, + 182 + ], + "type": "image", + "image_path": "e0b64a5a75ddc04a0f3a61c7ac1830cd84242b58f0c8e354e7d9d55ed48364d1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 188, + 520, + 198 + ], + "lines": [ + { + "bbox": [ + 459, + 188, + 520, + 198 + ], + "spans": [ + { + "bbox": [ + 459, + 188, + 520, + 198 + ], + "type": "text", + "content": "(d) BA-Det (Ours)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 240, + 287, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 240, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 46, + 240, + 287, + 347 + ], + "type": "text", + "content": "object-centric manner to optimize the pose of objects no matter whether they are moving or not. Secondly, for long-term temporal correspondence learning, hand-crafted descriptors like SIFT [27] or ORB [35] are no longer suitable for our end-to-end object detector. Besides, the long-term temporal correspondence needs to be robust to viewpoint changes and severe occlusions, where these traditional sparse descriptors are incompetent. So, we expect to learn a dense temporal correspondence for all available frames." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 348, + 288, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 348, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 348, + 288, + 658 + ], + "type": "text", + "content": "In this paper, as shown in Fig. 1d, we propose a 3D video object detection paradigm with learnable long-term temporal visual correspondence, called BA-Det. Specifically, the detector has two stages. In the first stage, a CenterNet-style monocular 3D object detector is applied for single-frame object detection. After associating the same objects in the video, the second stage detector extracts RoI features for the objects in the tracklet and matches dense local features on the object among multi-frames, called the object-centric temporal correspondence learning (OTCL) module. To make traditional object bundle adjustment (OBA) learnable, we formulate feature metric OBA. In the training time, with feature metric OBA loss, the object detection and temporal feature correspondence are learned jointly. During inference, we use the 3D object estimation from the first stage as the initial pose and associate the objects with 3D Kalman Filter. The object-centric bundle adjustment refines the pose and 3D box size of the object in each frame at the tracklet level, taking the initial object pose and temporal feature correspondence from OTCL as the input. Experiment results on the large-scale Waymo Open Dataset (WOD) show that our BA-Det could achieve state-of-the-art performance compared with other single-frame and multi-frame object detectors. We also conduct extensive ablation studies to demonstrate the effectiveness and efficiency of each component in our method." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 58, + 659, + 280, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 659, + 280, + 670 + ], + "spans": [ + { + "bbox": [ + 58, + 659, + 280, + 670 + ], + "type": "text", + "content": "In summary, our work has the following contributions:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "text", + "content": "- We present a novel object-centric 3D video object detection approach " + }, + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "inline_equation", + "content": "BA-Det" + }, + { + "bbox": [ + 47, + 677, + 288, + 714 + ], + "type": "text", + "content": " by learning object detection and temporal correspondence jointly." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 240, + 545, + 319 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 306, + 240, + 545, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 240, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 306, + 240, + 545, + 276 + ], + "type": "text", + "content": "- We design the second-stage object-centric temporal correspondence learning module and the featuremetric object bundle adjustment loss." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 282, + 545, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 282, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 306, + 282, + 545, + 319 + ], + "type": "text", + "content": "- We achieve state-of-the-art performance on the largescale WOD. The ablation study and comparisons show the effectiveness and efficiency of our BA-Det." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 328, + 392, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 328, + 392, + 340 + ], + "spans": [ + { + "bbox": [ + 306, + 328, + 392, + 340 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 347, + 454, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 347, + 454, + 361 + ], + "spans": [ + { + "bbox": [ + 306, + 347, + 454, + 361 + ], + "type": "text", + "content": "2.1. 3D Video Object Detection" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 366, + 545, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 594 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 594 + ], + "type": "text", + "content": "For 3D video object detection, LiDAR-based methods [4, 8, 49] usually align point clouds from consecutive frames by compensating ego-motion and simply accumulate them to alleviate the sparsity of point clouds. Object-level methods [5, 9, 33, 50], handling the multi-frame point clouds of the tracked object, become a new trend. 3D object detection from the monocular video has not received enough attention from researchers. Kinematic3D [3] is a pioneer work decomposing kinematic information into ego-motion and target object motion. However, they only apply 3D Kalman Filter [17] based motion model for kinematic modeling and only consider the short-term temporal association (4 frames). 
Recently, BEVFormer [23] proposes an attentional transformer method to model the spatial and temporal relationship in the bird's-eye-view (BEV). A concurrent work, DfM [46], inspired by Multi-view Geometry, considers two frames as stereo and applies the cost volume in stereo to estimate depth. However, how to solve the moving objects is not well handled in this paradigm." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 599, + 422, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 599, + 422, + 612 + ], + "spans": [ + { + "bbox": [ + 306, + 599, + 422, + 612 + ], + "type": "text", + "content": "2.2. Geometry in Videos" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 714 + ], + "type": "text", + "content": "Many researchers utilize 3D geometry in videos to reconstruct the scene and estimate the camera pose, which is a classic topic of computer vision. Structure from Motion (SfM) [37] and Multi-view Stereo (MVS) [38] are two paradigms to estimate the sparse and dense depth from multi-view images respectively. In robotics, 3D geometry theory is applied for Simultaneous Localization and Mapping (SLAM) [30]. To globally optimize the 3D position of" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5107" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 108 + ], + "type": "text", + "content": "the feature points and the camera pose at each time, bundle adjustment algorithm [42] is widely applied. However, most of them can only handle static regions in the scene." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 109, + 288, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 109, + 288, + 286 + ], + "spans": [ + { + "bbox": [ + 46, + 109, + 288, + 286 + ], + "type": "text", + "content": "In the deep learning era, with the development of object detection, object-level semantic SLAM [21, 31, 48] is rising, aiming to reconstruct the objects instead of the whole scene. These methods can handle dynamic scenes and help the object localization in the video. Besides, feature correspondence learning [36, 39] has received extensive attention in recent years. Deep learning has greatly changed the pipeline of feature matching. Differentiable bundle adjustment, like BANet [41] and NRE [11], makes the whole 3D geometry system end-to-end learnable. Unlike these works, we focus on the representation of the 3D object and integrate feature correspondence learning into 3D object detection. Utilizing the learned temporal feature correspondence, the proposed BA-Det optimizes the object pose of a tracklet in each frame." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 297, + 232, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 232, + 310 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 232, + 310 + ], + "type": "text", + "content": "3. Preliminary: Bundle Adjustment" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "text", + "content": "Bundle Adjustment [42] is a widely used globally temporal optimization technology in 3D reconstruction, which means optimally adjusting bundles of light rays from a given 3D global position to the camera center among multiframes. Specifically, we use " + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_i = [x_i,y_i,z_i]^\\top" + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "text", + "content": " to denote the " + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "text", + "content": "-th 3D point coordinates in the global reference frame. According to the perspective camera model, the image coordinates of the projected 3D point at time " + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 316, + 287, + 411 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 415, + 287, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 415, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 86, + 415, + 287, + 441 + ], + "type": "interline_equation", + "content": "\\Pi \\left(\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}\\right) = \\frac {1}{z _ {i} ^ {t}} \\mathbf {K} \\left(\\mathbf {R} _ {c g} ^ {t} \\mathbf {P} _ {i} + \\mathbf {t} _ {c g} ^ {t}\\right), \\tag {1}", + "image_path": "e1f1681c0ac0355f5e32a5632c4e747a63ac4563e0646d99d2ddf9fd2f2f86bc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\Pi" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " is the perspective projection transformation, " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_{cg}^{t} = [\\mathbf{R}_{cg}^{t}|\\mathbf{t}_{cg}^{t}]" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " is the camera extrinsic matrix at time " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{cg}^{t}" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{cg}^{t}" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " are the rotation and the translation components of " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{T}_{cg}^{t}" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": ", respectively. " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "\\mathbf{K}" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " is the camera intrinsic matrix, and " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "z_{i}^{t}" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": " is the depth of the " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": "-th 3D point in the camera frame at time " + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 445, + 287, + 514 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 516, + 287, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 287, + 540 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 287, + 540 + ], + "type": "text", + "content": "Bundle adjustment is a nonlinear least-square problem to minimize the reprojection error as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 544, + 287, + 594 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 287, + 594 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\{\\bar {\\mathbf {T}} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\bar {\\mathbf {P}} _ {i} \\} _ {i = 1} ^ {m} = \\\\ \\underset {\\{\\mathbf {T} _ {c g} ^ {t} \\} _ {t = 1} ^ {T}, \\{\\mathbf {P} _ {i} \\} _ {i = 1} ^ {m}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | \\mathbf {p} _ {i} ^ {t} - \\Pi (\\mathbf {T} _ {c g} ^ {t}, \\mathbf {P} _ {i}, \\mathbf {K}) | | ^ {2}, \\tag {2} \\\\ \\end{array}", + "image_path": "54f8599ba415f87b74043f4dc7050bab6c6fcbe9682128e93791d24c43e16fdc.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "spans": [ + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_i^t" + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "text", + "content": " is the observed image coordinates of 3D point " + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "\\mathbf{P}_i" + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "text", + "content": " on frame " + }, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "inline_equation", + "content": "t" + 
}, + { + "bbox": [ + 46, + 598, + 287, + 647 + ], + "type": "text", + "content": ". Bundle adjustment can be solved by Gauss-Newton or Levenberg-Marquardt algorithm effectively [1, 20]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 655, + 287, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 655, + 287, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 655, + 287, + 681 + ], + "type": "text", + "content": "4. BA-Det: Object-centric Global Optimizable Detector" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 713 + ], + "type": "text", + "content": "In this section, we introduce the framework of our BA-Det (Fig. 2), a learnable object-centric global optimization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "network. The pipeline consists of three parts: (1) First-stage single frame 3D object detection; (2) Second-stage object-centric temporal correspondence learning (OTCL) module; (3) Featuremetric object bundle adjustment loss for temporal feature correspondence learning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 138, + 487, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 138, + 487, + 152 + ], + "spans": [ + { + "bbox": [ + 305, + 138, + 487, + 152 + ], + "type": "text", + "content": "4.1. Single-frame 3D Object Detection" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": "Given a video clip with consecutive frames " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "\\mathcal{V} = \\{I_1, I_2, \\dots, I_T\\}" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": ", 3D video object detection is to predict the class and the 3D bounding box of each object in each frame. Let " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_k^t" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": " be the " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": "-th object in frame " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": ". 
For the 3D bounding box " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_k^t" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": ", we estimate the size of the bounding box " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t^k = [w, h, l]^\\top" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": " and the object pose " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "{}^k\\mathbf{T}_{co}^t" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": " in the camera frame, including translation " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "{}^k\\mathbf{t}_{co}^t = [x_c, y_c, z_c]^\\top" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": " and rotation " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "{}^k\\mathbf{r}_{co}^t = [r_x, r_y, r_z]^\\top" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": ". In most 3D object detection datasets, with the flat ground assumption, only yaw rotation " + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "inline_equation", + "content": "r_y" + }, + { + "bbox": [ + 304, + 157, + 545, + 276 + ], + "type": "text", + "content": " is considered." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "text", + "content": "We basically adopt MonoFlex [52] as our first-stage 3D object detector, which is a simple and widely-used baseline method. Different from the standard MonoFlex, we make some modifications for simplicity and adaptation. (1) Instead of ensemble the depth from keypoints and regression, we only used the regressed depth directly. (2) The edge fusion module in MonoFlex is removed for simplicity and better performance. The output of the first-stage object detector should be kept for the second stage. The predicted 2D bounding box " + }, + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{b}_k^t" + }, + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "text", + "content": " for each object is used for the object-centric feature extraction in the second stage. The 3D estimations should be the initial pose estimation and be associated between frames. We follow ImmortalTracker [44] to associate the 3D box prediction outputs with a 3D Kalman Filter frame by frame. For convenience and clarity, we use the same index " + }, + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 277, + 545, + 479 + ], + "type": "text", + "content": " to denote the objects belonging to the same tracklet in the video from now on." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 487, + 545, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 487, + 545, + 512 + ], + "spans": [ + { + "bbox": [ + 305, + 487, + 545, + 512 + ], + "type": "text", + "content": "4.2. 
Object-Centric Temporal Correspondence Learning" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "spans": [ + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "content": "Based on the predictions from the first-stage detector, we propose an object-centric temporal correspondence learning (OTCL) module, which plays an indispensable role in the learnable optimization. Specifically, the OTCL module is designed to learn the correspondence of the dense features for the same object among all available frames. Given a video " + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "inline_equation", + "content": "\\{I_1,I_2,\\dots ,I_T\\}" + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "content": " and image features " + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{F}^1,\\mathbf{F}^2,\\dots ,\\mathbf{F}^T\\}" + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "content": " from the backbone in the first stage, we extract the RoI features " + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "inline_equation", + "content": "^k\\mathbf{F}^t\\in \\mathbb{R}^{H\\times W\\times C}" + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "content": " of the object " + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_k^t" + }, + { + "bbox": [ + 304, + 517, + 545, + 637 + ], + "type": "text", + "content": " by the RoIAlign operation [13]," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 369, + 643, + 545, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 643, + 545, + 658 + ], + "spans": [ + { + "bbox": [ + 369, + 643, + 545, + 658 + ], + "type": "interline_equation", + "content": "^ k \\mathbf {F} ^ {t} = \\operatorname {R o I A l i g n} \\left(\\mathbf {F} ^ {t}, \\mathbf {b} _ {k} ^ {t}\\right). \\tag {3}", + "image_path": "3168501d210604cf766fb8c25c75c47881e7da72a2da15c123b02ced70971d0e.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "We apply " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " layers of cross- and self-attention operations before calculating the correspondence map to aggregate and enhance the spatial and temporal information for RoI features. 
Note that the object tracklet is available with the" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5108" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 89, + 72, + 509, + 274 + ], + "blocks": [ + { + "bbox": [ + 89, + 72, + 509, + 274 + ], + "lines": [ + { + "bbox": [ + 89, + 72, + 509, + 274 + ], + "spans": [ + { + "bbox": [ + 89, + 72, + 509, + 274 + ], + "type": "image", + "image_path": "74cd65fd2b29cc070de67106dd04cc4555be34afa559af4aac47122d124093c3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 281, + 547, + 316 + ], + "lines": [ + { + "bbox": [ + 46, + 281, + 547, + 316 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 547, + 316 + ], + "type": "text", + "content": "Figure 2. An overview of the proposed BA-Det framework. The left part of the framework is the first-stage object detector to predict the 3D object and its 2D bounding box. The second stage is called the OTCL module. In the OTCL module, we extract the RoI features " + }, + { + "bbox": [ + 46, + 281, + 547, + 316 + ], + "type": "inline_equation", + "content": "^k\\mathbf{F}^t" + }, + { + "bbox": [ + 46, + 281, + 547, + 316 + ], + "type": "text", + "content": " by RoIAlign, aggregate the RoI features, and learn the object-centric temporal correspondence using the featuremetric object bundle adjustment loss." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "spans": [ + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "text", + "content": "aforementioned tracker, so the cross-attention is applied between the objects in different frames for the same tracklet. 
For each layer of attention operations between two adjacent frames " + }, + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 46, + 335, + 288, + 382 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 388, + 287, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 287, + 442 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} ^ {k} \\widetilde {\\mathbf {F}} ^ {t} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}, ^ {k} \\hat {\\mathbf {F}} ^ {t}\\right), \\\\ ^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {S}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {S}} \\left(^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}}\\right), \\\\ ^ {k} \\hat {\\mathbf {F}} ^ {t ^ {\\prime}} = \\operatorname {A t t} _ {\\mathbf {T}} (Q, K, V) = \\operatorname {A t t} _ {\\mathbf {T}} \\left(^ {k} \\widetilde {\\mathbf {F}} ^ {t ^ {\\prime}}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}, ^ {k} \\widetilde {\\mathbf {F}} ^ {t}\\right), \\end{array} \\right. \\tag {4}", + "image_path": "278ec47bac036530882d92ce0673d5896703b9f0049a409e5980c50755a8ea44.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 442, + 287, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 442, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 46, + 442, + 287, + 479 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 442, + 287, + 479 + ], + "type": "inline_equation", + "content": "{}^k\\hat{\\mathbf{F}}^t\\in \\mathbb{R}^{HW\\times C}" + }, + { + "bbox": [ + 46, + 442, + 287, + 479 + ], + "type": "text", + "content": " is the flattened RoI feature, AttS is the spatial self-attention, AttT is the temporal crossattention." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": "We then define the spatial correspondence map between two flattened RoI features after the attention operations. 
In frame pair " + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "(t, t')" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": ", we use " + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "^k\\mathbf{f}_i" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": " to denote " + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": "-th local feature in " + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "^k\\hat{\\mathbf{F}}^{(L)}" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "i \\in \\{1, 2, \\dots, HW\\}" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": "). The correspondence map " + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "inline_equation", + "content": "^k\\mathbf{C}_t^{t'} \\in \\mathbb{R}^{HW \\times HW}" + }, + { + "bbox": [ + 46, + 479, + 287, + 552 + ], + "type": "text", + "content": " in two frames is defined as the inner product of two features in two frames:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 559, + 287, + 575 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 559, + 287, + 575 + ], + "spans": [ + { + "bbox": [ + 118, + 559, + 287, + 575 + ], + "type": "interline_equation", + "content": "^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = ^ {k} \\mathbf {f} _ {i} ^ {t} * ^ {k} \\mathbf {f} _ {i ^ {\\prime}} ^ {t ^ {\\prime}}. \\tag {5}", + "image_path": "4dd8b8a26a6c7b0c2a62296e46ee17304b0718f884fece7504d35f1fc30e172d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 582, + 287, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 606 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 606 + ], + "type": "text", + "content": "To normalize the correspondence map, we perform softmax over all spatial locations " + }, + { + "bbox": [ + 46, + 582, + 287, + 606 + ], + "type": "inline_equation", + "content": "i'" + }, + { + "bbox": [ + 46, + 582, + 287, + 606 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 96, + 613, + 287, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 613, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 96, + 613, + 287, + 628 + ], + "type": "interline_equation", + "content": "^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ] = \\operatorname {s o f t m a x} \\left(^ {k} \\mathbf {C} _ {t} ^ {t ^ {\\prime}} [ i, i ^ {\\prime} ]\\right). \\tag {6}", + "image_path": "84eb531c2c3f950f3634a34a2d2bfa1065fa3e87cbdcde146bcb885412db0783.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 635, + 287, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 287, + 648 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 287, + 648 + ], + "type": "text", + "content": "4.3. 
Featuremetric Object Bundle Adjustment Loss" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 288, + 714 + ], + "type": "text", + "content": "In this subsection, we present that how to adapt and integrate the Object-centric Bundle Adjustment (OBA) into our learnable BA-Det framework, based on the obtained correspondence map. Generally speaking, we formulate the featuremetric OBA loss to supervise the temporal feature" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 335, + 545, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 335, + 545, + 382 + ], + "spans": [ + { + "bbox": [ + 304, + 335, + 545, + 382 + ], + "type": "text", + "content": "correspondence learning. Note that here we only derive the tracklet-level OBA loss for the same object, and for the final supervision we will sum all the tracklet-level loss in the video." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "spans": [ + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": "First, we revisit the object-centric bundle adjustment, as shown in Fig. 3a. As proposed in Object SLAM [21, 48], OBA assumes that the object can only have rigid motion relative to the camera. For the object " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_k" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": ", we denote the 3D points as " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_k = \\{^k\\mathbf{P}_i\\}_{i=1}^m" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": " in the object frame, 2D points as " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "\\{^k\\mathbf{p}_i^t\\}_{i=1}^m" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": ", 2D features at position " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "^k\\mathbf{p}_i^t" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{f}[^k\\mathbf{p}_i^t]\\}_{i=1}^m" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": ", and the camera pose in the object reference frame as " + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_k = \\{^k\\mathbf{T}_{co}^t\\}_{t=1}^T" + }, + { + "bbox": [ + 304, + 387, + 546, + 484 + ], + "type": "text", + "content": ", OBA can be casted as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 498, + 545, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 498, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 309, + 498, + 545, + 544 + ], + "type": "interline_equation", + "content": "\\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} | | ^ {k} \\mathbf {p} _ {i} ^ {t} - \\Pi \\left(^ {k} 
\\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) | | _ {2} ^ {2}. \\tag {7}", + "image_path": "a6618574e59f73dd298a03a120a45910c2aa3217d2d8394f8b33b695b28e746d.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 560, + 545, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 560, + 545, + 585 + ], + "spans": [ + { + "bbox": [ + 304, + 560, + 545, + 585 + ], + "type": "text", + "content": "To make the OBA layer end-to-end learnable, we formulate featuremetric [25] OBA:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 599, + 545, + 660 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 599, + 545, + 660 + ], + "spans": [ + { + "bbox": [ + 309, + 599, + 545, + 660 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\bar {\\mathcal {T}} _ {k}, \\bar {\\mathcal {P}} _ {k} = \\\\ \\underset {\\mathcal {T} _ {k}, \\mathcal {P} _ {k}} {\\arg \\min } \\frac {1}{2} \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, ^ {k} \\mathbf {P} _ {i}, \\mathbf {K}\\right) \\right] \\right| \\right| _ {2} ^ {2}, \\tag {8} \\\\ \\end{array}", + "image_path": "8494147e61482a8f0c489379ac006185d6d17ce1890d1da31b46950bf31ad339.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{f}[\\mathbf{p}]" + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": " denotes the feature vector in pixel coordinates " + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": ". Representing the 3D point " + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "inline_equation", + "content": "{}^k\\mathbf{P}_i" + }, + { + "bbox": [ + 304, + 677, + 546, + 714 + ], + "type": "text", + "content": " in Eq. 
8 with 2D points in each frame, the feature metric reprojection error of frame" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5109" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 74, + 248, + 178 + ], + "blocks": [ + { + "bbox": [ + 78, + 74, + 248, + 178 + ], + "lines": [ + { + "bbox": [ + 78, + 74, + 248, + 178 + ], + "spans": [ + { + "bbox": [ + 78, + 74, + 248, + 178 + ], + "type": "image", + "image_path": "ef98e7ea62dc61502455000a265fa8cd48e2e184d3cdc7ef4f53dede49c8590c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 185, + 242, + 195 + ], + "lines": [ + { + "bbox": [ + 92, + 185, + 242, + 195 + ], + "spans": [ + { + "bbox": [ + 92, + 185, + 242, + 195 + ], + "type": "text", + "content": "(a) Object-centric Bundle Adjustment (OBA)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 73, + 200, + 258, + 302 + ], + "blocks": [ + { + "bbox": [ + 73, + 200, + 258, + 302 + ], + "lines": [ + { + "bbox": [ + 73, + 200, + 258, + 302 + ], + "spans": [ + { + "bbox": [ + 73, + 200, + 258, + 302 + ], + "type": "image", + "image_path": "78b3ef0e982b6c9bdb7883b4e810b53b5e392e729a3254cf5b50a4a6844eb517.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 309, + 251, + 319 + ], + "lines": [ + { + "bbox": [ + 83, + 309, + 251, + 319 + ], + "spans": [ + { + "bbox": [ + 83, + 309, + 251, + 319 + ], + "type": "text", + "content": "(b) The computation of the featuremetric OBA loss." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 50, + 329, + 282, + 340 + ], + "lines": [ + { + "bbox": [ + 50, + 329, + 282, + 340 + ], + "spans": [ + { + "bbox": [ + 50, + 329, + 282, + 340 + ], + "type": "text", + "content": "Figure 3. Illustration of featuremetric object bundle adjustment." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 349, + 133, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 349, + 133, + 359 + ], + "spans": [ + { + "bbox": [ + 47, + 349, + 133, + 359 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 349, + 133, + 359 + ], + "type": "text", + "content": " could be derived as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 363, + 290, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 363, + 290, + 442 + ], + "spans": [ + { + "bbox": [ + 47, + 363, + 290, + 442 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} ^ {k} e _ {i} ^ {t} = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t ^ {\\prime}} \\right] (9) \\\\ = \\sum_ {t ^ {\\prime} = 1} ^ {T} \\mathbf {f} \\left[ ^ {k} \\mathbf {p} _ {i} ^ {t} \\right] - \\mathbf {f} \\left[ \\Pi \\left(^ {k} \\mathbf {T} _ {c o} ^ {t ^ {\\prime}}, \\Pi^ {- 1} \\left(^ {k} \\mathbf {T} _ {c o} ^ {t}, ^ {k} \\mathbf {p} _ {i} ^ {t}, \\mathbf {K}, z _ {i} ^ {t}\\right), \\mathbf {K}\\right) \\right], (10) \\\\ \\end{array}", + "image_path": "d3ea3b3435ea8c166fb16c1be31a30ef3205de45b30a9446e2d955877b817ca2.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "\\Pi^{-1}(\\cdot)" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " is the inverse projection function to lift the 2D point on the image to 3D in the object frame. " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "z_{i}^{t}" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " is the ground-truth depth of " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "{}^{k}\\mathbf{p}_{i}^{t}" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " (from LiDAR point clouds only for training). 
In the training time, we learn the feature correspondence, given the ground-truth pose of the object " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_k" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "{}^{k}\\mathbf{T}_{co}^{t}" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "{}^{k}\\mathbf{T}_{co}^{t'}" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " in frame " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " and frame " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": ", respectively. Considering the feature metric reprojection loss in all frames and all points, the overall loss term for object " + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 449, + 287, + 556 + ], + "type": "text", + "content": " can be formulated as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 559, + 287, + 593 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 559, + 287, + 593 + ], + "spans": [ + { + "bbox": [ + 52, + 559, + 287, + 593 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {r e p}} ^ {k} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\left| \\left| ^ {k} e _ {i} ^ {t} \\right| \\right| _ {2} ^ {2} = \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\left| \\left| ^ {k} \\mathbf {f} _ {i} ^ {t} - ^ {k} \\mathbf {f} _ {i} ^ {t ^ {\\prime}} \\right| \\right| _ {2} ^ {2} \\tag {11}", + "image_path": "f9d77c3dc77a93a848355874eb179f795f581d6cb78937aafdcec034a05e6f4b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "text", + "content": "Finally, we replace the " + }, + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "text", + "content": " norm in Eq. 11 with the cosine distance to measure the featuremetric reprojection error. Thus we bring the normalized correspondence map " + }, + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "inline_equation", + "content": "\\widetilde{\\mathbf{C}}" + }, + { + "bbox": [ + 46, + 601, + 287, + 674 + ], + "type": "text", + "content": " in Sec. 4.2 into the loss term. 
With log-likelihood formulation, we formulate the featuremetric OBA loss to supervise the object-centric temporal correspondence learning:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 678, + 287, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 678, + 287, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 678, + 287, + 711 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {O B A}} ^ {k} = - \\sum_ {i = 1} ^ {m} \\sum_ {t = 1} ^ {T} \\sum_ {t ^ {\\prime} = 1} ^ {T} \\log \\left(^ {k} \\widetilde {\\mathbf {C}} _ {t} ^ {t ^ {\\prime}} \\left[ ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t}, ^ {k} \\bar {\\mathbf {p}} _ {i} ^ {t ^ {\\prime}} \\right]\\right). \\tag {12}", + "image_path": "9afe3b7fdb227d3a07848cdbd0f1b4d09ebf89e7dce788642c89fb220b97c801.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "inline_equation", + "content": "\\left(^k\\bar{\\mathbf{p}}_i^t,\\bar{}^k\\bar{\\mathbf{p}}_i^{t'}\\right)" + }, + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "text", + "content": " are the ground-truth corresponding pair of the " + }, + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 305, + 71, + 545, + 109 + ], + "type": "text", + "content": "-th local feature. The illustration of the loss computation is in Fig. 3b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 116, + 373, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 116, + 373, + 128 + ], + "spans": [ + { + "bbox": [ + 306, + 116, + 373, + 128 + ], + "type": "text", + "content": "4.4. Inference" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 135, + 545, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 135, + 545, + 159 + ], + "spans": [ + { + "bbox": [ + 305, + 135, + 545, + 159 + ], + "type": "text", + "content": "After introducing the training loss design, we present the inference process of BA-Det as follows." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 159, + 545, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 159, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 305, + 159, + 545, + 219 + ], + "type": "text", + "content": "First-stage 3D object detection and association. The first-stage detector makes the prediction of classification scores and 2D / 3D bounding boxes. The 3D bounding boxes are associated across the frames by ImmortalTracker [44]. The following process is on the tracklet level." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "text", + "content": "Dense feature matching. To optimize the object pose, we need to obtain the feature correspondence in each frame for the same object. As mentioned in Sec. 4.2, the OTCL module is trained to generate a dense correspondence map in all frames. 
During inference, we match all " + }, + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "text", + "content": " dense local features in RoI between adjacent two frames and between the first frame and last frame of the time window " + }, + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "inline_equation", + "content": "[t, t + \\tau]" + }, + { + "bbox": [ + 305, + 220, + 545, + 327 + ], + "type": "text", + "content": ". We use the RANSAC algorithm [10] to filter the feature correspondence outliers." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "text", + "content": "Feature tracking. To form a long-term keypoint tracklet from the obtained correspondence, we leverage a graph-based algorithm. First, the matched feature pairs are constructed into a graph " + }, + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "text", + "content": ". The features are on the vertices. If the features are matched, an edge is connected in the graph. Then we track the feature for the object in all available frames. We use the association method mainly following [7]. The graph partitioning method is applied to " + }, + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 304, + 327, + 545, + 458 + ], + "type": "text", + "content": " to make each connected subgraph have at most one vertex per frame. The graph cut is based on the similarity of the matched features." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 460, + 545, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 531 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 531 + ], + "type": "text", + "content": "Object-centric bundle adjustment. In the inference stage, given the initial pose estimation and the temporal feature correspondence, we solve the object-centric bundle adjustment by Levenberg-Marquardt algorithm, and the object pose in each frame and the 3D position of the keypoints can be globally optimized between frames." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 532, + 545, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 532, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 305, + 532, + 545, + 567 + ], + "type": "text", + "content": "Post-processing. We also apply some common post-processing in video object detection techniques like tracklet rescoring [18] and bounding box temporal interpolation." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 578, + 387, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 578, + 387, + 593 + ], + "spans": [ + { + "bbox": [ + 306, + 578, + 387, + 593 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 599, + 426, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 599, + 426, + 611 + ], + "spans": [ + { + "bbox": [ + 306, + 599, + 426, + 611 + ], + "type": "text", + "content": "5.1. 
Datasets and metrics" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 618, + 545, + 713 + ], + "type": "text", + "content": "We conduct our experiments on the large autonomous driving dataset, Waymo Open Dataset (WOD) [40]. The WOD has different versions with different annotations and metrics. To keep the fairness of the comparisons, we report the results both on WOD v1.2 and WOD v1.3.1. The annotations on v1.2 are based on LiDAR and the official metrics are mAP IoU@0.7 and mAP IoU@0.5. Recently, v1.3.1 is released to support multi-camera 3D object detec" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5110" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 70, + 523, + 229 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 523, + 229 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 523, + 229 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 523, + 229 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2"></td><td colspan="4">LEVEL_1</td><td colspan="4">LEVEL_2</td></tr>
<tr><td>3D AP70</td><td>3D APH70</td><td>3D AP50</td><td>3D APH50</td><td>3D AP70</td><td>3D APH70</td><td>3D AP50</td><td>3D APH50</td></tr>
<tr><td>M3D-RPN [2]</td><td>0.35</td><td>0.34</td><td>3.79</td><td>3.63</td><td>0.33</td><td>0.33</td><td>3.61</td><td>3.46</td></tr>
<tr><td>PatchNet [29]</td><td>0.39</td><td>0.37</td><td>2.92</td><td>2.74</td><td>0.38</td><td>0.36</td><td>2.42</td><td>2.28</td></tr>
<tr><td>PCT [43]</td><td>0.89</td><td>0.88</td><td>4.20</td><td>4.15</td><td>0.66</td><td>0.66</td><td>4.03</td><td>3.99</td></tr>
<tr><td>MonoJSG [24]</td><td>0.97</td><td>0.95</td><td>5.65</td><td>5.47</td><td>0.91</td><td>0.89</td><td>5.34</td><td>5.17</td></tr>
<tr><td>GUPNet [28]</td><td>2.28</td><td>2.27</td><td>10.02</td><td>9.94</td><td>2.14</td><td>2.12</td><td>9.39</td><td>9.31</td></tr>
<tr><td>DEVIANT [19]</td><td>2.69</td><td>2.67</td><td>10.98</td><td>10.89</td><td>2.52</td><td>2.50</td><td>10.29</td><td>10.20</td></tr>
<tr><td>CaDDN [34]</td><td>5.03</td><td>4.99</td><td>17.54</td><td>17.31</td><td>4.49</td><td>4.45</td><td>16.51</td><td>16.28</td></tr>
<tr><td>DID-M3D [32]</td><td>-</td><td>-</td><td>20.66</td><td>20.47</td><td>-</td><td>-</td><td>19.37</td><td>19.19</td></tr>
<tr><td>BEVFormer [23]†</td><td>-</td><td>7.70</td><td>-</td><td>30.80</td><td>-</td><td>6.90</td><td>-</td><td>27.70</td></tr>
<tr><td>DCD [22]</td><td>12.57</td><td>12.50</td><td>33.44</td><td>33.24</td><td>11.78</td><td>11.72</td><td>31.43</td><td>31.25</td></tr>
<tr><td>MonoFlex [52] (Baseline)</td><td>11.70</td><td>11.64</td><td>32.26</td><td>32.06</td><td>10.96</td><td>10.90</td><td>30.31</td><td>30.12</td></tr>
<tr><td>BA-Det(Ours)†</td><td>16.60</td><td>16.45</td><td>40.93</td><td>40.51</td><td>15.57</td><td>15.44</td><td>38.53</td><td>38.12</td></tr></table>
", + "image_path": "c79583cdea79cc55b411f20ff66ad1dbc4ce685243397f881f2d2f7c808a8d79.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 99, + 277, + 135, + 385 + ], + "blocks": [ + { + "bbox": [ + 99, + 277, + 135, + 385 + ], + "lines": [ + { + "bbox": [ + 99, + 277, + 135, + 385 + ], + "spans": [ + { + "bbox": [ + 99, + 277, + 135, + 385 + ], + "type": "image", + "image_path": "9bc8c1e610c110feff67ca906a04d20d0d390da94ddc1c45023f293a563e17cf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 401, + 135, + 411 + ], + "lines": [ + { + "bbox": [ + 97, + 401, + 135, + 411 + ], + "spans": [ + { + "bbox": [ + 97, + 401, + 135, + 411 + ], + "type": "text", + "content": "(a) Frame 8." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 190, + 277, + 224, + 388 + ], + "blocks": [ + { + "bbox": [ + 190, + 277, + 224, + 388 + ], + "lines": [ + { + "bbox": [ + 190, + 277, + 224, + 388 + ], + "spans": [ + { + "bbox": [ + 190, + 277, + 224, + 388 + ], + "type": "image", + "image_path": "22ef3b70ba0a8ea6e8ccb2a1340a098e22ddf7a939afbb041f4a28033a0ae807.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 186, + 401, + 228, + 411 + ], + "lines": [ + { + "bbox": [ + 186, + 401, + 228, + 411 + ], + "spans": [ + { + "bbox": [ + 186, + 401, + 228, + 411 + ], + "type": "text", + "content": "(b) Frame 22." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 280, + 285, + 325, + 392 + ], + "blocks": [ + { + "bbox": [ + 280, + 285, + 325, + 392 + ], + "lines": [ + { + "bbox": [ + 280, + 285, + 325, + 392 + ], + "spans": [ + { + "bbox": [ + 280, + 285, + 325, + 392 + ], + "type": "image", + "image_path": "03675c24d7595c5dd5b492c1edab946d262e3e2a0304c93bdad36e3c5c8d811d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 401, + 319, + 411 + ], + "lines": [ + { + "bbox": [ + 277, + 401, + 319, + 411 + ], + "spans": [ + { + "bbox": [ + 277, + 401, + 319, + 411 + ], + "type": "text", + "content": "(c) Frame 36." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 371, + 281, + 422, + 389 + ], + "blocks": [ + { + "bbox": [ + 371, + 281, + 422, + 389 + ], + "lines": [ + { + "bbox": [ + 371, + 281, + 422, + 389 + ], + "spans": [ + { + "bbox": [ + 371, + 281, + 422, + 389 + ], + "type": "image", + "image_path": "42ee884163f0f7960fc9376f4173a67b305cac23dc0d4de2f89dbebbf2fa10c8.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 368, + 401, + 409, + 411 + ], + "lines": [ + { + "bbox": [ + 368, + 401, + 409, + 411 + ], + "spans": [ + { + "bbox": [ + 368, + 401, + 409, + 411 + ], + "type": "text", + "content": "(d) Frame 50." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 443, + 286, + 510, + 394 + ], + "blocks": [ + { + "bbox": [ + 443, + 286, + 510, + 394 + ], + "lines": [ + { + "bbox": [ + 443, + 286, + 510, + 394 + ], + "spans": [ + { + "bbox": [ + 443, + 286, + 510, + 394 + ], + "type": "image", + "image_path": "b624b040eb17806ab75ffc531723fde36d14b5d44f797b9305f63ae77a47871f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 459, + 401, + 500, + 411 + ], + "lines": [ + { + "bbox": [ + 459, + 401, + 500, + 411 + ], + "spans": [ + { + "bbox": [ + 459, + 401, + 500, + 411 + ], + "type": "text", + "content": "(e) Frame 57." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 420, + 547, + 455 + ], + "lines": [ + { + "bbox": [ + 46, + 420, + 547, + 455 + ], + "spans": [ + { + "bbox": [ + 46, + 420, + 547, + 455 + ], + "type": "text", + "content": "Figure 4. Qualitative results from the BEV in different frames. We use blue and red boxes to denote initial predictions and optimized predictions of the object we highlight. The green and black boxes denote the other box predictions and the ground truth boxes. The ego vehicle lies at the bottom of each figure." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 48, + 472, + 287, + 506 + ], + "blocks": [ + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "lines": [ + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "text", + "content": "Table 1. The results on WODv1.2 [40] val set. " + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{70}" + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "text", + "content": " denotes AP with IoU threshold at 0.7. " + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{50}" + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "text", + "content": " denotes AP IoU@0.5." + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 46, + 237, + 546, + 260 + ], + "type": "text", + "content": " denotes the method utilizing temporal information." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 472, + 287, + 506 + ], + "lines": [ + { + "bbox": [ + 48, + 472, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 48, + 472, + 287, + 506 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>LET-APL</td><td>LET-AP</td><td>LET-APH</td><td>3D AP70</td><td>3D AP50</td></tr>
<tr><td>MV-FCOS3D++ [45]†</td><td>58.11</td><td>74.68</td><td>73.50</td><td>14.66</td><td>36.02</td></tr>
<tr><td>BA-DetFCOS3D(Ours)†</td><td>58.47</td><td>74.85</td><td>73.66</td><td>15.02</td><td>36.89</td></tr></table>
", + "image_path": "0a3559907cc5781e2f3b6b935510c2af0f9b6b24efcbe21f14e1a1ba04ff5b68.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 514, + 287, + 559 + ], + "lines": [ + { + "bbox": [ + 46, + 514, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 46, + 514, + 287, + 559 + ], + "type": "text", + "content": "Table 2. The multi-camera results on WODv1.3.1 [16] val set. Besides the official LET-IoU-based metrics, we also report the metrics with standard 3D IoU. All metrics are reported for the LEVEL_2 difficulty.†: use temporal information." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 581, + 287, + 676 + ], + "type": "text", + "content": "tion, and the annotations are camera-synced boxes. On the v1.3.1 dataset, a series of new LET-IoU-based metrics [16] are introduced to slightly tolerate the localization error from the worse sensor, camera, than LiDAR. Early work mainly reports the results on the v1.2 dataset, and we only compare our methods with the ones from WOD Challenge 2022 using the v1.3.1 dataset. Because we mainly focus on rigid objects, we report the results of the VEHICLE class." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 714 + ], + "type": "text", + "content": "LET-3D-AP and LET-3D-APL are the new metrics, relying on the Longitudinal Error Tolerant IoU (LET-IoU). LET-IoU is the 3D IoU calculated between the target ground" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 475, + 546, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 475, + 546, + 535 + ], + "spans": [ + { + "bbox": [ + 305, + 475, + 546, + 535 + ], + "type": "text", + "content": "truth box and the prediction box aligned with ground truth along the depth that has minimum depth error. LET-3D-AP and LET-3D-APL are calculated from the average precision and the longitudinal affinity weighted average precision of the PR curve. For more details, please refer to [16]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 548, + 440, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 548, + 440, + 562 + ], + "spans": [ + { + "bbox": [ + 306, + 548, + 440, + 562 + ], + "type": "text", + "content": "5.2. Implementation Details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": "The first stage network architecture of BA-Det is the same as MonoFlex, with DLA-34 [51] backbone, the output feature map is with the stride of 8. In the second stage, the shape of the RoI feature is " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "60 \\times 80" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ". The spatial and temporal attention module is stacked with 4 layers. The implementation is based on the PyTorch framework. We train our model on 8 NVIDIA RTX 3090 GPUs for 14 epochs. 
Adam optimizer is applied with " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ". The initial learning rate is " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": " and weight decay is " + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "inline_equation", + "content": "10^{-5}" + }, + { + "bbox": [ + 304, + 570, + 547, + 713 + ], + "type": "text", + "content": ". The learning rate scheduler is one-cycle. We use the Levenberg-Marquardt algorithm, implemented by DeepLM [15], to solve object-centric bundle adjustment. The maximum it" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "5111" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 70, + 526, + 169 + ], + "blocks": [ + { + "bbox": [ + 70, + 70, + 526, + 169 + ], + "lines": [ + { + "bbox": [ + 70, + 70, + 526, + 169 + ], + "spans": [ + { + "bbox": [ + 70, + 70, + 526, + 169 + ], + "type": "table", + "html": "
Method | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
0-30 | 30-50 | 50-∞ | 0-30 | 30-50 | 50-∞ | 0-30 | 30-50 | 50-∞ | 0-30 | 30-50 | 50-∞
L1 | DCD [22] | 32.47 | 5.94 | 1.24 | 32.30 | 5.91 | 1.23 | 62.70 | 26.35 | 10.16 | 62.35 | 26.21 | 10.09
L1 | MonoFlex [52] | 30.64 | 5.29 | 1.05 | 30.48 | 5.27 | 1.04 | 61.13 | 25.85 | 9.03 | 60.75 | 25.71 | 8.95
L1 | BA-Det (Ours)† | 37.74 | 11.04 | 3.86 | 37.46 | 10.95 | 3.79 | 71.07 | 37.15 | 14.89 | 70.46 | 36.79 | 14.61
L2 | DCD [22] | 32.30 | 5.76 | 1.08 | 32.19 | 5.73 | 1.08 | 62.48 | 25.60 | 8.92 | 62.13 | 25.46 | 8.86
L2 | MonoFlex [52] | 30.54 | 5.14 | 0.91 | 30.37 | 5.11 | 0.91 | 60.91 | 25.11 | 7.92 | 60.54 | 24.97 | 7.85
L2 | BA-Det (Ours)† | 37.61 | 10.72 | 3.37 | 37.33 | 10.63 | 3.31 | 70.83 | 36.14 | 13.62 | 70.23 | 35.79 | 13.37
", + "image_path": "ea3c2ef05c85396eaac7310bfd25a78b73ac66ec992532dc101c268b478a6725.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 55, + 210, + 541, + 312 + ], + "blocks": [ + { + "bbox": [ + 46, + 177, + 547, + 200 + ], + "lines": [ + { + "bbox": [ + 46, + 177, + 547, + 200 + ], + "spans": [ + { + "bbox": [ + 46, + 177, + 547, + 200 + ], + "type": "text", + "content": "Table 3. The object depth range conditioned result on WODv1.2 [40] val set. L1 and L2 denote LEVEL_1 and LEVEL_2 difficulty, respectively. †: use temporal information." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 210, + 541, + 312 + ], + "lines": [ + { + "bbox": [ + 55, + 210, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 55, + 210, + 541, + 312 + ], + "type": "table", + "html": "
LEVEL_1 | LEVEL_2
3D AP70 | 3D APH70 | 3D AP50 | 3D APH50 | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
MonoFlex (baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12
Our first-stage prediction | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32
+3D Tracking [44] | 14.01 | 13.93 | 35.19 | 34.92 | 13.13 | 13.05 | 33.03 | 32.78
+ Learnable global optimization | 15.85 | 15.75 | 38.06 | 37.76 | 14.87 | 14.77 | 35.72 | 35.44
+ Tracklet rescoring | 16.43 | 16.30 | 40.07 | 39.70 | 15.41 | 15.29 | 37.66 | 37.31
+ Bbox interpolation | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12
", + "image_path": "ff9669b32a03cf2f6f7259c0b702aa6e0915d6ffadd62d0c60962f844e7f577a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 196, + 321, + 396, + 332 + ], + "lines": [ + { + "bbox": [ + 196, + 321, + 396, + 332 + ], + "spans": [ + { + "bbox": [ + 196, + 321, + 396, + 332 + ], + "type": "text", + "content": "Table 4. Ablation study of each component in BA-Det." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 354, + 287, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 287, + 390 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 287, + 390 + ], + "type": "text", + "content": "eration of the LM algorithm is 200. For the object that appears less than 10 frames or the average keypoint number is less than 5, we do not optimize it." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 405, + 274, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 405, + 274, + 418 + ], + "spans": [ + { + "bbox": [ + 47, + 405, + 274, + 418 + ], + "type": "text", + "content": "5.3. Comparisons with State-of-the-art Methods" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "text", + "content": "We compare our BA-Det with other state-of-the-art methods under two different settings. WODv1.2 is for the front view camera and WODv1.3.1 has the official evaluator for all 5 cameras. As shown in Table 1, using the FRONT camera, we outperform the SOTA method DCD [22] for about 4AP and 4APH (" + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\sim 30\\%" + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "text", + "content": " improvement) under the 0.7 IoU threshold. Compared with the only temporal method BEVFormer [23], we have double points of 3D " + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{70}" + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "text", + "content": " and 3D " + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{APH}_{70}" + }, + { + "bbox": [ + 46, + 426, + 289, + 713 + ], + "type": "text", + "content": ". To validate the effectiveness, we also report the multi-camera results on the newly released WODv1.3.1, as shown in Table 2. No published work reports the results on WODv1.3.1. So, we only compare with the open-source MV-FCOS3D++ [45], the second-place winner of WOD 2022 challenge. We design the variant of BA-Det, called BA-DetFCOS3D, to adapt to the multi-camera setting. BA-DetFCOS3D is also a two-stage object detector. The first stage is the same as MV-FCOS3D++, but with the output of 2D bounding boxes. The second stage is OTCL module supervised with featuremetric object bundle adjustment loss. Although there are overlaps between 5 cameras, to simplify the framework, we ignore the object BA optimization across cameras and only conduct temporal optimization. BA-DetFCOS3D outperforms MV-FCOS3D++ under main metrics and traditional 3D IoU-based metrics." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 353, + 418, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 353, + 418, + 365 + ], + "spans": [ + { + "bbox": [ + 306, + 353, + 418, + 365 + ], + "type": "text", + "content": "5.4. Qualitative Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 372, + 545, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 372, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 372, + 545, + 431 + ], + "type": "text", + "content": "In Fig. 4, we show the object-level qualitative results of the first-stage and second-stage predictions in different frames. For a tracklet, we can refine the bounding box predictions with the help of better measurements in other frames, even if there is a long time interval between them." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 441, + 467, + 452 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 441, + 467, + 452 + ], + "spans": [ + { + "bbox": [ + 306, + 441, + 467, + 452 + ], + "type": "text", + "content": "5.5. Distance Conditioned Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": "We report the results with the different depth ranges in Table 3. The results indicate that the single frame methods, like DCD and MonoFlex, are seriously affected by object depth. When the object is farther away from the ego vehicle, the detection performance drops sharply. Compared with these methods, BA-Det, has the gain almost from the object far away from the ego-vehicle. The 3D " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{70}" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " and 3D " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "\\mathrm{APH}_{70}" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "3\\times" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " compared with the baseline when the object is located in " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "[50\\mathrm{m},\\infty)" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "2\\times" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "[30\\mathrm{m},50\\mathrm{m})" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "1.2\\times" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "inline_equation", + "content": "[0\\mathrm{m},30\\mathrm{m})" + }, + { + "bbox": [ + 304, + 459, + 545, + 604 + ], + "type": "text", + "content": ". 
This is because we utilize the long-term temporal information for each object. In a tracklet, the predictions near the ego-vehicle can help to refine the object far away." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 611, + 398, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 398, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 398, + 624 + ], + "type": "text", + "content": "5.6. Ablation study" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": "We ablate each component of BA-Det. The results are shown in Table 4. The first stage detector is slightly better than the MonoFlex baseline mainly because we remove the edge fusion module, which is harmful to the truncated objects in WOD. 3D KF associates the objects and smooths the object's trajectory. This part of improvement can be regarded as similar to Kinematic3D [3]. The core of BA-Det" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5112" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 75, + 70, + 521, + 150 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 521, + 150 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 521, + 150 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 521, + 150 + ], + "type": "table", + "html": "
LEVEL_1 | LEVEL_2
3D AP70 | 3D APH70 | 3D AP50 | 3D APH50 | 3D AP70 | 3D APH70 | 3D AP50 | 3D APH50
MonoFlex (baseline) | 11.70 | 11.64 | 32.26 | 32.06 | 10.96 | 10.90 | 30.31 | 30.12
Initial prediction | 13.57 | 13.48 | 34.70 | 34.43 | 12.72 | 12.64 | 32.56 | 32.32
Static BA | 14.73 | 14.62 | 37.89 | 37.56 | 13.82 | 13.72 | 35.65 | 35.34
Ours | 16.60 | 16.45 | 40.93 | 40.51 | 15.57 | 15.44 | 38.53 | 38.12
", + "image_path": "e4536cf05376adb51572032374ac8cbfb516d66e82b1d7c2d7a9ee56f8d948ea.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 55, + 191, + 541, + 261 + ], + "blocks": [ + { + "bbox": [ + 46, + 158, + 547, + 182 + ], + "lines": [ + { + "bbox": [ + 46, + 158, + 547, + 182 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 547, + 182 + ], + "type": "text", + "content": "Table 5. Comparison between object-centric BA-Det and the traditional scene-level bundle adjustment (Static BA). Initial prediction denotes the predictions in the first stage." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 191, + 541, + 261 + ], + "lines": [ + { + "bbox": [ + 55, + 191, + 541, + 261 + ], + "spans": [ + { + "bbox": [ + 55, + 191, + 541, + 261 + ], + "type": "table", + "html": "
\\(\\bar{L}_{t}\\)LEVEL_1LEVEL_2
3D AP703D APH703D AP503D APH503D AP703D APH703D AP503D APH50
MonoFlex (baseline)-11.7011.6432.2632.0610.9610.9030.3130.12
BA-Det+ORB feature [35]2.614.0513.9635.2134.9513.1713.0833.0532.81
BA-Det+Our feature1016.6016.4540.9340.5115.5715.4438.5338.12
", + "image_path": "c9c01441ac8482a600609db2eddb9b6e340fed9c473095b15860d2644a98de34.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 292, + 287, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 292, + 287, + 328 + ], + "spans": [ + { + "bbox": [ + 46, + 292, + 287, + 328 + ], + "type": "text", + "content": "is the learnable global optimization module, which obtains the largest gain in all modules. The tracklet rescoring and temporal interpolation modules are also useful." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 335, + 163, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 163, + 347 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 163, + 347 + ], + "type": "text", + "content": "5.7. Further Discussions" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 354, + 287, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 287, + 450 + ], + "type": "text", + "content": "BA vs. Object BA. We conduct experiments to discuss whether the object-centric manner is important in temporal optimization. We modify our pipeline and optimize the whole scene in the global frame instead of optimizing the object pose in the object frame, called Static BA in Table 5. Static BA ignores dynamic objects and treats them the same as static objects. The inability to handle dynamic objects causes decreases by about 2 AP compared with BA-Det." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 450, + 288, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 450, + 288, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 450, + 288, + 582 + ], + "type": "text", + "content": "Temporal feature correspondence. As shown in Table 6, we ablate the features used for object-centric bundle adjustment. Compared with traditional ORB feature [35], widely used in SLAM, our feature learning module predicts denser and better correspondence. We find the average object tracklet length is 19.6 frames, and the average feature tracklet in our method is about 10 frames, which means we can keep a long feature dependency and better utilize long-range temporal information. However, the " + }, + { + "bbox": [ + 46, + 450, + 288, + 582 + ], + "type": "inline_equation", + "content": "\\bar{L}_t" + }, + { + "bbox": [ + 46, + 450, + 288, + 582 + ], + "type": "text", + "content": " of the ORB feature is only 2.6 frames. The results show the short keypoint tracklet can not refine the long-term object pose well." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": "Inference latency of each step in BA-Det. The inference latency of each step in BA-Det is shown in Table 7. The most time-consuming part is the first-stage object detector, more than " + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "inline_equation", + "content": "130\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": " per image, which is the same as the MonoFlex baseline. 
Our BA-Det only takes an additional " + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "inline_equation", + "content": "50\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 582, + 287, + 714 + ], + "type": "text", + "content": " latency per image, compared with the single-frame detector MonoFlex. Besides, although the dense feature correspondence is calculated, thanks to the shared backbone with the first stage detector and parallel processing for the objects, the feature correspondence module is not very time-consuming." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 356, + 289, + 499, + 354 + ], + "blocks": [ + { + "bbox": [ + 51, + 269, + 539, + 281 + ], + "lines": [ + { + "bbox": [ + 51, + 269, + 539, + 281 + ], + "spans": [ + { + "bbox": [ + 51, + 269, + 539, + 281 + ], + "type": "text", + "content": "Table 6. Ablation study about different feature corresponding methods. " + }, + { + "bbox": [ + 51, + 269, + 539, + 281 + ], + "type": "inline_equation", + "content": "{\\bar{L}}_{t}" + }, + { + "bbox": [ + 51, + 269, + 539, + 281 + ], + "type": "text", + "content": " denotes the average keypoint tracklet length for each object." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 356, + 289, + 499, + 354 + ], + "lines": [ + { + "bbox": [ + 356, + 289, + 499, + 354 + ], + "spans": [ + { + "bbox": [ + 356, + 289, + 499, + 354 + ], + "type": "table", + "html": "
Total latency | 181.5ms
First-stage detector | 132.6ms
Object tracking | 6.6ms
Feature correspondence | 23.0ms
Object bundle adjustment | 19.3ms
", + "image_path": "24f1691bac36a60a80fc6060a0ca602c6dc34140df4120463e9244e9ceee851c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 361, + 536, + 374 + ], + "lines": [ + { + "bbox": [ + 314, + 361, + 536, + 374 + ], + "spans": [ + { + "bbox": [ + 314, + 361, + 536, + 374 + ], + "type": "text", + "content": "Table 7. Inference latency of each step in BA-Det per image." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 305, + 382, + 473, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 382, + 473, + 395 + ], + "spans": [ + { + "bbox": [ + 305, + 382, + 473, + 395 + ], + "type": "text", + "content": "6. Limitations and Future Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 402, + 545, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 402, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 402, + 545, + 486 + ], + "type": "text", + "content": "In the current version of this paper, we only focus on the objects, such as cars, trucks, and trailers. The performance of non-rigid objects such as pedestrians has not been investigated. However, with mesh-based and skeleton-based 3D human models, we believe that a unified keypoint temporal alignment module can be designed in the future. So, we will explore the extension of BA-Det for non-rigid objects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 495, + 379, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 495, + 379, + 508 + ], + "spans": [ + { + "bbox": [ + 306, + 495, + 379, + 508 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 516, + 545, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 516, + 545, + 612 + ], + "spans": [ + { + "bbox": [ + 304, + 516, + 545, + 612 + ], + "type": "text", + "content": "In this paper, we propose a 3D video object detection paradigm with long-term temporal visual correspondence, called BA-Det. BA-Det is a two-stage object detector that can jointly learn object detection and temporal feature correspondence with proposed Featuremetric OBA loss. Object-centric bundle adjustment optimizes the first-stage object estimation globally in each frame. BA-Det achieves state-of-the-art performance on WOD." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 621, + 408, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 621, + 408, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 621, + 408, + 635 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 714 + ], + "type": "text", + "content": "This work was supported in part by the Major Project for New Generation of AI (No.2018AAA0100400), the National Natural Science Foundation of China (No. 61836014, No. U21B2042, No. 62072457, No. 62006231) and the InnoHK program. The authors thank Lue Fan and Yuqi Wang for their valuable suggestions." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5113" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "text", + "content": "[1] Sameer Agarwal, Keir Mierle, and The Ceres Solver Team. Ceres Solver. https://github.com/ceres-solver/ceres-solver, 2022.3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 287, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 287, + 157 + ], + "type": "text", + "content": "[2] Garrick Brazil and Xiaoming Liu. M3d-rpn: Monocular 3d region proposal network for object detection. In ICCV, 2019. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 286, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 286, + 191 + ], + "type": "text", + "content": "[3] Garrick Brazil, Gerard Pons-Moll, Xiaoming Liu, and Bernt Schiele. Kinematic 3d object detection in monocular video. In ECCV, 2020. 1, 2, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "type": "text", + "content": "[4] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 239, + 286, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 286, + 281 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 286, + 281 + ], + "type": "text", + "content": "[5] Xuesong Chen, Shaoshuai Shi, Benjamin Zhu, Ka Chun Cheung, Hang Xu, and Hongsheng Li. Mppnet: Multi-frame feature intertwining with proxy points for 3d temporal object detection. In ECCV, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 284, + 286, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 284, + 286, + 316 + ], + "spans": [ + { + "bbox": [ + 53, + 284, + 286, + 316 + ], + "type": "text", + "content": "[6] Yilun Chen, Shu Liu, Xiaoyong Shen, and Jiaya Jia. Dsgn: Deep stereo geometry network for 3d object detection. In CVPR, 2020. 
1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 318, + 286, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 286, + 349 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 286, + 349 + ], + "type": "text", + "content": "[7] Mihai Dusmanu, Johannes L Schonberger, and Marc Pollefeys. Multi-view optimization of local feature geometry. In ECCV, 2020. 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 352, + 286, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 352, + 286, + 395 + ], + "spans": [ + { + "bbox": [ + 53, + 352, + 286, + 395 + ], + "type": "text", + "content": "[8] Lue Fan, Ziqi Pang, Tianyuan Zhang, Yu-Xiong Wang, Hang Zhao, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Embracing single stride 3d object detector with sparse transformer. In CVPR, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 397, + 286, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 286, + 430 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 286, + 430 + ], + "type": "text", + "content": "[9] Lue Fan, Yuxue Yang, Feng Wang, Naiyan Wang, and Zhaoxiang Zhang. Super sparse 3d object detection. arXiv preprint arXiv:2301.02562, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 431, + 286, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 431, + 286, + 474 + ], + "spans": [ + { + "bbox": [ + 48, + 431, + 286, + 474 + ], + "type": "text", + "content": "[10] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 476, + 286, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 476, + 286, + 508 + ], + "spans": [ + { + "bbox": [ + 48, + 476, + 286, + 508 + ], + "type": "text", + "content": "[11] Hugo Germain, Vincent Lepetit, and Guillaume Bourmaud. Neural reprojection error: Merging feature learning and camera pose estimation. In CVPR, 2021. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 510, + 286, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 510, + 286, + 542 + ], + "spans": [ + { + "bbox": [ + 48, + 510, + 286, + 542 + ], + "type": "text", + "content": "[12] Xiaoyang Guo, Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Liga-stereo: Learning lidar geometry aware representations for stereo-based 3d detector. In ICCV, 2021. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 544, + 286, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 286, + 565 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 286, + 565 + ], + "type": "text", + "content": "[13] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In ICCV, 2017. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 567, + 286, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 286, + 599 + ], + "type": "text", + "content": "[14] Junjie Huang and Guan Huang. Bevdet4d: Exploit temporal cues in multi-camera 3d object detection. arXiv preprint arXiv:2203.17054, 2022. 
1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 601, + 286, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 286, + 643 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 286, + 643 + ], + "type": "text", + "content": "[15] Jingwei Huang, Shan Huang, and Mingwei Sun. Deeplm: Large-scale nonlinear least squares on deep learning frameworks using stochastic domain decomposition. In CVPR, 2021. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 286, + 689 + ], + "type": "text", + "content": "[16] Wei-Chih Hung, Henrik Kretzschmar, Vincent Casser, Jyh-Jing Hwang, and Dragomir Anguelov. Let-3d-ap: Longitudinal error tolerant 3d average precision for camera-only 3d detection. arXiv preprint arXiv:2206.07705, 2022. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 286, + 713 + ], + "type": "text", + "content": "[17] Rudolph Emil Kalman. A new approach to linear filtering and prediction problems. 1960. 2" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 138 + ], + "type": "text", + "content": "[18] Kai Kang, Hongsheng Li, Junjie Yan, Xingyu Zeng, Bin Yang, Tong Xiao, Cong Zhang, Zhe Wang, Ruohui Wang, Xiaogang Wang, et al. T-cnn: Tubelets with convolutional neural networks for object detection from videos. IEEE Transactions on Circuits and Systems for Video Technology, 28(10):2896-2907, 2017. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 307, + 140, + 545, + 182 + ], + "type": "text", + "content": "[19] Abhinav Kumar, Garrick Brazil, Enrique Corona, Armin Parchami, and Xiaoming Liu. Deviant: Depth equivariant network for monocular 3d object detection. In ECCV, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 184, + 545, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 184, + 545, + 216 + ], + "spans": [ + { + "bbox": [ + 307, + 184, + 545, + 216 + ], + "type": "text", + "content": "[20] Rainer Kummerle, Giorgio Grisetti, Hauke Strasdat, Kurt Konolige, and Wolfram Burgard. g2o: A general framework for graph optimization. In ICRA, 2011. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 217, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 217, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 545, + 248 + ], + "type": "text", + "content": "[21] Peiliang Li, Tong Qin, et al. Stereo vision-based semantic 3d object and ego-motion tracking for autonomous driving. In ECCV, 2018. 
1, 3, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 251, + 545, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 251, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 307, + 251, + 545, + 281 + ], + "type": "text", + "content": "[22] Yingyan Li, Yuntao Chen, Jiawei He, and Zhaoxiang Zhang. Densely constrained depth estimator for monocular 3d object detection. In ECCV, 2022. 1, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 545, + 336 + ], + "type": "text", + "content": "[23] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In ECCV, 2022. 1, 2, 6, 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 338, + 545, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 338, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 307, + 338, + 545, + 369 + ], + "type": "text", + "content": "[24] Qing Lian, Peiliang Li, and Xiaozhi Chen. Monojsg: Joint semantic and geometric cost volume for monocular 3d object detection. In CVPR, 2022. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 371, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 403 + ], + "type": "text", + "content": "[25] Philipp Lindenberger, Paul-Edouard Sarlin, Viktor Larsson, and Marc Pollefeys. Pixel-perfect structure-from-motion with featuremetric refinement. In ICCV, 2021. 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 307, + 404, + 545, + 448 + ], + "type": "text", + "content": "[26] Yingfei Liu, Junjie Yan, Fan Jia, Shuai Lin Li, Qi Gao, Tiancai Wang, Xiangyu Zhang, and Jian Sun. Petrv2: A unified framework for 3d perception from multi-camera images. arXiv preprint arXiv:2206.01256, 2022. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 449, + 545, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 545, + 480 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 545, + 480 + ], + "type": "text", + "content": "[27] David G Lowe. Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2):91-110, 2004. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 482, + 545, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 482, + 545, + 524 + ], + "spans": [ + { + "bbox": [ + 307, + 482, + 545, + 524 + ], + "type": "text", + "content": "[28] Yan Lu, Xinzhu Ma, Lei Yang, Tianzhu Zhang, Yating Liu, Qi Chu, Junjie Yan, and Wanli Ouyang. Geometry uncertainty projection network for monocular 3d object detection. In ICCV, 2021. 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 307, + 526, + 545, + 557 + ], + "type": "text", + "content": "[29] Xinzhu Ma, Shinan Liu, Zhiyi Xia, Hongwen Zhang, Xingyu Zeng, and Wanli Ouyang. 
Rethinking pseudo-lidar representation. In ECCV, 2020. 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 559, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 559, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 307, + 559, + 545, + 601 + ], + "type": "text", + "content": "[30] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics, 31(5):1147-1163, 2015. 2" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 603, + 545, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 603, + 545, + 646 + ], + "spans": [ + { + "bbox": [ + 307, + 603, + 545, + 646 + ], + "type": "text", + "content": "[31] Lachlan Nicholson, Michael Milford, and Niko Sünderhauf. Quadricslam: Dual quadrics from object detections as landmarks in object-oriented slam. IEEE Robotics and Automation Letters, 4(1):1-8, 2018. 3" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 679 + ], + "type": "text", + "content": "[32] Liang Peng, Xiaopei Wu, Zheng Yang, Haifeng Liu, and Deng Cai. Did-m3d: Decoupling instance depth for monocular 3d object detection. In ECCV, 2022. 6" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[33] Charles R Qi, Yin Zhou, Mahyar Najibi, Pei Sun, Khoa Vo, Boyang Deng, and Dragomir Anguelov. Offboard 3d object detection from point cloud sequences. In CVPR, 2021. 2" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5114" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 709 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[34] Cody Reading, Ali Harakeh, Julia Chae, and Steven L Waslander. Categorical depth distribution network for monocular 3d object detection. In CVPR, 2021. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 137 + ], + "type": "text", + "content": "[35] Ethan Rublee, Vincent Rabaud, Kurt Konolige, and Gary Bradski. Orb: An efficient alternative to sift or surf. In ICCV, 2011. 
2, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 139, + 287, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 139, + 287, + 171 + ], + "spans": [ + { + "bbox": [ + 48, + 139, + 287, + 171 + ], + "type": "text", + "content": "[36] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. In CVPR, 2020. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 172, + 287, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 172, + 287, + 193 + ], + "spans": [ + { + "bbox": [ + 48, + 172, + 287, + 193 + ], + "type": "text", + "content": "[37] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 194, + 287, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 194, + 287, + 225 + ], + "spans": [ + { + "bbox": [ + 48, + 194, + 287, + 225 + ], + "type": "text", + "content": "[38] Johannes Lutz Schonberger, Enliang Zheng, Marc Pollefeys, and Jan-Michael Frahm. Pixelwise view selection for unstructured multi-view stereo. In ECCV, 2016. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 227, + 287, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 227, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 48, + 227, + 287, + 258 + ], + "type": "text", + "content": "[39] Jiaming Sun, Zehong Shen, Yuang Wang, Hujun Bao, and Xiaowei Zhou. Loftr: Detector-free local feature matching with transformers. In CVPR, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 259, + 287, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 259, + 287, + 313 + ], + "spans": [ + { + "bbox": [ + 48, + 259, + 287, + 313 + ], + "type": "text", + "content": "[40] Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, et al. Scalability in perception for autonomous driving: Waymo open dataset. In CVPR, 2020. 5, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 315, + 287, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 315, + 287, + 335 + ], + "spans": [ + { + "bbox": [ + 48, + 315, + 287, + 335 + ], + "type": "text", + "content": "[41] Chengzhou Tang and Ping Tan. BA-net: Dense bundle adjustment networks. In ICLR, 2019. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 337, + 287, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 337, + 287, + 369 + ], + "spans": [ + { + "bbox": [ + 48, + 337, + 287, + 369 + ], + "type": "text", + "content": "[42] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In ICCV Workshops, 1999. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 369, + 287, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 369, + 287, + 402 + ], + "spans": [ + { + "bbox": [ + 48, + 369, + 287, + 402 + ], + "type": "text", + "content": "[43] Li Wang, Li Zhang, Yi Zhu, Zhi Zhang, Tong He, Mu Li, and Xiangyang Xue. Progressive coordinate transforms for monocular 3d object detection. NeurIPS, 2021. 
6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 403, + 287, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 403, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 48, + 403, + 287, + 435 + ], + "type": "text", + "content": "[44] Qitai Wang, Yuntao Chen, Ziqi Pang, Naiyan Wang, and Zhaoxiang Zhang. Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672, 2021. 3, 5, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 436, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 436, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 48, + 436, + 287, + 479 + ], + "type": "text", + "content": "[45] Tai Wang, Qing Lian, Chenming Zhu, Xinge Zhu, and Wenwei Zhang. MV-FCOS3D++: Multi-View camera-only 4d object detection with pretrained monocular backbones. arXiv preprint arXiv:2207.12716, 2022. 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 479, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 479, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 48, + 479, + 287, + 510 + ], + "type": "text", + "content": "[46] Tai Wang, Jiangmiao Pang, and Dahua Lin. Monocular 3d object detection with depth from motion. In ECCV, 2022. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 512, + 287, + 555 + ], + "type": "text", + "content": "[47] Zengran Wang, Chen Min, Zheng Ge, Yinhao Li, Zeming Li, Hongyu Yang, and Di Huang. Sts: Surround-view temporal stereo for multi-view 3d detection. arXiv preprint arXiv:2208.10145, 2022. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 555, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 287, + 588 + ], + "type": "text", + "content": "[48] Shichao Yang and Sebastian Scherer. Cubeslam: Monocular 3-d object slam. IEEE Transactions on Robotics, 35(4):925-938, 2019. 1, 3, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 589, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 589, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 589, + 287, + 611 + ], + "type": "text", + "content": "[49] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3d object detection and tracking. In CVPR, 2021. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 611, + 287, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 611, + 287, + 654 + ], + "spans": [ + { + "bbox": [ + 48, + 611, + 287, + 654 + ], + "type": "text", + "content": "[50] Yurong You, Katie Z Luo, Xiangyu Chen, Junan Chen, WeiLun Chao, Wen Sun, Bharath Hariharan, Mark Campbell, and Kilian Q Weinberger. Hindsight is 20/20: Leveraging past traversals to aid 3d perception. In ICLR, 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 48, + 655, + 287, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 655, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 655, + 287, + 677 + ], + "type": "text", + "content": "[51] Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In CVPR, 2018. 
6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 48, + 677, + 287, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 677, + 287, + 709 + ], + "spans": [ + { + "bbox": [ + 48, + 677, + 287, + 709 + ], + "type": "text", + "content": "[52] Yunpeng Zhang, Jiwen Lu, and Jie Zhou. Objects are different: Flexible monocular 3d object detection. In CVPR, 2021, 1, 3, 6, 7" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5115" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_content_list.json b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..04b237f353de03c0860032ab666ed6153950ed09 --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_content_list.json @@ -0,0 +1,1809 @@ +[ + { + "type": "text", + "text": "3D-aware Conditional Image Synthesis", + "text_level": 1, + "bbox": [ + 287, + 130, + 684, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kangle Deng Gengshan Yang Deva Ramanan Jun-Yan Zhu Carnegie Mellon University", + "bbox": [ + 222, + 180, + 746, + 218 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d0db08c534775dbb6eca15e32dcc39d29d6901cc09dc50bce140e3be6216c11a.jpg", + "image_caption": [ + "Figure 1. Given a 2D label map as input, such as a segmentation or edge map, our model learns to predict high-quality 3D labels, geometry, and appearance, which enables us to render both labels and RGB images from different viewpoints. The inferred 3D labels further allow interactive editing of label maps from any viewpoint, as shown in Figure 10." + ], + "image_footnote": [], + "bbox": [ + 76, + 248, + 893, + 531 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 593, + 313, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose pix2pix3D, a 3D-aware conditional generative model for controllable photorealistic image synthesis. Given a 2D label map, such as a segmentation or edge map, our model learns to synthesize a corresponding image from different viewpoints. To enable explicit 3D user control, we extend conditional generative models with neural radiance fields. Given widely-available posed monocular image and label map pairs, our model learns to assign a label to every 3D point in addition to color and density, which enables it to render the image and pixel-aligned label map simultaneously. Finally, we build an interactive system that allows users to edit the label map from different viewpoints and generate outputs accordingly.", + "bbox": [ + 73, + 618, + 470, + 815 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 844, + 207, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Content creation with generative models has witnessed tremendous progress in recent years, enabling high-quality,", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "user-controllable image and video synthesis [19, 20, 24, 34]. In particular, image-to-image translation methods [29, 56, 84] allow users to interactively create and manipulate a high-resolution image given a 2D input label map. Unfortunately, existing image-to-image translation methods operate purely in 2D, without explicit reasoning of the underlying 3D structure of the content. As shown in Figure 1, we aim to make conditional image synthesis 3D-aware, allowing not only 3D content generation but also viewpoint manipulation and attribute editing (e.g., car shape) in 3D.", + "bbox": [ + 496, + 594, + 893, + 744 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Synthesizing 3D content conditioned on user input is challenging. For model training, it is costly to obtain large-scale datasets with paired user inputs and their desired 3D outputs. During test time, 3D content creation often requires multi-view user inputs, as a user may want to specify the details of 3D objects using 2D interfaces from different viewpoints. However, these inputs may not be 3D-consistent, providing conflicting signals for 3D content creation.", + "bbox": [ + 496, + 747, + 893, + 868 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address the above challenges, we extend conditional generative models with 3D neural scene representations. To", + "bbox": [ + 498, + 869, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4434", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "enable cross-view editing, we additionally encode semantic information in 3D, which can then be rendered as 2D label maps from different viewpoints. We learn the aforementioned 3D representation using only 2D supervision in the form of image reconstruction and adversarial losses. While the reconstruction loss ensures the alignment between 2D user inputs and corresponding 3D content, our pixel-aligned conditional discriminator encourages the appearance and labels to look plausible while remaining pixel-aligned when rendered into novel viewpoints. We also propose a cross-view consistency loss to enforce the latent codes to be consistent from different viewpoints.", + "bbox": [ + 75, + 90, + 472, + 272 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We focus on 3D-aware semantic image synthesis on the CelebAMask-HQ [38], AFHQ-cat [16], and shapenetcar [10] datasets. Our method works well for various 2D user inputs, including segmentation maps and edge maps. Our method outperforms several 2D and 3D baselines, such as Pix2NeRF variants [6], SofGAN [11], and SEAN [87]. 
We further ablate the impact of various design choices and demonstrate applications of our method, such as cross-view editing and explicit user control over semantics and style. Please see our website for more results and code. Please check out the full version of our paper at arXiv.", + "bbox": [ + 75, + 273, + 472, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 455, + 218, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Implicit Representation. Neural implicit fields, such as DeepSDF and NeRFs [46, 54], model the appearance of objects and scenes with an implicitly defined, continuous 3D representation parameterized by neural networks. They have produced significant results for 3D reconstruction [67, 88] and novel view synthesis applications [39, 43, 44, 48, 80] thanks to their compactness and expressiveness. NeRF and its descendants aim to optimize a network for an individual scene, given hundreds of images from multiple viewpoints. Recent works further reduce the number of training views through learning network initializations [13, 70, 78], leveraging auxiliary supervision [18, 30], or imposing regularization terms [50]. Recently, explicit or hybrid representations of radiance fields [12, 48, 61] have also shown promising results regarding quality and speed. In our work, we use hybrid representations for modeling both user inputs and outputs in 3D, focusing on synthesizing novel images rather than reconstructing an existing scene. A recent work Pix2NeRF [6] aims to translate a single image to a neural radiance field, which allows single-image novel view synthesis. In contrast, we focus on 3D-aware user-controlled content generation.", + "bbox": [ + 75, + 487, + 472, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Conditional GANs. Generative adversarial networks (GANs) learn the distribution of natural images by forcing the generated and real images to be indistinguishable. They have demonstrated high-quality results on 2D image synthesis and manipulation [1, 3, 5, 20, 33-35, 59, 65, 72, 82, 83].", + "bbox": [ + 75, + 825, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several methods adopt image-conditional GANs [29, 47] for user-guided image synthesis and editing applications [26, 27, 38, 40, 55, 56, 62, 73, 84, 87]. In contrast, we propose a 3D-aware generative model conditioned on 2D user inputs that can render view-consistent images and enable interactive 3D editing. Recently, SoFGAN [11] uses a 3D semantic map generator and a 2D semantic-to-image generator to enable 3D-aware generation, but using 2D generators does not ensure 3D consistency.", + "bbox": [ + 496, + 90, + 893, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D-aware Image Synthesis. Early data-driven 3D image editing systems can achieve various 3D effects but often require a huge amount of manual effort [14, 37]. Recent works have integrated the 3D structure into learning-based image generation pipelines using various geometric representations, including voxels [22,86], voxelized 3D features [49], and 3D morphable models [71, 77]. However, many rely on external 3D data [71, 77, 86]. Recently, neural scene representations have been integrated into GANs to enable 3D-aware image synthesis [8,9,21,51-53,64,76]. Intriguingly, these 3D-aware GANs can learn 3D structures without any 3D supervision. 
For example, StyleNeRF [21] and EG3D [8] learn to generate 3D representations by modulating either NeRFs or explicit representations with latent style vectors. This allows them to render high-resolution view-consistent images. Unlike the above methods, we focus on conditional synthesis and interactive editing rather than random sampling. Several works [17,28,42,75] have explored sketch-based shape generation but they do not allow realistic image synthesis.", + "bbox": [ + 496, + 229, + 895, + 516 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Closely related to our work, Huang et al. [25] propose synthesizing novel views conditional on a semantic map. Our work differs in three ways. First, we can predict full 3D labels, geometry, and appearance, rather than only 2D views, which enables cross-view editing. Second, our method can synthesize images with a much wider baseline than Huang et al. [25]. Finally, our learning algorithm does not require ground truth multi-view images of the same scene. Two recent works, FENeRF [69] and 3DSGAN [79], also leverage semantic labels for training 3D-aware GANs, but they do not support conditional inputs and require additional efforts (e.g., GAN-inversion) to allow user editing. Three concurrent works, IDE-3D [68], NeRFFaceEditing [31], and sem2nerf [15], also explore the task of 3D-aware generation based on segmentation masks. However, IDE-3D and sem2nerf only allow editing on a fixed view, and NeRF-FaceEditing focuses on real image editing rather than generation. All of them do not include results for other input modalities. In contrast, we present a general-purpose method that works well for diverse datasets and input controls.", + "bbox": [ + 496, + 517, + 895, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 830, + 591, + 844 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given a 2D label map $\\mathbf{I}_{\\mathbf{s}}$ , such as a segmentation or edge map, pix2pix3D generates a 3D-volumetric representation of geometry, appearance, and labels that can be rendered", + "bbox": [ + 498, + 854, + 893, + 902 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4435", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c1c686b869a2e8159fc2727cb9533b35544f9d7cbf22f1705c03b3f7bd14fb26.jpg", + "image_caption": [ + "Figure 2. Overall pipeline. Given a 2D label map (e.g., segmentation map), a random latent code $z$ , and a camera pose $\\hat{P}$ as inputs, our generator renders the label map and image from viewpoint $\\hat{P}$ . Intuitively, the input label map specifies the geometric structure, while the latent code captures the appearance, such as hair color. We begin with an encoder that encodes both the input label map and the latent code into style vectors $\\mathbf{w}^{+}$ . We then use $\\mathbf{w}^{+}$ to modulate our 3D representation, which takes a spatial point $\\mathbf{x}$ and outputs (1) color $\\mathbf{c} \\in \\mathbb{R}^3$ , (2) density $\\sigma$ , (3) feature $\\phi \\in \\mathbb{R}^l$ , and (4) label $\\mathbf{s} \\in \\mathbb{R}^c$ . We then perform volumetric rendering and 2D upsampling to get the high-resolution label map $\\hat{\\mathbf{I}}_{\\mathbf{s}}^{+}$ and RGB Image $\\hat{\\mathbf{I}}_{\\mathbf{c}}^{+}$ . For those rendered from ground-truth poses, we compare them to ground-truth labels and images with an LPIPS loss and label reconstruction loss. 
We apply a GAN loss on labels and images rendered from both novel and original viewpoints." + ], + "image_footnote": [], + "bbox": [ + 83, + 92, + 888, + 444 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "from different viewpoints. Figure 2 provides an overview. We first introduce the formulation of our 3D conditional generative model for 3D-aware image synthesis in Section 3.1. Then, in Section 3.2, we discuss how to learn the model from color and label map pairs $\\{\\mathbf{I}_{\\mathrm{c}},\\mathbf{I}_{\\mathrm{s}}\\}$ associated with poses $\\mathbf{P}$ .", + "bbox": [ + 75, + 566, + 470, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Conditional 3D Generative Models", + "text_level": 1, + "bbox": [ + 76, + 648, + 382, + 664 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Similar to EG3D [8], we adopt a hybrid representation for the density and appearance of a scene and use style vectors to modulate the 3D generations. To condition the 3D representations on 2D label map inputs, we introduce a conditional encoder that maps a 2D label map into a latent style vector. Additionally, pix2pix3D produces 3D labels that can be rendered from different viewpoints, allowing for cross-view user editing.", + "bbox": [ + 75, + 672, + 468, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Conditional Encoder. Given a 2D label map input $\\mathbf{I}_{\\mathrm{s}}$ and a random latent code sampled from the spherical Gaussian space $\\mathbf{z} \\sim \\mathcal{N}(0, I)$ , our conditional encoder $E$ outputs a list of style vectors $\\mathbf{w}^{+} \\in \\mathbb{R}^{l \\times 256}$ ,", + "bbox": [ + 75, + 795, + 470, + 854 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {w} ^ {+} = E (\\mathbf {I _ {s}}, \\mathbf {z}),\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 863, + 328, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $l = 13$ is the number of layers to be modulated.", + "bbox": [ + 76, + 885, + 472, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, we encode $\\mathbf{I}_{\\mathrm{s}}$ into the first 7 style vectors that represent the global geometric information of the scene. We then feed the random latent code $\\mathbf{z}$ through a Multi-Layer Perceptron (MLP) mapping network to obtain the rest of the style vectors that control the appearance.", + "bbox": [ + 496, + 566, + 892, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Conditional 3D Representation. Our 3D representation is parameterized by tri-planes followed by an 2-layer MLP $f$ [8], which takes in a spatial point $\\mathbf{x} \\in \\mathbb{R}^3$ and returns 4 types of outputs: (1) color $\\mathbf{c} \\in \\mathbb{R}^3$ , (2) density $\\sigma \\in \\mathbb{R}^+$ , (3) feature $\\phi \\in \\mathbb{R}^{64}$ for the purpose of 2D upsampling, and most notably, (4) label $\\mathbf{s} \\in \\mathbb{R}^c$ , where $c$ is the number of classes if $\\mathbf{I_s}$ is a segmentation map, otherwise 1 for edge labels. We make the field conditional by modulating the generation of tri-planes $F^{\\mathrm{tri}}$ with the style vectors $\\mathbf{w}^+$ . We also remove the view dependence of the color following [8, 21]. 
Formally,", + "bbox": [ + 496, + 645, + 893, + 797 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n(\\mathbf {c}, \\mathbf {s}, \\sigma , \\phi) = f (F _ {\\mathbf {w} ^ {+}} ^ {\\mathrm {t r i}} (\\mathbf {x})).\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 806, + 784, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Volume Rendering and Upsampling. We apply volumetric rendering to synthesize color images [32, 46]. In addition, we render label maps, which are crucial for enabling cross-view editing (Section 4.3) and improving rendering quality", + "bbox": [ + 496, + 839, + 895, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4436", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(Table 1). Given a viewpoint $\\hat{P}$ looking at the scene origin, we sample $N$ points along the ray that emanates from a pixel location and query density, color, labels, and feature information from our 3D representation. Let $\\mathbf{x_i}$ be the i-th sampled point along the ray $r$ . Let $\\mathbf{c}_i, \\mathbf{s}_i$ and $\\phi_i$ be the color, labels, and the features of $\\mathbf{x_i}$ . Similar to [69], The color, label map, and feature images are computed as the weighted combination of queried values,", + "bbox": [ + 75, + 90, + 470, + 212 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {I}} _ {\\mathbf {c}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {c} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\mathbf {s}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {s} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\phi} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\phi_ {i}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 222, + 468, + 276 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the transmittance $\\tau_{i}$ is computed as the probability of a photon traversing between the camera center and the i-th point given the length of the i-th interval $\\delta_{i}$ ,", + "bbox": [ + 75, + 277, + 470, + 323 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {i} = \\prod_ {j = 1} ^ {i} \\exp \\left(- \\sigma_ {j} \\delta_ {j}\\right) (1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)).\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 333, + 411, + 377 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to prior works [8, 21, 52], we approximate Equation 1 by 2D Upsampler $U$ to reduce the computational cost. We render high-res $512 \\times 512$ images in two passes. In the first pass, we render low-res $64 \\times 64$ images $\\hat{\\mathbf{I}}_{\\mathbf{c}}, \\hat{\\mathbf{I}}_{\\mathbf{s}}, \\hat{\\mathbf{I}}_{\\phi}$ . Then a CNN up-sampler $U$ is applied to obtain high-res images,", + "bbox": [ + 75, + 385, + 468, + 460 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\phi}), \\qquad \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\phi}).\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 469, + 400, + 489 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 
Learning Objective", + "text_level": 1, + "bbox": [ + 76, + 498, + 264, + 513 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Learning conditional 3D representations from monocular images is challenging due to its under-constrained nature. Given training data of associated images, label maps, and camera poses predicted by an off-the-shelf model, we carefully construct learning objectives, including reconstruction, adversarial, and cross-view consistency losses. These objectives will be described below.", + "bbox": [ + 75, + 522, + 470, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Reconstruction Loss. Given a ground-truth viewpoint $\\mathbf{P}$ associated with the color and label maps $\\{\\mathbf{I}_{\\mathbf{c}}, \\mathbf{I}_{\\mathbf{s}}\\}$ , we render color and label maps from $\\mathbf{P}$ and compute reconstruction losses for both high-res and low-res output. We use LPIPS [81] to compute the image reconstruction loss $\\mathcal{L}_c$ for color images. For label reconstruction loss $\\mathcal{L}_s$ , we use the balanced cross-entropy loss for segmentation maps or L2 Loss for edge maps,", + "bbox": [ + 75, + 631, + 468, + 752 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {r e c o n}} = \\lambda_ {c} \\mathcal {L} _ {c} \\left(\\mathbf {I} _ {\\mathbf {c}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} \\right\\}\\right) + \\lambda_ {s} \\mathcal {L} _ {s} \\left(\\mathbf {I} _ {\\mathbf {s}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} \\right\\}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 762, + 441, + 780 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\lambda_{c}$ and $\\lambda_{s}$ balance two terms.", + "bbox": [ + 76, + 791, + 316, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pixel-aligned Conditional Discriminator. The reconstruction loss alone fails to synthesize detailed results from novel viewpoints. Therefore, we use an adversarial loss [20] to enforce renderings to look realistic from random viewpoints. Specifically, we have two discriminators $D_{\\mathbf{c}}$ and $D_{\\mathbf{s}}$ for RGB images and label maps, respectively. $D_{\\mathbf{c}}$ is a widely-used", + "bbox": [ + 75, + 809, + 470, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/988411ec3b4d606f33e07441af6e5a4bd16111801d61dbb690142bd8ed4bbd8a.jpg", + "image_caption": [ + "Multi-view Generation of Seg Maps", + "Figure 3. Cross-View Consistency Loss. Given an input label map $\\mathbf{I}_{\\mathbf{s}}$ and its associated pose $\\mathbf{P}$ , we first infer the geometry latent code $\\mathbf{w}_{\\mathbf{g}}$ . From $\\mathbf{w}_{\\mathbf{g}}$ , we can generate a label map $\\hat{\\mathbf{I}}_{\\mathbf{s}}$ from the same pose $\\mathbf{P}$ , and $\\hat{\\mathbf{I}}_{\\mathbf{s}}'$ from a random pose $\\mathbf{P}'$ . Next, we infer $\\mathbf{w}_{\\mathbf{g}}'$ from the novel view $\\hat{\\mathbf{I}}_{\\mathbf{s}}'$ , and render it back to the original pose $\\mathbf{P}$ to obtain $\\hat{\\mathbf{I}}_{\\mathbf{s}}''$ . Finally, we add a reconstruction loss: $\\mathcal{L}_{\\mathrm{CVC}} = \\lambda_{\\mathrm{CVC}}\\mathcal{L}_s(\\hat{\\mathbf{I}}_{\\mathbf{s}}'', \\hat{\\mathbf{I}}_{\\mathbf{s}})$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 501, + 103, + 890, + 219 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "GAN loss that takes real and fake images as input, while the pixel-aligned conditional discriminator $D_{\\mathbf{s}}$ concatenates color images and label maps as input, which encourages pixel alignment between color images and label maps. Notably, in $D_{\\mathbf{s}}$ , we stop the gradients for the color images to prevent a potential quality downgrade. We also feed the rendered low-res images to prevent the upsampler from hallucinating details, inconsistent with the low-res output. The adversarial loss can be written as follows.", + "bbox": [ + 496, + 339, + 893, + 474 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {G A N}} = \\lambda_ {D _ {\\mathbf {c}}} \\mathcal {L} _ {D _ {\\mathbf {c}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}) + \\lambda_ {D _ {\\mathbf {s}}} \\mathcal {L} _ {D _ {\\mathbf {s}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}).\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 482, + 870, + 501 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\lambda_{D_{\\mathrm{c}}}$ and $\\lambda_{D_{\\mathrm{s}}}$ balance two terms. To stabilize the GAN training, we adopt the R1 regularization loss [45].", + "bbox": [ + 496, + 508, + 890, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Cross-view Consistency Loss. We observe that inputting label maps of the same object from different viewpoints will sometimes result in different 3D shapes. Therefore we add a cross-view consistency loss to regularize the training, as illustrated in Figure 3. Given an input label map $\\mathbf{I}_{\\mathbf{s}}$ and its associated pose $\\mathbf{P}$ , we generate the label map $\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}$ from a different viewpoint $\\mathbf{P}^{\\prime}$ , and render the label map $\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}$ back to the pose $\\mathbf{P}$ using $\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}$ as input. We add a reconstruction loss between $\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}$ and $\\hat{\\mathbf{I}}_{\\mathbf{s}}$ :", + "bbox": [ + 496, + 542, + 890, + 678 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {C V C}} = \\lambda_ {\\mathrm {C V C}} \\mathcal {L} _ {s} (\\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {\\prime \\prime}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}),\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 685, + 779, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_s$ denotes the reconstruction loss in the label space, and $\\lambda_{\\mathrm{CVC}}$ weights the loss term. This loss is crucial for reducing error accumulation during cross-view editing.", + "bbox": [ + 496, + 710, + 893, + 756 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Optimization. 
Our final learning objective is written as follows:", + "bbox": [ + 496, + 758, + 893, + 787 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {r e c o n}} + \\mathcal {L} _ {\\text {G A N}} + \\mathcal {L} _ {\\text {C V C}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 588, + 789, + 800, + 805 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At every iteration, we determine whether to use a ground-truth pose or sample a random one with a probability of $p$ . We use the reconstruction loss and GAN loss for ground-truth poses, while for random poses, we only use the GAN loss. We provide the hyper-parameters and more implementation details in the appendix of our arXiv version.", + "bbox": [ + 496, + 810, + 893, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4437", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6524c121ce230d05cd41feb9f75bed487b059bf5a675aa6a54d2e8ffe2d5a69d.jpg", + "image_caption": [ + "Input Seg Map" + ], + "image_footnote": [], + "bbox": [ + 93, + 108, + 187, + 194 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/962e55fa0d7fee6b63d4c7ba2d40169c247bab59d0ed8d5aa78cd86a0ce374a6.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 196, + 104, + 426, + 194 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/492c45d44d7ba6d8169e4d0c645e6a8da2d7374cd6ea95cdbfd93ce49e14e693.jpg", + "image_caption": [ + "Pix2NeRF", + "Figure 4. Qualitative Comparison with Pix2NeRF [6], SoFGAN [11], and SEAN [87] on CelebAMask dataset for seg2face task. SEAN fails in multi-view synthesis, while SoFGAN suffers from multi-view inconsistency (e.g., face identity changes across viewpoints). Our method renders high-quality images while maintaining multi-view consistency. Please check our website for more examples." 
+ ], + "image_footnote": [], + "bbox": [ + 426, + 104, + 598, + 193 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/25d1c57f903489de4ac9d4c8d90385d68150f583be4b4f372a55bccb3970eaff.jpg", + "image_caption": [ + "SoFGAN" + ], + "image_footnote": [], + "bbox": [ + 598, + 104, + 771, + 193 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/446644698823e9c07c72d457e262b4557f2b556ad8bda9432911171a4ac03a66.jpg", + "image_caption": [ + "SEAN" + ], + "image_footnote": [], + "bbox": [ + 771, + 104, + 887, + 193 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/73d0e8dcc36b189f63f1ea7b19c085511cee534a07c251b442368c72542146b3.jpg", + "image_caption": [ + "Input Seg Map" + ], + "image_footnote": [], + "bbox": [ + 91, + 256, + 166, + 330 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fa82f049d6800f9a4b8d6e0160c5b23261b5fbb18c367726987cf682cf2ab95d.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 179, + 255, + 320, + 330 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2667b3ac10d618c8c04aee838c4b19b38a78be508b30b10c8f6b052b31387a13.jpg", + "image_caption": [ + "w/o 3D Labels" + ], + "image_footnote": [], + "bbox": [ + 325, + 255, + 465, + 329 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1286d0a864f24b9a995cdaf2bfb3b7b1c4ed3425e1e932af73f41d5580cda404.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 330, + 176, + 405 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/293baefdf04c37c560ceb68f7e44c4547c13fcb023a6b1f32d5d0724828710b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 330, + 320, + 406 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0754bdc1996513a0f381d6645fa35e4f747966a743de55433aa9c6cf0cd03390.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 330, + 465, + 406 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/df3d8e6c4c75d7110ef47a2f48f699d9fcf88ae9f5a6cf9ffbdfcdcc3d299080.jpg", + "image_caption": [ + "Figure 5. Qualitative ablation on seg2face and seg2cat. We ablate our method by removing the branch that renders label maps (w/o 3D Labels). Our results better align with input labels (e.g., hairlines and the cat's ear)." + ], + "image_footnote": [], + "bbox": [ + 81, + 407, + 166, + 481 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/763d78b11909cd56b602c8cb563bda80d1ea09fa760c6ba2976ce6dc62826ded.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 407, + 320, + 481 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e03c8222f29076d852d5ad90fbeeb9406e2a18927e788482a6e3617d19ad05bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 323, + 407, + 465, + 481 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/689b58efb2e7efef39d2c744f5934ead645dc516fc6f60e473d266da6db4b071.jpg", + "image_caption": [ + "Input Edge Map" + ], + "image_footnote": [], + "bbox": [ + 80, + 551, + 207, + 744 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3d3c8a105e49a2338d37b272fe6e18044354958d74ec02ee690fe6d523e90e42.jpg", + "image_caption": [ + "Rendered RGB images & edge maps", + "GT View", + "Figure 6. Results on edge2cat. Our model is trained on AFHQcat [16] with edges extracted by pidinet [66]." 
+ ], + "image_footnote": [], + "bbox": [ + 215, + 551, + 467, + 746 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiment", + "text_level": 1, + "bbox": [ + 76, + 791, + 200, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first introduce the datasets and evaluation metrics. Then we compare our method with the baselines. Finally, we demonstrate cross-view editing and multi-modal synthesis applications enabled by our method.", + "bbox": [ + 75, + 818, + 470, + 878 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. We consider four tasks: seg2face, seg2cat,", + "bbox": [ + 76, + 885, + 470, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/611b3a3f67c0d28c059363a578b0e5558d3d974a5ff4a53806747d05ed6cb5fa.jpg", + "image_caption": [ + "Input Seg Map" + ], + "image_footnote": [], + "bbox": [ + 501, + 256, + 596, + 330 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/da8d379482d15b6744c8d2fee408dc38176a00236fdc0e02e612b6f55aa221d2.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 602, + 256, + 741, + 329 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c3837de7e313c28c101c3a24263b10dbcd65e874fee7bd8a9466700be6d15ec3.jpg", + "image_caption": [ + "w/o 3D Labels" + ], + "image_footnote": [], + "bbox": [ + 748, + 255, + 888, + 329 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/075e8f080fffcde2a09c9e911fb4c114a3fb1e5e0c56c746265e0731c1264b44.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 330, + 601, + 400 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/654619b953a1a632b636e8147a0aaf0328730b9e93a0ac2f876bb36d04e60bdc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 330, + 743, + 405 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7ad193f647b58e706d59eba116ba5f003f6a531c1c23e3860a17c6c55c3b1890.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 746, + 330, + 888, + 405 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/2dc61c293039e792708ef518ce6744dd1404d7395c9f1e736c40b54c5d212cb9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 407, + 601, + 474 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a81b6d3de8edacada11ba680be9da78a60da8c73063b4a6670252dddd8b0b201.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 407, + 743, + 481 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/332e8ae0fe3aaa5a4391ce7bcee64fcc21e0f83f2fbda3ae1f8e1a3214739393.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 748, + 407, + 888, + 481 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/31b1df148d6c6de23c999ffade5f1dbd5fb4fc80aaa0116bd7e8a51219eec901.jpg", + "image_caption": [ + "Input Edge Map" + ], + "image_footnote": [], + "bbox": [ + 509, + 599, + 586, + 656 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0e1af3b81c89ef415836333616082c06bb5a4f1e64c77537ccebfc922c49bfb7.jpg", + "image_caption": [ + "Figure 7. Qualitative comparisons on edge2car. pix2pix3D (Ours) and Pix2NeRF [6] are trained on shapenet-car [10], and pix2pix3D achieves better quality and alignment than Pix2NeRF." + ], + "image_footnote": [], + "bbox": [ + 594, + 523, + 888, + 708 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "edge2cat, and edge2car in our experiments. 
For seg2face, we use CelebAMask-HQ [38] for evaluation. CelebAMask-HQ contains 30,000 high-resolution face images from CelebA [41], and each image has a facial part segmentation mask and a predicted pose. The segmentation masks contain 19 classes, including skin, eyebrows, ears, mouth, lip, etc. The pose associated with each image segmentation is predicted by HopeNet [60]. We split the CelebAMask-HQ dataset into", + "bbox": [ + 496, + 779, + 892, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4438", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2374ae64585ac4b56a0d85dde8600055ee5f40a442ea6d68e4a74da910eba874.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Seg2FaceQUALITYALIGNMENTFVV Identity ↓
CELEBAMASK [38]FID ↓KID ↓SG Diversity ↑mIoU ↑acc ↑
SEAN [87]32.740.0180.290.520.85N/A
SoFGAN [11]23.340.0120.330.530.890.58
PIX2NERF [6]54.230.0420.160.360.650.44
PIX2PIX3D (OURS)
W/O 3D LABELS12.960.0050.30N/A (0.43)N/A (0.81)0.38
W/O CVC11.620.0040.300.50 (0.50)0.87 (0.85)0.42
FULL MODEL11.540.0030.280.51 (0.52)0.90 (0.88)0.36
FULL MODEL†11.130.0030.290.51 (0.50)0.90 (0.87)0.36
", + "bbox": [ + 78, + 88, + 486, + 244 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1189952badc1b804dbe5b9f36263042554a0e2e5a2af12b3b6e49f5d3584f625.jpg", + "table_caption": [ + "Table 1. Seg2face Evaluation. Our metrics include image quality (FID, KID, SG Diversity), alignment (mIoU and acc against GT label maps), and multi-view consistency (FVV Identity). Single-generation diversity (SG Diversity) is obtained by computing the LPIPS metric between randomly generated pairs given a single conditional input. To evaluate alignment, we compare the generated label maps against the ground truth in terms of mIoU and pixel accuracy (acc). Alternatively, given a generated image, one could estimate label maps via a face parser, and compare those against the ground truth (numbers in parentheses). We include SEAN [87] and SoFGAN [11] as baselines, and modify Pix2NeRF [6] to take conditional input. Our method achieves the best quality, alignment ACC, and FVV Identity while being competitive on SG Diversity. SoFGAN tends to have better alignment but worse 3D consistency. We also ablate our method w.r.t the 3D labels and the cross-view consistency (CVC) loss. Our 3D labels are crucial for alignment, while the CVC loss improves multi-view consistency. Using pretrained models from EG3D $(\\dagger)$ also improves the performance." + ], + "table_footnote": [], + "table_body": "
Edge2CarQUALITYALIGNMENT
SHAPENET-CAR [10]FID ↓KID ↓SG Diversity ↑AP ↑
PIX2NERF [6]23.420.0140.060.28
PIX2PIX3D (OURS)
w/o 3D LABELS10.730.0050.120.45 (0.42)
w/o CVC9.420.0040.130.61 (0.59)
FULL MODEL8.310.0040.130.63 (0.59)
", + "bbox": [ + 91, + 511, + 454, + 626 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "a training set of 24,183, a validation set of 2,993, and a test set of 2,824, following the original work [38]. For seg2cat and edge2cat, we use AFHQ-cat [16], which contains 5,065 images at $512 \\times$ resolution. We estimate the viewpoints using unsup3d [74]. We extract the edges using pidinet [66] and obtain segmentation by clustering DINO features [2] into 6 classes. For edge2car, we use 3D models from shapenet-", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e822f0db70976b8ad5509a5bb9cb91533e8845b520d1e7ecbc87b76561ea8ff1.jpg", + "table_caption": [ + "Table 2. Edge2car Evaluation. We compare our method with Pix2NeRF [6] on edge2car using the shapenet-car [10] dataset. Similar to Table 1, we evaluate FID, KID, and SG Diversity for image quality. We also evaluate the alignment with the input edge map using AP. Similarly, we can either run informative drawing [7] on generated images to obtain edge maps (numbers in parentheses) or directly use generated edge maps to calculate the metrics. We achieve better image quality and alignment than Pix2NeRF. We also find that using 3D labels and cross-view consistency loss is helpful regarding FID and AP metrics." + ], + "table_footnote": [], + "table_body": "
Seg2CatQUALITYALIGNMENT
AFHQ-CAT [16]FID ↓KID ↓SG Diversity ↑mIoU ↑acc ↑
PIX2NERF [6]43.920.0810.150.270.58
OURS
w/o 3D LABELS10.410.0040.26N/A (0.49)N/A (0.69)
w/o CVC9.640.0040.260.66 (0.63)0.76 (0.73)
FULL MODEL8.620.0030.270.66 (0.62)0.78 (0.73)
", + "bbox": [ + 501, + 88, + 890, + 203 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3. Seg2cat Evaluation. We compare our method with Pix2NeRF [6] on Seg2Cat using AFHQ-cat dataset [16], with segmentation obtained by clustering DINO features [2]. Similar to Table 1, we evaluate the image quality and alignment. Ours performs better in all metrics.", + "bbox": [ + 496, + 213, + 893, + 282 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5b3d08623c8b1ae15837e2929eac3d8a625b8e094d1d60e27bc8bf860a30031c.jpg", + "image_caption": [ + "Figure 8. Semantic Mesh. We show semantic meshes of human and cat faces from marching cubes colored by 3D labels." + ], + "image_footnote": [], + "bbox": [ + 511, + 290, + 887, + 455 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "car [10] and render 500,000 images at $128 \\times$ resolution for training, and 30,000 for evaluation. We extract the edges using informative drawing [7]. We train our model at $512 \\times$ resolution except for $128 \\times$ in the edge2car task.", + "bbox": [ + 496, + 512, + 890, + 573 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Running Time. For training the model at $512 \\times$ resolution, it takes about three days on eight RTX 3090 GPUs. But we can significantly reduce the training time to 4 hours if we initialize parts of our model with pretrained weights from EG3D [8]. During inference, our model takes $10\\mathrm{ms}$ to obtain the style vector, and another $30\\mathrm{ms}$ to render the final image and the label map on a single RTX A5000. The low latency (25 FPS) allows for interactive user editing.", + "bbox": [ + 496, + 577, + 890, + 699 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Evaluation metrics", + "text_level": 1, + "bbox": [ + 500, + 707, + 681, + 722 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate the models from two aspects: 1) the image quality regarding fidelity and diversity, and 2) the alignment between input label maps and generated outputs.", + "bbox": [ + 496, + 729, + 890, + 776 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quality Metrics. Following prior works [21, 57], we use the clean-fid library [58] to compute Fréchet Inception Distance (FID) [23] and Kernel Inception Distance (KID) [4] to measure the distribution distance between synthesized results and real images. We also evaluate the single-generation diversity (SG Diversity) by calculating the LPIPS metric between randomly generated pairs given a single input following prior works [11, 85]. For FID and KID, we generate", + "bbox": [ + 496, + 779, + 893, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4439", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ddcc366d3be846fb4986f5b23ba0240c5f84a3fb77a866926f6ae612b9a99c3e.jpg", + "image_caption": [ + "Figure 9. We study the effect of random pose sampling probability $p$ during training. Without random poses ( $p = 0$ ), the model achieves the best alignment with input semantic maps, with reduced image quality. In contrast, only using random poses ( $p = 1$ ) achieves the best image quality, while results fail to align with input maps. We find $p = 0.5$ balances the image quality and input alignment." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 279, + 204 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/86a570277fc93bf09c5b134d5bfdbc4c976c22b004fdaeb0c62af6db371f5793.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 88, + 483, + 204 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/be042cfffb6bb81a1708257c1eb218309bcad47386f9707ff233a8fd004df1b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 88, + 687, + 204 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/24569639882bfc6cc5f28e3701610500a4d7632303263fe8a02bc4e6d00ac557.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 89, + 890, + 204 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8d4b5c7c30f547f27dc1f0dffe40615b6cedbbd5014ad3204a95d10f266d6b63.jpg", + "image_caption": [ + "Figure 10. Cross-view Editing of Edge2Car. Our 3D editing system allows users to edit label maps from any viewpoint instead of only the input view. Importantly, our feed-forward encoder allows fast inference of the latent code without GAN-inversion. Typically, a single forward pass of rendering takes only $40\\mathrm{ms}$ on a single RTX A5000, which enables interactive editing. Please check our demo video on our website." + ], + "image_footnote": [], + "bbox": [ + 89, + 255, + 890, + 411 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "10 images per label map in the test set using randomly sampled $z$ . We compare our generated images with the whole dataset, including training and test images.", + "bbox": [ + 75, + 470, + 470, + 517 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Alignment Metrics. We evaluate models on the test set using mean Intersection-over-Union (mIoU) and pixel accuracy (acc) for segmentation maps following existing works [57, 63], and average precision (AP) for edge maps. For those models that render label maps as output, we directly compare them with ground-truth labels. Otherwise, we first predict the label maps from the output RGB images using off-the-shelf networks [38, 66], and then compare the prediction with the ground truth. The metrics regarding such predicted semantic maps are reported within brackets in Table 1 and Table 2.", + "bbox": [ + 75, + 520, + 468, + 671 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For seg2face, we evaluate the preservation of facial identity from different viewpoints (FVV Identity) by calculating their distances with the dlib face recognition algorithm*.", + "bbox": [ + 75, + 672, + 470, + 717 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Baseline comparison", + "text_level": 1, + "bbox": [ + 76, + 728, + 272, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Baselines. Since there are no prior works on conditional 3D-aware image synthesis, we make minimum modifications to Pix2NeRF [6] to be conditional on label maps instead of images. For a thorough comparison, we introduce several baselines: SEAN [87] and SoFGAN [11]. 2D baselines like SEAN [87] cannot generate multi-view images by design (N/A for FVV Identity), while SoFGAN [11] uses an unconditional 3D semantic map generator before the 2D", + "bbox": [ + 75, + 755, + 468, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "generator so we can evaluate FVV Identity for that.", + "bbox": [ + 500, + 470, + 839, + 486 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. 
Figure 4 shows the qualitative comparison for seg2face and Table 1 reports the evaluation results. SoFGAN [11] tends to produce results with slightly better alignment but worse 3D consistency due to its 2D RGB generator. Our method achieves the best quality, alignment acc, and FVV Identity while being competitive with 2D baselines on SG diversity. Figure 5 shows the qualitative ablation on seg2face and seg2cat. Table 3 reports the metrics for seg2cat. Figure 6 shows the example results for edge2cat. Figure 7 shows the qualitative comparison for edge2car and Table 2 reports the metrics. Our method achieves the best image quality and alignment. Figure 8 shows semantic meshes of human and cat faces, extracted by marching cubes and colored by our learned 3D labels. We provide more evaluation results in the appendix of our arXiv version.", + "bbox": [ + 496, + 497, + 893, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation Study. We compare our full method to several variants. Specifically, (1) w/o 3D LABELS, we remove the branch of rendering label maps from our method, and (2) w/o CVC, we remove the cross-view consistency loss. From Table 1, Table 2, and Figure 5, rendering label maps is crucial for the alignment with the input. We posit that the joint learning of appearance, geometry, and label information poses strong constraints on correspondence between the input label maps and the 3D representation. Thus, our method can synthesize images pixel-aligned with the inputs. Our CVC loss helps preserve the facial identity from different viewpoints.", + "bbox": [ + 496, + 734, + 895, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "\\*https://github.com/ageitgey/face_recognition", + "bbox": [ + 94, + 887, + 450, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4440", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/701762258dbb14addbc873741842e7690298e349830108143d07aff390405b8c.jpg", + "image_caption": [ + "Figure 11. Multi-modal Synthesis. The leftmost column is the input segmentation map. We use the same segmentation map for each row. We generate multi-modal results by randomly sampling an appearance style for each column." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 467, + 404 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Analysis on random sampling of poses. We study the effect of different probabilities of sampling random poses during training, as shown in Figure 9. When sampling no random poses $(p = 0)$ , the model best aligns with input label maps but has suboptimal image quality. Conversely, only sampling random poses $(p = 1)$ gives the best image quality but suffers from severe misalignment with input label maps. We find that $p = 0.5$ balances the image quality and the alignment with the input.", + "bbox": [ + 75, + 470, + 470, + 606 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. Applications", + "text_level": 1, + "bbox": [ + 76, + 614, + 210, + 630 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Cross-view Editing. As shown in Figure 10, our 3D editing system allows users to generate and edit label maps from any viewpoint instead of only the input view. The edited label map is further fed into the conditional encoder to update the 3D representation. Unlike GAN inversion [83], our feedforward conditional encoder allows fast inference of the latent code. 
Thus, a single forward pass of our full model takes only $40\\mathrm{ms}$ on a single RTX A5000.", + "bbox": [ + 75, + 641, + 468, + 761 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-modal synthesis and interpolation. Like other style-based generative models [8, 21, 34, 36], our method can disentangle the geometry and appearance information. Specifically, the input label map captures the geometry information while the randomly sampled latent code controls the appearance. We show style manipulation results in Figure 11. We can also interpolate both the geometry styles and the appearance styles (Figure 12). These results show the clear disentanglement of our 3D representation.", + "bbox": [ + 75, + 763, + 472, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8ab2ba4ff5cd2044bcafae38e595a31eb99f389503ece2b7b84028a1763f0c5c.jpg", + "image_caption": [ + "Figure 12. Interpolation. In each $5 \\times 5$ grid, the images at the top left and bottom right are generated from the input maps next to them. Each row interpolates two images in label space, while each column interpolates the appearance. For camera poses, we interpolate the pitch along the row and the yaw along the column." + ], + "image_footnote": [], + "bbox": [ + 501, + 89, + 890, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Discussion", + "text_level": 1, + "bbox": [ + 500, + 622, + 612, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have introduced pix2pix3D, a 3D-aware conditional generative model for controllable image synthesis. Given a 2D label map, our model allows users to render images given any viewpoint. Our model augments the neural field with 3D labels, assigning label, color, and density to every 3D point, allowing for the simultaneous rendering of the image and a pixel-aligned label map. The learned 3D labels further enable interactive 3D cross-view editing. We discuss the limitations and societal impact in our arXiv version.", + "bbox": [ + 496, + 650, + 893, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. We thank Sheng-Yu Wang, Nupur Kumari, Gaurav Parmer, Ruihan Gao, Muyang Li, George Cazenavette, Andrew Song, Zhipeng Bao, Tamaki Kojima, Krishna Wadhwani, Takuya Narihira, and Tatsuo Fujiwara for their discussion and help. We are grateful for the support from Sony Corporation, Singapore DSTA, and the CMU Argo AI Center for Autonomous Vehicle Research.", + "bbox": [ + 496, + 794, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4441", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[2] Shir Amir, Yossi Gandelsman, Shai Bagon, and Tali Dekel. Deep vit features as dense visual descriptors. ECCVW What is Motion For?, 2022. 6", + "[3] David Bau, Hendrik Strobelt, William Peebles, Jonas Wulff, Bolei Zhou, Jun-Yan Zhu, and Antonio Torralba. Semantic photo manipulation with a generative image prior. In ACM SIGGRAPH, 2019. 2", + "[4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. 
In International Conference on Learning Representations (ICLR), 2018. 6", + "[5] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In International Conference on Learning Representations (ICLR), 2019. 2", + "[6] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional $\\pi$ -gan for single image to neural radiance fields translation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5, 6, 7", + "[7] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6", + "[8] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3, 4, 6, 8", + "[9] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[10] Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015. 2, 5, 6", + "[11] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. In ACM SIGGRAPH, 2021. 2, 5, 6, 7", + "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2", + "[13] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Tao Chen, Zhe Zhu, Ariel Shamir, Shi-Min Hu, and Daniel Cohen-Or. 3-sweep: Extracting editable objects from a single photo. ACM Transactions on Graphics (TOG), 32(6):1-10, 2013. 2", + "[15] Yuedong Chen, Qianyi Wu, Chuanxia Zheng, Tat-Jen Cham, and Jianfei Cai. Sem2nerf: Converting single-view semantic masks to neural radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2", + "[16] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6", + "[17] JOHANNA Delanoy, ADRIEN Bousseau, MATHIEU Aubry, PHILLIP Isola, and ALEXEIA A Efros. What you sketch is what you get: 3d sketching using multi-view deep volumetric prediction. In ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D), 2018. 2", + "[18] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised NeRF: Fewer views and faster training for free. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[19] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1", + "[20] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, 2014. 1, 2, 4", + "[21] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2, 3, 4, 6, 8", + "[22] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping plato's cave: 3d shape from adversarial rendering. In IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two timescale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems (NeurIPS), 2017. 6", + "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1", + "[25] Hsin-Ping Huang, Hung-Yu Tseng, Hsin-Ying Lee, and Jia-Bin Huang. Semantic view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2", + "[26] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In European Conference on Computer Vision (ECCV), 2018. 2", + "[27] Zeng Huang, Tianye Li, Weikai Chen, Yajie Zhao, Jun Xing, Chloe Legendre, Linjie Luo, Chongyang Ma, and Hao Li. Deep volumetric video from very sparse multi-view performance capture. In European Conference on Computer Vision (ECCV), pages 351-369, 2018. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4442", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Takeo Igarashi, Satoshi Matsuoka, and Hidehiko Tanaka. Teddy: a sketching interface for 3d freeform design. In ACM SIGGRAPH, 1999. 2", + "[29] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2", + "[30] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[31] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerffaceediting: Disentangled face editing in neural radiance fields. In ACM SIGGRAPH Asia, 2022. 2", + "[32] James T Kajiya and Brian P Von Herzen. Ray tracing volume densities. ACM SIGGRAPH, 18(3):165-174, 1984. 3", + "[33] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2", + "[34] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
1, 2, 6, 8", + "[35] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[36] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8", + "[37] Natasha Kholgade, Tomas Simon, Alexei Efros, and Yaser Sheikh. 3d object manipulation in a single photograph using stock 3d models. ACM Transactions on Graphics (TOG), 33(4):1-12, 2014.", + "[38] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7", + "[39] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Simon Lucey. Barf: Bundle-adjusting neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[40] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. Advances in neural information processing systems, 30, 2017. 2", + "[41] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5", + "[42] Zhaoliang Lun, Matheus Gadelha, Evangelos Kalogerakis, Subhransu Maji, and Rui Wang. 3d shape reconstruction from sketches via multi-view convolutional networks. In 2017 International Conference on 3D Vision (3DV). IEEE, 2017. 2" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[44] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[45] Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for gans do actually converge? In International Conference on Machine Learning (ICML), 2018. 4", + "[46] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3", + "[47] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014. 2", + "[48] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. In ACM SIGGRAPH, 2022. 2", + "[49] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In IEEE International Conference on Computer Vision (ICCV), 2019. 2", + "[50] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2", + "[51] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[52] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4", + "[53] Xingang Pan, Xudong Xu, Chen Change Loy, Christian Theobalt, and Bo Dai. A shading-guided generative implicit model for shape-accurate 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2", + "[54] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[55] Taesung Park, Alexei A Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for unpaired image-to-image translation. In European Conference on Computer Vision (ECCV), 2020. 2" + ], + "bbox": [ + 501, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4443", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[56] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2", + "[57] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6, 7", + "[58] Gaurav Parmar, Richard Zhang, and Jun-Yan Zhu. On aliased resizing and surprising subtleties in gan evaluation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6", + "[59] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In IEEE International Conference on Computer Vision (ICCV), 2021. 2", + "[60] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshop, 2018. 5", + "[61] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[62] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2020. 2", + "[63] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2021. 7", + "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[65] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2", + "[66] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 5, 6, 7", + "[67] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew Davison. iMAP: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2", + "[68] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. In ACM Transactions on Graphics (TOG), 2022. 2", + "[69] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4", + "[70] Matthew Tancik, Ben Mildenhall, Terrance Wang, Divi Schmidt, Pratul P Srinivasan, Jonathan T Barron, and Ren" + ], + "bbox": [ + 78, + 90, + 470, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ng. Learned initializations for optimizing coordinate-based neural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[71] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2", + "[72] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. In ACM Transactions on Graphics (TOG), 2021. 2", + "[73] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2", + "[74] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6", + "[75] Xiaohua Xie, Kai Xu, Niloy J Mitra, Daniel Cohen-Or, Wenyong Gong, Qi Su, and Baoquan Chen. Sketch-to-design: Context-based part assembly. In Computer Graphics Forum, volume 32, pages 233–245. Wiley Online Library, 2013. 2", + "[76] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2", + "[77] Shunyu Yao, Tzu Ming Hsu, Jun-Yan Zhu, Jiajun Wu, Antonio Torralba, Bill Freeman, and Josh Tenenbaum. 3d-aware scene manipulation via inverse graphics. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2", + "[78] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. Pixelnerf: Neural radiance fields from one or few images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2", + "[79] Jichao Zhang, Enver Sangineto, Hao Tang, Aliaksandr Siarohin, Zhun Zhong, Nicu Sebe, and Wei Wang. 3d-aware semantic-guided generative model for human synthesis. In European Conference on Computer Vision (ECCV), 2022. 2", + "[80] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. 
arXiv preprint arXiv:2010.07492, 2020. 2", + "[81] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 4", + "[82] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision (ECCV), 2020. 2", + "[83] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A Efros. Generative visual manipulation on the natural image manifold. In European Conference on Computer Vision (ECCV), 2016. 2, 8", + "[84] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent" + ], + "bbox": [ + 503, + 92, + 893, + 898 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "4444", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "adversarial networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 1, 2", + "[85] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darrell, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. Advances in neural information processing systems, 30, 2017. 6", + "[86] Jun-Yan Zhu, Zhoutong Zhang, Chengkai Zhang, Jiajun Wu, Antonio Torralba, Josh Tenenbaum, and Bill Freeman. Visual object networks: Image generation with disentangled 3d representations. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2", + "[87] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7", + "[88] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R. Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + ], + "bbox": [ + 78, + 90, + 470, + 373 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "4445", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_model.json b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_model.json new file mode 100644 index 0000000000000000000000000000000000000000..99607c98ee92a35135b349e5f90da2f41b79fc51 --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_model.json @@ -0,0 +1,2886 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.288, + 0.131, + 0.685, + 0.154 + ], + "angle": 0, + "content": "3D-aware Conditional Image Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.181, + 0.748, + 0.219 + ], + "angle": 0, + "content": "Kangle Deng Gengshan Yang Deva Ramanan Jun-Yan Zhu Carnegie Mellon University" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.249, + 0.895, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.54, + 0.895, + 0.581 + ], + "angle": 0, + "content": "Figure 1. Given a 2D label map as input, such as a segmentation or edge map, our model learns to predict high-quality 3D labels, geometry, and appearance, which enables us to render both labels and RGB images from different viewpoints. The inferred 3D labels further allow interactive editing of label maps from any viewpoint, as shown in Figure 10." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.594, + 0.314, + 0.61 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.619, + 0.472, + 0.816 + ], + "angle": 0, + "content": "We propose pix2pix3D, a 3D-aware conditional generative model for controllable photorealistic image synthesis. Given a 2D label map, such as a segmentation or edge map, our model learns to synthesize a corresponding image from different viewpoints. To enable explicit 3D user control, we extend conditional generative models with neural radiance fields. Given widely-available posed monocular image and label map pairs, our model learns to assign a label to every 3D point in addition to color and density, which enables it to render the image and pixel-aligned label map simultaneously. Finally, we build an interactive system that allows users to edit the label map from different viewpoints and generate outputs accordingly." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.845, + 0.208, + 0.86 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Content creation with generative models has witnessed tremendous progress in recent years, enabling high-quality," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.595, + 0.895, + 0.746 + ], + "angle": 0, + "content": "user-controllable image and video synthesis [19, 20, 24, 34]. In particular, image-to-image translation methods [29, 56, 84] allow users to interactively create and manipulate a high-resolution image given a 2D input label map. Unfortunately, existing image-to-image translation methods operate purely in 2D, without explicit reasoning of the underlying 3D structure of the content. As shown in Figure 1, we aim to make conditional image synthesis 3D-aware, allowing not only 3D content generation but also viewpoint manipulation and attribute editing (e.g., car shape) in 3D." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.748, + 0.895, + 0.869 + ], + "angle": 0, + "content": "Synthesizing 3D content conditioned on user input is challenging. For model training, it is costly to obtain large-scale datasets with paired user inputs and their desired 3D outputs. During test time, 3D content creation often requires multi-view user inputs, as a user may want to specify the details of 3D objects using 2D interfaces from different viewpoints. However, these inputs may not be 3D-consistent, providing conflicting signals for 3D content creation." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.895, + 0.901 + ], + "angle": 0, + "content": "To address the above challenges, we extend conditional generative models with 3D neural scene representations. To" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4434" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.273 + ], + "angle": 0, + "content": "enable cross-view editing, we additionally encode semantic information in 3D, which can then be rendered as 2D label maps from different viewpoints. We learn the aforementioned 3D representation using only 2D supervision in the form of image reconstruction and adversarial losses. While the reconstruction loss ensures the alignment between 2D user inputs and corresponding 3D content, our pixel-aligned conditional discriminator encourages the appearance and labels to look plausible while remaining pixel-aligned when rendered into novel viewpoints. We also propose a cross-view consistency loss to enforce the latent codes to be consistent from different viewpoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.274, + 0.473, + 0.44 + ], + "angle": 0, + "content": "We focus on 3D-aware semantic image synthesis on the CelebAMask-HQ [38], AFHQ-cat [16], and shapenetcar [10] datasets. Our method works well for various 2D user inputs, including segmentation maps and edge maps. Our method outperforms several 2D and 3D baselines, such as Pix2NeRF variants [6], SofGAN [11], and SEAN [87]. We further ablate the impact of various design choices and demonstrate applications of our method, such as cross-view editing and explicit user control over semantics and style. Please see our website for more results and code. Please check out the full version of our paper at arXiv." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.457, + 0.22, + 0.472 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.473, + 0.82 + ], + "angle": 0, + "content": "Neural Implicit Representation. Neural implicit fields, such as DeepSDF and NeRFs [46, 54], model the appearance of objects and scenes with an implicitly defined, continuous 3D representation parameterized by neural networks. They have produced significant results for 3D reconstruction [67, 88] and novel view synthesis applications [39, 43, 44, 48, 80] thanks to their compactness and expressiveness. NeRF and its descendants aim to optimize a network for an individual scene, given hundreds of images from multiple viewpoints. Recent works further reduce the number of training views through learning network initializations [13, 70, 78], leveraging auxiliary supervision [18, 30], or imposing regularization terms [50]. Recently, explicit or hybrid representations of radiance fields [12, 48, 61] have also shown promising results regarding quality and speed. In our work, we use hybrid representations for modeling both user inputs and outputs in 3D, focusing on synthesizing novel images rather than reconstructing an existing scene. A recent work Pix2NeRF [6] aims to translate a single image to a neural radiance field, which allows single-image novel view synthesis. In contrast, we focus on 3D-aware user-controlled content generation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.826, + 0.472, + 0.903 + ], + "angle": 0, + "content": "Conditional GANs. 
Generative adversarial networks (GANs) learn the distribution of natural images by forcing the generated and real images to be indistinguishable. They have demonstrated high-quality results on 2D image synthesis and manipulation [1, 3, 5, 20, 33-35, 59, 65, 72, 82, 83]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.228 + ], + "angle": 0, + "content": "Several methods adopt image-conditional GANs [29, 47] for user-guided image synthesis and editing applications [26, 27, 38, 40, 55, 56, 62, 73, 84, 87]. In contrast, we propose a 3D-aware generative model conditioned on 2D user inputs that can render view-consistent images and enable interactive 3D editing. Recently, SoFGAN [11] uses a 3D semantic map generator and a 2D semantic-to-image generator to enable 3D-aware generation, but using 2D generators does not ensure 3D consistency." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.23, + 0.897, + 0.517 + ], + "angle": 0, + "content": "3D-aware Image Synthesis. Early data-driven 3D image editing systems can achieve various 3D effects but often require a huge amount of manual effort [14, 37]. Recent works have integrated the 3D structure into learning-based image generation pipelines using various geometric representations, including voxels [22,86], voxelized 3D features [49], and 3D morphable models [71, 77]. However, many rely on external 3D data [71, 77, 86]. Recently, neural scene representations have been integrated into GANs to enable 3D-aware image synthesis [8,9,21,51-53,64,76]. Intriguingly, these 3D-aware GANs can learn 3D structures without any 3D supervision. For example, StyleNeRF [21] and EG3D [8] learn to generate 3D representations by modulating either NeRFs or explicit representations with latent style vectors. This allows them to render high-resolution view-consistent images. Unlike the above methods, we focus on conditional synthesis and interactive editing rather than random sampling. Several works [17,28,42,75] have explored sketch-based shape generation but they do not allow realistic image synthesis." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.518, + 0.897, + 0.82 + ], + "angle": 0, + "content": "Closely related to our work, Huang et al. [25] propose synthesizing novel views conditional on a semantic map. Our work differs in three ways. First, we can predict full 3D labels, geometry, and appearance, rather than only 2D views, which enables cross-view editing. Second, our method can synthesize images with a much wider baseline than Huang et al. [25]. Finally, our learning algorithm does not require ground truth multi-view images of the same scene. Two recent works, FENeRF [69] and 3DSGAN [79], also leverage semantic labels for training 3D-aware GANs, but they do not support conditional inputs and require additional efforts (e.g., GAN-inversion) to allow user editing. Three concurrent works, IDE-3D [68], NeRFFaceEditing [31], and sem2nerf [15], also explore the task of 3D-aware generation based on segmentation masks. However, IDE-3D and sem2nerf only allow editing on a fixed view, and NeRF-FaceEditing focuses on real image editing rather than generation. All of them do not include results for other input modalities. In contrast, we present a general-purpose method that works well for diverse datasets and input controls." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.831, + 0.593, + 0.845 + ], + "angle": 0, + "content": "3. 
Method" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Given a 2D label map \\(\\mathbf{I}_{\\mathbf{s}}\\), such as a segmentation or edge map, pix2pix3D generates a 3D-volumetric representation of geometry, appearance, and labels that can be rendered" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4435" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.093, + 0.89, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.449, + 0.897, + 0.548 + ], + "angle": 0, + "content": "Figure 2. Overall pipeline. Given a 2D label map (e.g., segmentation map), a random latent code \\( z \\), and a camera pose \\( \\hat{P} \\) as inputs, our generator renders the label map and image from viewpoint \\( \\hat{P} \\). Intuitively, the input label map specifies the geometric structure, while the latent code captures the appearance, such as hair color. We begin with an encoder that encodes both the input label map and the latent code into style vectors \\( \\mathbf{w}^{+} \\). We then use \\( \\mathbf{w}^{+} \\) to modulate our 3D representation, which takes a spatial point \\( \\mathbf{x} \\) and outputs (1) color \\( \\mathbf{c} \\in \\mathbb{R}^3 \\), (2) density \\( \\sigma \\), (3) feature \\( \\phi \\in \\mathbb{R}^l \\), and (4) label \\( \\mathbf{s} \\in \\mathbb{R}^c \\). We then perform volumetric rendering and 2D upsampling to get the high-resolution label map \\( \\hat{\\mathbf{I}}_{\\mathbf{s}}^{+} \\) and RGB Image \\( \\hat{\\mathbf{I}}_{\\mathbf{c}}^{+} \\). For those rendered from ground-truth poses, we compare them to ground-truth labels and images with an LPIPS loss and label reconstruction loss. We apply a GAN loss on labels and images rendered from both novel and original viewpoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.472, + 0.645 + ], + "angle": 0, + "content": "from different viewpoints. Figure 2 provides an overview. We first introduce the formulation of our 3D conditional generative model for 3D-aware image synthesis in Section 3.1. Then, in Section 3.2, we discuss how to learn the model from color and label map pairs \\(\\{\\mathbf{I}_{\\mathrm{c}},\\mathbf{I}_{\\mathrm{s}}\\}\\) associated with poses \\(\\mathbf{P}\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.65, + 0.383, + 0.665 + ], + "angle": 0, + "content": "3.1. Conditional 3D Generative Models" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.47, + 0.794 + ], + "angle": 0, + "content": "Similar to EG3D [8], we adopt a hybrid representation for the density and appearance of a scene and use style vectors to modulate the 3D generations. To condition the 3D representations on 2D label map inputs, we introduce a conditional encoder that maps a 2D label map into a latent style vector. Additionally, pix2pix3D produces 3D labels that can be rendered from different viewpoints, allowing for cross-view user editing." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.856 + ], + "angle": 0, + "content": "Conditional Encoder. 
Given a 2D label map input \\(\\mathbf{I}_{\\mathrm{s}}\\) and a random latent code sampled from the spherical Gaussian space \\(\\mathbf{z} \\sim \\mathcal{N}(0, I)\\), our conditional encoder \\(E\\) outputs a list of style vectors \\(\\mathbf{w}^{+} \\in \\mathbb{R}^{l \\times 256}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.864, + 0.33, + 0.88 + ], + "angle": 0, + "content": "\\[\n\\mathbf {w} ^ {+} = E (\\mathbf {I _ {s}}, \\mathbf {z}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.473, + 0.902 + ], + "angle": 0, + "content": "where \\( l = 13 \\) is the number of layers to be modulated." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.893, + 0.643 + ], + "angle": 0, + "content": "Specifically, we encode \\(\\mathbf{I}_{\\mathrm{s}}\\) into the first 7 style vectors that represent the global geometric information of the scene. We then feed the random latent code \\(\\mathbf{z}\\) through a Multi-Layer Perceptron (MLP) mapping network to obtain the rest of the style vectors that control the appearance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.895, + 0.798 + ], + "angle": 0, + "content": "Conditional 3D Representation. Our 3D representation is parameterized by tri-planes followed by an 2-layer MLP \\( f \\) [8], which takes in a spatial point \\( \\mathbf{x} \\in \\mathbb{R}^3 \\) and returns 4 types of outputs: (1) color \\( \\mathbf{c} \\in \\mathbb{R}^3 \\), (2) density \\( \\sigma \\in \\mathbb{R}^+ \\), (3) feature \\( \\phi \\in \\mathbb{R}^{64} \\) for the purpose of 2D upsampling, and most notably, (4) label \\( \\mathbf{s} \\in \\mathbb{R}^c \\), where \\( c \\) is the number of classes if \\( \\mathbf{I_s} \\) is a segmentation map, otherwise 1 for edge labels. We make the field conditional by modulating the generation of tri-planes \\( F^{\\mathrm{tri}} \\) with the style vectors \\( \\mathbf{w}^+ \\). We also remove the view dependence of the color following [8, 21]. Formally," + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.808, + 0.785, + 0.827 + ], + "angle": 0, + "content": "\\[\n(\\mathbf {c}, \\mathbf {s}, \\sigma , \\phi) = f (F _ {\\mathbf {w} ^ {+}} ^ {\\mathrm {t r i}} (\\mathbf {x})).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Volume Rendering and Upsampling. We apply volumetric rendering to synthesize color images [32, 46]. In addition, we render label maps, which are crucial for enabling cross-view editing (Section 4.3) and improving rendering quality" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4436" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.472, + 0.213 + ], + "angle": 0, + "content": "(Table 1). Given a viewpoint \\(\\hat{P}\\) looking at the scene origin, we sample \\(N\\) points along the ray that emanates from a pixel location and query density, color, labels, and feature information from our 3D representation. Let \\(\\mathbf{x_i}\\) be the i-th sampled point along the ray \\(r\\). Let \\(\\mathbf{c}_i, \\mathbf{s}_i\\) and \\(\\phi_i\\) be the color, labels, and the features of \\(\\mathbf{x_i}\\). 
Similar to [69], The color, label map, and feature images are computed as the weighted combination of queried values," + }, + { + "type": "equation", + "bbox": [ + 0.086, + 0.223, + 0.47, + 0.277 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {I}} _ {\\mathbf {c}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {c} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\mathbf {s}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {s} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\phi} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\phi_ {i}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.278, + 0.471, + 0.324 + ], + "angle": 0, + "content": "where the transmittance \\(\\tau_{i}\\) is computed as the probability of a photon traversing between the camera center and the i-th point given the length of the i-th interval \\(\\delta_{i}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.334, + 0.412, + 0.378 + ], + "angle": 0, + "content": "\\[\n\\tau_ {i} = \\prod_ {j = 1} ^ {i} \\exp \\left(- \\sigma_ {j} \\delta_ {j}\\right) (1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.386, + 0.47, + 0.462 + ], + "angle": 0, + "content": "Similar to prior works [8, 21, 52], we approximate Equation 1 by 2D Upsampler \\(U\\) to reduce the computational cost. We render high-res \\(512 \\times 512\\) images in two passes. In the first pass, we render low-res \\(64 \\times 64\\) images \\(\\hat{\\mathbf{I}}_{\\mathbf{c}}, \\hat{\\mathbf{I}}_{\\mathbf{s}}, \\hat{\\mathbf{I}}_{\\phi}\\). Then a CNN up-sampler \\(U\\) is applied to obtain high-res images," + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.47, + 0.401, + 0.49 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\phi}), \\qquad \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\phi}).\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.499, + 0.265, + 0.515 + ], + "angle": 0, + "content": "3.2. Learning Objective" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.523, + 0.471, + 0.627 + ], + "angle": 0, + "content": "Learning conditional 3D representations from monocular images is challenging due to its under-constrained nature. Given training data of associated images, label maps, and camera poses predicted by an off-the-shelf model, we carefully construct learning objectives, including reconstruction, adversarial, and cross-view consistency losses. These objectives will be described below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.47, + 0.753 + ], + "angle": 0, + "content": "Reconstruction Loss. Given a ground-truth viewpoint \\(\\mathbf{P}\\) associated with the color and label maps \\(\\{\\mathbf{I}_{\\mathbf{c}}, \\mathbf{I}_{\\mathbf{s}}\\}\\), we render color and label maps from \\(\\mathbf{P}\\) and compute reconstruction losses for both high-res and low-res output. We use LPIPS [81] to compute the image reconstruction loss \\(\\mathcal{L}_c\\) for color images. 
For label reconstruction loss \\(\\mathcal{L}_s\\), we use the balanced cross-entropy loss for segmentation maps or L2 Loss for edge maps," + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.763, + 0.442, + 0.781 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {r e c o n}} = \\lambda_ {c} \\mathcal {L} _ {c} \\left(\\mathbf {I} _ {\\mathbf {c}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} \\right\\}\\right) + \\lambda_ {s} \\mathcal {L} _ {s} \\left(\\mathbf {I} _ {\\mathbf {s}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} \\right\\}\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.792, + 0.317, + 0.806 + ], + "angle": 0, + "content": "where \\(\\lambda_{c}\\) and \\(\\lambda_{s}\\) balance two terms." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Pixel-aligned Conditional Discriminator. The reconstruction loss alone fails to synthesize detailed results from novel viewpoints. Therefore, we use an adversarial loss [20] to enforce renderings to look realistic from random viewpoints. Specifically, we have two discriminators \\( D_{\\mathbf{c}} \\) and \\( D_{\\mathbf{s}} \\) for RGB images and label maps, respectively. \\( D_{\\mathbf{c}} \\) is a widely-used" + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.089, + 0.806, + 0.103 + ], + "angle": 0, + "content": "Multi-view Generation of Seg Maps" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.104, + 0.891, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.231, + 0.894, + 0.318 + ], + "angle": 0, + "content": "Figure 3. Cross-View Consistency Loss. Given an input label map \\(\\mathbf{I}_{\\mathbf{s}}\\) and its associated pose \\(\\mathbf{P}\\), we first infer the geometry latent code \\(\\mathbf{w}_{\\mathbf{g}}\\). From \\(\\mathbf{w}_{\\mathbf{g}}\\), we can generate a label map \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}\\) from the same pose \\(\\mathbf{P}\\), and \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}'\\) from a random pose \\(\\mathbf{P}'\\). Next, we infer \\(\\mathbf{w}_{\\mathbf{g}}'\\) from the novel view \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}'\\), and render it back to the original pose \\(\\mathbf{P}\\) to obtain \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}''\\). Finally, we add a reconstruction loss: \\(\\mathcal{L}_{\\mathrm{CVC}} = \\lambda_{\\mathrm{CVC}}\\mathcal{L}_s(\\hat{\\mathbf{I}}_{\\mathbf{s}}'', \\hat{\\mathbf{I}}_{\\mathbf{s}})\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.34, + 0.895, + 0.476 + ], + "angle": 0, + "content": "GAN loss that takes real and fake images as input, while the pixel-aligned conditional discriminator \\( D_{\\mathbf{s}} \\) concatenates color images and label maps as input, which encourages pixel alignment between color images and label maps. Notably, in \\( D_{\\mathbf{s}} \\), we stop the gradients for the color images to prevent a potential quality downgrade. We also feed the rendered low-res images to prevent the upsampler from hallucinating details, inconsistent with the low-res output. The adversarial loss can be written as follows." 
+ }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.483, + 0.871, + 0.502 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {G A N}} = \\lambda_ {D _ {\\mathbf {c}}} \\mathcal {L} _ {D _ {\\mathbf {c}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}) + \\lambda_ {D _ {\\mathbf {s}}} \\mathcal {L} _ {D _ {\\mathbf {s}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.891, + 0.54 + ], + "angle": 0, + "content": "where \\(\\lambda_{D_{\\mathrm{c}}}\\) and \\(\\lambda_{D_{\\mathrm{s}}}\\) balance two terms. To stabilize the GAN training, we adopt the R1 regularization loss [45]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.543, + 0.892, + 0.679 + ], + "angle": 0, + "content": "Cross-view Consistency Loss. We observe that inputting label maps of the same object from different viewpoints will sometimes result in different 3D shapes. Therefore we add a cross-view consistency loss to regularize the training, as illustrated in Figure 3. Given an input label map \\(\\mathbf{I}_{\\mathbf{s}}\\) and its associated pose \\(\\mathbf{P}\\), we generate the label map \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}\\) from a different viewpoint \\(\\mathbf{P}^{\\prime}\\), and render the label map \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}\\) back to the pose \\(\\mathbf{P}\\) using \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}\\) as input. We add a reconstruction loss between \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}\\) and \\(\\hat{\\mathbf{I}}_{\\mathbf{s}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.612, + 0.686, + 0.78, + 0.704 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {C V C}} = \\lambda_ {\\mathrm {C V C}} \\mathcal {L} _ {s} (\\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {\\prime \\prime}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.711, + 0.894, + 0.757 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_s\\) denotes the reconstruction loss in the label space, and \\(\\lambda_{\\mathrm{CVC}}\\) weights the loss term. This loss is crucial for reducing error accumulation during cross-view editing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.76, + 0.894, + 0.788 + ], + "angle": 0, + "content": "Optimization. Our final learning objective is written as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.589, + 0.79, + 0.802, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {r e c o n}} + \\mathcal {L} _ {\\text {G A N}} + \\mathcal {L} _ {\\text {C V C}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.894, + 0.901 + ], + "angle": 0, + "content": "At every iteration, we determine whether to use a ground-truth pose or sample a random one with a probability of \\( p \\). We use the reconstruction loss and GAN loss for ground-truth poses, while for random poses, we only use the GAN loss. We provide the hyper-parameters and more implementation details in the appendix of our arXiv version." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4437" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.095, + 0.093, + 0.182, + 0.107 + ], + "angle": 0, + "content": "Input Seg Map" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.109, + 0.189, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.094, + 0.318, + 0.104 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.197, + 0.106, + 0.427, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.492, + 0.093, + 0.553, + 0.103 + ], + "angle": 0, + "content": "Pix2NeRF" + }, + { + "type": "image", + "bbox": [ + 0.428, + 0.106, + 0.599, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.655, + 0.093, + 0.71, + 0.103 + ], + "angle": 0, + "content": "SoFGAN" + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.106, + 0.772, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.81, + 0.093, + 0.849, + 0.103 + ], + "angle": 0, + "content": "SEAN" + }, + { + "type": "image", + "bbox": [ + 0.772, + 0.106, + 0.888, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.199, + 0.893, + 0.241 + ], + "angle": 0, + "content": "Figure 4. Qualitative Comparison with Pix2NeRF [6], SoFGAN [11], and SEAN [87] on CelebAMask dataset for seg2face task. SEAN fails in multi-view synthesis, while SoFGAN suffers from multi-view inconsistency (e.g., face identity changes across viewpoints). Our method renders high-quality images while maintaining multi-view consistency. Please check our website for more examples." + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.244, + 0.17, + 0.257 + ], + "angle": 0, + "content": "Input Seg Map" + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.257, + 0.167, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.237, + 0.244, + 0.266, + 0.255 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.256, + 0.321, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.352, + 0.244, + 0.431, + 0.256 + ], + "angle": 0, + "content": "w/o 3D Labels" + }, + { + "type": "image", + "bbox": [ + 0.326, + 0.256, + 0.466, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.332, + 0.177, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.332, + 0.321, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.332, + 0.467, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.409, + 0.167, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.18, + 0.409, + 0.321, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.409, + 0.467, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.485, + 0.893, + 0.512 + ], + "angle": 0, + "content": "Figure 5. Qualitative ablation on seg2face and seg2cat. We ablate our method by removing the branch that renders label maps (w/o 3D Labels). Our results better align with input labels (e.g., hairlines and the cat's ear)." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.538, + 0.196, + 0.552 + ], + "angle": 0, + "content": "Input Edge Map" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.553, + 0.209, + 0.746 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.524, + 0.451, + 0.538 + ], + "angle": 0, + "content": "Rendered RGB images & edge maps" + }, + { + "type": "image_caption", + "bbox": [ + 0.252, + 0.538, + 0.311, + 0.55 + ], + "angle": 0, + "content": "GT View" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.552, + 0.468, + 0.747 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.749, + 0.471, + 0.777 + ], + "angle": 0, + "content": "Figure 6. Results on edge2cat. Our model is trained on AFHQcat [16] with edges extracted by pidinet [66]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.792, + 0.202, + 0.81 + ], + "angle": 0, + "content": "4. Experiment" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.819, + 0.471, + 0.88 + ], + "angle": 0, + "content": "We first introduce the datasets and evaluation metrics. Then we compare our method with the baselines. Finally, we demonstrate cross-view editing and multi-modal synthesis applications enabled by our method." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.886, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Datasets. We consider four tasks: seg2face, seg2cat," + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.244, + 0.592, + 0.258 + ], + "angle": 0, + "content": "Input Seg Map" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.257, + 0.598, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.653, + 0.244, + 0.681, + 0.256 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.257, + 0.743, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.779, + 0.244, + 0.857, + 0.256 + ], + "angle": 0, + "content": "w/o 3D Labels" + }, + { + "type": "image", + "bbox": [ + 0.749, + 0.256, + 0.89, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.332, + 0.602, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.332, + 0.744, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.748, + 0.332, + 0.89, + 0.406 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.408, + 0.602, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.408, + 0.744, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.749, + 0.408, + 0.89, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.503, + 0.587, + 0.592, + 0.6 + ], + "angle": 0, + "content": "Input Edge Map" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.601, + 0.587, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.595, + 0.525, + 0.89, + 0.709 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.712, + 0.892, + 0.755 + ], + "angle": 0, + "content": "Figure 7. Qualitative comparisons on edge2car. pix2pix3D (Ours) and Pix2NeRF [6] are trained on shapenet-car [10], and pix2pix3D achieves better quality and alignment than Pix2NeRF." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.893, + 0.901 + ], + "angle": 0, + "content": "edge2cat, and edge2car in our experiments. For seg2face, we use CelebAMask-HQ [38] for evaluation. CelebAMask-HQ contains 30,000 high-resolution face images from CelebA [41], and each image has a facial part segmentation mask and a predicted pose. The segmentation masks contain 19 classes, including skin, eyebrows, ears, mouth, lip, etc. The pose associated with each image segmentation is predicted by HopeNet [60]. We split the CelebAMask-HQ dataset into" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4438" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.079, + 0.089, + 0.487, + 0.245 + ], + "angle": 0, + "content": "
Seg2Face (CELEBAMASK [38]) | QUALITY: FID ↓ | KID ↓ | SG Diversity ↑ | ALIGNMENT: mIoU ↑ | acc ↑ | FVV Identity ↓
SEAN [87] | 32.74 | 0.018 | 0.29 | 0.52 | 0.85 | N/A
SoFGAN [11] | 23.34 | 0.012 | 0.33 | 0.53 | 0.89 | 0.58
PIX2NERF [6] | 54.23 | 0.042 | 0.16 | 0.36 | 0.65 | 0.44
PIX2PIX3D (OURS)
W/O 3D LABELS | 12.96 | 0.005 | 0.30 | N/A (0.43) | N/A (0.81) | 0.38
W/O CVC | 11.62 | 0.004 | 0.30 | 0.50 (0.50) | 0.87 (0.85) | 0.42
FULL MODEL | 11.54 | 0.003 | 0.28 | 0.51 (0.52) | 0.90 (0.88) | 0.36
FULL MODEL† | 11.13 | 0.003 | 0.29 | 0.51 (0.50) | 0.90 (0.87) | 0.36
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.255, + 0.473, + 0.506 + ], + "angle": 0, + "content": "Table 1. Seg2face Evaluation. Our metrics include image quality (FID, KID, SG Diversity), alignment (mIoU and acc against GT label maps), and multi-view consistency (FVV Identity). Single-generation diversity (SG Diversity) is obtained by computing the LPIPS metric between randomly generated pairs given a single conditional input. To evaluate alignment, we compare the generated label maps against the ground truth in terms of mIoU and pixel accuracy (acc). Alternatively, given a generated image, one could estimate label maps via a face parser, and compare those against the ground truth (numbers in parentheses). We include SEAN [87] and SoFGAN [11] as baselines, and modify Pix2NeRF [6] to take conditional input. Our method achieves the best quality, alignment ACC, and FVV Identity while being competitive on SG Diversity. SoFGAN tends to have better alignment but worse 3D consistency. We also ablate our method w.r.t the 3D labels and the cross-view consistency (CVC) loss. Our 3D labels are crucial for alignment, while the CVC loss improves multi-view consistency. Using pretrained models from EG3D \\((\\dagger)\\) also improves the performance." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.512, + 0.455, + 0.627 + ], + "angle": 0, + "content": "
Edge2Car | QUALITY: FID ↓ | KID ↓ | SG Diversity ↑ | ALIGNMENT: AP ↑
PIX2NERF [6] | 23.42 | 0.014 | 0.06 | 0.28
PIX2PIX3D (OURS)
w/o 3D LABELS | 10.73 | 0.005 | 0.12 | 0.45 (0.42)
w/o CVC | 9.42 | 0.004 | 0.13 | 0.61 (0.59)
FULL MODEL | 8.31 | 0.004 | 0.13 | 0.63 (0.59)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.637, + 0.472, + 0.776 + ], + "angle": 0, + "content": "Table 2. Edge2car Evaluation. We compare our method with Pix2NeRF [6] on edge2car using the shapenet-car [10] dataset. Similar to Table 1, we evaluate FID, KID, and SG Diversity for image quality. We also evaluate the alignment with the input edge map using AP. Similarly, we can either run informative drawing [7] on generated images to obtain edge maps (numbers in parentheses) or directly use generated edge maps to calculate the metrics. We achieve better image quality and alignment than Pix2NeRF. We also find that using 3D labels and cross-view consistency loss is helpful regarding FID and AP metrics." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.472, + 0.903 + ], + "angle": 0, + "content": "a training set of 24,183, a validation set of 2,993, and a test set of 2,824, following the original work [38]. For seg2cat and edge2cat, we use AFHQ-cat [16], which contains 5,065 images at \\(512 \\times\\) resolution. We estimate the viewpoints using unsup3d [74]. We extract the edges using pidinet [66] and obtain segmentation by clustering DINO features [2] into 6 classes. For edge2car, we use 3D models from shapenet-" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.089, + 0.892, + 0.204 + ], + "angle": 0, + "content": "
Seg2Cat (AFHQ-CAT [16]) | QUALITY: FID ↓ | KID ↓ | SG Diversity ↑ | ALIGNMENT: mIoU ↑ | acc ↑
PIX2NERF [6] | 43.92 | 0.081 | 0.15 | 0.27 | 0.58
OURS
w/o 3D LABELS | 10.41 | 0.004 | 0.26 | N/A (0.49) | N/A (0.69)
w/o CVC | 9.64 | 0.004 | 0.26 | 0.66 (0.63) | 0.76 (0.73)
FULL MODEL | 8.62 | 0.003 | 0.27 | 0.66 (0.62) | 0.78 (0.73)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.214, + 0.895, + 0.283 + ], + "angle": 0, + "content": "Table 3. Seg2cat Evaluation. We compare our method with Pix2NeRF [6] on Seg2Cat using AFHQ-cat dataset [16], with segmentation obtained by clustering DINO features [2]. Similar to Table 1, we evaluate the image quality and alignment. Ours performs better in all metrics." + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.291, + 0.888, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.471, + 0.892, + 0.5 + ], + "angle": 0, + "content": "Figure 8. Semantic Mesh. We show semantic meshes of human and cat faces from marching cubes colored by 3D labels." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.513, + 0.892, + 0.574 + ], + "angle": 0, + "content": "car [10] and render 500,000 images at \\(128 \\times\\) resolution for training, and 30,000 for evaluation. We extract the edges using informative drawing [7]. We train our model at \\(512 \\times\\) resolution except for \\(128 \\times\\) in the edge2car task." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.578, + 0.892, + 0.7 + ], + "angle": 0, + "content": "Running Time. For training the model at \\(512 \\times\\) resolution, it takes about three days on eight RTX 3090 GPUs. But we can significantly reduce the training time to 4 hours if we initialize parts of our model with pretrained weights from EG3D [8]. During inference, our model takes \\(10\\mathrm{ms}\\) to obtain the style vector, and another \\(30\\mathrm{ms}\\) to render the final image and the label map on a single RTX A5000. The low latency (25 FPS) allows for interactive user editing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.708, + 0.683, + 0.723 + ], + "angle": 0, + "content": "4.1. Evaluation metrics" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.731, + 0.892, + 0.777 + ], + "angle": 0, + "content": "We evaluate the models from two aspects: 1) the image quality regarding fidelity and diversity, and 2) the alignment between input label maps and generated outputs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Quality Metrics. Following prior works [21, 57], we use the clean-fid library [58] to compute Fréchet Inception Distance (FID) [23] and Kernel Inception Distance (KID) [4] to measure the distribution distance between synthesized results and real images. We also evaluate the single-generation diversity (SG Diversity) by calculating the LPIPS metric between randomly generated pairs given a single input following prior works [11, 85]. For FID and KID, we generate" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4439" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.28, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.089, + 0.485, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.089, + 0.688, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.09, + 0.892, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.208, + 0.895, + 0.251 + ], + "angle": 0, + "content": "Figure 9. We study the effect of random pose sampling probability \\( p \\) during training. 
Without random poses (\\( p = 0 \\)), the model achieves the best alignment with input semantic maps, with reduced image quality. In contrast, only using random poses (\\( p = 1 \\)) achieves the best image quality, while results fail to align with input maps. We find \\( p = 0.5 \\) balances the image quality and input alignment." + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.256, + 0.891, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.415, + 0.895, + 0.459 + ], + "angle": 0, + "content": "Figure 10. Cross-view Editing of Edge2Car. Our 3D editing system allows users to edit label maps from any viewpoint instead of only the input view. Importantly, our feed-forward encoder allows fast inference of the latent code without GAN-inversion. Typically, a single forward pass of rendering takes only \\(40\\mathrm{ms}\\) on a single RTX A5000, which enables interactive editing. Please check our demo video on our website." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.472, + 0.471, + 0.518 + ], + "angle": 0, + "content": "10 images per label map in the test set using randomly sampled \\( z \\). We compare our generated images with the whole dataset, including training and test images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.521, + 0.47, + 0.672 + ], + "angle": 0, + "content": "Alignment Metrics. We evaluate models on the test set using mean Intersection-over-Union (mIoU) and pixel accuracy (acc) for segmentation maps following existing works [57, 63], and average precision (AP) for edge maps. For those models that render label maps as output, we directly compare them with ground-truth labels. Otherwise, we first predict the label maps from the output RGB images using off-the-shelf networks [38, 66], and then compare the prediction with the ground truth. The metrics regarding such predicted semantic maps are reported within brackets in Table 1 and Table 2." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.471, + 0.718 + ], + "angle": 0, + "content": "For seg2face, we evaluate the preservation of facial identity from different viewpoints (FVV Identity) by calculating their distances with the dlib face recognition algorithm*." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.729, + 0.273, + 0.745 + ], + "angle": 0, + "content": "4.2. Baseline comparison" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.756, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Baselines. Since there are no prior works on conditional 3D-aware image synthesis, we make minimum modifications to Pix2NeRF [6] to be conditional on label maps instead of images. For a thorough comparison, we introduce several baselines: SEAN [87] and SoFGAN [11]. 2D baselines like SEAN [87] cannot generate multi-view images by design (N/A for FVV Identity), while SoFGAN [11] uses an unconditional 3D semantic map generator before the 2D" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.472, + 0.84, + 0.487 + ], + "angle": 0, + "content": "generator so we can evaluate FVV Identity for that." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.498, + 0.895, + 0.726 + ], + "angle": 0, + "content": "Results. Figure 4 shows the qualitative comparison for seg2face and Table 1 reports the evaluation results. SoFGAN [11] tends to produce results with slightly better alignment but worse 3D consistency for its 2D RGB generator. Our method achieves the best quality, alignment acc, and FVV Identity while being competitive with 2D baselines on SG diversity. 
Figure 5 shows the qualitative ablation on seg2face and seg2cat. Table ?? reports the metrics for seg2cat. Figure 6 shows the example results for edge2cat. Figure 7 shows the qualitative comparison for edge2car and Table 2 reports the metrics. Our method achieves the best image quality and alignment. Figure 8 shows semantic meshes of human and cat faces, extracted by marching cubes and colored by our learned 3D labels. We provide more evaluation results in the appendix of our arXiv version." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.897, + 0.903 + ], + "angle": 0, + "content": "Ablation Study. We compare our full method to several variants. Specifically, (1) w/o 3D LABELS, we remove the branch of rendering label maps from our method, and (2) w/o CVC, we remove the cross-view consistency loss. From Table 1, Table 2, and Figure 5, rendering label maps is crucial for the alignment with the input. We posit that the joint learning of appearance, geometry, and label information poses strong constraints on correspondence between the input label maps and the 3D representation. Thus our method can synthesize images pixel-aligned with the inputs. Our CVC loss helps preserve the facial identity from different viewpoints." + }, + { + "type": "page_footnote", + "bbox": [ + 0.095, + 0.888, + 0.451, + 0.901 + ], + "angle": 0, + "content": "\\*https://github.com/ageitgey/face_recognition" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "4440" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.468, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.41, + 0.471, + 0.466 + ], + "angle": 0, + "content": "Figure 11. Multi-modal Synthesis. The leftmost column is the input segmentation map. We use the same segmentation map for each row. We generate multi-modal results by randomly sampling an appearance style for each column." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.471, + 0.472, + 0.607 + ], + "angle": 0, + "content": "Analysis on random sampling of poses. We study the effect of the different probabilities of sampling random poses during training, as shown in Figure 9. When sampling no random poses \\((p = 0)\\), the model best aligns with input label maps with suboptimal image quality. Conversely, only sampling random poses \\((p = 1)\\) gives the best image quality but suffers huge misalignment with input label maps. We find \\(p = 0.5\\) achieves the balance between the image quality and the alignment with the input." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.615, + 0.212, + 0.631 + ], + "angle": 0, + "content": "4.3. Applications" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.47, + 0.762 + ], + "angle": 0, + "content": "Cross-view Editing. As shown in Figure 10, our 3D editing system allows users to generate and edit label maps from any viewpoint instead of only the input view. The edited label map is further fed into the conditional encoder to update the 3D representation. Unlike GAN inversion [83], our feedforward conditional encoder allows fast inference of the latent code. Thus, a single forward pass of our full model takes only \\(40\\mathrm{ms}\\) on a single RTX A5000." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Multi-modal synthesis and interpolation. 
Like other style-based generative models [8, 21, 34, 36], our method can disentangle the geometry and appearance information. Specifically, the input label map captures the geometry information while the randomly sampled latent code controls the appearance. We show style manipulation results in Figure 11. We can also interpolate both the geometry styles and the appearance styles (Figure 12). These results show the clear disentanglement of our 3D representation." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.09, + 0.891, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.54, + 0.895, + 0.611 + ], + "angle": 0, + "content": "Figure 12. Interpolation. In each \\(5 \\times 5\\) grid, the images at the top left and bottom right are generated from the input maps next to them. Each row interpolates two images in label space, while each column interpolates the appearance. For camera poses, we interpolate the pitch along the row and the yaw along the column." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.623, + 0.614, + 0.639 + ], + "angle": 0, + "content": "5. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.651, + 0.895, + 0.788 + ], + "angle": 0, + "content": "We have introduced pix2pix3D, a 3D-aware conditional generative model for controllable image synthesis. Given a 2D label map, our model allows users to render images given any viewpoint. Our model augments the neural field with 3D labels, assigning label, color, and density to every 3D point, allowing for the simultaneous rendering of the image and a pixel-aligned label map. The learned 3D labels further enable interactive 3D cross-view editing. We discuss the limitations and societal impact in our arXiv version." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Acknowledgments. We thank Sheng-Yu Wang, Nupur Kumari, Gaurav Parmer, Ruihan Gao, Muyang Li, George Cazenavette, Andrew Song, Zhipeng Bao, Tamaki Kojima, Krishna Wadhwani, Takuya Narihira, and Tatsuo Fujiwara for their discussion and help. We are grateful for the support from Sony Corporation, Singapore DSTA, and the CMU Argo AI Center for Autonomous Vehicle Research." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "4441" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.105 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.472, + 0.17 + ], + "angle": 0, + "content": "[1] Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.472, + 0.212 + ], + "angle": 0, + "content": "[2] Shir Amir, Yossi Gandelsman, Shai Bagon, and Tali Dekel. Deep vit features as dense visual descriptors. ECCVW What is Motion For?, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.213, + 0.472, + 0.268 + ], + "angle": 0, + "content": "[3] David Bau, Hendrik Strobelt, William Peebles, Jonas Wulff, Bolei Zhou, Jun-Yan Zhu, and Antonio Torralba. Semantic photo manipulation with a generative image prior. In ACM SIGGRAPH, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.27, + 0.472, + 0.312 + ], + "angle": 0, + "content": "[4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. 
Demystifying mmd gans. In International Conference on Learning Representations (ICLR), 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.313, + 0.472, + 0.368 + ], + "angle": 0, + "content": "[5] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In International Conference on Learning Representations (ICLR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.369, + 0.472, + 0.436 + ], + "angle": 0, + "content": "[6] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional \\(\\pi\\)-gan for single image to neural radiance fields translation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.439, + 0.472, + 0.494 + ], + "angle": 0, + "content": "[7] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.496, + 0.472, + 0.578 + ], + "angle": 0, + "content": "[8] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.58, + 0.472, + 0.648 + ], + "angle": 0, + "content": "[9] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.472, + 0.746 + ], + "angle": 0, + "content": "[10] Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.747, + 0.472, + 0.788 + ], + "angle": 0, + "content": "[11] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. In ACM SIGGRAPH, 2021. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.472, + 0.831 + ], + "angle": 0, + "content": "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.472, + 0.9 + ], + "angle": 0, + "content": "[13] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.115, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.147 + ], + "angle": 0, + "content": "[14] Tao Chen, Zhe Zhu, Ariel Shamir, Shi-Min Hu, and Daniel Cohen-Or. 3-sweep: Extracting editable objects from a single photo. ACM Transactions on Graphics (TOG), 32(6):1-10, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.894, + 0.206 + ], + "angle": 0, + "content": "[15] Yuedong Chen, Qianyi Wu, Chuanxia Zheng, Tat-Jen Cham, and Jianfei Cai. Sem2nerf: Converting single-view semantic masks to neural radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.208, + 0.894, + 0.263 + ], + "angle": 0, + "content": "[16] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.266, + 0.894, + 0.335 + ], + "angle": 0, + "content": "[17] JOHANNA Delanoy, ADRIEN Bousseau, MATHIEU Aubry, PHILLIP Isola, and ALEXEIA A Efros. What you sketch is what you get: 3d sketching using multi-view deep volumetric prediction. In ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.338, + 0.894, + 0.394 + ], + "angle": 0, + "content": "[18] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised NeRF: Fewer views and faster training for free. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.396, + 0.894, + 0.451 + ], + "angle": 0, + "content": "[19] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.453, + 0.893, + 0.509 + ], + "angle": 0, + "content": "[20] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, 2014. 1, 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.512, + 0.894, + 0.567 + ], + "angle": 0, + "content": "[21] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2, 3, 4, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.569, + 0.894, + 0.624 + ], + "angle": 0, + "content": "[22] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping plato's cave: 3d shape from adversarial rendering. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.628, + 0.894, + 0.695 + ], + "angle": 0, + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two timescale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems (NeurIPS), 2017. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.699, + 0.894, + 0.741 + ], + "angle": 0, + "content": "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.743, + 0.894, + 0.785 + ], + "angle": 0, + "content": "[25] Hsin-Ping Huang, Hung-Yu Tseng, Hsin-Ying Lee, and Jia-Bin Huang. Semantic view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.788, + 0.894, + 0.829 + ], + "angle": 0, + "content": "[26] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In European Conference on Computer Vision (ECCV), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.894, + 0.9 + ], + "angle": 0, + "content": "[27] Zeng Huang, Tianye Li, Weikai Chen, Yajie Zhao, Jun Xing, Chloe Legendre, Linjie Luo, Chongyang Ma, and Hao Li. Deep volumetric video from very sparse multi-view performance capture. In European Conference on Computer Vision (ECCV), pages 351-369, 2018. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4442" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.133 + ], + "angle": 0, + "content": "[28] Takeo Igarashi, Satoshi Matsuoka, and Hidehiko Tanaka. Teddy: a sketching interface for 3d freeform design. In ACM SIGGRAPH, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.471, + 0.192 + ], + "angle": 0, + "content": "[29] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.194, + 0.471, + 0.248 + ], + "angle": 0, + "content": "[30] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.252, + 0.47, + 0.293 + ], + "angle": 0, + "content": "[31] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerffaceediting: Disentangled face editing in neural radiance fields. In ACM SIGGRAPH Asia, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.296, + 0.469, + 0.323 + ], + "angle": 0, + "content": "[32] James T Kajiya and Brian P Von Herzen. Ray tracing volume densities. ACM SIGGRAPH, 18(3):165-174, 1984. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.326, + 0.471, + 0.381 + ], + "angle": 0, + "content": "[33] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.384, + 0.471, + 0.438 + ], + "angle": 0, + "content": "[34] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.442, + 0.471, + 0.496 + ], + "angle": 0, + "content": "[35] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.499, + 0.471, + 0.554 + ], + "angle": 0, + "content": "[36] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.557, + 0.471, + 0.611 + ], + "angle": 0, + "content": "[37] Natasha Kholgade, Tomas Simon, Alexei Efros, and Yaser Sheikh. 3d object manipulation in a single photograph using stock 3d models. ACM Transactions on Graphics (TOG), 33(4):1-12, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.615, + 0.471, + 0.669 + ], + "angle": 0, + "content": "[38] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.673, + 0.471, + 0.726 + ], + "angle": 0, + "content": "[39] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Simon Lucey. Barf: Bundle-adjusting neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.73, + 0.47, + 0.771 + ], + "angle": 0, + "content": "[40] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. Advances in neural information processing systems, 30, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.774, + 0.471, + 0.828 + ], + "angle": 0, + "content": "[41] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.832, + 0.471, + 0.899 + ], + "angle": 0, + "content": "[42] Zhaoliang Lun, Matheus Gadelha, Evangelos Kalogerakis, Subhransu Maji, and Rui Wang. 3d shape reconstruction from sketches via multi-view convolutional networks. In 2017 International Conference on 3D Vision (3DV). IEEE, 2017. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.162 + ], + "angle": 0, + "content": "[43] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.165, + 0.894, + 0.22 + ], + "angle": 0, + "content": "[44] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.223, + 0.894, + 0.276 + ], + "angle": 0, + "content": "[45] Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. 
Which training methods for gans do actually converge? In International Conference on Machine Learning (ICML), 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.281, + 0.894, + 0.349 + ], + "angle": 0, + "content": "[46] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.353, + 0.893, + 0.38 + ], + "angle": 0, + "content": "[47] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.384, + 0.894, + 0.424 + ], + "angle": 0, + "content": "[48] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. In ACM SIGGRAPH, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.894, + 0.495 + ], + "angle": 0, + "content": "[49] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.5, + 0.894, + 0.568 + ], + "angle": 0, + "content": "[50] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.572, + 0.894, + 0.626 + ], + "angle": 0, + "content": "[51] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.63, + 0.894, + 0.698 + ], + "angle": 0, + "content": "[52] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.894, + 0.769 + ], + "angle": 0, + "content": "[53] Xingang Pan, Xudong Xu, Chen Change Loy, Christian Theobalt, and Bo Dai. A shading-guided generative implicit model for shape-accurate 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.774, + 0.894, + 0.842 + ], + "angle": 0, + "content": "[54] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.894, + 0.899 + ], + "angle": 0, + "content": "[55] Taesung Park, Alexei A Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for unpaired image-to-image translation. In European Conference on Computer Vision (ECCV), 2020. 
2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4443" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "[56] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.149, + 0.471, + 0.204 + ], + "angle": 0, + "content": "[57] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.205, + 0.471, + 0.259 + ], + "angle": 0, + "content": "[58] Gaurav Parmar, Richard Zhang, and Jun-Yan Zhu. On aliased resizing and surprising subtleties in gan evaluation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.261, + 0.471, + 0.315 + ], + "angle": 0, + "content": "[59] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.316, + 0.471, + 0.372 + ], + "angle": 0, + "content": "[60] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshop, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.373, + 0.471, + 0.439 + ], + "angle": 0, + "content": "[61] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.442, + 0.471, + 0.496 + ], + "angle": 0, + "content": "[62] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.498, + 0.471, + 0.552 + ], + "angle": 0, + "content": "[63] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.553, + 0.471, + 0.607 + ], + "angle": 0, + "content": "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.609, + 0.471, + 0.65 + ], + "angle": 0, + "content": "[65] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.651, + 0.471, + 0.705 + ], + "angle": 0, + "content": "[66] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.707, + 0.471, + 0.761 + ], + "angle": 0, + "content": "[67] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew Davison. iMAP: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.762, + 0.471, + 0.817 + ], + "angle": 0, + "content": "[68] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. In ACM Transactions on Graphics (TOG), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.818, + 0.471, + 0.872 + ], + "angle": 0, + "content": "[69] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.873, + 0.471, + 0.9 + ], + "angle": 0, + "content": "[70] Matthew Tancik, Ben Mildenhall, Terrance Wang, Divi Schmidt, Pratul P Srinivasan, Jonathan T Barron, and Ren" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.893, + 0.134 + ], + "angle": 0, + "content": "Ng. Learned initializations for optimizing coordinate-based neural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.205 + ], + "angle": 0, + "content": "[71] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.207, + 0.894, + 0.26 + ], + "angle": 0, + "content": "[72] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. In ACM Transactions on Graphics (TOG), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.263, + 0.894, + 0.331 + ], + "angle": 0, + "content": "[73] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.334, + 0.894, + 0.389 + ], + "angle": 0, + "content": "[74] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.391, + 0.894, + 0.445 + ], + "angle": 0, + "content": "[75] Xiaohua Xie, Kai Xu, Niloy J Mitra, Daniel Cohen-Or, Wenyong Gong, Qi Su, and Baoquan Chen. 
Sketch-to-design: Context-based part assembly. In Computer Graphics Forum, volume 32, pages 233–245. Wiley Online Library, 2013. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.447, + 0.893, + 0.502 + ], + "angle": 0, + "content": "[76] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.893, + 0.559 + ], + "angle": 0, + "content": "[77] Shunyu Yao, Tzu Ming Hsu, Jun-Yan Zhu, Jiajun Wu, Antonio Torralba, Bill Freeman, and Josh Tenenbaum. 3d-aware scene manipulation via inverse graphics. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.894, + 0.615 + ], + "angle": 0, + "content": "[78] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. Pixelnerf: Neural radiance fields from one or few images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.617, + 0.894, + 0.673 + ], + "angle": 0, + "content": "[79] Jichao Zhang, Enver Sangineto, Hao Tang, Aliaksandr Siarohin, Zhun Zhong, Nicu Sebe, and Wei Wang. 3d-aware semantic-guided generative model for human synthesis. In European Conference on Computer Vision (ECCV), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.675, + 0.893, + 0.715 + ], + "angle": 0, + "content": "[80] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.717, + 0.894, + 0.772 + ], + "angle": 0, + "content": "[81] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.774, + 0.893, + 0.815 + ], + "angle": 0, + "content": "[82] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision (ECCV), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.817, + 0.893, + 0.871 + ], + "angle": 0, + "content": "[83] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A Efros. Generative visual manipulation on the natural image manifold. In European Conference on Computer Vision (ECCV), 2016. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.893, + 0.9 + ], + "angle": 0, + "content": "[84] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4444" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.469, + 0.12 + ], + "angle": 0, + "content": "adversarial networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.177 + ], + "angle": 0, + "content": "[85] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darrell, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. Advances in neural information processing systems, 30, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.471, + 0.247 + ], + "angle": 0, + "content": "[86] Jun-Yan Zhu, Zhoutong Zhang, Chengkai Zhang, Jiajun Wu, Antonio Torralba, Josh Tenenbaum, and Bill Freeman. Visual object networks: Image generation with disentangled 3d representations. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.249, + 0.471, + 0.303 + ], + "angle": 0, + "content": "[87] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.305, + 0.471, + 0.374 + ], + "angle": 0, + "content": "[88] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R. Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4445" + } + ] +] \ No newline at end of file diff --git a/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_origin.pdf b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..97dda96cbeb45411acaa99f4bfba36462b6cff0e --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/b9625555-02d4-4da7-b507-7cd64cc67a00_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a288dffa8e6f9266d89fe3ebfbeee7b16bcc97fd5bb182c75486fd36cb2d9dc3 +size 9865346 diff --git a/2023/3D-Aware Conditional Image Synthesis/full.md b/2023/3D-Aware Conditional Image Synthesis/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c70ed90bef0fe3e54bbbceffb026341e5fba44ee --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/full.md @@ -0,0 +1,382 @@ +# 3D-aware Conditional Image Synthesis + +Kangle Deng Gengshan Yang Deva Ramanan Jun-Yan Zhu Carnegie Mellon University + +![](images/d0db08c534775dbb6eca15e32dcc39d29d6901cc09dc50bce140e3be6216c11a.jpg) +Figure 1. Given a 2D label map as input, such as a segmentation or edge map, our model learns to predict high-quality 3D labels, geometry, and appearance, which enables us to render both labels and RGB images from different viewpoints. The inferred 3D labels further allow interactive editing of label maps from any viewpoint, as shown in Figure 10. + +# Abstract + +We propose pix2pix3D, a 3D-aware conditional generative model for controllable photorealistic image synthesis. Given a 2D label map, such as a segmentation or edge map, our model learns to synthesize a corresponding image from different viewpoints. To enable explicit 3D user control, we extend conditional generative models with neural radiance fields. 
Given widely-available posed monocular image and label map pairs, our model learns to assign a label to every 3D point in addition to color and density, which enables it to render the image and pixel-aligned label map simultaneously. Finally, we build an interactive system that allows users to edit the label map from different viewpoints and generate outputs accordingly. + +# 1. Introduction + +Content creation with generative models has witnessed tremendous progress in recent years, enabling high-quality, + +user-controllable image and video synthesis [19, 20, 24, 34]. In particular, image-to-image translation methods [29, 56, 84] allow users to interactively create and manipulate a high-resolution image given a 2D input label map. Unfortunately, existing image-to-image translation methods operate purely in 2D, without explicit reasoning of the underlying 3D structure of the content. As shown in Figure 1, we aim to make conditional image synthesis 3D-aware, allowing not only 3D content generation but also viewpoint manipulation and attribute editing (e.g., car shape) in 3D. + +Synthesizing 3D content conditioned on user input is challenging. For model training, it is costly to obtain large-scale datasets with paired user inputs and their desired 3D outputs. During test time, 3D content creation often requires multi-view user inputs, as a user may want to specify the details of 3D objects using 2D interfaces from different viewpoints. However, these inputs may not be 3D-consistent, providing conflicting signals for 3D content creation. + +To address the above challenges, we extend conditional generative models with 3D neural scene representations. To + +enable cross-view editing, we additionally encode semantic information in 3D, which can then be rendered as 2D label maps from different viewpoints. We learn the aforementioned 3D representation using only 2D supervision in the form of image reconstruction and adversarial losses. While the reconstruction loss ensures the alignment between 2D user inputs and corresponding 3D content, our pixel-aligned conditional discriminator encourages the appearance and labels to look plausible while remaining pixel-aligned when rendered into novel viewpoints. We also propose a cross-view consistency loss to enforce the latent codes to be consistent from different viewpoints. + +We focus on 3D-aware semantic image synthesis on the CelebAMask-HQ [38], AFHQ-cat [16], and shapenetcar [10] datasets. Our method works well for various 2D user inputs, including segmentation maps and edge maps. Our method outperforms several 2D and 3D baselines, such as Pix2NeRF variants [6], SofGAN [11], and SEAN [87]. We further ablate the impact of various design choices and demonstrate applications of our method, such as cross-view editing and explicit user control over semantics and style. Please see our website for more results and code. Please check out the full version of our paper at arXiv. + +# 2. Related Work + +Neural Implicit Representation. Neural implicit fields, such as DeepSDF and NeRFs [46, 54], model the appearance of objects and scenes with an implicitly defined, continuous 3D representation parameterized by neural networks. They have produced significant results for 3D reconstruction [67, 88] and novel view synthesis applications [39, 43, 44, 48, 80] thanks to their compactness and expressiveness. NeRF and its descendants aim to optimize a network for an individual scene, given hundreds of images from multiple viewpoints. 
Recent works further reduce the number of training views through learning network initializations [13, 70, 78], leveraging auxiliary supervision [18, 30], or imposing regularization terms [50]. Recently, explicit or hybrid representations of radiance fields [12, 48, 61] have also shown promising results regarding quality and speed. In our work, we use hybrid representations for modeling both user inputs and outputs in 3D, focusing on synthesizing novel images rather than reconstructing an existing scene. A recent work Pix2NeRF [6] aims to translate a single image to a neural radiance field, which allows single-image novel view synthesis. In contrast, we focus on 3D-aware user-controlled content generation. + +Conditional GANs. Generative adversarial networks (GANs) learn the distribution of natural images by forcing the generated and real images to be indistinguishable. They have demonstrated high-quality results on 2D image synthesis and manipulation [1, 3, 5, 20, 33-35, 59, 65, 72, 82, 83]. + +Several methods adopt image-conditional GANs [29, 47] for user-guided image synthesis and editing applications [26, 27, 38, 40, 55, 56, 62, 73, 84, 87]. In contrast, we propose a 3D-aware generative model conditioned on 2D user inputs that can render view-consistent images and enable interactive 3D editing. Recently, SoFGAN [11] uses a 3D semantic map generator and a 2D semantic-to-image generator to enable 3D-aware generation, but using 2D generators does not ensure 3D consistency. + +3D-aware Image Synthesis. Early data-driven 3D image editing systems can achieve various 3D effects but often require a huge amount of manual effort [14, 37]. Recent works have integrated the 3D structure into learning-based image generation pipelines using various geometric representations, including voxels [22,86], voxelized 3D features [49], and 3D morphable models [71, 77]. However, many rely on external 3D data [71, 77, 86]. Recently, neural scene representations have been integrated into GANs to enable 3D-aware image synthesis [8,9,21,51-53,64,76]. Intriguingly, these 3D-aware GANs can learn 3D structures without any 3D supervision. For example, StyleNeRF [21] and EG3D [8] learn to generate 3D representations by modulating either NeRFs or explicit representations with latent style vectors. This allows them to render high-resolution view-consistent images. Unlike the above methods, we focus on conditional synthesis and interactive editing rather than random sampling. Several works [17,28,42,75] have explored sketch-based shape generation but they do not allow realistic image synthesis. + +Closely related to our work, Huang et al. [25] propose synthesizing novel views conditional on a semantic map. Our work differs in three ways. First, we can predict full 3D labels, geometry, and appearance, rather than only 2D views, which enables cross-view editing. Second, our method can synthesize images with a much wider baseline than Huang et al. [25]. Finally, our learning algorithm does not require ground truth multi-view images of the same scene. Two recent works, FENeRF [69] and 3DSGAN [79], also leverage semantic labels for training 3D-aware GANs, but they do not support conditional inputs and require additional efforts (e.g., GAN-inversion) to allow user editing. Three concurrent works, IDE-3D [68], NeRFFaceEditing [31], and sem2nerf [15], also explore the task of 3D-aware generation based on segmentation masks. 
However, IDE-3D and sem2nerf only allow editing on a fixed view, and NeRF-FaceEditing focuses on real image editing rather than generation. All of them do not include results for other input modalities. In contrast, we present a general-purpose method that works well for diverse datasets and input controls. + +# 3. Method + +Given a 2D label map $\mathbf{I}_{\mathbf{s}}$ , such as a segmentation or edge map, pix2pix3D generates a 3D-volumetric representation of geometry, appearance, and labels that can be rendered + +![](images/c1c686b869a2e8159fc2727cb9533b35544f9d7cbf22f1705c03b3f7bd14fb26.jpg) +Figure 2. Overall pipeline. Given a 2D label map (e.g., segmentation map), a random latent code $z$ , and a camera pose $\hat{P}$ as inputs, our generator renders the label map and image from viewpoint $\hat{P}$ . Intuitively, the input label map specifies the geometric structure, while the latent code captures the appearance, such as hair color. We begin with an encoder that encodes both the input label map and the latent code into style vectors $\mathbf{w}^{+}$ . We then use $\mathbf{w}^{+}$ to modulate our 3D representation, which takes a spatial point $\mathbf{x}$ and outputs (1) color $\mathbf{c} \in \mathbb{R}^3$ , (2) density $\sigma$ , (3) feature $\phi \in \mathbb{R}^l$ , and (4) label $\mathbf{s} \in \mathbb{R}^c$ . We then perform volumetric rendering and 2D upsampling to get the high-resolution label map $\hat{\mathbf{I}}_{\mathbf{s}}^{+}$ and RGB Image $\hat{\mathbf{I}}_{\mathbf{c}}^{+}$ . For those rendered from ground-truth poses, we compare them to ground-truth labels and images with an LPIPS loss and label reconstruction loss. We apply a GAN loss on labels and images rendered from both novel and original viewpoints. + +from different viewpoints. Figure 2 provides an overview. We first introduce the formulation of our 3D conditional generative model for 3D-aware image synthesis in Section 3.1. Then, in Section 3.2, we discuss how to learn the model from color and label map pairs $\{\mathbf{I}_{\mathrm{c}},\mathbf{I}_{\mathrm{s}}\}$ associated with poses $\mathbf{P}$ . + +# 3.1. Conditional 3D Generative Models + +Similar to EG3D [8], we adopt a hybrid representation for the density and appearance of a scene and use style vectors to modulate the 3D generations. To condition the 3D representations on 2D label map inputs, we introduce a conditional encoder that maps a 2D label map into a latent style vector. Additionally, pix2pix3D produces 3D labels that can be rendered from different viewpoints, allowing for cross-view user editing. + +Conditional Encoder. Given a 2D label map input $\mathbf{I}_{\mathrm{s}}$ and a random latent code sampled from the spherical Gaussian space $\mathbf{z} \sim \mathcal{N}(0, I)$ , our conditional encoder $E$ outputs a list of style vectors $\mathbf{w}^{+} \in \mathbb{R}^{l \times 256}$ , + +$$ +\mathbf {w} ^ {+} = E (\mathbf {I _ {s}}, \mathbf {z}), +$$ + +where $l = 13$ is the number of layers to be modulated. + +Specifically, we encode $\mathbf{I}_{\mathrm{s}}$ into the first 7 style vectors that represent the global geometric information of the scene. We then feed the random latent code $\mathbf{z}$ through a Multi-Layer Perceptron (MLP) mapping network to obtain the rest of the style vectors that control the appearance. + +Conditional 3D Representation. 
Our 3D representation is parameterized by tri-planes followed by a 2-layer MLP $f$ [8], which takes in a spatial point $\mathbf{x} \in \mathbb{R}^3$ and returns 4 types of outputs: (1) color $\mathbf{c} \in \mathbb{R}^3$ , (2) density $\sigma \in \mathbb{R}^+$ , (3) feature $\phi \in \mathbb{R}^{64}$ for the purpose of 2D upsampling, and most notably, (4) label $\mathbf{s} \in \mathbb{R}^c$ , where $c$ is the number of classes if $\mathbf{I_s}$ is a segmentation map, otherwise 1 for edge labels. We make the field conditional by modulating the generation of tri-planes $F^{\mathrm{tri}}$ with the style vectors $\mathbf{w}^+$ . We also remove the view dependence of the color following [8, 21]. Formally,

$$
(\mathbf {c}, \mathbf {s}, \sigma , \phi) = f (F _ {\mathbf {w} ^ {+}} ^ {\mathrm {t r i}} (\mathbf {x})).
$$

Volume Rendering and Upsampling. We apply volumetric rendering to synthesize color images [32, 46]. In addition, we render label maps, which are crucial for enabling cross-view editing (Section 4.3) and improving rendering quality
We use LPIPS [81] to compute the image reconstruction loss $\mathcal{L}_c$ for color images. For label reconstruction loss $\mathcal{L}_s$ , we use the balanced cross-entropy loss for segmentation maps or L2 Loss for edge maps, + +$$ +\mathcal {L} _ {\text {r e c o n}} = \lambda_ {c} \mathcal {L} _ {c} \left(\mathbf {I} _ {\mathbf {c}}, \left\{\hat {\mathbf {I}} _ {\mathbf {c}}, \hat {\mathbf {I}} _ {\mathbf {c}} ^ {+} \right\}\right) + \lambda_ {s} \mathcal {L} _ {s} \left(\mathbf {I} _ {\mathbf {s}}, \left\{\hat {\mathbf {I}} _ {\mathbf {s}}, \hat {\mathbf {I}} _ {\mathbf {s}} ^ {+} \right\}\right), +$$ + +where $\lambda_{c}$ and $\lambda_{s}$ balance two terms. + +Pixel-aligned Conditional Discriminator. The reconstruction loss alone fails to synthesize detailed results from novel viewpoints. Therefore, we use an adversarial loss [20] to enforce renderings to look realistic from random viewpoints. Specifically, we have two discriminators $D_{\mathbf{c}}$ and $D_{\mathbf{s}}$ for RGB images and label maps, respectively. $D_{\mathbf{c}}$ is a widely-used + +![](images/988411ec3b4d606f33e07441af6e5a4bd16111801d61dbb690142bd8ed4bbd8a.jpg) +Multi-view Generation of Seg Maps +Figure 3. Cross-View Consistency Loss. Given an input label map $\mathbf{I}_{\mathbf{s}}$ and its associated pose $\mathbf{P}$ , we first infer the geometry latent code $\mathbf{w}_{\mathbf{g}}$ . From $\mathbf{w}_{\mathbf{g}}$ , we can generate a label map $\hat{\mathbf{I}}_{\mathbf{s}}$ from the same pose $\mathbf{P}$ , and $\hat{\mathbf{I}}_{\mathbf{s}}'$ from a random pose $\mathbf{P}'$ . Next, we infer $\mathbf{w}_{\mathbf{g}}'$ from the novel view $\hat{\mathbf{I}}_{\mathbf{s}}'$ , and render it back to the original pose $\mathbf{P}$ to obtain $\hat{\mathbf{I}}_{\mathbf{s}}''$ . Finally, we add a reconstruction loss: $\mathcal{L}_{\mathrm{CVC}} = \lambda_{\mathrm{CVC}}\mathcal{L}_s(\hat{\mathbf{I}}_{\mathbf{s}}'', \hat{\mathbf{I}}_{\mathbf{s}})$ . + +GAN loss that takes real and fake images as input, while the pixel-aligned conditional discriminator $D_{\mathbf{s}}$ concatenates color images and label maps as input, which encourages pixel alignment between color images and label maps. Notably, in $D_{\mathbf{s}}$ , we stop the gradients for the color images to prevent a potential quality downgrade. We also feed the rendered low-res images to prevent the upsampler from hallucinating details, inconsistent with the low-res output. The adversarial loss can be written as follows. + +$$ +\mathcal {L} _ {\mathrm {G A N}} = \lambda_ {D _ {\mathbf {c}}} \mathcal {L} _ {D _ {\mathbf {c}}} (\hat {\mathbf {I}} _ {\mathbf {c}} ^ {+}, \hat {\mathbf {I}} _ {\mathbf {c}}) + \lambda_ {D _ {\mathbf {s}}} \mathcal {L} _ {D _ {\mathbf {s}}} (\hat {\mathbf {I}} _ {\mathbf {c}} ^ {+}, \hat {\mathbf {I}} _ {\mathbf {c}}, \hat {\mathbf {I}} _ {\mathbf {s}} ^ {+}, \hat {\mathbf {I}} _ {\mathbf {s}}). +$$ + +where $\lambda_{D_{\mathrm{c}}}$ and $\lambda_{D_{\mathrm{s}}}$ balance two terms. To stabilize the GAN training, we adopt the R1 regularization loss [45]. + +Cross-view Consistency Loss. We observe that inputting label maps of the same object from different viewpoints will sometimes result in different 3D shapes. Therefore we add a cross-view consistency loss to regularize the training, as illustrated in Figure 3. 
Given an input label map $\mathbf{I}_{\mathbf{s}}$ and its associated pose $\mathbf{P}$ , we generate the label map $\hat{\mathbf{I}}_{\mathbf{s}}^{\prime}$ from a different viewpoint $\mathbf{P}^{\prime}$ , and render the label map $\hat{\mathbf{I}}_{\mathbf{s}}^{\prime \prime}$ back to the pose $\mathbf{P}$ using $\hat{\mathbf{I}}_{\mathbf{s}}^{\prime}$ as input. We add a reconstruction loss between $\hat{\mathbf{I}}_{\mathbf{s}}^{\prime \prime}$ and $\hat{\mathbf{I}}_{\mathbf{s}}$ : + +$$ +\mathcal {L} _ {\mathrm {C V C}} = \lambda_ {\mathrm {C V C}} \mathcal {L} _ {s} (\hat {\mathbf {I}} _ {\mathbf {s}} ^ {\prime \prime}, \hat {\mathbf {I}} _ {\mathbf {s}}), +$$ + +where $\mathcal{L}_s$ denotes the reconstruction loss in the label space, and $\lambda_{\mathrm{CVC}}$ weights the loss term. This loss is crucial for reducing error accumulation during cross-view editing. + +Optimization. Our final learning objective is written as follows: + +$$ +\mathcal {L} _ {\text {t o t a l}} = \mathcal {L} _ {\text {r e c o n}} + \mathcal {L} _ {\text {G A N}} + \mathcal {L} _ {\text {C V C}}. +$$ + +At every iteration, we determine whether to use a ground-truth pose or sample a random one with a probability of $p$ . We use the reconstruction loss and GAN loss for ground-truth poses, while for random poses, we only use the GAN loss. We provide the hyper-parameters and more implementation details in the appendix of our arXiv version. + +![](images/6524c121ce230d05cd41feb9f75bed487b059bf5a675aa6a54d2e8ffe2d5a69d.jpg) +Input Seg Map + +![](images/962e55fa0d7fee6b63d4c7ba2d40169c247bab59d0ed8d5aa78cd86a0ce374a6.jpg) +Ours + +![](images/492c45d44d7ba6d8169e4d0c645e6a8da2d7374cd6ea95cdbfd93ce49e14e693.jpg) +Pix2NeRF +Figure 4. Qualitative Comparison with Pix2NeRF [6], SoFGAN [11], and SEAN [87] on CelebAMask dataset for seg2face task. SEAN fails in multi-view synthesis, while SoFGAN suffers from multi-view inconsistency (e.g., face identity changes across viewpoints). Our method renders high-quality images while maintaining multi-view consistency. Please check our website for more examples. + +![](images/25d1c57f903489de4ac9d4c8d90385d68150f583be4b4f372a55bccb3970eaff.jpg) +SoFGAN + +![](images/446644698823e9c07c72d457e262b4557f2b556ad8bda9432911171a4ac03a66.jpg) +SEAN + +![](images/73d0e8dcc36b189f63f1ea7b19c085511cee534a07c251b442368c72542146b3.jpg) +Input Seg Map + +![](images/fa82f049d6800f9a4b8d6e0160c5b23261b5fbb18c367726987cf682cf2ab95d.jpg) +Ours + +![](images/2667b3ac10d618c8c04aee838c4b19b38a78be508b30b10c8f6b052b31387a13.jpg) +w/o 3D Labels + +![](images/1286d0a864f24b9a995cdaf2bfb3b7b1c4ed3425e1e932af73f41d5580cda404.jpg) + +![](images/293baefdf04c37c560ceb68f7e44c4547c13fcb023a6b1f32d5d0724828710b5.jpg) + +![](images/0754bdc1996513a0f381d6645fa35e4f747966a743de55433aa9c6cf0cd03390.jpg) + +![](images/df3d8e6c4c75d7110ef47a2f48f699d9fcf88ae9f5a6cf9ffbdfcdcc3d299080.jpg) +Figure 5. Qualitative ablation on seg2face and seg2cat. We ablate our method by removing the branch that renders label maps (w/o 3D Labels). Our results better align with input labels (e.g., hairlines and the cat's ear). + +![](images/763d78b11909cd56b602c8cb563bda80d1ea09fa760c6ba2976ce6dc62826ded.jpg) + +![](images/e03c8222f29076d852d5ad90fbeeb9406e2a18927e788482a6e3617d19ad05bf.jpg) + +![](images/689b58efb2e7efef39d2c744f5934ead645dc516fc6f60e473d266da6db4b071.jpg) +Input Edge Map + +![](images/3d3c8a105e49a2338d37b272fe6e18044354958d74ec02ee690fe6d523e90e42.jpg) +Rendered RGB images & edge maps +GT View +Figure 6. 
Results on edge2cat. Our model is trained on AFHQcat [16] with edges extracted by pidinet [66]. + +# 4. Experiment + +We first introduce the datasets and evaluation metrics. Then we compare our method with the baselines. Finally, we demonstrate cross-view editing and multi-modal synthesis applications enabled by our method. + +Datasets. We consider four tasks: seg2face, seg2cat, + +![](images/611b3a3f67c0d28c059363a578b0e5558d3d974a5ff4a53806747d05ed6cb5fa.jpg) +Input Seg Map + +![](images/da8d379482d15b6744c8d2fee408dc38176a00236fdc0e02e612b6f55aa221d2.jpg) +Ours + +![](images/c3837de7e313c28c101c3a24263b10dbcd65e874fee7bd8a9466700be6d15ec3.jpg) +w/o 3D Labels + +![](images/075e8f080fffcde2a09c9e911fb4c114a3fb1e5e0c56c746265e0731c1264b44.jpg) + +![](images/654619b953a1a632b636e8147a0aaf0328730b9e93a0ac2f876bb36d04e60bdc.jpg) + +![](images/7ad193f647b58e706d59eba116ba5f003f6a531c1c23e3860a17c6c55c3b1890.jpg) + +![](images/2dc61c293039e792708ef518ce6744dd1404d7395c9f1e736c40b54c5d212cb9.jpg) + +![](images/a81b6d3de8edacada11ba680be9da78a60da8c73063b4a6670252dddd8b0b201.jpg) + +![](images/332e8ae0fe3aaa5a4391ce7bcee64fcc21e0f83f2fbda3ae1f8e1a3214739393.jpg) + +![](images/31b1df148d6c6de23c999ffade5f1dbd5fb4fc80aaa0116bd7e8a51219eec901.jpg) +Input Edge Map + +![](images/0e1af3b81c89ef415836333616082c06bb5a4f1e64c77537ccebfc922c49bfb7.jpg) +Figure 7. Qualitative comparisons on edge2car. pix2pix3D (Ours) and Pix2NeRF [6] are trained on shapenet-car [10], and pix2pix3D achieves better quality and alignment than Pix2NeRF. + +edge2cat, and edge2car in our experiments. For seg2face, we use CelebAMask-HQ [38] for evaluation. CelebAMask-HQ contains 30,000 high-resolution face images from CelebA [41], and each image has a facial part segmentation mask and a predicted pose. The segmentation masks contain 19 classes, including skin, eyebrows, ears, mouth, lip, etc. The pose associated with each image segmentation is predicted by HopeNet [60]. We split the CelebAMask-HQ dataset into + +
| Seg2Face (CelebAMask-HQ [38]) | FID ↓ | KID ↓ | SG Diversity ↑ | mIoU ↑ | acc ↑ | FVV Identity ↓ |
| --- | --- | --- | --- | --- | --- | --- |
| SEAN [87] | 32.74 | 0.018 | 0.29 | 0.52 | 0.85 | N/A |
| SoFGAN [11] | 23.34 | 0.012 | 0.33 | 0.53 | 0.89 | 0.58 |
| Pix2NeRF [6] | 54.23 | 0.042 | 0.16 | 0.36 | 0.65 | 0.44 |
| pix2pix3D (Ours), w/o 3D labels | 12.96 | 0.005 | 0.30 | N/A (0.43) | N/A (0.81) | 0.38 |
| pix2pix3D (Ours), w/o CVC | 11.62 | 0.004 | 0.30 | 0.50 (0.50) | 0.87 (0.85) | 0.42 |
| pix2pix3D (Ours), full model | 11.54 | 0.003 | 0.28 | 0.51 (0.52) | 0.90 (0.88) | 0.36 |
| pix2pix3D (Ours), full model† | 11.13 | 0.003 | 0.29 | 0.51 (0.50) | 0.90 (0.87) | 0.36 |
+ +Table 1. Seg2face Evaluation. Our metrics include image quality (FID, KID, SG Diversity), alignment (mIoU and acc against GT label maps), and multi-view consistency (FVV Identity). Single-generation diversity (SG Diversity) is obtained by computing the LPIPS metric between randomly generated pairs given a single conditional input. To evaluate alignment, we compare the generated label maps against the ground truth in terms of mIoU and pixel accuracy (acc). Alternatively, given a generated image, one could estimate label maps via a face parser, and compare those against the ground truth (numbers in parentheses). We include SEAN [87] and SoFGAN [11] as baselines, and modify Pix2NeRF [6] to take conditional input. Our method achieves the best quality, alignment ACC, and FVV Identity while being competitive on SG Diversity. SoFGAN tends to have better alignment but worse 3D consistency. We also ablate our method w.r.t the 3D labels and the cross-view consistency (CVC) loss. Our 3D labels are crucial for alignment, while the CVC loss improves multi-view consistency. Using pretrained models from EG3D $(\dagger)$ also improves the performance. + +
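To make the alignment columns of Table 1 concrete, below is a minimal NumPy sketch of how mIoU and pixel accuracy between a rendered label map and the ground-truth label map can be computed. The function name `segmentation_alignment` and the confusion-matrix formulation are our own illustrative choices, not the paper's released evaluation code.

```python
import numpy as np

def segmentation_alignment(pred, gt, num_classes):
    """Toy mIoU / pixel-accuracy computation between two label maps.

    pred, gt: integer arrays of shape (H, W) with values in [0, num_classes).
    Returns (mIoU, pixel_accuracy). Classes absent from both maps are ignored,
    which is one common convention; the paper does not spell out its exact choice.
    """
    pred = pred.reshape(-1)
    gt = gt.reshape(-1)

    # Confusion matrix: rows = ground truth, columns = prediction.
    conf = np.zeros((num_classes, num_classes), dtype=np.int64)
    np.add.at(conf, (gt, pred), 1)

    pixel_acc = np.diag(conf).sum() / conf.sum()

    intersection = np.diag(conf).astype(np.float64)
    union = conf.sum(0) + conf.sum(1) - np.diag(conf)
    valid = union > 0
    miou = (intersection[valid] / union[valid]).mean()
    return miou, pixel_acc

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gt = rng.integers(0, 19, size=(64, 64))        # 19 classes, as in CelebAMask-HQ
    pred = gt.copy()
    pred[:8] = rng.integers(0, 19, size=(8, 64))   # corrupt a few rows
    print(segmentation_alignment(pred, gt, num_classes=19))
```

For SG Diversity, the recipe described in the caption would instead average an LPIPS distance over pairs of images generated from the same label map with different latent codes $z$.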
| Edge2Car | FID ↓ | KID ↓ | SG Diversity ↑ | AP ↑ |
| --- | --- | --- | --- | --- |
| Pix2NeRF [6] | 23.42 | 0.014 | 0.06 | 0.28 |
| pix2pix3D (Ours), w/o 3D labels | 10.73 | 0.005 | 0.12 | 0.45 (0.42) |
| pix2pix3D (Ours), w/o CVC | 9.42 | 0.004 | 0.13 | 0.61 (0.59) |
| pix2pix3D (Ours), full model | 8.31 | 0.004 | 0.13 | 0.63 (0.59) |
+ +a training set of 24,183, a validation set of 2,993, and a test set of 2,824, following the original work [38]. For seg2cat and edge2cat, we use AFHQ-cat [16], which contains 5,065 images at $512 \times$ resolution. We estimate the viewpoints using unsup3d [74]. We extract the edges using pidinet [66] and obtain segmentation by clustering DINO features [2] into 6 classes. For edge2car, we use 3D models from shapenet- + +Table 2. Edge2car Evaluation. We compare our method with Pix2NeRF [6] on edge2car using the shapenet-car [10] dataset. Similar to Table 1, we evaluate FID, KID, and SG Diversity for image quality. We also evaluate the alignment with the input edge map using AP. Similarly, we can either run informative drawing [7] on generated images to obtain edge maps (numbers in parentheses) or directly use generated edge maps to calculate the metrics. We achieve better image quality and alignment than Pix2NeRF. We also find that using 3D labels and cross-view consistency loss is helpful regarding FID and AP metrics. + +
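For the AP column of the edge2car evaluation above, a per-image average precision between a soft predicted edge map and the input edge map can be sketched as follows. Treating the ground truth as binary and scoring with scikit-learn's `average_precision_score` is our assumption about a reasonable protocol, not necessarily the paper's exact implementation.

```python
import numpy as np
from sklearn.metrics import average_precision_score

def edge_alignment_ap(pred_edges, gt_edges, threshold=0.5):
    """Toy per-image average precision between a soft predicted edge map
    and a (possibly soft) ground-truth edge map.

    pred_edges, gt_edges: float arrays of shape (H, W) in [0, 1].
    The ground truth is binarized at `threshold`; the prediction is kept
    as a per-pixel score.
    """
    y_true = (gt_edges.reshape(-1) >= threshold).astype(np.int64)
    y_score = pred_edges.reshape(-1)
    return average_precision_score(y_true, y_score)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gt = (rng.random((128, 128)) > 0.9).astype(np.float64)   # sparse synthetic edges
    pred = 0.7 * gt + 0.3 * rng.random((128, 128))           # noisy prediction
    print(f"AP = {edge_alignment_ap(pred, gt):.3f}")
```

The numbers in parentheses in Table 2 would then correspond to first re-extracting edges from the generated RGB images (e.g., with informative drawing [7]) and scoring those instead of the rendered edge maps.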
| Seg2Cat (AFHQ-Cat [16]) | FID ↓ | KID ↓ | SG Diversity ↑ | mIoU ↑ | acc ↑ |
| --- | --- | --- | --- | --- | --- |
| Pix2NeRF [6] | 43.92 | 0.081 | 0.15 | 0.27 | 0.58 |
| Ours, w/o 3D labels | 10.41 | 0.004 | 0.26 | N/A (0.49) | N/A (0.69) |
| Ours, w/o CVC | 9.64 | 0.004 | 0.26 | 0.66 (0.63) | 0.76 (0.73) |
| Ours, full model | 8.62 | 0.003 | 0.27 | 0.66 (0.62) | 0.78 (0.73) |
+ +Table 3. Seg2cat Evaluation. We compare our method with Pix2NeRF [6] on Seg2Cat using AFHQ-cat dataset [16], with segmentation obtained by clustering DINO features [2]. Similar to Table 1, we evaluate the image quality and alignment. Ours performs better in all metrics. + +![](images/5b3d08623c8b1ae15837e2929eac3d8a625b8e094d1d60e27bc8bf860a30031c.jpg) +Figure 8. Semantic Mesh. We show semantic meshes of human and cat faces from marching cubes colored by 3D labels. + +car [10] and render 500,000 images at $128 \times$ resolution for training, and 30,000 for evaluation. We extract the edges using informative drawing [7]. We train our model at $512 \times$ resolution except for $128 \times$ in the edge2car task. + +Running Time. For training the model at $512 \times$ resolution, it takes about three days on eight RTX 3090 GPUs. But we can significantly reduce the training time to 4 hours if we initialize parts of our model with pretrained weights from EG3D [8]. During inference, our model takes $10\mathrm{ms}$ to obtain the style vector, and another $30\mathrm{ms}$ to render the final image and the label map on a single RTX A5000. The low latency (25 FPS) allows for interactive user editing. + +# 4.1. Evaluation metrics + +We evaluate the models from two aspects: 1) the image quality regarding fidelity and diversity, and 2) the alignment between input label maps and generated outputs. + +Quality Metrics. Following prior works [21, 57], we use the clean-fid library [58] to compute Fréchet Inception Distance (FID) [23] and Kernel Inception Distance (KID) [4] to measure the distribution distance between synthesized results and real images. We also evaluate the single-generation diversity (SG Diversity) by calculating the LPIPS metric between randomly generated pairs given a single input following prior works [11, 85]. For FID and KID, we generate + +![](images/ddcc366d3be846fb4986f5b23ba0240c5f84a3fb77a866926f6ae612b9a99c3e.jpg) +Figure 9. We study the effect of random pose sampling probability $p$ during training. Without random poses ( $p = 0$ ), the model achieves the best alignment with input semantic maps, with reduced image quality. In contrast, only using random poses ( $p = 1$ ) achieves the best image quality, while results fail to align with input maps. We find $p = 0.5$ balances the image quality and input alignment. + +![](images/86a570277fc93bf09c5b134d5bfdbc4c976c22b004fdaeb0c62af6db371f5793.jpg) + +![](images/be042cfffb6bb81a1708257c1eb218309bcad47386f9707ff233a8fd004df1b2.jpg) + +![](images/24569639882bfc6cc5f28e3701610500a4d7632303263fe8a02bc4e6d00ac557.jpg) + +![](images/8d4b5c7c30f547f27dc1f0dffe40615b6cedbbd5014ad3204a95d10f266d6b63.jpg) +Figure 10. Cross-view Editing of Edge2Car. Our 3D editing system allows users to edit label maps from any viewpoint instead of only the input view. Importantly, our feed-forward encoder allows fast inference of the latent code without GAN-inversion. Typically, a single forward pass of rendering takes only $40\mathrm{ms}$ on a single RTX A5000, which enables interactive editing. Please check our demo video on our website. + +10 images per label map in the test set using randomly sampled $z$ . We compare our generated images with the whole dataset, including training and test images. + +Alignment Metrics. We evaluate models on the test set using mean Intersection-over-Union (mIoU) and pixel accuracy (acc) for segmentation maps following existing works [57, 63], and average precision (AP) for edge maps. 
+ +Alignment Metrics. We evaluate models on the test set using mean Intersection-over-Union (mIoU) and pixel accuracy (acc) for segmentation maps, following existing works [57, 63], and average precision (AP) for edge maps. For models that render label maps as output, we directly compare the rendered labels with the ground-truth labels. Otherwise, we first predict label maps from the output RGB images using off-the-shelf networks [38, 66] and then compare the predictions with the ground truth. Metrics computed on such predicted semantic maps are reported in brackets in Table 1 and Table 2. + +For seg2face, we evaluate the preservation of facial identity across viewpoints (FVV Identity) by computing the distance between dlib face-recognition embeddings of renderings from different viewpoints. + +# 4.2. Baseline comparison + +Baselines. Since there are no prior works on conditional 3D-aware image synthesis, we make minimal modifications to Pix2NeRF [6] so that it is conditioned on label maps instead of images. For a thorough comparison, we introduce two additional baselines: SEAN [87] and SoFGAN [11]. 2D baselines like SEAN [87] cannot generate multi-view images by design (N/A for FVV Identity), while SoFGAN [11] uses an unconditional 3D semantic map generator before its 2D generator, so we can evaluate FVV Identity for it. + +Results. Figure 4 shows the qualitative comparison for seg2face, and Table 1 reports the evaluation results. SoFGAN [11] tends to produce results with slightly better alignment but worse 3D consistency because of its 2D RGB generator. Our method achieves the best quality, alignment accuracy, and FVV Identity while being competitive with 2D baselines on SG Diversity. Figure 5 shows the qualitative ablation on seg2face and seg2cat. Table 3 reports the metrics for seg2cat. Figure 6 shows example results for edge2cat. Figure 7 shows the qualitative comparison for edge2car, and Table 2 reports the metrics. Our method achieves the best image quality and alignment. Figure 8 shows semantic meshes of human and cat faces, extracted by marching cubes and colored by our learned 3D labels. We provide more evaluation results in the appendix of our arXiv version. + +Ablation Study. We compare our full method to several variants: (1) w/o 3D LABELS, where we remove the branch that renders label maps, and (2) w/o CVC, where we remove the cross-view consistency loss. From Table 1, Table 2, and Figure 5, rendering label maps is crucial for alignment with the input. We posit that jointly learning appearance, geometry, and label information imposes strong constraints on the correspondence between the input label maps and the 3D representation, so our method can synthesize images pixel-aligned with the inputs. Our CVC loss helps preserve facial identity across viewpoints. + +![](images/701762258dbb14addbc873741842e7690298e349830108143d07aff390405b8c.jpg) +Figure 11. Multi-modal Synthesis. The leftmost column shows the input segmentation map; each row uses the same segmentation map. We generate multi-modal results by randomly sampling an appearance style for each column. + +Analysis on random sampling of poses. We study the effect of the probability $p$ of sampling random poses during training, as shown in Figure 9. When sampling no random poses ($p = 0$), the model aligns best with the input label maps but has suboptimal image quality. Conversely, sampling only random poses ($p = 1$) gives the best image quality but suffers from severe misalignment with the input label maps. We find that $p = 0.5$ strikes a balance between image quality and alignment with the input.
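To make the pose sampling schedule concrete, the following is a minimal sketch of how ground-truth and random camera poses could be mixed with probability $p$ in each training step; it is illustrative only, and the names `dataset_pose` and `sample_random_pose` are hypothetical, not identifiers from the released code.

```python
import random

P_RANDOM = 0.5  # probability p of rendering from a random pose in a training step

def choose_render_pose(dataset_pose, sample_random_pose):
    """Return the camera pose for this step and whether reconstruction applies.

    With probability P_RANDOM we render from a randomly sampled viewpoint,
    supervised only by the adversarial loss; otherwise we render from the
    ground-truth pose so the image/label reconstruction losses can be used.
    """
    if random.random() < P_RANDOM:
        return sample_random_pose(), False  # no pixel-aligned reconstruction loss
    return dataset_pose, True               # apply LPIPS and label losses as well
```

Setting `P_RANDOM = 0` corresponds to the $p = 0$ setting above (best alignment, lower image quality), while `P_RANDOM = 1` corresponds to $p = 1$ (best image quality, poor alignment), matching the trade-off shown in Figure 9.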
+ +# 4.3. Applications + +Cross-view Editing. As shown in Figure 10, our 3D editing system allows users to generate and edit label maps from any viewpoint instead of only the input view. The edited label map is then fed into the conditional encoder to update the 3D representation. Unlike GAN inversion [83], our feed-forward conditional encoder allows fast inference of the latent code: a single forward pass of our full model takes only $40\mathrm{ms}$ on a single RTX A5000. + +Multi-modal synthesis and interpolation. Like other style-based generative models [8, 21, 34, 36], our method disentangles geometry and appearance information. Specifically, the input label map captures the geometry, while the randomly sampled latent code controls the appearance. We show style manipulation results in Figure 11. We can also interpolate both the geometry styles and the appearance styles (Figure 12). These results demonstrate the clear disentanglement of our 3D representation. + +![](images/8ab2ba4ff5cd2044bcafae38e595a31eb99f389503ece2b7b84028a1763f0c5c.jpg) +Figure 12. Interpolation. In each $5 \times 5$ grid, the images at the top left and bottom right are generated from the input maps next to them. Each row interpolates two images in label space, while each column interpolates the appearance. For camera poses, we interpolate the pitch along the row and the yaw along the column. + +# 5. Discussion + +We have introduced pix2pix3D, a 3D-aware conditional generative model for controllable image synthesis. Given a 2D label map, our model allows users to render images from any viewpoint. Our model augments the neural field with 3D labels, assigning a label, color, and density to every 3D point, which allows the simultaneous rendering of the image and a pixel-aligned label map. The learned 3D labels further enable interactive 3D cross-view editing. We discuss the limitations and societal impact in our arXiv version. + +Acknowledgments. We thank Sheng-Yu Wang, Nupur Kumari, Gaurav Parmar, Ruihan Gao, Muyang Li, George Cazenavette, Andrew Song, Zhipeng Bao, Tamaki Kojima, Krishna Wadhwani, Takuya Narihira, and Tatsuo Fujiwara for their discussion and help. We are grateful for the support from Sony Corporation, Singapore DSTA, and the CMU Argo AI Center for Autonomous Vehicle Research. + +# References + +[1] Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[2] Shir Amir, Yossi Gandelsman, Shai Bagon, and Tali Dekel. Deep vit features as dense visual descriptors. ECCVW What is Motion For?, 2022. 6 +[3] David Bau, Hendrik Strobelt, William Peebles, Jonas Wulff, Bolei Zhou, Jun-Yan Zhu, and Antonio Torralba. Semantic photo manipulation with a generative image prior. In ACM SIGGRAPH, 2019. 2 +[4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. In International Conference on Learning Representations (ICLR), 2018. 6 +[5] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In International Conference on Learning Representations (ICLR), 2019. 2 +[6] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional $\pi$-gan for single image to neural radiance fields translation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5, 6, 7 +[7] Caroline Chan, Frédo Durand, and Phillip Isola.
Learning to generate line drawings that convey geometry and semantics. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6 +[8] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3, 4, 6, 8 +[9] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[10] Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015. 2, 5, 6 +[11] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. In ACM SIGGRAPH, 2021. 2, 5, 6, 7 +[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2 +[13] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[14] Tao Chen, Zhe Zhu, Ariel Shamir, Shi-Min Hu, and Daniel Cohen-Or. 3-sweep: Extracting editable objects from a single photo. ACM Transactions on Graphics (TOG), 32(6):1-10, 2013. 2 +[15] Yuedong Chen, Qianyi Wu, Chuanxia Zheng, Tat-Jen Cham, and Jianfei Cai. Sem2nerf: Converting single-view semantic masks to neural radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2 +[16] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6 +[17] Johanna Delanoy, Adrien Bousseau, Mathieu Aubry, Phillip Isola, and Alexei A Efros. What you sketch is what you get: 3d sketching using multi-view deep volumetric prediction. In ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D), 2018. 2 +[18] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised NeRF: Fewer views and faster training for free. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[19] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1 +[20] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, 2014. 1, 2, 4 +[21] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2, 3, 4, 6, 8 +[22] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel.
Escaping plato's cave: 3d shape from adversarial rendering. In IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two timescale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems (NeurIPS), 2017. 6 +[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1 +[25] Hsin-Ping Huang, Hung-Yu Tseng, Hsin-Ying Lee, and Jia-Bin Huang. Semantic view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2 +[26] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In European Conference on Computer Vision (ECCV), 2018. 2 +[27] Zeng Huang, Tianye Li, Weikai Chen, Yajie Zhao, Jun Xing, Chloe Legendre, Linjie Luo, Chongyang Ma, and Hao Li. Deep volumetric video from very sparse multi-view performance capture. In European Conference on Computer Vision (ECCV), pages 351-369, 2018. 2 + +[28] Takeo Igarashi, Satoshi Matsuoka, and Hidehiko Tanaka. Teddy: a sketching interface for 3d freeform design. In ACM SIGGRAPH, 1999. 2 +[29] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2 +[30] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[31] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerffaceediting: Disentangled face editing in neural radiance fields. In ACM SIGGRAPH Asia, 2022. 2 +[32] James T Kajiya and Brian P Von Herzen. Ray tracing volume densities. ACM SIGGRAPH, 18(3):165-174, 1984. 3 +[33] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2 +[34] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2, 6, 8 +[35] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[36] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8 +[37] Natasha Kholgade, Tomas Simon, Alexei Efros, and Yaser Sheikh. 3d object manipulation in a single photograph using stock 3d models. ACM Transactions on Graphics (TOG), 33(4):1-12, 2014. +[38] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7 +[39] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Simon Lucey. Barf: Bundle-adjusting neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[40] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. 
Advances in neural information processing systems, 30, 2017. 2 +[41] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5 +[42] Zhaoliang Lun, Matheus Gadelha, Evangelos Kalogerakis, Subhransu Maji, and Rui Wang. 3d shape reconstruction from sketches via multi-view convolutional networks. In 2017 International Conference on 3D Vision (3DV). IEEE, 2017. 2 + +[43] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[44] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[45] Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for gans do actually converge? In International Conference on Machine Learning (ICML), 2018. 4 +[46] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2, 3 +[47] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014. 2 +[48] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. In ACM SIGGRAPH, 2022. 2 +[49] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In IEEE International Conference on Computer Vision (ICCV), 2019. 2 +[50] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[51] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[52] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4 +[53] Xingang Pan, Xudong Xu, Chen Change Loy, Christian Theobalt, and Bo Dai. A shading-guided generative implicit model for shape-accurate 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2 +[54] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[55] Taesung Park, Alexei A Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for unpaired image-to-image translation. In European Conference on Computer Vision (ECCV), 2020. 2 + +[56] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
1, 2 +[57] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6, 7 +[58] Gaurav Parmar, Richard Zhang, and Jun-Yan Zhu. On aliased resizing and surprising subtleties in gan evaluation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6 +[59] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In IEEE International Conference on Computer Vision (ICCV), 2021. 2 +[60] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshop, 2018. 5 +[61] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[62] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2020. 2 +[63] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2021. 7 +[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[65] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[66] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 5, 6, 7 +[67] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew Davison. iMAP: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2 +[68] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. In ACM Transactions on Graphics (TOG), 2022. 2 +[69] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4 +[70] Matthew Tancik, Ben Mildenhall, Terrance Wang, Divi Schmidt, Pratul P Srinivasan, Jonathan T Barron, and Ren + +Ng. Learned initializations for optimizing coordinate-based neural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[71] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2 +[72] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. In ACM Transactions on Graphics (TOG), 2021. 
2 +[73] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2 +[74] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6 +[75] Xiaohua Xie, Kai Xu, Niloy J Mitra, Daniel Cohen-Or, Wenyong Gong, Qi Su, and Baoquan Chen. Sketch-to-design: Context-based part assembly. In Computer Graphics Forum, volume 32, pages 233–245. Wiley Online Library, 2013. 2 +[76] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2 +[77] Shunyu Yao, Tzu Ming Hsu, Jun-Yan Zhu, Jiajun Wu, Antonio Torralba, Bill Freeman, and Josh Tenenbaum. 3d-aware scene manipulation via inverse graphics. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2 +[78] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. Pixelnerf: Neural radiance fields from one or few images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2 +[79] Jichao Zhang, Enver Sangineto, Hao Tang, Aliaksandr Siarohin, Zhun Zhong, Nicu Sebe, and Wei Wang. 3d-aware semantic-guided generative model for human synthesis. In European Conference on Computer Vision (ECCV), 2022. 2 +[80] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[81] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 4 +[82] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision (ECCV), 2020. 2 +[83] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A Efros. Generative visual manipulation on the natural image manifold. In European Conference on Computer Vision (ECCV), 2016. 2, 8 +[84] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent + +adversarial networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 1, 2 +[85] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darrell, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. Advances in neural information processing systems, 30, 2017. 6 +[86] Jun-Yan Zhu, Zhoutong Zhang, Chengkai Zhang, Jiajun Wu, Antonio Torralba, Josh Tenenbaum, and Bill Freeman. Visual object networks: Image generation with disentangled 3d representations. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2 +[87] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7 +[88] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R. Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 
2 \ No newline at end of file diff --git a/2023/3D-Aware Conditional Image Synthesis/images.zip b/2023/3D-Aware Conditional Image Synthesis/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..fdfa851a4b6434e920748766c69847c541d08d8c --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cab0b813f65fe33afee9a4fced949fd7c250a222839fd4f73b4de788130481b5 +size 990518 diff --git a/2023/3D-Aware Conditional Image Synthesis/layout.json b/2023/3D-Aware Conditional Image Synthesis/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f44eb8a26868f20cecec4fd32b2b67eace58b454 --- /dev/null +++ b/2023/3D-Aware Conditional Image Synthesis/layout.json @@ -0,0 +1,11189 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 176, + 103, + 419, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 103, + 419, + 121 + ], + "spans": [ + { + "bbox": [ + 176, + 103, + 419, + 121 + ], + "type": "text", + "content": "3D-aware Conditional Image Synthesis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 136, + 143, + 457, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 143, + 457, + 173 + ], + "spans": [ + { + "bbox": [ + 136, + 143, + 457, + 173 + ], + "type": "text", + "content": "Kangle Deng Gengshan Yang Deva Ramanan Jun-Yan Zhu Carnegie Mellon University" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 47, + 197, + 547, + 421 + ], + "blocks": [ + { + "bbox": [ + 47, + 197, + 547, + 421 + ], + "lines": [ + { + "bbox": [ + 47, + 197, + 547, + 421 + ], + "spans": [ + { + "bbox": [ + 47, + 197, + 547, + 421 + ], + "type": "image", + "image_path": "d0db08c534775dbb6eca15e32dcc39d29d6901cc09dc50bce140e3be6216c11a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 427, + 547, + 460 + ], + "lines": [ + { + "bbox": [ + 45, + 427, + 547, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 427, + 547, + 460 + ], + "type": "text", + "content": "Figure 1. Given a 2D label map as input, such as a segmentation or edge map, our model learns to predict high-quality 3D labels, geometry, and appearance, which enables us to render both labels and RGB images from different viewpoints. The inferred 3D labels further allow interactive editing of label maps from any viewpoint, as shown in Figure 10." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 470, + 192, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 470, + 192, + 483 + ], + "spans": [ + { + "bbox": [ + 143, + 470, + 192, + 483 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 490, + 288, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 490, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 45, + 490, + 288, + 646 + ], + "type": "text", + "content": "We propose pix2pix3D, a 3D-aware conditional generative model for controllable photorealistic image synthesis. Given a 2D label map, such as a segmentation or edge map, our model learns to synthesize a corresponding image from different viewpoints. To enable explicit 3D user control, we extend conditional generative models with neural radiance fields. 
Given widely-available posed monocular image and label map pairs, our model learns to assign a label to every 3D point in addition to color and density, which enables it to render the image and pixel-aligned label map simultaneously. Finally, we build an interactive system that allows users to edit the label map from different viewpoints and generate outputs accordingly." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 127, + 681 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "Content creation with generative models has witnessed tremendous progress in recent years, enabling high-quality," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 471, + 547, + 590 + ], + "type": "text", + "content": "user-controllable image and video synthesis [19, 20, 24, 34]. In particular, image-to-image translation methods [29, 56, 84] allow users to interactively create and manipulate a high-resolution image given a 2D input label map. Unfortunately, existing image-to-image translation methods operate purely in 2D, without explicit reasoning of the underlying 3D structure of the content. As shown in Figure 1, we aim to make conditional image synthesis 3D-aware, allowing not only 3D content generation but also viewpoint manipulation and attribute editing (e.g., car shape) in 3D." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 592, + 547, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 592, + 547, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 592, + 547, + 688 + ], + "type": "text", + "content": "Synthesizing 3D content conditioned on user input is challenging. For model training, it is costly to obtain large-scale datasets with paired user inputs and their desired 3D outputs. During test time, 3D content creation often requires multi-view user inputs, as a user may want to specify the details of 3D objects using 2D interfaces from different viewpoints. However, these inputs may not be 3D-consistent, providing conflicting signals for 3D content creation." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 547, + 713 + ], + "type": "text", + "content": "To address the above challenges, we extend conditional generative models with 3D neural scene representations. 
To" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4434" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 216 + ], + "type": "text", + "content": "enable cross-view editing, we additionally encode semantic information in 3D, which can then be rendered as 2D label maps from different viewpoints. We learn the aforementioned 3D representation using only 2D supervision in the form of image reconstruction and adversarial losses. While the reconstruction loss ensures the alignment between 2D user inputs and corresponding 3D content, our pixel-aligned conditional discriminator encourages the appearance and labels to look plausible while remaining pixel-aligned when rendered into novel viewpoints. We also propose a cross-view consistency loss to enforce the latent codes to be consistent from different viewpoints." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "spans": [ + { + "bbox": [ + 46, + 217, + 289, + 348 + ], + "type": "text", + "content": "We focus on 3D-aware semantic image synthesis on the CelebAMask-HQ [38], AFHQ-cat [16], and shapenetcar [10] datasets. Our method works well for various 2D user inputs, including segmentation maps and edge maps. Our method outperforms several 2D and 3D baselines, such as Pix2NeRF variants [6], SofGAN [11], and SEAN [87]. We further ablate the impact of various design choices and demonstrate applications of our method, such as cross-view editing and explicit user control over semantics and style. Please see our website for more results and code. Please check out the full version of our paper at arXiv." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 361, + 134, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 361, + 134, + 373 + ], + "spans": [ + { + "bbox": [ + 47, + 361, + 134, + 373 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 386, + 289, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 289, + 649 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 289, + 649 + ], + "type": "text", + "content": "Neural Implicit Representation. 
Neural implicit fields, such as DeepSDF and NeRFs [46, 54], model the appearance of objects and scenes with an implicitly defined, continuous 3D representation parameterized by neural networks. They have produced significant results for 3D reconstruction [67, 88] and novel view synthesis applications [39, 43, 44, 48, 80] thanks to their compactness and expressiveness. NeRF and its descendants aim to optimize a network for an individual scene, given hundreds of images from multiple viewpoints. Recent works further reduce the number of training views through learning network initializations [13, 70, 78], leveraging auxiliary supervision [18, 30], or imposing regularization terms [50]. Recently, explicit or hybrid representations of radiance fields [12, 48, 61] have also shown promising results regarding quality and speed. In our work, we use hybrid representations for modeling both user inputs and outputs in 3D, focusing on synthesizing novel images rather than reconstructing an existing scene. A recent work Pix2NeRF [6] aims to translate a single image to a neural radiance field, which allows single-image novel view synthesis. In contrast, we focus on 3D-aware user-controlled content generation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 654, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 288, + 715 + ], + "type": "text", + "content": "Conditional GANs. Generative adversarial networks (GANs) learn the distribution of natural images by forcing the generated and real images to be indistinguishable. They have demonstrated high-quality results on 2D image synthesis and manipulation [1, 3, 5, 20, 33-35, 59, 65, 72, 82, 83]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 180 + ], + "type": "text", + "content": "Several methods adopt image-conditional GANs [29, 47] for user-guided image synthesis and editing applications [26, 27, 38, 40, 55, 56, 62, 73, 84, 87]. In contrast, we propose a 3D-aware generative model conditioned on 2D user inputs that can render view-consistent images and enable interactive 3D editing. Recently, SoFGAN [11] uses a 3D semantic map generator and a 2D semantic-to-image generator to enable 3D-aware generation, but using 2D generators does not ensure 3D consistency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 182, + 548, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 548, + 409 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 548, + 409 + ], + "type": "text", + "content": "3D-aware Image Synthesis. Early data-driven 3D image editing systems can achieve various 3D effects but often require a huge amount of manual effort [14, 37]. Recent works have integrated the 3D structure into learning-based image generation pipelines using various geometric representations, including voxels [22,86], voxelized 3D features [49], and 3D morphable models [71, 77]. However, many rely on external 3D data [71, 77, 86]. Recently, neural scene representations have been integrated into GANs to enable 3D-aware image synthesis [8,9,21,51-53,64,76]. Intriguingly, these 3D-aware GANs can learn 3D structures without any 3D supervision. 
For example, StyleNeRF [21] and EG3D [8] learn to generate 3D representations by modulating either NeRFs or explicit representations with latent style vectors. This allows them to render high-resolution view-consistent images. Unlike the above methods, we focus on conditional synthesis and interactive editing rather than random sampling. Several works [17,28,42,75] have explored sketch-based shape generation but they do not allow realistic image synthesis." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 410, + 548, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 410, + 548, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 410, + 548, + 649 + ], + "type": "text", + "content": "Closely related to our work, Huang et al. [25] propose synthesizing novel views conditional on a semantic map. Our work differs in three ways. First, we can predict full 3D labels, geometry, and appearance, rather than only 2D views, which enables cross-view editing. Second, our method can synthesize images with a much wider baseline than Huang et al. [25]. Finally, our learning algorithm does not require ground truth multi-view images of the same scene. Two recent works, FENeRF [69] and 3DSGAN [79], also leverage semantic labels for training 3D-aware GANs, but they do not support conditional inputs and require additional efforts (e.g., GAN-inversion) to allow user editing. Three concurrent works, IDE-3D [68], NeRFFaceEditing [31], and sem2nerf [15], also explore the task of 3D-aware generation based on segmentation masks. However, IDE-3D and sem2nerf only allow editing on a fixed view, and NeRF-FaceEditing focuses on real image editing rather than generation. All of them do not include results for other input modalities. In contrast, we present a general-purpose method that works well for diverse datasets and input controls." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 658, + 362, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 362, + 669 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 362, + 669 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "content": "Given a 2D label map " + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathbf{s}}" + }, + { + "bbox": [ + 305, + 677, + 547, + 715 + ], + "type": "text", + "content": ", such as a segmentation or edge map, pix2pix3D generates a 3D-volumetric representation of geometry, appearance, and labels that can be rendered" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4435" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 73, + 544, + 352 + ], + "blocks": [ + { + "bbox": [ + 51, + 73, + 544, + 352 + ], + "lines": [ + { + "bbox": [ + 51, + 73, + 544, + 352 + ], + "spans": [ + { + "bbox": [ + 51, + 73, + 544, + 352 + ], + "type": "image", + "image_path": "c1c686b869a2e8159fc2727cb9533b35544f9d7cbf22f1705c03b3f7bd14fb26.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "lines": [ + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": "Figure 2. Overall pipeline. Given a 2D label map (e.g., segmentation map), a random latent code " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ", and a camera pose " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\hat{P}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": " as inputs, our generator renders the label map and image from viewpoint " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\hat{P}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ". Intuitively, the input label map specifies the geometric structure, while the latent code captures the appearance, such as hair color. We begin with an encoder that encodes both the input label map and the latent code into style vectors " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{+}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ". 
We then use " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{+}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": " to modulate our 3D representation, which takes a spatial point " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": " and outputs (1) color " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ", (2) density " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ", (3) feature " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\phi \\in \\mathbb{R}^l" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ", and (4) label " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ". We then perform volumetric rendering and 2D upsampling to get the high-resolution label map " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}^{+}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": " and RGB Image " + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{c}}^{+}" + }, + { + "bbox": [ + 46, + 355, + 548, + 434 + ], + "type": "text", + "content": ". For those rendered from ground-truth poses, we compare them to ground-truth labels and images with an LPIPS loss and label reconstruction loss. We apply a GAN loss on labels and images rendered from both novel and original viewpoints." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "spans": [ + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "text", + "content": "from different viewpoints. Figure 2 provides an overview. We first introduce the formulation of our 3D conditional generative model for 3D-aware image synthesis in Section 3.1. Then, in Section 3.2, we discuss how to learn the model from color and label map pairs " + }, + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_{\\mathrm{c}},\\mathbf{I}_{\\mathrm{s}}\\}" + }, + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "text", + "content": " associated with poses " + }, + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 46, + 449, + 288, + 510 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 514, + 234, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 234, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 234, + 526 + ], + "type": "text", + "content": "3.1. 
Conditional 3D Generative Models" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 533, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 628 + ], + "type": "text", + "content": "Similar to EG3D [8], we adopt a hybrid representation for the density and appearance of a scene and use style vectors to modulate the 3D generations. To condition the 3D representations on 2D label map inputs, we introduce a conditional encoder that maps a 2D label map into a latent style vector. Additionally, pix2pix3D produces 3D labels that can be rendered from different viewpoints, allowing for cross-view user editing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "content": "Conditional Encoder. Given a 2D label map input " + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "content": " and a random latent code sampled from the spherical Gaussian space " + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\sim \\mathcal{N}(0, I)" + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "content": ", our conditional encoder " + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "content": " outputs a list of style vectors " + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^{+} \\in \\mathbb{R}^{l \\times 256}" + }, + { + "bbox": [ + 46, + 630, + 288, + 677 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 684, + 201, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 684, + 201, + 696 + ], + "spans": [ + { + "bbox": [ + 132, + 684, + 201, + 696 + ], + "type": "interline_equation", + "content": "\\mathbf {w} ^ {+} = E (\\mathbf {I _ {s}}, \\mathbf {z}),", + "image_path": "2e2b9ea34b44817760405976ad891f8ed05ae1f548c1fb0430094f705a9b7aee.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "inline_equation", + "content": "l = 13" + }, + { + "bbox": [ + 47, + 701, + 289, + 714 + ], + "type": "text", + "content": " is the number of layers to be modulated." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "text", + "content": "Specifically, we encode " + }, + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "text", + "content": " into the first 7 style vectors that represent the global geometric information of the scene. We then feed the random latent code " + }, + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 304, + 449, + 546, + 509 + ], + "type": "text", + "content": " through a Multi-Layer Perceptron (MLP) mapping network to obtain the rest of the style vectors that control the appearance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": "Conditional 3D Representation. Our 3D representation is parameterized by tri-planes followed by an 2-layer MLP " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " [8], which takes in a spatial point " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " and returns 4 types of outputs: (1) color " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\mathbf{c} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": ", (2) density " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\sigma \\in \\mathbb{R}^+" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": ", (3) feature " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\phi \\in \\mathbb{R}^{64}" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " for the purpose of 2D upsampling, and most notably, (4) label " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\in \\mathbb{R}^c" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " is the number of classes if " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\mathbf{I_s}" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " is a segmentation map, otherwise 1 for edge labels. 
We make the field conditional by modulating the generation of tri-planes " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{tri}}" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": " with the style vectors " + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^+" + }, + { + "bbox": [ + 304, + 511, + 547, + 632 + ], + "type": "text", + "content": ". We also remove the view dependence of the color following [8, 21]. Formally," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 372, + 639, + 480, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 639, + 480, + 654 + ], + "spans": [ + { + "bbox": [ + 372, + 639, + 480, + 654 + ], + "type": "interline_equation", + "content": "(\\mathbf {c}, \\mathbf {s}, \\sigma , \\phi) = f (F _ {\\mathbf {w} ^ {+}} ^ {\\mathrm {t r i}} (\\mathbf {x})).", + "image_path": "7398d2afd9f181e27d62f12bdccfb65aacd8fc0b852bed9beacb642cd4d1e1c5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 665, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 548, + 715 + ], + "type": "text", + "content": "Volume Rendering and Upsampling. We apply volumetric rendering to synthesize color images [32, 46]. In addition, we render label maps, which are crucial for enabling cross-view editing (Section 4.3) and improving rendering quality" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4436" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": "(Table 1). Given a viewpoint " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "\\hat{P}" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": " looking at the scene origin, we sample " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": " points along the ray that emanates from a pixel location and query density, color, labels, and feature information from our 3D representation. Let " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{x_i}" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": " be the i-th sampled point along the ray " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i, \\mathbf{s}_i" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "\\phi_i" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": " be the color, labels, and the features of " + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{x_i}" + }, + { + "bbox": [ + 46, + 72, + 288, + 168 + ], + "type": "text", + "content": ". Similar to [69], The color, label map, and feature images are computed as the weighted combination of queried values," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 176, + 287, + 219 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 176, + 287, + 219 + ], + "spans": [ + { + "bbox": [ + 52, + 176, + 287, + 219 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {I}} _ {\\mathbf {c}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {c} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\mathbf {s}} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\mathbf {s} _ {i}, \\quad \\hat {\\mathbf {I}} _ {\\phi} (r) = \\sum_ {i = 1} ^ {N} \\tau_ {i} \\phi_ {i}, \\tag {1}", + "image_path": "3738ac7cce1b5b81a5c69f7afbab0c9eb47ed61198dd88dcb24a60d6b28750ff.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "text", + "content": "where the transmittance " + }, + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "inline_equation", + "content": "\\tau_{i}" + }, + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "text", + "content": " is computed as the probability of a photon traversing between the camera center and the i-th point given the length of the i-th interval " + }, + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 46, + 220, + 288, + 256 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 264, + 252, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 264, + 252, + 299 + ], + "spans": [ + { + "bbox": [ + 83, + 264, + 252, + 299 + ], + "type": "interline_equation", + "content": "\\tau_ {i} = \\prod_ {j = 1} ^ {i} \\exp \\left(- \\sigma_ {j} \\delta_ {j}\\right) (1 - \\exp \\left(- \\sigma_ {i} \\delta_ {i}\\right)).", + "image_path": "4bb3f317c01154fa510d2568446d4bc3456db53614aaf75974ef3dfccaa6f3ae.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": "Similar to prior works [8, 21, 52], we approximate Equation 1 by 2D Upsampler " + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": " to reduce the computational cost. 
We render high-res " + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": " images in two passes. In the first pass, we render low-res " + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": " images " + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{c}}, \\hat{\\mathbf{I}}_{\\mathbf{s}}, \\hat{\\mathbf{I}}_{\\phi}" + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": ". Then a CNN up-sampler " + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 305, + 287, + 365 + ], + "type": "text", + "content": " is applied to obtain high-res images," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 88, + 372, + 245, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 372, + 245, + 388 + ], + "spans": [ + { + "bbox": [ + 88, + 372, + 245, + 388 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\phi}), \\qquad \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} = U (\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\phi}).", + "image_path": "994847a011ab3b7bd0222227d1d20cde163442c61e408d0749c1dcb4ef9fee69.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 395, + 162, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 395, + 162, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 395, + 162, + 407 + ], + "type": "text", + "content": "3.2. Learning Objective" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 414, + 288, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 288, + 496 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 288, + 496 + ], + "type": "text", + "content": "Learning conditional 3D representations from monocular images is challenging due to its under-constrained nature. Given training data of associated images, label maps, and camera poses predicted by an off-the-shelf model, we carefully construct learning objectives, including reconstruction, adversarial, and cross-view consistency losses. These objectives will be described below." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": "Reconstruction Loss. 
Given a ground-truth viewpoint " + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": " associated with the color and label maps " + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{I}_{\\mathbf{c}}, \\mathbf{I}_{\\mathbf{s}}\\}" + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": ", we render color and label maps from " + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": " and compute reconstruction losses for both high-res and low-res output. We use LPIPS [81] to compute the image reconstruction loss " + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_c" + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": " for color images. For label reconstruction loss " + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_s" + }, + { + "bbox": [ + 46, + 500, + 287, + 596 + ], + "type": "text", + "content": ", we use the balanced cross-entropy loss for segmentation maps or L2 Loss for edge maps," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 64, + 604, + 270, + 618 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 604, + 270, + 618 + ], + "spans": [ + { + "bbox": [ + 64, + 604, + 270, + 618 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e c o n}} = \\lambda_ {c} \\mathcal {L} _ {c} \\left(\\mathbf {I} _ {\\mathbf {c}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+} \\right\\}\\right) + \\lambda_ {s} \\mathcal {L} _ {s} \\left(\\mathbf {I} _ {\\mathbf {s}}, \\left\\{\\hat {\\mathbf {I}} _ {\\mathbf {s}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+} \\right\\}\\right),", + "image_path": "fe214cdcb656c6ac6812b12e9a830b0d7a9308078b41f8f10d0561757b3d0171.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "spans": [ + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "inline_equation", + "content": "\\lambda_{c}" + }, + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "inline_equation", + "content": "\\lambda_{s}" + }, + { + "bbox": [ + 47, + 627, + 194, + 638 + ], + "type": "text", + "content": " balance two terms." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "text", + "content": "Pixel-aligned Conditional Discriminator. The reconstruction loss alone fails to synthesize detailed results from novel viewpoints. Therefore, we use an adversarial loss [20] to enforce renderings to look realistic from random viewpoints. 
Specifically, we have two discriminators " + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "inline_equation", + "content": "D_{\\mathbf{c}}" + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "inline_equation", + "content": "D_{\\mathbf{s}}" + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "text", + "content": " for RGB images and label maps, respectively. " + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "inline_equation", + "content": "D_{\\mathbf{c}}" + }, + { + "bbox": [ + 46, + 641, + 288, + 713 + ], + "type": "text", + "content": " is a widely-used" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 307, + 82, + 545, + 174 + ], + "blocks": [ + { + "bbox": [ + 359, + 70, + 493, + 81 + ], + "lines": [ + { + "bbox": [ + 359, + 70, + 493, + 81 + ], + "spans": [ + { + "bbox": [ + 359, + 70, + 493, + 81 + ], + "type": "text", + "content": "Multi-view Generation of Seg Maps" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 82, + 545, + 174 + ], + "lines": [ + { + "bbox": [ + 307, + 82, + 545, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 82, + 545, + 174 + ], + "type": "image", + "image_path": "988411ec3b4d606f33e07441af6e5a4bd16111801d61dbb690142bd8ed4bbd8a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "lines": [ + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": "Figure 3. Cross-View Consistency Loss. Given an input label map " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": " and its associated pose " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ", we first infer the geometry latent code " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{\\mathbf{g}}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ". From " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{\\mathbf{g}}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ", we can generate a label map " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": " from the same pose " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}'" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": " from a random pose " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{P}'" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ". 
Next, we infer " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{\\mathbf{g}}'" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": " from the novel view " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}'" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ", and render it back to the original pose " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": " to obtain " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}''" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": ". Finally, we add a reconstruction loss: " + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{CVC}} = \\lambda_{\\mathrm{CVC}}\\mathcal{L}_s(\\hat{\\mathbf{I}}_{\\mathbf{s}}'', \\hat{\\mathbf{I}}_{\\mathbf{s}})" + }, + { + "bbox": [ + 304, + 182, + 547, + 251 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "text", + "content": "GAN loss that takes real and fake images as input, while the pixel-aligned conditional discriminator " + }, + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "inline_equation", + "content": "D_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "text", + "content": " concatenates color images and label maps as input, which encourages pixel alignment between color images and label maps. Notably, in " + }, + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "inline_equation", + "content": "D_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 269, + 547, + 376 + ], + "type": "text", + "content": ", we stop the gradients for the color images to prevent a potential quality downgrade. We also feed the rendered low-res images to prevent the upsampler from hallucinating details, inconsistent with the low-res output. The adversarial loss can be written as follows." 
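For reference, a minimal NumPy sketch of the weighted compositing in Equation (1) above: the same per-sample weights tau_i are shared by the color, label, and feature renderings. It assumes the usual exclusive cumulative product for transmittance; array shapes and function names are illustrative, not the authors' code.

```python
import numpy as np

def composite(sigma, delta, c, s, phi):
    """sigma, delta: (N,) densities and interval lengths along one ray;
    c: (N, 3) colors; s: (N, K) label logits; phi: (N, F) features."""
    alpha = 1.0 - np.exp(-sigma * delta)            # opacity of each interval
    trans = np.cumprod(1.0 - alpha + 1e-10)         # light surviving past each sample
    trans = np.concatenate(([1.0], trans[:-1]))     # exclusive product: light reaching sample i
    w = trans * alpha                               # tau_i in Equation (1)
    return (w[:, None] * c).sum(0), (w[:, None] * s).sum(0), (w[:, None] * phi).sum(0)

# toy ray with N = 64 samples
I_c, I_s, I_phi = composite(np.random.rand(64), np.full(64, 0.03),
                            np.random.rand(64, 3), np.random.rand(64, 6),
                            np.random.rand(64, 32))
```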
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 382, + 533, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 382, + 533, + 397 + ], + "spans": [ + { + "bbox": [ + 318, + 382, + 533, + 397 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {G A N}} = \\lambda_ {D _ {\\mathbf {c}}} \\mathcal {L} _ {D _ {\\mathbf {c}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}) + \\lambda_ {D _ {\\mathbf {s}}} \\mathcal {L} _ {D _ {\\mathbf {s}}} (\\hat {\\mathbf {I}} _ {\\mathbf {c}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {c}}, \\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {+}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}).", + "image_path": "c3caaf1612ba924b345d669f203179afd709be9eab0f1076ddf49c3ecee01253.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "inline_equation", + "content": "\\lambda_{D_{\\mathrm{c}}}" + }, + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "inline_equation", + "content": "\\lambda_{D_{\\mathrm{s}}}" + }, + { + "bbox": [ + 304, + 403, + 545, + 427 + ], + "type": "text", + "content": " balance two terms. To stabilize the GAN training, we adopt the R1 regularization loss [45]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "spans": [ + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": "Cross-view Consistency Loss. We observe that inputting label maps of the same object from different viewpoints will sometimes result in different 3D shapes. Therefore we add a cross-view consistency loss to regularize the training, as illustrated in Figure 3. 
Given an input label map " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " and its associated pose " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": ", we generate the label map " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " from a different viewpoint " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^{\\prime}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": ", and render the label map " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " back to the pose " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " as input. We add a reconstruction loss between " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}^{\\prime \\prime}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{I}}_{\\mathbf{s}}" + }, + { + "bbox": [ + 304, + 430, + 545, + 537 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 374, + 543, + 477, + 557 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 543, + 477, + 557 + ], + "spans": [ + { + "bbox": [ + 374, + 543, + 477, + 557 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {C V C}} = \\lambda_ {\\mathrm {C V C}} \\mathcal {L} _ {s} (\\hat {\\mathbf {I}} _ {\\mathbf {s}} ^ {\\prime \\prime}, \\hat {\\mathbf {I}} _ {\\mathbf {s}}),", + "image_path": "80ac3a65c4b0a34ac53b632ab67bfa4b470643fdf504e2f4b104d10731c2acc4.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "spans": [ + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_s" + }, + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "text", + "content": " denotes the reconstruction loss in the label space, and " + }, + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{CVC}}" + }, + { + "bbox": [ + 304, + 563, + 547, + 599 + ], + "type": "text", + "content": " weights the loss term. 
This loss is crucial for reducing error accumulation during cross-view editing." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 601, + 547, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 601, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 601, + 547, + 624 + ], + "type": "text", + "content": "Optimization. Our final learning objective is written as follows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 360, + 625, + 490, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 360, + 625, + 490, + 638 + ], + "spans": [ + { + "bbox": [ + 360, + 625, + 490, + 638 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\text {r e c o n}} + \\mathcal {L} _ {\\text {G A N}} + \\mathcal {L} _ {\\text {C V C}}.", + "image_path": "aa28851a40893b860d41e47fb3fa64e01f56063b4a86eff12776c23b221d8f6d.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "content": "At every iteration, we determine whether to use a ground-truth pose or sample a random one with a probability of " + }, + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 642, + 547, + 713 + ], + "type": "text", + "content": ". We use the reconstruction loss and GAN loss for ground-truth poses, while for random poses, we only use the GAN loss. We provide the hyper-parameters and more implementation details in the appendix of our arXiv version." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4437" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 86, + 115, + 154 + ], + "blocks": [ + { + "bbox": [ + 58, + 73, + 111, + 84 + ], + "lines": [ + { + "bbox": [ + 58, + 73, + 111, + 84 + ], + "spans": [ + { + "bbox": [ + 58, + 73, + 111, + 84 + ], + "type": "text", + "content": "Input Seg Map" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 57, + 86, + 115, + 154 + ], + "lines": [ + { + "bbox": [ + 57, + 86, + 115, + 154 + ], + "spans": [ + { + "bbox": [ + 57, + 86, + 115, + 154 + ], + "type": "image", + "image_path": "6524c121ce230d05cd41feb9f75bed487b059bf5a675aa6a54d2e8ffe2d5a69d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 120, + 83, + 261, + 154 + ], + "blocks": [ + { + "bbox": [ + 175, + 74, + 194, + 82 + ], + "lines": [ + { + "bbox": [ + 175, + 74, + 194, + 82 + ], + "spans": [ + { + "bbox": [ + 175, + 74, + 194, + 82 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 120, + 83, + 261, + 154 + ], + "lines": [ + { + "bbox": [ + 120, + 83, + 261, + 154 + ], + "spans": [ + { + "bbox": [ + 120, + 83, + 261, + 154 + ], + "type": "image", + "image_path": "962e55fa0d7fee6b63d4c7ba2d40169c247bab59d0ed8d5aa78cd86a0ce374a6.jpg" + } + ] + } + ], + 
"index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 261, + 83, + 366, + 153 + ], + "blocks": [ + { + "bbox": [ + 301, + 73, + 338, + 81 + ], + "lines": [ + { + "bbox": [ + 301, + 73, + 338, + 81 + ], + "spans": [ + { + "bbox": [ + 301, + 73, + 338, + 81 + ], + "type": "text", + "content": "Pix2NeRF" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 261, + 83, + 366, + 153 + ], + "lines": [ + { + "bbox": [ + 261, + 83, + 366, + 153 + ], + "spans": [ + { + "bbox": [ + 261, + 83, + 366, + 153 + ], + "type": "image", + "image_path": "492c45d44d7ba6d8169e4d0c645e6a8da2d7374cd6ea95cdbfd93ce49e14e693.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 157, + 546, + 190 + ], + "lines": [ + { + "bbox": [ + 46, + 157, + 546, + 190 + ], + "spans": [ + { + "bbox": [ + 46, + 157, + 546, + 190 + ], + "type": "text", + "content": "Figure 4. Qualitative Comparison with Pix2NeRF [6], SoFGAN [11], and SEAN [87] on CelebAMask dataset for seg2face task. SEAN fails in multi-view synthesis, while SoFGAN suffers from multi-view inconsistency (e.g., face identity changes across viewpoints). Our method renders high-quality images while maintaining multi-view consistency. Please check our website for more examples." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 366, + 83, + 472, + 153 + ], + "blocks": [ + { + "bbox": [ + 400, + 73, + 434, + 81 + ], + "lines": [ + { + "bbox": [ + 400, + 73, + 434, + 81 + ], + "spans": [ + { + "bbox": [ + 400, + 73, + 434, + 81 + ], + "type": "text", + "content": "SoFGAN" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 366, + 83, + 472, + 153 + ], + "lines": [ + { + "bbox": [ + 366, + 83, + 472, + 153 + ], + "spans": [ + { + "bbox": [ + 366, + 83, + 472, + 153 + ], + "type": "image", + "image_path": "25d1c57f903489de4ac9d4c8d90385d68150f583be4b4f372a55bccb3970eaff.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 472, + 83, + 543, + 153 + ], + "blocks": [ + { + "bbox": [ + 495, + 73, + 519, + 81 + ], + "lines": [ + { + "bbox": [ + 495, + 73, + 519, + 81 + ], + "spans": [ + { + "bbox": [ + 495, + 73, + 519, + 81 + ], + "type": "text", + "content": "SEAN" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 472, + 83, + 543, + 153 + ], + "lines": [ + { + "bbox": [ + 472, + 83, + 543, + 153 + ], + "spans": [ + { + "bbox": [ + 472, + 83, + 543, + 153 + ], + "type": "image", + "image_path": "446644698823e9c07c72d457e262b4557f2b556ad8bda9432911171a4ac03a66.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 56, + 203, + 102, + 262 + ], + "blocks": [ + { + "bbox": [ + 56, + 193, + 104, + 203 + ], + "lines": [ + { + "bbox": [ + 56, + 193, + 104, + 203 + ], + "spans": [ + { + "bbox": [ + 56, + 193, + 104, + 203 + ], + "type": "text", + "content": "Input Seg Map" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 56, + 203, + 102, + 262 + ], + "lines": [ + { + "bbox": [ + 56, + 203, + 102, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 203, + 102, + 262 + ], + "type": "image", + "image_path": 
"73d0e8dcc36b189f63f1ea7b19c085511cee534a07c251b442368c72542146b3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 110, + 202, + 196, + 262 + ], + "blocks": [ + { + "bbox": [ + 145, + 193, + 162, + 201 + ], + "lines": [ + { + "bbox": [ + 145, + 193, + 162, + 201 + ], + "spans": [ + { + "bbox": [ + 145, + 193, + 162, + 201 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 110, + 202, + 196, + 262 + ], + "lines": [ + { + "bbox": [ + 110, + 202, + 196, + 262 + ], + "spans": [ + { + "bbox": [ + 110, + 202, + 196, + 262 + ], + "type": "image", + "image_path": "fa82f049d6800f9a4b8d6e0160c5b23261b5fbb18c367726987cf682cf2ab95d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 199, + 202, + 285, + 261 + ], + "blocks": [ + { + "bbox": [ + 215, + 193, + 263, + 202 + ], + "lines": [ + { + "bbox": [ + 215, + 193, + 263, + 202 + ], + "spans": [ + { + "bbox": [ + 215, + 193, + 263, + 202 + ], + "type": "text", + "content": "w/o 3D Labels" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 199, + 202, + 285, + 261 + ], + "lines": [ + { + "bbox": [ + 199, + 202, + 285, + 261 + ], + "spans": [ + { + "bbox": [ + 199, + 202, + 285, + 261 + ], + "type": "image", + "image_path": "2667b3ac10d618c8c04aee838c4b19b38a78be508b30b10c8f6b052b31387a13.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 50, + 262, + 108, + 321 + ], + "blocks": [ + { + "bbox": [ + 50, + 262, + 108, + 321 + ], + "lines": [ + { + "bbox": [ + 50, + 262, + 108, + 321 + ], + "spans": [ + { + "bbox": [ + 50, + 262, + 108, + 321 + ], + "type": "image", + "image_path": "1286d0a864f24b9a995cdaf2bfb3b7b1c4ed3425e1e932af73f41d5580cda404.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 110, + 262, + 196, + 322 + ], + "blocks": [ + { + "bbox": [ + 110, + 262, + 196, + 322 + ], + "lines": [ + { + "bbox": [ + 110, + 262, + 196, + 322 + ], + "spans": [ + { + "bbox": [ + 110, + 262, + 196, + 322 + ], + "type": "image", + "image_path": "293baefdf04c37c560ceb68f7e44c4547c13fcb023a6b1f32d5d0724828710b5.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 198, + 262, + 285, + 322 + ], + "blocks": [ + { + "bbox": [ + 198, + 262, + 285, + 322 + ], + "lines": [ + { + "bbox": [ + 198, + 262, + 285, + 322 + ], + "spans": [ + { + "bbox": [ + 198, + 262, + 285, + 322 + ], + "type": "image", + "image_path": "0754bdc1996513a0f381d6645fa35e4f747966a743de55433aa9c6cf0cd03390.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 50, + 323, + 102, + 381 + ], + "blocks": [ + { + "bbox": [ + 50, + 323, + 102, + 381 + ], + "lines": [ + { + "bbox": [ + 50, + 323, + 102, + 381 + ], + "spans": [ + { + "bbox": [ + 50, + 323, + 102, + 381 + ], + "type": "image", + "image_path": "df3d8e6c4c75d7110ef47a2f48f699d9fcf88ae9f5a6cf9ffbdfcdcc3d299080.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 384, + 546, + 405 + ], + "lines": [ + { + "bbox": [ + 46, + 384, + 546, + 405 + ], + 
"spans": [ + { + "bbox": [ + 46, + 384, + 546, + 405 + ], + "type": "text", + "content": "Figure 5. Qualitative ablation on seg2face and seg2cat. We ablate our method by removing the branch that renders label maps (w/o 3D Labels). Our results better align with input labels (e.g., hairlines and the cat's ear)." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 110, + 323, + 196, + 381 + ], + "blocks": [ + { + "bbox": [ + 110, + 323, + 196, + 381 + ], + "lines": [ + { + "bbox": [ + 110, + 323, + 196, + 381 + ], + "spans": [ + { + "bbox": [ + 110, + 323, + 196, + 381 + ], + "type": "image", + "image_path": "763d78b11909cd56b602c8cb563bda80d1ea09fa760c6ba2976ce6dc62826ded.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 198, + 323, + 285, + 381 + ], + "blocks": [ + { + "bbox": [ + 198, + 323, + 285, + 381 + ], + "lines": [ + { + "bbox": [ + 198, + 323, + 285, + 381 + ], + "spans": [ + { + "bbox": [ + 198, + 323, + 285, + 381 + ], + "type": "image", + "image_path": "e03c8222f29076d852d5ad90fbeeb9406e2a18927e788482a6e3617d19ad05bf.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 49, + 437, + 127, + 590 + ], + "blocks": [ + { + "bbox": [ + 56, + 426, + 119, + 437 + ], + "lines": [ + { + "bbox": [ + 56, + 426, + 119, + 437 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 119, + 437 + ], + "type": "text", + "content": "Input Edge Map" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 49, + 437, + 127, + 590 + ], + "lines": [ + { + "bbox": [ + 49, + 437, + 127, + 590 + ], + "spans": [ + { + "bbox": [ + 49, + 437, + 127, + 590 + ], + "type": "image", + "image_path": "689b58efb2e7efef39d2c744f5934ead645dc516fc6f60e473d266da6db4b071.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 132, + 437, + 286, + 591 + ], + "blocks": [ + { + "bbox": [ + 139, + 415, + 276, + 426 + ], + "lines": [ + { + "bbox": [ + 139, + 415, + 276, + 426 + ], + "spans": [ + { + "bbox": [ + 139, + 415, + 276, + 426 + ], + "type": "text", + "content": "Rendered RGB images & edge maps" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 154, + 426, + 190, + 435 + ], + "lines": [ + { + "bbox": [ + 154, + 426, + 190, + 435 + ], + "spans": [ + { + "bbox": [ + 154, + 426, + 190, + 435 + ], + "type": "text", + "content": "GT View" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 437, + 286, + 591 + ], + "lines": [ + { + "bbox": [ + 132, + 437, + 286, + 591 + ], + "spans": [ + { + "bbox": [ + 132, + 437, + 286, + 591 + ], + "type": "image", + "image_path": "3d3c8a105e49a2338d37b272fe6e18044354958d74ec02ee690fe6d523e90e42.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 593, + 288, + 615 + ], + "lines": [ + { + "bbox": [ + 46, + 593, + 288, + 615 + ], + "spans": [ + { + "bbox": [ + 46, + 593, + 288, + 615 + ], + "type": "text", + "content": "Figure 6. Results on edge2cat. Our model is trained on AFHQcat [16] with edges extracted by pidinet [66]." 
+ } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "bbox": [ + 47, + 627, + 123, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 627, + 123, + 641 + ], + "spans": [ + { + "bbox": [ + 47, + 627, + 123, + 641 + ], + "type": "text", + "content": "4. Experiment" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 46, + 648, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 648, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 648, + 288, + 696 + ], + "type": "text", + "content": "We first introduce the datasets and evaluation metrics. Then we compare our method with the baselines. Finally, we demonstrate cross-view editing and multi-modal synthesis applications enabled by our method." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 288, + 714 + ], + "type": "text", + "content": "Datasets. We consider four tasks: seg2face, seg2cat," + } + ] + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 307, + 203, + 365, + 262 + ], + "blocks": [ + { + "bbox": [ + 315, + 193, + 362, + 204 + ], + "lines": [ + { + "bbox": [ + 315, + 193, + 362, + 204 + ], + "spans": [ + { + "bbox": [ + 315, + 193, + 362, + 204 + ], + "type": "text", + "content": "Input Seg Map" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 203, + 365, + 262 + ], + "lines": [ + { + "bbox": [ + 307, + 203, + 365, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 203, + 365, + 262 + ], + "type": "image", + "image_path": "611b3a3f67c0d28c059363a578b0e5558d3d974a5ff4a53806747d05ed6cb5fa.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 369, + 203, + 454, + 261 + ], + "blocks": [ + { + "bbox": [ + 399, + 193, + 416, + 202 + ], + "lines": [ + { + "bbox": [ + 399, + 193, + 416, + 202 + ], + "spans": [ + { + "bbox": [ + 399, + 193, + 416, + 202 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 369, + 203, + 454, + 261 + ], + "lines": [ + { + "bbox": [ + 369, + 203, + 454, + 261 + ], + "spans": [ + { + "bbox": [ + 369, + 203, + 454, + 261 + ], + "type": "image", + "image_path": "da8d379482d15b6744c8d2fee408dc38176a00236fdc0e02e612b6f55aa221d2.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 458, + 202, + 544, + 261 + ], + "blocks": [ + { + "bbox": [ + 476, + 193, + 524, + 202 + ], + "lines": [ + { + "bbox": [ + 476, + 193, + 524, + 202 + ], + "spans": [ + { + "bbox": [ + 476, + 193, + 524, + 202 + ], + "type": "text", + "content": "w/o 3D Labels" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 458, + 202, + 544, + 261 + ], + "lines": [ + { + "bbox": [ + 458, + 202, + 544, + 261 + ], + "spans": [ + { + "bbox": [ + 458, + 202, + 544, + 261 + ], + "type": "image", + "image_path": "c3837de7e313c28c101c3a24263b10dbcd65e874fee7bd8a9466700be6d15ec3.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 308, + 262, + 368, + 317 + ], + "blocks": [ + { + "bbox": [ + 308, + 262, + 368, + 317 + ], + "lines": 
[ + { + "bbox": [ + 308, + 262, + 368, + 317 + ], + "spans": [ + { + "bbox": [ + 308, + 262, + 368, + 317 + ], + "type": "image", + "image_path": "075e8f080fffcde2a09c9e911fb4c114a3fb1e5e0c56c746265e0731c1264b44.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 370, + 262, + 455, + 321 + ], + "blocks": [ + { + "bbox": [ + 370, + 262, + 455, + 321 + ], + "lines": [ + { + "bbox": [ + 370, + 262, + 455, + 321 + ], + "spans": [ + { + "bbox": [ + 370, + 262, + 455, + 321 + ], + "type": "image", + "image_path": "654619b953a1a632b636e8147a0aaf0328730b9e93a0ac2f876bb36d04e60bdc.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 457, + 262, + 544, + 321 + ], + "blocks": [ + { + "bbox": [ + 457, + 262, + 544, + 321 + ], + "lines": [ + { + "bbox": [ + 457, + 262, + 544, + 321 + ], + "spans": [ + { + "bbox": [ + 457, + 262, + 544, + 321 + ], + "type": "image", + "image_path": "7ad193f647b58e706d59eba116ba5f003f6a531c1c23e3860a17c6c55c3b1890.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 308, + 323, + 368, + 376 + ], + "blocks": [ + { + "bbox": [ + 308, + 323, + 368, + 376 + ], + "lines": [ + { + "bbox": [ + 308, + 323, + 368, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 323, + 368, + 376 + ], + "type": "image", + "image_path": "2dc61c293039e792708ef518ce6744dd1404d7395c9f1e736c40b54c5d212cb9.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "type": "image", + "bbox": [ + 370, + 323, + 455, + 381 + ], + "blocks": [ + { + "bbox": [ + 370, + 323, + 455, + 381 + ], + "lines": [ + { + "bbox": [ + 370, + 323, + 455, + 381 + ], + "spans": [ + { + "bbox": [ + 370, + 323, + 455, + 381 + ], + "type": "image", + "image_path": "a81b6d3de8edacada11ba680be9da78a60da8c73063b4a6670252dddd8b0b201.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 458, + 323, + 544, + 381 + ], + "blocks": [ + { + "bbox": [ + 458, + 323, + 544, + 381 + ], + "lines": [ + { + "bbox": [ + 458, + 323, + 544, + 381 + ], + "spans": [ + { + "bbox": [ + 458, + 323, + 544, + 381 + ], + "type": "image", + "image_path": "332e8ae0fe3aaa5a4391ce7bcee64fcc21e0f83f2fbda3ae1f8e1a3214739393.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 312, + 475, + 359, + 520 + ], + "blocks": [ + { + "bbox": [ + 307, + 464, + 362, + 475 + ], + "lines": [ + { + "bbox": [ + 307, + 464, + 362, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 464, + 362, + 475 + ], + "type": "text", + "content": "Input Edge Map" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 312, + 475, + 359, + 520 + ], + "lines": [ + { + "bbox": [ + 312, + 475, + 359, + 520 + ], + "spans": [ + { + "bbox": [ + 312, + 475, + 359, + 520 + ], + "type": "image", + "image_path": "31b1df148d6c6de23c999ffade5f1dbd5fb4fc80aaa0116bd7e8a51219eec901.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 364, + 415, + 544, + 561 + ], + "blocks": [ + { + "bbox": [ + 364, + 415, + 544, + 561 + ], + "lines": [ + { + "bbox": [ + 364, + 415, + 544, + 561 + ], + "spans": [ + { + "bbox": [ + 
364, + 415, + 544, + 561 + ], + "type": "image", + "image_path": "0e1af3b81c89ef415836333616082c06bb5a4f1e64c77537ccebfc922c49bfb7.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 563, + 545, + 597 + ], + "lines": [ + { + "bbox": [ + 305, + 563, + 545, + 597 + ], + "spans": [ + { + "bbox": [ + 305, + 563, + 545, + 597 + ], + "type": "text", + "content": "Figure 7. Qualitative comparisons on edge2car. pix2pix3D (Ours) and Pix2NeRF [6] are trained on shapenet-car [10], and pix2pix3D achieves better quality and alignment than Pix2NeRF." + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 546, + 713 + ], + "type": "text", + "content": "edge2cat, and edge2car in our experiments. For seg2face, we use CelebAMask-HQ [38] for evaluation. CelebAMask-HQ contains 30,000 high-resolution face images from CelebA [41], and each image has a facial part segmentation mask and a predicted pose. The segmentation masks contain 19 classes, including skin, eyebrows, ears, mouth, lip, etc. The pose associated with each image segmentation is predicted by HopeNet [60]. We split the CelebAMask-HQ dataset into" + } + ] + } + ], + "index": 49 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4438" + } + ] + } + ], + "index": 50 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 70, + 298, + 194 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 298, + 194 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 298, + 194 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 298, + 194 + ], + "type": "table", + "html": "
Seg2FaceQUALITYALIGNMENT
SGFVV Identity ↓
CELEBAMASK [38]FID ↓KID ↓Diversity ↑mIoU ↑acc ↑
SEAN [87]32.740.0180.290.520.85N/A
SoFGAN [11]23.340.0120.330.530.890.58
PIX2NERF [6]54.230.0420.160.360.650.44
PIX2PIX3D (OURS)
W/O 3D LABELS12.960.0050.30N/A (0.43)N/A (0.81)0.38
W/O CVC11.620.0040.300.50 (0.50)0.87 (0.85)0.42
FULL MODEL11.540.0030.280.51 (0.52)0.90 (0.88)0.36
FULL MODEL†11.130.0030.290.51 (0.50)0.90 (0.87)0.36
", + "image_path": "2374ae64585ac4b56a0d85dde8600055ee5f40a442ea6d68e4a74da910eba874.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 56, + 405, + 278, + 496 + ], + "blocks": [ + { + "bbox": [ + 46, + 201, + 289, + 400 + ], + "lines": [ + { + "bbox": [ + 46, + 201, + 289, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 201, + 289, + 400 + ], + "type": "text", + "content": "Table 1. Seg2face Evaluation. Our metrics include image quality (FID, KID, SG Diversity), alignment (mIoU and acc against GT label maps), and multi-view consistency (FVV Identity). Single-generation diversity (SG Diversity) is obtained by computing the LPIPS metric between randomly generated pairs given a single conditional input. To evaluate alignment, we compare the generated label maps against the ground truth in terms of mIoU and pixel accuracy (acc). Alternatively, given a generated image, one could estimate label maps via a face parser, and compare those against the ground truth (numbers in parentheses). We include SEAN [87] and SoFGAN [11] as baselines, and modify Pix2NeRF [6] to take conditional input. Our method achieves the best quality, alignment ACC, and FVV Identity while being competitive on SG Diversity. SoFGAN tends to have better alignment but worse 3D consistency. We also ablate our method w.r.t the 3D labels and the cross-view consistency (CVC) loss. Our 3D labels are crucial for alignment, while the CVC loss improves multi-view consistency. Using pretrained models from EG3D " + }, + { + "bbox": [ + 46, + 201, + 289, + 400 + ], + "type": "inline_equation", + "content": "(\\dagger)" + }, + { + "bbox": [ + 46, + 201, + 289, + 400 + ], + "type": "text", + "content": " also improves the performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 405, + 278, + 496 + ], + "lines": [ + { + "bbox": [ + 56, + 405, + 278, + 496 + ], + "spans": [ + { + "bbox": [ + 56, + 405, + 278, + 496 + ], + "type": "table", + "html": "
Edge2CarQUALITYALIGNMENT
FID ↓KID ↓SG Diversity ↑AP ↑
PIX2NERF [6]23.420.0140.060.28
PIX2PIX3D (OURS)
w/o 3D LABELS10.730.0050.120.45 (0.42)
w/o CVC9.420.0040.130.61 (0.59)
FULL MODEL8.310.0040.130.63 (0.59)
", + "image_path": "1189952badc1b804dbe5b9f36263042554a0e2e5a2af12b3b6e49f5d3584f625.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "a training set of 24,183, a validation set of 2,993, and a test set of 2,824, following the original work [38]. For seg2cat and edge2cat, we use AFHQ-cat [16], which contains 5,065 images at " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "512 \\times" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " resolution. We estimate the viewpoints using unsup3d [74]. We extract the edges using pidinet [66] and obtain segmentation by clustering DINO features [2] into 6 classes. For edge2car, we use 3D models from shapenet-" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 307, + 70, + 545, + 161 + ], + "blocks": [ + { + "bbox": [ + 46, + 504, + 288, + 614 + ], + "lines": [ + { + "bbox": [ + 46, + 504, + 288, + 614 + ], + "spans": [ + { + "bbox": [ + 46, + 504, + 288, + 614 + ], + "type": "text", + "content": "Table 2. Edge2car Evaluation. We compare our method with Pix2NeRF [6] on edge2car using the shapenet-car [10] dataset. Similar to Table 1, we evaluate FID, KID, and SG Diversity for image quality. We also evaluate the alignment with the input edge map using AP. Similarly, we can either run informative drawing [7] on generated images to obtain edge maps (numbers in parentheses) or directly use generated edge maps to calculate the metrics. We achieve better image quality and alignment than Pix2NeRF. We also find that using 3D labels and cross-view consistency loss is helpful regarding FID and AP metrics." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 70, + 545, + 161 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 545, + 161 + ], + "type": "table", + "html": "
Seg2CatFID ↓QUALITYALIGNMENT
AFHQ-CAT [34]KID ↓SG Diversity ↑mIoU ↑acc ↑
PIX2NERF [6]43.920.0810.150.270.58
OURS
w/o 3D LABELS10.410.0040.26N/A (0.49)N/A (0.69)
w/o CVC9.640.0040.260.66 (0.63)0.76 (0.73)
FULL MODEL8.620.0030.270.66 (0.62)0.78 (0.73)
", + "image_path": "e822f0db70976b8ad5509a5bb9cb91533e8845b520d1e7ecbc87b76561ea8ff1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 169, + 547, + 224 + ], + "lines": [ + { + "bbox": [ + 304, + 169, + 547, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 169, + 547, + 224 + ], + "type": "text", + "content": "Table 3. Seg2cat Evaluation. We compare our method with Pix2NeRF [6] on Seg2Cat using AFHQ-cat dataset [16], with segmentation obtained by clustering DINO features [2]. Similar to Table 1, we evaluate the image quality and alignment. Ours performs better in all metrics." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 313, + 230, + 543, + 361 + ], + "blocks": [ + { + "bbox": [ + 313, + 230, + 543, + 361 + ], + "lines": [ + { + "bbox": [ + 313, + 230, + 543, + 361 + ], + "spans": [ + { + "bbox": [ + 313, + 230, + 543, + 361 + ], + "type": "image", + "image_path": "5b3d08623c8b1ae15837e2929eac3d8a625b8e094d1d60e27bc8bf860a30031c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 373, + 545, + 396 + ], + "lines": [ + { + "bbox": [ + 305, + 373, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 305, + 373, + 545, + 396 + ], + "type": "text", + "content": "Figure 8. Semantic Mesh. We show semantic meshes of human and cat faces from marching cubes colored by 3D labels." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "text", + "content": "car [10] and render 500,000 images at " + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "inline_equation", + "content": "128 \\times" + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "text", + "content": " resolution for training, and 30,000 for evaluation. We extract the edges using informative drawing [7]. We train our model at " + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "inline_equation", + "content": "512 \\times" + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "text", + "content": " resolution except for " + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "inline_equation", + "content": "128 \\times" + }, + { + "bbox": [ + 304, + 406, + 545, + 454 + ], + "type": "text", + "content": " in the edge2car task." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "text", + "content": "Running Time. For training the model at " + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "inline_equation", + "content": "512 \\times" + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "text", + "content": " resolution, it takes about three days on eight RTX 3090 GPUs. But we can significantly reduce the training time to 4 hours if we initialize parts of our model with pretrained weights from EG3D [8]. 
During inference, our model takes " + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "inline_equation", + "content": "10\\mathrm{ms}" + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "text", + "content": " to obtain the style vector, and another " + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "inline_equation", + "content": "30\\mathrm{ms}" + }, + { + "bbox": [ + 304, + 457, + 545, + 554 + ], + "type": "text", + "content": " to render the final image and the label map on a single RTX A5000. The low latency (25 FPS) allows for interactive user editing." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 560, + 417, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 560, + 417, + 572 + ], + "spans": [ + { + "bbox": [ + 306, + 560, + 417, + 572 + ], + "type": "text", + "content": "4.1. Evaluation metrics" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 578, + 545, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 578, + 545, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 545, + 615 + ], + "type": "text", + "content": "We evaluate the models from two aspects: 1) the image quality regarding fidelity and diversity, and 2) the alignment between input label maps and generated outputs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 714 + ], + "type": "text", + "content": "Quality Metrics. Following prior works [21, 57], we use the clean-fid library [58] to compute Fréchet Inception Distance (FID) [23] and Kernel Inception Distance (KID) [4] to measure the distribution distance between synthesized results and real images. We also evaluate the single-generation diversity (SG Diversity) by calculating the LPIPS metric between randomly generated pairs given a single input following prior works [11, 85]. For FID and KID, we generate" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4439" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 171, + 162 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 171, + 162 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 171, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 171, + 162 + ], + "type": "image", + "image_path": "ddcc366d3be846fb4986f5b23ba0240c5f84a3fb77a866926f6ae612b9a99c3e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "lines": [ + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "spans": [ + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "text", + "content": "Figure 9. We study the effect of random pose sampling probability " + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "text", + "content": " during training. 
Without random poses (" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "inline_equation", + "content": "p = 0" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "text", + "content": "), the model achieves the best alignment with input semantic maps, with reduced image quality. In contrast, only using random poses (" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "inline_equation", + "content": "p = 1" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "text", + "content": ") achieves the best image quality, while results fail to align with input maps. We find " + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "inline_equation", + "content": "p = 0.5" + }, + { + "bbox": [ + 46, + 164, + 547, + 198 + ], + "type": "text", + "content": " balances the image quality and input alignment." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 171, + 70, + 296, + 162 + ], + "blocks": [ + { + "bbox": [ + 171, + 70, + 296, + 162 + ], + "lines": [ + { + "bbox": [ + 171, + 70, + 296, + 162 + ], + "spans": [ + { + "bbox": [ + 171, + 70, + 296, + 162 + ], + "type": "image", + "image_path": "86a570277fc93bf09c5b134d5bfdbc4c976c22b004fdaeb0c62af6db371f5793.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 298, + 70, + 421, + 162 + ], + "blocks": [ + { + "bbox": [ + 298, + 70, + 421, + 162 + ], + "lines": [ + { + "bbox": [ + 298, + 70, + 421, + 162 + ], + "spans": [ + { + "bbox": [ + 298, + 70, + 421, + 162 + ], + "type": "image", + "image_path": "be042cfffb6bb81a1708257c1eb218309bcad47386f9707ff233a8fd004df1b2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 422, + 71, + 545, + 162 + ], + "blocks": [ + { + "bbox": [ + 422, + 71, + 545, + 162 + ], + "lines": [ + { + "bbox": [ + 422, + 71, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 422, + 71, + 545, + 162 + ], + "type": "image", + "image_path": "24569639882bfc6cc5f28e3701610500a4d7632303263fe8a02bc4e6d00ac557.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 55, + 202, + 545, + 326 + ], + "blocks": [ + { + "bbox": [ + 55, + 202, + 545, + 326 + ], + "lines": [ + { + "bbox": [ + 55, + 202, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 55, + 202, + 545, + 326 + ], + "type": "image", + "image_path": "8d4b5c7c30f547f27dc1f0dffe40615b6cedbbd5014ad3204a95d10f266d6b63.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 328, + 547, + 363 + ], + "lines": [ + { + "bbox": [ + 46, + 328, + 547, + 363 + ], + "spans": [ + { + "bbox": [ + 46, + 328, + 547, + 363 + ], + "type": "text", + "content": "Figure 10. Cross-view Editing of Edge2Car. Our 3D editing system allows users to edit label maps from any viewpoint instead of only the input view. Importantly, our feed-forward encoder allows fast inference of the latent code without GAN-inversion. Typically, a single forward pass of rendering takes only " + }, + { + "bbox": [ + 46, + 328, + 547, + 363 + ], + "type": "inline_equation", + "content": "40\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 328, + 547, + 363 + ], + "type": "text", + "content": " on a single RTX A5000, which enables interactive editing. Please check our demo video on our website." 
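As a usage note for the quality metrics of Section 4.1 above, a hedged sketch assuming the folder-based interface of the clean-fid library; the directory paths are placeholders, not the paper's file layout.

```python
from cleanfid import fid

real_dir = "data/celebamask_test"      # placeholder path to real images
fake_dir = "outputs/seg2face_samples"  # placeholder path to generated images

fid_score = fid.compute_fid(real_dir, fake_dir)
kid_score = fid.compute_kid(real_dir, fake_dir)
print(f"FID: {fid_score:.2f}  KID: {kid_score:.4f}")
```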
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 373, + 288, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 373, + 288, + 410 + ], + "spans": [ + { + "bbox": [ + 46, + 373, + 288, + 410 + ], + "type": "text", + "content": "10 images per label map in the test set using randomly sampled " + }, + { + "bbox": [ + 46, + 373, + 288, + 410 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 46, + 373, + 288, + 410 + ], + "type": "text", + "content": ". We compare our generated images with the whole dataset, including training and test images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 412, + 287, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 412, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 46, + 412, + 287, + 532 + ], + "type": "text", + "content": "Alignment Metrics. We evaluate models on the test set using mean Intersection-over-Union (mIoU) and pixel accuracy (acc) for segmentation maps following existing works [57, 63], and average precision (AP) for edge maps. For those models that render label maps as output, we directly compare them with ground-truth labels. Otherwise, we first predict the label maps from the output RGB images using off-the-shelf networks [38, 66], and then compare the prediction with the ground truth. The metrics regarding such predicted semantic maps are reported within brackets in Table 1 and Table 2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 533, + 288, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 288, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 288, + 568 + ], + "type": "text", + "content": "For seg2face, we evaluate the preservation of facial identity from different viewpoints (FVV Identity) by calculating their distances with the dlib face recognition algorithm*." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 577, + 167, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 577, + 167, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 577, + 167, + 590 + ], + "type": "text", + "content": "4.2. Baseline comparison" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 598, + 287, + 696 + ], + "type": "text", + "content": "Baselines. Since there are no prior works on conditional 3D-aware image synthesis, we make minimum modifications to Pix2NeRF [6] to be conditional on label maps instead of images. For a thorough comparison, we introduce several baselines: SEAN [87] and SoFGAN [11]. 2D baselines like SEAN [87] cannot generate multi-view images by design (N/A for FVV Identity), while SoFGAN [11] uses an unconditional 3D semantic map generator before the 2D" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 373, + 514, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 373, + 514, + 385 + ], + "spans": [ + { + "bbox": [ + 306, + 373, + 514, + 385 + ], + "type": "text", + "content": "generator so we can evaluate FVV Identity for that." 
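A minimal NumPy sketch of the alignment metrics described above, computing pixel accuracy and mean IoU between a generated label map and the ground truth; the class count and map size are illustrative.

```python
import numpy as np

def pixel_acc_and_miou(pred, gt, num_classes):
    """pred, gt: integer label maps of identical shape."""
    acc = float((pred == gt).mean())
    ious = []
    for c in range(num_classes):
        inter = np.logical_and(pred == c, gt == c).sum()
        union = np.logical_or(pred == c, gt == c).sum()
        if union > 0:                         # ignore classes absent from both maps
            ious.append(inter / union)
    return acc, float(np.mean(ious))

acc, miou = pixel_acc_and_miou(np.random.randint(0, 6, (64, 64)),
                               np.random.randint(0, 6, (64, 64)), num_classes=6)
```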
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 394, + 547, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 394, + 547, + 574 + ], + "spans": [ + { + "bbox": [ + 304, + 394, + 547, + 574 + ], + "type": "text", + "content": "Results. Figure 4 shows the qualitative comparison for seg2face and Table 1 reports the evaluation results. SoFGAN [11] tends to produce results with slightly better alignment but worse 3D consistency for its 2D RGB generator. Our method achieves the best quality, alignment acc, and FVV Identity while being competitive with 2D baselines on SG diversity. Figure 5 shows the qualitative ablation on seg2face and seg2cat. Table ?? reports the metrics for seg2cat. Figure 6 shows the example results for edge2cat. Figure 7 shows the qualitative comparison for edge2car and Table 2 reports the metrics. Our method achieves the best image quality and alignment. Figure 8 shows semantic meshes of human and cat faces, extracted by marching cubes and colored by our learned 3D labels. We provide more evaluation results in the appendix of our arXiv version." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 548, + 715 + ], + "type": "text", + "content": "Ablation Study. We compare our full method to several variants. Specifically, (1) w/o 3D LABELS, we remove the branch of rendering label maps from our method, and (2) w/o CVC, we remove the cross-view consistency loss. From Table 1, Table 2, and Figure 5, rendering label maps is crucial for the alignment with the input. We posit that the joint learning of appearance, geometry, and label information poses strong constraints on correspondence between the input label maps and the 3D representation. Thus our method can synthesize images pixel-aligned with the inputs. Our CVC loss helps preserve the facial identity from different viewpoints." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 58, + 703, + 276, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 276, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 276, + 713 + ], + "type": "text", + "content": "\\*https://github.com/ageitgey/face_recognition" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "4440" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 286, + 320 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 286, + 320 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 286, + 320 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 286, + 320 + ], + "type": "image", + "image_path": "701762258dbb14addbc873741842e7690298e349830108143d07aff390405b8c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 324, + 288, + 369 + ], + "lines": [ + { + "bbox": [ + 46, + 324, + 288, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 324, + 288, + 369 + ], + "type": "text", + "content": "Figure 11. Multi-modal Synthesis. The leftmost column is the input segmentation map. We use the same segmentation map for each row. 
We generate multi-modal results by randomly sampling an appearance style for each column." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "spans": [ + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "text", + "content": "Analysis on random sampling of poses. We study the effect of the different probabilities of sampling random poses during training, as shown in Figure 9. When sampling no random poses " + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "inline_equation", + "content": "(p = 0)" + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "text", + "content": ", the model best aligns with input label maps with suboptimal image quality. Conversely, only sampling random poses " + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "inline_equation", + "content": "(p = 1)" + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "text", + "content": " gives the best image quality but suffers huge misalignment with input label maps. We find " + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "inline_equation", + "content": "p = 0.5" + }, + { + "bbox": [ + 46, + 373, + 288, + 480 + ], + "type": "text", + "content": " achieves the balance between the image quality and the alignment with the input." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 487, + 129, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 487, + 129, + 499 + ], + "spans": [ + { + "bbox": [ + 47, + 487, + 129, + 499 + ], + "type": "text", + "content": "4.3. Applications" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 508, + 287, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 287, + 603 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 287, + 603 + ], + "type": "text", + "content": "Cross-view Editing. As shown in Figure 10, our 3D editing system allows users to generate and edit label maps from any viewpoint instead of only the input view. The edited label map is further fed into the conditional encoder to update the 3D representation. Unlike GAN inversion [83], our feedforward conditional encoder allows fast inference of the latent code. Thus, a single forward pass of our full model takes only " + }, + { + "bbox": [ + 46, + 508, + 287, + 603 + ], + "type": "inline_equation", + "content": "40\\mathrm{ms}" + }, + { + "bbox": [ + 46, + 508, + 287, + 603 + ], + "type": "text", + "content": " on a single RTX A5000." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 289, + 715 + ], + "type": "text", + "content": "Multi-modal synthesis and interpolation. Like other style-based generative models [8, 21, 34, 36], our method can disentangle the geometry and appearance information. Specifically, the input label map captures the geometry information while the randomly sampled latent code controls the appearance. We show style manipulation results in Figure 11. We can also interpolate both the geometry styles and the appearance styles (Figure 12). These results show the clear disentanglement of our 3D representation." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 71, + 545, + 425 + ], + "blocks": [ + { + "bbox": [ + 307, + 71, + 545, + 425 + ], + "lines": [ + { + "bbox": [ + 307, + 71, + 545, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 71, + 545, + 425 + ], + "type": "image", + "image_path": "8ab2ba4ff5cd2044bcafae38e595a31eb99f389503ece2b7b84028a1763f0c5c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 427, + 547, + 483 + ], + "lines": [ + { + "bbox": [ + 304, + 427, + 547, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 427, + 547, + 483 + ], + "type": "text", + "content": "Figure 12. Interpolation. In each " + }, + { + "bbox": [ + 304, + 427, + 547, + 483 + ], + "type": "inline_equation", + "content": "5 \\times 5" + }, + { + "bbox": [ + 304, + 427, + 547, + 483 + ], + "type": "text", + "content": " grid, the images at the top left and bottom right are generated from the input maps next to them. Each row interpolates two images in label space, while each column interpolates the appearance. For camera poses, we interpolate the pitch along the row and the yaw along the column." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 493, + 375, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 493, + 375, + 506 + ], + "spans": [ + { + "bbox": [ + 306, + 493, + 375, + 506 + ], + "type": "text", + "content": "5. Discussion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 515, + 547, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 515, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 515, + 547, + 624 + ], + "type": "text", + "content": "We have introduced pix2pix3D, a 3D-aware conditional generative model for controllable image synthesis. Given a 2D label map, our model allows users to render images given any viewpoint. Our model augments the neural field with 3D labels, assigning label, color, and density to every 3D point, allowing for the simultaneous rendering of the image and a pixel-aligned label map. The learned 3D labels further enable interactive 3D cross-view editing. We discuss the limitations and societal impact in our arXiv version." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgments. We thank Sheng-Yu Wang, Nupur Kumari, Gaurav Parmer, Ruihan Gao, Muyang Li, George Cazenavette, Andrew Song, Zhipeng Bao, Tamaki Kojima, Krishna Wadhwani, Takuya Narihira, and Tatsuo Fujiwara for their discussion and help. We are grateful for the support from Sony Corporation, Singapore DSTA, and the CMU Argo AI Center for Autonomous Vehicle Research." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "4441" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 288, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 288, + 134 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 288, + 134 + ], + "type": "text", + "content": "[1] Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 288, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 288, + 167 + ], + "type": "text", + "content": "[2] Shir Amir, Yossi Gandelsman, Shai Bagon, and Tali Dekel. Deep vit features as dense visual descriptors. ECCVW What is Motion For?, 2022. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 168, + 288, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 168, + 288, + 212 + ], + "spans": [ + { + "bbox": [ + 53, + 168, + 288, + 212 + ], + "type": "text", + "content": "[3] David Bau, Hendrik Strobelt, William Peebles, Jonas Wulff, Bolei Zhou, Jun-Yan Zhu, and Antonio Torralba. Semantic photo manipulation with a generative image prior. In ACM SIGGRAPH, 2019. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 213, + 288, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 213, + 288, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 213, + 288, + 247 + ], + "type": "text", + "content": "[4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. In International Conference on Learning Representations (ICLR), 2018. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 291 + ], + "type": "text", + "content": "[5] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In International Conference on Learning Representations (ICLR), 2019. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 292, + 288, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 292, + 288, + 345 + ], + "spans": [ + { + "bbox": [ + 53, + 292, + 288, + 345 + ], + "type": "text", + "content": "[6] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. 
Pix2nerf: Unsupervised conditional " + }, + { + "bbox": [ + 53, + 292, + 288, + 345 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 53, + 292, + 288, + 345 + ], + "type": "text", + "content": "-gan for single image to neural radiance fields translation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 5, 6, 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 347, + 288, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 288, + 391 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 288, + 391 + ], + "type": "text", + "content": "[7] Caroline Chan, Frédo Durand, and Phillip Isola. Learning to generate line drawings that convey geometry and semantics. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 392, + 288, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 288, + 457 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 288, + 457 + ], + "type": "text", + "content": "[8] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 459, + 288, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 459, + 288, + 513 + ], + "spans": [ + { + "bbox": [ + 53, + 459, + 288, + 513 + ], + "type": "text", + "content": "[9] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 288, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 288, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 288, + 590 + ], + "type": "text", + "content": "[10] Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. ShapeNet: An Information-Rich 3D Model Repository. Technical Report arXiv:1512.03012 [cs.GR], Stanford University — Princeton University — Toyota Technological Institute at Chicago, 2015. 2, 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 591, + 288, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 288, + 624 + ], + "type": "text", + "content": "[11] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. In ACM SIGGRAPH, 2021. 2, 5, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 625, + 288, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 288, + 658 + ], + "type": "text", + "content": "[12] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision (ECCV), 2022. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 712 + ], + "type": "text", + "content": "[13] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 116 + ], + "type": "text", + "content": "[14] Tao Chen, Zhe Zhu, Ariel Shamir, Shi-Min Hu, and Daniel Cohen-Or. 3-sweep: Extracting editable objects from a single photo. ACM Transactions on Graphics (TOG), 32(6):1-10, 2013. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 119, + 547, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 547, + 163 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 547, + 163 + ], + "type": "text", + "content": "[15] Yuedong Chen, Qianyi Wu, Chuanxia Zheng, Tat-Jen Cham, and Jianfei Cai. Sem2nerf: Converting single-view semantic masks to neural radiance fields. In European Conference on Computer Vision (ECCV), 2022. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 164, + 547, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 164, + 547, + 208 + ], + "spans": [ + { + "bbox": [ + 307, + 164, + 547, + 208 + ], + "type": "text", + "content": "[16] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 210, + 547, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 210, + 547, + 265 + ], + "spans": [ + { + "bbox": [ + 308, + 210, + 547, + 265 + ], + "type": "text", + "content": "[17] JOHANNA Delanoy, ADRIEN Bousseau, MATHIEU Aubry, PHILLIP Isola, and ALEXEIA A Efros. What you sketch is what you get: 3d sketching using multi-view deep volumetric prediction. In ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D), 2018. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 267, + 547, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 267, + 547, + 312 + ], + "spans": [ + { + "bbox": [ + 308, + 267, + 547, + 312 + ], + "type": "text", + "content": "[18] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised NeRF: Fewer views and faster training for free. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 313, + 547, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 313, + 547, + 357 + ], + "spans": [ + { + "bbox": [ + 308, + 313, + 547, + 357 + ], + "type": "text", + "content": "[19] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. 
In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 358, + 546, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 358, + 546, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 358, + 546, + 403 + ], + "type": "text", + "content": "[20] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, 2014. 1, 2, 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 405, + 547, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 547, + 449 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 547, + 449 + ], + "type": "text", + "content": "[21] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2, 3, 4, 6, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 450, + 547, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 450, + 547, + 494 + ], + "spans": [ + { + "bbox": [ + 308, + 450, + 547, + 494 + ], + "type": "text", + "content": "[22] Philipp Henzler, Niloy J Mitra, and Tobias Ritschel. Escaping plato's cave: 3d shape from adversarial rendering. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 497, + 547, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 497, + 547, + 550 + ], + "spans": [ + { + "bbox": [ + 308, + 497, + 547, + 550 + ], + "type": "text", + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two timescale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems (NeurIPS), 2017. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 553, + 547, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 553, + 547, + 586 + ], + "spans": [ + { + "bbox": [ + 308, + 553, + 547, + 586 + ], + "type": "text", + "content": "[24] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 588, + 547, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 588, + 547, + 621 + ], + "spans": [ + { + "bbox": [ + 308, + 588, + 547, + 621 + ], + "type": "text", + "content": "[25] Hsin-Ping Huang, Hung-Yu Tseng, Hsin-Ying Lee, and Jia-Bin Huang. Semantic view synthesis. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 624, + 547, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 547, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 547, + 656 + ], + "type": "text", + "content": "[26] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In European Conference on Computer Vision (ECCV), 2018. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 658, + 547, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 547, + 712 + ], + "type": "text", + "content": "[27] Zeng Huang, Tianye Li, Weikai Chen, Yajie Zhao, Jun Xing, Chloe Legendre, Linjie Luo, Chongyang Ma, and Hao Li. Deep volumetric video from very sparse multi-view performance capture. In European Conference on Computer Vision (ECCV), pages 351-369, 2018. 2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4442" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 105 + ], + "type": "text", + "content": "[28] Takeo Igarashi, Satoshi Matsuoka, and Hidehiko Tanaka. Teddy: a sketching interface for 3d freeform design. In ACM SIGGRAPH, 1999. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 288, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 288, + 152 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 288, + 152 + ], + "type": "text", + "content": "[29] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 153, + 288, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 153, + 288, + 196 + ], + "spans": [ + { + "bbox": [ + 49, + 153, + 288, + 196 + ], + "type": "text", + "content": "[30] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf on a diet: Semantically consistent few-shot view synthesis. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 199, + 287, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 199, + 287, + 232 + ], + "spans": [ + { + "bbox": [ + 49, + 199, + 287, + 232 + ], + "type": "text", + "content": "[31] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerffaceediting: Disentangled face editing in neural radiance fields. In ACM SIGGRAPH Asia, 2022. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 234, + 287, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 234, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 49, + 234, + 287, + 255 + ], + "type": "text", + "content": "[32] James T Kajiya and Brian P Von Herzen. Ray tracing volume densities. ACM SIGGRAPH, 18(3):165-174, 1984. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 258, + 288, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 258, + 288, + 301 + ], + "spans": [ + { + "bbox": [ + 49, + 258, + 288, + 301 + ], + "type": "text", + "content": "[33] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 304, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 304, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 49, + 304, + 288, + 346 + ], + "type": "text", + "content": "[34] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2, 6, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "type": "text", + "content": "[35] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 395, + 288, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 395, + 288, + 438 + ], + "spans": [ + { + "bbox": [ + 49, + 395, + 288, + 438 + ], + "type": "text", + "content": "[36] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 441, + 288, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 441, + 288, + 483 + ], + "spans": [ + { + "bbox": [ + 49, + 441, + 288, + 483 + ], + "type": "text", + "content": "[37] Natasha Kholgade, Tomas Simon, Alexei Efros, and Yaser Sheikh. 3d object manipulation in a single photograph using stock 3d models. ACM Transactions on Graphics (TOG), 33(4):1-12, 2014." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "spans": [ + { + "bbox": [ + 49, + 487, + 288, + 529 + ], + "type": "text", + "content": "[38] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 533, + 288, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 533, + 288, + 574 + ], + "spans": [ + { + "bbox": [ + 49, + 533, + 288, + 574 + ], + "type": "text", + "content": "[39] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Simon Lucey. Barf: Bundle-adjusting neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "type": "text", + "content": "[40] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. Advances in neural information processing systems, 30, 2017. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 613, + 288, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 613, + 288, + 655 + ], + "spans": [ + { + "bbox": [ + 49, + 613, + 288, + 655 + ], + "type": "text", + "content": "[41] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), 2015. 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 658, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 658, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 658, + 288, + 712 + ], + "type": "text", + "content": "[42] Zhaoliang Lun, Matheus Gadelha, Evangelos Kalogerakis, Subhransu Maji, and Rui Wang. 3d shape reconstruction from sketches via multi-view convolutional networks. In 2017 International Conference on 3D Vision (3DV). IEEE, 2017. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 128 + ], + "type": "text", + "content": "[43] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 130, + 547, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 130, + 547, + 174 + ], + "spans": [ + { + "bbox": [ + 307, + 130, + 547, + 174 + ], + "type": "text", + "content": "[44] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. In IEEE International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 176, + 547, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 176, + 547, + 218 + ], + "spans": [ + { + "bbox": [ + 308, + 176, + 547, + 218 + ], + "type": "text", + "content": "[45] Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for gans do actually converge? In International Conference on Machine Learning (ICML), 2018. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 222, + 547, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 222, + 547, + 276 + ], + "spans": [ + { + "bbox": [ + 308, + 222, + 547, + 276 + ], + "type": "text", + "content": "[46] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020. 
2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 279, + 546, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 279, + 546, + 300 + ], + "spans": [ + { + "bbox": [ + 308, + 279, + 546, + 300 + ], + "type": "text", + "content": "[47] Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 304, + 547, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 304, + 547, + 335 + ], + "spans": [ + { + "bbox": [ + 308, + 304, + 547, + 335 + ], + "type": "text", + "content": "[48] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. In ACM SIGGRAPH, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 338, + 547, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 547, + 392 + ], + "type": "text", + "content": "[49] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In IEEE International Conference on Computer Vision (ICCV), 2019. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 396, + 547, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 396, + 547, + 449 + ], + "spans": [ + { + "bbox": [ + 308, + 396, + 547, + 449 + ], + "type": "text", + "content": "[50] Michael Niemeyer, Jonathan T Barron, Ben Mildenhall, Mehdi SM Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 453, + 547, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 453, + 547, + 495 + ], + "spans": [ + { + "bbox": [ + 308, + 453, + 547, + 495 + ], + "type": "text", + "content": "[51] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 498, + 547, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 498, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 308, + 498, + 547, + 552 + ], + "type": "text", + "content": "[52] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 555, + 547, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 547, + 609 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 547, + 609 + ], + "type": "text", + "content": "[53] Xingang Pan, Xudong Xu, Chen Change Loy, Christian Theobalt, and Bo Dai. A shading-guided generative implicit model for shape-accurate 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 613, + 547, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 547, + 666 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 547, + 666 + ], + "type": "text", + "content": "[54] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 547, + 712 + ], + "type": "text", + "content": "[55] Taesung Park, Alexei A Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for unpaired image-to-image translation. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4443" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 116 + ], + "type": "text", + "content": "[56] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 288, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 288, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 288, + 161 + ], + "type": "text", + "content": "[57] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 162, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 162, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 49, + 162, + 288, + 205 + ], + "type": "text", + "content": "[58] Gaurav Parmar, Richard Zhang, and Jun-Yan Zhu. On aliased resizing and surprising subtleties in gan evaluation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 206, + 288, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 206, + 288, + 249 + ], + "spans": [ + { + "bbox": [ + 49, + 206, + 288, + 249 + ], + "type": "text", + "content": "[59] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In IEEE International Conference on Computer Vision (ICCV), 2021. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 250, + 288, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 250, + 288, + 294 + ], + "spans": [ + { + "bbox": [ + 49, + 250, + 288, + 294 + ], + "type": "text", + "content": "[60] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshop, 2018. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 295, + 288, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 295, + 288, + 347 + ], + "spans": [ + { + "bbox": [ + 49, + 295, + 288, + 347 + ], + "type": "text", + "content": "[61] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "spans": [ + { + "bbox": [ + 49, + 350, + 288, + 392 + ], + "type": "text", + "content": "[62] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2020. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 394, + 288, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 394, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 49, + 394, + 288, + 437 + ], + "type": "text", + "content": "[63] Edgar Schonfeld, Vadim Sushko, Dan Zhang, Juergen Gall, Bernt Schiele, and Anna Khoreva. You only need adversarial supervision for semantic image synthesis. In International Conference on Learning Representations (ICLR), 2021. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 437, + 288, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 437, + 288, + 480 + ], + "spans": [ + { + "bbox": [ + 49, + 437, + 288, + 480 + ], + "type": "text", + "content": "[64] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 482, + 288, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 482, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 49, + 482, + 288, + 514 + ], + "type": "text", + "content": "[65] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 515, + 288, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 515, + 288, + 558 + ], + "spans": [ + { + "bbox": [ + 49, + 515, + 288, + 558 + ], + "type": "text", + "content": "[66] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. Pixel difference networks for efficient edge detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 
5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 559, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 559, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 49, + 559, + 288, + 602 + ], + "type": "text", + "content": "[67] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew Davison. iMAP: Implicit mapping and positioning in real-time. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 603, + 288, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 603, + 288, + 647 + ], + "spans": [ + { + "bbox": [ + 49, + 603, + 288, + 647 + ], + "type": "text", + "content": "[68] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. In ACM Transactions on Graphics (TOG), 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 647, + 288, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 647, + 288, + 690 + ], + "spans": [ + { + "bbox": [ + 49, + 647, + 288, + 690 + ], + "type": "text", + "content": "[69] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2, 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 691, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 691, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 691, + 288, + 712 + ], + "type": "text", + "content": "[70] Matthew Tancik, Ben Mildenhall, Terrance Wang, Divi Schmidt, Pratul P Srinivasan, Jonathan T Barron, and Ren" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 546, + 106 + ], + "type": "text", + "content": "Ng. Learned initializations for optimizing coordinate-based neural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 162 + ], + "type": "text", + "content": "[71] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 163, + 547, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 163, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 163, + 547, + 205 + ], + "type": "text", + "content": "[72] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. In ACM Transactions on Graphics (TOG), 2021. 
2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 208, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 208, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 308, + 208, + 547, + 262 + ], + "type": "text", + "content": "[73] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 264, + 547, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 264, + 547, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 264, + 547, + 308 + ], + "type": "text", + "content": "[74] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 309, + 547, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 309, + 547, + 352 + ], + "spans": [ + { + "bbox": [ + 308, + 309, + 547, + 352 + ], + "type": "text", + "content": "[75] Xiaohua Xie, Kai Xu, Niloy J Mitra, Daniel Cohen-Or, Wenyong Gong, Qi Su, and Baoquan Chen. Sketch-to-design: Context-based part assembly. In Computer Graphics Forum, volume 32, pages 233–245. Wiley Online Library, 2013. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 354, + 546, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 546, + 397 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 546, + 397 + ], + "type": "text", + "content": "[76] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 399, + 546, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 546, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 546, + 442 + ], + "type": "text", + "content": "[77] Shunyu Yao, Tzu Ming Hsu, Jun-Yan Zhu, Jiajun Wu, Antonio Torralba, Bill Freeman, and Josh Tenenbaum. 3d-aware scene manipulation via inverse graphics. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 547, + 487 + ], + "type": "text", + "content": "[78] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. Pixelnerf: Neural radiance fields from one or few images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 488, + 547, + 533 + ], + "type": "text", + "content": "[79] Jichao Zhang, Enver Sangineto, Hao Tang, Aliaksandr Siarohin, Zhun Zhong, Nicu Sebe, and Wei Wang. 3d-aware semantic-guided generative model for human synthesis. In European Conference on Computer Vision (ECCV), 2022. 
2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 534, + 546, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 534, + 546, + 566 + ], + "spans": [ + { + "bbox": [ + 308, + 534, + 546, + 566 + ], + "type": "text", + "content": "[80] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 567, + 547, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 567, + 547, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 567, + 547, + 611 + ], + "type": "text", + "content": "[81] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 4" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 613, + 546, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 613, + 546, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 546, + 645 + ], + "type": "text", + "content": "[82] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision (ECCV), 2020. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 647, + 546, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 647, + 546, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 647, + 546, + 689 + ], + "type": "text", + "content": "[83] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A Efros. Generative visual manipulation on the natural image manifold. In European Conference on Computer Vision (ECCV), 2016. 2, 8" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 691, + 546, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 546, + 712 + ], + "type": "text", + "content": "[84] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4444" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 296 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "adversarial networks. In IEEE International Conference on Computer Vision (ICCV), 2017. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[85] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darrell, Alexei A Efros, Oliver Wang, and Eli Shechtman. 
Toward multimodal image-to-image translation. Advances in neural information processing systems, 30, 2017. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 288, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 288, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 288, + 195 + ], + "type": "text", + "content": "[86] Jun-Yan Zhu, Zhoutong Zhang, Chengkai Zhang, Jiajun Wu, Antonio Torralba, Josh Tenenbaum, and Bill Freeman. Visual object networks: Image generation with disentangled 3d representations. In Advances in Neural Information Processing Systems (NeurIPS), 2018. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 197, + 288, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 197, + 288, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 197, + 288, + 239 + ], + "type": "text", + "content": "[87] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 5, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 241, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 241, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 241, + 288, + 296 + ], + "type": "text", + "content": "[88] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R. Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4445" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_content_list.json b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..cae6b719e488bbe899242ae3838141f7fad779aa --- /dev/null +++ b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_content_list.json @@ -0,0 +1,1625 @@ +[ + { + "type": "text", + "text": "3D-Aware Face Swapping", + "text_level": 1, + "bbox": [ + 352, + 130, + 617, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yixuan Li Chao Ma* Yichao Yan* Wenhan Zhu Xiaokang Yang MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, China {lyx0208, chaoma, yanyichao, zhuwenhan823, xkyang}@sjtu.edu.cn", + "bbox": [ + 125, + 180, + 843, + 233 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/db2a32cb00c1a096529925fa25c9fba4b1870e608e0ae2061971e078920cfdd6.jpg", + "image_caption": [ + "Source" + ], + "image_footnote": [], + "bbox": [ + 81, + 270, + 197, + 450 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/645096ca6885eef89157cd30fb1a86f49e4367afe473d1792a150ff241f269a7.jpg", + "image_caption": [ + "Target" + ], + "image_footnote": [], + "bbox": [ + 197, + 270, + 428, + 450 + ], + "page_idx": 0 + }, + { 
+ "type": "image", + "img_path": "images/2beb072e7b4fef458b7c6794a12f1ee2512754c90d1d15db8a9f3820adc3bb98.jpg", + "image_caption": [ + "Intermediate Views", + "Figure 1. Demonstration of the proposed 3dSwap. Given single-view source and target images, our method synthesizes high-fidelity and multi-view-consistent images of the swapped faces and the corresponding geometries. More results can be found on our project page." + ], + "image_footnote": [], + "bbox": [ + 428, + 271, + 658, + 450 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/eb4dbc647fb79c18b1d27f2fa5a36f98974b2b986d386ec70793a2f5050469c1.jpg", + "image_caption": [ + "Source View" + ], + "image_footnote": [], + "bbox": [ + 658, + 271, + 774, + 450 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/f7226b694a58a983e49cf7078fa142fa89701e0100628cdfba1e0c25c2b1f54b.jpg", + "image_caption": [ + "Geometry" + ], + "image_footnote": [], + "bbox": [ + 774, + 271, + 888, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 522, + 313, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Face swapping is an important research topic in computer vision with wide applications in entertainment and privacy protection. Existing methods directly learn to swap 2D facial images, taking no account of the geometric information of human faces. In the presence of large pose variance between the source and the target faces, there always exist undesirable artifacts on the swapped face. In this paper, we present a novel 3D-aware face swapping method that generates high-fidelity and multi-view-consistent swapped faces from single-view source and target images. To achieve this, we take advantage of the strong geometry and texture prior of 3D human faces, where the 2D faces are projected into the latent space of a 3D generative model. By disentangling the identity and attribute features in the latent space, we succeed in swapping faces in a 3D-aware manner, being robust to pose variations while transferring fine-grained facial details. Extensive experiments demonstrate the superiority of our 3D-aware face swapping framework in terms of visual quality, identity similarity, and multi-view consistency. Code is available at https://1yx0208.github.io/3dSwap.", + "bbox": [ + 73, + 551, + 472, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 501, + 522, + 630, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Face swapping aims to transfer the identity of a person in the source image to another person in the target image while preserving other attributes like head pose, expression, illumination, background, etc. It has attracted extensive attention recently in the academic and industrial world for its potential wide applications in entertainment [14,30,38] and privacy protection [7,37,48].", + "bbox": [ + 496, + 550, + 890, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The key of face swapping is to transfer the geometric shape of the facial region (i.e., eyes, nose, mouth) and detailed texture information (such as the color of eyes) from the source image to the target image while preserving both geometry and texture of non-facial regions (i.e., hair, background, etc). 
Currently, some 3D-based methods consider geometry prior of human faces by fitting the input image to 3D face models such as 3D Morphable Model (3DMM) [8] to overcome the differences of face orientation and expression between sources and targets [7, 15, 34, 43]. However, these parametric face models only produce coarse frontal faces without fine-grained details, leading to low-resolution and fuzzy swapping results. On the other hand, following Generative Adversarial Network [24], GAN-based [6, 23, 32, 39, 40, 42] or GAN-inversion-based [44, 55, 57, 60] approaches adopt the ad", + "bbox": [ + 496, + 657, + 892, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding authors.", + "bbox": [ + 101, + 886, + 238, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12705", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "versarial training strategy to learn texture information from inputs. Despite the demonstrated photorealistic and high-resolution images, the swapped faces via 2D GANs sustain undesirable artifacts when two input faces undergo large pose variation since the strong 3D geometry prior of human faces is ignored. Moreover, learning to swap faces in 2D images makes little use of the shaped details from sources, leading to poorer performance on identity transferring.", + "bbox": [ + 75, + 90, + 468, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by the recent advances of 3D generative models [12, 13, 20, 25, 45] in synthesizing multi-view consistent images and high-quality 3D shapes, it naturally raises a question: can we perform face swapping in a 3D-aware manner to exploit the strong geometry and texture priors? To answer this question, two challenges arise. First, how to infer 3D prior directly from 3D-GAN models still remains open. Current 3D-aware generative models synthesize their results from a random Gaussian noise $z$ , so that their output images are not controllable. This increases the complexity of inferring the required prior from arbitrary input. Second, the inferred prior corresponding to input images is in the form of a high-dimension feature vector in the latent space of 3D GANs. Simply synthesizing multi-view target images referring to the prior and applying 2D face swapping to them produces not only inconsistent artifacts but also a heavy computational load.", + "bbox": [ + 75, + 212, + 470, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we systematically investigate the geometry and texture prior of these 3D generative models and propose a novel 3D-aware face swapping framework 3dSwap. We introduce a 3D GAN inversion framework to project the 2D inputs into the 3D latent space, motivated by recent GAN inversion approaches [46, 47, 51]. Specifically, we design a learning-based inversion algorithm that trains an encoding network to efficiently and robustly project input images into the latent space of EG3D [12]. 
However, directly borrowing the architecture from 2D approaches is not yet enough since a single-view input provides limited information about the whole human face. To further improve the multi-view consistency of latent code projection, we design a pseudo-multi-view training strategy. This design effectively bridges the domain gap between 2D and 3D. To tackle the second problem, we design a face swapping algorithm based on the 3D latent codes and directly synthesize the swapped faces with the 3D-aware generator. In this way, we achieve 3D GAN-inversion-based face swapping by a latent code manipulating algorithm consisting of style-mixing and interpolation, where latent code interpolation is responsible for identity transferring while style-mixing helps to preserve attributes.", + "bbox": [ + 75, + 468, + 468, + 815 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are threefold:", + "bbox": [ + 96, + 816, + 388, + 830 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- To the best of our knowledge, we first address the 3D-aware face swapping task. The proposed 3dSwap method sets a strong baseline and we hope this work will foster future research into this task.", + "bbox": [ + 94, + 839, + 468, + 898 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We design a learning-based 3D GAN inversion with the pseudo-multi-view training strategy to extract geometry and texture prior from arbitrary input images. We further utilize these strong prior by designing a latent code manipulating algorithm, with which we directly synthesize the final results with the pretrained generator.", + "- Extensive experiments on benchmark datasets demonstrate the superiority of the proposed 3dSwap over state-of-the-art 2D face swapping approaches in identity transferring. Our reconstruction module for 3DGAN inversion performs favorably over the state-of-the-art methods as well." + ], + "bbox": [ + 517, + 90, + 890, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 308, + 640, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Face Swapping. Face swapping has emerged as a popular research topic in the field of computer vision in recent years. Currently, it can be classified into two categories: 3D-based and GAN-based methods. Specifically, 3D-based methods [7, 15, 34, 43] fit input images into 3D parametric face models (i.e. 3DMM [8]) to overcome the problems of posture or perspective difference between input images. However, the performance of such methods is usually limited by the reconstruction results. GAN-based methods [6, 18, 23, 31, 32, 39, 40, 42] adopt the adversarial training strategy to generate photorealistic fake faces.", + "bbox": [ + 496, + 334, + 890, + 500 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Early GAN-based face swapping methods are subject-specific, i.e. DeepFake [18] and Korshunova et al. [31] are required to train different models for different inputs. The subject-specific approaches have limited real applications since face swapping is required to be applicable to any unseen pair of input images, and such limitation is addressed in latter subject-agnostic face swapping approaches [6, 23, 32, 39, 40, 42]. To increase the resolution of generated images, MegaFS [60] firstly proposes a GAN-inversion-based face swapping method, utilizing StyleGAN [28] to synthesize megapixel-level swapping faces. 
Xu et al. [56] and StyleSwap [57] integrate the StyleGAN2 [29] generator to their face swapping pipeline, applying its strong prior to generate high-resolution swapped faces. Following these approaches, we furtherly extend the face swapping task into 3D latent space to capture fine-grained details of face shape and strengthen the robustness under large pose variance.", + "bbox": [ + 496, + 501, + 890, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D-Aware Generative Models. The 3D-aware generative models are aimed to synthesize 3D-aware (i.e., can be explicitly controlled by the camera pose) images from 2D image collections. HoloGAN [41] firstly proposes a 3D-aware generative model through learning the voxel features, whereas it only generates low-resolution results due to the limitation of computational cost. Recently, several works utilize the NeRF [36] representation [12, 20, 25, 45, 50]. GRAF [50] adopts the approach of patch sampling to elim", + "bbox": [ + 496, + 763, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12706", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/20011f7b7de3afcc456fd6d41159db98b0d24f7f9b0e11b9c733257da699b212.jpg", + "image_caption": [ + "Figure 2. The pipeline of our 3D-aware face swapping method, 3dSwap. In the first stage, we infer 3D geometry and texture prior of both source and target images with an encoder. We then design a latent code manipulation algorithm consisting of style mixing and interpolation to conduct face swapping based on these priors. Finally, swapped faces in any view direction can be synthesized by 3dSwap after fine-tuning the parameters of the generator following the joint pivot tuning optimization." + ], + "image_footnote": [], + "bbox": [ + 78, + 88, + 890, + 310 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "inate computational costs during training. GRAM [20] estimates radiance manifolds to produce realistic images with fine details and strong 3D consistency. StyleNeRF [25] integrates NeRF with style-based generators and proposes a better up-sampler and a new regularization loss to mitigate inconsistencies. StyleSDF [45] presents a Signed Distance Field (SDF) based on 3D modeling that defines detailed 3D surfaces. EG3D [12] raises a novel tri-plane representation for efficient 3D-aware image generation. Due to the strong generative capability of these 3D-aware generative models, we leverage them to infer fine 3D prior from 2D images for our 3D-aware face swapping framework.", + "bbox": [ + 75, + 402, + 472, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "GAN Inversion. Since Generative Adversarial Network [24], numerous generative models reflect great abilities in synthesizing high-quality images [9,12,25,28,29,45]. To fully leverage these well-trained GANs, the task of GAN inversion emerges recently. In particular, GAN inversion is aimed to project a given image back to a vector $w$ in the latent space of a pretrained GAN model so that this image can be faithfully reconstructed from $w$ by the generator.", + "bbox": [ + 75, + 595, + 468, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Early works invert images into Gaussian noise $z \\in R^{1 \\times 512}$ or semantic latent space $\\mathcal{W} \\in R^{1 \\times 512}$ [1,16,17,59]. Abdal et al. [2] firstly extend latent space to $\\mathcal{W} + \\in R^{18 \\times 512}$ for more accurate reconstruction. 
To predict the latent code, learning-based methods [3,26,46,51,52] train an encoder for latent projection, while optimization-based methods [1,2,16,17] directly find the optimal code step-by-step from noise. Hybrid methods [4,47,59] combine both to optimize latent codes initialized by encoders.", + "bbox": [ + 76, + 717, + 468, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In addition, there are a few inversion works for 3D generative models. Pix2NeRF [10] is proposed to generate Neural Radiance Fields (NeRF) [36] of an object applying a", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "single input image based on a pretrained $\\pi$ -GAN [13]. Connor et al. [33] leverage EG3D [12] and a pretrained 3DMM predictor [22] to reconstruct a 3D human face, which could be further animated or edited. Our reconstruction model is also in this catalog, while the adopted learning-based algorithm is more robust and efficient compared with them.", + "bbox": [ + 496, + 402, + 893, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 500, + 513, + 591, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 500, + 542, + 609, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given single-view source and target images, we aim to synthesize multi-view-consistent face images with identity from source image $x_{s}$ and other attributes from target image $x_{t}$ . Fig. 2 demonstrates the overall pipeline and notations of the proposed 3dSwap. First, to extract accurate geometry and texture prior from 2D images, we conduct a learning-based 3D GAN inversion, training an encoding network to project the inputs into the latent space of a 3D-aware generative model. Specifically, we design a pseudomulti-view optimization strategy to train the encoder with a feature pyramid architecture from pSp [46], empowering the latent code projection with the 3D consistency of the state-of-the-art 3D GAN, i.e. EG3D [12] (Sec. 3.2). Then, to disentangle identity from attributes in the latent space, we design a latent code manipulation algorithm consisting of style mixing and interpolation (Sec. 3.3). Finally, for the purpose of improving the overall quality of our results, bridging the gap between 2D image generating and 3D rendering, we implement a joint pivot tuning on parameters of the pretrained EG3D generator (Sec. 3.4). The networks are trained with a set of well-designed loss functions to enforce identity transferring and attribute preserving (Sec. 3.5).", + "bbox": [ + 496, + 568, + 893, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12707", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Inferring 3D Prior from 2D Images", + "text_level": 1, + "bbox": [ + 76, + 90, + 385, + 107 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To infer geometry and texture prior from a 2D image, we leverage the state-of-the-art 3D-aware generative model, i.e. EG3D [12] by projecting the inputs into its latent space. Since the optimization-based algorithm [47] is inefficient and less robust to non-front faces, we propose a learning-based inversion algorithm where an encoding network is trained to project the single-view inputs into the 3D latent space. 
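The end-to-end flow described in Sec. 3.1 (invert both inputs, manipulate the latent codes, tune the generator, then render any view) can be summarized as a thin orchestration sketch. This is a hypothetical skeleton rather than a released API: `encoder`, `generator`, `estimate_pose`, `mix_codes`, and `pivot_tune` are placeholder callables standing in for the components of Secs. 3.2 to 3.4.

```python
# Skeleton of the 3dSwap pipeline as described in Sec. 3.1. Every component is
# injected as a callable; none of these names correspond to a released API.
from typing import Callable, Sequence

def swap_faces(x_source, x_target,
               encoder: Callable, generator: Callable, estimate_pose: Callable,
               mix_codes: Callable, pivot_tune: Callable, render_poses: Sequence):
    w_s = encoder(x_source)                    # 3D GAN inversion of the source (Sec. 3.2)
    w_t = encoder(x_target)                    # ... and of the target
    w_fs = mix_codes(w_s, w_t)                 # style mixing + interpolation (Sec. 3.3)
    d_t = estimate_pose(x_target)              # camera pose of the target image
    generator = pivot_tune(generator, w_s, w_t, w_fs, x_source, x_target)  # Sec. 3.4
    return [generator(w_fs, d) for d in list(render_poses) + [d_t]]        # multi-view outputs

if __name__ == "__main__":
    # trivially runnable with string stubs, just to show the data flow
    outputs = swap_faces(
        "source.png", "target.png",
        encoder=lambda x: f"w({x})",
        generator=lambda w, d: f"img({w}, {d})",
        estimate_pose=lambda x: f"pose({x})",
        mix_codes=lambda ws, wt: f"mix({ws}, {wt})",
        pivot_tune=lambda g, *args: g,
        render_poses=["left", "front", "right"],
    )
    print(outputs)
```

Injecting the components keeps the sketch independent of any particular 3D GAN implementation; the real pipeline would return rendered images rather than strings.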
Different from 2D StyleGAN-like models which totally rely on the latent code $w$ to generate the corresponding output: $y = \\mathcal{G}(w)$ , the 3D-aware generative model has an extra input $d$ which controls the pose of synthesized image: $y = \\mathcal{G}(w,d)$ . This indicates that latent codes and generated images are not bijections for 3D GANs since multi-view images of the same person can be synthesized using the same $w$ but different $d$ . Taking this property into account, we design a pseudo-multi-view training strategy, using a generated image in a different view from the source image to improve the consistency of latent code projection. Fig. 3 illustrates the pipeline of our design.", + "bbox": [ + 75, + 113, + 472, + 400 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we first use an encoder to project the input image $x$ into the latent space $\\mathcal{W}$ and get a high-dimension intermediate latent vector $w_{x} = \\mathcal{E}_{\\theta}(x)$ , where $\\mathcal{E}_{\\theta}(\\cdot)$ is the pSp encoder with parameters $\\theta$ . Then, with the pretrained EG3D generator $\\mathcal{G}(\\cdot, \\cdot)$ and input direction $d$ estimated by Deep3d Face Reconstruction [21], we synthesize the reconstructed result $x' = \\mathcal{G}(w_{x}, d)$ . For a 2D GAN inversion approach, this ground-truth and reconstructed image pair $(x, x')$ is enough, but it is inadequate for 3D GANs due to the non-bijective property.", + "bbox": [ + 75, + 401, + 472, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ideally, this issue can be addressed by feeding multi-view images of a person into the encoder and minimizing the distance between their output vectors. However, it is difficult to obtain large-scale multi-view data, and we usually only have single-view images of a person in the training dataset. To this end, we additionally sample a random direction $\\hat{d}$ and use the generator to synthesize $\\hat{x} = \\mathcal{G}(w_x,\\hat{d})$ with the same latent code. This output image $\\hat{x}$ , which is called a pseudo-input since it is generated by the 3D GAN, is again fed into the encoder-decoder structure to get $w_{\\hat{x}} = \\mathcal{E}_{\\theta}(\\hat{x})$ and $\\hat{x}^{\\prime} = \\mathcal{G}(w_{\\hat{x}},d)$ .", + "bbox": [ + 75, + 551, + 472, + 717 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Now, we can define our optimization objectives. Following the usual inversion approaches, we apply some pixelwise loss functions between the input $x$ and its reconstruction $x'$ . Under the setting of our pseudo-multi-view input, we add constraints between the two latent codes $w_{x}$ and $w_{\\hat{x}}$ for the purpose of maintaining 3D consistency. We further restrain pixel-level distance between the second-order output $\\hat{x}'$ synthesized with $w_{\\hat{x}}$ and the origin input $x$ to reinforce such constraint. In summary, this three-termed optimization can be written as:", + "bbox": [ + 75, + 718, + 472, + 868 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\theta} \\left\\{\\mathcal {L} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\right\\}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 881, + 468, + 904 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/5f4dffb41fb01f22db715da2fc91824845b9416888bef1bcd43edbc53ea6e5aa.jpg", + "image_caption": [ + "Figure 3. 
The pipeline of our pseudo-multi-view training strategy." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 893, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\theta$ is the parameter of encoder, $\\eta$ is a trade-off parameter and $\\mathcal{L}(\\cdot ,\\cdot)$ denotes the loss functions which will be further discussed in Sec. 3.5. After optimizing the parameters of the encoding network with this strategy, we can obtain rather accurate 3D prior $w_{x}$ from any given input $x$ .", + "bbox": [ + 496, + 309, + 890, + 386 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Face Swapping via Latent Code Manipulation", + "text_level": 1, + "bbox": [ + 498, + 393, + 888, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To take full advantage of the prior extracted from the 3D GAN model, we calculate the latent code for the swapped face based on latent codes $w_{s} = \\mathcal{E}_{\\theta}(x_{s})$ of the source image $x_{s}$ and $w_{t} = \\mathcal{E}_{\\theta}(x_{t})$ of the target image $x_{t}$ . Before that, we step back and think about what these latent codes represent.", + "bbox": [ + 496, + 417, + 890, + 492 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A face image usually contains different attributes such as face shape, hairstyle, skin color, etc. With the encoder discussed in Sec. 3.2, we embed all these attributes in the high-dimension latent vectors. However, identity features depending on the geometry of facial region (i.e., eyes, nose, mouth, cheek, and so on) also implicitly lie in such latent codes. For the task of face swapping, it is desirable if identity features can be disentangled from attribute features in the latent code. Afterward, we can simply exchange the identity part of the latent codes to achieve face swapping.", + "bbox": [ + 496, + 493, + 892, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Since such identity and attributes are typically entangled in the latent codes, we design an interpolation strategy between the source and target latent codes with learnable coefficients. Here, the source latent code $w_{s}$ plays a leading role in the identity part while $w_{t}$ dominates the others. To obtain these coefficients, we concatenate $w_{s}$ and $w_{t}$ to form a $1 \\times 1024$ vector and feed it into a four-layer Multilayer Perceptron whose output $\\rho$ is the interpolation coefficient.", + "bbox": [ + 496, + 643, + 893, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Moreover, StyleGAN-like [28,29] models share the style mixing property of latent codes, which means that different layers of latent codes control different parts of attributes. For example, coarse spatial resolutions control high-level aspects like face shape and orientation while fine resolution latent control details like hair color. Motivated by this, we also investigate the layer-wise attributes in EG3D and observed similar properties. 
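The three-term objective of Eq. (1) can be illustrated with a short PyTorch-style training step. The modules below are toy stand-ins (a linear encoder and a pose-conditioned linear decoder), not the pSp encoder or the EG3D generator, and the plain L1 image loss is a simplification of the full reconstruction loss spelled out in Sec. 3.5.

```python
# Illustrative sketch of the pseudo-multi-view inversion objective in Eq. (1).
# ToyEncoder / ToyGenerator are stand-ins, NOT the pSp encoder or EG3D.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyEncoder(nn.Module):                      # stand-in for the encoder E_theta
    def __init__(self, img_dim=64 * 64 * 3, w_dim=512):
        super().__init__()
        self.fc = nn.Linear(img_dim, w_dim)

    def forward(self, x):
        return self.fc(x.flatten(1))

class ToyGenerator(nn.Module):                    # stand-in for the pose-conditioned G(w, d)
    def __init__(self, w_dim=512, pose_dim=3, img_dim=64 * 64 * 3):
        super().__init__()
        self.fc = nn.Linear(w_dim + pose_dim, img_dim)

    def forward(self, w, d):
        return self.fc(torch.cat([w, d], dim=1)).view(-1, 3, 64, 64)

def pseudo_multi_view_loss(encoder, generator, x, d, eta=0.25):
    """Eq. (1): L(x, x') + eta * L(x, x_hat') + L(w_x, w_xhat)."""
    w_x = encoder(x)                              # project the input into latent space
    x_rec = generator(w_x, d)                     # reconstruction in the input pose
    d_rand = torch.randn_like(d)                  # randomly sampled view direction d_hat
    x_pseudo = generator(w_x, d_rand)             # pseudo-input in a different view
    w_pseudo = encoder(x_pseudo)                  # re-encode the pseudo-input
    x_rec2 = generator(w_pseudo, d)               # second-order reconstruction x_hat'
    recon = F.l1_loss(x_rec, x) + eta * F.l1_loss(x_rec2, x)
    latent = 1.0 - F.cosine_similarity(w_x, w_pseudo, dim=1).mean()  # cf. Eq. (7)
    return recon + latent

if __name__ == "__main__":
    enc, gen = ToyEncoder(), ToyGenerator()
    for p in gen.parameters():                    # only the encoder is optimized in the paper
        p.requires_grad_(False)
    x = torch.rand(2, 3, 64, 64)                  # dummy input batch
    d = torch.randn(2, 3)                         # dummy camera poses
    loss = pseudo_multi_view_loss(enc, gen, x, d)
    loss.backward()
    print(float(loss))
```

Only the encoder parameters receive gradients here, matching the minimization over θ in Eq. (1); whether the pseudo-input should be detached before re-encoding is a design choice the text does not spell out.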
This allows us to generate more desirable swapping results by only performing interpolation", + "bbox": [ + 496, + 763, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12708", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "on part of the latent codes.", + "bbox": [ + 76, + 90, + 253, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In summary, the latent code of swapped face $w_{fs}$ can be obtained by:", + "bbox": [ + 76, + 106, + 468, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nw _ {f s} ^ {(i)} = \\left\\{ \\begin{array}{c c} \\rho^ {(i)} \\times w _ {t} ^ {(i)} + (1 - \\rho^ {(i)}) \\times w _ {s} ^ {(i)} & i \\in [ 5, 9 ], \\\\ w _ {t} ^ {(i)} & o t h e r w i s e, \\end{array} \\right. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 80, + 147, + 468, + 199 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the superscript $i$ denotes the layer-wise expression of $w_{fs}$ and the choice of layer, from layer 5 to layer 9, follows the definition of \"middle\" from StyleGAN [28], while a slight modification is made since the dimension of EG3D latent space is lower (i.e. $\\mathcal{W} \\in R^{14 \\times 512}$ ). To better disentangle identity and attributes, we apply a Sigmoid-shaped activation function with a factor $\\lambda = 100$ to the $\\rho$ generated by MLPs, enforcing the coefficients to be closer to 0 or 1:", + "bbox": [ + 76, + 200, + 468, + 321 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {n e w} ^ {(i)} = \\left(1 + e ^ {- \\lambda \\rho_ {o l d} ^ {(i)}}\\right) ^ {- 1}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 333, + 468, + 354 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Joint Pivot Tuning", + "text_level": 1, + "bbox": [ + 76, + 371, + 256, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the encoding network trained by the well-designed optimization strategy in Sec. 3.2, we can project an input image into a code in the 3D latent space. However, the inevitable reconstruction error will degrade the performance of face swapping, which is a downstream task of 3D GAN inversion. Also, we observe that directly swap faces via latent manipulation leads to slight artifacts in the non-facial region. Motivated by PTI [47], we adopt pivot tuning on the parameters of the pretrained EG3D generator using a fixed latent code $w_{fs}$ from Sec. 3.3, but in an optimizing direction considering both reconstruction quality and face swapping performance. The process of this \"joint\" pivot tuning is:", + "bbox": [ + 75, + 395, + 468, + 577 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\theta^ {*}} \\left\\{\\mathcal {L} \\left(x _ {s / t}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {s / t}, d _ {s / t}\\right)\\right) + \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 601, + 468, + 628 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left. 
\\mathcal {L} \\left(x _ {t} \\cdot M _ {f}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d _ {t}\\right) \\cdot M _ {f}\\right) \\right\\},\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 627, + 385, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\theta^{*}$ is the parameter of EG3D generator, $d_{s}$ is the direction of the source image, $M_{f}$ is a binary mask that shields facial region and $\\mathcal{L}(\\cdot ,\\cdot)$ is the optimization constraint including MSE, LPIPS [58] and ID [19] losses.", + "bbox": [ + 76, + 654, + 468, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, with this finetuned generator and the latent code calculated by Eq. 2, we can synthesize the swapped face $y$ in any direction $d$ by:", + "bbox": [ + 75, + 715, + 468, + 760 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ny = \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 212, + 773, + 468, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5. Objective Functions", + "text_level": 1, + "bbox": [ + 76, + 801, + 267, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "GAN Inversion Losses. In Eq. 1, we generally use $\\mathcal{L}(\\cdot ,\\cdot)$ to denote the loss function of our pseudo-multi-view training strategy. Here, we give its detailed form. Following the previous work [46], we use three different objectives for supervising a pair of input image $x$ and reconstruction $x^{\\prime}$ (and", + "bbox": [ + 76, + 824, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the same for $\\hat{x}^{\\prime}$ ), including pixel-wise $\\mathcal{L}_1$ loss, Learned Perceptual Image Path Similarity [58] loss $\\mathcal{L}_{LPIPS}$ , and identity similarity loss $\\mathcal{L}_{id}$ maximizing the cosine similarity between two identity embeddings estimated by ArcFace [19]. The total reconstruction loss between $x$ and $x^{\\prime}$ is:", + "bbox": [ + 496, + 90, + 890, + 165 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) = \\lambda_ {1} \\mathcal {L} _ {1} \\left(x, x ^ {\\prime}\\right) + \\lambda_ {2} \\mathcal {L} _ {L P I S P} \\left(x, x ^ {\\prime}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 522, + 172, + 890, + 196 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n+ \\lambda_ {3} \\mathcal {L} _ {i d} (x, x ^ {\\prime}),\n$$\n", + "text_format": "latex", + "bbox": [ + 740, + 193, + 846, + 208 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{1},\\lambda_{2}$ and $\\lambda_{3}$ are loss weights.", + "bbox": [ + 500, + 218, + 750, + 233 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the constraint between two latent codes, we adopt a cosine similarity:", + "bbox": [ + 498, + 234, + 890, + 263 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) = 1 - \\cos \\left(w _ {x}, w _ {\\hat {x}}\\right). 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 273, + 890, + 290 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides, we adopt the latent code regularization loss from pSp [46], which constrains the generated latent vector in a region to be close to the average latent vector:", + "bbox": [ + 498, + 301, + 890, + 345 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e g} (x) = \\left\\| \\mathcal {E} _ {\\theta} (x) - \\bar {x} \\right\\| _ {2}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 356, + 890, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\bar{x}$ is the average of 10000 randomly sampled latent codes of EG3D generator. The overall loss function for 3D GAN inversion is:", + "bbox": [ + 498, + 383, + 890, + 426 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {i n v} = \\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} _ {r e c} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\tag {9} \\\\ + \\mathcal {L} _ {r e g} (x). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 435, + 890, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Face Swapping Losses. For training our face swapping module, we first design a masked pixel-wise $\\mathcal{L}_2$ loss for the face irrelevant region:", + "bbox": [ + 498, + 484, + 890, + 529 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {2} \\left(x _ {t}, y\\right) = \\left\\| x _ {t} \\cdot M _ {f} - y \\cdot M _ {f} \\right\\| _ {2}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 540, + 890, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $M_{f}$ is the binary mask same as in Sec. 3.4. We generate this mask according to the face segmentation labels of FFHQ [28] datasets. 
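Eqs. (2) and (3) amount to a per-layer blend of the two latent codes on the middle layers only, with the blending coefficients pushed toward 0 or 1 by a steep sigmoid. The sketch below assumes a 14 x 512 latent code and treats the layer range 5 to 9 as inclusive; the ρ vector, which the paper predicts with a four-layer MLP over the concatenated codes, is replaced by a random placeholder.

```python
# Illustrative sketch of the latent mixing rule in Eqs. (2)-(3).
# rho would come from the four-layer MLP of Sec. 3.3; here it is a placeholder.
import numpy as np

def sharpen(rho, lam=100.0):
    """Eq. (3): a sigmoid with a large slope pushes coefficients toward 0 or 1."""
    return 1.0 / (1.0 + np.exp(-lam * rho))

def mix_latents(w_s, w_t, rho, layers=range(5, 10)):
    """Eq. (2): target code everywhere, source/target blend on the middle layers."""
    w_fs = w_t.copy()
    for i in layers:
        r = sharpen(rho[i])
        w_fs[i] = r * w_t[i] + (1.0 - r) * w_s[i]
    return w_fs

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w_source = rng.standard_normal((14, 512))    # identity donor
    w_target = rng.standard_normal((14, 512))    # attribute donor
    rho = 0.05 * rng.standard_normal(14)         # placeholder for the MLP output
    w_swap = mix_latents(w_source, w_target, rho)
    # untouched layers stay exactly equal to the target code
    print(np.allclose(w_swap[:5], w_target[:5]), np.allclose(w_swap[10:], w_target[10:]))
```

Leaving the remaining layers equal to the target code is what keeps pose, background, and other attributes, while the blended middle layers carry the identity from the source.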
For 3D GAN inversion, we adopt the LPIPS [58] loss $\\mathcal{L}_{LPIPS}(x_t,y)$ to learn the perceptual similarities and increase the quality of the generated images, and the binary mask is also added before feeding the image into the perceptual feature extractor.", + "bbox": [ + 496, + 566, + 890, + 671 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For 3D-aware face swapping, we additionally synthesize the swapped face $\\hat{y}$ in the view of the source image, calculating both $\\mathcal{L}_{id}(x_s,y)$ and $\\mathcal{L}_{id}(x_s,\\hat{y})$ for better identity transferring.", + "bbox": [ + 496, + 672, + 890, + 732 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides, $\\mathcal{L}_{color}$ is designed to maintain the skin color of swapped faces:", + "bbox": [ + 498, + 732, + 890, + 763 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {c o l o r}} (x _ {s}, y) = \\| \\bar {\\mathcal {C}} (x _ {s} \\cdot (1 - M _ {f})) - \\bar {\\mathcal {C}} (y \\cdot (1 - M _ {f})) \\| _ {2}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 772, + 890, + 790 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\bar{\\mathcal{C}} (\\cdot)$ denotes an average RGB value of the masked region.", + "bbox": [ + 498, + 799, + 890, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The overall loss function for training the face swapping module is:", + "bbox": [ + 498, + 830, + 890, + 859 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {f s} = \\mathcal {L} _ {2} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {L P I P S} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {i d} \\left(x _ {s}, y\\right) \\tag {12} \\\\ + \\mathcal {L} _ {i d} (x _ {s}, \\hat {y}) + \\mathcal {L} _ {c o l o r} (x _ {s}, y). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 867, + 890, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12709", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f0959710d335e56833f3d38a55c940cce92e9bae72323375345958bc6bddd654.jpg", + "image_caption": [ + "Figure 4. Qualitative comparison of face swapping on CelebA-HQ dataset. Compared with all these 2D approaches, our method extracts facial shapes more accurately and transfers identity better. Moreover, since we conduct face swapping in latent space and a well-trained 3D GAN directly synthesizes the results, there are no obvious artifacts in the facial region." + ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 890, + 363 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 441, + 209, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we first compare the proposed 3dSwap with some state-of-the-art 2D-images-based face swapping approaches. Furthermore, face swapping in a 3D-aware manner and extra evaluation metrics designed for 3D face swapping are analyzed. We finally carry out ablation studies to evaluate the effectiveness of our major design.", + "bbox": [ + 75, + 467, + 468, + 558 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. 
Implementation Details", + "text_level": 1, + "bbox": [ + 76, + 569, + 294, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In all experiments, Ranger optimizer [54] is applied to train our networks with a learning rate of $1 \\times 10^{-4}$ . Hyperparameters are set as $\\lambda_1 = \\lambda_3 = 1$ , $\\lambda_2 = 0.8$ in Eq. 6 and $\\eta = 0.25$ in Eq. 9. For training time, the inversion module is trained for 1,000,000 steps on 4 NVIDIA RTX3090 GPUs for about 3 days while the face swapping module is trained for 500,000 steps also on 4 GPUs for about 2 days. The pivot tuning optimization during inference time takes about 8 minutes on a single GPU.", + "bbox": [ + 75, + 593, + 468, + 729 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Datasets", + "text_level": 1, + "bbox": [ + 76, + 739, + 179, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct experiments on two datasets: 1) The FFHQ [28] dataset contains 70,000 high-quality images of human faces crawled from Flicker with considerable variation in age, ethnicity, and background. All images of this dataset are in a resolution of $1024 \\times 1024$ . 2) The CelebA-HQ [27] dataset is the high-quality version of the large-scale face attributes dataset CelebA [35] which contains 30,000 images in $1024 \\times 1024$ . Specifically, we train our model on FFHQ, while comparison experiments are ex", + "bbox": [ + 75, + 763, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ecuted on CelebA-HQ. We follow the data preprocessing way of EG3D to crop images according to facial landmarks and resize them into a resolution of $512 \\times 512$ . Due to the relatively expensive inference cost of 3dSwap mentioned in Sec. 4.1, we operate the following comparison experiments on 1000 source-target image pairs.", + "bbox": [ + 496, + 441, + 892, + 536 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Comparison with 2D Face Swapping Methods", + "text_level": 1, + "bbox": [ + 498, + 544, + 887, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we compare the proposed 3dSwap with four 2D swapping methods: SimSwap [14], MegaFS [60], Infoswap [23] and Xu et al. [56]. These four methods are representative GAN-based [14,23] and GAN-inversion-based [56,60] approaches in recent years with state-of-the-art performance. Moreover, their official source codes are publicly available for us to make fair comparisons.", + "bbox": [ + 496, + 568, + 890, + 676 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Qualitative Comparison. The qualitative comparison results are shown in Fig. 4. Compared with all these 2D face swapping approaches, our methods transfer more accurate geometry features (i.e., facial contour) and detailed texture features like eye color to targets, reflecting better identity-transferring performance. Also, since we directly synthesize our final results with a well-trained generator with a properly calculated latent code, the swapped face we generate is more realistic without obvious artifacts in the facial region. More qualitative results on CelebA-HQ are provided in the supplementary material.", + "bbox": [ + 496, + 681, + 892, + 848 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative Comparison. We adopt several evaluation metrics in our quantitative experiments to show the effectiveness of our model in Table 1. 
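For reference, the training hyperparameters reported in Sec. 4.1 and the input resolution from Sec. 4.2 can be collected in one place. The key names below are illustrative and do not come from any released configuration file.

```python
# Hyperparameters as reported in Secs. 4.1-4.2; key names are illustrative only.
TRAIN_CONFIG = {
    "optimizer": "Ranger",           # Ranger optimizer [54]
    "learning_rate": 1e-4,
    "lambda_1": 1.0,                 # L1 weight in Eq. (6)
    "lambda_2": 0.8,                 # LPIPS weight in Eq. (6)
    "lambda_3": 1.0,                 # ID weight in Eq. (6)
    "eta": 0.25,                     # pseudo-view trade-off in Eq. (9)
    "inversion_steps": 1_000_000,    # about 3 days on 4 RTX 3090 GPUs
    "face_swap_steps": 500_000,      # about 2 days on 4 GPUs
    "input_resolution": 512,         # images cropped and resized to 512 x 512
}
```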
Following MegaFS [60],", + "bbox": [ + 496, + 854, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12710", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/54d4ad58df6408285f72ecca00fcb2dfc4642e4c4f51b2a7bc37d9ba3eb05a6e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Method</td><td>ID ↑</td><td>Pose ↓</td><td>Exp. ↓</td></tr>
<tr><td>SimSwap [14]</td><td>0.57</td><td>1.49</td><td>10.48</td></tr>
<tr><td>MegaFS [60]</td><td>0.48</td><td>3.95</td><td>14.08</td></tr>
<tr><td>InfoSwap [23]</td><td>0.61</td><td>2.50</td><td>10.63</td></tr>
<tr><td>Xu et al. [56]</td><td>0.54</td><td>2.66</td><td>12.94</td></tr>
<tr><td>Ours</td><td>0.72</td><td>1.68</td><td>13.76</td></tr>
</table>
", + "bbox": [ + 130, + 88, + 415, + 195 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "we measure the ID similarity by calculating the cosine similarity between face embeddings of the source and swapped faces that are estimated by a pretrained face recognition network [19]. Meanwhile, pose error computes the $\\mathcal{L}_2$ distance between the estimated Euler Angle [49] of the target and swapped images. For expression error, we calculate an average distance among estimated facial landmarks [5].", + "bbox": [ + 75, + 276, + 467, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For cosine similarity of identity, which is a crucial indicator for face swapping since it evaluates the quality of identity transferring, we significantly outperform all these 2D approaches. Such results and the visual effects in Fig. 4 together show that our method transfers identity better due to the application of 3D prior. For attribute preserving, our method which can be explicitly controlled by a camera pose performs rather well in pose error since it is only slightly weaker than SimSwap [14] but it reflects a poorer performance compared with 2D approaches in expression error. However, we can still claim that the proposed 3dSwap is superior to 2D methods in identity transferring and performs close to them in attribute preserving after considering all three quantitative comparison results.", + "bbox": [ + 75, + 383, + 467, + 595 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4. Further Analysis on 3D-Aware Face Swapping", + "text_level": 1, + "bbox": [ + 76, + 608, + 467, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As the first 3D-aware face swapping method, the proposed 3dSwap is specialized in synthesizing multi-view-consistent results. In this section, we conduct more experiments in this track, showing some visualized comparisons on 3D consistency and raising brand-new criteria for 3D-aware face swapping.", + "bbox": [ + 75, + 633, + 467, + 723 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Visualization on Multi-View Images. To compare with 2D face swapping approaches in fairness, we first synthesize multi-view target images by using our reconstruction module and then apply SimSwap [14] and InfoSwap [23] to them. The visualized results are shown in Fig. 5, where results under different views are not as consistent as ours (i.e. shape of nose, mouth, and eyebrows changes) for the 2D face swapping method. More artifacts can be discovered when the target images are sideward. Please refer to the video in the supplementary material for more intuitional comparisons.", + "bbox": [ + 75, + 734, + 467, + 900 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/95246c90f0dc73ab07aa12a8f25de081f74e28862b788dd74dc9d525b9343c5a.jpg", + "image_caption": [ + "Figure 5. Visualized comparison on Multi-view results among Infoswap [23], Simswap [14] and Ours." + ], + "image_footnote": [], + "bbox": [ + 501, + 87, + 890, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Criteria for 3D-Aware Face Swapping. In Sec. 4.3, the performance of identity transferring is evaluated based on the face embedding estimated by pretrained face recognition networks [19]. However, such networks are not enough robust to pose variance so it could be an unfair criterion for face swapping. For 3D-aware face swapping, we can simply synthesize a swapped face in the view of the source image. 
In this way, the \"Aligned Identity Similarity\" can be a reasonable standard to evaluate 3D-aware face swapping models. Moreover, inspired by humans' ability to recognize a familiar person from any direction, we synthesize the swapped face into 9 different fixed poses and calculate an average identity similarity together with images in source and target views. We report the results of these two evaluation metrics in Table 2; images under these fixed poses are shown in the supplementary material.", + "bbox": [ + 496, + 343, + 890, + 585 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e3706ab8789bdb58b116e12940b6367a47ff1ddad7afcad186cfd72cf6f8b835.jpg", + "table_caption": [ + "Table 1. Quantitative Results. We compare our model with four competing methods in ID Similarity for identity transferring and Pose & Expression Error for attribute preserving." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Metric</td><td>Aligned ID Sim.↑</td><td>Average ID Sim. ↑</td></tr>
<tr><td>Ours</td><td>0.85</td><td>0.42</td></tr>
</table>
", + "bbox": [ + 531, + 597, + 859, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Quantitative Results of New Metrics. We test the proposed 3dSwap under the two new evaluation metrics.", + "bbox": [ + 498, + 654, + 888, + 683 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 707, + 663, + 722 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we conduct ablation experiments on the CelebA-HQ dataset to evaluate the effectiveness of the major design of the proposed 3dSwap.", + "bbox": [ + 496, + 729, + 888, + 775 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of 3D GAN Inversion. Since previous works [12, 33] do not release the code of their 3D GAN-inversion part, we follow the paper of EG3D to reproduce a pivot tuning inversion [47] to the generator with the same hyperparameters. In this section, we mainly compare our design with the optimization-based latent code projection of PTI on EG3D to show the effectiveness of the learning-based inversion algorithm we use. For the sake of fairness,", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12711", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d2f749adcbf42fd764f6462588f75b1fcd20e970194d065f742935a0f212b55a.jpg", + "image_caption": [ + "Figure 6. Qualitative Comparison on 3D GAN inversion. Comparing to the directly application of pivot tuning inversion, our design reconstruct details (i.e. shape and color of eyes, glasses etc.) better." + ], + "image_footnote": [], + "bbox": [ + 81, + 88, + 464, + 292 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "both models are tested on the same 2000 images in CelebAHQ and adopt a parameter tuning of the pretrained generator for 500 steps.", + "bbox": [ + 75, + 373, + 468, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We show the qualitative comparison results in Fig. 6. Our design performs better in details reconstruction (i.e., eye shape, glasses, etc.) despite the optimization-based approach still recovers accurate face shape, hair color, etc.", + "bbox": [ + 75, + 419, + 468, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For 3D GAN Inversion, we adopt the same metrics as 2D GAN inversion: $\\mathcal{L}_2$ distance (or MSE loss) to calculate the pixel-wise similarity, LPIPS [58] distance to evaluate the perceptual similarity and MS-SSIM [53] to show the structural similarity. Additionally, we calculate ID similarity to ensure the accuracy of the reconstruction, and the results are reported in Table 3. Our design outperforms the optimization-based approaches in all of the four criteria.", + "bbox": [ + 75, + 479, + 468, + 601 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f98bcdba3fc9f82c0b11878b85d46c72bec3faef1d1c2d4c1c29491f3a2e29d3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table>
<tr><td>Method</td><td>MSE ↓</td><td>LPIPS ↓</td><td>SSIM ↑</td><td>ID Sim.↑</td></tr>
<tr><td>EG3D with Opt.</td><td>0.0896</td><td>0.2761</td><td>0.6197</td><td>0.7318</td></tr>
<tr><td>Ours</td><td>0.0168</td><td>0.1049</td><td>0.7348</td><td>0.8616</td></tr>
</table>
", + "bbox": [ + 76, + 614, + 477, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. Quantitative Results on 3D GAN inversion. We compare our 3D GAN inversion module with an optimization-based inversion on EG3D under four common evaluation metrics in the 2D GAN inversion task.", + "bbox": [ + 75, + 686, + 468, + 742 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effectiveness of Style Mixing. As mentioned in Sec. 3.3, we adopt style mixing and latent code interpolation for face swapping. Here, we briefly show the effectiveness of style mixing. A comparison of our model with and without style mixing can be seen in Fig. 7. Identity can be ideally transferred between sources and targets under both settings, however, attributes including skin color, background, etc. would be prominently affected if we interpolate in all layers of latent codes as shown in the third column.", + "bbox": [ + 75, + 763, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1aca44d9bbf2b8e7a9caa5de4e20c2168eb55309c7848ef6e6c14b1390186bd2.jpg", + "image_caption": [ + "Figure 7. Visualization of face swapping results with and without style mixing." + ], + "image_footnote": [], + "bbox": [ + 503, + 88, + 888, + 306 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 375, + 617, + 390 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose a novel 3D-aware face swapping method 3dSwap that generates high-fidelity and multi-view-consistent swapped faces. To leverage both geometry and texture prior of the 3D human face, we project the input images into the latent space of the 3D-aware generative model by introducing a learning-based inversion. A latent code manipulation algorithm, consisting of style mixing and latent code interpolation, is then designed to achieve 3D GAN-inversion-based face swapping. We further bridge the image quality between 2D generating and 3D rendering by applying a joint pivot tuning. To the best of our knowledge, 3dSwap is the first 3D-aware face swapping method, thus it sets a strong baseline for future research on 3D forgery detection and face swapping.", + "bbox": [ + 496, + 401, + 890, + 613 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. Since we need to project input images into the latent space of a 3D GAN which contains far more information than that of 2D GANs, we tune the parameters of the pretrained generator during testing, leading to a rather long inference time. Moreover, since the final results are rendered by a 3D generator, our method fails to accurately reconstruct clothing, backgrounds, etc in the image limited by the current development of 3D-aware generative models.", + "bbox": [ + 496, + 622, + 890, + 744 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Broader Impacts. Although not the purpose of this work, photorealistic swapped faces may potentially be abused. On the other hand, our model can be used to generate high-quality and multi-viewed examples to facilitate face forgery detection [11].", + "bbox": [ + 496, + 753, + 890, + 829 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. 
This work was supported by NSFC (62201342), Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102), and the Fundamental Research Funds for the Central Universities.", + "bbox": [ + 496, + 839, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12712", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In ICCV, pages 4431-4440, 2019.", + "[2] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan++: How to edit the embedded images? In CVPR, pages 8293-8302, 2020.", + "[3] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In ICCV, pages 6691–6700, 2021.", + "[4] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In CVPR, pages 18511-18521, 2022.", + "[5] Tadas Baltrusaitis, Peter Robinson, and Louis-Philippe Morency. Openface: An open source facial behavior analysis toolkit. In WACV, pages 1–10, 2016.", + "[6] Jianmin Bao, Dong Chen, Fang Wen, Houqiang Li, and Gang Hua. Towards open-set identity preserving face synthesis. In CVPR, pages 6713-6722, 2018.", + "[7] Volker Blanz, Kristina Scherbaum, Thomas Vetter, and Hans-Peter Seidel. Exchanging faces in images. Comput. Graph. Forum, 23(3):669-676, 2004.", + "[8] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In SIGGRAPH, pages 187-194, 1999.", + "[9] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019.", + "[10] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional $\\pi$ -gan for single image to neural radiance fields translation. In CVPR, pages 3971-3980, 2022.", + "[11] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In CVPR, pages 4103-4112, 2022.", + "[12] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022.", + "[13] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021.", + "[14] Renwang Chen, Xuanhong Chen, Bingbing Ni, and Yanhao Ge. Simswap: An efficient framework for high fidelity face swapping. In ACMMM, pages 2003-2011, 2020.", + "[15] Yi-Ting Cheng, Virginia Tzeng, Yu Liang, Chuan-Chang Wang, Bing-Yu Chen, Yung-Yu Chuang, and Ming Ouhyoung. 3d-model-based face replacement in video. In SIGGRAPH, 2009.", + "[16] Edo Collins, Raja Bala, Bob Price, and Sabine Süsstrunk. Editing in style: Uncovering the local semantics of gans. In CVPR, pages 5770-5779, 2020." + ], + "bbox": [ + 78, + 114, + 468, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Antonia Creswell and Anil Anthony Bharath. 
Inverting the generator of a generative adversarial network. IEEE Trans. Neural Networks Learn. Syst., 30(7):1967-1974, 2019.", + "[18] DeepFakes. https://github.com/ondyari/FaceForensics/tree/master/dataset/DeepFakes. Accessed:2022-10-18.", + "[19] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019.", + "[20] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. GRAM: generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10663-10673, 2022.", + "[21] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPRW, pages 285-295, 2019.", + "[22] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. ACM Trans. Graph., 40(4):88:1-88:13, 2021.", + "[23] Gege Gao, Huaibo Huang, Chaoyou Fu, Zhaoyang Li, and Ran He. Information bottleneck disentanglement for identity swapping. In CVPR, pages 3404-3413, 2021.", + "[24] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial networks. Commun. ACM, 63(11):139–144, 2020.", + "[25] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2022.", + "[26] Shanyan Guan, Ying Tai, Bingbing Ni, Feida Zhu, Feiyue Huang, and Xiaokang Yang. Collaborative learning for faster stylegan embedding. CoRR, abs/2007.01758, 2020.", + "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018.", + "[28] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019.", + "[29] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8107-8116, 2020.", + "[30] Ira Kemelmacher-Shlizerman. Transfiguring portraits. ACM Trans. Graph., 35(4):94:1-94:8, 2016.", + "[31] Iryna Korshunova, Wenzhe Shi, Joni Dambre, and Lucas Theis. Fast face-swap using convolutional neural networks. In ICCV, pages 3697-3705, 2017.", + "[32] Lingzhi Li, Jianmin Bao, Hao Yang, Dong Chen, and Fang Wen. Faceshifter: Towards high fidelity and occlusion aware face swapping. CoRR, abs/1912.13457, 2019.", + "[33] Connor Z. Lin, David B. Lindell, Eric R. Chan, and Gordon Wetzstein. 3d GAN inversion for controllable portrait image animation. CoRR, abs/2203.13441, 2022.", + "[34] Yuan Lin, Shengjin Wang, Qian Lin, and Feng Tang. Face swapping under large pose variations: A 3d model based approach. In ICME, pages 333-338, 2012." + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12713", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015.", + "[36] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. 
In ECCV, pages 405-421, 2020.", + "[37] Saleh Mosaddegh, Loïc Simon, and Frédéric Jurie. Photorealistic face de-identification by aggregating donors' face components. In ACCV, pages 159–174, 2014.", + "[38] Jacek Naruniec, Leonhard Helminger, Christopher Schroers, and Romann M. Weber. High-resolution neural face swapping for visual effects. Comput. Graph. Forum, 39(4):173-184, 2020.", + "[39] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. Fsnet: An identity-aware generative model for image-based face swapping. In ACCV, pages 117-132, 2018.", + "[40] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. RSGAN: face swapping and editing using face and hair representation in latent spaces. In SIGGRAPH, pages 69:1-69:2, 2018.", + "[41] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In ICCV, pages 7587–7596, 2019.", + "[42] Yuval Nirkin, Yosi Keller, and Tal Hassner. FSGAN: subject agnostic face swapping and reenactment. In ICCV, pages 7183-7192, 2019.", + "[43] Yuval Nirkin, Iacopo Masi, Anh Tuan Tran, Tal Hassner, and Gérard G. Medioni. On face segmentation, face swapping, and face perception. In AFGR, 2018.", + "[44] Yotam Nitzan, Amit Bermano, Yangyan Li, and Daniel Cohen-Or. Face identity disentanglement via latent space mapping. ACM Trans. Graph., 39(6):225:1-225:14, 2020.", + "[45] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13493-13503, 2022.", + "[46] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: A stylegan encoder for image-to-image translation. In CVPR, pages 2287–2296, 2021.", + "[47] Daniel Roich, Ron Mokady, Amit H. Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. TOG, pages 1–13, 2022.", + "[48] Arun Ross and Asem A. Othman. Visual cryptography for biometric privacy. IEEE Trans. Inf. Forensics Secur., 6(1):70-81, 2011.", + "[49] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In CVPR, pages 2074-2083, 2018.", + "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: generative radiance fields for 3d-aware image synthesis. In NeurIPS, 2020.", + "[51] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. ACM Trans. Graph., 40(4):133:1-133:14, 2021." + ], + "bbox": [ + 78, + 90, + 467, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. In CVPR, pages 11369-11378, 2022.", + "[53] Z. Wang, E.P. Simoncelli, and A.C. Bovik. Multiscale structural similarity for image quality assessment. In ACSSC, 2003.", + "[54] Less Wright. Ranger - a synergistic optimizer. https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer. Accessed: 2022-9-18.", + "[55] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022.", + "[56] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. 
In CVPR, pages 7632-7641, 2022.", + "[57] Zhiliang Xu, Hang Zhou, Zhibin Hong, Ziwei Liu, Jiaming Liu, Zhizhi Guo, Junyu Han, Jingtuo Liu, Errui Ding, and Jingdong Wang. Styleswap: Style-based generator empowers robust face swapping. In ECCV, pages 661-677, 2022.", + "[58] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018.", + "[59] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A. Efros. Generative visual manipulation on the natural image manifold. In ECCV, pages 597-613, 2016.", + "[60] Yuhao Zhu, Qi Li, Jian Wang, Cheng-Zhong Xu, and Zhenan Sun. One shot face swapping on megapixels. In CVPR, pages 4834-4844, 2021." + ], + "bbox": [ + 501, + 92, + 890, + 530 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12714", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_model.json b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_model.json new file mode 100644 index 0000000000000000000000000000000000000000..585784f4ba58d0549654b58d407c6cd6a01e1697 --- /dev/null +++ b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_model.json @@ -0,0 +1,2310 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.354, + 0.131, + 0.618, + 0.154 + ], + "angle": 0, + "content": "3D-Aware Face Swapping" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.181, + 0.844, + 0.234 + ], + "angle": 0, + "content": "Yixuan Li Chao Ma* Yichao Yan* Wenhan Zhu Xiaokang Yang MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, China {lyx0208, chaoma, yanyichao, zhuwenhan823, xkyang}@sjtu.edu.cn" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.271, + 0.199, + 0.452 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.455, + 0.166, + 0.469 + ], + "angle": 0, + "content": "Source" + }, + { + "type": "image", + "bbox": [ + 0.198, + 0.271, + 0.429, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.455, + 0.28, + 0.47 + ], + "angle": 0, + "content": "Target" + }, + { + "type": "image", + "bbox": [ + 0.429, + 0.272, + 0.66, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.472, + 0.455, + 0.615, + 0.469 + ], + "angle": 0, + "content": "Intermediate Views" + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.272, + 0.775, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.455, + 0.766, + 0.469 + ], + "angle": 0, + "content": "Source View" + }, + { + "type": "image", + "bbox": [ + 0.775, + 0.272, + 0.89, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.794, + 0.455, + 0.869, + 0.47 + ], + "angle": 0, + "content": "Geometry" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.481, + 0.893, + 0.511 + ], + "angle": 0, + "content": "Figure 1. Demonstration of the proposed 3dSwap. Given single-view source and target images, our method synthesizes high-fidelity and multi-view-consistent images of the swapped faces and the corresponding geometries. More results can be found on our project page." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.523, + 0.314, + 0.538 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.552, + 0.473, + 0.87 + ], + "angle": 0, + "content": "Face swapping is an important research topic in computer vision with wide applications in entertainment and privacy protection. Existing methods directly learn to swap 2D facial images, taking no account of the geometric information of human faces. In the presence of large pose variance between the source and the target faces, there always exist undesirable artifacts on the swapped face. In this paper, we present a novel 3D-aware face swapping method that generates high-fidelity and multi-view-consistent swapped faces from single-view source and target images. To achieve this, we take advantage of the strong geometry and texture prior of 3D human faces, where the 2D faces are projected into the latent space of a 3D generative model. By disentangling the identity and attribute features in the latent space, we succeed in swapping faces in a 3D-aware manner, being robust to pose variations while transferring fine-grained facial details. Extensive experiments demonstrate the superiority of our 3D-aware face swapping framework in terms of visual quality, identity similarity, and multi-view consistency. Code is available at https://1yx0208.github.io/3dSwap." + }, + { + "type": "title", + "bbox": [ + 0.502, + 0.523, + 0.631, + 0.538 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.892, + 0.656 + ], + "angle": 0, + "content": "Face swapping aims to transfer the identity of a person in the source image to another person in the target image while preserving other attributes like head pose, expression, illumination, background, etc. It has attracted extensive attention recently in the academic and industrial world for its potential wide applications in entertainment [14,30,38] and privacy protection [7,37,48]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.893, + 0.901 + ], + "angle": 0, + "content": "The key of face swapping is to transfer the geometric shape of the facial region (i.e., eyes, nose, mouth) and detailed texture information (such as the color of eyes) from the source image to the target image while preserving both geometry and texture of non-facial regions (i.e., hair, background, etc). Currently, some 3D-based methods consider geometry prior of human faces by fitting the input image to 3D face models such as 3D Morphable Model (3DMM) [8] to overcome the differences of face orientation and expression between sources and targets [7, 15, 34, 43]. However, these parametric face models only produce coarse frontal faces without fine-grained details, leading to low-resolution and fuzzy swapping results. On the other hand, following Generative Adversarial Network [24], GAN-based [6, 23, 32, 39, 40, 42] or GAN-inversion-based [44, 55, 57, 60] approaches adopt the ad" + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.887, + 0.239, + 0.9 + ], + "angle": 0, + "content": "* Corresponding authors." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12705" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.212 + ], + "angle": 0, + "content": "versarial training strategy to learn texture information from inputs. Despite the demonstrated photorealistic and high-resolution images, the swapped faces via 2D GANs sustain undesirable artifacts when two input faces undergo large pose variation since the strong 3D geometry prior of human faces is ignored. Moreover, learning to swap faces in 2D images makes little use of the shaped details from sources, leading to poorer performance on identity transferring." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.471, + 0.468 + ], + "angle": 0, + "content": "Motivated by the recent advances of 3D generative models [12, 13, 20, 25, 45] in synthesizing multi-view consistent images and high-quality 3D shapes, it naturally raises a question: can we perform face swapping in a 3D-aware manner to exploit the strong geometry and texture priors? To answer this question, two challenges arise. First, how to infer 3D prior directly from 3D-GAN models still remains open. Current 3D-aware generative models synthesize their results from a random Gaussian noise \\( z \\), so that their output images are not controllable. This increases the complexity of inferring the required prior from arbitrary input. Second, the inferred prior corresponding to input images is in the form of a high-dimension feature vector in the latent space of 3D GANs. Simply synthesizing multi-view target images referring to the prior and applying 2D face swapping to them produces not only inconsistent artifacts but also a heavy computational load." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.469, + 0.47, + 0.816 + ], + "angle": 0, + "content": "To address these challenges, we systematically investigate the geometry and texture prior of these 3D generative models and propose a novel 3D-aware face swapping framework 3dSwap. We introduce a 3D GAN inversion framework to project the 2D inputs into the 3D latent space, motivated by recent GAN inversion approaches [46, 47, 51]. Specifically, we design a learning-based inversion algorithm that trains an encoding network to efficiently and robustly project input images into the latent space of EG3D [12]. However, directly borrowing the architecture from 2D approaches is not yet enough since a single-view input provides limited information about the whole human face. To further improve the multi-view consistency of latent code projection, we design a pseudo-multi-view training strategy. This design effectively bridges the domain gap between 2D and 3D. To tackle the second problem, we design a face swapping algorithm based on the 3D latent codes and directly synthesize the swapped faces with the 3D-aware generator. In this way, we achieve 3D GAN-inversion-based face swapping by a latent code manipulating algorithm consisting of style-mixing and interpolation, where latent code interpolation is responsible for identity transferring while style-mixing helps to preserve attributes." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.817, + 0.39, + 0.831 + ], + "angle": 0, + "content": "In summary, our contributions are threefold:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.84, + 0.47, + 0.9 + ], + "angle": 0, + "content": "- To the best of our knowledge, we first address the 3D-aware face swapping task. The proposed 3dSwap method sets a strong baseline and we hope this work will foster future research into this task." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "- We design a learning-based 3D GAN inversion with the pseudo-multi-view training strategy to extract geometry and texture prior from arbitrary input images. We further utilize these strong prior by designing a latent code manipulating algorithm, with which we directly synthesize the final results with the pretrained generator." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.203, + 0.892, + 0.293 + ], + "angle": 0, + "content": "- Extensive experiments on benchmark datasets demonstrate the superiority of the proposed 3dSwap over state-of-the-art 2D face swapping approaches in identity transferring. Our reconstruction module for 3DGAN inversion performs favorably over the state-of-the-art methods as well." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.092, + 0.892, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.309, + 0.642, + 0.325 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.335, + 0.892, + 0.501 + ], + "angle": 0, + "content": "Face Swapping. Face swapping has emerged as a popular research topic in the field of computer vision in recent years. Currently, it can be classified into two categories: 3D-based and GAN-based methods. Specifically, 3D-based methods [7, 15, 34, 43] fit input images into 3D parametric face models (i.e. 3DMM [8]) to overcome the problems of posture or perspective difference between input images. However, the performance of such methods is usually limited by the reconstruction results. 
GAN-based methods [6, 18, 23, 31, 32, 39, 40, 42] adopt the adversarial training strategy to generate photorealistic fake faces." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.502, + 0.892, + 0.759 + ], + "angle": 0, + "content": "Early GAN-based face swapping methods are subject-specific, i.e. DeepFake [18] and Korshunova et al. [31] are required to train different models for different inputs. The subject-specific approaches have limited real applications since face swapping is required to be applicable to any unseen pair of input images, and such limitation is addressed in latter subject-agnostic face swapping approaches [6, 23, 32, 39, 40, 42]. To increase the resolution of generated images, MegaFS [60] firstly proposes a GAN-inversion-based face swapping method, utilizing StyleGAN [28] to synthesize megapixel-level swapping faces. Xu et al. [56] and StyleSwap [57] integrate the StyleGAN2 [29] generator to their face swapping pipeline, applying its strong prior to generate high-resolution swapped faces. Following these approaches, we furtherly extend the face swapping task into 3D latent space to capture fine-grained details of face shape and strengthen the robustness under large pose variance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.901 + ], + "angle": 0, + "content": "3D-Aware Generative Models. The 3D-aware generative models are aimed to synthesize 3D-aware (i.e., can be explicitly controlled by the camera pose) images from 2D image collections. HoloGAN [41] firstly proposes a 3D-aware generative model through learning the voxel features, whereas it only generates low-resolution results due to the limitation of computational cost. Recently, several works utilize the NeRF [36] representation [12, 20, 25, 45, 50]. GRAF [50] adopts the approach of patch sampling to elim" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12706" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.08, + 0.089, + 0.891, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.319, + 0.896, + 0.378 + ], + "angle": 0, + "content": "Figure 2. The pipeline of our 3D-aware face swapping method, 3dSwap. In the first stage, we infer 3D geometry and texture prior of both source and target images with an encoder. We then design a latent code manipulation algorithm consisting of style mixing and interpolation to conduct face swapping based on these priors. Finally, swapped faces in any view direction can be synthesized by 3dSwap after fine-tuning the parameters of the generator following the joint pivot tuning optimization." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.403, + 0.473, + 0.584 + ], + "angle": 0, + "content": "inate computational costs during training. GRAM [20] estimates radiance manifolds to produce realistic images with fine details and strong 3D consistency. StyleNeRF [25] integrates NeRF with style-based generators and proposes a better up-sampler and a new regularization loss to mitigate inconsistencies. StyleSDF [45] presents a Signed Distance Field (SDF) based on 3D modeling that defines detailed 3D surfaces. EG3D [12] raises a novel tri-plane representation for efficient 3D-aware image generation. Due to the strong generative capability of these 3D-aware generative models, we leverage them to infer fine 3D prior from 2D images for our 3D-aware face swapping framework." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.596, + 0.47, + 0.717 + ], + "angle": 0, + "content": "GAN Inversion. Since Generative Adversarial Network [24], numerous generative models reflect great abilities in synthesizing high-quality images [9,12,25,28,29,45]. To fully leverage these well-trained GANs, the task of GAN inversion emerges recently. In particular, GAN inversion is aimed to project a given image back to a vector \\( w \\) in the latent space of a pretrained GAN model so that this image can be faithfully reconstructed from \\( w \\) by the generator." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.718, + 0.47, + 0.854 + ], + "angle": 0, + "content": "Early works invert images into Gaussian noise \\( z \\in R^{1 \\times 512} \\) or semantic latent space \\( \\mathcal{W} \\in R^{1 \\times 512} \\) [1,16,17,59]. Abdal et al. [2] firstly extend latent space to \\( \\mathcal{W} + \\in R^{18 \\times 512} \\) for more accurate reconstruction. To predict the latent code, learning-based methods [3,26,46,51,52] train an encoder for latent projection, while optimization-based methods [1,2,16,17] directly find the optimal code step-by-step from noise. Hybrid methods [4,47,59] combine both to optimize latent codes initialized by encoders." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In addition, there are a few inversion works for 3D generative models. Pix2NeRF [10] is proposed to generate Neural Radiance Fields (NeRF) [36] of an object applying a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.403, + 0.895, + 0.495 + ], + "angle": 0, + "content": "single input image based on a pretrained \\(\\pi\\)-GAN [13]. Connor et al. [33] leverage EG3D [12] and a pretrained 3DMM predictor [22] to reconstruct a 3D human face, which could be further animated or edited. Our reconstruction model is also in this catalog, while the adopted learning-based algorithm is more robust and efficient compared with them." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.515, + 0.593, + 0.531 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.543, + 0.611, + 0.558 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Given single-view source and target images, we aim to synthesize multi-view-consistent face images with identity from source image \\( x_{s} \\) and other attributes from target image \\( x_{t} \\). Fig. 2 demonstrates the overall pipeline and notations of the proposed 3dSwap. First, to extract accurate geometry and texture prior from 2D images, we conduct a learning-based 3D GAN inversion, training an encoding network to project the inputs into the latent space of a 3D-aware generative model. Specifically, we design a pseudomulti-view optimization strategy to train the encoder with a feature pyramid architecture from pSp [46], empowering the latent code projection with the 3D consistency of the state-of-the-art 3D GAN, i.e. EG3D [12] (Sec. 3.2). Then, to disentangle identity from attributes in the latent space, we design a latent code manipulation algorithm consisting of style mixing and interpolation (Sec. 3.3). Finally, for the purpose of improving the overall quality of our results, bridging the gap between 2D image generating and 3D rendering, we implement a joint pivot tuning on parameters of the pretrained EG3D generator (Sec. 3.4). 
The networks are trained with a set of well-designed loss functions to enforce identity transferring and attribute preserving (Sec. 3.5)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12707" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.387, + 0.108 + ], + "angle": 0, + "content": "3.2. Inferring 3D Prior from 2D Images" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.114, + 0.473, + 0.401 + ], + "angle": 0, + "content": "To infer geometry and texture prior from a 2D image, we leverage the state-of-the-art 3D-aware generative model, i.e. EG3D [12] by projecting the inputs into its latent space. Since the optimization-based algorithm [47] is inefficient and less robust to non-front faces, we propose a learning-based inversion algorithm where an encoding network is trained to project the single-view inputs into the 3D latent space. Different from 2D StyleGAN-like models which totally rely on the latent code \\( w \\) to generate the corresponding output: \\( y = \\mathcal{G}(w) \\), the 3D-aware generative model has an extra input \\( d \\) which controls the pose of synthesized image: \\( y = \\mathcal{G}(w,d) \\). This indicates that latent codes and generated images are not bijections for 3D GANs since multi-view images of the same person can be synthesized using the same \\( w \\) but different \\( d \\). Taking this property into account, we design a pseudo-multi-view training strategy, using a generated image in a different view from the source image to improve the consistency of latent code projection. Fig. 3 illustrates the pipeline of our design." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.473, + 0.552 + ], + "angle": 0, + "content": "Specifically, we first use an encoder to project the input image \\( x \\) into the latent space \\( \\mathcal{W} \\) and get a high-dimension intermediate latent vector \\( w_{x} = \\mathcal{E}_{\\theta}(x) \\), where \\( \\mathcal{E}_{\\theta}(\\cdot) \\) is the pSp encoder with parameters \\( \\theta \\). Then, with the pretrained EG3D generator \\( \\mathcal{G}(\\cdot, \\cdot) \\) and input direction \\( d \\) estimated by Deep3d Face Reconstruction [21], we synthesize the reconstructed result \\( x' = \\mathcal{G}(w_{x}, d) \\). For a 2D GAN inversion approach, this ground-truth and reconstructed image pair \\( (x, x') \\) is enough, but it is inadequate for 3D GANs due to the non-bijective property." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.473, + 0.718 + ], + "angle": 0, + "content": "Ideally, this issue can be addressed by feeding multi-view images of a person into the encoder and minimizing the distance between their output vectors. However, it is difficult to obtain large-scale multi-view data, and we usually only have single-view images of a person in the training dataset. To this end, we additionally sample a random direction \\(\\hat{d}\\) and use the generator to synthesize \\(\\hat{x} = \\mathcal{G}(w_x,\\hat{d})\\) with the same latent code. This output image \\(\\hat{x}\\), which is called a pseudo-input since it is generated by the 3D GAN, is again fed into the encoder-decoder structure to get \\(w_{\\hat{x}} = \\mathcal{E}_{\\theta}(\\hat{x})\\) and \\(\\hat{x}^{\\prime} = \\mathcal{G}(w_{\\hat{x}},d)\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.473, + 0.869 + ], + "angle": 0, + "content": "Now, we can define our optimization objectives. 
Following the usual inversion approaches, we apply some pixelwise loss functions between the input \\( x \\) and its reconstruction \\( x' \\). Under the setting of our pseudo-multi-view input, we add constraints between the two latent codes \\( w_{x} \\) and \\( w_{\\hat{x}} \\) for the purpose of maintaining 3D consistency. We further restrain pixel-level distance between the second-order output \\( \\hat{x}' \\) synthesized with \\( w_{\\hat{x}} \\) and the origin input \\( x \\) to reinforce such constraint. In summary, this three-termed optimization can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.132, + 0.882, + 0.47, + 0.905 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\theta} \\left\\{\\mathcal {L} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\right\\}, \\tag {1}\n\\]" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.895, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.269, + 0.892, + 0.285 + ], + "angle": 0, + "content": "Figure 3. The pipeline of our pseudo-multi-view training strategy." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.31, + 0.892, + 0.387 + ], + "angle": 0, + "content": "where \\(\\theta\\) is the parameter of encoder, \\(\\eta\\) is a trade-off parameter and \\(\\mathcal{L}(\\cdot ,\\cdot)\\) denotes the loss functions which will be further discussed in Sec. 3.5. After optimizing the parameters of the encoding network with this strategy, we can obtain rather accurate 3D prior \\(w_{x}\\) from any given input \\(x\\)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.395, + 0.89, + 0.412 + ], + "angle": 0, + "content": "3.3. Face Swapping via Latent Code Manipulation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.892, + 0.493 + ], + "angle": 0, + "content": "To take full advantage of the prior extracted from the 3D GAN model, we calculate the latent code for the swapped face based on latent codes \\( w_{s} = \\mathcal{E}_{\\theta}(x_{s}) \\) of the source image \\( x_{s} \\) and \\( w_{t} = \\mathcal{E}_{\\theta}(x_{t}) \\) of the target image \\( x_{t} \\). Before that, we step back and think about what these latent codes represent." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.494, + 0.893, + 0.645 + ], + "angle": 0, + "content": "A face image usually contains different attributes such as face shape, hairstyle, skin color, etc. With the encoder discussed in Sec. 3.2, we embed all these attributes in the high-dimension latent vectors. However, identity features depending on the geometry of facial region (i.e., eyes, nose, mouth, cheek, and so on) also implicitly lie in such latent codes. For the task of face swapping, it is desirable if identity features can be disentangled from attribute features in the latent code. Afterward, we can simply exchange the identity part of the latent codes to achieve face swapping." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.894, + 0.765 + ], + "angle": 0, + "content": "Since such identity and attributes are typically entangled in the latent codes, we design an interpolation strategy between the source and target latent codes with learnable coefficients. Here, the source latent code \\( w_{s} \\) plays a leading role in the identity part while \\( w_{t} \\) dominates the others. 
To obtain these coefficients, we concatenate \\( w_{s} \\) and \\( w_{t} \\) to form a \\( 1 \\times 1024 \\) vector and feed it into a four-layer Multilayer Perceptron whose output \\( \\rho \\) is the interpolation coefficient." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Moreover, StyleGAN-like [28,29] models share the style mixing property of latent codes, which means that different layers of latent codes control different parts of attributes. For example, coarse spatial resolutions control high-level aspects like face shape and orientation while fine resolution latent control details like hair color. Motivated by this, we also investigate the layer-wise attributes in EG3D and observed similar properties. This allows us to generate more desirable swapping results by only performing interpolation" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12708" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.254, + 0.106 + ], + "angle": 0, + "content": "on part of the latent codes." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.107, + 0.469, + 0.138 + ], + "angle": 0, + "content": "In summary, the latent code of swapped face \\( w_{fs} \\) can be obtained by:" + }, + { + "type": "equation", + "bbox": [ + 0.081, + 0.148, + 0.469, + 0.2 + ], + "angle": 0, + "content": "\\[\nw _ {f s} ^ {(i)} = \\left\\{ \\begin{array}{c c} \\rho^ {(i)} \\times w _ {t} ^ {(i)} + (1 - \\rho^ {(i)}) \\times w _ {s} ^ {(i)} & i \\in [ 5, 9 ], \\\\ w _ {t} ^ {(i)} & o t h e r w i s e, \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.202, + 0.469, + 0.323 + ], + "angle": 0, + "content": "where the superscript \\(i\\) denotes the layer-wise expression of \\(w_{fs}\\) and the choice of layer, from layer 5 to layer 9, follows the definition of \"middle\" from StyleGAN [28], while a slight modification is made since the dimension of EG3D latent space is lower (i.e. \\(\\mathcal{W} \\in R^{14 \\times 512}\\)). To better disentangle identity and attributes, we apply a Sigmoid-shaped activation function with a factor \\(\\lambda = 100\\) to the \\(\\rho\\) generated by MLPs, enforcing the coefficients to be closer to 0 or 1:" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.334, + 0.469, + 0.355 + ], + "angle": 0, + "content": "\\[\n\\rho_ {n e w} ^ {(i)} = \\left(1 + e ^ {- \\lambda \\rho_ {o l d} ^ {(i)}}\\right) ^ {- 1}. \\tag {3}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.372, + 0.257, + 0.388 + ], + "angle": 0, + "content": "3.4. Joint Pivot Tuning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.396, + 0.47, + 0.578 + ], + "angle": 0, + "content": "With the encoding network trained by the well-designed optimization strategy in Sec. 3.2, we can project an input image into a code in the 3D latent space. However, the inevitable reconstruction error will degrade the performance of face swapping, which is a downstream task of 3D GAN inversion. Also, we observe that directly swap faces via latent manipulation leads to slight artifacts in the non-facial region. Motivated by PTI [47], we adopt pivot tuning on the parameters of the pretrained EG3D generator using a fixed latent code \\( w_{fs} \\) from Sec. 3.3, but in an optimizing direction considering both reconstruction quality and face swapping performance. 
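For reference, the latent code manipulation of Sec. 3.3 that produces the pivot code w_fs used in this tuning step can be written out as a minimal PyTorch-style sketch of Eqs. (2) and (3). Treating rho as one scalar per layer and the random toy inputs at the bottom are illustrative assumptions; in the paper rho is predicted by the four-layer MLP from the concatenated source and target codes.

```python
import torch

def sharpen(rho, lam=100.0):
    # Eq. (3): sigmoid-shaped activation with factor lambda = 100,
    # pushing each coefficient towards 0 or 1.
    return torch.sigmoid(lam * rho)

def mix_latents(w_s, w_t, rho, mix_layers=range(5, 10)):
    # Eq. (2): layer-wise interpolation in the 14 x 512 EG3D latent space.
    # Layers 5 to 9 are mixed; every other layer is copied from the target code,
    # which keeps coarse pose and fine texture under the target's control.
    rho = sharpen(rho)
    w_fs = w_t.clone()
    for i in mix_layers:
        w_fs[i] = rho[i] * w_t[i] + (1.0 - rho[i]) * w_s[i]
    return w_fs

# Toy usage with random stand-ins for the projected codes (assumption).
w_s, w_t = torch.randn(14, 512), torch.randn(14, 512)
rho = torch.randn(14)  # in the paper: output of the 4-layer MLP on the concatenated codes
w_fs = mix_latents(w_s, w_t, rho)
```

Interpolating all 14 layers instead (the setting without style mixing in Sec. 4.5) still transfers identity, but visibly alters attributes such as skin color and background.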
The process of this \"joint\" pivot tuning is:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.602, + 0.469, + 0.629 + ], + "angle": 0, + "content": "\\[\n\\min _ {\\theta^ {*}} \\left\\{\\mathcal {L} \\left(x _ {s / t}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {s / t}, d _ {s / t}\\right)\\right) + \\right. \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.628, + 0.387, + 0.644 + ], + "angle": 0, + "content": "\\[\n\\left. \\mathcal {L} \\left(x _ {t} \\cdot M _ {f}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d _ {t}\\right) \\cdot M _ {f}\\right) \\right\\},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.655, + 0.469, + 0.716 + ], + "angle": 0, + "content": "where \\(\\theta^{*}\\) is the parameter of EG3D generator, \\(d_{s}\\) is the direction of the source image, \\(M_{f}\\) is a binary mask that shields facial region and \\(\\mathcal{L}(\\cdot ,\\cdot)\\) is the optimization constraint including MSE, LPIPS [58] and ID [19] losses." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.716, + 0.469, + 0.761 + ], + "angle": 0, + "content": "Finally, with this finetuned generator and the latent code calculated by Eq. 2, we can synthesize the swapped face \\(y\\) in any direction \\(d\\) by:" + }, + { + "type": "equation", + "bbox": [ + 0.214, + 0.774, + 0.469, + 0.79 + ], + "angle": 0, + "content": "\\[\ny = \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d\\right). \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.802, + 0.268, + 0.818 + ], + "angle": 0, + "content": "3.5. Objective Functions" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.825, + 0.469, + 0.901 + ], + "angle": 0, + "content": "GAN Inversion Losses. In Eq. 1, we generally use \\(\\mathcal{L}(\\cdot ,\\cdot)\\) to denote the loss function of our pseudo-multi-view training strategy. Here, we give its detailed form. Following the previous work [46], we use three different objectives for supervising a pair of input image \\(x\\) and reconstruction \\(x^{\\prime}\\) (and" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.166 + ], + "angle": 0, + "content": "the same for \\(\\hat{x}^{\\prime}\\)), including pixel-wise \\(\\mathcal{L}_1\\) loss, Learned Perceptual Image Path Similarity [58] loss \\(\\mathcal{L}_{LPIPS}\\), and identity similarity loss \\(\\mathcal{L}_{id}\\) maximizing the cosine similarity between two identity embeddings estimated by ArcFace [19]. The total reconstruction loss between \\(x\\) and \\(x^{\\prime}\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.524, + 0.173, + 0.892, + 0.198 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) = \\lambda_ {1} \\mathcal {L} _ {1} \\left(x, x ^ {\\prime}\\right) + \\lambda_ {2} \\mathcal {L} _ {L P I S P} \\left(x, x ^ {\\prime}\\right) \\tag {6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.741, + 0.194, + 0.847, + 0.209 + ], + "angle": 0, + "content": "\\[\n+ \\lambda_ {3} \\mathcal {L} _ {i d} (x, x ^ {\\prime}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.219, + 0.75, + 0.234 + ], + "angle": 0, + "content": "where \\(\\lambda_{1},\\lambda_{2}\\) and \\(\\lambda_{3}\\) are loss weights." 
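To make the inversion objective concrete, one training step of the pseudo-multi-view strategy in Eq. (1), using the reconstruction loss of Eq. (6), can be sketched as follows. This is a hedged sketch rather than the released implementation: encoder, generator, lpips_fn and id_embed stand for the pSp-style encoder, the frozen EG3D generator, an LPIPS metric and an ArcFace embedding network; the latent constraint anticipates the cosine form of Eq. (7) below, the regularization term of Eq. (8) is omitted for brevity, and the default weights follow Sec. 4.1.

```python
import torch.nn.functional as F

def rec_loss(x, x_rec, lpips_fn, id_embed, lam1=1.0, lam2=0.8, lam3=1.0):
    # Eq. (6): weighted sum of pixel-wise L1, LPIPS, and identity losses.
    loss_id = 1.0 - F.cosine_similarity(id_embed(x), id_embed(x_rec), dim=-1).mean()
    return lam1 * F.l1_loss(x_rec, x) + lam2 * lpips_fn(x_rec, x) + lam3 * loss_id

def inversion_step(x, d, d_hat, encoder, generator, lpips_fn, id_embed, opt, eta=0.25):
    # One step of Eq. (1); only the encoder is updated, the generator stays frozen.
    # d is the camera pose estimated for x, d_hat is a randomly sampled pose.
    w_x = encoder(x)                            # project the real input into the latent space
    x_rec = generator(w_x, d)                   # first-order reconstruction x'
    x_pseudo = generator(w_x, d_hat).detach()   # pseudo-input in another view, treated as data
    w_hat = encoder(x_pseudo)                   # re-project the pseudo-input
    x_second = generator(w_hat, d)              # second-order reconstruction

    latent_term = 1.0 - F.cosine_similarity(w_x.flatten(1), w_hat.flatten(1)).mean()
    loss = (rec_loss(x, x_rec, lpips_fn, id_embed)
            + eta * rec_loss(x, x_second, lpips_fn, id_embed)
            + latent_term)
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()
```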
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.235, + 0.892, + 0.265 + ], + "angle": 0, + "content": "For the constraint between two latent codes, we adopt a cosine similarity:" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.275, + 0.891, + 0.291 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) = 1 - \\cos \\left(w _ {x}, w _ {\\hat {x}}\\right). \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.302, + 0.892, + 0.347 + ], + "angle": 0, + "content": "Besides, we adopt the latent code regularization loss from pSp [46], which constrains the generated latent vector in a region to be close to the average latent vector:" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.357, + 0.891, + 0.374 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} (x) = \\left\\| \\mathcal {E} _ {\\theta} (x) - \\bar {x} \\right\\| _ {2}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.384, + 0.892, + 0.428 + ], + "angle": 0, + "content": "where \\(\\bar{x}\\) is the average of 10000 randomly sampled latent codes of EG3D generator. The overall loss function for 3D GAN inversion is:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.436, + 0.891, + 0.472 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {i n v} = \\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} _ {r e c} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\tag {9} \\\\ + \\mathcal {L} _ {r e g} (x). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.485, + 0.892, + 0.53 + ], + "angle": 0, + "content": "Face Swapping Losses. For training our face swapping module, we first design a masked pixel-wise \\(\\mathcal{L}_2\\) loss for the face irrelevant region:" + }, + { + "type": "equation", + "bbox": [ + 0.579, + 0.541, + 0.891, + 0.557 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {2} \\left(x _ {t}, y\\right) = \\left\\| x _ {t} \\cdot M _ {f} - y \\cdot M _ {f} \\right\\| _ {2}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.892, + 0.672 + ], + "angle": 0, + "content": "where \\( M_{f} \\) is the binary mask same as in Sec. 3.4. We generate this mask according to the face segmentation labels of FFHQ [28] datasets. For 3D GAN inversion, we adopt the LPIPS [58] loss \\( \\mathcal{L}_{LPIPS}(x_t,y) \\) to learn the perceptual similarities and increase the quality of the generated images, and the binary mask is also added before feeding the image into the perceptual feature extractor." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.673, + 0.892, + 0.733 + ], + "angle": 0, + "content": "For 3D-aware face swapping, we additionally synthesize the swapped face \\(\\hat{y}\\) in the view of the source image, calculating both \\(\\mathcal{L}_{id}(x_s,y)\\) and \\(\\mathcal{L}_{id}(x_s,\\hat{y})\\) for better identity transferring." 
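A hedged sketch of the masked reconstruction and identity terms introduced so far is given below; the skin-color term of Eq. (11) and the full objective of Eq. (12) described next are added on top. Here m_f is the binary mask that shields the facial region (1 on the face-irrelevant area), lpips_fn and id_embed are placeholder callables, and using MSE as the masked L2 term is an assumption about the exact norm.

```python
import torch.nn.functional as F

def id_dist(a, b, id_embed):
    # 1 - cosine similarity between ArcFace-style identity embeddings.
    return 1.0 - F.cosine_similarity(id_embed(a), id_embed(b), dim=-1).mean()

def swap_losses(x_s, x_t, y, y_hat, m_f, lpips_fn, id_embed):
    # x_s / x_t: source and target images, (B, 3, H, W).
    # y: swapped face rendered in the target view; y_hat: the same face rendered in the source view.
    loss_l2 = F.mse_loss(y * m_f, x_t * m_f)    # Eq. (10): preserve the face-irrelevant region
    loss_lpips = lpips_fn(y * m_f, x_t * m_f)   # perceptual term, also computed on the masked images
    loss_id = id_dist(x_s, y, id_embed) + id_dist(x_s, y_hat, id_embed)  # identity towards the source
    return loss_l2 + loss_lpips + loss_id
```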
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.733, + 0.892, + 0.764 + ], + "angle": 0, + "content": "Besides, \\(\\mathcal{L}_{color}\\) is designed to maintain the skin color of swapped faces:" + }, + { + "type": "equation", + "bbox": [ + 0.508, + 0.773, + 0.891, + 0.791 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {c o l o r}} (x _ {s}, y) = \\| \\bar {\\mathcal {C}} (x _ {s} \\cdot (1 - M _ {f})) - \\bar {\\mathcal {C}} (y \\cdot (1 - M _ {f})) \\| _ {2}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.8, + 0.891, + 0.829 + ], + "angle": 0, + "content": "where \\(\\bar{\\mathcal{C}} (\\cdot)\\) denotes an average RGB value of the masked region." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.831, + 0.891, + 0.86 + ], + "angle": 0, + "content": "The overall loss function for training the face swapping module is:" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.868, + 0.891, + 0.904 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {f s} = \\mathcal {L} _ {2} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {L P I P S} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {i d} \\left(x _ {s}, y\\right) \\tag {12} \\\\ + \\mathcal {L} _ {i d} (x _ {s}, \\hat {y}) + \\mathcal {L} _ {c o l o r} (x _ {s}, y). \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12709" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.891, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.375, + 0.893, + 0.418 + ], + "angle": 0, + "content": "Figure 4. Qualitative comparison of face swapping on CelebA-HQ dataset. Compared with all these 2D approaches, our method extracts facial shapes more accurately and transfers identity better. Moreover, since we conduct face swapping in latent space and a well-trained 3D GAN directly synthesizes the results, there are no obvious artifacts in the facial region." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.442, + 0.21, + 0.459 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.468, + 0.47, + 0.559 + ], + "angle": 0, + "content": "In this section, we first compare the proposed 3dSwap with some state-of-the-art 2D-images-based face swapping approaches. Furthermore, face swapping in a 3D-aware manner and extra evaluation metrics designed for 3D face swapping are analyzed. We finally carry out ablation studies to evaluate the effectiveness of our major design." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.57, + 0.295, + 0.587 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.594, + 0.47, + 0.731 + ], + "angle": 0, + "content": "In all experiments, Ranger optimizer [54] is applied to train our networks with a learning rate of \\(1 \\times 10^{-4}\\). Hyperparameters are set as \\(\\lambda_1 = \\lambda_3 = 1\\), \\(\\lambda_2 = 0.8\\) in Eq. 6 and \\(\\eta = 0.25\\) in Eq. 9. For training time, the inversion module is trained for 1,000,000 steps on 4 NVIDIA RTX3090 GPUs for about 3 days while the face swapping module is trained for 500,000 steps also on 4 GPUs for about 2 days. The pivot tuning optimization during inference time takes about 8 minutes on a single GPU." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.741, + 0.18, + 0.755 + ], + "angle": 0, + "content": "4.2. 
Datasets" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We conduct experiments on two datasets: 1) The FFHQ [28] dataset contains 70,000 high-quality images of human faces crawled from Flicker with considerable variation in age, ethnicity, and background. All images of this dataset are in a resolution of \\(1024 \\times 1024\\). 2) The CelebA-HQ [27] dataset is the high-quality version of the large-scale face attributes dataset CelebA [35] which contains 30,000 images in \\(1024 \\times 1024\\). Specifically, we train our model on FFHQ, while comparison experiments are ex" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.893, + 0.537 + ], + "angle": 0, + "content": "ecuted on CelebA-HQ. We follow the data preprocessing way of EG3D to crop images according to facial landmarks and resize them into a resolution of \\(512 \\times 512\\). Due to the relatively expensive inference cost of 3dSwap mentioned in Sec. 4.1, we operate the following comparison experiments on 1000 source-target image pairs." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.545, + 0.888, + 0.563 + ], + "angle": 0, + "content": "4.3. Comparison with 2D Face Swapping Methods" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.569, + 0.892, + 0.677 + ], + "angle": 0, + "content": "In this section, we compare the proposed 3dSwap with four 2D swapping methods: SimSwap [14], MegaFS [60], Infoswap [23] and Xu et al. [56]. These four methods are representative GAN-based [14,23] and GAN-inversion-based [56,60] approaches in recent years with state-of-the-art performance. Moreover, their official source codes are publicly available for us to make fair comparisons." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.682, + 0.893, + 0.849 + ], + "angle": 0, + "content": "Qualitative Comparison. The qualitative comparison results are shown in Fig. 4. Compared with all these 2D face swapping approaches, our methods transfer more accurate geometry features (i.e., facial contour) and detailed texture features like eye color to targets, reflecting better identity-transferring performance. Also, since we directly synthesize our final results with a well-trained generator with a properly calculated latent code, the swapped face we generate is more realistic without obvious artifacts in the facial region. More qualitative results on CelebA-HQ are provided in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Quantitative Comparison. We adopt several evaluation metrics in our quantitative experiments to show the effectiveness of our model in Table 1. Following MegaFS [60]," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12710" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.132, + 0.089, + 0.416, + 0.196 + ], + "angle": 0, + "content": "
MethodID ↑Pose ↓Exp. ↓
SimSwap [14]0.571.4910.48
MegaFS [60]0.483.9514.08
InfoSwap [23]0.612.5010.63
Xu et al. [56]0.542.6612.94
Ours0.721.6813.76
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.206, + 0.47, + 0.249 + ], + "angle": 0, + "content": "Table 1. Quantitative Results. We compare our model with four competing methods in ID Similarity for identity transferring and Pose & Expression Error for attribute preserving." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.277, + 0.468, + 0.384 + ], + "angle": 0, + "content": "we measure the ID similarity by calculating the cosine similarity between face embeddings of the source and swapped faces that are estimated by a pretrained face recognition network [19]. Meanwhile, pose error computes the \\(\\mathcal{L}_2\\) distance between the estimated Euler Angle [49] of the target and swapped images. For expression error, we calculate an average distance among estimated facial landmarks [5]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.385, + 0.468, + 0.596 + ], + "angle": 0, + "content": "For cosine similarity of identity, which is a crucial indicator for face swapping since it evaluates the quality of identity transferring, we significantly outperform all these 2D approaches. Such results and the visual effects in Fig. 4 together show that our method transfers identity better due to the application of 3D prior. For attribute preserving, our method which can be explicitly controlled by a camera pose performs rather well in pose error since it is only slightly weaker than SimSwap [14] but it reflects a poorer performance compared with 2D approaches in expression error. However, we can still claim that the proposed 3dSwap is superior to 2D methods in identity transferring and performs close to them in attribute preserving after considering all three quantitative comparison results." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.609, + 0.468, + 0.626 + ], + "angle": 0, + "content": "4.4. Further Analysis on 3D-Aware Face Swapping" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.634, + 0.468, + 0.724 + ], + "angle": 0, + "content": "As the first 3D-aware face swapping method, the proposed 3dSwap is specialized in synthesizing multi-view-consistent results. In this section, we conduct more experiments in this track, showing some visualized comparisons on 3D consistency and raising brand-new criteria for 3D-aware face swapping." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Visualization on Multi-View Images. To compare with 2D face swapping approaches in fairness, we first synthesize multi-view target images by using our reconstruction module and then apply SimSwap [14] and InfoSwap [23] to them. The visualized results are shown in Fig. 5, where results under different views are not as consistent as ours (i.e. shape of nose, mouth, and eyebrows changes) for the 2D face swapping method. More artifacts can be discovered when the target images are sideward. Please refer to the video in the supplementary material for more intuitional comparisons." + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.088, + 0.891, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.291, + 0.89, + 0.32 + ], + "angle": 0, + "content": "Figure 5. Visualized comparison on Multi-view results among Infoswap [23], Simswap [14] and Ours." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.344, + 0.892, + 0.587 + ], + "angle": 0, + "content": "Criteria for 3D-Aware Face Swapping. In Sec. 
4.3, the performance of identity transferring is evaluated based on the face embedding estimated by pretrained face recognition networks [19]. However, such networks are not robust enough to pose variations, so this could be an unfair criterion for face swapping. For 3D-aware face swapping, we can simply synthesize a swapped face in the view of the source image. In this way, the \"Aligned Identity Similarity\" can serve as a reasonable standard for evaluating 3D-aware face swapping models. Moreover, inspired by humans' ability to recognize a familiar person from any direction, we synthesize the swapped face under 9 different fixed poses and calculate an average identity similarity together with images in the source and target views. We report the results of these two evaluation metrics in Table 2; images under these fixed poses are shown in the supplementary material." + }, + { + "type": "table", + "bbox": [ + 0.532, + 0.598, + 0.86, + 0.645 + ], + "angle": 0, + "content": "<table><tr><td>
MetricAligned ID Sim.↑Average ID Sim. ↑
Ours0.850.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.655, + 0.89, + 0.684 + ], + "angle": 0, + "content": "Table 2. Quantitative Results of New Metrics. We test the proposed 3dSwap under the two new evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.708, + 0.664, + 0.723 + ], + "angle": 0, + "content": "4.5. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.731, + 0.89, + 0.776 + ], + "angle": 0, + "content": "In this section, we conduct ablation experiments on the CelebA-HQ dataset to evaluate the effectiveness of the major design of the proposed 3dSwap." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Effectiveness of 3D GAN Inversion. Since previous works [12, 33] do not release the code of their 3D GAN-inversion part, we follow the paper of EG3D to reproduce a pivot tuning inversion [47] to the generator with the same hyperparameters. In this section, we mainly compare our design with the optimization-based latent code projection of PTI on EG3D to show the effectiveness of the learning-based inversion algorithm we use. For the sake of fairness," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12711" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.089, + 0.465, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.305, + 0.47, + 0.348 + ], + "angle": 0, + "content": "Figure 6. Qualitative Comparison on 3D GAN inversion. Comparing to the directly application of pivot tuning inversion, our design reconstruct details (i.e. shape and color of eyes, glasses etc.) better." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.374, + 0.469, + 0.419 + ], + "angle": 0, + "content": "both models are tested on the same 2000 images in CelebAHQ and adopt a parameter tuning of the pretrained generator for 500 steps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.42, + 0.469, + 0.48 + ], + "angle": 0, + "content": "We show the qualitative comparison results in Fig. 6. Our design performs better in details reconstruction (i.e., eye shape, glasses, etc.) despite the optimization-based approach still recovers accurate face shape, hair color, etc." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.481, + 0.469, + 0.602 + ], + "angle": 0, + "content": "For 3D GAN Inversion, we adopt the same metrics as 2D GAN inversion: \\(\\mathcal{L}_2\\) distance (or MSE loss) to calculate the pixel-wise similarity, LPIPS [58] distance to evaluate the perceptual similarity and MS-SSIM [53] to show the structural similarity. Additionally, we calculate ID similarity to ensure the accuracy of the reconstruction, and the results are reported in Table 3. Our design outperforms the optimization-based approaches in all of the four criteria." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.615, + 0.478, + 0.677 + ], + "angle": 0, + "content": "
MethodMSE ↓LPIPS ↓SSIM ↑ID Sim.↑
EG3D with Opt.0.08960.27610.61970.7318
Ours0.01680.10490.73480.8616
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.687, + 0.47, + 0.743 + ], + "angle": 0, + "content": "Table 3. Quantitative Results on 3D GAN inversion. We compare our 3D GAN inversion module with an optimization-based inversion on EG3D under four common evaluation metrics in the 2D GAN inversion task." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Effectiveness of Style Mixing. As mentioned in Sec. 3.3, we adopt style mixing and latent code interpolation for face swapping. Here, we briefly show the effectiveness of style mixing. A comparison of our model with and without style mixing can be seen in Fig. 7. Identity can be ideally transferred between sources and targets under both settings, however, attributes including skin color, background, etc. would be prominently affected if we interpolate in all layers of latent codes as shown in the third column." + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.089, + 0.89, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.5, + 0.319, + 0.892, + 0.348 + ], + "angle": 0, + "content": "Figure 7. Visualization of face swapping results with and without style mixing." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.375, + 0.618, + 0.391 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.614 + ], + "angle": 0, + "content": "We propose a novel 3D-aware face swapping method 3dSwap that generates high-fidelity and multi-view-consistent swapped faces. To leverage both geometry and texture prior of the 3D human face, we project the input images into the latent space of the 3D-aware generative model by introducing a learning-based inversion. A latent code manipulation algorithm, consisting of style mixing and latent code interpolation, is then designed to achieve 3D GAN-inversion-based face swapping. We further bridge the image quality between 2D generating and 3D rendering by applying a joint pivot tuning. To the best of our knowledge, 3dSwap is the first 3D-aware face swapping method, thus it sets a strong baseline for future research on 3D forgery detection and face swapping." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.623, + 0.892, + 0.745 + ], + "angle": 0, + "content": "Limitations. Since we need to project input images into the latent space of a 3D GAN which contains far more information than that of 2D GANs, we tune the parameters of the pretrained generator during testing, leading to a rather long inference time. Moreover, since the final results are rendered by a 3D generator, our method fails to accurately reconstruct clothing, backgrounds, etc in the image limited by the current development of 3D-aware generative models." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.755, + 0.892, + 0.83 + ], + "angle": 0, + "content": "Broader Impacts. Although not the purpose of this work, photorealistic swapped faces may potentially be abused. On the other hand, our model can be used to generate high-quality and multi-viewed examples to facilitate face forgery detection [11]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Acknowledgements. This work was supported by NSFC (62201342), Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102), and the Fundamental Research Funds for the Central Universities." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12712" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.157 + ], + "angle": 0, + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In ICCV, pages 4431-4440, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.159, + 0.47, + 0.2 + ], + "angle": 0, + "content": "[2] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan++: How to edit the embedded images? In CVPR, pages 8293-8302, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.468, + 0.243 + ], + "angle": 0, + "content": "[3] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In ICCV, pages 6691–6700, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.246, + 0.468, + 0.299 + ], + "angle": 0, + "content": "[4] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In CVPR, pages 18511-18521, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.468, + 0.343 + ], + "angle": 0, + "content": "[5] Tadas Baltrusaitis, Peter Robinson, and Louis-Philippe Morency. Openface: An open source facial behavior analysis toolkit. In WACV, pages 1–10, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.346, + 0.468, + 0.386 + ], + "angle": 0, + "content": "[6] Jianmin Bao, Dong Chen, Fang Wen, Houqiang Li, and Gang Hua. Towards open-set identity preserving face synthesis. In CVPR, pages 6713-6722, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.389, + 0.468, + 0.43 + ], + "angle": 0, + "content": "[7] Volker Blanz, Kristina Scherbaum, Thomas Vetter, and Hans-Peter Seidel. Exchanging faces in images. Comput. Graph. Forum, 23(3):669-676, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.432, + 0.468, + 0.459 + ], + "angle": 0, + "content": "[8] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In SIGGRAPH, pages 187-194, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.461, + 0.468, + 0.5 + ], + "angle": 0, + "content": "[9] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.504, + 0.468, + 0.558 + ], + "angle": 0, + "content": "[10] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional \\(\\pi\\)-gan for single image to neural radiance fields translation. In CVPR, pages 3971-3980, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.561, + 0.468, + 0.616 + ], + "angle": 0, + "content": "[11] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In CVPR, pages 4103-4112, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.618, + 0.468, + 0.699 + ], + "angle": 0, + "content": "[12] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. 
In CVPR, pages 16123-16133, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.468, + 0.757 + ], + "angle": 0, + "content": "[13] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.468, + 0.801 + ], + "angle": 0, + "content": "[14] Renwang Chen, Xuanhong Chen, Bingbing Ni, and Yanhao Ge. Simswap: An efficient framework for high fidelity face swapping. In ACMMM, pages 2003-2011, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.468, + 0.856 + ], + "angle": 0, + "content": "[15] Yi-Ting Cheng, Virginia Tzeng, Yu Liang, Chuan-Chang Wang, Bing-Yu Chen, Yung-Yu Chuang, and Ming Ouhyoung. 3d-model-based face replacement in video. In SIGGRAPH, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[16] Edo Collins, Raja Bala, Bob Price, and Sabine Süsstrunk. Editing in style: Uncovering the local semantics of gans. In CVPR, pages 5770-5779, 2020." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[17] Antonia Creswell and Anil Anthony Bharath. Inverting the generator of a generative adversarial network. IEEE Trans. Neural Networks Learn. Syst., 30(7):1967-1974, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[18] DeepFakes. https://github.com/ondyari/FaceForensics/tree/master/dataset/DeepFakes. Accessed:2022-10-18." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[19] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.221, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[20] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. GRAM: generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10663-10673, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[21] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPRW, pages 285-295, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[22] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. ACM Trans. Graph., 40(4):88:1-88:13, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.892, + 0.417 + ], + "angle": 0, + "content": "[23] Gege Gao, Huaibo Huang, Chaoyou Fu, Zhaoyang Li, and Ran He. Information bottleneck disentanglement for identity swapping. In CVPR, pages 3404-3413, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.473 + ], + "angle": 0, + "content": "[24] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial networks. Commun. ACM, 63(11):139–144, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.476, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[25] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.519, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[26] Shanyan Guan, Ying Tai, Bingbing Ni, Feida Zhu, Feiyue Huang, and Xiaokang Yang. Collaborative learning for faster stylegan embedding. CoRR, abs/2007.01758, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.892, + 0.601 + ], + "angle": 0, + "content": "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.644 + ], + "angle": 0, + "content": "[28] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.647, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[29] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8107-8116, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.703, + 0.892, + 0.73 + ], + "angle": 0, + "content": "[30] Ira Kemelmacher-Shlizerman. Transfiguring portraits. ACM Trans. Graph., 35(4):94:1-94:8, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.733, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[31] Iryna Korshunova, Wenzhe Shi, Joni Dambre, and Lucas Theis. Fast face-swap using convolutional neural networks. In ICCV, pages 3697-3705, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.775, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[32] Lingzhi Li, Jianmin Bao, Hao Yang, Dong Chen, and Fang Wen. Faceshifter: Towards high fidelity and occlusion aware face swapping. CoRR, abs/1912.13457, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[33] Connor Z. Lin, David B. Lindell, Eric R. Chan, and Gordon Wetzstein. 3d GAN inversion for controllable portrait image animation. CoRR, abs/2203.13441, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[34] Yuan Lin, Shengjin Wang, Qian Lin, and Feng Tang. Face swapping under large pose variations: A 3d model based approach. In ICME, pages 333-338, 2012." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12713" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.468, + 0.177 + ], + "angle": 0, + "content": "[36] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 405-421, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.468, + 0.22 + ], + "angle": 0, + "content": "[37] Saleh Mosaddegh, Loïc Simon, and Frédéric Jurie. Photorealistic face de-identification by aggregating donors' face components. In ACCV, pages 159–174, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.222, + 0.468, + 0.275 + ], + "angle": 0, + "content": "[38] Jacek Naruniec, Leonhard Helminger, Christopher Schroers, and Romann M. Weber. High-resolution neural face swapping for visual effects. Comput. Graph. Forum, 39(4):173-184, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.468, + 0.319 + ], + "angle": 0, + "content": "[39] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. Fsnet: An identity-aware generative model for image-based face swapping. In ACCV, pages 117-132, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.321, + 0.468, + 0.374 + ], + "angle": 0, + "content": "[40] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. RSGAN: face swapping and editing using face and hair representation in latent spaces. In SIGGRAPH, pages 69:1-69:2, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.377, + 0.468, + 0.432 + ], + "angle": 0, + "content": "[41] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In ICCV, pages 7587–7596, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.468, + 0.473 + ], + "angle": 0, + "content": "[42] Yuval Nirkin, Yosi Keller, and Tal Hassner. FSGAN: subject agnostic face swapping and reenactment. In ICCV, pages 7183-7192, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.468, + 0.517 + ], + "angle": 0, + "content": "[43] Yuval Nirkin, Iacopo Masi, Anh Tuan Tran, Tal Hassner, and Gérard G. Medioni. On face segmentation, face swapping, and face perception. In AFGR, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.468, + 0.56 + ], + "angle": 0, + "content": "[44] Yotam Nitzan, Amit Bermano, Yangyan Li, and Daniel Cohen-Or. Face identity disentanglement via latent space mapping. ACM Trans. Graph., 39(6):225:1-225:14, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.468, + 0.616 + ], + "angle": 0, + "content": "[45] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13493-13503, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.618, + 0.468, + 0.673 + ], + "angle": 0, + "content": "[46] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: A stylegan encoder for image-to-image translation. In CVPR, pages 2287–2296, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.468, + 0.716 + ], + "angle": 0, + "content": "[47] Daniel Roich, Ron Mokady, Amit H. Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. TOG, pages 1–13, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.468, + 0.758 + ], + "angle": 0, + "content": "[48] Arun Ross and Asem A. Othman. Visual cryptography for biometric privacy. IEEE Trans. Inf. Forensics Secur., 6(1):70-81, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.468, + 0.801 + ], + "angle": 0, + "content": "[49] Nataniel Ruiz, Eunji Chong, and James M. Rehg. 
Fine-grained head pose estimation without keypoints. In CVPR, pages 2074-2083, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.468, + 0.844 + ], + "angle": 0, + "content": "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: generative radiance fields for 3d-aware image synthesis. In NeurIPS, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[51] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. ACM Trans. Graph., 40(4):133:1-133:14, 2021." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.468, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[52] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. In CVPR, pages 11369-11378, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[53] Z. Wang, E.P. Simoncelli, and A.C. Bovik. Multiscale structural similarity for image quality assessment. In ACSSC, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.219 + ], + "angle": 0, + "content": "[54] Less Wright. Ranger - a synergistic optimizer. https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer. Accessed: 2022-9-18." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.222, + 0.892, + 0.275 + ], + "angle": 0, + "content": "[55] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[56] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[57] Zhiliang Xu, Hang Zhou, Zhibin Hong, Ziwei Liu, Jiaming Liu, Zhizhi Guo, Junyu Han, Jingtuo Liu, Errui Ding, and Jingdong Wang. Styleswap: Style-based generator empowers robust face swapping. In ECCV, pages 661-677, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[58] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.892, + 0.489 + ], + "angle": 0, + "content": "[59] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A. Efros. Generative visual manipulation on the natural image manifold. In ECCV, pages 597-613, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[60] Yuhao Zhu, Qi Li, Jian Wang, Cheng-Zhong Xu, and Zhenan Sun. One shot face swapping on megapixels. In CVPR, pages 4834-4844, 2021." 
+ }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12714" + } + ] +] \ No newline at end of file diff --git a/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_origin.pdf b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..94c2fc91082957467ed13ced2527e4ee52abeba9 --- /dev/null +++ b/2023/3D-Aware Face Swapping/66d1bee4-1a69-4f6f-8a65-3f5202fddfc5_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44ee50f1431760db219c4566fa42074fbe8d77c4e57d104f44a1327417a2110b +size 9303477 diff --git a/2023/3D-Aware Face Swapping/full.md b/2023/3D-Aware Face Swapping/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6c3ff421a9fe2417a9b977258132eb02936fa762 --- /dev/null +++ b/2023/3D-Aware Face Swapping/full.md @@ -0,0 +1,338 @@ +# 3D-Aware Face Swapping + +Yixuan Li Chao Ma* Yichao Yan* Wenhan Zhu Xiaokang Yang MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, China {lyx0208, chaoma, yanyichao, zhuwenhan823, xkyang}@sjtu.edu.cn + +![](images/db2a32cb00c1a096529925fa25c9fba4b1870e608e0ae2061971e078920cfdd6.jpg) +Source + +![](images/645096ca6885eef89157cd30fb1a86f49e4367afe473d1792a150ff241f269a7.jpg) +Target + +![](images/2beb072e7b4fef458b7c6794a12f1ee2512754c90d1d15db8a9f3820adc3bb98.jpg) +Intermediate Views +Figure 1. Demonstration of the proposed 3dSwap. Given single-view source and target images, our method synthesizes high-fidelity and multi-view-consistent images of the swapped faces and the corresponding geometries. More results can be found on our project page. + +![](images/eb4dbc647fb79c18b1d27f2fa5a36f98974b2b986d386ec70793a2f5050469c1.jpg) +Source View + +![](images/f7226b694a58a983e49cf7078fa142fa89701e0100628cdfba1e0c25c2b1f54b.jpg) +Geometry + +# Abstract + +Face swapping is an important research topic in computer vision with wide applications in entertainment and privacy protection. Existing methods directly learn to swap 2D facial images, taking no account of the geometric information of human faces. In the presence of large pose variance between the source and the target faces, there always exist undesirable artifacts on the swapped face. In this paper, we present a novel 3D-aware face swapping method that generates high-fidelity and multi-view-consistent swapped faces from single-view source and target images. To achieve this, we take advantage of the strong geometry and texture prior of 3D human faces, where the 2D faces are projected into the latent space of a 3D generative model. By disentangling the identity and attribute features in the latent space, we succeed in swapping faces in a 3D-aware manner, being robust to pose variations while transferring fine-grained facial details. Extensive experiments demonstrate the superiority of our 3D-aware face swapping framework in terms of visual quality, identity similarity, and multi-view consistency. Code is available at https://1yx0208.github.io/3dSwap. + +# 1. Introduction + +Face swapping aims to transfer the identity of a person in the source image to another person in the target image while preserving other attributes like head pose, expression, illumination, background, etc. 
It has attracted extensive attention recently in the academic and industrial world for its potential wide applications in entertainment [14,30,38] and privacy protection [7,37,48]. + +The key of face swapping is to transfer the geometric shape of the facial region (i.e., eyes, nose, mouth) and detailed texture information (such as the color of eyes) from the source image to the target image while preserving both geometry and texture of non-facial regions (i.e., hair, background, etc). Currently, some 3D-based methods consider geometry prior of human faces by fitting the input image to 3D face models such as 3D Morphable Model (3DMM) [8] to overcome the differences of face orientation and expression between sources and targets [7, 15, 34, 43]. However, these parametric face models only produce coarse frontal faces without fine-grained details, leading to low-resolution and fuzzy swapping results. On the other hand, following Generative Adversarial Network [24], GAN-based [6, 23, 32, 39, 40, 42] or GAN-inversion-based [44, 55, 57, 60] approaches adopt the ad + +versarial training strategy to learn texture information from inputs. Despite the demonstrated photorealistic and high-resolution images, the swapped faces via 2D GANs sustain undesirable artifacts when two input faces undergo large pose variation since the strong 3D geometry prior of human faces is ignored. Moreover, learning to swap faces in 2D images makes little use of the shaped details from sources, leading to poorer performance on identity transferring. + +Motivated by the recent advances of 3D generative models [12, 13, 20, 25, 45] in synthesizing multi-view consistent images and high-quality 3D shapes, it naturally raises a question: can we perform face swapping in a 3D-aware manner to exploit the strong geometry and texture priors? To answer this question, two challenges arise. First, how to infer 3D prior directly from 3D-GAN models still remains open. Current 3D-aware generative models synthesize their results from a random Gaussian noise $z$ , so that their output images are not controllable. This increases the complexity of inferring the required prior from arbitrary input. Second, the inferred prior corresponding to input images is in the form of a high-dimension feature vector in the latent space of 3D GANs. Simply synthesizing multi-view target images referring to the prior and applying 2D face swapping to them produces not only inconsistent artifacts but also a heavy computational load. + +To address these challenges, we systematically investigate the geometry and texture prior of these 3D generative models and propose a novel 3D-aware face swapping framework 3dSwap. We introduce a 3D GAN inversion framework to project the 2D inputs into the 3D latent space, motivated by recent GAN inversion approaches [46, 47, 51]. Specifically, we design a learning-based inversion algorithm that trains an encoding network to efficiently and robustly project input images into the latent space of EG3D [12]. However, directly borrowing the architecture from 2D approaches is not yet enough since a single-view input provides limited information about the whole human face. To further improve the multi-view consistency of latent code projection, we design a pseudo-multi-view training strategy. This design effectively bridges the domain gap between 2D and 3D. To tackle the second problem, we design a face swapping algorithm based on the 3D latent codes and directly synthesize the swapped faces with the 3D-aware generator. 
In this way, we achieve 3D GAN-inversion-based face swapping by a latent code manipulating algorithm consisting of style-mixing and interpolation, where latent code interpolation is responsible for identity transferring while style-mixing helps to preserve attributes. + +In summary, our contributions are threefold: + +- To the best of our knowledge, we first address the 3D-aware face swapping task. The proposed 3dSwap method sets a strong baseline and we hope this work will foster future research into this task. + +- We design a learning-based 3D GAN inversion with the pseudo-multi-view training strategy to extract geometry and texture prior from arbitrary input images. We further utilize these strong prior by designing a latent code manipulating algorithm, with which we directly synthesize the final results with the pretrained generator. +- Extensive experiments on benchmark datasets demonstrate the superiority of the proposed 3dSwap over state-of-the-art 2D face swapping approaches in identity transferring. Our reconstruction module for 3DGAN inversion performs favorably over the state-of-the-art methods as well. + +# 2. Related Work + +Face Swapping. Face swapping has emerged as a popular research topic in the field of computer vision in recent years. Currently, it can be classified into two categories: 3D-based and GAN-based methods. Specifically, 3D-based methods [7, 15, 34, 43] fit input images into 3D parametric face models (i.e. 3DMM [8]) to overcome the problems of posture or perspective difference between input images. However, the performance of such methods is usually limited by the reconstruction results. GAN-based methods [6, 18, 23, 31, 32, 39, 40, 42] adopt the adversarial training strategy to generate photorealistic fake faces. + +Early GAN-based face swapping methods are subject-specific, i.e. DeepFake [18] and Korshunova et al. [31] are required to train different models for different inputs. The subject-specific approaches have limited real applications since face swapping is required to be applicable to any unseen pair of input images, and such limitation is addressed in latter subject-agnostic face swapping approaches [6, 23, 32, 39, 40, 42]. To increase the resolution of generated images, MegaFS [60] firstly proposes a GAN-inversion-based face swapping method, utilizing StyleGAN [28] to synthesize megapixel-level swapping faces. Xu et al. [56] and StyleSwap [57] integrate the StyleGAN2 [29] generator to their face swapping pipeline, applying its strong prior to generate high-resolution swapped faces. Following these approaches, we furtherly extend the face swapping task into 3D latent space to capture fine-grained details of face shape and strengthen the robustness under large pose variance. + +3D-Aware Generative Models. The 3D-aware generative models are aimed to synthesize 3D-aware (i.e., can be explicitly controlled by the camera pose) images from 2D image collections. HoloGAN [41] firstly proposes a 3D-aware generative model through learning the voxel features, whereas it only generates low-resolution results due to the limitation of computational cost. Recently, several works utilize the NeRF [36] representation [12, 20, 25, 45, 50]. GRAF [50] adopts the approach of patch sampling to elim + +![](images/20011f7b7de3afcc456fd6d41159db98b0d24f7f9b0e11b9c733257da699b212.jpg) +Figure 2. The pipeline of our 3D-aware face swapping method, 3dSwap. In the first stage, we infer 3D geometry and texture prior of both source and target images with an encoder. 
We then design a latent code manipulation algorithm consisting of style mixing and interpolation to conduct face swapping based on these priors. Finally, swapped faces in any view direction can be synthesized by 3dSwap after fine-tuning the parameters of the generator following the joint pivot tuning optimization. + +inate computational costs during training. GRAM [20] estimates radiance manifolds to produce realistic images with fine details and strong 3D consistency. StyleNeRF [25] integrates NeRF with style-based generators and proposes a better up-sampler and a new regularization loss to mitigate inconsistencies. StyleSDF [45] presents a Signed Distance Field (SDF) based on 3D modeling that defines detailed 3D surfaces. EG3D [12] raises a novel tri-plane representation for efficient 3D-aware image generation. Due to the strong generative capability of these 3D-aware generative models, we leverage them to infer fine 3D prior from 2D images for our 3D-aware face swapping framework. + +GAN Inversion. Since Generative Adversarial Network [24], numerous generative models reflect great abilities in synthesizing high-quality images [9,12,25,28,29,45]. To fully leverage these well-trained GANs, the task of GAN inversion emerges recently. In particular, GAN inversion is aimed to project a given image back to a vector $w$ in the latent space of a pretrained GAN model so that this image can be faithfully reconstructed from $w$ by the generator. + +Early works invert images into Gaussian noise $z \in R^{1 \times 512}$ or semantic latent space $\mathcal{W} \in R^{1 \times 512}$ [1,16,17,59]. Abdal et al. [2] firstly extend latent space to $\mathcal{W} + \in R^{18 \times 512}$ for more accurate reconstruction. To predict the latent code, learning-based methods [3,26,46,51,52] train an encoder for latent projection, while optimization-based methods [1,2,16,17] directly find the optimal code step-by-step from noise. Hybrid methods [4,47,59] combine both to optimize latent codes initialized by encoders. + +In addition, there are a few inversion works for 3D generative models. Pix2NeRF [10] is proposed to generate Neural Radiance Fields (NeRF) [36] of an object applying a + +single input image based on a pretrained $\pi$ -GAN [13]. Connor et al. [33] leverage EG3D [12] and a pretrained 3DMM predictor [22] to reconstruct a 3D human face, which could be further animated or edited. Our reconstruction model is also in this catalog, while the adopted learning-based algorithm is more robust and efficient compared with them. + +# 3. Method + +# 3.1. Overview + +Given single-view source and target images, we aim to synthesize multi-view-consistent face images with identity from source image $x_{s}$ and other attributes from target image $x_{t}$ . Fig. 2 demonstrates the overall pipeline and notations of the proposed 3dSwap. First, to extract accurate geometry and texture prior from 2D images, we conduct a learning-based 3D GAN inversion, training an encoding network to project the inputs into the latent space of a 3D-aware generative model. Specifically, we design a pseudomulti-view optimization strategy to train the encoder with a feature pyramid architecture from pSp [46], empowering the latent code projection with the 3D consistency of the state-of-the-art 3D GAN, i.e. EG3D [12] (Sec. 3.2). Then, to disentangle identity from attributes in the latent space, we design a latent code manipulation algorithm consisting of style mixing and interpolation (Sec. 3.3). 
Finally, for the purpose of improving the overall quality of our results, bridging the gap between 2D image generating and 3D rendering, we implement a joint pivot tuning on parameters of the pretrained EG3D generator (Sec. 3.4). The networks are trained with a set of well-designed loss functions to enforce identity transferring and attribute preserving (Sec. 3.5). + +# 3.2. Inferring 3D Prior from 2D Images + +To infer geometry and texture prior from a 2D image, we leverage the state-of-the-art 3D-aware generative model, i.e. EG3D [12] by projecting the inputs into its latent space. Since the optimization-based algorithm [47] is inefficient and less robust to non-front faces, we propose a learning-based inversion algorithm where an encoding network is trained to project the single-view inputs into the 3D latent space. Different from 2D StyleGAN-like models which totally rely on the latent code $w$ to generate the corresponding output: $y = \mathcal{G}(w)$ , the 3D-aware generative model has an extra input $d$ which controls the pose of synthesized image: $y = \mathcal{G}(w,d)$ . This indicates that latent codes and generated images are not bijections for 3D GANs since multi-view images of the same person can be synthesized using the same $w$ but different $d$ . Taking this property into account, we design a pseudo-multi-view training strategy, using a generated image in a different view from the source image to improve the consistency of latent code projection. Fig. 3 illustrates the pipeline of our design. + +Specifically, we first use an encoder to project the input image $x$ into the latent space $\mathcal{W}$ and get a high-dimension intermediate latent vector $w_{x} = \mathcal{E}_{\theta}(x)$ , where $\mathcal{E}_{\theta}(\cdot)$ is the pSp encoder with parameters $\theta$ . Then, with the pretrained EG3D generator $\mathcal{G}(\cdot, \cdot)$ and input direction $d$ estimated by Deep3d Face Reconstruction [21], we synthesize the reconstructed result $x' = \mathcal{G}(w_{x}, d)$ . For a 2D GAN inversion approach, this ground-truth and reconstructed image pair $(x, x')$ is enough, but it is inadequate for 3D GANs due to the non-bijective property. + +Ideally, this issue can be addressed by feeding multi-view images of a person into the encoder and minimizing the distance between their output vectors. However, it is difficult to obtain large-scale multi-view data, and we usually only have single-view images of a person in the training dataset. To this end, we additionally sample a random direction $\hat{d}$ and use the generator to synthesize $\hat{x} = \mathcal{G}(w_x,\hat{d})$ with the same latent code. This output image $\hat{x}$ , which is called a pseudo-input since it is generated by the 3D GAN, is again fed into the encoder-decoder structure to get $w_{\hat{x}} = \mathcal{E}_{\theta}(\hat{x})$ and $\hat{x}^{\prime} = \mathcal{G}(w_{\hat{x}},d)$ . + +Now, we can define our optimization objectives. Following the usual inversion approaches, we apply some pixelwise loss functions between the input $x$ and its reconstruction $x'$ . Under the setting of our pseudo-multi-view input, we add constraints between the two latent codes $w_{x}$ and $w_{\hat{x}}$ for the purpose of maintaining 3D consistency. We further restrain pixel-level distance between the second-order output $\hat{x}'$ synthesized with $w_{\hat{x}}$ and the origin input $x$ to reinforce such constraint. 
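Before the combined objective is stated in Eq. (1) below, the forward passes of this strategy can be summarized in a few lines. The following is a minimal PyTorch-style sketch under assumed interfaces: `encoder` maps an image to a $14 \times 512$ latent code, `generator` maps a latent code and a camera pose to an image, and `sample_random_pose` and `recon_loss` are hypothetical placeholders rather than functions from the released code.

```python
import torch
import torch.nn.functional as F

def sample_random_pose(batch_size):
    # Hypothetical helper: random camera parameters for the pseudo view d_hat
    # (the exact pose format depends on the 3D-aware generator being used).
    return torch.randn(batch_size, 25)

def pseudo_multiview_step(encoder, generator, x, d, recon_loss, eta=0.25):
    """One encoder-training step of the pseudo-multi-view strategy.

    x: input images [B, 3, H, W]; d: their estimated camera poses.
    recon_loss: pixel / perceptual / identity reconstruction loss between two images.
    """
    w_x = encoder(x)                        # project the input into the latent space
    x_rec = generator(w_x, d)               # first-order reconstruction x'

    d_hat = sample_random_pose(x.shape[0])  # random pseudo view direction
    x_hat = generator(w_x, d_hat)           # pseudo-input rendered in the new view
    w_x_hat = encoder(x_hat)                # re-encode the pseudo-input
    x_hat_rec = generator(w_x_hat, d)       # second-order reconstruction in the original view

    # Three terms: reconstruction of x', reconstruction of the second-order output
    # (weighted by eta), and consistency between the two latent codes (cosine distance).
    latent_term = 1.0 - F.cosine_similarity(w_x.flatten(1), w_x_hat.flatten(1)).mean()
    return recon_loss(x, x_rec) + eta * recon_loss(x, x_hat_rec) + latent_term
```

Only the encoder parameters are updated by this objective; the pretrained generator stays frozen until the joint pivot tuning of Sec. 3.4.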
In summary, this three-termed optimization can be written as: + +$$ +\min _ {\theta} \left\{\mathcal {L} \left(x, x ^ {\prime}\right) + \eta \mathcal {L} \left(x, \hat {x} ^ {\prime}\right) + \mathcal {L} \left(w _ {x}, w _ {\hat {x}}\right) \right\}, \tag {1} +$$ + +![](images/5f4dffb41fb01f22db715da2fc91824845b9416888bef1bcd43edbc53ea6e5aa.jpg) +Figure 3. The pipeline of our pseudo-multi-view training strategy. + +where $\theta$ is the parameter of encoder, $\eta$ is a trade-off parameter and $\mathcal{L}(\cdot ,\cdot)$ denotes the loss functions which will be further discussed in Sec. 3.5. After optimizing the parameters of the encoding network with this strategy, we can obtain rather accurate 3D prior $w_{x}$ from any given input $x$ . + +# 3.3. Face Swapping via Latent Code Manipulation + +To take full advantage of the prior extracted from the 3D GAN model, we calculate the latent code for the swapped face based on latent codes $w_{s} = \mathcal{E}_{\theta}(x_{s})$ of the source image $x_{s}$ and $w_{t} = \mathcal{E}_{\theta}(x_{t})$ of the target image $x_{t}$ . Before that, we step back and think about what these latent codes represent. + +A face image usually contains different attributes such as face shape, hairstyle, skin color, etc. With the encoder discussed in Sec. 3.2, we embed all these attributes in the high-dimension latent vectors. However, identity features depending on the geometry of facial region (i.e., eyes, nose, mouth, cheek, and so on) also implicitly lie in such latent codes. For the task of face swapping, it is desirable if identity features can be disentangled from attribute features in the latent code. Afterward, we can simply exchange the identity part of the latent codes to achieve face swapping. + +Since such identity and attributes are typically entangled in the latent codes, we design an interpolation strategy between the source and target latent codes with learnable coefficients. Here, the source latent code $w_{s}$ plays a leading role in the identity part while $w_{t}$ dominates the others. To obtain these coefficients, we concatenate $w_{s}$ and $w_{t}$ to form a $1 \times 1024$ vector and feed it into a four-layer Multilayer Perceptron whose output $\rho$ is the interpolation coefficient. + +Moreover, StyleGAN-like [28,29] models share the style mixing property of latent codes, which means that different layers of latent codes control different parts of attributes. For example, coarse spatial resolutions control high-level aspects like face shape and orientation while fine resolution latent control details like hair color. Motivated by this, we also investigate the layer-wise attributes in EG3D and observed similar properties. This allows us to generate more desirable swapping results by only performing interpolation + +on part of the latent codes. + +In summary, the latent code of swapped face $w_{fs}$ can be obtained by: + +$$ +w _ {f s} ^ {(i)} = \left\{ \begin{array}{c c} \rho^ {(i)} \times w _ {t} ^ {(i)} + (1 - \rho^ {(i)}) \times w _ {s} ^ {(i)} & i \in [ 5, 9 ], \\ w _ {t} ^ {(i)} & o t h e r w i s e, \end{array} \right. \tag {2} +$$ + +where the superscript $i$ denotes the layer-wise expression of $w_{fs}$ and the choice of layer, from layer 5 to layer 9, follows the definition of "middle" from StyleGAN [28], while a slight modification is made since the dimension of EG3D latent space is lower (i.e. $\mathcal{W} \in R^{14 \times 512}$ ). 
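The layer-wise mixing rule in Eq. (2) is straightforward to implement. The sketch below assumes latent codes of shape $14 \times 512$ and a per-layer coefficient vector $\rho$ produced by the MLP; the variable names are illustrative and not taken from the released code, and the coefficient sharpening of Eq. (3), described next, can be applied to $\rho$ before this step.

```python
import torch

MIX_LAYERS = range(5, 10)  # the "middle" layers 5-9 are interpolated; all others copy the target

def mix_latents(w_source, w_target, rho):
    """Compute the swapped-face latent code w_fs following Eq. (2).

    w_source, w_target: [B, 14, 512] latent codes of the source / target images.
    rho: [B, 14] per-layer interpolation coefficients from the MLP
         (values near 1 favour target attributes, values near 0 the source identity).
    """
    w_fs = w_target.clone()                    # non-mixed layers keep target attributes
    for i in MIX_LAYERS:
        r = rho[:, i].view(-1, 1)              # broadcast the scalar over the 512 channels
        w_fs[:, i] = r * w_target[:, i] + (1.0 - r) * w_source[:, i]
    return w_fs
```

After the sharpening of Eq. (3), each mixed layer becomes a near-hard choice between the source and target codes, which helps keep identity and attributes apart.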
To better disentangle identity and attributes, we apply a Sigmoid-shaped activation function with a factor $\lambda = 100$ to the $\rho$ generated by MLPs, enforcing the coefficients to be closer to 0 or 1: + +$$ +\rho_ {n e w} ^ {(i)} = \left(1 + e ^ {- \lambda \rho_ {o l d} ^ {(i)}}\right) ^ {- 1}. \tag {3} +$$ + +# 3.4. Joint Pivot Tuning + +With the encoding network trained by the well-designed optimization strategy in Sec. 3.2, we can project an input image into a code in the 3D latent space. However, the inevitable reconstruction error will degrade the performance of face swapping, which is a downstream task of 3D GAN inversion. Also, we observe that directly swap faces via latent manipulation leads to slight artifacts in the non-facial region. Motivated by PTI [47], we adopt pivot tuning on the parameters of the pretrained EG3D generator using a fixed latent code $w_{fs}$ from Sec. 3.3, but in an optimizing direction considering both reconstruction quality and face swapping performance. The process of this "joint" pivot tuning is: + +$$ +\min _ {\theta^ {*}} \left\{\mathcal {L} \left(x _ {s / t}, \mathcal {G} _ {\theta^ {*}} \left(w _ {s / t}, d _ {s / t}\right)\right) + \right. \tag {4} +$$ + +$$ +\left. \mathcal {L} \left(x _ {t} \cdot M _ {f}, \mathcal {G} _ {\theta^ {*}} \left(w _ {f s}, d _ {t}\right) \cdot M _ {f}\right) \right\}, +$$ + +where $\theta^{*}$ is the parameter of EG3D generator, $d_{s}$ is the direction of the source image, $M_{f}$ is a binary mask that shields facial region and $\mathcal{L}(\cdot ,\cdot)$ is the optimization constraint including MSE, LPIPS [58] and ID [19] losses. + +Finally, with this finetuned generator and the latent code calculated by Eq. 2, we can synthesize the swapped face $y$ in any direction $d$ by: + +$$ +y = \mathcal {G} _ {\theta^ {*}} \left(w _ {f s}, d\right). \tag {5} +$$ + +# 3.5. Objective Functions + +GAN Inversion Losses. In Eq. 1, we generally use $\mathcal{L}(\cdot ,\cdot)$ to denote the loss function of our pseudo-multi-view training strategy. Here, we give its detailed form. Following the previous work [46], we use three different objectives for supervising a pair of input image $x$ and reconstruction $x^{\prime}$ (and + +the same for $\hat{x}^{\prime}$ ), including pixel-wise $\mathcal{L}_1$ loss, Learned Perceptual Image Path Similarity [58] loss $\mathcal{L}_{LPIPS}$ , and identity similarity loss $\mathcal{L}_{id}$ maximizing the cosine similarity between two identity embeddings estimated by ArcFace [19]. The total reconstruction loss between $x$ and $x^{\prime}$ is: + +$$ +\mathcal {L} _ {r e c} \left(x, x ^ {\prime}\right) = \lambda_ {1} \mathcal {L} _ {1} \left(x, x ^ {\prime}\right) + \lambda_ {2} \mathcal {L} _ {L P I S P} \left(x, x ^ {\prime}\right) \tag {6} +$$ + +$$ ++ \lambda_ {3} \mathcal {L} _ {i d} (x, x ^ {\prime}), +$$ + +where $\lambda_{1},\lambda_{2}$ and $\lambda_{3}$ are loss weights. + +For the constraint between two latent codes, we adopt a cosine similarity: + +$$ +\mathcal {L} _ {l a t} \left(w _ {x}, w _ {\hat {x}}\right) = 1 - \cos \left(w _ {x}, w _ {\hat {x}}\right). \tag {7} +$$ + +Besides, we adopt the latent code regularization loss from pSp [46], which constrains the generated latent vector in a region to be close to the average latent vector: + +$$ +\mathcal {L} _ {r e g} (x) = \left\| \mathcal {E} _ {\theta} (x) - \bar {x} \right\| _ {2}, \tag {8} +$$ + +where $\bar{x}$ is the average of 10000 randomly sampled latent codes of EG3D generator. 
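These three inversion losses can be sketched as follows. The sketch assumes the third-party `lpips` package for the perceptual term and a generic pretrained face-embedding network (e.g. ArcFace [19]) for the identity term; both are stand-ins for tooling the paper does not spell out, and the default weights follow the values given later in Sec. 4.1.

```python
import torch
import torch.nn.functional as F
import lpips  # perceptual metric of Zhang et al. [58]; the pip package is an assumed choice

lpips_fn = lpips.LPIPS(net="alex")  # expects inputs scaled to [-1, 1]

def rec_loss(x, x_rec, id_embed, w_l1=1.0, w_lpips=0.8, w_id=1.0):
    """L_rec (Eq. (6)): pixel-wise L1 + LPIPS + identity term.

    x, x_rec: images scaled to [-1, 1]; id_embed: a face-embedding network (placeholder).
    """
    l_pix = F.l1_loss(x_rec, x)
    l_per = lpips_fn(x_rec, x).mean()
    l_id = 1.0 - F.cosine_similarity(id_embed(x_rec), id_embed(x)).mean()
    return w_l1 * l_pix + w_lpips * l_per + w_id * l_id

def latent_loss(w_x, w_x_hat):
    """L_lat (Eq. (7)): cosine distance between the two projected latent codes."""
    return 1.0 - F.cosine_similarity(w_x.flatten(1), w_x_hat.flatten(1)).mean()

def reg_loss(w_x, w_avg):
    """L_reg (Eq. (8)): keep the predicted code close to the generator's average code."""
    return (w_x - w_avg).flatten(1).norm(dim=1).mean()
```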
The overall loss function for 3D GAN inversion is: + +$$ +\begin{array}{l} \mathcal {L} _ {i n v} = \mathcal {L} _ {r e c} \left(x, x ^ {\prime}\right) + \eta \mathcal {L} _ {r e c} \left(x, \hat {x} ^ {\prime}\right) + \mathcal {L} _ {l a t} \left(w _ {x}, w _ {\hat {x}}\right) \tag {9} \\ + \mathcal {L} _ {r e g} (x). \\ \end{array} +$$ + +Face Swapping Losses. For training our face swapping module, we first design a masked pixel-wise $\mathcal{L}_2$ loss for the face irrelevant region: + +$$ +\mathcal {L} _ {2} \left(x _ {t}, y\right) = \left\| x _ {t} \cdot M _ {f} - y \cdot M _ {f} \right\| _ {2}, \tag {10} +$$ + +where $M_{f}$ is the binary mask same as in Sec. 3.4. We generate this mask according to the face segmentation labels of FFHQ [28] datasets. For 3D GAN inversion, we adopt the LPIPS [58] loss $\mathcal{L}_{LPIPS}(x_t,y)$ to learn the perceptual similarities and increase the quality of the generated images, and the binary mask is also added before feeding the image into the perceptual feature extractor. + +For 3D-aware face swapping, we additionally synthesize the swapped face $\hat{y}$ in the view of the source image, calculating both $\mathcal{L}_{id}(x_s,y)$ and $\mathcal{L}_{id}(x_s,\hat{y})$ for better identity transferring. + +Besides, $\mathcal{L}_{color}$ is designed to maintain the skin color of swapped faces: + +$$ +\mathcal {L} _ {\text {c o l o r}} (x _ {s}, y) = \| \bar {\mathcal {C}} (x _ {s} \cdot (1 - M _ {f})) - \bar {\mathcal {C}} (y \cdot (1 - M _ {f})) \| _ {2}, \tag {11} +$$ + +where $\bar{\mathcal{C}} (\cdot)$ denotes an average RGB value of the masked region. + +The overall loss function for training the face swapping module is: + +$$ +\begin{array}{l} \mathcal {L} _ {f s} = \mathcal {L} _ {2} \left(x _ {t}, y\right) + \mathcal {L} _ {L P I P S} \left(x _ {t}, y\right) + \mathcal {L} _ {i d} \left(x _ {s}, y\right) \tag {12} \\ + \mathcal {L} _ {i d} (x _ {s}, \hat {y}) + \mathcal {L} _ {c o l o r} (x _ {s}, y). \\ \end{array} +$$ + +![](images/f0959710d335e56833f3d38a55c940cce92e9bae72323375345958bc6bddd654.jpg) +Figure 4. Qualitative comparison of face swapping on CelebA-HQ dataset. Compared with all these 2D approaches, our method extracts facial shapes more accurately and transfers identity better. Moreover, since we conduct face swapping in latent space and a well-trained 3D GAN directly synthesizes the results, there are no obvious artifacts in the facial region. + +# 4. Experiments + +In this section, we first compare the proposed 3dSwap with some state-of-the-art 2D-images-based face swapping approaches. Furthermore, face swapping in a 3D-aware manner and extra evaluation metrics designed for 3D face swapping are analyzed. We finally carry out ablation studies to evaluate the effectiveness of our major design. + +# 4.1. Implementation Details + +In all experiments, Ranger optimizer [54] is applied to train our networks with a learning rate of $1 \times 10^{-4}$ . Hyperparameters are set as $\lambda_1 = \lambda_3 = 1$ , $\lambda_2 = 0.8$ in Eq. 6 and $\eta = 0.25$ in Eq. 9. For training time, the inversion module is trained for 1,000,000 steps on 4 NVIDIA RTX3090 GPUs for about 3 days while the face swapping module is trained for 500,000 steps also on 4 GPUs for about 2 days. The pivot tuning optimization during inference time takes about 8 minutes on a single GPU. + +# 4.2. 
Datasets + +We conduct experiments on two datasets: 1) The FFHQ [28] dataset contains 70,000 high-quality images of human faces crawled from Flicker with considerable variation in age, ethnicity, and background. All images of this dataset are in a resolution of $1024 \times 1024$ . 2) The CelebA-HQ [27] dataset is the high-quality version of the large-scale face attributes dataset CelebA [35] which contains 30,000 images in $1024 \times 1024$ . Specifically, we train our model on FFHQ, while comparison experiments are ex + +ecuted on CelebA-HQ. We follow the data preprocessing way of EG3D to crop images according to facial landmarks and resize them into a resolution of $512 \times 512$ . Due to the relatively expensive inference cost of 3dSwap mentioned in Sec. 4.1, we operate the following comparison experiments on 1000 source-target image pairs. + +# 4.3. Comparison with 2D Face Swapping Methods + +In this section, we compare the proposed 3dSwap with four 2D swapping methods: SimSwap [14], MegaFS [60], Infoswap [23] and Xu et al. [56]. These four methods are representative GAN-based [14,23] and GAN-inversion-based [56,60] approaches in recent years with state-of-the-art performance. Moreover, their official source codes are publicly available for us to make fair comparisons. + +Qualitative Comparison. The qualitative comparison results are shown in Fig. 4. Compared with all these 2D face swapping approaches, our methods transfer more accurate geometry features (i.e., facial contour) and detailed texture features like eye color to targets, reflecting better identity-transferring performance. Also, since we directly synthesize our final results with a well-trained generator with a properly calculated latent code, the swapped face we generate is more realistic without obvious artifacts in the facial region. More qualitative results on CelebA-HQ are provided in the supplementary material. + +Quantitative Comparison. We adopt several evaluation metrics in our quantitative experiments to show the effectiveness of our model in Table 1. Following MegaFS [60], + +
| Method | ID ↑ | Pose ↓ | Exp. ↓ |
| --- | --- | --- | --- |
| SimSwap [14] | 0.57 | 1.49 | 10.48 |
| MegaFS [60] | 0.48 | 3.95 | 14.08 |
| InfoSwap [23] | 0.61 | 2.50 | 10.63 |
| Xu et al. [56] | 0.54 | 2.66 | 12.94 |
| Ours | 0.72 | 1.68 | 13.76 |
+ +we measure the ID similarity by calculating the cosine similarity between face embeddings of the source and swapped faces that are estimated by a pretrained face recognition network [19]. Meanwhile, pose error computes the $\mathcal{L}_2$ distance between the estimated Euler Angle [49] of the target and swapped images. For expression error, we calculate an average distance among estimated facial landmarks [5]. + +For cosine similarity of identity, which is a crucial indicator for face swapping since it evaluates the quality of identity transferring, we significantly outperform all these 2D approaches. Such results and the visual effects in Fig. 4 together show that our method transfers identity better due to the application of 3D prior. For attribute preserving, our method which can be explicitly controlled by a camera pose performs rather well in pose error since it is only slightly weaker than SimSwap [14] but it reflects a poorer performance compared with 2D approaches in expression error. However, we can still claim that the proposed 3dSwap is superior to 2D methods in identity transferring and performs close to them in attribute preserving after considering all three quantitative comparison results. + +# 4.4. Further Analysis on 3D-Aware Face Swapping + +As the first 3D-aware face swapping method, the proposed 3dSwap is specialized in synthesizing multi-view-consistent results. In this section, we conduct more experiments in this track, showing some visualized comparisons on 3D consistency and raising brand-new criteria for 3D-aware face swapping. + +Visualization on Multi-View Images. To compare with 2D face swapping approaches in fairness, we first synthesize multi-view target images by using our reconstruction module and then apply SimSwap [14] and InfoSwap [23] to them. The visualized results are shown in Fig. 5, where results under different views are not as consistent as ours (i.e. shape of nose, mouth, and eyebrows changes) for the 2D face swapping method. More artifacts can be discovered when the target images are sideward. Please refer to the video in the supplementary material for more intuitional comparisons. + +![](images/95246c90f0dc73ab07aa12a8f25de081f74e28862b788dd74dc9d525b9343c5a.jpg) +Figure 5. Visualized comparison on Multi-view results among Infoswap [23], Simswap [14] and Ours. + +Criteria for 3D-Aware Face Swapping. In Sec. 4.3, the performance of identity transferring is evaluated based on the face embedding estimated by pretrained face recognition networks [19]. However, such networks are not enough robust to pose variance so it could be an unfair criterion for face swapping. For 3D-aware face swapping, we can simply synthesize a swapped face in the view of the source image. In this way, the "Aligned Identity Similarity" can be a reasonable standard to evaluate 3D-aware face swapping models. Moreover, inspired by human's ability to recognize a familiarized person from any direction, we synthesize the swapped face into 9 different fixed poses and calculate an average identity similarity together with images in source and target views. We report our results of these two evaluation metrics in Table 2 and images under these fixed poses are shown in the supplementary material. + +Table 1. Quantitative Results. We compare our model with four competing methods in ID Similarity for identity transferring and Pose & Expression Error for attribute preserving. + +
| Metric | Aligned ID Sim. ↑ | Average ID Sim. ↑ |
| --- | --- | --- |
| Ours | 0.85 | 0.42 |
+ +Table 2. Quantitative Results of New Metrics. We test the proposed 3dSwap under the two new evaluation metrics. + +# 4.5. Ablation Studies + +In this section, we conduct ablation experiments on the CelebA-HQ dataset to evaluate the effectiveness of the major design of the proposed 3dSwap. + +Effectiveness of 3D GAN Inversion. Since previous works [12, 33] do not release the code of their 3D GAN-inversion part, we follow the paper of EG3D to reproduce a pivot tuning inversion [47] to the generator with the same hyperparameters. In this section, we mainly compare our design with the optimization-based latent code projection of PTI on EG3D to show the effectiveness of the learning-based inversion algorithm we use. For the sake of fairness, + +![](images/d2f749adcbf42fd764f6462588f75b1fcd20e970194d065f742935a0f212b55a.jpg) +Figure 6. Qualitative Comparison on 3D GAN inversion. Comparing to the directly application of pivot tuning inversion, our design reconstruct details (i.e. shape and color of eyes, glasses etc.) better. + +both models are tested on the same 2000 images in CelebAHQ and adopt a parameter tuning of the pretrained generator for 500 steps. + +We show the qualitative comparison results in Fig. 6. Our design performs better in details reconstruction (i.e., eye shape, glasses, etc.) despite the optimization-based approach still recovers accurate face shape, hair color, etc. + +For 3D GAN Inversion, we adopt the same metrics as 2D GAN inversion: $\mathcal{L}_2$ distance (or MSE loss) to calculate the pixel-wise similarity, LPIPS [58] distance to evaluate the perceptual similarity and MS-SSIM [53] to show the structural similarity. Additionally, we calculate ID similarity to ensure the accuracy of the reconstruction, and the results are reported in Table 3. Our design outperforms the optimization-based approaches in all of the four criteria. + +
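For reference, these four reconstruction metrics can be computed roughly as in the sketch below; the `lpips` and `pytorch_msssim` packages and the generic face-embedding model are assumed tooling for illustration, not necessarily what was used to produce Table 3.

```python
import torch
import torch.nn.functional as F
import lpips                          # LPIPS perceptual distance [58]
from pytorch_msssim import ms_ssim    # multi-scale SSIM [53]

lpips_fn = lpips.LPIPS(net="alex")

def inversion_metrics(x, x_rec, id_embed):
    """MSE / LPIPS / MS-SSIM / ID similarity between an input x and its reconstruction x_rec.

    Both images are float tensors in [0, 1] of shape [B, 3, H, W] (H, W large enough for
    MS-SSIM, e.g. 512); id_embed is any pretrained face-recognition embedder (placeholder).
    """
    return {
        "mse": F.mse_loss(x_rec, x).item(),
        "lpips": lpips_fn(x_rec * 2 - 1, x * 2 - 1).mean().item(),  # lpips expects [-1, 1]
        "ms_ssim": ms_ssim(x_rec, x, data_range=1.0).item(),
        "id_sim": F.cosine_similarity(id_embed(x_rec), id_embed(x)).mean().item(),
    }
```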
| Method | MSE ↓ | LPIPS ↓ | SSIM ↑ | ID Sim. ↑ |
| --- | --- | --- | --- | --- |
| EG3D with Opt. | 0.0896 | 0.2761 | 0.6197 | 0.7318 |
| Ours | 0.0168 | 0.1049 | 0.7348 | 0.8616 |
+ +Table 3. Quantitative Results on 3D GAN inversion. We compare our 3D GAN inversion module with an optimization-based inversion on EG3D under four common evaluation metrics in the 2D GAN inversion task. + +Effectiveness of Style Mixing. As mentioned in Sec. 3.3, we adopt style mixing and latent code interpolation for face swapping. Here, we briefly show the effectiveness of style mixing. A comparison of our model with and without style mixing can be seen in Fig. 7. Identity can be ideally transferred between sources and targets under both settings, however, attributes including skin color, background, etc. would be prominently affected if we interpolate in all layers of latent codes as shown in the third column. + +![](images/1aca44d9bbf2b8e7a9caa5de4e20c2168eb55309c7848ef6e6c14b1390186bd2.jpg) +Figure 7. Visualization of face swapping results with and without style mixing. + +# 5. Conclusion + +We propose a novel 3D-aware face swapping method 3dSwap that generates high-fidelity and multi-view-consistent swapped faces. To leverage both geometry and texture prior of the 3D human face, we project the input images into the latent space of the 3D-aware generative model by introducing a learning-based inversion. A latent code manipulation algorithm, consisting of style mixing and latent code interpolation, is then designed to achieve 3D GAN-inversion-based face swapping. We further bridge the image quality between 2D generating and 3D rendering by applying a joint pivot tuning. To the best of our knowledge, 3dSwap is the first 3D-aware face swapping method, thus it sets a strong baseline for future research on 3D forgery detection and face swapping. + +Limitations. Since we need to project input images into the latent space of a 3D GAN which contains far more information than that of 2D GANs, we tune the parameters of the pretrained generator during testing, leading to a rather long inference time. Moreover, since the final results are rendered by a 3D generator, our method fails to accurately reconstruct clothing, backgrounds, etc in the image limited by the current development of 3D-aware generative models. + +Broader Impacts. Although not the purpose of this work, photorealistic swapped faces may potentially be abused. On the other hand, our model can be used to generate high-quality and multi-viewed examples to facilitate face forgery detection [11]. + +Acknowledgements. This work was supported by NSFC (62201342), Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102), and the Fundamental Research Funds for the Central Universities. + +# References + +[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In ICCV, pages 4431-4440, 2019. +[2] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan++: How to edit the embedded images? In CVPR, pages 8293-8302, 2020. +[3] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In ICCV, pages 6691–6700, 2021. +[4] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In CVPR, pages 18511-18521, 2022. +[5] Tadas Baltrusaitis, Peter Robinson, and Louis-Philippe Morency. Openface: An open source facial behavior analysis toolkit. In WACV, pages 1–10, 2016. +[6] Jianmin Bao, Dong Chen, Fang Wen, Houqiang Li, and Gang Hua. Towards open-set identity preserving face synthesis. In CVPR, pages 6713-6722, 2018. 
+[7] Volker Blanz, Kristina Scherbaum, Thomas Vetter, and Hans-Peter Seidel. Exchanging faces in images. Comput. Graph. Forum, 23(3):669-676, 2004. +[8] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In SIGGRAPH, pages 187-194, 1999. +[9] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019. +[10] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional $\pi$ -gan for single image to neural radiance fields translation. In CVPR, pages 3971-3980, 2022. +[11] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In CVPR, pages 4103-4112, 2022. +[12] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. +[13] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. +[14] Renwang Chen, Xuanhong Chen, Bingbing Ni, and Yanhao Ge. Simswap: An efficient framework for high fidelity face swapping. In ACMMM, pages 2003-2011, 2020. +[15] Yi-Ting Cheng, Virginia Tzeng, Yu Liang, Chuan-Chang Wang, Bing-Yu Chen, Yung-Yu Chuang, and Ming Ouhyoung. 3d-model-based face replacement in video. In SIGGRAPH, 2009. +[16] Edo Collins, Raja Bala, Bob Price, and Sabine Süsstrunk. Editing in style: Uncovering the local semantics of gans. In CVPR, pages 5770-5779, 2020. + +[17] Antonia Creswell and Anil Anthony Bharath. Inverting the generator of a generative adversarial network. IEEE Trans. Neural Networks Learn. Syst., 30(7):1967-1974, 2019. +[18] DeepFakes. https://github.com/ondyari/FaceForensics/tree/master/dataset/DeepFakes. Accessed:2022-10-18. +[19] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. +[20] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. GRAM: generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10663-10673, 2022. +[21] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPRW, pages 285-295, 2019. +[22] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. ACM Trans. Graph., 40(4):88:1-88:13, 2021. +[23] Gege Gao, Huaibo Huang, Chaoyou Fu, Zhaoyang Li, and Ran He. Information bottleneck disentanglement for identity swapping. In CVPR, pages 3404-3413, 2021. +[24] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial networks. Commun. ACM, 63(11):139–144, 2020. +[25] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2022. +[26] Shanyan Guan, Ying Tai, Bingbing Ni, Feida Zhu, Feiyue Huang, and Xiaokang Yang. Collaborative learning for faster stylegan embedding. CoRR, abs/2007.01758, 2020. +[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. 
Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018.
+[28] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019.
+[29] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8107-8116, 2020.
+[30] Ira Kemelmacher-Shlizerman. Transfiguring portraits. ACM Trans. Graph., 35(4):94:1-94:8, 2016.
+[31] Iryna Korshunova, Wenzhe Shi, Joni Dambre, and Lucas Theis. Fast face-swap using convolutional neural networks. In ICCV, pages 3697-3705, 2017.
+[32] Lingzhi Li, Jianmin Bao, Hao Yang, Dong Chen, and Fang Wen. Faceshifter: Towards high fidelity and occlusion aware face swapping. CoRR, abs/1912.13457, 2019.
+[33] Connor Z. Lin, David B. Lindell, Eric R. Chan, and Gordon Wetzstein. 3d GAN inversion for controllable portrait image animation. CoRR, abs/2203.13441, 2022.
+[34] Yuan Lin, Shengjin Wang, Qian Lin, and Feng Tang. Face swapping under large pose variations: A 3d model based approach. In ICME, pages 333-338, 2012.
+
+[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In ICCV, 2015.
+[36] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 405-421, 2020.
+[37] Saleh Mosaddegh, Loïc Simon, and Frédéric Jurie. Photorealistic face de-identification by aggregating donors' face components. In ACCV, pages 159-174, 2014.
+[38] Jacek Naruniec, Leonhard Helminger, Christopher Schroers, and Romann M. Weber. High-resolution neural face swapping for visual effects. Comput. Graph. Forum, 39(4):173-184, 2020.
+[39] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. Fsnet: An identity-aware generative model for image-based face swapping. In ACCV, pages 117-132, 2018.
+[40] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. RSGAN: face swapping and editing using face and hair representation in latent spaces. In SIGGRAPH, pages 69:1-69:2, 2018.
+[41] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In ICCV, pages 7587-7596, 2019.
+[42] Yuval Nirkin, Yosi Keller, and Tal Hassner. FSGAN: subject agnostic face swapping and reenactment. In ICCV, pages 7183-7192, 2019.
+[43] Yuval Nirkin, Iacopo Masi, Anh Tuan Tran, Tal Hassner, and Gérard G. Medioni. On face segmentation, face swapping, and face perception. In AFGR, 2018.
+[44] Yotam Nitzan, Amit Bermano, Yangyan Li, and Daniel Cohen-Or. Face identity disentanglement via latent space mapping. ACM Trans. Graph., 39(6):225:1-225:14, 2020.
+[45] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13493-13503, 2022.
+[46] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: A stylegan encoder for image-to-image translation. In CVPR, pages 2287-2296, 2021.
+[47] Daniel Roich, Ron Mokady, Amit H. Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Trans. Graph., pages 1-13, 2022.
+[48] Arun Ross and Asem A. Othman. Visual cryptography for biometric privacy. IEEE Trans. Inf. Forensics Secur., 6(1):70-81, 2011.
+[49] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In CVPR, pages 2074-2083, 2018.
+[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: generative radiance fields for 3d-aware image synthesis. In NeurIPS, 2020.
+[51] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. ACM Trans. Graph., 40(4):133:1-133:14, 2021.
+
+[52] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. In CVPR, pages 11369-11378, 2022.
+[53] Zhou Wang, Eero P. Simoncelli, and Alan C. Bovik. Multiscale structural similarity for image quality assessment. In ACSSC, 2003.
+[54] Less Wright. Ranger - a synergistic optimizer. https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer. Accessed: 2022-9-18.
+[55] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022.
+[56] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022.
+[57] Zhiliang Xu, Hang Zhou, Zhibin Hong, Ziwei Liu, Jiaming Liu, Zhizhi Guo, Junyu Han, Jingtuo Liu, Errui Ding, and Jingdong Wang. Styleswap: Style-based generator empowers robust face swapping. In ECCV, pages 661-677, 2022.
+[58] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018.
+[59] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A. Efros. Generative visual manipulation on the natural image manifold. In ECCV, pages 597-613, 2016.
+[60] Yuhao Zhu, Qi Li, Jian Wang, Cheng-Zhong Xu, and Zhenan Sun. One shot face swapping on megapixels. In CVPR, pages 4834-4844, 2021.
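The following is a minimal, illustrative sketch (not the authors' released code) of the latent code manipulation referenced in the style-mixing ablation and detailed in Sec. 3.3: layer-wise interpolation between the source and target EG3D latent codes (Eq. 2), with sigmoid-sharpened coefficients (Eq. 3). The 14 x 512 latent shape, the interpolated layer range [5, 9], the four-layer MLP, and the factor lambda = 100 follow the paper; the hidden width and the pooling of the two latent codes into the MLP's 1 x 1024 input are assumptions made for illustration only.

```python
import torch
import torch.nn as nn

NUM_LAYERS, LATENT_DIM = 14, 512   # EG3D latent codes: W+ in R^{14 x 512}
MIX_LAYERS = range(5, 10)          # "middle" layers carry identity; the rest stay target
LAMBDA = 100.0                     # sharpening factor in Eq. 3


class CoeffMLP(nn.Module):
    """Four-layer MLP predicting per-layer interpolation coefficients rho (Eq. 2/3)."""

    def __init__(self, hidden: int = 512):  # hidden width is an assumption
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(2 * LATENT_DIM, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, NUM_LAYERS),
        )

    def forward(self, ws_vec: torch.Tensor, wt_vec: torch.Tensor) -> torch.Tensor:
        rho = self.net(torch.cat([ws_vec, wt_vec], dim=-1))  # concat to a 1 x 1024 input
        return torch.sigmoid(LAMBDA * rho)                   # Eq. 3: push rho towards 0 or 1


def swap_latents(w_s: torch.Tensor, w_t: torch.Tensor, mlp: CoeffMLP) -> torch.Tensor:
    """Blend source/target latents of shape (B, 14, 512) into w_fs following Eq. 2."""
    # Averaging the per-layer codes before the MLP is an assumption for illustration.
    rho = mlp(w_s.mean(dim=1), w_t.mean(dim=1))  # (B, 14)
    w_fs = w_t.clone()                           # attributes default to the target code
    for i in MIX_LAYERS:                         # interpolate only the "middle" layers
        r = rho[:, i:i + 1]                      # (B, 1), broadcast over the 512 dims
        w_fs[:, i] = r * w_t[:, i] + (1.0 - r) * w_s[:, i]
    return w_fs


# Usage sketch: given an inverted source/target pair, the swapped face would then be
# rendered by the (pivot-tuned) EG3D generator as y = G(w_fs, d) for any direction d.
w_s = torch.randn(1, NUM_LAYERS, LATENT_DIM)
w_t = torch.randn(1, NUM_LAYERS, LATENT_DIM)
w_fs = swap_latents(w_s, w_t, CoeffMLP())
print(w_fs.shape)  # torch.Size([1, 14, 512])
```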
\ No newline at end of file diff --git a/2023/3D-Aware Face Swapping/images.zip b/2023/3D-Aware Face Swapping/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..799347b54f43bd19f03e7808371091d2fd76406e --- /dev/null +++ b/2023/3D-Aware Face Swapping/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4177d0d5ded5572a1b7a8febcd0a44be654ee4eff6444dd587c1f8deda4d2dfa +size 577460 diff --git a/2023/3D-Aware Face Swapping/layout.json b/2023/3D-Aware Face Swapping/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1a197a477134003f0e98f72518178b252f1c9743 --- /dev/null +++ b/2023/3D-Aware Face Swapping/layout.json @@ -0,0 +1,8950 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 216, + 103, + 378, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 103, + 378, + 121 + ], + "spans": [ + { + "bbox": [ + 216, + 103, + 378, + 121 + ], + "type": "text", + "content": "3D-Aware Face Swapping" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 143, + 516, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 143, + 516, + 185 + ], + "spans": [ + { + "bbox": [ + 77, + 143, + 516, + 185 + ], + "type": "text", + "content": "Yixuan Li Chao Ma* Yichao Yan* Wenhan Zhu Xiaokang Yang MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, China {lyx0208, chaoma, yanyichao, zhuwenhan823, xkyang}@sjtu.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 214, + 121, + 357 + ], + "blocks": [ + { + "bbox": [ + 50, + 214, + 121, + 357 + ], + "lines": [ + { + "bbox": [ + 50, + 214, + 121, + 357 + ], + "spans": [ + { + "bbox": [ + 50, + 214, + 121, + 357 + ], + "type": "image", + "image_path": "db2a32cb00c1a096529925fa25c9fba4b1870e608e0ae2061971e078920cfdd6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 360, + 101, + 371 + ], + "lines": [ + { + "bbox": [ + 69, + 360, + 101, + 371 + ], + "spans": [ + { + "bbox": [ + 69, + 360, + 101, + 371 + ], + "type": "text", + "content": "Source" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 121, + 214, + 262, + 357 + ], + "blocks": [ + { + "bbox": [ + 121, + 214, + 262, + 357 + ], + "lines": [ + { + "bbox": [ + 121, + 214, + 262, + 357 + ], + "spans": [ + { + "bbox": [ + 121, + 214, + 262, + 357 + ], + "type": "image", + "image_path": "645096ca6885eef89157cd30fb1a86f49e4367afe473d1792a150ff241f269a7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 360, + 171, + 372 + ], + "lines": [ + { + "bbox": [ + 141, + 360, + 171, + 372 + ], + "spans": [ + { + "bbox": [ + 141, + 360, + 171, + 372 + ], + "type": "text", + "content": "Target" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 262, + 215, + 403, + 357 + ], + "blocks": [ + { + "bbox": [ + 262, + 215, + 403, + 357 + ], + "lines": [ + { + "bbox": [ + 262, + 215, + 403, + 357 + ], + "spans": [ + { + "bbox": [ + 262, + 215, + 403, + 357 + ], + "type": "image", + "image_path": "2beb072e7b4fef458b7c6794a12f1ee2512754c90d1d15db8a9f3820adc3bb98.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 288, + 360, + 376, + 371 + ], + "lines": [ + { + "bbox": [ + 288, + 360, + 376, + 371 + ], + 
"spans": [ + { + "bbox": [ + 288, + 360, + 376, + 371 + ], + "type": "text", + "content": "Intermediate Views" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 380, + 546, + 404 + ], + "lines": [ + { + "bbox": [ + 46, + 380, + 546, + 404 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 546, + 404 + ], + "type": "text", + "content": "Figure 1. Demonstration of the proposed 3dSwap. Given single-view source and target images, our method synthesizes high-fidelity and multi-view-consistent images of the swapped faces and the corresponding geometries. More results can be found on our project page." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 403, + 215, + 474, + 357 + ], + "blocks": [ + { + "bbox": [ + 403, + 215, + 474, + 357 + ], + "lines": [ + { + "bbox": [ + 403, + 215, + 474, + 357 + ], + "spans": [ + { + "bbox": [ + 403, + 215, + 474, + 357 + ], + "type": "image", + "image_path": "eb4dbc647fb79c18b1d27f2fa5a36f98974b2b986d386ec70793a2f5050469c1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 360, + 468, + 371 + ], + "lines": [ + { + "bbox": [ + 410, + 360, + 468, + 371 + ], + "spans": [ + { + "bbox": [ + 410, + 360, + 468, + 371 + ], + "type": "text", + "content": "Source View" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 474, + 215, + 544, + 357 + ], + "blocks": [ + { + "bbox": [ + 474, + 215, + 544, + 357 + ], + "lines": [ + { + "bbox": [ + 474, + 215, + 544, + 357 + ], + "spans": [ + { + "bbox": [ + 474, + 215, + 544, + 357 + ], + "type": "image", + "image_path": "f7226b694a58a983e49cf7078fa142fa89701e0100628cdfba1e0c25c2b1f54b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 485, + 360, + 531, + 372 + ], + "lines": [ + { + "bbox": [ + 485, + 360, + 531, + 372 + ], + "spans": [ + { + "bbox": [ + 485, + 360, + 531, + 372 + ], + "type": "text", + "content": "Geometry" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "spans": [ + { + "bbox": [ + 143, + 414, + 192, + 426 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 45, + 437, + 289, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 437, + 289, + 689 + ], + "spans": [ + { + "bbox": [ + 45, + 437, + 289, + 689 + ], + "type": "text", + "content": "Face swapping is an important research topic in computer vision with wide applications in entertainment and privacy protection. Existing methods directly learn to swap 2D facial images, taking no account of the geometric information of human faces. In the presence of large pose variance between the source and the target faces, there always exist undesirable artifacts on the swapped face. In this paper, we present a novel 3D-aware face swapping method that generates high-fidelity and multi-view-consistent swapped faces from single-view source and target images. To achieve this, we take advantage of the strong geometry and texture prior of 3D human faces, where the 2D faces are projected into the latent space of a 3D generative model. 
By disentangling the identity and attribute features in the latent space, we succeed in swapping faces in a 3D-aware manner, being robust to pose variations while transferring fine-grained facial details. Extensive experiments demonstrate the superiority of our 3D-aware face swapping framework in terms of visual quality, identity similarity, and multi-view consistency. Code is available at https://1yx0208.github.io/3dSwap." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 414, + 386, + 426 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 436, + 545, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 545, + 519 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 545, + 519 + ], + "type": "text", + "content": "Face swapping aims to transfer the identity of a person in the source image to another person in the target image while preserving other attributes like head pose, expression, illumination, background, etc. It has attracted extensive attention recently in the academic and industrial world for its potential wide applications in entertainment [14,30,38] and privacy protection [7,37,48]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 546, + 713 + ], + "type": "text", + "content": "The key of face swapping is to transfer the geometric shape of the facial region (i.e., eyes, nose, mouth) and detailed texture information (such as the color of eyes) from the source image to the target image while preserving both geometry and texture of non-facial regions (i.e., hair, background, etc). Currently, some 3D-based methods consider geometry prior of human faces by fitting the input image to 3D face models such as 3D Morphable Model (3DMM) [8] to overcome the differences of face orientation and expression between sources and targets [7, 15, 34, 43]. However, these parametric face models only produce coarse frontal faces without fine-grained details, leading to low-resolution and fuzzy swapping results. On the other hand, following Generative Adversarial Network [24], GAN-based [6, 23, 32, 39, 40, 42] or GAN-inversion-based [44, 55, 57, 60] approaches adopt the ad" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 62, + 702, + 146, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 702, + 146, + 712 + ], + "spans": [ + { + "bbox": [ + 62, + 702, + 146, + 712 + ], + "type": "text", + "content": "* Corresponding authors." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12705" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 167 + ], + "type": "text", + "content": "versarial training strategy to learn texture information from inputs. Despite the demonstrated photorealistic and high-resolution images, the swapped faces via 2D GANs sustain undesirable artifacts when two input faces undergo large pose variation since the strong 3D geometry prior of human faces is ignored. Moreover, learning to swap faces in 2D images makes little use of the shaped details from sources, leading to poorer performance on identity transferring." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 288, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 288, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 288, + 370 + ], + "type": "text", + "content": "Motivated by the recent advances of 3D generative models [12, 13, 20, 25, 45] in synthesizing multi-view consistent images and high-quality 3D shapes, it naturally raises a question: can we perform face swapping in a 3D-aware manner to exploit the strong geometry and texture priors? To answer this question, two challenges arise. First, how to infer 3D prior directly from 3D-GAN models still remains open. Current 3D-aware generative models synthesize their results from a random Gaussian noise " + }, + { + "bbox": [ + 46, + 168, + 288, + 370 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 46, + 168, + 288, + 370 + ], + "type": "text", + "content": ", so that their output images are not controllable. This increases the complexity of inferring the required prior from arbitrary input. Second, the inferred prior corresponding to input images is in the form of a high-dimension feature vector in the latent space of 3D GANs. Simply synthesizing multi-view target images referring to the prior and applying 2D face swapping to them produces not only inconsistent artifacts but also a heavy computational load." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 371, + 287, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 371, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 46, + 371, + 287, + 646 + ], + "type": "text", + "content": "To address these challenges, we systematically investigate the geometry and texture prior of these 3D generative models and propose a novel 3D-aware face swapping framework 3dSwap. We introduce a 3D GAN inversion framework to project the 2D inputs into the 3D latent space, motivated by recent GAN inversion approaches [46, 47, 51]. 
Specifically, we design a learning-based inversion algorithm that trains an encoding network to efficiently and robustly project input images into the latent space of EG3D [12]. However, directly borrowing the architecture from 2D approaches is not yet enough since a single-view input provides limited information about the whole human face. To further improve the multi-view consistency of latent code projection, we design a pseudo-multi-view training strategy. This design effectively bridges the domain gap between 2D and 3D. To tackle the second problem, we design a face swapping algorithm based on the 3D latent codes and directly synthesize the swapped faces with the 3D-aware generator. In this way, we achieve 3D GAN-inversion-based face swapping by a latent code manipulating algorithm consisting of style-mixing and interpolation, where latent code interpolation is responsible for identity transferring while style-mixing helps to preserve attributes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 647, + 238, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 647, + 238, + 658 + ], + "spans": [ + { + "bbox": [ + 59, + 647, + 238, + 658 + ], + "type": "text", + "content": "In summary, our contributions are threefold:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 665, + 287, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 665, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 665, + 287, + 712 + ], + "type": "text", + "content": "- To the best of our knowledge, we first address the 3D-aware face swapping task. The proposed 3dSwap method sets a strong baseline and we hope this work will foster future research into this task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 72, + 545, + 232 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 317, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 545, + 156 + ], + "type": "text", + "content": "- We design a learning-based 3D GAN inversion with the pseudo-multi-view training strategy to extract geometry and texture prior from arbitrary input images. We further utilize these strong prior by designing a latent code manipulating algorithm, with which we directly synthesize the final results with the pretrained generator." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 160, + 545, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 160, + 545, + 232 + ], + "spans": [ + { + "bbox": [ + 317, + 160, + 545, + 232 + ], + "type": "text", + "content": "- Extensive experiments on benchmark datasets demonstrate the superiority of the proposed 3dSwap over state-of-the-art 2D face swapping approaches in identity transferring. Our reconstruction module for 3DGAN inversion performs favorably over the state-of-the-art methods as well." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 244, + 392, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 244, + 392, + 257 + ], + "spans": [ + { + "bbox": [ + 306, + 244, + 392, + 257 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 265, + 545, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 265, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 265, + 545, + 396 + ], + "type": "text", + "content": "Face Swapping. Face swapping has emerged as a popular research topic in the field of computer vision in recent years. Currently, it can be classified into two categories: 3D-based and GAN-based methods. Specifically, 3D-based methods [7, 15, 34, 43] fit input images into 3D parametric face models (i.e. 3DMM [8]) to overcome the problems of posture or perspective difference between input images. However, the performance of such methods is usually limited by the reconstruction results. GAN-based methods [6, 18, 23, 31, 32, 39, 40, 42] adopt the adversarial training strategy to generate photorealistic fake faces." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 397, + 545, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 397, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 397, + 545, + 601 + ], + "type": "text", + "content": "Early GAN-based face swapping methods are subject-specific, i.e. DeepFake [18] and Korshunova et al. [31] are required to train different models for different inputs. The subject-specific approaches have limited real applications since face swapping is required to be applicable to any unseen pair of input images, and such limitation is addressed in latter subject-agnostic face swapping approaches [6, 23, 32, 39, 40, 42]. To increase the resolution of generated images, MegaFS [60] firstly proposes a GAN-inversion-based face swapping method, utilizing StyleGAN [28] to synthesize megapixel-level swapping faces. Xu et al. [56] and StyleSwap [57] integrate the StyleGAN2 [29] generator to their face swapping pipeline, applying its strong prior to generate high-resolution swapped faces. Following these approaches, we furtherly extend the face swapping task into 3D latent space to capture fine-grained details of face shape and strengthen the robustness under large pose variance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 713 + ], + "type": "text", + "content": "3D-Aware Generative Models. The 3D-aware generative models are aimed to synthesize 3D-aware (i.e., can be explicitly controlled by the camera pose) images from 2D image collections. HoloGAN [41] firstly proposes a 3D-aware generative model through learning the voxel features, whereas it only generates low-resolution results due to the limitation of computational cost. Recently, several works utilize the NeRF [36] representation [12, 20, 25, 45, 50]. 
GRAF [50] adopts the approach of patch sampling to elim" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12706" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 70, + 545, + 246 + ], + "blocks": [ + { + "bbox": [ + 48, + 70, + 545, + 246 + ], + "lines": [ + { + "bbox": [ + 48, + 70, + 545, + 246 + ], + "spans": [ + { + "bbox": [ + 48, + 70, + 545, + 246 + ], + "type": "image", + "image_path": "20011f7b7de3afcc456fd6d41159db98b0d24f7f9b0e11b9c733257da699b212.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 252, + 548, + 299 + ], + "lines": [ + { + "bbox": [ + 46, + 252, + 548, + 299 + ], + "spans": [ + { + "bbox": [ + 46, + 252, + 548, + 299 + ], + "type": "text", + "content": "Figure 2. The pipeline of our 3D-aware face swapping method, 3dSwap. In the first stage, we infer 3D geometry and texture prior of both source and target images with an encoder. We then design a latent code manipulation algorithm consisting of style mixing and interpolation to conduct face swapping based on these priors. Finally, swapped faces in any view direction can be synthesized by 3dSwap after fine-tuning the parameters of the generator following the joint pivot tuning optimization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 319, + 289, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 319, + 289, + 462 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 289, + 462 + ], + "type": "text", + "content": "inate computational costs during training. GRAM [20] estimates radiance manifolds to produce realistic images with fine details and strong 3D consistency. StyleNeRF [25] integrates NeRF with style-based generators and proposes a better up-sampler and a new regularization loss to mitigate inconsistencies. StyleSDF [45] presents a Signed Distance Field (SDF) based on 3D modeling that defines detailed 3D surfaces. EG3D [12] raises a novel tri-plane representation for efficient 3D-aware image generation. Due to the strong generative capability of these 3D-aware generative models, we leverage them to infer fine 3D prior from 2D images for our 3D-aware face swapping framework." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "text", + "content": "GAN Inversion. Since Generative Adversarial Network [24], numerous generative models reflect great abilities in synthesizing high-quality images [9,12,25,28,29,45]. To fully leverage these well-trained GANs, the task of GAN inversion emerges recently. 
In particular, GAN inversion is aimed to project a given image back to a vector " + }, + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "text", + "content": " in the latent space of a pretrained GAN model so that this image can be faithfully reconstructed from " + }, + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 472, + 287, + 567 + ], + "type": "text", + "content": " by the generator." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "text", + "content": "Early works invert images into Gaussian noise " + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "inline_equation", + "content": "z \\in R^{1 \\times 512}" + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "text", + "content": " or semantic latent space " + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{W} \\in R^{1 \\times 512}" + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "text", + "content": " [1,16,17,59]. Abdal et al. [2] firstly extend latent space to " + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{W} + \\in R^{18 \\times 512}" + }, + { + "bbox": [ + 47, + 568, + 287, + 676 + ], + "type": "text", + "content": " for more accurate reconstruction. To predict the latent code, learning-based methods [3,26,46,51,52] train an encoder for latent projection, while optimization-based methods [1,2,16,17] directly find the optimal code step-by-step from noise. Hybrid methods [4,47,59] combine both to optimize latent codes initialized by encoders." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "In addition, there are a few inversion works for 3D generative models. Pix2NeRF [10] is proposed to generate Neural Radiance Fields (NeRF) [36] of an object applying a" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 319, + 547, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 547, + 392 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 547, + 392 + ], + "type": "text", + "content": "single input image based on a pretrained " + }, + { + "bbox": [ + 304, + 319, + 547, + 392 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 304, + 319, + 547, + 392 + ], + "type": "text", + "content": "-GAN [13]. Connor et al. [33] leverage EG3D [12] and a pretrained 3DMM predictor [22] to reconstruct a 3D human face, which could be further animated or edited. Our reconstruction model is also in this catalog, while the adopted learning-based algorithm is more robust and efficient compared with them." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 407, + 362, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 407, + 362, + 420 + ], + "spans": [ + { + "bbox": [ + 306, + 407, + 362, + 420 + ], + "type": "text", + "content": "3. 
Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 430, + 373, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 430, + 373, + 441 + ], + "spans": [ + { + "bbox": [ + 306, + 430, + 373, + 441 + ], + "type": "text", + "content": "3.1. Overview" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "text", + "content": "Given single-view source and target images, we aim to synthesize multi-view-consistent face images with identity from source image " + }, + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "inline_equation", + "content": "x_{s}" + }, + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "text", + "content": " and other attributes from target image " + }, + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 304, + 450, + 547, + 715 + ], + "type": "text", + "content": ". Fig. 2 demonstrates the overall pipeline and notations of the proposed 3dSwap. First, to extract accurate geometry and texture prior from 2D images, we conduct a learning-based 3D GAN inversion, training an encoding network to project the inputs into the latent space of a 3D-aware generative model. Specifically, we design a pseudomulti-view optimization strategy to train the encoder with a feature pyramid architecture from pSp [46], empowering the latent code projection with the 3D consistency of the state-of-the-art 3D GAN, i.e. EG3D [12] (Sec. 3.2). Then, to disentangle identity from attributes in the latent space, we design a latent code manipulation algorithm consisting of style mixing and interpolation (Sec. 3.3). Finally, for the purpose of improving the overall quality of our results, bridging the gap between 2D image generating and 3D rendering, we implement a joint pivot tuning on parameters of the pretrained EG3D generator (Sec. 3.4). The networks are trained with a set of well-designed loss functions to enforce identity transferring and attribute preserving (Sec. 3.5)." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12707" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 236, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 236, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 236, + 85 + ], + "type": "text", + "content": "3.2. Inferring 3D Prior from 2D Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "spans": [ + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": "To infer geometry and texture prior from a 2D image, we leverage the state-of-the-art 3D-aware generative model, i.e. EG3D [12] by projecting the inputs into its latent space. 
Since the optimization-based algorithm [47] is inefficient and less robust to non-front faces, we propose a learning-based inversion algorithm where an encoding network is trained to project the single-view inputs into the 3D latent space. Different from 2D StyleGAN-like models which totally rely on the latent code " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": " to generate the corresponding output: " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "y = \\mathcal{G}(w)" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": ", the 3D-aware generative model has an extra input " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": " which controls the pose of synthesized image: " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "y = \\mathcal{G}(w,d)" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": ". This indicates that latent codes and generated images are not bijections for 3D GANs since multi-view images of the same person can be synthesized using the same " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": " but different " + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 90, + 289, + 317 + ], + "type": "text", + "content": ". Taking this property into account, we design a pseudo-multi-view training strategy, using a generated image in a different view from the source image to improve the consistency of latent code projection. Fig. 3 illustrates the pipeline of our design." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": "Specifically, we first use an encoder to project the input image " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " into the latent space " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " and get a high-dimension intermediate latent vector " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "w_{x} = \\mathcal{E}_{\\theta}(x)" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " is the pSp encoder with parameters " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": ". 
Then, with the pretrained EG3D generator " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "\\mathcal{G}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " and input direction " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " estimated by Deep3d Face Reconstruction [21], we synthesize the reconstructed result " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "x' = \\mathcal{G}(w_{x}, d)" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": ". For a 2D GAN inversion approach, this ground-truth and reconstructed image pair " + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "inline_equation", + "content": "(x, x')" + }, + { + "bbox": [ + 46, + 318, + 289, + 437 + ], + "type": "text", + "content": " is enough, but it is inadequate for 3D GANs due to the non-bijective property." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": "Ideally, this issue can be addressed by feeding multi-view images of a person into the encoder and minimizing the distance between their output vectors. However, it is difficult to obtain large-scale multi-view data, and we usually only have single-view images of a person in the training dataset. To this end, we additionally sample a random direction " + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "inline_equation", + "content": "\\hat{d}" + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": " and use the generator to synthesize " + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "inline_equation", + "content": "\\hat{x} = \\mathcal{G}(w_x,\\hat{d})" + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": " with the same latent code. This output image " + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "inline_equation", + "content": "\\hat{x}" + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": ", which is called a pseudo-input since it is generated by the 3D GAN, is again fed into the encoder-decoder structure to get " + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "inline_equation", + "content": "w_{\\hat{x}} = \\mathcal{E}_{\\theta}(\\hat{x})" + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "inline_equation", + "content": "\\hat{x}^{\\prime} = \\mathcal{G}(w_{\\hat{x}},d)" + }, + { + "bbox": [ + 46, + 437, + 289, + 568 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": "Now, we can define our optimization objectives. 
Following the usual inversion approaches, we apply some pixelwise loss functions between the input " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " and its reconstruction " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "x'" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": ". Under the setting of our pseudo-multi-view input, we add constraints between the two latent codes " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "w_{x}" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "w_{\\hat{x}}" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " for the purpose of maintaining 3D consistency. We further restrain pixel-level distance between the second-order output " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "\\hat{x}'" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " synthesized with " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "w_{\\hat{x}}" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " and the origin input " + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 569, + 289, + 688 + ], + "type": "text", + "content": " to reinforce such constraint. In summary, this three-termed optimization can be written as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 698, + 287, + 716 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 698, + 287, + 716 + ], + "spans": [ + { + "bbox": [ + 80, + 698, + 287, + 716 + ], + "type": "interline_equation", + "content": "\\min _ {\\theta} \\left\\{\\mathcal {L} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\right\\}, \\tag {1}", + "image_path": "713533fbc23a02e48d87bf107d0ab07c74ab640d41416cb31f864b4601141ea0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 547, + 205 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 547, + 205 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 547, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 547, + 205 + ], + "type": "image", + "image_path": "5f4dffb41fb01f22db715da2fc91824845b9416888bef1bcd43edbc53ea6e5aa.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 213, + 545, + 225 + ], + "lines": [ + { + "bbox": [ + 306, + 213, + 545, + 225 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 545, + 225 + ], + "type": "text", + "content": "Figure 3. The pipeline of our pseudo-multi-view training strategy." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": " is the parameter of encoder, " + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": " is a trade-off parameter and " + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": " denotes the loss functions which will be further discussed in Sec. 3.5. After optimizing the parameters of the encoding network with this strategy, we can obtain rather accurate 3D prior " + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "inline_equation", + "content": "w_{x}" + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": " from any given input " + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 245, + 545, + 306 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 312, + 544, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 312, + 544, + 326 + ], + "spans": [ + { + "bbox": [ + 305, + 312, + 544, + 326 + ], + "type": "text", + "content": "3.3. Face Swapping via Latent Code Manipulation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "content": "To take full advantage of the prior extracted from the 3D GAN model, we calculate the latent code for the swapped face based on latent codes " + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "inline_equation", + "content": "w_{s} = \\mathcal{E}_{\\theta}(x_{s})" + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "content": " of the source image " + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "inline_equation", + "content": "x_{s}" + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "inline_equation", + "content": "w_{t} = \\mathcal{E}_{\\theta}(x_{t})" + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "content": " of the target image " + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 304, + 331, + 545, + 390 + ], + "type": "text", + "content": ". Before that, we step back and think about what these latent codes represent." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 391, + 546, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 391, + 546, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 391, + 546, + 510 + ], + "type": "text", + "content": "A face image usually contains different attributes such as face shape, hairstyle, skin color, etc. With the encoder discussed in Sec. 3.2, we embed all these attributes in the high-dimension latent vectors. However, identity features depending on the geometry of facial region (i.e., eyes, nose, mouth, cheek, and so on) also implicitly lie in such latent codes. For the task of face swapping, it is desirable if identity features can be disentangled from attribute features in the latent code. Afterward, we can simply exchange the identity part of the latent codes to achieve face swapping." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": "Since such identity and attributes are typically entangled in the latent codes, we design an interpolation strategy between the source and target latent codes with learnable coefficients. Here, the source latent code " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "w_{s}" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " plays a leading role in the identity part while " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "w_{t}" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " dominates the others. To obtain these coefficients, we concatenate " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "w_{s}" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "w_{t}" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " to form a " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "1 \\times 1024" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " vector and feed it into a four-layer Multilayer Perceptron whose output " + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 510, + 547, + 605 + ], + "type": "text", + "content": " is the interpolation coefficient." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 714 + ], + "type": "text", + "content": "Moreover, StyleGAN-like [28,29] models share the style mixing property of latent codes, which means that different layers of latent codes control different parts of attributes. For example, coarse spatial resolutions control high-level aspects like face shape and orientation while fine resolution latent control details like hair color. Motivated by this, we also investigate the layer-wise attributes in EG3D and observed similar properties. 
This allows us to generate more desirable swapping results by only performing interpolation" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12708" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 155, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 155, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 155, + 83 + ], + "type": "text", + "content": "on part of the latent codes." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 84, + 287, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 287, + 109 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 287, + 109 + ], + "type": "text", + "content": "In summary, the latent code of swapped face " + }, + { + "bbox": [ + 47, + 84, + 287, + 109 + ], + "type": "inline_equation", + "content": "w_{fs}" + }, + { + "bbox": [ + 47, + 84, + 287, + 109 + ], + "type": "text", + "content": " can be obtained by:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 117, + 287, + 158 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 117, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 49, + 117, + 287, + 158 + ], + "type": "interline_equation", + "content": "w _ {f s} ^ {(i)} = \\left\\{ \\begin{array}{c c} \\rho^ {(i)} \\times w _ {t} ^ {(i)} + (1 - \\rho^ {(i)}) \\times w _ {s} ^ {(i)} & i \\in [ 5, 9 ], \\\\ w _ {t} ^ {(i)} & o t h e r w i s e, \\end{array} \\right. \\tag {2}", + "image_path": "dc90195726a584dc8b483abd2285f89e94f01e5280cf8f72e6178ee1c380c7b0.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": "where the superscript " + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": " denotes the layer-wise expression of " + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "inline_equation", + "content": "w_{fs}" + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": " and the choice of layer, from layer 5 to layer 9, follows the definition of \"middle\" from StyleGAN [28], while a slight modification is made since the dimension of EG3D latent space is lower (i.e. " + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{W} \\in R^{14 \\times 512}" + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": "). 
To better disentangle identity and attributes, we apply a Sigmoid-shaped activation function with a factor " + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "inline_equation", + "content": "\\lambda = 100" + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": " to the " + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 47, + 159, + 287, + 255 + ], + "type": "text", + "content": " generated by MLPs, enforcing the coefficients to be closer to 0 or 1:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 264, + 287, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 264, + 287, + 281 + ], + "spans": [ + { + "bbox": [ + 113, + 264, + 287, + 281 + ], + "type": "interline_equation", + "content": "\\rho_ {n e w} ^ {(i)} = \\left(1 + e ^ {- \\lambda \\rho_ {o l d} ^ {(i)}}\\right) ^ {- 1}. \\tag {3}", + "image_path": "3465ffa33eeb153ea668437c2b25251b8ef1b0179786cfae7ead5c848e7f9948.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 294, + 157, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 294, + 157, + 307 + ], + "spans": [ + { + "bbox": [ + 47, + 294, + 157, + 307 + ], + "type": "text", + "content": "3.4. Joint Pivot Tuning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 313, + 287, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 313, + 287, + 457 + ], + "spans": [ + { + "bbox": [ + 46, + 313, + 287, + 457 + ], + "type": "text", + "content": "With the encoding network trained by the well-designed optimization strategy in Sec. 3.2, we can project an input image into a code in the 3D latent space. However, the inevitable reconstruction error will degrade the performance of face swapping, which is a downstream task of 3D GAN inversion. Also, we observe that directly swap faces via latent manipulation leads to slight artifacts in the non-facial region. Motivated by PTI [47], we adopt pivot tuning on the parameters of the pretrained EG3D generator using a fixed latent code " + }, + { + "bbox": [ + 46, + 313, + 287, + 457 + ], + "type": "inline_equation", + "content": "w_{fs}" + }, + { + "bbox": [ + 46, + 313, + 287, + 457 + ], + "type": "text", + "content": " from Sec. 3.3, but in an optimizing direction considering both reconstruction quality and face swapping performance. The process of this \"joint\" pivot tuning is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 476, + 287, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 476, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 97, + 476, + 287, + 498 + ], + "type": "interline_equation", + "content": "\\min _ {\\theta^ {*}} \\left\\{\\mathcal {L} \\left(x _ {s / t}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {s / t}, d _ {s / t}\\right)\\right) + \\right. \\tag {4}", + "image_path": "ed97d527f9516ec2428ee184278b2f42a37ee5692a5746f65b8b352f09df6584.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 97, + 497, + 236, + 510 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 497, + 236, + 510 + ], + "spans": [ + { + "bbox": [ + 97, + 497, + 236, + 510 + ], + "type": "interline_equation", + "content": "\\left. 
\\mathcal {L} \\left(x _ {t} \\cdot M _ {f}, \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d _ {t}\\right) \\cdot M _ {f}\\right) \\right\\},", + "image_path": "6795588c2474a5aa41514cc85c39f4b1da6eca3dd4c31b30e19a95f32ea095ff.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "inline_equation", + "content": "\\theta^{*}" + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "content": " is the parameter of EG3D generator, " + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "inline_equation", + "content": "d_{s}" + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "content": " is the direction of the source image, " + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "inline_equation", + "content": "M_{f}" + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "content": " is a binary mask that shields facial region and " + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 47, + 518, + 287, + 567 + ], + "type": "text", + "content": " is the optimization constraint including MSE, LPIPS [58] and ID [19] losses." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "text", + "content": "Finally, with this finetuned generator and the latent code calculated by Eq. 2, we can synthesize the swapped face " + }, + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "text", + "content": " in any direction " + }, + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 567, + 287, + 602 + ], + "type": "text", + "content": " by:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 613, + 287, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 613, + 287, + 625 + ], + "spans": [ + { + "bbox": [ + 130, + 613, + 287, + 625 + ], + "type": "interline_equation", + "content": "y = \\mathcal {G} _ {\\theta^ {*}} \\left(w _ {f s}, d\\right). \\tag {5}", + "image_path": "890b5e05a0ae83f979171309023e23180348acccb61e4e0b6e2740cf2e8c4ee6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 635, + 164, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 164, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 164, + 647 + ], + "type": "text", + "content": "3.5. Objective Functions" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "text", + "content": "GAN Inversion Losses. In Eq. 
1, we generally use " + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "text", + "content": " to denote the loss function of our pseudo-multi-view training strategy. Here, we give its detailed form. Following previous work [46], we use three different objectives for supervising a pair of input image " + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "text", + "content": " and reconstruction " + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "x^{\\prime}" + }, + { + "bbox": [ + 47, + 653, + 287, + 713 + ], + "type": "text", + "content": " (and" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "the same for " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\hat{x}^{\\prime}" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "), including pixel-wise " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_1" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": " loss, Learned Perceptual Image Patch Similarity [58] loss " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{LPIPS}" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": ", and identity similarity loss " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": " maximizing the cosine similarity between two identity embeddings estimated by ArcFace [19]. 
The total reconstruction loss between " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "x^{\\prime}" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 137, + 545, + 156 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 137, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 320, + 137, + 545, + 156 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) = \\lambda_ {1} \\mathcal {L} _ {1} \\left(x, x ^ {\\prime}\\right) + \\lambda_ {2} \\mathcal {L} _ {L P I P S} \\left(x, x ^ {\\prime}\\right) \\tag {6}", + "image_path": "88f4f37f1bacf29f7139da06907da45c83bedc9eb628338e62bc33cea6d10b1c.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 453, + 153, + 518, + 165 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 453, + 153, + 518, + 165 + ], + "spans": [ + { + "bbox": [ + 453, + 153, + 518, + 165 + ], + "type": "interline_equation", + "content": "+ \\lambda_ {3} \\mathcal {L} _ {i d} (x, x ^ {\\prime}),", + "image_path": "902fd76e33177c02bfd8b4c14931486ad87b27a83275da9bba9868445eb81bbd.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "spans": [ + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "inline_equation", + "content": "\\lambda_{1},\\lambda_{2}" + }, + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "inline_equation", + "content": "\\lambda_{3}" + }, + { + "bbox": [ + 306, + 173, + 459, + 185 + ], + "type": "text", + "content": " are loss weights." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "spans": [ + { + "bbox": [ + 305, + 186, + 545, + 209 + ], + "type": "text", + "content": "For the constraint between two latent codes, we adopt a cosine similarity loss:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 355, + 217, + 545, + 230 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 217, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 355, + 217, + 545, + 230 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) = 1 - \\cos \\left(w _ {x}, w _ {\\hat {x}}\\right). 
\\tag {7}", + "image_path": "595cfd6ff2abdf0d5f17b72126490c00223aabf8402cec6e8fcfe3d2429727b3.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 239, + 545, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 239, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 239, + 545, + 274 + ], + "type": "text", + "content": "Besides, we adopt the latent code regularization loss from pSp [46], which constrains the generated latent vector in a region to be close to the average latent vector:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 372, + 282, + 545, + 296 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 282, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 372, + 282, + 545, + 296 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e g} (x) = \\left\\| \\mathcal {E} _ {\\theta} (x) - \\bar {x} \\right\\| _ {2}, \\tag {8}", + "image_path": "47d30fac4ae25f9aca9c733376fe3f247dfa0fae6ceb45024ef1b6fb7222c772.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 304, + 545, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 304, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 305, + 304, + 545, + 338 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 304, + 545, + 338 + ], + "type": "inline_equation", + "content": "\\bar{x}" + }, + { + "bbox": [ + 305, + 304, + 545, + 338 + ], + "type": "text", + "content": " is the average of 10000 randomly sampled latent codes of EG3D generator. The overall loss function for 3D GAN inversion is:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 345, + 545, + 373 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 345, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 315, + 345, + 545, + 373 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {i n v} = \\mathcal {L} _ {r e c} \\left(x, x ^ {\\prime}\\right) + \\eta \\mathcal {L} _ {r e c} \\left(x, \\hat {x} ^ {\\prime}\\right) + \\mathcal {L} _ {l a t} \\left(w _ {x}, w _ {\\hat {x}}\\right) \\tag {9} \\\\ + \\mathcal {L} _ {r e g} (x). \\\\ \\end{array}", + "image_path": "60c03ce7ce61a977a6a41667fcf5cfb0f8f1b3c0f9c7874245854681db15db9e.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 384, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 545, + 419 + ], + "type": "text", + "content": "Face Swapping Losses. 
For training our face swapping module, we first design a masked pixel-wise " + }, + { + "bbox": [ + 305, + 384, + 545, + 419 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_2" + }, + { + "bbox": [ + 305, + 384, + 545, + 419 + ], + "type": "text", + "content": " loss for the face-irrelevant region:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 354, + 428, + 545, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 428, + 545, + 441 + ], + "spans": [ + { + "bbox": [ + 354, + 428, + 545, + 441 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {2} \\left(x _ {t}, y\\right) = \\left\\| x _ {t} \\cdot M _ {f} - y \\cdot M _ {f} \\right\\| _ {2}, \\tag {10}", + "image_path": "5e24eed357e0832387d1b285b8acc2102136761f8016bde6f26af0b786749879.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "inline_equation", + "content": "M_{f}" + }, + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "text", + "content": " is the same binary mask as in Sec. 3.4. We generate this mask according to the face segmentation labels of the FFHQ [28] dataset. As in 3D GAN inversion, we adopt the LPIPS [58] loss " + }, + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{LPIPS}(x_t,y)" + }, + { + "bbox": [ + 304, + 449, + 545, + 532 + ], + "type": "text", + "content": " to learn perceptual similarities and increase the quality of the generated images, and the binary mask is also applied before feeding the image into the perceptual feature extractor." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "text", + "content": "For 3D-aware face swapping, we additionally synthesize the swapped face " + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\hat{y}" + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "text", + "content": " in the view of the source image, calculating both " + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}(x_s,y)" + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}(x_s,\\hat{y})" + }, + { + "bbox": [ + 304, + 533, + 545, + 580 + ], + "type": "text", + "content": " for better identity transferring." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 580, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 580, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 305, + 580, + 545, + 605 + ], + "type": "text", + "content": "Besides, " + }, + { + "bbox": [ + 305, + 580, + 545, + 605 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{color}" + }, + { + "bbox": [ + 305, + 580, + 545, + 605 + ], + "type": "text", + "content": " is designed to maintain the skin color of swapped faces:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 612, + 545, + 626 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 612, + 545, + 626 + ], + "spans": [ + { + "bbox": [ + 310, + 612, + 545, + 626 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {c o l o r}} (x _ {s}, y) = \\| \\bar {\\mathcal {C}} (x _ {s} \\cdot (1 - M _ {f})) - \\bar {\\mathcal {C}} (y \\cdot (1 - M _ {f})) \\| _ {2}, \\tag {11}", + "image_path": "b9a430a0a38b3af381da2042f601870a8503162a269931c6ee167572fd6d6ef8.jpg" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 305, + 633, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 633, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 305, + 633, + 545, + 656 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 633, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\bar{\\mathcal{C}} (\\cdot)" + }, + { + "bbox": [ + 305, + 633, + 545, + 656 + ], + "type": "text", + "content": " denotes an average RGB value of the masked region." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 305, + 658, + 545, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 658, + 545, + 681 + ], + "spans": [ + { + "bbox": [ + 305, + 658, + 545, + 681 + ], + "type": "text", + "content": "The overall loss function for training the face swapping module is:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 318, + 687, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 687, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 318, + 687, + 545, + 715 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {f s} = \\mathcal {L} _ {2} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {L P I P S} \\left(x _ {t}, y\\right) + \\mathcal {L} _ {i d} \\left(x _ {s}, y\\right) \\tag {12} \\\\ + \\mathcal {L} _ {i d} (x _ {s}, \\hat {y}) + \\mathcal {L} _ {c o l o r} (x _ {s}, y). 
\\\\ \\end{array}", + "image_path": "f801cf449e702b33df537c7a08094fa1842c0c925ed5b30cc1b983ed4ffa4744.jpg" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "12709" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 545, + 288 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 545, + 288 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 545, + 288 + ], + "type": "image", + "image_path": "f0959710d335e56833f3d38a55c940cce92e9bae72323375345958bc6bddd654.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 297, + 546, + 331 + ], + "lines": [ + { + "bbox": [ + 46, + 297, + 546, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 546, + 331 + ], + "type": "text", + "content": "Figure 4. Qualitative comparison of face swapping on CelebA-HQ dataset. Compared with all these 2D approaches, our method extracts facial shapes more accurately and transfers identity better. Moreover, since we conduct face swapping in latent space and a well-trained 3D GAN directly synthesizes the results, there are no obvious artifacts in the facial region." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 350, + 128, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 350, + 128, + 363 + ], + "spans": [ + { + "bbox": [ + 47, + 350, + 128, + 363 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 370, + 287, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 370, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 46, + 370, + 287, + 442 + ], + "type": "text", + "content": "In this section, we first compare the proposed 3dSwap with some state-of-the-art 2D-images-based face swapping approaches. Furthermore, face swapping in a 3D-aware manner and extra evaluation metrics designed for 3D face swapping are analyzed. We finally carry out ablation studies to evaluate the effectiveness of our major design." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 451, + 180, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 451, + 180, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 451, + 180, + 464 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "content": "In all experiments, Ranger optimizer [54] is applied to train our networks with a learning rate of " + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "content": ". 
Hyperparameters are set as " + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\lambda_1 = \\lambda_3 = 1" + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\lambda_2 = 0.8" + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "content": " in Eq. 6 and " + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\eta = 0.25" + }, + { + "bbox": [ + 46, + 470, + 287, + 578 + ], + "type": "text", + "content": " in Eq. 9. For training time, the inversion module is trained for 1,000,000 steps on 4 NVIDIA RTX3090 GPUs for about 3 days while the face swapping module is trained for 500,000 steps also on 4 GPUs for about 2 days. The pivot tuning optimization during inference time takes about 8 minutes on a single GPU." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 586, + 110, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 110, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 110, + 597 + ], + "type": "text", + "content": "4.2. Datasets" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "content": "We conduct experiments on two datasets: 1) The FFHQ [28] dataset contains 70,000 high-quality images of human faces crawled from Flicker with considerable variation in age, ethnicity, and background. All images of this dataset are in a resolution of " + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "content": ". 2) The CelebA-HQ [27] dataset is the high-quality version of the large-scale face attributes dataset CelebA [35] which contains 30,000 images in " + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 46, + 605, + 288, + 715 + ], + "type": "text", + "content": ". Specifically, we train our model on FFHQ, while comparison experiments are ex" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 350, + 546, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 546, + 425 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 546, + 425 + ], + "type": "text", + "content": "ecuted on CelebA-HQ. We follow the data preprocessing way of EG3D to crop images according to facial landmarks and resize them into a resolution of " + }, + { + "bbox": [ + 304, + 350, + 546, + 425 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 304, + 350, + 546, + 425 + ], + "type": "text", + "content": ". Due to the relatively expensive inference cost of 3dSwap mentioned in Sec. 4.1, we operate the following comparison experiments on 1000 source-target image pairs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 431, + 543, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 431, + 543, + 445 + ], + "spans": [ + { + "bbox": [ + 305, + 431, + 543, + 445 + ], + "type": "text", + "content": "4.3. 
Comparison with 2D Face Swapping Methods" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 450, + 545, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 450, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 450, + 545, + 536 + ], + "type": "text", + "content": "In this section, we compare the proposed 3dSwap with four 2D swapping methods: SimSwap [14], MegaFS [60], Infoswap [23] and Xu et al. [56]. These four methods are representative GAN-based [14,23] and GAN-inversion-based [56,60] approaches in recent years with state-of-the-art performance. Moreover, their official source codes are publicly available for us to make fair comparisons." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 540, + 546, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 546, + 672 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 546, + 672 + ], + "type": "text", + "content": "Qualitative Comparison. The qualitative comparison results are shown in Fig. 4. Compared with all these 2D face swapping approaches, our methods transfer more accurate geometry features (i.e., facial contour) and detailed texture features like eye color to targets, reflecting better identity-transferring performance. Also, since we directly synthesize our final results with a well-trained generator with a properly calculated latent code, the swapped face we generate is more realistic without obvious artifacts in the facial region. More qualitative results on CelebA-HQ are provided in the supplementary material." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 715 + ], + "type": "text", + "content": "Quantitative Comparison. We adopt several evaluation metrics in our quantitative experiments to show the effectiveness of our model in Table 1. Following MegaFS [60]," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12710" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 80, + 70, + 254, + 155 + ], + "blocks": [ + { + "bbox": [ + 80, + 70, + 254, + 155 + ], + "lines": [ + { + "bbox": [ + 80, + 70, + 254, + 155 + ], + "spans": [ + { + "bbox": [ + 80, + 70, + 254, + 155 + ], + "type": "table", + "html": "
MethodID ↑Pose ↓Exp. ↓
SimSwap [14]0.571.4910.48
MegaFS [60]0.483.9514.08
InfoSwap [23]0.612.5010.63
Xu et al. [56]0.542.6612.94
Ours0.721.6813.76
", + "image_path": "54d4ad58df6408285f72ecca00fcb2dfc4642e4c4f51b2a7bc37d9ba3eb05a6e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 219, + 286, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 219, + 286, + 304 + ], + "spans": [ + { + "bbox": [ + 46, + 219, + 286, + 304 + ], + "type": "text", + "content": "we measure the ID similarity by calculating the cosine similarity between face embeddings of the source and swapped faces that are estimated by a pretrained face recognition network [19]. Meanwhile, pose error computes the " + }, + { + "bbox": [ + 46, + 219, + 286, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_2" + }, + { + "bbox": [ + 46, + 219, + 286, + 304 + ], + "type": "text", + "content": " distance between the estimated Euler Angle [49] of the target and swapped images. For expression error, we calculate an average distance among estimated facial landmarks [5]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 304, + 286, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 304, + 286, + 472 + ], + "spans": [ + { + "bbox": [ + 46, + 304, + 286, + 472 + ], + "type": "text", + "content": "For cosine similarity of identity, which is a crucial indicator for face swapping since it evaluates the quality of identity transferring, we significantly outperform all these 2D approaches. Such results and the visual effects in Fig. 4 together show that our method transfers identity better due to the application of 3D prior. For attribute preserving, our method which can be explicitly controlled by a camera pose performs rather well in pose error since it is only slightly weaker than SimSwap [14] but it reflects a poorer performance compared with 2D approaches in expression error. However, we can still claim that the proposed 3dSwap is superior to 2D methods in identity transferring and performs close to them in attribute preserving after considering all three quantitative comparison results." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 482, + 286, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 482, + 286, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 482, + 286, + 495 + ], + "type": "text", + "content": "4.4. Further Analysis on 3D-Aware Face Swapping" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 502, + 286, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 502, + 286, + 573 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 286, + 573 + ], + "type": "text", + "content": "As the first 3D-aware face swapping method, the proposed 3dSwap is specialized in synthesizing multi-view-consistent results. In this section, we conduct more experiments in this track, showing some visualized comparisons on 3D consistency and raising brand-new criteria for 3D-aware face swapping." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": "Visualization on Multi-View Images. To compare with 2D face swapping approaches in fairness, we first synthesize multi-view target images by using our reconstruction module and then apply SimSwap [14] and InfoSwap [23] to them. The visualized results are shown in Fig. 
5, where results under different views are not as consistent as ours (i.e. shape of nose, mouth, and eyebrows changes) for the 2D face swapping method. More artifacts can be discovered when the target images are sideward. Please refer to the video in the supplementary material for more intuitional comparisons." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 69, + 545, + 223 + ], + "blocks": [ + { + "bbox": [ + 307, + 69, + 545, + 223 + ], + "lines": [ + { + "bbox": [ + 307, + 69, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 307, + 69, + 545, + 223 + ], + "type": "image", + "image_path": "95246c90f0dc73ab07aa12a8f25de081f74e28862b788dd74dc9d525b9343c5a.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 230, + 544, + 253 + ], + "lines": [ + { + "bbox": [ + 306, + 230, + 544, + 253 + ], + "spans": [ + { + "bbox": [ + 306, + 230, + 544, + 253 + ], + "type": "text", + "content": "Figure 5. Visualized comparison on Multi-view results among Infoswap [23], Simswap [14] and Ours." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 272, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 272, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 272, + 545, + 464 + ], + "type": "text", + "content": "Criteria for 3D-Aware Face Swapping. In Sec. 4.3, the performance of identity transferring is evaluated based on the face embedding estimated by pretrained face recognition networks [19]. However, such networks are not enough robust to pose variance so it could be an unfair criterion for face swapping. For 3D-aware face swapping, we can simply synthesize a swapped face in the view of the source image. In this way, the \"Aligned Identity Similarity\" can be a reasonable standard to evaluate 3D-aware face swapping models. Moreover, inspired by human's ability to recognize a familiarized person from any direction, we synthesize the swapped face into 9 different fixed poses and calculate an average identity similarity together with images in source and target views. We report our results of these two evaluation metrics in Table 2 and images under these fixed poses are shown in the supplementary material." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 325, + 473, + 526, + 510 + ], + "blocks": [ + { + "bbox": [ + 46, + 163, + 287, + 197 + ], + "lines": [ + { + "bbox": [ + 46, + 163, + 287, + 197 + ], + "spans": [ + { + "bbox": [ + 46, + 163, + 287, + 197 + ], + "type": "text", + "content": "Table 1. Quantitative Results. We compare our model with four competing methods in ID Similarity for identity transferring and Pose & Expression Error for attribute preserving." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 325, + 473, + 526, + 510 + ], + "lines": [ + { + "bbox": [ + 325, + 473, + 526, + 510 + ], + "spans": [ + { + "bbox": [ + 325, + 473, + 526, + 510 + ], + "type": "table", + "html": "
MetricAligned ID Sim.↑Average ID Sim. ↑
Ours0.850.42
", + "image_path": "e3706ab8789bdb58b116e12940b6367a47ff1ddad7afcad186cfd72cf6f8b835.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 518, + 544, + 541 + ], + "lines": [ + { + "bbox": [ + 305, + 518, + 544, + 541 + ], + "spans": [ + { + "bbox": [ + 305, + 518, + 544, + 541 + ], + "type": "text", + "content": "Table 2. Quantitative Results of New Metrics. We test the proposed 3dSwap under the two new evaluation metrics." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 560, + 406, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 560, + 406, + 572 + ], + "spans": [ + { + "bbox": [ + 306, + 560, + 406, + 572 + ], + "type": "text", + "content": "4.5. Ablation Studies" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 578, + 544, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 578, + 544, + 614 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 544, + 614 + ], + "type": "text", + "content": "In this section, we conduct ablation experiments on the CelebA-HQ dataset to evaluate the effectiveness of the major design of the proposed 3dSwap." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "Effectiveness of 3D GAN Inversion. Since previous works [12, 33] do not release the code of their 3D GAN-inversion part, we follow the paper of EG3D to reproduce a pivot tuning inversion [47] to the generator with the same hyperparameters. In this section, we mainly compare our design with the optimization-based latent code projection of PTI on EG3D to show the effectiveness of the learning-based inversion algorithm we use. For the sake of fairness," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12711" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 70, + 284, + 232 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 284, + 232 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 284, + 232 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 284, + 232 + ], + "type": "image", + "image_path": "d2f749adcbf42fd764f6462588f75b1fcd20e970194d065f742935a0f212b55a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 241, + 287, + 275 + ], + "lines": [ + { + "bbox": [ + 46, + 241, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 241, + 287, + 275 + ], + "type": "text", + "content": "Figure 6. Qualitative Comparison on 3D GAN inversion. Comparing to the directly application of pivot tuning inversion, our design reconstruct details (i.e. shape and color of eyes, glasses etc.) better." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 296, + 287, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 296, + 287, + 331 + ], + "spans": [ + { + "bbox": [ + 46, + 296, + 287, + 331 + ], + "type": "text", + "content": "both models are tested on the same 2000 images in CelebAHQ and adopt a parameter tuning of the pretrained generator for 500 steps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 332, + 287, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 332, + 287, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 332, + 287, + 380 + ], + "type": "text", + "content": "We show the qualitative comparison results in Fig. 6. Our design performs better in details reconstruction (i.e., eye shape, glasses, etc.) despite the optimization-based approach still recovers accurate face shape, hair color, etc." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 380, + 287, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 287, + 476 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 287, + 476 + ], + "type": "text", + "content": "For 3D GAN Inversion, we adopt the same metrics as 2D GAN inversion: " + }, + { + "bbox": [ + 46, + 380, + 287, + 476 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_2" + }, + { + "bbox": [ + 46, + 380, + 287, + 476 + ], + "type": "text", + "content": " distance (or MSE loss) to calculate the pixel-wise similarity, LPIPS [58] distance to evaluate the perceptual similarity and MS-SSIM [53] to show the structural similarity. Additionally, we calculate ID similarity to ensure the accuracy of the reconstruction, and the results are reported in Table 3. Our design outperforms the optimization-based approaches in all of the four criteria." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 47, + 487, + 292, + 536 + ], + "blocks": [ + { + "bbox": [ + 47, + 487, + 292, + 536 + ], + "lines": [ + { + "bbox": [ + 47, + 487, + 292, + 536 + ], + "spans": [ + { + "bbox": [ + 47, + 487, + 292, + 536 + ], + "type": "table", + "html": "
MethodMSE ↓LPIPS ↓SSIM ↑ID Sim.↑
EG3D with Opt.0.08960.27610.61970.7318
Ours0.01680.10490.73480.8616
", + "image_path": "f98bcdba3fc9f82c0b11878b85d46c72bec3faef1d1c2d4c1c29491f3a2e29d3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 544, + 287, + 588 + ], + "lines": [ + { + "bbox": [ + 46, + 544, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 46, + 544, + 287, + 588 + ], + "type": "text", + "content": "Table 3. Quantitative Results on 3D GAN inversion. We compare our 3D GAN inversion module with an optimization-based inversion on EG3D under four common evaluation metrics in the 2D GAN inversion task." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 713 + ], + "type": "text", + "content": "Effectiveness of Style Mixing. As mentioned in Sec. 3.3, we adopt style mixing and latent code interpolation for face swapping. Here, we briefly show the effectiveness of style mixing. A comparison of our model with and without style mixing can be seen in Fig. 7. Identity can be ideally transferred between sources and targets under both settings, however, attributes including skin color, background, etc. would be prominently affected if we interpolate in all layers of latent codes as shown in the third column." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 308, + 70, + 544, + 243 + ], + "blocks": [ + { + "bbox": [ + 308, + 70, + 544, + 243 + ], + "lines": [ + { + "bbox": [ + 308, + 70, + 544, + 243 + ], + "spans": [ + { + "bbox": [ + 308, + 70, + 544, + 243 + ], + "type": "image", + "image_path": "1aca44d9bbf2b8e7a9caa5de4e20c2168eb55309c7848ef6e6c14b1390186bd2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 252, + 545, + 275 + ], + "lines": [ + { + "bbox": [ + 306, + 252, + 545, + 275 + ], + "spans": [ + { + "bbox": [ + 306, + 252, + 545, + 275 + ], + "type": "text", + "content": "Figure 7. Visualization of face swapping results with and without style mixing." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 297, + 378, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 378, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 378, + 309 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 318, + 545, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 486 + ], + "type": "text", + "content": "We propose a novel 3D-aware face swapping method 3dSwap that generates high-fidelity and multi-view-consistent swapped faces. To leverage both geometry and texture prior of the 3D human face, we project the input images into the latent space of the 3D-aware generative model by introducing a learning-based inversion. A latent code manipulation algorithm, consisting of style mixing and latent code interpolation, is then designed to achieve 3D GAN-inversion-based face swapping. We further bridge the image quality between 2D generating and 3D rendering by applying a joint pivot tuning. To the best of our knowledge, 3dSwap is the first 3D-aware face swapping method, thus it sets a strong baseline for future research on 3D forgery detection and face swapping." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 493, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 493, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 493, + 545, + 590 + ], + "type": "text", + "content": "Limitations. Since we need to project input images into the latent space of a 3D GAN which contains far more information than that of 2D GANs, we tune the parameters of the pretrained generator during testing, leading to a rather long inference time. Moreover, since the final results are rendered by a 3D generator, our method fails to accurately reconstruct clothing, backgrounds, etc in the image limited by the current development of 3D-aware generative models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 597, + 545, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 597, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 304, + 597, + 545, + 657 + ], + "type": "text", + "content": "Broader Impacts. Although not the purpose of this work, photorealistic swapped faces may potentially be abused. On the other hand, our model can be used to generate high-quality and multi-viewed examples to facilitate face forgery detection [11]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgements. This work was supported by NSFC (62201342), Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102), and the Fundamental Research Funds for the Central Universities." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12712" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 124 + ], + "type": "text", + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In ICCV, pages 4431-4440, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 125, + 287, + 158 + ], + "type": "text", + "content": "[2] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan++: How to edit the embedded images? In CVPR, pages 8293-8302, 2020." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 286, + 192 + ], + "type": "text", + "content": "[3] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In ICCV, pages 6691–6700, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 286, + 236 + ], + "type": "text", + "content": "[4] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In CVPR, pages 18511-18521, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 239, + 286, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 286, + 271 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 286, + 271 + ], + "type": "text", + "content": "[5] Tadas Baltrusaitis, Peter Robinson, and Louis-Philippe Morency. Openface: An open source facial behavior analysis toolkit. In WACV, pages 1–10, 2016." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 274, + 286, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 274, + 286, + 305 + ], + "spans": [ + { + "bbox": [ + 53, + 274, + 286, + 305 + ], + "type": "text", + "content": "[6] Jianmin Bao, Dong Chen, Fang Wen, Houqiang Li, and Gang Hua. Towards open-set identity preserving face synthesis. In CVPR, pages 6713-6722, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 308, + 286, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 308, + 286, + 340 + ], + "spans": [ + { + "bbox": [ + 53, + 308, + 286, + 340 + ], + "type": "text", + "content": "[7] Volker Blanz, Kristina Scherbaum, Thomas Vetter, and Hans-Peter Seidel. Exchanging faces in images. Comput. Graph. Forum, 23(3):669-676, 2004." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 342, + 286, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 286, + 363 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 286, + 363 + ], + "type": "text", + "content": "[8] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In SIGGRAPH, pages 187-194, 1999." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 365, + 286, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 365, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 365, + 286, + 396 + ], + "type": "text", + "content": "[9] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 399, + 286, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 286, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 286, + 441 + ], + "type": "text", + "content": "[10] Shengqu Cai, Anton Obukhov, Dengxin Dai, and Luc Van Gool. Pix2nerf: Unsupervised conditional " + }, + { + "bbox": [ + 48, + 399, + 286, + 441 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 48, + 399, + 286, + 441 + ], + "type": "text", + "content": "-gan for single image to neural radiance fields translation. 
In CVPR, pages 3971-3980, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 444, + 286, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 444, + 286, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 444, + 286, + 487 + ], + "type": "text", + "content": "[11] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In CVPR, pages 4103-4112, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 489, + 286, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 489, + 286, + 553 + ], + "spans": [ + { + "bbox": [ + 48, + 489, + 286, + 553 + ], + "type": "text", + "content": "[12] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J. Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 556, + 286, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 556, + 286, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 556, + 286, + 599 + ], + "type": "text", + "content": "[13] Eric R. Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. Pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 601, + 286, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 286, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 286, + 634 + ], + "type": "text", + "content": "[14] Renwang Chen, Xuanhong Chen, Bingbing Ni, and Yanhao Ge. Simswap: An efficient framework for high fidelity face swapping. In ACMMM, pages 2003-2011, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 286, + 677 + ], + "type": "text", + "content": "[15] Yi-Ting Cheng, Virginia Tzeng, Yu Liang, Chuan-Chang Wang, Bing-Yu Chen, Yung-Yu Chuang, and Ming Ouhyoung. 3d-model-based face replacement in video. In SIGGRAPH, 2009." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 286, + 713 + ], + "type": "text", + "content": "[16] Edo Collins, Raja Bala, Bob Price, and Sabine Süsstrunk. Editing in style: Uncovering the local semantics of gans. In CVPR, pages 5770-5779, 2020." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[17] Antonia Creswell and Anil Anthony Bharath. Inverting the generator of a generative adversarial network. IEEE Trans. Neural Networks Learn. Syst., 30(7):1967-1974, 2019." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 108, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 545, + 138 + ], + "type": "text", + "content": "[18] DeepFakes. https://github.com/ondyari/FaceForensics/tree/master/dataset/DeepFakes. Accessed:2022-10-18." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "text", + "content": "[19] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 175, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 207 + ], + "type": "text", + "content": "[20] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. GRAM: generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10663-10673, 2022." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "type": "text", + "content": "[21] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In CVPRW, pages 285-295, 2019." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 253, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 545, + 296 + ], + "type": "text", + "content": "[22] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3d face model from in-the-wild images. ACM Trans. Graph., 40(4):88:1-88:13, 2021." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 545, + 330 + ], + "type": "text", + "content": "[23] Gege Gao, Huaibo Huang, Chaoyou Fu, Zhaoyang Li, and Ran He. Information bottleneck disentanglement for identity swapping. In CVPR, pages 3404-3413, 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 332, + 545, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 374 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 374 + ], + "type": "text", + "content": "[24] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial networks. Commun. ACM, 63(11):139–144, 2020." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 376, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 409 + ], + "type": "text", + "content": "[25] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2022." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 411, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 545, + 442 + ], + "type": "text", + "content": "[26] Shanyan Guan, Ying Tai, Bingbing Ni, Feida Zhu, Feiyue Huang, and Xiaokang Yang. Collaborative learning for faster stylegan embedding. CoRR, abs/2007.01758, 2020." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 444, + 545, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 545, + 475 + ], + "type": "text", + "content": "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "type": "text", + "content": "[28] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 512, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 545, + 553 + ], + "type": "text", + "content": "[29] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8107-8116, 2020." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 556, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 556, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 556, + 545, + 578 + ], + "type": "text", + "content": "[30] Ira Kemelmacher-Shlizerman. Transfiguring portraits. ACM Trans. Graph., 35(4):94:1-94:8, 2016." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 580, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 545, + 611 + ], + "type": "text", + "content": "[31] Iryna Korshunova, Wenzhe Shi, Joni Dambre, and Lucas Theis. Fast face-swap using convolutional neural networks. In ICCV, pages 3697-3705, 2017." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "type": "text", + "content": "[32] Lingzhi Li, Jianmin Bao, Hao Yang, Dong Chen, and Fang Wen. Faceshifter: Towards high fidelity and occlusion aware face swapping. CoRR, abs/1912.13457, 2019." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 647, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 678 + ], + "type": "text", + "content": "[33] Connor Z. Lin, David B. Lindell, Eric R. Chan, and Gordon Wetzstein. 3d GAN inversion for controllable portrait image animation. CoRR, abs/2203.13441, 2022." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[34] Yuan Lin, Shengjin Wang, Qian Lin, and Feng Tang. Face swapping under large pose variations: A 3d model based approach. In ICME, pages 333-338, 2012." + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12713" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 286, + 95 + ], + "type": "text", + "content": "[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 286, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 286, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 286, + 140 + ], + "type": "text", + "content": "[36] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 405-421, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 286, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 286, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 286, + 174 + ], + "type": "text", + "content": "[37] Saleh Mosaddegh, Loïc Simon, and Frédéric Jurie. Photorealistic face de-identification by aggregating donors' face components. In ACCV, pages 159–174, 2014." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 175, + 286, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 286, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 286, + 217 + ], + "type": "text", + "content": "[38] Jacek Naruniec, Leonhard Helminger, Christopher Schroers, and Romann M. Weber. High-resolution neural face swapping for visual effects. Comput. Graph. Forum, 39(4):173-184, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 220, + 286, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 286, + 252 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 286, + 252 + ], + "type": "text", + "content": "[39] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. Fsnet: An identity-aware generative model for image-based face swapping. In ACCV, pages 117-132, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 254, + 286, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 254, + 286, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 254, + 286, + 296 + ], + "type": "text", + "content": "[40] Ryota Natsume, Tatsuya Yatagawa, and Shigeo Morishima. 
RSGAN: face swapping and editing using face and hair representation in latent spaces. In SIGGRAPH, pages 69:1-69:2, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 298, + 286, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 298, + 286, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 298, + 286, + 342 + ], + "type": "text", + "content": "[41] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In ICCV, pages 7587–7596, 2019." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 343, + 286, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 286, + 374 + ], + "type": "text", + "content": "[42] Yuval Nirkin, Yosi Keller, and Tal Hassner. FSGAN: subject agnostic face swapping and reenactment. In ICCV, pages 7183-7192, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 376, + 286, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 286, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 286, + 409 + ], + "type": "text", + "content": "[43] Yuval Nirkin, Iacopo Masi, Anh Tuan Tran, Tal Hassner, and Gérard G. Medioni. On face segmentation, face swapping, and face perception. In AFGR, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 411, + 286, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 286, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 286, + 443 + ], + "type": "text", + "content": "[44] Yotam Nitzan, Amit Bermano, Yangyan Li, and Daniel Cohen-Or. Face identity disentanglement via latent space mapping. ACM Trans. Graph., 39(6):225:1-225:14, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 445, + 286, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 286, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 286, + 487 + ], + "type": "text", + "content": "[45] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13493-13503, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 489, + 286, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 489, + 286, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 489, + 286, + 533 + ], + "type": "text", + "content": "[46] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: A stylegan encoder for image-to-image translation. In CVPR, pages 2287–2296, 2021." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 534, + 286, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 286, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 286, + 567 + ], + "type": "text", + "content": "[47] Daniel Roich, Ron Mokady, Amit H. Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. TOG, pages 1–13, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 568, + 286, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 286, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 286, + 600 + ], + "type": "text", + "content": "[48] Arun Ross and Asem A. Othman. 
Visual cryptography for biometric privacy. IEEE Trans. Inf. Forensics Secur., 6(1):70-81, 2011." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 286, + 634 + ], + "type": "text", + "content": "[49] Nataniel Ruiz, Eunji Chong, and James M. Rehg. Fine-grained head pose estimation without keypoints. In CVPR, pages 2074-2083, 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 636, + 286, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 286, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 286, + 668 + ], + "type": "text", + "content": "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. GRAF: generative radiance fields for 3d-aware image synthesis. In NeurIPS, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 286, + 712 + ], + "type": "text", + "content": "[51] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. ACM Trans. Graph., 40(4):133:1-133:14, 2021." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 420 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[52] Tengfei Wang, Yong Zhang, Yanbo Fan, Jue Wang, and Qifeng Chen. High-fidelity gan inversion for image attribute editing. In CVPR, pages 11369-11378, 2022." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "text", + "content": "[53] Z. Wang, E.P. Simoncelli, and A.C. Bovik. Multiscale structural similarity for image quality assessment. In ACSSC, 2003." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 173 + ], + "type": "text", + "content": "[54] Less Wright. Ranger - a synergistic optimizer. https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer. Accessed: 2022-9-18." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 175, + 545, + 217 + ], + "type": "text", + "content": "[55] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 545, + 262 + ], + "type": "text", + "content": "[56] Yangyang Xu, Bailin Deng, Junle Wang, Yanqing Jing, Jia Pan, and Shengfeng He. High-resolution face swapping via latent semantics disentanglement. In CVPR, pages 7632-7641, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 264, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 545, + 308 + ], + "type": "text", + "content": "[57] Zhiliang Xu, Hang Zhou, Zhibin Hong, Ziwei Liu, Jiaming Liu, Zhizhi Guo, Junyu Han, Jingtuo Liu, Errui Ding, and Jingdong Wang. Styleswap: Style-based generator empowers robust face swapping. In ECCV, pages 661-677, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 545, + 352 + ], + "type": "text", + "content": "[58] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 387 + ], + "type": "text", + "content": "[59] Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A. Efros. Generative visual manipulation on the natural image manifold. In ECCV, pages 597-613, 2016." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "text", + "content": "[60] Yuhao Zhu, Qi Li, Jian Wang, Cheng-Zhong Xu, and Zhenan Sun. One shot face swapping on megapixels. In CVPR, pages 4834-4844, 2021." 
+ } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12714" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_content_list.json b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..66b59c56153d89bb2d57e64983a2dd398c95c08c --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_content_list.json @@ -0,0 +1,1630 @@ +[ + { + "type": "text", + "text": "3D-aware Facial Landmark Detection via Multi-view Consistent Training on Synthetic Data", + "text_level": 1, + "bbox": [ + 233, + 130, + 736, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Libing Zeng $^{1*}$ , Lele Chen $^{2}$ , Wentao Bao $^{3*}$ , Zhong Li $^{2}$ , Yi Xu $^{2}$ , Junsong Yuan $^{4}$ , Nima K. Kalantari $^{1}$ $^{1}$ Texas A&M University, $^{2}$ OPPO US Research Center, InnoPeak Technology, Inc, \n $^{3}$ Michigan State University, $^{4}$ University at Buffalo", + "bbox": [ + 91, + 202, + 875, + 258 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/78e916daa4bd05091dac17e234910d011ec6556475c25fe43fb6d8e37bc3162e.jpg", + "image_caption": [ + "(a) Multi-view Inconsistency" + ], + "image_footnote": [], + "bbox": [ + 78, + 328, + 279, + 484 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4919b56620ae21697831b195c4fa557750b5cc1fe3d2388aaea65720dc521e69.jpg", + "image_caption": [ + "(b) DAD-3DNet" + ], + "image_footnote": [], + "bbox": [ + 282, + 328, + 482, + 483 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/57c47b59d38b7da15bb14f7590b3291a5ba60eda6fd9368720dcbec5a98be4e4.jpg", + "image_caption": [ + "(c) DAD-3DNet+ (Ours)", + "Figure 1. We plot the landmark annotations labeled by different annotators with different colors in view #1 of (a). Accurate annotation of non-frontal faces with large angles like view #1 is challenging. This is a major problem since small differences between annotated landmarks in view #1, becomes substantially magnified when projected to view #2. Training a system on such datasets could lead to poor landmark detection accuracy, as shown in (b). We address this issue by proposing a 3D-aware optimization module that enforces multi-view consistency. We show the landmark detection improvement in (c). Magnified insets in (b) and (c) are shown in (d). After refined by the proposed 3D-aware learning, the detected facial landmark is better aligned with the identity." 
+ ], + "image_footnote": [], + "bbox": [ + 485, + 328, + 684, + 483 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e682e867ccf468210ab3cdbd5f548fd4edc26a0e75ce8c0f95aa2f6c76a46a88.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 687, + 327, + 787, + 483 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/76baf03b0bfbec6487cbc3e3ca9a8d82f8dea50c47b0094375bfffb23984ec43.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 790, + 327, + 890, + 483 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 607, + 313, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Accurate facial landmark detection on wild images plays an essential role in human-computer interaction, entertainment, and medical applications. Existing approaches have limitations in enforcing 3D consistency while detecting 3D/2D facial landmarks due to the lack of multi-view in-the-wild training data. Fortunately, with the recent advances in generative visual models and neural rendering, we have witnessed rapid progress towards high quality 3D image synthesis. In this work, we leverage such approaches to construct a synthetic dataset and propose a novel multiview consistent learning strategy to improve 3D facial landmark detection accuracy on in-the-wild images. The proposed 3D-aware module can be plugged into any learning-based landmark detection algorithm to enhance its accuracy. We demonstrate the superiority of the proposed plug", + "bbox": [ + 75, + 638, + 472, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "in module with extensive comparison against state-of-the-art methods on several real and synthetic datasets.", + "bbox": [ + 498, + 609, + 890, + 640 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 676, + 632, + 693 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Accurate and precise facial landmark plays a significant role in computer vision and graphics applications, such as face morphing [54], facial reenactment [58], 3D face reconstruction [17, 18, 30], head pose estimation [38], face recognition [1, 10, 13, 19, 32, 41, 71], and face generation [11, 21, 60, 69]. In these applications, facial landmark detection provides great sparse representation to ease the burden of network convergence in different training stages and is often used as performance evaluation metric. For instance, as a facial prior, it provides good initialization for subsequent training [66, 67, 69, 76], good intermediate representation to bridge the gap between different modalities for content generation [11, 27, 51, 79], loss terms which reg-", + "bbox": [ + 496, + 705, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*This work was done when Libing Zeng and Wentao Bao were interns at OPPO US Research Center, InnoPeak Technology, Inc.", + "bbox": [ + 75, + 875, + 468, + 902 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12747", + "bbox": [ + 478, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ularize the facial expression [11, 52], or evaluation metrics to measure the facial motion quality [53, 73, 78].", + "bbox": [ + 76, + 90, + 468, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The aforementioned applications require the estimated facial landmarks to be accurate even with significantly varied facial appearance under different identities, facial expressions, and extreme head poses. Tremendous efforts have been devoted to address this problem [15, 22-24, 29, 34, 40, 56, 63, 74, 75, 77, 82, 84]. These approaches often rely on manually annotated large-scale lab-controlled or in-the-wild image datasets [4, 34] to handle various factors such as arbitrary facial expressions, head poses, illumination, facial occlusions, etc.", + "bbox": [ + 75, + 121, + 467, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, even with the high cost of human labeling, consistent and accurate manual annotation of landmarks remains challenging [22, 23, 34]. It is very difficult, if not impossible, to force a person to annotate the facial landmark keypoints at the same pixel locations for faces of different poses, let alone different annotators under different labeling environments. Such annotation inconsistency and inaccuracy in training images are often the killing factor to learn an accurate landmark localization model. This is particularly a major problem in non-frontal faces where annotation becomes extremely challenging. As shown in Fig. 1(a) a small annotation variation in view #1, results in a significant inaccuracy in view #2. This multi-view inconsistency and inaccuracy can ultimately lead to poor landmark detection accuracy, especially for facial images with extreme head pose.", + "bbox": [ + 75, + 272, + 467, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To mitigate this annotation inconsistency and inaccuracy issue, we propose to learn facial landmark detection by enforcing multi-view consistency during training. Given the images of the same facial identity captured with different head poses, instead of detecting facial landmark at each separate facial image, we propose a multi-view consistency supervision to locate facial landmark in a holistic 3D-aware manner. To enforce multi-view consistency, we introduce self-projection consistency loss and multi-view landmark loss in training. We also propose an annotation generation procedure to exploit the merits of lab-controlled data (e.g., multi-view images, consistent annotations) and in-the-wild data (e.g., wide range of facial expressions, identities). Thanks to this synthetic data, our method does not rely on human annotation to obtain the accurate facial landmark locations. 
Therefore, it alleviates the problem of learning from inaccurate and inconsistent annotations.", + "bbox": [ + 75, + 513, + 467, + 768 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We formulate our solution as a plug-in 3D aware module, which can be incorporated into any facial landmark detector and can boost a pre-trained model with higher accuracy and multi-view consistency. We demonstrate the effectiveness of our approach through extensive experiments on both synthetic and real datasets. The main contributions of our work are as follows:", + "bbox": [ + 75, + 770, + 467, + 875 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We show, for the first time, how to combine the merits", + "bbox": [ + 94, + 885, + 467, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "of lab captured face image data (e.g., multi-view) and the in-the-wild face image datasets (e.g., appearance diversity). Using our proposed approach we produce a large-scale synthetic, but realistic, multi-view face dataset, titled DAD-3DHeads-Syn.", + "bbox": [ + 529, + 90, + 890, + 167 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel 3D-aware optimization module, which can be plugged into any learning-based facial landmark detection methods. By refining an existing landmark detection algorithm using our optimization module, we are able to improve its accuracy and multiview consistency.", + "- We demonstrate the performance improvements of our module built on top multiple baseline methods on simulated dataset, lab-captured datasets, and in-the-wild datasets." + ], + "bbox": [ + 517, + 178, + 890, + 339 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 356, + 640, + 372 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we review face landmark datasets and detection algorithms that are most related to our approach. We also provide a brief review of data simulation tools related to our work.", + "bbox": [ + 498, + 382, + 890, + 441 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Face Landmark Detection Dataset", + "text_level": 1, + "bbox": [ + 500, + 453, + 799, + 468 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Lab-controlled dataset. Datasets under \"controlled\" conditions [8, 20, 36, 39, 46, 48, 64, 65, 72] typically collect video/images from indoor scenarios with certain restrictions, e.g. pre-defined expressions, head poses, etc. For example, FaceScape dataset [65] contains 938 individuals and each with 20 expressions using an array of 68 cameras under controlled illumination and positions. Thus, it contains aligned and consistent multi-view images and facial landmark annotations. However, the identities, poses, and expressions are limited. In addition, the environment conditions are fully controlled. These result in limited generalization capability of models trained on this dataset. Moreover, the annotation workflow of such a dataset is expensive and hard to scale.", + "bbox": [ + 498, + 477, + 890, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In-the-wild dataset. The boom of internet image sharing has enabled the creation of many \"in-the-wild\" facial landmark datasets [3,7,32,49,85], collected from the web, to facilitate facial landmark detection research. However, manually annotating facial landmarks on in-the-wild images is a time-consuming process and not scalable. Zhu et al. 
[83] release 300W-LP by extending the original 300W dataset with synthetic images with extreme pose through image profiling of frontal pose images. However, the novel view images are generated by simply applying rotation matrix on the original images, which leads to limited view range and poor image quality. Meanwhile, 300W-LP lacks diversity in face appearance and expression because of the intrinsic limitations of 300W. Recently, Martyniuk et al. [34] introduce a", + "bbox": [ + 496, + 688, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12748", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "new dataset, DAD-3DHeads, by proposing a novel annotation scheme. Specifically, their approach allows the annotator to adjust the landmarks by looking at how well the mesh, generated from the landmarks, fits the input image. The proposed scheme addresses the problems exhibited by existing labeling tools, such as \"guessing\" the positions of the correct landmarks for invisible parts of the head, thus enabling accurate annotations. DAD-3DHeads dataset contains 44,898 in-the-wild images, covering extreme facial expressions, poses, and challenging illuminations. However, the DAD-3DHeads still has some drawbacks. First, even with the mesh fitting guidance, the annotations can be inaccurate. As shown in Fig. 1 (a), even a small inaccuracy in one view could result in a significant inconsistency when projected to another view. This inconsistency could negatively affect the training of the detection network. Second, since the depth is estimated by FLAME [33], annotation accuracy is limited by the FLAME model. Third, this dataset lacks multi-view images, and thus cannot be used to enforce multi-view consistency.", + "bbox": [ + 75, + 90, + 472, + 393 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Data Simulation", + "text_level": 1, + "bbox": [ + 76, + 402, + 238, + 417 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Simulation [26,28,35,42,44,45,50,59,61,62,70] is a useful tool in situations where training data for learning-based methods is expensive to annotate or even hard to acquire. For example, Zeng et al. [70] and Richardson et al. [42] use 3D Morphable Model (3DMM) to render training data with different lighting conditions, identities, expressions, and texture basis elements for reconstructing detailed facial geometry. However, the simulated images produced by these approaches lack realism and have severe domain gaps compared with real-world captures, limiting their usage. Bak et al. [2] adapt synthetic data using a CycleGAN [81] with a regularization term for preserving identities. Ayush et al. [57] use the images and latent code generated by StyleGAN [81] to train a controllable portrait image generation model. However, it is hard to control the attribute consistencies of images simulated by generative models, which limits the usage of the generated datasets.", + "bbox": [ + 75, + 426, + 472, + 684 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Face Landmark Detection Algorithms", + "text_level": 1, + "bbox": [ + 76, + 695, + 405, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Traditional facial landmark detection methods leverage either holistic facial appearance information [12], or the global facial shape patterns [31, 85]. 
They yield reasonable results for images captured in lab-controlled environments with frontal faces and good lighting, however the performance on most of in-the-wild images is inferior.", + "bbox": [ + 75, + 719, + 468, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, deep learning-based algorithms have made promising progress on 2D facial landmark localization [15, 22-24,29,34,40,56,63,74,75,77,82,84] in terms of robustness, generalizability, and accuracy. FAN [6] constructs, for the first time, a very strong baseline by combining a state-of-the-art residual block and a state-of-the-art architecture", + "bbox": [ + 75, + 810, + 470, + 898 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/9d20c4e0d9a754a241e5e59e0edbe4ec44f58de125cc9cf3743f44d19e6e7017.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset Type | Lab-Controlled | In-the-wild | Ours
Examples | e.g. FaceScape, MultiFace | e.g. 300W, AFLW2000, DAD-3DHeads | DAD-3DHeads-Syn
In-the-wild | × | ✓ | ✓
Large Scale | × | ✓ | ✓
Balanced | ✓ | × | ✓
Multiview Consistent | ✓ | × | ✓
Annotation Consistent | ✓ | × | ✓
Scalable | × | × | ✓
", + "bbox": [ + 500, + 90, + 893, + 287 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/094409b388bb96123759f3b785734f8945c95749b6beea7c0ccb43087beaf6f2.jpg", + "image_caption": [ + "Figure 2. The feature comparison of different type of datasets. For example, FaceScape [65] and MultiFace [64] are lab-controlled datasets, while 300W [47], AFLW2000 [68], and DAD-3DHeads [34] are in-the-wild datasets.", + "Figure 3. The proposed data simulation pipeline." + ], + "image_footnote": [], + "bbox": [ + 521, + 359, + 875, + 448 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for landmark localization and trains it on a very large yet synthetically expanded 2D facial landmark dataset. To address self-occlusion and large appearance variation, Zhu et al. [82] propose a cascaded convolutional neural network and optimized weighted parameter distance cost loss function to formulate the priority of 3DMM parameters during training instead of predicting facial landmark keypoints. To further address the problems of shape reconstruction and pose estimation simultaneously, Martyniuk et al. propose an end-to-end trained DAD-3DNet [34] to regress 3DMM parameters and recover the 3D head geometry with differential FLAME decoder. However, due to the intrinsic limitation of the manually annotated in-the-wild dataset, the detection results are affected by the annotation noise and the 3D inconsistency of the single view images. In this paper, we mainly focus on improving the performance of deep-learning based methods.", + "bbox": [ + 496, + 481, + 893, + 737 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Balanced and Realistic Multi-view Face Dataset", + "text_level": 1, + "bbox": [ + 498, + 751, + 890, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We believe there are five desired properties that a good facial landmark dataset should fulfill: (1) contain full range of multi-view images; (2) bridge the domain gap between the dataset and the real-world captured images; (3) contain diverse facial appearance including different poses, expressions, illuminations, and identities; (4) have consistent and accurate annotations across the whole dataset; (5) be", + "bbox": [ + 496, + 794, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12749", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "easy to obtain and scalable. The existing datasets can are either lab-controlled captures [64, 65] or in-the-wild collected [34, 47, 68]. Unfortunately, these datasets lack one or more desired attributes. In contrast, our dataset meets all of these criteria (Fig. 2).", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike previous graphics or generative model-based data synthesis approaches described in Sec. 2.2, we propose a novel facial dataset simulation scheme by leveraging Neural Radiance Field (NeRF) [37] to facilitate training a facial landmark detection network. Fig. 3 shows our dataset creation pipeline. We generate multiview images with consistent landmarks using a single in-the-wild image along with annotated landmark as input.", + "bbox": [ + 75, + 167, + 467, + 287 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we choose DAD-3DHeads [34] as our initial dataset since it contains images under a variety of extreme poses, facial expressions, challenging illuminations, and severe occlusions cases. 
Given an image and its landmarks from this dataset, our goal is to reconstruct multiview images with their corresponding landmarks. Inspired by GAN inversion [80], we first fit a latent code to each image in DAD-3DHeads datasets using EG3D [9] as decoder by following Pivotal Tuning Inversion (PTI) [43]. Note that, EG3D GAN inversion requires the camera pose of the input image, which we estimate using Deep3DFace [14]. Then we can use EG3D to decode the optimized latent code to NeRF. Next, we use volume rendering on the NeRF with 512 uniformly sampled camera views from a large view range, producing 512 multi-view images.", + "bbox": [ + 75, + 289, + 467, + 516 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To obtain the landmarks for each image, we start with the well-annotated groundtruth 2D landmarks of the original images from the DAD-3DHeads dataset. Then we use the estimated camera pose of the input image to unproject the annotated landmarks to 3D space. At last, we project the 3D landmarks to the 512 sampled camera views to obtain landmark annotation on the simulated views. The simulated dataset not only inherits the merits of DAD-3DHeads (e.g. diverse identities, expressions, poses, and illuminations), but also comes with a lot of new features (e.g., balanced head pose, consistent annotation, and multi-view images). In total, there are 2,150,400 training pairs and 204,800 testing pairs in our extended dataset, called DAD-3DHeadsSyn.", + "bbox": [ + 75, + 517, + 467, + 728 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. 3D-Aware Multi-view Consistency Training", + "text_level": 1, + "bbox": [ + 76, + 744, + 464, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Overview", + "text_level": 1, + "bbox": [ + 76, + 770, + 186, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The state-of-the-art landmark detectors [5, 34] can output reasonable results on in-the-wild images. However, we may observe that the predicted landmark are floating on the face surface instead of fitting the face perfectly in a lot of cases. We can easily verify if the detected landmark fits the face by projecting the detected landmark to another view (see Fig. 1(a)). Armed by this observation of multi-view in", + "bbox": [ + 75, + 794, + 467, + 900 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 3D-Aware Plug-in Module." + ], + "code_body": "1: Input: pretrained detector $F$ with weights $\\theta$ , $M$ single-view images $I_{1,\\dots,M} \\in \\mathcal{D}$ along with ground truth landmark $L_{1,\\dots,M}$ , paired $N$ multi-view images $V_{1,\\dots,N} \\in \\hat{\\mathcal{D}}$ along with ground truth landmark $L_{1,\\dots,N}$ . \n2: Output: detector $F$ with updated weights $\\theta^{*}$ \n3: Initialization: set $\\theta$ to pre-trained weights \n4: Unfreeze $\\theta$ \n5: for number of iterations do \n6: Output predicted landmarks $\\hat{L}_{1,\\dots,N}$ for each view. \n7: Randomly sample $P$ landmarks from them, $(1 < P \\leq N)$ . \n8: Cast the landmarks into world space and estimate the approximate 3D landmark $\\dot{L}$ using Eq. 2, 3, 4, 5 \n9: Project $\\dot{L}$ onto the image planes of remaining $Q$ views $(Q = N - P)$ using Eq. 6, 7 \n10: Calculate Total Loss $\\mathcal{L}$ using Eq. 
11 \n11: $\\theta^{*} \\gets Adam\\{\\mathcal{L}\\}$", + "bbox": [ + 508, + 109, + 890, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "consistency and inaccuracy, we propose a novel 3D-Aware training module $\\mathcal{R}$ to further improve the performance of baseline detection algorithm $F$ .", + "bbox": [ + 496, + 401, + 890, + 446 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a facial landmark detection network $F_{\\theta}(\\cdot)$ pretrained on dataset $\\mathcal{D}$ , the proposed module $\\mathcal{R}$ further refines the network parameters $\\theta$ by leveraging our simulated DAD-3DHeads-Syn dataset $\\hat{\\mathcal{D}}$ in addition to the original dataset $\\mathcal{D}$ . Our module $\\mathcal{R}$ can be formulated as:", + "bbox": [ + 496, + 446, + 890, + 522 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nF _ {\\theta^ {*}} \\leftarrow \\mathcal {R} \\left(F _ {\\theta}, X, V _ {1, \\dots , N}\\right), X \\in \\mathcal {D}, V _ {1, \\dots , N} \\in \\hat {\\mathcal {D}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 535, + 890, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $X$ is the image batch sampled from $\\mathcal{D}$ and $V_{1,\\dots,N}$ are $N$ multi-view images sampled from $\\hat{\\mathcal{D}}$ . We refine the network parameters $\\theta$ through exploring 3D information among multi-view images and applying a novel projection consistency during the fine-tuning process. Our module $\\mathcal{R}$ does not result in any new network parameters and can be plugged into any learning-based network. We show the training protocol in Alg. 1.", + "bbox": [ + 496, + 565, + 890, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Multi-view Consistency Supervision", + "text_level": 1, + "bbox": [ + 500, + 699, + 808, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We propose a novel multi-view supervision to force the baseline network to learn to be 3D consistent. To simplify notation, we ignore the batch dimension and fixed camera intrinsic matrix. For every training iteration, we randomly sample $N$ image and landmark pairs $\\{V,\\mathrm{L}\\}_{1,\\dots,N}$ from $\\hat{\\mathcal{D}}$ and $M$ image and landmark pairs $\\{I,\\mathrm{L}\\}_{1,\\dots,M}$ from initial dataset $\\mathcal{D}^*$ .", + "bbox": [ + 496, + 723, + 890, + 828 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We pass $V_{1,\\dots,N}$ to the baseline network $F$ to obtain predicted landmarks $\\hat{\\mathrm{L}}_{1,\\dots,N}$ which are shown with green", + "bbox": [ + 496, + 830, + 890, + 863 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{*}\\mathcal{D}$ is DAD-3DHeads dataset when training DAD-3DNet and is AFLW2000-3D when training 3DDFA.", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12750", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0384782a59611be0512f68c552e09ccf4cbd403b35b27bc352572b02ffebd08f.jpg", + "image_caption": [ + "Figure 4. Multi-view Consistency Supervision. Predicted landmarks $\\hat{\\mathbf{L}}_{1,\\dots,N}$ , estimated 3D landmark $\\dot{\\mathbf{L}}$ , projected landmarks $\\tilde{\\mathbf{L}}_{1,\\dots,Q}$ , and ground truth landmarks $L$ are denoted as green, blue, red, and yellow points respectively. 
The processes of calculating 3D landmark $\\dot{\\mathbf{L}}$ and the projection procedure are shown as light blue and pink arrows, respectively. $\\mathcal{L}_{\\mathrm{Self - Cons}}$ and $\\mathcal{L}_{\\mathrm{Multiview}}$ are represented as red and light green lines, respectively." + ], + "image_footnote": [], + "bbox": [ + 101, + 88, + 450, + 253 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "points in Fig. 4. We then randomly select $P$ predicted landmarks $\\hat{\\mathrm{L}}_{1,\\dots,P} \\in \\mathbb{R}^{P \\times 68 \\times 2}$ from $\\hat{\\mathrm{L}}_{1,\\dots,N}$ to calculate the \"canonical\" 3D landmark $\\dot{\\mathrm{L}} \\in \\mathbb{R}^{68 \\times 3}$ , as shown by the blue point in Fig. 4. We calculate each keypoint of the \"canonical\" 3D landmark $\\dot{\\mathrm{L}}^{(k)} \\in \\mathbb{R}^3, 1 \\leq k \\leq 68$ through Direct Linear Transformation (DLT) [16, 25], as follows:", + "bbox": [ + 75, + 368, + 472, + 460 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_ {p} = \\mathbb {M} _ {p} [ 0,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 0 ] \\in \\mathbb {R} ^ {4}, \\qquad (2)\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 473, + 468, + 494 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nv _ {p} = \\mathbb {M} _ {p} [ 1,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 1 ] \\in \\mathbb {R} ^ {4}, \\qquad (3)\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 500, + 468, + 522 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {A} = \\left[ \\mu_ {1} \\mid \\mu_ {2} \\mid \\dots \\mid \\mu_ {p} \\mid v _ {1} \\mid v _ {2} \\mid \\dots \\mid v _ {p} \\right] ^ {T} \\in \\mathbb {R} ^ {2 P \\times 4}, (4)\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 530, + 468, + 550 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\dot {\\mathbf {L}} ^ {(k)} = \\left(\\mathbf {A} [:,: 3 ] ^ {T} \\quad \\mathbf {A} [:,: 3 ]\\right) ^ {- 1} \\mathbf {A} [:,: 3 ] ^ {T} (- \\mathbf {A} [:,: 3 ]), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 88, + 563, + 468, + 592 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, $p, 1 \\leq p \\leq P$ , is the index of views, and $\\mathbb{M}_{1,\\dots,P}$ are the corresponding camera extrinsic matrices which are pre-defined for view synthesis during volume rendering (see Sec.3). Moreover, $\\mathbb{M}_p[i,:]$ indicates the i-th row of $\\mathbb{M}_p$ , $\\mathbf{A}(:,i]$ indicates columns 0 to $i - 1$ of $\\mathbf{A}$ , and $\\mathbf{A}(:,i]$ indicates the $i$ -th column of $\\mathbf{A}$ . By Eq. 2 and Eq. 3, we first calculate the projection constraints for $\\dot{\\mathrm{L}}_{(k)}$ , i.e., $\\mu_p[:3] \\cdot \\dot{\\mathrm{L}}^{(k)} + \\mu_p[3] = 0$ , where ‘’ indicates the dot product. Then we stack all of the constraints into $\\mathbf{A} \\in \\mathbb{R}^{2P \\times 4}$ by Eq. 4. At last, we compute $\\dot{\\mathrm{L}}^{(k)}$ with a least square approach (Eq. 5).", + "bbox": [ + 75, + 601, + 468, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After obtaining the \"canonical\" 3D landmark $\\dot{\\mathrm{L}}$ , we project it onto the image planes of rest of $Q = N - P$ views to obtain the projected landmark $\\tilde{\\mathrm{L}}_{1,\\dots,Q}$ , shown as red points in Fig. 
4, by the following equations:", + "bbox": [ + 75, + 768, + 470, + 830 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\ns = \\mathbb {M} _ {q} [:,: 3 ] \\dot {\\mathrm {L}} ^ {(k)} + \\mathbb {M} _ {q} [:,: 3 ] \\in \\mathbb {R} ^ {3 \\times 1}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 840, + 468, + 861 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathrm {L}} _ {q} ^ {(k)} = \\left[ \\begin{array}{c} s [ 0 ] / s [ 2 ] \\\\ s [ 1 ] / s [ 2 ] \\end{array} \\right] \\in \\mathbb {R} ^ {2 \\times 1}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 869, + 468, + 906 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, in our case, $1 \\leq q \\leq Q$ . Eq. 6 transforms 3D landmark from \"canonical\" space to the camera space of view $q$ , and Eq. 7 transforms it from camera space to image space.", + "bbox": [ + 500, + 90, + 890, + 136 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Self-Projection Consistency Loss. Since all $M$ views are sampled from one NeRF with different camera views, the predicted landmarks $\\hat{\\mathrm{L}}_{1,\\dots,Q}$ and the projected landmarks $\\tilde{\\mathrm{L}}_{1,\\dots,Q}$ should be consistent. Therefore, we propose to minimize the error between the predicted and projected landmarks as follows:", + "bbox": [ + 498, + 137, + 893, + 228 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {S e l f - C o n s}} = \\sum_ {q = 1} ^ {Q} \\| \\hat {\\mathrm {L}} _ {q} - \\tilde {\\mathrm {L}} _ {q} \\| _ {1}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 239, + 890, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Mesh Consistency Loss* Besides the self-projection consistency, all the $N$ views also share one mesh topology in the canonical space. Therefore, we apply a mesh consistency loss in canonical space calculated by:", + "bbox": [ + 498, + 295, + 890, + 357 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {M e s h - C o n s}} = \\sum_ {n = 1} ^ {N} \\| \\hat {\\mathrm {M}} _ {n} - \\dot {\\mathrm {M}} \\| _ {2}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 368, + 890, + 411 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\hat{\\mathbf{M}}_n$ is the predicted mesh of view $n$ in the canonical space, and $\\hat{\\mathbf{M}}$ is the ground truth mesh of the original reference image.", + "bbox": [ + 496, + 417, + 890, + 463 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multiview Landmark Loss. We also minimize the distance between the predicted 2D facial landmarks and the corresponding multi-view ground truth landmarks we obtained in Sec. 3, which are denoted as yellow points in Fig. 4. The loss can be formulated as follows:", + "bbox": [ + 498, + 465, + 890, + 540 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {M u l t i v e w}} = \\sum_ {q = 1} ^ {N} \\| \\hat {\\mathrm {L}} _ {q} - \\mathrm {L} _ {q} \\| _ {1}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 545, + 890, + 587 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We also incorporate the original loss of the baseline method computed with the image and landmark pairs $\\{I,L\\}_{1,\\dots ,M}$ from dataset $\\mathcal{D}$ to stabilize our 3D-aware training. 
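For reference, the triangulation in Eqs. 2-5, the reprojection in Eqs. 6-7, and the two landmark losses in Eqs. 8 and 10 can be expressed in a few lines of tensor code. The snippet below is only an illustrative PyTorch-style sketch, not the authors' released implementation; the tensor shapes, the use of torch.linalg.lstsq for the least-squares step, and the averaging over keypoints and views are assumptions made for readability.

import torch

def triangulate_keypoints(pts2d, cams):
    # DLT triangulation (Eqs. 2-5): one canonical 3D point per keypoint from P views.
    # pts2d: (P, 68, 2) predicted 2D landmarks; cams: (P, 3, 4) per-view projection
    # matrices (the fixed camera intrinsics are assumed to be folded in).
    P, K, _ = pts2d.shape
    points3d = []
    for k in range(K):
        rows = []
        for p in range(P):
            M = cams[p]
            x, y = pts2d[p, k]
            rows.append(M[0] - x * M[2])   # Eq. 2
            rows.append(M[1] - y * M[2])   # Eq. 3
        A = torch.stack(rows)              # (2P, 4), Eq. 4
        # Least-squares solution of A[:, :3] @ X = -A[:, 3], i.e. Eq. 5.
        X = torch.linalg.lstsq(A[:, :3], -A[:, 3:]).solution.squeeze(-1)
        points3d.append(X)
    return torch.stack(points3d)           # (68, 3)

def project(points3d, M):
    # Eqs. 6-7: canonical space -> camera space -> image plane for one view.
    s = points3d @ M[:, :3].T + M[:, 3]
    return s[:, :2] / s[:, 2:3]

def multiview_losses(pred, gt, cams, P=4):
    # pred, gt: (N, 68, 2) predicted / ground-truth landmarks for N sampled views;
    # cams: (N, 3, 4). The first P views are triangulated and the remaining Q = N - P
    # views receive the self-projection consistency loss (Eq. 8); all N views receive
    # the multi-view landmark loss (Eq. 10). Mean reduction is an implementation choice.
    canonical = triangulate_keypoints(pred[:P], cams[:P])
    reprojected = torch.stack([project(canonical, M) for M in cams[P:]])
    loss_self_cons = (pred[P:] - reprojected).abs().mean()
    loss_multiview = (pred - gt).abs().mean()
    return loss_self_cons, loss_multiview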
The overall loss is:", + "bbox": [ + 498, + 592, + 890, + 654 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\lambda_ {1} \\mathcal {L} _ {\\text {S e l f - C o n s}} + \\lambda_ {2} \\mathcal {L} _ {\\text {M e s h - C o n s}} + \\lambda_ {3} \\mathcal {L} _ {\\text {M u l t i v i e w}} + \\mathcal {L} _ {\\text {o r i g i n a l}}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 666, + 890, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\lambda_{1,2,3}$ are hyper parameters that control the contribution of each components. We set $\\lambda_{1,2,3}$ to 0.1 empirically.", + "bbox": [ + 498, + 696, + 890, + 727 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that our training is a plug-in module and can be incorporated into any existing facial landmark detector easily. For different pretrained models, we just need to change $\\mathcal{L}_{\\mathrm{original}}$ while the other novel loss components calculated on our balanced synthetic dataset $\\mathcal{D}$ can be applied directly. We show this plug-in capability on top of different baseline methods (e.g., DAD-3DNet [34] and 3DDFA [22]), and demonstrate that our 3D-aware training indeed improves their performance (see Sec. 5).", + "bbox": [ + 496, + 728, + 890, + 864 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "*We can apply it depending on whether the baseline network outputs mesh. In our case, the 3DDFA [22] and DAD-3DNet [34] both do.", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12751", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/3192f680ce4272f7693275b14bd050c2eda3ba4d5d296c85ec22eaa960b48365.jpg", + "table_caption": [ + "Table 1. Facial landmark detection result (NME) on DAD-3DHeads [34], FaceScape [65], and MultiFace [64]. Lower values mean better results." + ], + "table_footnote": [], + "table_body": "
Method | DAD-3DHeads | FaceScape | MultiFace
FAN [6] | 7.141 | 16.741 | 6.143
Dlib [31] | 10.841 | 29.431 | 18.205
3DDFA-V2 [23] | 2.926 | 6.853 | 5.942
3DDFA [22] | 4.082 | 7.988 | 8.121
3DDFA+ | 3.784 | 7.425 | 7.305
DAD-3DNet [34] | 2.599 | 6.681 | 5.786
DAD-3DNet+ | 2.503 | 6.050 | 5.480
", + "bbox": [ + 76, + 143, + 480, + 271 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 281, + 209, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 76, + 306, + 284, + 323 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training Details. We implement our algorithm in Pytorch and adopt ADAM to optimize the baseline networks. We run our 3D-aware training for 100 epochs with a batch size of 4, and a learning rate of $1 \\times 10^{-4}$ on each baseline network. As to computational cost, fine-tuning DAD-3DNet take about and 16.25 hours on 4 NVIDIA RTX A6000 GPUs.", + "bbox": [ + 75, + 329, + 468, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset. Besides DAD-3DHeads, we use two additional datasets to conduct the evaluations.", + "bbox": [ + 76, + 435, + 468, + 465 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- DAD-3DHeads [34] is the state-of-the-art in-the-wild 3D head dataset, which contains dense, accurate annotations, and diverse facial appearances. It consists of 44,898 images collected from various sources (37,840 in the training set, 4,312 in the validation set, and 2,746 in the test set).", + "- FaceScape [65] is a large-scale high-quality lab-controlled 3D face dataset, which contains 18,760 examples, captured from 938 subjects and each with 20 specific expressions.", + "- MultiFace [64] is a new multi-view, high-resolution human face dataset collected from 13 identities for neural face rendering." + ], + "bbox": [ + 94, + 474, + 468, + 694 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training and Testing Split. In all the experiments, we only refine the baseline models with the training set of our DAD-3DHeads-Syn and their original training dataset. We use the test sets of DAD-3DHeads-Syn and DAD-3DHeads [34], and use the full datasets of FaceScape [65] and MultiFace [63] for performance evaluation. All the comparison methods have not been trained on the split test sets.", + "bbox": [ + 75, + 703, + 468, + 823 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metrics. We evaluate the facial landmark distance by calculating the Normalized Mean Error (NME). We normalize the landmark error by dividing its image resolution instead of the eye distance [55], since all the test images are aligned with offline tools. We calculate the head", + "bbox": [ + 75, + 824, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "pose error by the absolute distance of the Euler angle values.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Quantitative Evaluation", + "text_level": 1, + "bbox": [ + 500, + 132, + 720, + 148 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Landmark Detection Results. The quantitative landmark detection results on DAD-3DHeads [34], FaceScape [65], and MultiFace [64] are shown in Tab. 1. We can find that the DAD-3DNet+ refined by our 3D-aware multi-view consistency training achieves the best performance on all three datasets. Moreover, according to the results of 3DDFA [22], 3DDFA+, DAD-3DNet [34], and DAD-3DNet+, we find that after refinement, the new models (3DDFA+ and DAD-3DNet+) achieve much better results than the baseline models. 
For example, the detection error of DAD-3DNet [34] drops 0.631 and 0.306, a $9\\%$ and $5\\%$ improvement, on FaceScape and MultiFace datasets, respectively. Similarly, we improve the 3DDFA [22] by 0.298 ( $7\\%$ ), 0.563 ( $7\\%$ ), and 0.816 ( $10\\%$ ) on DAD-3DHeads, FaceScape and MultiFace datasets, respectively. We attribute the improvement to our proposed 3D aware multi-view training. One interesting phenomenon is that all the methods perform better on DAD-3DHeads dataset than the other two lab-captured datasets. We attribute this to the extreme head pose and challenging facial expressions in the other two datasets. We plot the head pose distribution of DAD-3DHeads (see supplementary materials) and find that distribution of head pose is not as uniform as the other two lab-controlled datasets.", + "bbox": [ + 496, + 155, + 890, + 516 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Head Pose Estimation Results. Tab. 2 shows the head pose estimation error on DAD-3DHeads [34] and FaceScape [65]. Our DAD-3DNet+ achieves best performance in most metrics. Similar to the landmark results, we can also conclude that head pose detection accuracy of the baseline methods (3DDFA and DAD-3DNet) is improved by our 3D aware multi-view consistency (3DDFA+ and DAD-3DNet+). For example, after refinement, DAD-3DNet+ achieves $11.9\\%$ and $18.8\\%$ performance boosts in overall head pose error on DAD-3DHeads and FaceScape dataset, respectively.", + "bbox": [ + 496, + 518, + 890, + 685 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Qualitative Evaluation", + "text_level": 1, + "bbox": [ + 500, + 695, + 710, + 710 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We fist show visual comparisons on images randomly sampled from DAD-3DHeads test set [34] in Fig. 5. The landmark predicted by our DAD-3DNet+ model fits the individual's face tighter than the other predictions. Furthermore, by comparing the third (3DDFA [22]) and forth columns (ours), we can see that refining model $(3\\mathrm{DDFA}+)$ improves the landmark accuracy dramatically. Similar visual improvements can be found in sixth (DAD-3DNet) and seventh (DAD-3DNet+) columns as well. Comparing the sixth and seventh column, we can see that the refinement training drags and rotates the landmark in 3D space to better fit it to the individual's face surface. We attribute this abl", + "bbox": [ + 496, + 719, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12752", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7aac1a8c3b9941c1668256c37762e4d1d7cf20e416080d39a9115a187a821aa4.jpg", + "image_caption": [ + "Figure 5. The visual results of Dlib [31], FAN [5], 3DDFA [22], our refined 3DDFA+, 3DDFA-V2, DAD-3DNet [34], and our refined DAD-3DNet+ on images randomly sampled from DAD-3DHeads [34] testing set. We show the enlarged error region (while box) in the middle row." + ], + "image_footnote": [], + "bbox": [ + 119, + 78, + 849, + 321 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/dbeafcf7401a625fbde6bffab9671d2fac821f393c656c5087e752eca4ed2e7e.jpg", + "table_caption": [ + "Table 2. Head pose estimation results (head pose error) on DAD-3DHeads [34], FaceScape [65]. Lower values mean better results." + ], + "table_footnote": [], + "table_body": "
 | DAD-3DHeads | FaceScape
Method | Pitch | Roll | Yaw | Overall | Pitch | Roll | Yaw | Overall
FAN [5] | 9.765 | 5.376 | 6.390 | 7.177 | 8.774 | 4.895 | 6.556 | 6.742
Dlib [31] | 13.352 | 11.799 | 14.654 | 13.268 | 17.861 | 12.663 | 19.548 | 16.691
3DDFA-V2 [23] | 7.901 | 4.989 | 6.088 | 6.326 | 13.741 | 9.718 | 11.353 | 11.604
3DDFA [22] | 9.895 | 7.977 | 8.996 | 8.956 | 20.789 | 18.145 | 19.692 | 19.752
3DDFA+ | 9.195 | 6.792 | 8.692 | 8.226 | 20.996 | 16.426 | 19.054 | 18.826
DAD-3DNet [34] | 8.274 | 4.666 | 9.206 | 7.382 | 15.851 | 9.676 | 18.346 | 14.624
DAD-3DNet+ | 7.700 | 4.274 | 7.528 | 6.500 | 14.466 | 7.247 | 13.876 | 11.863
", + "bbox": [ + 184, + 410, + 784, + 563 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ity to our 3D-aware multi-view consistency training, which lets the refined model gain the better sense in 3D space, and therefore, improve the landmark detection results.", + "bbox": [ + 75, + 588, + 470, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To further validate the improvement gained by the proposed 3D-aware multi-view consistency training, we show the visual results (Fig. 6) of 3DDFA [22], our refined 3DDFA+, DAD-3DNet [34], and our refined DAD-3DNet+ on images sampled from four different test sets. We can find that our proposed refinement improves the landmark detection results in the eye, mouth, and face contour regions, which usually contain more appearance dynamics than the other areas.", + "bbox": [ + 75, + 635, + 468, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Performance Improvement Analysis", + "text_level": 1, + "bbox": [ + 76, + 785, + 390, + 801 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To systematically understand the source of improvement after refining the baseline methods (DAD-3DNet [34] and 3DDFA [22]) with our proposed 3D-aware multi-view consistency training, we further calculate and plot the landmark and head pose error improvements on DAD-3DHeads [34] (see Fig. 7). Instead of calculating the overall improved", + "bbox": [ + 75, + 810, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "error score, we split all the testing images into different groups according to their head pose value and calculate the improved error score within each group. We can find that the improvement by our training gets more obvious as the head pose gets more challenging. For example, the landmark error improvement (Fig. 7 upper section) using our method built on top of 3DDFA [22] increases from 0.12 to 0.71. Similarly, the head pose estimation error (Fig. 7 lower section) improvement using our method built on top of DAD-3DNet [34] increases from 0.02 to 2.7. We also show the detection result visualization in Fig. 8. We can see that from left to right, as the head pose increases, the error of the DAD-3DNet+ (second row) is more stable than the error (first row) of the DAD-3DNet. Base on this trend, we conclude that our proposed 3D-aware multi-view consistency training provides a more significant improvement over the baselines on images with larger head pose. This verifies our hypothesis that multi-view consistency training enables the network to learn 3D-aware information, which benefits the detection results on images with large head pose.", + "bbox": [ + 496, + 588, + 893, + 891 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12753", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f2ac3d6ce01550466943c2f9f8af1f2f6a4d21af6e231534947de31734b3a21c.jpg", + "image_caption": [ + "Figure 6. The visual comparisons between baseline methods and the refined methods on four testing sets. The left column and upper row list the dataset and method names, respectively. $^+$ denotes the model that has been refined by our 3D-aware training." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 87, + 467, + 369 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eb3cbcd9dc8c8a1441823c5b341c0f8f352ccce72c74df1480e3a0358cf7b24d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 99, + 448, + 439, + 575 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/26af171f93b204cdcd7c84063eda237916b5601d8ebf9d2f080cc61ffedc438e.jpg", + "image_caption": [ + "Figure 7. The landmark (top) and head pose (bottom) error improvement over DAD-3DNet [34] and 3DDFA [22] on images from different head pose ranges. The solid and dotted lines indicate DAD-3DNet [34] vs. DAD-3DNet+ (ours) and 3DDFA [22] vs. 3DDFA+ (ours)." + ], + "image_footnote": [], + "bbox": [ + 99, + 577, + 439, + 703 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.5. Ablation Study", + "text_level": 1, + "bbox": [ + 76, + 800, + 228, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct ablation study on FaceScape [65] to verify the importance of main components of our novel design. As shown in Tab. 3, we calculate NME of landmark and MAE of pose estimation in these ablation experiments. Based on these numbers, we can see the performance degrades", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/af87e6bdb2c2cb2d76c4ffc07ba3a5063e33a0cefd29dff5abab44fd06970664.jpg", + "image_caption": [ + "Figure 8. The error visualization of DAD-3DNet [34] and our DAD-3DNet+ on MultiFace [64] dataset. The white and green dots are the ground truth and predicted landmarks, respectively. We use the red line to show the error distance. From left to right, the head pose increases gradually." + ], + "image_footnote": [], + "bbox": [ + 519, + 87, + 874, + 220 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3d511c2eb7e9abe6ec1581aecd39c58ced7d79d7f0b3e753785a7674c09a6a1e.jpg", + "table_caption": [ + "Table 3. Ablation Study on FaceScape [65]. The top 2 numbers are shown in bold." + ], + "table_footnote": [], + "table_body": "
<tr><td></td><td>Component</td><td>NME ↓</td><td>Pose ↓</td></tr>
<tr><td>1</td><td>full model (P=4)</td><td><b>6.050</b></td><td><b>11.863</b></td></tr>
<tr><td>2</td><td>w/o $\\mathcal{L}_{\\mathrm{Mesh - Cons}}$</td><td>6.168</td><td>12.327</td></tr>
<tr><td>3</td><td>w/o $\\mathcal{L}_{\\mathrm{Self - Cons}}$</td><td>6.541</td><td>13.623</td></tr>
<tr><td>4</td><td>full model (P=8)</td><td><b>6.048</b></td><td>11.923</td></tr>
<tr><td>5</td><td>full model (P=16)</td><td>6.098</td><td><b>11.902</b></td></tr>
<tr><td>6</td><td>full model (P=32)</td><td>6.139</td><td>11.912</td></tr>
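For reference, the two error measures reported in Tab. 3 (landmark NME and head-pose MAE) can be computed roughly as in the NumPy sketch below. The bounding-box-diagonal normalizer and the function names are assumptions; the exact NME normalization depends on the benchmark protocol.

```python
# Rough sketch of the ablation metrics: NME over 68 landmarks and MAE over the
# three head-pose angles. The ground-truth bounding-box diagonal is used as the
# NME normalizer here; benchmarks differ in the exact choice.
import numpy as np

def landmark_nme(pred, gt):
    """pred, gt: (68, 2) arrays of 2D landmarks for one image; returns NME in percent."""
    pred, gt = np.asarray(pred, float), np.asarray(gt, float)
    per_point = np.linalg.norm(pred - gt, axis=1)             # L2 error per keypoint
    norm = np.linalg.norm(gt.max(axis=0) - gt.min(axis=0))    # diagonal of the GT bounding box
    return 100.0 * per_point.mean() / norm

def pose_mae(pred_angles, gt_angles):
    """Mean absolute error over (yaw, pitch, roll), both given in degrees."""
    return float(np.mean(np.abs(np.asarray(pred_angles, float) - np.asarray(gt_angles, float))))

# Toy usage with random landmarks and a small pose error.
rng = np.random.default_rng(0)
gt = rng.uniform(0, 200, size=(68, 2))
pred = gt + rng.normal(0, 2, size=(68, 2))
print(landmark_nme(pred, gt), pose_mae([10.0, -5.0, 2.0], [12.0, -4.0, 1.0]))
```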
", + "bbox": [ + 521, + 349, + 864, + 464 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "drastically when we remove $\\mathcal{L}_{\\mathrm{Self - Cons}}$ . Moreover, removing $\\mathcal{L}_{\\mathrm{Mesh - Cons}}$ negatively impacts the results, demonstrating its importance. Moreover, estimating the 3D landmarks in the world space using fewer views leads to better results. This is a significant advantage as it makes our fine-tuning process more efficient.", + "bbox": [ + 498, + 484, + 890, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 611, + 617, + 627 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We propose 3D-aware multi-view consistency training, a new framework for improving deep-learning base landmark detection algorithms. Through a set of novel loss functions, we force the network to produce landmarks that are 3D consistent. We additionally introduce a novel dataset simulation pipeline to combine the merits of lab-controlled captures and in-the-wild collected images. The model refined by our method outperforms previous approaches in terms of landmark detection accuracy and head pose estimation accuracy. Admittedly, our work has some limitations. For example, our proposed training relies on the performance of the baseline method. If the pretrained baseline yield poor initial predictions, our DLT would fail to estimate reasonable canonical 3D landmark, affecting the performance of the proposed self-projection consistency loss. Investigating ways to reduce the reliance on the accuracy of the baseline methods would be an interesting future research.", + "bbox": [ + 496, + 643, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12754", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Vitor Albiero, Xingyu Chen, Xi Yin, Guan Pang, and Tal Hassner. img2pose: Face alignment and detection via 6dof, face pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7617-7627, 2021.", + "[2] Slawomir Bak, Peter Carr, and Jean-Francois Lalonde. Domain adaptation through synthesis for unsupervised person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 189–205, 2018.", + "[3] Peter N Belhumeur, David W Jacobs, David J Kriegman, and Neeraj Kumar. Localizing parts of faces using a consensus of exemplars. IEEE transactions on pattern analysis and machine intelligence, 35(12):2930-2940, 2013.", + "[4] Adrian Bulat and Georgios Tzimiropoulos. Two-stage convolutional part heatmap regression for the 1st 3d face alignment in the wild (3dfaw) challenge. In European Conference on Computer Vision, pages 616-624. Springer, 2016.", + "[5] Adrian Bulat and Georgios Tzimiropoulos. Binarized convolutional landmark localizers for human pose estimation and face alignment with limited resources. In Proceedings of the IEEE International Conference on Computer Vision, pages 3706-3714, 2017.", + "[6] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). 
In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017.", + "[7] Xavier P Burgos-Artizzu, Pietro Perona, and Piotr Dólar. Robust face landmark estimation under occlusion. In Proceedings of the IEEE international conference on computer vision, pages 1513-1520, 2013.", + "[8] Chen Cao, Yanlin Weng, Stephen Lin, and Kun Zhou. 3d shape regression for real-time facial animation. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013.", + "[9] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022.", + "[10] Dong Chen, Shaoqing Ren, Yichen Wei, Xudong Cao, and Jian Sun. Joint cascade face detection and alignment. In European conference on computer vision, pages 109-122. Springer, 2014.", + "[11] Lele Chen, Ross K Maddox, Zhiyao Duan, and Chenliang Xu. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7832-7841, 2019.", + "[12] Timothy F Cootes, Gareth J Edwards, and Christopher J Taylor. Active appearance models. In European conference on computer vision, pages 484-498. Springer, 1998.", + "[13] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE con" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ference on computer vision and pattern recognition, pages 5203-5212, 2020.", + "[14] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019.", + "[15] Xuanyi Dong, Yan Yan, Wanli Ouyang, and Yi Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, June 2018.", + "[16] Xuanyi Dong, Yi Yang, Shih-En Wei, Xinshuo Weng, Yaser Sheikh, and Shouu-I Yu. Supervision by registration and triangulation for landmark detection. IEEE transactions on pattern analysis and machine intelligence, 43(10):3681-3694, 2020.", + "[17] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017.", + "[18] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018.", + "[19] Golnaz Ghiasi and Charless C Fowlkes. Occlusion coherence: Detecting and localizing occluded faces. arXiv preprint arXiv:1506.08347, 2015.", + "[20] Ralph Gross, Iain Matthews, Jeffrey Cohn, Takeo Kanade, and Simon Baker. Multi-pie. Image and vision computing, 28(5):807-813, 2010.", + "[21] Kuangxiao Gu, Yuqian Zhou, and Thomas Huang. Fnet: Landmark driven fetching and learning network for faithful talking facial animation synthesis. 
In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 10861-10868, 2020.", + "[22] Jianzhu Guo, Xiangyu Zhu, and Zhen Lei. 3ddfa. https://github.com/cleardusk/3DDFA, 2018.", + "[23] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In European Conference on Computer Vision, pages 152-168. Springer, 2020.", + "[24] Xiaojie Guo, Siyuan Li, Jinke Yu, Jiawan Zhang, Jiayi Ma, Lin Ma, Wei Liu, and Haibin Ling. Pfld: A practical facial landmark detector. arXiv preprint arXiv:1902.10859, 2019.", + "[25] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003.", + "[26] Stefan Hinterstoisser, Vincent Lepetit, Paul Wohlhart, and Kurt Konolige. On pre-trained image features and synthetic images for deep learning. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0-0, 2018.", + "[27] Xinya Ji, Hang Zhou, Kaisiyuan Wang, Qianyi Wu, Wayne Wu, Feng Xu, and Xun Cao. Eamm: One-shot emotional talking face via audio-based emotion-aware motion model. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, 2022." + ], + "bbox": [ + 501, + 93, + 893, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12755", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017.", + "[29] Amin Jourabloo and Xiaoming Liu. Large-pose face alignment via cnn-based dense 3d model fitting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4188-4196, 2016.", + "[30] Ira Kemelmacher-Shlizerman and Ronen Basri. 3d face reconstruction from a single image using a single reference face shape. IEEE transactions on pattern analysis and machine intelligence, 33(2):394-405, 2010.", + "[31] Davis E. King. Dlib-ml: A machine learning toolkit. Journal of Machine Learning Research, 10:1755-1758, 2009.", + "[32] Martin Koestinger, Paul Wohlhart, Peter M Roth, and Horst Bischof. Annotated facial landmarks in the wild: A largescale, real-world database for facial landmark localization. In 2011 IEEE international conference on computer vision workshops (ICCV workshops), pages 2144-2151. IEEE, 2011.", + "[33] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6), 2017.", + "[34] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 20942–20952, 2022.", + "[35] Nikolaus Mayer, Eddy Ilg, Philipp Fischer, Caner Hazirbas, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. What makes good synthetic training data for learning disparity and optical flow estimation? International Journal of Computer Vision, 126(9):942-960, 2018.", + "[36] Kieron Messer, Jiri Matas, Josef Kittler, Juergen Luettin, Gilbert Maitre, et al. 
Xm2vtsdb: The extended m2vts database. In Second international conference on audio and video-based biometric person authentication, volume 964, pages 965-966. Citeseer, 1999.", + "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021.", + "[38] Erik Murphy-Chutorian and Mohan Manubhai Trivedi. Head pose estimation in computer vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 31(4):607-626, 2008.", + "[39] P Jonathon Phillips, Patrick J Flynn, Todd Scruggs, Kevin W Bowyer, Jin Chang, Kevin Hoffman, Joe Marques, Jaesik Min, and William Worek. Overview of the face recognition grand challenge. In 2005 IEEE computer society conference on computer vision and pattern recognition (CVPR'05), volume 1, pages 947-954. IEEE, 2005.", + "[40] Shengju Qian, Keqiang Sun, Wayne Wu, Chen Qian, and Jiaya Jia. Aggregation via separation: Boosting facial land" + ], + "bbox": [ + 78, + 90, + 470, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "mark detector with semi-supervised style translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10153-10163, 2019.", + "[41] Rajeev Ranjan, Vishal M Patel, and Rama Chellappa. Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE transactions on pattern analysis and machine intelligence, 41(1):121-135, 2017.", + "[42] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017.", + "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on Graphics (TOG), 42(1):1-13, 2022.", + "[44] Andreas Rossler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. Faceforensics: Learning to detect manipulated facial images. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1-11, 2019.", + "[45] Nataniel Ruiz, Samuel Schulter, and Manmohan Chandraker. Learning to simulate. In International Conference on Learning Representations, 2019.", + "[46] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013.", + "[47] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013.", + "[48] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. A semi-automatic methodology for facial landmark annotation. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 896-903, 2013.", + "[49] Jie Shen, Stefanos Zafeiriou, Grigoris G Chrysos, Jean Kossaifi, Georgios Tzimiropoulos, and Maja Pantic. The first facial landmark tracking in-the-wild challenge: Benchmark and results. 
In Proceedings of the IEEE international conference on computer vision workshops, pages 50-58, 2015.", + "[50] Ashish Shrivastava, Tomas Pfister, Oncel Tuzel, Joshua Susskind, Wenda Wang, and Russell Webb. Learning from simulated and unsupervised images through adversarial training. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2107-2116, 2017.", + "[51] Linsen Song, Wayne Wu, Chaoyou Fu, Chen Change Loy, and Ran He. Audio-driven dubbing for user generated contents via style-aware semi-parametric synthesis. IEEE Transactions on Circuits and Systems for Video Technology, 2022.", + "[52] Linsen Song, Wayne Wu, Chen Qian, Ran He, and Chen Change Loy. Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security, 17:585-598, 2022." + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12756", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[53] Yang Song, Jingwen Zhu, Dawei Li, Andy Wang, and Hairong Qi. Talking face generation by conditional recurrent adversarial network. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 919–925. International Joint Conferences on Artificial Intelligence Organization, 7 2019.", + "[54] Luuk Spreeuwers, Maikel Schils, and Raymond Veldhuis. Towards robust evaluation of face morphing detection. In 2018 26th European Signal Processing Conference (EU-SIPCO), pages 1027-1031. IEEE, 2018.", + "[55] Keqiang Sun, Wayne Wu, Tinghao Liu, Shuo Yang, Quan Wang, Qiang Zhou, Zuochang Ye, and Chen Qian. Fab: A robust facial landmark detection framework for motion-blurred videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5462-5471, 2019.", + "[56] Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep convolutional network cascade for facial point detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3476-3483, 2013.", + "[57] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020.", + "[58] Justus Thies, Michael Zollhofer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2387-2395, 2016.", + "[59] Boris van Breugel, Trent Kyono, Jeroen Berrevoets, and Michaela van der Schaar. Decaf: Generating fair synthetic data using causally-aware generative networks. Advances in Neural Information Processing Systems, 34:22221-22233, 2021.", + "[60] Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Jan Kautz, and Bryan Catanzaro. Few-shot video-to-video synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2019.", + "[61] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021.", + "[62] Erroll Wood, Tadas Baltrusaitis, Louis-Philippe Morency, Peter Robinson, and Andreas Bulling. Learning an appearance-based gaze estimator from one million synthesised images. In Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research & Applications, pages 131–138, 2016.", + "[63] Yue Wu, Zuoguan Wang, and Qiang Ji. Facial feature tracking under varying facial expressions and face poses based on restricted boltzmann machines. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3452-3459, 2013.", + "[64] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Timothy Godisart, Hyowon Ha, Alexander Hypes, Taylor Koska," + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouu-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022.", + "[65] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 601-610, 2020.", + "[66] Ran Yi, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Apdrawinggan: Generating artistic portrait drawings from face photos with hierarchical gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10743-10752, 2019.", + "[67] Ran Yi, Zipeng Ye, Ruoyu Fan, Yezhi Shu, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Animating portrait line drawings from a single face photo and a speech signal. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022.", + "[68] Xi Yin, Xiang Yu, Kihyuk Sohn, Xiaoming Liu, and Manmohan Chandraker. Towards large-posed face frontalization in the wild. In In Proceeding of International Conference on Computer Vision, Venice, Italy, October 2017.", + "[69] Egor Zakharov, Aliaksandra Shysheya, Egor Burkov, and Victor Lempitsky. Few-shot adversarial learning of realistic neural talking head models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9459-9468, 2019.", + "[70] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. Df2net: A dense-fine-finer network for detailed 3d face reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2315-2324, 2019.", + "[71] Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, and Yu Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters, 23(10):1499-1503, 2016.", + "[72] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, and Peng Liu. A high-resolution spontaneous 3d dynamic facial expression database. In 2013 10th IEEE international conference and workshops on automatic face and gesture recognition (FG), pages 1-6. IEEE, 2013.", + "[73] Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3661–3670, 2021.", + "[74] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. Springer, 2014.", + "[75] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Learning deep representation for face alignment with auxiliary attributes. IEEE transactions on pattern analysis and machine intelligence, 38(5):918-930, 2015." + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "12757", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[76] Aihua Zheng, Feixia Zhu, Hao Zhu, Mandi Luo, and Ran He. Talking face generation via learning semantic and temporal synchronous landmarks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3682-3689. IEEE, 2021.", + "[77] Erjin Zhou, Haoqiang Fan, Zhimin Cao, Yuning Jiang, and Qi Yin. Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In Proceedings of the IEEE international conference on computer vision workshops, pages 386-391, 2013.", + "[78] Hang Zhou, Yasheng Sun, Wayne Wu, Chen Change Loy, Xiaogang Wang, and Ziwei Liu. Pose-controllable talking face generation by implicitly modularized audio-visual representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4176-4186, 2021.", + "[79] Yang Zhou, Xintong Han, Eli Shechtman, Jose Echevarria, Evangelos Kalogerakis, and Dingzeyu Li. Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020.", + "[80] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European conference on computer vision, pages 592-608. Springer, 2020.", + "[81] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017.", + "[82] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016.", + "[83] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016.", + "[84] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1):78-92, 2017.", + "[85] Xiangxin Zhu and Deva Ramanan. Face detection, pose estimation, and landmark localization in the wild. In 2012 IEEE conference on computer vision and pattern recognition, pages 2879-2886. IEEE, 2012." 
+ ], + "bbox": [ + 78, + 90, + 470, + 726 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12758", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_model.json b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4211bb6d2d90992a1fa82670c11bfa2dd73ff587 --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_model.json @@ -0,0 +1,2644 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.131, + 0.738, + 0.177 + ], + "angle": 0, + "content": "3D-aware Facial Landmark Detection via Multi-view Consistent Training on Synthetic Data" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.203, + 0.877, + 0.259 + ], + "angle": 0, + "content": "Libing Zeng \\(^{1*}\\), Lele Chen\\(^{2}\\), Wentao Bao\\(^{3*}\\), Zhong Li\\(^{2}\\), Yi Xu\\(^{2}\\), Junsong Yuan\\(^{4}\\), Nima K. Kalantari\\(^{1}\\) \n\\(^{1}\\)Texas A&M University, \\(^{2}\\)OPPO US Research Center, InnoPeak Technology, Inc, \n\\(^{3}\\)Michigan State University, \\(^{4}\\)University at Buffalo" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.329, + 0.28, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.487, + 0.27, + 0.5 + ], + "angle": 0, + "content": "(a) Multi-view Inconsistency" + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.329, + 0.483, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.486, + 0.434, + 0.5 + ], + "angle": 0, + "content": "(b) DAD-3DNet" + }, + { + "type": "image", + "bbox": [ + 0.486, + 0.329, + 0.686, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.486, + 0.661, + 0.5 + ], + "angle": 0, + "content": "(c) DAD-3DNet+ (Ours)" + }, + { + "type": "image", + "bbox": [ + 0.689, + 0.328, + 0.788, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.783, + 0.486, + 0.801, + 0.499 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image", + "bbox": [ + 0.791, + 0.328, + 0.892, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.505, + 0.895, + 0.59 + ], + "angle": 0, + "content": "Figure 1. We plot the landmark annotations labeled by different annotators with different colors in view #1 of (a). Accurate annotation of non-frontal faces with large angles like view #1 is challenging. This is a major problem since small differences between annotated landmarks in view #1, becomes substantially magnified when projected to view #2. Training a system on such datasets could lead to poor landmark detection accuracy, as shown in (b). 
We address this issue by proposing a 3D-aware optimization module that enforces multi-view consistency. We show the landmark detection improvement in (c). Magnified insets in (b) and (c) are shown in (d). After refined by the proposed 3D-aware learning, the detected facial landmark is better aligned with the identity." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.608, + 0.314, + 0.625 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.64, + 0.473, + 0.868 + ], + "angle": 0, + "content": "Accurate facial landmark detection on wild images plays an essential role in human-computer interaction, entertainment, and medical applications. Existing approaches have limitations in enforcing 3D consistency while detecting 3D/2D facial landmarks due to the lack of multi-view in-the-wild training data. Fortunately, with the recent advances in generative visual models and neural rendering, we have witnessed rapid progress towards high quality 3D image synthesis. In this work, we leverage such approaches to construct a synthetic dataset and propose a novel multiview consistent learning strategy to improve 3D facial landmark detection accuracy on in-the-wild images. The proposed 3D-aware module can be plugged into any learning-based landmark detection algorithm to enhance its accuracy. We demonstrate the superiority of the proposed plug" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.876, + 0.47, + 0.903 + ], + "angle": 0, + "content": "*This work was done when Libing Zeng and Wentao Bao were interns at OPPO US Research Center, InnoPeak Technology, Inc." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.61, + 0.892, + 0.641 + ], + "angle": 0, + "content": "in module with extensive comparison against state-of-the-art methods on several real and synthetic datasets." + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.677, + 0.633, + 0.694 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.706, + 0.895, + 0.904 + ], + "angle": 0, + "content": "Accurate and precise facial landmark plays a significant role in computer vision and graphics applications, such as face morphing [54], facial reenactment [58], 3D face reconstruction [17, 18, 30], head pose estimation [38], face recognition [1, 10, 13, 19, 32, 41, 71], and face generation [11, 21, 60, 69]. In these applications, facial landmark detection provides great sparse representation to ease the burden of network convergence in different training stages and is often used as performance evaluation metric. For instance, as a facial prior, it provides good initialization for subsequent training [66, 67, 69, 76], good intermediate representation to bridge the gap between different modalities for content generation [11, 27, 51, 79], loss terms which reg-" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12747" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "ularize the facial expression [11, 52], or evaluation metrics to measure the facial motion quality [53, 73, 78]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.468, + 0.272 + ], + "angle": 0, + "content": "The aforementioned applications require the estimated facial landmarks to be accurate even with significantly varied facial appearance under different identities, facial expressions, and extreme head poses. 
Tremendous efforts have been devoted to address this problem [15, 22-24, 29, 34, 40, 56, 63, 74, 75, 77, 82, 84]. These approaches often rely on manually annotated large-scale lab-controlled or in-the-wild image datasets [4, 34] to handle various factors such as arbitrary facial expressions, head poses, illumination, facial occlusions, etc." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.273, + 0.468, + 0.514 + ], + "angle": 0, + "content": "However, even with the high cost of human labeling, consistent and accurate manual annotation of landmarks remains challenging [22, 23, 34]. It is very difficult, if not impossible, to force a person to annotate the facial landmark keypoints at the same pixel locations for faces of different poses, let alone different annotators under different labeling environments. Such annotation inconsistency and inaccuracy in training images are often the killing factor to learn an accurate landmark localization model. This is particularly a major problem in non-frontal faces where annotation becomes extremely challenging. As shown in Fig. 1(a) a small annotation variation in view #1, results in a significant inaccuracy in view #2. This multi-view inconsistency and inaccuracy can ultimately lead to poor landmark detection accuracy, especially for facial images with extreme head pose." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.515, + 0.468, + 0.77 + ], + "angle": 0, + "content": "To mitigate this annotation inconsistency and inaccuracy issue, we propose to learn facial landmark detection by enforcing multi-view consistency during training. Given the images of the same facial identity captured with different head poses, instead of detecting facial landmark at each separate facial image, we propose a multi-view consistency supervision to locate facial landmark in a holistic 3D-aware manner. To enforce multi-view consistency, we introduce self-projection consistency loss and multi-view landmark loss in training. We also propose an annotation generation procedure to exploit the merits of lab-controlled data (e.g., multi-view images, consistent annotations) and in-the-wild data (e.g., wide range of facial expressions, identities). Thanks to this synthetic data, our method does not rely on human annotation to obtain the accurate facial landmark locations. Therefore, it alleviates the problem of learning from inaccurate and inconsistent annotations." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.771, + 0.468, + 0.875 + ], + "angle": 0, + "content": "We formulate our solution as a plug-in 3D aware module, which can be incorporated into any facial landmark detector and can boost a pre-trained model with higher accuracy and multi-view consistency. We demonstrate the effectiveness of our approach through extensive experiments on both synthetic and real datasets. The main contributions of our work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.886, + 0.468, + 0.9 + ], + "angle": 0, + "content": "- We show, for the first time, how to combine the merits" + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "of lab captured face image data (e.g., multi-view) and the in-the-wild face image datasets (e.g., appearance diversity). Using our proposed approach we produce a large-scale synthetic, but realistic, multi-view face dataset, titled DAD-3DHeads-Syn." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.179, + 0.892, + 0.269 + ], + "angle": 0, + "content": "- We propose a novel 3D-aware optimization module, which can be plugged into any learning-based facial landmark detection methods. By refining an existing landmark detection algorithm using our optimization module, we are able to improve its accuracy and multiview consistency." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.282, + 0.892, + 0.34 + ], + "angle": 0, + "content": "- We demonstrate the performance improvements of our module built on top multiple baseline methods on simulated dataset, lab-captured datasets, and in-the-wild datasets." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.179, + 0.892, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.357, + 0.642, + 0.373 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.383, + 0.892, + 0.443 + ], + "angle": 0, + "content": "In this section, we review face landmark datasets and detection algorithms that are most related to our approach. We also provide a brief review of data simulation tools related to our work." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.454, + 0.8, + 0.469 + ], + "angle": 0, + "content": "2.1. Face Landmark Detection Dataset" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.478, + 0.892, + 0.688 + ], + "angle": 0, + "content": "Lab-controlled dataset. Datasets under \"controlled\" conditions [8, 20, 36, 39, 46, 48, 64, 65, 72] typically collect video/images from indoor scenarios with certain restrictions, e.g. pre-defined expressions, head poses, etc. For example, FaceScape dataset [65] contains 938 individuals and each with 20 expressions using an array of 68 cameras under controlled illumination and positions. Thus, it contains aligned and consistent multi-view images and facial landmark annotations. However, the identities, poses, and expressions are limited. In addition, the environment conditions are fully controlled. These result in limited generalization capability of models trained on this dataset. Moreover, the annotation workflow of such a dataset is expensive and hard to scale." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In-the-wild dataset. The boom of internet image sharing has enabled the creation of many \"in-the-wild\" facial landmark datasets [3,7,32,49,85], collected from the web, to facilitate facial landmark detection research. However, manually annotating facial landmarks on in-the-wild images is a time-consuming process and not scalable. Zhu et al. [83] release 300W-LP by extending the original 300W dataset with synthetic images with extreme pose through image profiling of frontal pose images. However, the novel view images are generated by simply applying rotation matrix on the original images, which leads to limited view range and poor image quality. Meanwhile, 300W-LP lacks diversity in face appearance and expression because of the intrinsic limitations of 300W. Recently, Martyniuk et al. [34] introduce a" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12748" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.395 + ], + "angle": 0, + "content": "new dataset, DAD-3DHeads, by proposing a novel annotation scheme. 
Specifically, their approach allows the annotator to adjust the landmarks by looking at how well the mesh, generated from the landmarks, fits the input image. The proposed scheme addresses the problems exhibited by existing labeling tools, such as \"guessing\" the positions of the correct landmarks for invisible parts of the head, thus enabling accurate annotations. DAD-3DHeads dataset contains 44,898 in-the-wild images, covering extreme facial expressions, poses, and challenging illuminations. However, the DAD-3DHeads still has some drawbacks. First, even with the mesh fitting guidance, the annotations can be inaccurate. As shown in Fig. 1 (a), even a small inaccuracy in one view could result in a significant inconsistency when projected to another view. This inconsistency could negatively affect the training of the detection network. Second, since the depth is estimated by FLAME [33], annotation accuracy is limited by the FLAME model. Third, this dataset lacks multi-view images, and thus cannot be used to enforce multi-view consistency." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.404, + 0.24, + 0.419 + ], + "angle": 0, + "content": "2.2. Data Simulation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.428, + 0.473, + 0.685 + ], + "angle": 0, + "content": "Simulation [26,28,35,42,44,45,50,59,61,62,70] is a useful tool in situations where training data for learning-based methods is expensive to annotate or even hard to acquire. For example, Zeng et al. [70] and Richardson et al. [42] use 3D Morphable Model (3DMM) to render training data with different lighting conditions, identities, expressions, and texture basis elements for reconstructing detailed facial geometry. However, the simulated images produced by these approaches lack realism and have severe domain gaps compared with real-world captures, limiting their usage. Bak et al. [2] adapt synthetic data using a CycleGAN [81] with a regularization term for preserving identities. Ayush et al. [57] use the images and latent code generated by StyleGAN [81] to train a controllable portrait image generation model. However, it is hard to control the attribute consistencies of images simulated by generative models, which limits the usage of the generated datasets." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.696, + 0.406, + 0.712 + ], + "angle": 0, + "content": "2.3. Face Landmark Detection Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.81 + ], + "angle": 0, + "content": "Traditional facial landmark detection methods leverage either holistic facial appearance information [12], or the global facial shape patterns [31, 85]. They yield reasonable results for images captured in lab-controlled environments with frontal faces and good lighting, however the performance on most of in-the-wild images is inferior." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.9 + ], + "angle": 0, + "content": "Recently, deep learning-based algorithms have made promising progress on 2D facial landmark localization [15, 22-24,29,34,40,56,63,74,75,77,82,84] in terms of robustness, generalizability, and accuracy. FAN [6] constructs, for the first time, a very strong baseline by combining a state-of-the-art residual block and a state-of-the-art architecture" + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.092, + 0.894, + 0.289 + ], + "angle": 0, + "content": "
<tr><td>Dataset Type</td><td>Lab-Controlled</td><td>In-the-wild</td><td>Ours</td></tr>
<tr><td>Examples</td><td></td><td></td><td></td></tr>
<tr><td>In-the-wild</td><td>×</td><td>✓</td><td>✓</td></tr>
<tr><td>Large Scale</td><td>×</td><td>✓</td><td>✓</td></tr>
<tr><td>Balanced</td><td>✓</td><td>×</td><td>✓</td></tr>
<tr><td>Multiview Consistent</td><td>✓</td><td>×</td><td>✓</td></tr>
<tr><td>Annotation Consistent</td><td>✓</td><td>×</td><td>✓</td></tr>
<tr><td>Scalable</td><td>×</td><td>×</td><td>✓</td></tr>
" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.301, + 0.893, + 0.357 + ], + "angle": 0, + "content": "Figure 2. The feature comparison of different type of datasets. For example, FaceScape [65] and MultiFace [64] are lab-controlled datasets, while 300W [47], AFLW2000 [68], and DAD-3DHeads [34] are in-the-wild datasets." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.361, + 0.877, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.459, + 0.842, + 0.473 + ], + "angle": 0, + "content": "Figure 3. The proposed data simulation pipeline." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.482, + 0.895, + 0.738 + ], + "angle": 0, + "content": "for landmark localization and trains it on a very large yet synthetically expanded 2D facial landmark dataset. To address self-occlusion and large appearance variation, Zhu et al. [82] propose a cascaded convolutional neural network and optimized weighted parameter distance cost loss function to formulate the priority of 3DMM parameters during training instead of predicting facial landmark keypoints. To further address the problems of shape reconstruction and pose estimation simultaneously, Martyniuk et al. propose an end-to-end trained DAD-3DNet [34] to regress 3DMM parameters and recover the 3D head geometry with differential FLAME decoder. However, due to the intrinsic limitation of the manually annotated in-the-wild dataset, the detection results are affected by the annotation noise and the 3D inconsistency of the single view images. In this paper, we mainly focus on improving the performance of deep-learning based methods." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.752, + 0.892, + 0.786 + ], + "angle": 0, + "content": "3. Balanced and Realistic Multi-view Face Dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.894, + 0.901 + ], + "angle": 0, + "content": "We believe there are five desired properties that a good facial landmark dataset should fulfill: (1) contain full range of multi-view images; (2) bridge the domain gap between the dataset and the real-world captured images; (3) contain diverse facial appearance including different poses, expressions, illuminations, and identities; (4) have consistent and accurate annotations across the whole dataset; (5) be" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12749" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.168 + ], + "angle": 0, + "content": "easy to obtain and scalable. The existing datasets can are either lab-controlled captures [64, 65] or in-the-wild collected [34, 47, 68]. Unfortunately, these datasets lack one or more desired attributes. In contrast, our dataset meets all of these criteria (Fig. 2)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.168, + 0.468, + 0.289 + ], + "angle": 0, + "content": "Unlike previous graphics or generative model-based data synthesis approaches described in Sec. 2.2, we propose a novel facial dataset simulation scheme by leveraging Neural Radiance Field (NeRF) [37] to facilitate training a facial landmark detection network. Fig. 3 shows our dataset creation pipeline. We generate multiview images with consistent landmarks using a single in-the-wild image along with annotated landmark as input." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.29, + 0.468, + 0.517 + ], + "angle": 0, + "content": "Specifically, we choose DAD-3DHeads [34] as our initial dataset since it contains images under a variety of extreme poses, facial expressions, challenging illuminations, and severe occlusions cases. Given an image and its landmarks from this dataset, our goal is to reconstruct multiview images with their corresponding landmarks. Inspired by GAN inversion [80], we first fit a latent code to each image in DAD-3DHeads datasets using EG3D [9] as decoder by following Pivotal Tuning Inversion (PTI) [43]. Note that, EG3D GAN inversion requires the camera pose of the input image, which we estimate using Deep3DFace [14]. Then we can use EG3D to decode the optimized latent code to NeRF. Next, we use volume rendering on the NeRF with 512 uniformly sampled camera views from a large view range, producing 512 multi-view images." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.468, + 0.729 + ], + "angle": 0, + "content": "To obtain the landmarks for each image, we start with the well-annotated groundtruth 2D landmarks of the original images from the DAD-3DHeads dataset. Then we use the estimated camera pose of the input image to unproject the annotated landmarks to 3D space. At last, we project the 3D landmarks to the 512 sampled camera views to obtain landmark annotation on the simulated views. The simulated dataset not only inherits the merits of DAD-3DHeads (e.g. diverse identities, expressions, poses, and illuminations), but also comes with a lot of new features (e.g., balanced head pose, consistent annotation, and multi-view images). In total, there are 2,150,400 training pairs and 204,800 testing pairs in our extended dataset, called DAD-3DHeadsSyn." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.745, + 0.465, + 0.763 + ], + "angle": 0, + "content": "4. 3D-Aware Multi-view Consistency Training" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.771, + 0.187, + 0.786 + ], + "angle": 0, + "content": "4.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.468, + 0.901 + ], + "angle": 0, + "content": "The state-of-the-art landmark detectors [5, 34] can output reasonable results on in-the-wild images. However, we may observe that the predicted landmark are floating on the face surface instead of fitting the face perfectly in a lot of cases. We can easily verify if the detected landmark fits the face by projecting the detected landmark to another view (see Fig. 1(a)). Armed by this observation of multi-view in" + }, + { + "type": "code_caption", + "bbox": [ + 0.502, + 0.091, + 0.772, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 3D-Aware Plug-in Module." + }, + { + "type": "algorithm", + "bbox": [ + 0.509, + 0.11, + 0.892, + 0.373 + ], + "angle": 0, + "content": "1: Input: pretrained detector \\(F\\) with weights \\(\\theta\\), \\(M\\) single-view images \\(I_{1,\\dots,M} \\in \\mathcal{D}\\) along with ground truth landmark \\(L_{1,\\dots,M}\\), paired \\(N\\) multi-view images \\(V_{1,\\dots,N} \\in \\hat{\\mathcal{D}}\\) along with ground truth landmark \\(L_{1,\\dots,N}\\). \n2: Output: detector \\(F\\) with updated weights \\(\\theta^{*}\\) \n3: Initialization: set \\(\\theta\\) to pre-trained weights \n4: Unfreeze \\(\\theta\\) \n5: for number of iterations do \n6: Output predicted landmarks \\(\\hat{L}_{1,\\dots,N}\\) for each view. \n7: Randomly sample \\(P\\) landmarks from them, \\((1 < P \\leq N)\\). 
\n8: Cast the landmarks into world space and estimate the approximate 3D landmark \\(\\dot{L}\\) using Eq. 2, 3, 4, 5 \n9: Project \\(\\dot{L}\\) onto the image planes of remaining \\(Q\\) views \\((Q = N - P)\\) using Eq. 6, 7 \n10: Calculate Total Loss \\(\\mathcal{L}\\) using Eq. 11 \n11: \\(\\theta^{*} \\gets Adam\\{\\mathcal{L}\\}\\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.447 + ], + "angle": 0, + "content": "consistency and inaccuracy, we propose a novel 3D-Aware training module \\(\\mathcal{R}\\) to further improve the performance of baseline detection algorithm \\(F\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.448, + 0.892, + 0.523 + ], + "angle": 0, + "content": "Given a facial landmark detection network \\(F_{\\theta}(\\cdot)\\) pretrained on dataset \\(\\mathcal{D}\\), the proposed module \\(\\mathcal{R}\\) further refines the network parameters \\(\\theta\\) by leveraging our simulated DAD-3DHeads-Syn dataset \\(\\hat{\\mathcal{D}}\\) in addition to the original dataset \\(\\mathcal{D}\\). Our module \\(\\mathcal{R}\\) can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.536, + 0.892, + 0.553 + ], + "angle": 0, + "content": "\\[\nF _ {\\theta^ {*}} \\leftarrow \\mathcal {R} \\left(F _ {\\theta}, X, V _ {1, \\dots , N}\\right), X \\in \\mathcal {D}, V _ {1, \\dots , N} \\in \\hat {\\mathcal {D}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.566, + 0.892, + 0.69 + ], + "angle": 0, + "content": "where \\(X\\) is the image batch sampled from \\(\\mathcal{D}\\) and \\(V_{1,\\dots,N}\\) are \\(N\\) multi-view images sampled from \\(\\hat{\\mathcal{D}}\\). We refine the network parameters \\(\\theta\\) through exploring 3D information among multi-view images and applying a novel projection consistency during the fine-tuning process. Our module \\(\\mathcal{R}\\) does not result in any new network parameters and can be plugged into any learning-based network. We show the training protocol in Alg. 1." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.7, + 0.81, + 0.717 + ], + "angle": 0, + "content": "4.2. Multi-view Consistency Supervision" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.724, + 0.892, + 0.829 + ], + "angle": 0, + "content": "We propose a novel multi-view supervision to force the baseline network to learn to be 3D consistent. To simplify notation, we ignore the batch dimension and fixed camera intrinsic matrix. For every training iteration, we randomly sample \\(N\\) image and landmark pairs \\(\\{V,\\mathrm{L}\\}_{1,\\dots,N}\\) from \\(\\hat{\\mathcal{D}}\\) and \\(M\\) image and landmark pairs \\(\\{I,\\mathrm{L}\\}_{1,\\dots,M}\\) from initial dataset \\(\\mathcal{D}^*\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.831, + 0.892, + 0.864 + ], + "angle": 0, + "content": "We pass \\(V_{1,\\dots,N}\\) to the baseline network \\(F\\) to obtain predicted landmarks \\(\\hat{\\mathrm{L}}_{1,\\dots,N}\\) which are shown with green" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "\\(^{*}\\mathcal{D}\\) is DAD-3DHeads dataset when training DAD-3DNet and is AFLW2000-3D when training 3DDFA." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12750" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.102, + 0.089, + 0.452, + 0.254 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.263, + 0.473, + 0.362 + ], + "angle": 0, + "content": "Figure 4. Multi-view Consistency Supervision. Predicted landmarks \\(\\hat{\\mathbf{L}}_{1,\\dots,N}\\), estimated 3D landmark \\(\\dot{\\mathbf{L}}\\), projected landmarks \\(\\tilde{\\mathbf{L}}_{1,\\dots,Q}\\), and ground truth landmarks \\(L\\) are denoted as green, blue, red, and yellow points respectively. The processes of calculating 3D landmark \\(\\dot{\\mathbf{L}}\\) and the projection procedure are shown as light blue and pink arrows, respectively. \\(\\mathcal{L}_{\\mathrm{Self - Cons}}\\) and \\(\\mathcal{L}_{\\mathrm{Multiview}}\\) are represented as red and light green lines, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.369, + 0.473, + 0.462 + ], + "angle": 0, + "content": "points in Fig. 4. We then randomly select \\(P\\) predicted landmarks \\(\\hat{\\mathrm{L}}_{1,\\dots,P} \\in \\mathbb{R}^{P \\times 68 \\times 2}\\) from \\(\\hat{\\mathrm{L}}_{1,\\dots,N}\\) to calculate the \"canonical\" 3D landmark \\(\\dot{\\mathrm{L}} \\in \\mathbb{R}^{68 \\times 3}\\), as shown by the blue point in Fig. 4. We calculate each keypoint of the \"canonical\" 3D landmark \\(\\dot{\\mathrm{L}}^{(k)} \\in \\mathbb{R}^3, 1 \\leq k \\leq 68\\) through Direct Linear Transformation (DLT) [16, 25], as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.474, + 0.47, + 0.496 + ], + "angle": 0, + "content": "\\[\n\\mu_ {p} = \\mathbb {M} _ {p} [ 0,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 0 ] \\in \\mathbb {R} ^ {4}, \\qquad (2)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.144, + 0.501, + 0.47, + 0.523 + ], + "angle": 0, + "content": "\\[\nv _ {p} = \\mathbb {M} _ {p} [ 1,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 1 ] \\in \\mathbb {R} ^ {4}, \\qquad (3)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.088, + 0.531, + 0.47, + 0.551 + ], + "angle": 0, + "content": "\\[\n\\mathbf {A} = \\left[ \\mu_ {1} \\mid \\mu_ {2} \\mid \\dots \\mid \\mu_ {p} \\mid v _ {1} \\mid v _ {2} \\mid \\dots \\mid v _ {p} \\right] ^ {T} \\in \\mathbb {R} ^ {2 P \\times 4}, (4)\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.089, + 0.564, + 0.47, + 0.593 + ], + "angle": 0, + "content": "\\[\n\\dot {\\mathbf {L}} ^ {(k)} = \\left(\\mathbf {A} [:,: 3 ] ^ {T} \\quad \\mathbf {A} [:,: 3 ]\\right) ^ {- 1} \\mathbf {A} [:,: 3 ] ^ {T} (- \\mathbf {A} [:,: 3 ]), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.602, + 0.47, + 0.77 + ], + "angle": 0, + "content": "where, \\(p, 1 \\leq p \\leq P\\), is the index of views, and \\(\\mathbb{M}_{1,\\dots,P}\\) are the corresponding camera extrinsic matrices which are pre-defined for view synthesis during volume rendering (see Sec.3). Moreover, \\(\\mathbb{M}_p[i,:]\\) indicates the i-th row of \\(\\mathbb{M}_p\\), \\(\\mathbf{A}(:,i]\\) indicates columns 0 to \\(i - 1\\) of \\(\\mathbf{A}\\), and \\(\\mathbf{A}(:,i]\\) indicates the \\(i\\)-th column of \\(\\mathbf{A}\\). By Eq. 2 and Eq. 3, we first calculate the projection constraints for \\(\\dot{\\mathrm{L}}_{(k)}\\), i.e., \\(\\mu_p[:3] \\cdot \\dot{\\mathrm{L}}^{(k)} + \\mu_p[3] = 0\\), where ‘’ indicates the dot product. 
Then we stack all of the constraints into \\(\\mathbf{A} \\in \\mathbb{R}^{2P \\times 4}\\) by Eq. 4. At last, we compute \\(\\dot{\\mathrm{L}}^{(k)}\\) with a least-squares approach (Eq. 5)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.77, + 0.471, + 0.832 + ], + "angle": 0, + "content": "After obtaining the \"canonical\" 3D landmark \\(\\dot{\\mathrm{L}}\\), we project it onto the image planes of the remaining \\(Q = N - P\\) views to obtain the projected landmarks \\(\\tilde{\\mathrm{L}}_{1,\\dots,Q}\\), shown as red points in Fig. 4, by the following equations:" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.841, + 0.47, + 0.862 + ], + "angle": 0, + "content": "\\[\ns = \\mathbb{M}_{q}[:,:3] \\dot{\\mathrm{L}}^{(k)} + \\mathbb{M}_{q}[:,3] \\in \\mathbb{R}^{3 \\times 1}, \\tag{6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.177, + 0.871, + 0.469, + 0.907 + ], + "angle": 0, + "content": "\\[\n\\tilde{\\mathrm{L}}_{q}^{(k)} = \\left[ \\begin{array}{c} s[0] / s[2] \\\\ s[1] / s[2] \\end{array} \\right] \\in \\mathbb{R}^{2 \\times 1}, \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "where, in our case, \\(1 \\leq q \\leq Q\\). Eq. 6 transforms the 3D landmark from the \"canonical\" space to the camera space of view \\(q\\), and Eq. 7 transforms it from the camera space to the image space." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.138, + 0.894, + 0.229 + ], + "angle": 0, + "content": "Self-Projection Consistency Loss. Since all \\(N\\) views are sampled from one NeRF with different camera views, the predicted landmarks \\(\\hat{\\mathrm{L}}_{1,\\dots,Q}\\) and the projected landmarks \\(\\tilde{\\mathrm{L}}_{1,\\dots,Q}\\) should be consistent. Therefore, we propose to minimize the error between the predicted and projected landmarks as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.24, + 0.892, + 0.282 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{Self-Cons}} = \\sum_{q = 1}^{Q} \\| \\hat{\\mathrm{L}}_{q} - \\tilde{\\mathrm{L}}_{q} \\|_{1}. \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.296, + 0.892, + 0.358 + ], + "angle": 0, + "content": "Mesh Consistency Loss.* Besides the self-projection consistency, all the \\(N\\) views also share one mesh topology in the canonical space. Therefore, we apply a mesh consistency loss in the canonical space, calculated by:" + }, + { + "type": "equation", + "bbox": [ + 0.587, + 0.369, + 0.892, + 0.412 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{Mesh-Cons}} = \\sum_{n = 1}^{N} \\| \\hat{\\mathrm{M}}_{n} - \\dot{\\mathrm{M}} \\|_{2}, \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.419, + 0.892, + 0.464 + ], + "angle": 0, + "content": "where \\(\\hat{\\mathbf{M}}_n\\) is the predicted mesh of view \\(n\\) in the canonical space, and \\(\\dot{\\mathbf{M}}\\) is the ground truth mesh of the original reference image." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.466, + 0.892, + 0.541 + ], + "angle": 0, + "content": "Multiview Landmark Loss. We also minimize the distance between the predicted 2D facial landmarks and the corresponding multi-view ground truth landmarks we obtained in Sec. 3, which are denoted as yellow points in Fig. 4. 
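To make Eqs. 2–7 above concrete, a small NumPy sketch of the DLT estimation and the subsequent re-projection could look as follows. It follows the 3x4 matrix convention implied by Eqs. 2–6 and is an illustrative sketch, not the authors' implementation.

```python
import numpy as np

def dlt_point(M_list, uv_list):
    """Estimate one 3D keypoint from P views via DLT (Eqs. 2-5).
    M_list: P projection/extrinsic matrices, each (3, 4); uv_list: P observed 2D points (u, v)."""
    rows = []
    for M, (u, v) in zip(M_list, uv_list):
        rows.append(M[0, :] - M[2, :] * u)    # Eq. 2
        rows.append(M[1, :] - M[2, :] * v)    # Eq. 3
    A = np.stack(rows)                         # (2P, 4), Eq. 4
    # Least-squares solution of A[:, :3] X = -A[:, 3] (Eq. 5).
    X, *_ = np.linalg.lstsq(A[:, :3], -A[:, 3], rcond=None)
    return X                                   # (3,) canonical 3D keypoint

def reproject(M_q, X):
    """Project the canonical 3D keypoint into view q (Eqs. 6-7)."""
    s = M_q[:, :3] @ X + M_q[:, 3]             # Eq. 6: canonical space -> camera space
    return np.array([s[0] / s[2], s[1] / s[2]])  # Eq. 7: camera space -> image space
```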
The loss can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.59, + 0.546, + 0.892, + 0.588 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{Multiview}} = \\sum_{n = 1}^{N} \\| \\hat{\\mathrm{L}}_{n} - \\mathrm{L}_{n} \\|_{1}. \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.593, + 0.892, + 0.655 + ], + "angle": 0, + "content": "We also incorporate the original loss of the baseline method, computed with the image and landmark pairs \\(\\{I,L\\}_{1,\\dots ,M}\\) from dataset \\(\\mathcal{D}\\), to stabilize our 3D-aware training. The overall loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.505, + 0.667, + 0.892, + 0.697 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\lambda_{1} \\mathcal{L}_{\\text{Self-Cons}} + \\lambda_{2} \\mathcal{L}_{\\text{Mesh-Cons}} + \\lambda_{3} \\mathcal{L}_{\\text{Multiview}} + \\mathcal{L}_{\\text{original}}, \\tag{11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.698, + 0.892, + 0.728 + ], + "angle": 0, + "content": "where \\(\\lambda_{1,2,3}\\) are hyperparameters that control the contribution of each component. We set \\(\\lambda_{1,2,3}\\) to 0.1 empirically." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.729, + 0.892, + 0.865 + ], + "angle": 0, + "content": "Note that our training is a plug-in module and can easily be incorporated into any existing facial landmark detector. For different pretrained models, we only need to change \\(\\mathcal{L}_{\\mathrm{original}}\\), while the other novel loss components, calculated on our balanced synthetic dataset \\(\\hat{\\mathcal{D}}\\), can be applied directly. We show this plug-in capability on top of different baseline methods (e.g., DAD-3DNet [34] and 3DDFA [22]), and demonstrate that our 3D-aware training indeed improves their performance (see Sec. 5)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "*This loss is applied only when the baseline network outputs a mesh; in our case, both 3DDFA [22] and DAD-3DNet [34] do." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12751" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.472, + 0.131 + ], + "angle": 0, + "content": "Table 1. Facial landmark detection results (NME) on DAD-3DHeads [34], FaceScape [65], and MultiFace [64]. Lower values mean better results." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.145, + 0.481, + 0.272 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>DAD-3DHeads</td><td>FaceScape</td><td>MultiFace</td></tr>
<tr><td>FAN [6]</td><td>7.141</td><td>16.741</td><td>6.143</td></tr>
<tr><td>Dlib [31]</td><td>10.841</td><td>29.431</td><td>18.205</td></tr>
<tr><td>3DDFA-V2 [23]</td><td>2.926</td><td>6.853</td><td>5.942</td></tr>
<tr><td>3DDFA [22]</td><td>4.082</td><td>7.988</td><td>8.121</td></tr>
<tr><td>3DDFA+</td><td>3.784</td><td>7.425</td><td>7.305</td></tr>
<tr><td>DAD-3DNet [34]</td><td>2.599</td><td>6.681</td><td>5.786</td></tr>
<tr><td>DAD-3DNet+</td><td>2.503</td><td>6.050</td><td>5.480</td></tr></table>
" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.282, + 0.21, + 0.299 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.307, + 0.285, + 0.324 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.33, + 0.47, + 0.435 + ], + "angle": 0, + "content": "Training Details. We implement our algorithm in Pytorch and adopt ADAM to optimize the baseline networks. We run our 3D-aware training for 100 epochs with a batch size of 4, and a learning rate of \\(1 \\times 10^{-4}\\) on each baseline network. As to computational cost, fine-tuning DAD-3DNet take about and 16.25 hours on 4 NVIDIA RTX A6000 GPUs." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.436, + 0.47, + 0.466 + ], + "angle": 0, + "content": "Dataset. Besides DAD-3DHeads, we use two additional datasets to conduct the evaluations." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.476, + 0.47, + 0.567 + ], + "angle": 0, + "content": "- DAD-3DHeads [34] is the state-of-the-art in-the-wild 3D head dataset, which contains dense, accurate annotations, and diverse facial appearances. It consists of 44,898 images collected from various sources (37,840 in the training set, 4,312 in the validation set, and 2,746 in the test set)." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.578, + 0.47, + 0.639 + ], + "angle": 0, + "content": "- FaceScape [65] is a large-scale high-quality lab-controlled 3D face dataset, which contains 18,760 examples, captured from 938 subjects and each with 20 specific expressions." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.649, + 0.47, + 0.695 + ], + "angle": 0, + "content": "- MultiFace [64] is a new multi-view, high-resolution human face dataset collected from 13 identities for neural face rendering." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.476, + 0.47, + 0.695 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.704, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Training and Testing Split. In all the experiments, we only refine the baseline models with the training set of our DAD-3DHeads-Syn and their original training dataset. We use the test sets of DAD-3DHeads-Syn and DAD-3DHeads [34], and use the full datasets of FaceScape [65] and MultiFace [63] for performance evaluation. All the comparison methods have not been trained on the split test sets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Evaluation Metrics. We evaluate the facial landmark distance by calculating the Normalized Mean Error (NME). We normalize the landmark error by dividing its image resolution instead of the eye distance [55], since all the test images are aligned with offline tools. We calculate the head" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.121 + ], + "angle": 0, + "content": "pose error by the absolute distance of the Euler angle values." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.133, + 0.722, + 0.149 + ], + "angle": 0, + "content": "5.2. Quantitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.156, + 0.892, + 0.517 + ], + "angle": 0, + "content": "Landmark Detection Results. The quantitative landmark detection results on DAD-3DHeads [34], FaceScape [65], and MultiFace [64] are shown in Tab. 1. We can find that the DAD-3DNet+ refined by our 3D-aware multi-view consistency training achieves the best performance on all three datasets. 
Moreover, according to the results of 3DDFA [22], 3DDFA+, DAD-3DNet [34], and DAD-3DNet+, we find that after refinement, the new models (3DDFA+ and DAD-3DNet+) achieve much better results than the baseline models. For example, the detection error of DAD-3DNet [34] drops by 0.631 and 0.306, a \\(9\\%\\) and \\(5\\%\\) improvement, on the FaceScape and MultiFace datasets, respectively. Similarly, we improve 3DDFA [22] by 0.298 (\\(7\\%\\)), 0.563 (\\(7\\%\\)), and 0.816 (\\(10\\%\\)) on the DAD-3DHeads, FaceScape, and MultiFace datasets, respectively. We attribute the improvement to our proposed 3D-aware multi-view training. One interesting phenomenon is that all the methods perform better on the DAD-3DHeads dataset than on the other two lab-captured datasets. We attribute this to the extreme head poses and challenging facial expressions in the other two datasets. We plot the head pose distribution of DAD-3DHeads (see supplementary materials) and find that its head pose distribution is not as uniform as those of the other two lab-controlled datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.519, + 0.892, + 0.686 + ], + "angle": 0, + "content": "Head Pose Estimation Results. Tab. 2 shows the head pose estimation error on DAD-3DHeads [34] and FaceScape [65]. Our DAD-3DNet+ achieves the best performance on most metrics. Similar to the landmark results, we can also conclude that the head pose estimation accuracy of the baseline methods (3DDFA and DAD-3DNet) is improved by our 3D-aware multi-view consistency training (3DDFA+ and DAD-3DNet+). For example, after refinement, DAD-3DNet+ achieves \\(11.9\\%\\) and \\(18.8\\%\\) performance boosts in overall head pose error on the DAD-3DHeads and FaceScape datasets, respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.696, + 0.712, + 0.712 + ], + "angle": 0, + "content": "5.3. Qualitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We first show visual comparisons on images randomly sampled from the DAD-3DHeads [34] test set in Fig. 5. The landmarks predicted by our DAD-3DNet+ model fit the individual's face more tightly than the other predictions. Furthermore, by comparing the third (3DDFA [22]) and fourth (3DDFA+, ours) columns, we can see that the refined model improves the landmark accuracy dramatically. Similar visual improvements can be found in the sixth (DAD-3DNet) and seventh (DAD-3DNet+) columns as well. Comparing the sixth and seventh columns, we can see that the refinement training drags and rotates the landmarks in 3D space to better fit them to the individual's face surface. We attribute this ability" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12752" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.079, + 0.85, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.335, + 0.896, + 0.378 + ], + "angle": 0, + "content": "Figure 5. The visual results of Dlib [31], FAN [5], 3DDFA [22], our refined 3DDFA+, 3DDFA-V2, DAD-3DNet [34], and our refined DAD-3DNet+ on images randomly sampled from the DAD-3DHeads [34] test set. We show the enlarged error region (white box) in the middle row." + }, + { + "type": "table_caption", + "bbox": [ + 0.098, + 0.385, + 0.872, + 0.4 + ], + "angle": 0, + "content": "Table 2. Head pose estimation results (head pose error) on DAD-3DHeads [34] and FaceScape [65]. Lower values mean better results." 
+ }, + { + "type": "table", + "bbox": [ + 0.186, + 0.411, + 0.785, + 0.564 + ], + "angle": 0, + "content": "
<table><tr><td></td><td colspan="4">DAD-3DHeads</td><td colspan="4">FaceScape</td></tr>
<tr><td>Method</td><td>Pitch</td><td>Roll</td><td>Yaw</td><td>Overall</td><td>Pitch</td><td>Roll</td><td>Yaw</td><td>Overall</td></tr>
<tr><td>FAN [5]</td><td>9.765</td><td>5.376</td><td>6.390</td><td>7.177</td><td>8.774</td><td>4.895</td><td>6.556</td><td>6.742</td></tr>
<tr><td>Dlib [31]</td><td>13.352</td><td>11.799</td><td>14.654</td><td>13.268</td><td>17.861</td><td>12.663</td><td>19.548</td><td>16.691</td></tr>
<tr><td>3DDFA-V2 [23]</td><td>7.901</td><td>4.989</td><td>6.088</td><td>6.326</td><td>13.741</td><td>9.718</td><td>11.353</td><td>11.604</td></tr>
<tr><td>3DDFA [22]</td><td>9.895</td><td>7.977</td><td>8.996</td><td>8.956</td><td>20.789</td><td>18.145</td><td>19.692</td><td>19.752</td></tr>
<tr><td>3DDFA+</td><td>9.195</td><td>6.792</td><td>8.692</td><td>8.226</td><td>20.996</td><td>16.426</td><td>19.054</td><td>18.826</td></tr>
<tr><td>DAD-3DNet [34]</td><td>8.274</td><td>4.666</td><td>9.206</td><td>7.382</td><td>15.851</td><td>9.676</td><td>18.346</td><td>14.624</td></tr>
<tr><td>DAD-3DNet+</td><td>7.700</td><td>4.274</td><td>7.528</td><td>6.500</td><td>14.466</td><td>7.247</td><td>13.876</td><td>11.863</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.589, + 0.471, + 0.634 + ], + "angle": 0, + "content": "ity to our 3D-aware multi-view consistency training, which lets the refined model gain the better sense in 3D space, and therefore, improve the landmark detection results." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.636, + 0.47, + 0.772 + ], + "angle": 0, + "content": "To further validate the improvement gained by the proposed 3D-aware multi-view consistency training, we show the visual results (Fig. 6) of 3DDFA [22], our refined 3DDFA+, DAD-3DNet [34], and our refined DAD-3DNet+ on images sampled from four different test sets. We can find that our proposed refinement improves the landmark detection results in the eye, mouth, and face contour regions, which usually contain more appearance dynamics than the other areas." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.786, + 0.391, + 0.803 + ], + "angle": 0, + "content": "5.4. Performance Improvement Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.903 + ], + "angle": 0, + "content": "To systematically understand the source of improvement after refining the baseline methods (DAD-3DNet [34] and 3DDFA [22]) with our proposed 3D-aware multi-view consistency training, we further calculate and plot the landmark and head pose error improvements on DAD-3DHeads [34] (see Fig. 7). Instead of calculating the overall improved" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.589, + 0.895, + 0.892 + ], + "angle": 0, + "content": "error score, we split all the testing images into different groups according to their head pose value and calculate the improved error score within each group. We can find that the improvement by our training gets more obvious as the head pose gets more challenging. For example, the landmark error improvement (Fig. 7 upper section) using our method built on top of 3DDFA [22] increases from 0.12 to 0.71. Similarly, the head pose estimation error (Fig. 7 lower section) improvement using our method built on top of DAD-3DNet [34] increases from 0.02 to 2.7. We also show the detection result visualization in Fig. 8. We can see that from left to right, as the head pose increases, the error of the DAD-3DNet+ (second row) is more stable than the error (first row) of the DAD-3DNet. Base on this trend, we conclude that our proposed 3D-aware multi-view consistency training provides a more significant improvement over the baselines on images with larger head pose. This verifies our hypothesis that multi-view consistency training enables the network to learn 3D-aware information, which benefits the detection results on images with large head pose." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12753" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.088, + 0.468, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.382, + 0.47, + 0.439 + ], + "angle": 0, + "content": "Figure 6. The visual comparisons between baseline methods and the refined methods on four testing sets. The left column and upper row list the dataset and method names, respectively. \\(^+\\) denotes the model that has been refined by our 3D-aware training." 
+ }, + { + "type": "image", + "bbox": [ + 0.101, + 0.449, + 0.441, + 0.577 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.578, + 0.441, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.711, + 0.47, + 0.781 + ], + "angle": 0, + "content": "Figure 7. The landmark (top) and head pose (bottom) error improvement over DAD-3DNet [34] and 3DDFA [22] on images from different head pose ranges. The solid and dotted lines indicate DAD-3DNet [34] vs. DAD-3DNet+ (ours) and 3DDFA [22] vs. 3DDFA+ (ours)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.801, + 0.23, + 0.817 + ], + "angle": 0, + "content": "5.5. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "We conduct ablation study on FaceScape [65] to verify the importance of main components of our novel design. As shown in Tab. 3, we calculate NME of landmark and MAE of pose estimation in these ablation experiments. Based on these numbers, we can see the performance degrades" + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.088, + 0.875, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.231, + 0.892, + 0.301 + ], + "angle": 0, + "content": "Figure 8. The error visualization of DAD-3DNet [34] and our DAD-3DNet+ on MultiFace [64] dataset. The white and green dots are the ground truth and predicted landmarks, respectively. We use the red line to show the error distance. From left to right, the head pose increases gradually." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.311, + 0.892, + 0.339 + ], + "angle": 0, + "content": "Table 3. Ablation Study on FaceScape [65]. The top 2 numbers are shown in bold." + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.351, + 0.865, + 0.465 + ], + "angle": 0, + "content": "
<table><tr><td></td><td>Component</td><td>NME ↓</td><td>Pose ↓</td></tr>
<tr><td>1</td><td>full model (P=4)</td><td>6.050</td><td>11.863</td></tr>
<tr><td>2</td><td>w/o L_Mesh-Cons</td><td>6.168</td><td>12.327</td></tr>
<tr><td>3</td><td>w/o L_Self-Cons</td><td>6.541</td><td>13.623</td></tr>
<tr><td>4</td><td>full model (P=8)</td><td>6.048</td><td>11.923</td></tr>
<tr><td>5</td><td>full model (P=16)</td><td>6.098</td><td>11.902</td></tr>
<tr><td>6</td><td>full model (P=32)</td><td>6.139</td><td>11.912</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.486, + 0.892, + 0.577 + ], + "angle": 0, + "content": "drastically when we remove \\(\\mathcal{L}_{\\mathrm{Self - Cons}}\\). Moreover, removing \\(\\mathcal{L}_{\\mathrm{Mesh - Cons}}\\) negatively impacts the results, demonstrating its importance. Moreover, estimating the 3D landmarks in the world space using fewer views leads to better results. This is a significant advantage as it makes our fine-tuning process more efficient." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.612, + 0.619, + 0.628 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.901 + ], + "angle": 0, + "content": "We propose 3D-aware multi-view consistency training, a new framework for improving deep-learning base landmark detection algorithms. Through a set of novel loss functions, we force the network to produce landmarks that are 3D consistent. We additionally introduce a novel dataset simulation pipeline to combine the merits of lab-controlled captures and in-the-wild collected images. The model refined by our method outperforms previous approaches in terms of landmark detection accuracy and head pose estimation accuracy. Admittedly, our work has some limitations. For example, our proposed training relies on the performance of the baseline method. If the pretrained baseline yield poor initial predictions, our DLT would fail to estimate reasonable canonical 3D landmark, affecting the performance of the proposed self-projection consistency loss. Investigating ways to reduce the reliance on the accuracy of the baseline methods would be an interesting future research." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12754" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.185 + ], + "angle": 0, + "content": "[1] Vitor Albiero, Xingyu Chen, Xi Yin, Guan Pang, and Tal Hassner. img2pose: Face alignment and detection via 6dof, face pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7617-7627, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.187, + 0.472, + 0.243 + ], + "angle": 0, + "content": "[2] Slawomir Bak, Peter Carr, and Jean-Francois Lalonde. Domain adaptation through synthesis for unsupervised person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 189–205, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.245, + 0.472, + 0.3 + ], + "angle": 0, + "content": "[3] Peter N Belhumeur, David W Jacobs, David J Kriegman, and Neeraj Kumar. Localizing parts of faces using a consensus of exemplars. IEEE transactions on pattern analysis and machine intelligence, 35(12):2930-2940, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.472, + 0.357 + ], + "angle": 0, + "content": "[4] Adrian Bulat and Georgios Tzimiropoulos. Two-stage convolutional part heatmap regression for the 1st 3d face alignment in the wild (3dfaw) challenge. In European Conference on Computer Vision, pages 616-624. Springer, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.36, + 0.472, + 0.428 + ], + "angle": 0, + "content": "[5] Adrian Bulat and Georgios Tzimiropoulos. 
Binarized convolutional landmark localizers for human pose estimation and face alignment with limited resources. In Proceedings of the IEEE International Conference on Computer Vision, pages 3706-3714, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.43, + 0.472, + 0.5 + ], + "angle": 0, + "content": "[6] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.502, + 0.472, + 0.557 + ], + "angle": 0, + "content": "[7] Xavier P Burgos-Artizzu, Pietro Perona, and Piotr Dólar. Robust face landmark estimation under occlusion. In Proceedings of the IEEE international conference on computer vision, pages 1513-1520, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.559, + 0.472, + 0.6 + ], + "angle": 0, + "content": "[8] Chen Cao, Yanlin Weng, Stephen Lin, and Kun Zhou. 3d shape regression for real-time facial animation. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.602, + 0.472, + 0.685 + ], + "angle": 0, + "content": "[9] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.47, + 0.743 + ], + "angle": 0, + "content": "[10] Dong Chen, Shaoqing Ren, Yichen Wei, Xudong Cao, and Jian Sun. Joint cascade face detection and alignment. In European conference on computer vision, pages 109-122. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.745, + 0.47, + 0.814 + ], + "angle": 0, + "content": "[11] Lele Chen, Ross K Maddox, Zhiyao Duan, and Chenliang Xu. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7832-7841, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.816, + 0.47, + 0.857 + ], + "angle": 0, + "content": "[12] Timothy F Cootes, Gareth J Edwards, and Christopher J Taylor. Active appearance models. In European conference on computer vision, pages 484-498. Springer, 1998." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[13] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE con" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.094, + 0.892, + 0.119 + ], + "angle": 0, + "content": "ference on computer vision and pattern recognition, pages 5203-5212, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[14] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.191, + 0.892, + 0.245 + ], + "angle": 0, + "content": "[15] Xuanyi Dong, Yan Yan, Wanli Ouyang, and Yi Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, June 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.314 + ], + "angle": 0, + "content": "[16] Xuanyi Dong, Yi Yang, Shih-En Wei, Xinshuo Weng, Yaser Sheikh, and Shouu-I Yu. Supervision by registration and triangulation for landmark detection. IEEE transactions on pattern analysis and machine intelligence, 43(10):3681-3694, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.316, + 0.892, + 0.371 + ], + "angle": 0, + "content": "[17] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[18] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.442, + 0.892, + 0.482 + ], + "angle": 0, + "content": "[19] Golnaz Ghiasi and Charless C Fowlkes. Occlusion coherence: Detecting and localizing occluded faces. arXiv preprint arXiv:1506.08347, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[20] Ralph Gross, Iain Matthews, Jeffrey Cohn, Takeo Kanade, and Simon Baker. Multi-pie. Image and vision computing, 28(5):807-813, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.525, + 0.892, + 0.593 + ], + "angle": 0, + "content": "[21] Kuangxiao Gu, Yuqian Zhou, and Thomas Huang. Fnet: Landmark driven fetching and learning network for faithful talking facial animation synthesis. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 10861-10868, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.595, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[22] Jianzhu Guo, Xiangyu Zhu, and Zhen Lei. 3ddfa. https://github.com/cleardusk/3DDFA, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.624, + 0.892, + 0.678 + ], + "angle": 0, + "content": "[23] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In European Conference on Computer Vision, pages 152-168. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.679, + 0.892, + 0.72 + ], + "angle": 0, + "content": "[24] Xiaojie Guo, Siyuan Li, Jinke Yu, Jiawan Zhang, Jiayi Ma, Lin Ma, Wei Liu, and Haibin Ling. Pfld: A practical facial landmark detector. arXiv preprint arXiv:1902.10859, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.721, + 0.892, + 0.761 + ], + "angle": 0, + "content": "[25] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.763, + 0.892, + 0.83 + ], + "angle": 0, + "content": "[26] Stefan Hinterstoisser, Vincent Lepetit, Paul Wohlhart, and Kurt Konolige. On pre-trained image features and synthetic images for deep learning. 
In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0-0, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.832, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[27] Xinya Ji, Hang Zhou, Kaisiyuan Wang, Qianyi Wu, Wayne Wu, Feng Xu, and Xun Cao. Eamm: One-shot emotional talking face via audio-based emotion-aware motion model. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, 2022." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.094, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12755" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[28] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.177, + 0.472, + 0.232 + ], + "angle": 0, + "content": "[29] Amin Jourabloo and Xiaoming Liu. Large-pose face alignment via cnn-based dense 3d model fitting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4188-4196, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.47, + 0.287 + ], + "angle": 0, + "content": "[30] Ira Kemelmacher-Shlizerman and Ronen Basri. 3d face reconstruction from a single image using a single reference face shape. IEEE transactions on pattern analysis and machine intelligence, 33(2):394-405, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.47, + 0.316 + ], + "angle": 0, + "content": "[31] Davis E. King. Dlib-ml: A machine learning toolkit. Journal of Machine Learning Research, 10:1755-1758, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.317, + 0.47, + 0.398 + ], + "angle": 0, + "content": "[32] Martin Koestinger, Paul Wohlhart, Peter M Roth, and Horst Bischof. Annotated facial landmarks in the wild: A largescale, real-world database for facial landmark localization. In 2011 IEEE international conference on computer vision workshops (ICCV workshops), pages 2144-2151. IEEE, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.4, + 0.47, + 0.455 + ], + "angle": 0, + "content": "[33] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6), 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.456, + 0.471, + 0.538 + ], + "angle": 0, + "content": "[34] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 20942–20952, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.471, + 0.609 + ], + "angle": 0, + "content": "[35] Nikolaus Mayer, Eddy Ilg, Philipp Fischer, Caner Hazirbas, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. What makes good synthetic training data for learning disparity and optical flow estimation? International Journal of Computer Vision, 126(9):942-960, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.61, + 0.47, + 0.678 + ], + "angle": 0, + "content": "[36] Kieron Messer, Jiri Matas, Josef Kittler, Juergen Luettin, Gilbert Maitre, et al. Xm2vtsdb: The extended m2vts database. In Second international conference on audio and video-based biometric person authentication, volume 964, pages 965-966. Citeseer, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.679, + 0.47, + 0.734 + ], + "angle": 0, + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.735, + 0.47, + 0.789 + ], + "angle": 0, + "content": "[38] Erik Murphy-Chutorian and Mohan Manubhai Trivedi. Head pose estimation in computer vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 31(4):607-626, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.47, + 0.873 + ], + "angle": 0, + "content": "[39] P Jonathon Phillips, Patrick J Flynn, Todd Scruggs, Kevin W Bowyer, Jin Chang, Kevin Hoffman, Joe Marques, Jaesik Min, and William Worek. Overview of the face recognition grand challenge. In 2005 IEEE computer society conference on computer vision and pattern recognition (CVPR'05), volume 1, pages 947-954. IEEE, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[40] Shengju Qian, Keqiang Sun, Wayne Wu, Chen Qian, and Jiaya Jia. Aggregation via separation: Boosting facial land" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.135 + ], + "angle": 0, + "content": "mark detector with semi-supervised style translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10153-10163, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.892, + 0.205 + ], + "angle": 0, + "content": "[41] Rajeev Ranjan, Vishal M Patel, and Rama Chellappa. Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE transactions on pattern analysis and machine intelligence, 41(1):121-135, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.892, + 0.262 + ], + "angle": 0, + "content": "[42] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.264, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on Graphics (TOG), 42(1):1-13, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.321, + 0.892, + 0.39 + ], + "angle": 0, + "content": "[44] Andreas Rossler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. Faceforensics: Learning to detect manipulated facial images. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1-11, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.392, + 0.892, + 0.433 + ], + "angle": 0, + "content": "[45] Nataniel Ruiz, Samuel Schulter, and Manmohan Chandraker. Learning to simulate. In International Conference on Learning Representations, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.435, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[46] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.505, + 0.892, + 0.575 + ], + "angle": 0, + "content": "[47] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.577, + 0.892, + 0.646 + ], + "angle": 0, + "content": "[48] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. A semi-automatic methodology for facial landmark annotation. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 896-903, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.647, + 0.892, + 0.717 + ], + "angle": 0, + "content": "[49] Jie Shen, Stefanos Zafeiriou, Grigoris G Chrysos, Jean Kossaifi, Georgios Tzimiropoulos, and Maja Pantic. The first facial landmark tracking in-the-wild challenge: Benchmark and results. In Proceedings of the IEEE international conference on computer vision workshops, pages 50-58, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.718, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[50] Ashish Shrivastava, Tomas Pfister, Oncel Tuzel, Joshua Susskind, Wenda Wang, and Russell Webb. Learning from simulated and unsupervised images through adversarial training. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2107-2116, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.789, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[51] Linsen Song, Wayne Wu, Chaoyou Fu, Chen Change Loy, and Ran He. Audio-driven dubbing for user generated contents via style-aware semi-parametric synthesis. IEEE Transactions on Circuits and Systems for Video Technology, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.846, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[52] Linsen Song, Wayne Wu, Chen Qian, Ran He, and Chen Change Loy. Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security, 17:585-598, 2022." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12756" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[53] Yang Song, Jingwen Zhu, Dawei Li, Andy Wang, and Hairong Qi. Talking face generation by conditional recurrent adversarial network. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 919–925. International Joint Conferences on Artificial Intelligence Organization, 7 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.177, + 0.469, + 0.232 + ], + "angle": 0, + "content": "[54] Luuk Spreeuwers, Maikel Schils, and Raymond Veldhuis. Towards robust evaluation of face morphing detection. In 2018 26th European Signal Processing Conference (EU-SIPCO), pages 1027-1031. IEEE, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.233, + 0.469, + 0.314 + ], + "angle": 0, + "content": "[55] Keqiang Sun, Wayne Wu, Tinghao Liu, Shuo Yang, Quan Wang, Qiang Zhou, Zuochang Ye, and Chen Qian. Fab: A robust facial landmark detection framework for motion-blurred videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5462-5471, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.316, + 0.469, + 0.372 + ], + "angle": 0, + "content": "[56] Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep convolutional network cascade for facial point detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3476-3483, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.372, + 0.469, + 0.455 + ], + "angle": 0, + "content": "[57] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.456, + 0.469, + 0.524 + ], + "angle": 0, + "content": "[58] Justus Thies, Michael Zollhofer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2387-2395, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.525, + 0.469, + 0.58 + ], + "angle": 0, + "content": "[59] Boris van Breugel, Trent Kyono, Jeroen Berrevoets, and Michaela van der Schaar. Decaf: Generating fair synthetic data using causally-aware generative networks. Advances in Neural Information Processing Systems, 34:22221-22233, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.58, + 0.469, + 0.636 + ], + "angle": 0, + "content": "[60] Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Jan Kautz, and Bryan Catanzaro. Few-shot video-to-video synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.636, + 0.469, + 0.707 + ], + "angle": 0, + "content": "[61] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.707, + 0.469, + 0.789 + ], + "angle": 0, + "content": "[62] Erroll Wood, Tadas Baltrusaitis, Louis-Philippe Morency, Peter Robinson, and Andreas Bulling. Learning an appearance-based gaze estimator from one million synthesised images. In Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research & Applications, pages 131–138, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.79, + 0.469, + 0.859 + ], + "angle": 0, + "content": "[63] Yue Wu, Zuoguan Wang, and Qiang Ji. 
Facial feature tracking under varying facial expressions and face poses based on restricted boltzmann machines. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3452-3459, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.86, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[64] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Timothy Godisart, Hyowon Ha, Alexander Hypes, Taylor Koska," + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.189 + ], + "angle": 0, + "content": "Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouu-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.192, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[65] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 601-610, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.262, + 0.892, + 0.331 + ], + "angle": 0, + "content": "[66] Ran Yi, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Apdrawinggan: Generating artistic portrait drawings from face photos with hierarchical gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10743-10752, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.333, + 0.892, + 0.389 + ], + "angle": 0, + "content": "[67] Ran Yi, Zipeng Ye, Ruoyu Fan, Yezhi Shu, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Animating portrait line drawings from a single face photo and a speech signal. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.39, + 0.892, + 0.446 + ], + "angle": 0, + "content": "[68] Xi Yin, Xiang Yu, Kihyuk Sohn, Xiaoming Liu, and Manmohan Chandraker. Towards large-posed face frontalization in the wild. In In Proceeding of International Conference on Computer Vision, Venice, Italy, October 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.447, + 0.892, + 0.516 + ], + "angle": 0, + "content": "[69] Egor Zakharov, Aliaksandra Shysheya, Egor Burkov, and Victor Lempitsky. Few-shot adversarial learning of realistic neural talking head models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9459-9468, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.518, + 0.892, + 0.574 + ], + "angle": 0, + "content": "[70] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. Df2net: A dense-fine-finer network for detailed 3d face reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2315-2324, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.576, + 0.892, + 0.63 + ], + "angle": 0, + "content": "[71] Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, and Yu Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters, 23(10):1499-1503, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.632, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[72] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, and Peng Liu. A high-resolution spontaneous 3d dynamic facial expression database. In 2013 10th IEEE international conference and workshops on automatic face and gesture recognition (FG), pages 1-6. IEEE, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.717, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[73] Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3661–3670, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.788, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[74] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. Springer, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[75] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Learning deep representation for face alignment with auxiliary attributes. IEEE transactions on pattern analysis and machine intelligence, 38(5):918-930, 2015." + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12757" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[76] Aihua Zheng, Feixia Zhu, Hao Zhu, Mandi Luo, and Ran He. Talking face generation via learning semantic and temporal synchronous landmarks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3682-3689. IEEE, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.472, + 0.233 + ], + "angle": 0, + "content": "[77] Erjin Zhou, Haoqiang Fan, Zhimin Cao, Yuning Jiang, and Qi Yin. Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In Proceedings of the IEEE international conference on computer vision workshops, pages 386-391, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.234, + 0.472, + 0.316 + ], + "angle": 0, + "content": "[78] Hang Zhou, Yasheng Sun, Wayne Wu, Chen Change Loy, Xiaogang Wang, and Ziwei Liu. Pose-controllable talking face generation by implicitly modularized audio-visual representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4176-4186, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.318, + 0.472, + 0.374 + ], + "angle": 0, + "content": "[79] Yang Zhou, Xintong Han, Eli Shechtman, Jose Echevarria, Evangelos Kalogerakis, and Dingzeyu Li. Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.375, + 0.472, + 0.43 + ], + "angle": 0, + "content": "[80] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European conference on computer vision, pages 592-608. Springer, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.431, + 0.472, + 0.5 + ], + "angle": 0, + "content": "[81] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.501, + 0.472, + 0.558 + ], + "angle": 0, + "content": "[82] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.559, + 0.472, + 0.614 + ], + "angle": 0, + "content": "[83] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.615, + 0.472, + 0.67 + ], + "angle": 0, + "content": "[84] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1):78-92, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.671, + 0.472, + 0.727 + ], + "angle": 0, + "content": "[85] Xiangxin Zhu and Deva Ramanan. Face detection, pose estimation, and landmark localization in the wild. In 2012 IEEE conference on computer vision and pattern recognition, pages 2879-2886. IEEE, 2012." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12758" + } + ] +] \ No newline at end of file diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_origin.pdf b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b47d7afad77c215ff46def21d91c4b9306680b5b --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/4aaf53b5-ffe9-4822-bbbc-9f293082f284_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1034b4b2767e62ba3c95a94f26b0be06635ae1de9b1f5422b6d2aef956499555 +size 6587817 diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/full.md b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fbf89fdd113fc85bb18aaeb8e74ec2a5b1290100 --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/full.md @@ -0,0 +1,359 @@ +# 3D-aware Facial Landmark Detection via Multi-view Consistent Training on Synthetic Data + +Libing Zeng $^{1*}$ , Lele Chen $^{2}$ , Wentao Bao $^{3*}$ , Zhong Li $^{2}$ , Yi Xu $^{2}$ , Junsong Yuan $^{4}$ , Nima K. 
Kalantari $^{1}$ $^{1}$ Texas A&M University, $^{2}$ OPPO US Research Center, InnoPeak Technology, Inc, + $^{3}$ Michigan State University, $^{4}$ University at Buffalo + +![](images/78e916daa4bd05091dac17e234910d011ec6556475c25fe43fb6d8e37bc3162e.jpg) +(a) Multi-view Inconsistency + +![](images/4919b56620ae21697831b195c4fa557750b5cc1fe3d2388aaea65720dc521e69.jpg) +(b) DAD-3DNet + +![](images/57c47b59d38b7da15bb14f7590b3291a5ba60eda6fd9368720dcbec5a98be4e4.jpg) +(c) DAD-3DNet+ (Ours) +Figure 1. We plot the landmark annotations labeled by different annotators with different colors in view #1 of (a). Accurate annotation of non-frontal faces with large angles like view #1 is challenging. This is a major problem since small differences between annotated landmarks in view #1, becomes substantially magnified when projected to view #2. Training a system on such datasets could lead to poor landmark detection accuracy, as shown in (b). We address this issue by proposing a 3D-aware optimization module that enforces multi-view consistency. We show the landmark detection improvement in (c). Magnified insets in (b) and (c) are shown in (d). After refined by the proposed 3D-aware learning, the detected facial landmark is better aligned with the identity. + +![](images/e682e867ccf468210ab3cdbd5f548fd4edc26a0e75ce8c0f95aa2f6c76a46a88.jpg) +(d) + +![](images/76baf03b0bfbec6487cbc3e3ca9a8d82f8dea50c47b0094375bfffb23984ec43.jpg) + +# Abstract + +Accurate facial landmark detection on wild images plays an essential role in human-computer interaction, entertainment, and medical applications. Existing approaches have limitations in enforcing 3D consistency while detecting 3D/2D facial landmarks due to the lack of multi-view in-the-wild training data. Fortunately, with the recent advances in generative visual models and neural rendering, we have witnessed rapid progress towards high quality 3D image synthesis. In this work, we leverage such approaches to construct a synthetic dataset and propose a novel multiview consistent learning strategy to improve 3D facial landmark detection accuracy on in-the-wild images. The proposed 3D-aware module can be plugged into any learning-based landmark detection algorithm to enhance its accuracy. We demonstrate the superiority of the proposed plug + +in module with extensive comparison against state-of-the-art methods on several real and synthetic datasets. + +# 1. Introduction + +Accurate and precise facial landmark plays a significant role in computer vision and graphics applications, such as face morphing [54], facial reenactment [58], 3D face reconstruction [17, 18, 30], head pose estimation [38], face recognition [1, 10, 13, 19, 32, 41, 71], and face generation [11, 21, 60, 69]. In these applications, facial landmark detection provides great sparse representation to ease the burden of network convergence in different training stages and is often used as performance evaluation metric. For instance, as a facial prior, it provides good initialization for subsequent training [66, 67, 69, 76], good intermediate representation to bridge the gap between different modalities for content generation [11, 27, 51, 79], loss terms which reg- + +ularize the facial expression [11, 52], or evaluation metrics to measure the facial motion quality [53, 73, 78]. + +The aforementioned applications require the estimated facial landmarks to be accurate even with significantly varied facial appearance under different identities, facial expressions, and extreme head poses. 
Tremendous efforts have been devoted to address this problem [15, 22-24, 29, 34, 40, 56, 63, 74, 75, 77, 82, 84]. These approaches often rely on manually annotated large-scale lab-controlled or in-the-wild image datasets [4, 34] to handle various factors such as arbitrary facial expressions, head poses, illumination, facial occlusions, etc. + +However, even with the high cost of human labeling, consistent and accurate manual annotation of landmarks remains challenging [22, 23, 34]. It is very difficult, if not impossible, to force a person to annotate the facial landmark keypoints at the same pixel locations for faces of different poses, let alone different annotators under different labeling environments. Such annotation inconsistency and inaccuracy in training images are often the killing factor to learn an accurate landmark localization model. This is particularly a major problem in non-frontal faces where annotation becomes extremely challenging. As shown in Fig. 1(a) a small annotation variation in view #1, results in a significant inaccuracy in view #2. This multi-view inconsistency and inaccuracy can ultimately lead to poor landmark detection accuracy, especially for facial images with extreme head pose. + +To mitigate this annotation inconsistency and inaccuracy issue, we propose to learn facial landmark detection by enforcing multi-view consistency during training. Given the images of the same facial identity captured with different head poses, instead of detecting facial landmark at each separate facial image, we propose a multi-view consistency supervision to locate facial landmark in a holistic 3D-aware manner. To enforce multi-view consistency, we introduce self-projection consistency loss and multi-view landmark loss in training. We also propose an annotation generation procedure to exploit the merits of lab-controlled data (e.g., multi-view images, consistent annotations) and in-the-wild data (e.g., wide range of facial expressions, identities). Thanks to this synthetic data, our method does not rely on human annotation to obtain the accurate facial landmark locations. Therefore, it alleviates the problem of learning from inaccurate and inconsistent annotations. + +We formulate our solution as a plug-in 3D aware module, which can be incorporated into any facial landmark detector and can boost a pre-trained model with higher accuracy and multi-view consistency. We demonstrate the effectiveness of our approach through extensive experiments on both synthetic and real datasets. The main contributions of our work are as follows: + +- We show, for the first time, how to combine the merits + +of lab captured face image data (e.g., multi-view) and the in-the-wild face image datasets (e.g., appearance diversity). Using our proposed approach we produce a large-scale synthetic, but realistic, multi-view face dataset, titled DAD-3DHeads-Syn. + +- We propose a novel 3D-aware optimization module, which can be plugged into any learning-based facial landmark detection methods. By refining an existing landmark detection algorithm using our optimization module, we are able to improve its accuracy and multiview consistency. +- We demonstrate the performance improvements of our module built on top multiple baseline methods on simulated dataset, lab-captured datasets, and in-the-wild datasets. + +# 2. Related Work + +In this section, we review face landmark datasets and detection algorithms that are most related to our approach. 
We also provide a brief review of data simulation tools related to our work. + +# 2.1. Face Landmark Detection Dataset + +Lab-controlled dataset. Datasets under "controlled" conditions [8, 20, 36, 39, 46, 48, 64, 65, 72] typically collect video/images from indoor scenarios with certain restrictions, e.g. pre-defined expressions, head poses, etc. For example, FaceScape dataset [65] contains 938 individuals and each with 20 expressions using an array of 68 cameras under controlled illumination and positions. Thus, it contains aligned and consistent multi-view images and facial landmark annotations. However, the identities, poses, and expressions are limited. In addition, the environment conditions are fully controlled. These result in limited generalization capability of models trained on this dataset. Moreover, the annotation workflow of such a dataset is expensive and hard to scale. + +In-the-wild dataset. The boom of internet image sharing has enabled the creation of many "in-the-wild" facial landmark datasets [3,7,32,49,85], collected from the web, to facilitate facial landmark detection research. However, manually annotating facial landmarks on in-the-wild images is a time-consuming process and not scalable. Zhu et al. [83] release 300W-LP by extending the original 300W dataset with synthetic images with extreme pose through image profiling of frontal pose images. However, the novel view images are generated by simply applying rotation matrix on the original images, which leads to limited view range and poor image quality. Meanwhile, 300W-LP lacks diversity in face appearance and expression because of the intrinsic limitations of 300W. Recently, Martyniuk et al. [34] introduce a + +new dataset, DAD-3DHeads, by proposing a novel annotation scheme. Specifically, their approach allows the annotator to adjust the landmarks by looking at how well the mesh, generated from the landmarks, fits the input image. The proposed scheme addresses the problems exhibited by existing labeling tools, such as "guessing" the positions of the correct landmarks for invisible parts of the head, thus enabling accurate annotations. DAD-3DHeads dataset contains 44,898 in-the-wild images, covering extreme facial expressions, poses, and challenging illuminations. However, the DAD-3DHeads still has some drawbacks. First, even with the mesh fitting guidance, the annotations can be inaccurate. As shown in Fig. 1 (a), even a small inaccuracy in one view could result in a significant inconsistency when projected to another view. This inconsistency could negatively affect the training of the detection network. Second, since the depth is estimated by FLAME [33], annotation accuracy is limited by the FLAME model. Third, this dataset lacks multi-view images, and thus cannot be used to enforce multi-view consistency. + +# 2.2. Data Simulation + +Simulation [26,28,35,42,44,45,50,59,61,62,70] is a useful tool in situations where training data for learning-based methods is expensive to annotate or even hard to acquire. For example, Zeng et al. [70] and Richardson et al. [42] use 3D Morphable Model (3DMM) to render training data with different lighting conditions, identities, expressions, and texture basis elements for reconstructing detailed facial geometry. However, the simulated images produced by these approaches lack realism and have severe domain gaps compared with real-world captures, limiting their usage. Bak et al. [2] adapt synthetic data using a CycleGAN [81] with a regularization term for preserving identities. 
Ayush et al. [57] use the images and latent code generated by StyleGAN [81] to train a controllable portrait image generation model. However, it is hard to control the attribute consistencies of images simulated by generative models, which limits the usage of the generated datasets. + +# 2.3. Face Landmark Detection Algorithms + +Traditional facial landmark detection methods leverage either holistic facial appearance information [12], or the global facial shape patterns [31, 85]. They yield reasonable results for images captured in lab-controlled environments with frontal faces and good lighting, however the performance on most of in-the-wild images is inferior. + +Recently, deep learning-based algorithms have made promising progress on 2D facial landmark localization [15, 22-24,29,34,40,56,63,74,75,77,82,84] in terms of robustness, generalizability, and accuracy. FAN [6] constructs, for the first time, a very strong baseline by combining a state-of-the-art residual block and a state-of-the-art architecture + +
| Dataset Type | Lab-Controlled | In-the-wild | Ours |
| --- | --- | --- | --- |
| Examples | FaceScape [65], MultiFace [64] | 300W [47], AFLW2000 [68], DAD-3DHeads [34] | DAD-3DHeads-Syn |
| In-the-wild | × | ✓ | ✓ |
| Large Scale | × | ✓ | ✓ |
| Balanced | ✓ | × | ✓ |
| Multiview Consistent | ✓ | × | ✓ |
| Annotation Consistent | ✓ | × | ✓ |
| Scalable | × | × | ✓ |
+ +![](images/094409b388bb96123759f3b785734f8945c95749b6beea7c0ccb43087beaf6f2.jpg) +Figure 2. The feature comparison of different type of datasets. For example, FaceScape [65] and MultiFace [64] are lab-controlled datasets, while 300W [47], AFLW2000 [68], and DAD-3DHeads [34] are in-the-wild datasets. +Figure 3. The proposed data simulation pipeline. + +for landmark localization and trains it on a very large yet synthetically expanded 2D facial landmark dataset. To address self-occlusion and large appearance variation, Zhu et al. [82] propose a cascaded convolutional neural network and optimized weighted parameter distance cost loss function to formulate the priority of 3DMM parameters during training instead of predicting facial landmark keypoints. To further address the problems of shape reconstruction and pose estimation simultaneously, Martyniuk et al. propose an end-to-end trained DAD-3DNet [34] to regress 3DMM parameters and recover the 3D head geometry with differential FLAME decoder. However, due to the intrinsic limitation of the manually annotated in-the-wild dataset, the detection results are affected by the annotation noise and the 3D inconsistency of the single view images. In this paper, we mainly focus on improving the performance of deep-learning based methods. + +# 3. Balanced and Realistic Multi-view Face Dataset + +We believe there are five desired properties that a good facial landmark dataset should fulfill: (1) contain full range of multi-view images; (2) bridge the domain gap between the dataset and the real-world captured images; (3) contain diverse facial appearance including different poses, expressions, illuminations, and identities; (4) have consistent and accurate annotations across the whole dataset; (5) be + +easy to obtain and scalable. The existing datasets can are either lab-controlled captures [64, 65] or in-the-wild collected [34, 47, 68]. Unfortunately, these datasets lack one or more desired attributes. In contrast, our dataset meets all of these criteria (Fig. 2). + +Unlike previous graphics or generative model-based data synthesis approaches described in Sec. 2.2, we propose a novel facial dataset simulation scheme by leveraging Neural Radiance Field (NeRF) [37] to facilitate training a facial landmark detection network. Fig. 3 shows our dataset creation pipeline. We generate multiview images with consistent landmarks using a single in-the-wild image along with annotated landmark as input. + +Specifically, we choose DAD-3DHeads [34] as our initial dataset since it contains images under a variety of extreme poses, facial expressions, challenging illuminations, and severe occlusions cases. Given an image and its landmarks from this dataset, our goal is to reconstruct multiview images with their corresponding landmarks. Inspired by GAN inversion [80], we first fit a latent code to each image in DAD-3DHeads datasets using EG3D [9] as decoder by following Pivotal Tuning Inversion (PTI) [43]. Note that, EG3D GAN inversion requires the camera pose of the input image, which we estimate using Deep3DFace [14]. Then we can use EG3D to decode the optimized latent code to NeRF. Next, we use volume rendering on the NeRF with 512 uniformly sampled camera views from a large view range, producing 512 multi-view images. + +To obtain the landmarks for each image, we start with the well-annotated groundtruth 2D landmarks of the original images from the DAD-3DHeads dataset. 
Then we use the estimated camera pose of the input image to unproject the annotated landmarks to 3D space. At last, we project the 3D landmarks to the 512 sampled camera views to obtain landmark annotation on the simulated views. The simulated dataset not only inherits the merits of DAD-3DHeads (e.g. diverse identities, expressions, poses, and illuminations), but also comes with a lot of new features (e.g., balanced head pose, consistent annotation, and multi-view images). In total, there are 2,150,400 training pairs and 204,800 testing pairs in our extended dataset, called DAD-3DHeadsSyn. + +# 4. 3D-Aware Multi-view Consistency Training + +# 4.1. Overview + +The state-of-the-art landmark detectors [5, 34] can output reasonable results on in-the-wild images. However, we may observe that the predicted landmark are floating on the face surface instead of fitting the face perfectly in a lot of cases. We can easily verify if the detected landmark fits the face by projecting the detected landmark to another view (see Fig. 1(a)). Armed by this observation of multi-view in + +Algorithm 1 3D-Aware Plug-in Module. +1: Input: pretrained detector $F$ with weights $\theta$ , $M$ single-view images $I_{1,\dots,M} \in \mathcal{D}$ along with ground truth landmark $L_{1,\dots,M}$ , paired $N$ multi-view images $V_{1,\dots,N} \in \hat{\mathcal{D}}$ along with ground truth landmark $L_{1,\dots,N}$ . +2: Output: detector $F$ with updated weights $\theta^{*}$ +3: Initialization: set $\theta$ to pre-trained weights +4: Unfreeze $\theta$ +5: for number of iterations do +6: Output predicted landmarks $\hat{L}_{1,\dots,N}$ for each view. +7: Randomly sample $P$ landmarks from them, $(1 < P \leq N)$ . +8: Cast the landmarks into world space and estimate the approximate 3D landmark $\dot{L}$ using Eq. 2, 3, 4, 5 +9: Project $\dot{L}$ onto the image planes of remaining $Q$ views $(Q = N - P)$ using Eq. 6, 7 +10: Calculate Total Loss $\mathcal{L}$ using Eq. 11 +11: $\theta^{*} \gets Adam\{\mathcal{L}\}$ + +consistency and inaccuracy, we propose a novel 3D-Aware training module $\mathcal{R}$ to further improve the performance of baseline detection algorithm $F$ . + +Given a facial landmark detection network $F_{\theta}(\cdot)$ pretrained on dataset $\mathcal{D}$ , the proposed module $\mathcal{R}$ further refines the network parameters $\theta$ by leveraging our simulated DAD-3DHeads-Syn dataset $\hat{\mathcal{D}}$ in addition to the original dataset $\mathcal{D}$ . Our module $\mathcal{R}$ can be formulated as: + +$$ +F _ {\theta^ {*}} \leftarrow \mathcal {R} \left(F _ {\theta}, X, V _ {1, \dots , N}\right), X \in \mathcal {D}, V _ {1, \dots , N} \in \hat {\mathcal {D}}, \tag {1} +$$ + +where $X$ is the image batch sampled from $\mathcal{D}$ and $V_{1,\dots,N}$ are $N$ multi-view images sampled from $\hat{\mathcal{D}}$ . We refine the network parameters $\theta$ through exploring 3D information among multi-view images and applying a novel projection consistency during the fine-tuning process. Our module $\mathcal{R}$ does not result in any new network parameters and can be plugged into any learning-based network. We show the training protocol in Alg. 1. + +# 4.2. Multi-view Consistency Supervision + +We propose a novel multi-view supervision to force the baseline network to learn to be 3D consistent. To simplify notation, we ignore the batch dimension and fixed camera intrinsic matrix. 
For every training iteration, we randomly sample $N$ image and landmark pairs $\{V,\mathrm{L}\}_{1,\dots,N}$ from $\hat{\mathcal{D}}$ and $M$ image and landmark pairs $\{I,\mathrm{L}\}_{1,\dots,M}$ from the initial dataset $\mathcal{D}$.

We pass $V_{1,\dots,N}$ to the baseline network $F$ to obtain the predicted landmarks $\hat{\mathrm{L}}_{1,\dots,N}$, which are shown as green points in Fig. 4.

![](images/0384782a59611be0512f68c552e09ccf4cbd403b35b27bc352572b02ffebd08f.jpg)
Figure 4. Multi-view Consistency Supervision. Predicted landmarks $\hat{\mathbf{L}}_{1,\dots,N}$, estimated 3D landmark $\dot{\mathbf{L}}$, projected landmarks $\tilde{\mathbf{L}}_{1,\dots,Q}$, and ground truth landmarks $\mathrm{L}$ are denoted as green, blue, red, and yellow points, respectively. The calculation of the 3D landmark $\dot{\mathbf{L}}$ and the projection procedure are shown as light blue and pink arrows, respectively. $\mathcal{L}_{\text{Self-Cons}}$ and $\mathcal{L}_{\text{Multiview}}$ are represented as red and light green lines, respectively.

We then randomly select $P$ predicted landmarks $\hat{\mathrm{L}}_{1,\dots,P} \in \mathbb{R}^{P \times 68 \times 2}$ from $\hat{\mathrm{L}}_{1,\dots,N}$ to calculate the "canonical" 3D landmark $\dot{\mathrm{L}} \in \mathbb{R}^{68 \times 3}$, shown as blue points in Fig. 4. We calculate each keypoint of the "canonical" 3D landmark, $\dot{\mathrm{L}}^{(k)} \in \mathbb{R}^3$, $1 \leq k \leq 68$, through the Direct Linear Transform (DLT) [16, 25], as follows:

$$
\mu_{p} = \mathbb{M}_{p}[0,:] - \mathbb{M}_{p}[2,:] \cdot \hat{\mathrm{L}}_{p}^{(k)}[0] \in \mathbb{R}^{4}, \tag{2}
$$

$$
v_{p} = \mathbb{M}_{p}[1,:] - \mathbb{M}_{p}[2,:] \cdot \hat{\mathrm{L}}_{p}^{(k)}[1] \in \mathbb{R}^{4}, \tag{3}
$$

$$
\mathbf{A} = \left[ \mu_{1} \mid \mu_{2} \mid \dots \mid \mu_{P} \mid v_{1} \mid v_{2} \mid \dots \mid v_{P} \right]^{T} \in \mathbb{R}^{2P \times 4}, \tag{4}
$$

$$
\dot{\mathrm{L}}^{(k)} = \left(\mathbf{A}[:,:3]^{T} \mathbf{A}[:,:3]\right)^{-1} \mathbf{A}[:,:3]^{T} \left(-\mathbf{A}[:,3]\right), \tag{5}
$$

where $p$, $1 \leq p \leq P$, is the view index, and $\mathbb{M}_{1,\dots,P}$ are the corresponding camera extrinsic matrices, which are pre-defined for view synthesis during volume rendering (see Sec. 3). Here, $\mathbb{M}_p[i,:]$ denotes the $i$-th row of $\mathbb{M}_p$, $\mathbf{A}[:,:i]$ denotes columns $0$ to $i-1$ of $\mathbf{A}$, and $\mathbf{A}[:,i]$ denotes the $i$-th column of $\mathbf{A}$. By Eq. 2 and Eq. 3, we first form the projection constraints on $\dot{\mathrm{L}}^{(k)}$, i.e., $\mu_p[:3] \cdot \dot{\mathrm{L}}^{(k)} + \mu_p[3] = 0$ and $v_p[:3] \cdot \dot{\mathrm{L}}^{(k)} + v_p[3] = 0$, where '$\cdot$' denotes the dot product. Then we stack all of the constraints into $\mathbf{A} \in \mathbb{R}^{2P \times 4}$ by Eq. 4. Finally, we compute $\dot{\mathrm{L}}^{(k)}$ in the least-squares sense (Eq. 5).
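In code, Eqs. 2-5 reduce to a small per-keypoint least-squares problem. The following is a minimal PyTorch sketch, not the authors' implementation; it assumes the camera intrinsics are already folded into the $3 \times 4$ matrices, matching the simplified notation above, and the function name is illustrative:

```python
import torch

def triangulate_dlt(extrinsics: torch.Tensor, points_2d: torch.Tensor) -> torch.Tensor:
    """Estimate one 3D keypoint from P views following Eqs. 2-5.

    extrinsics: (P, 3, 4) camera matrices M_p (intrinsics assumed folded in).
    points_2d:  (P, 2) predicted 2D locations of the same keypoint in each view.
    Returns:    (3,) tensor holding the "canonical" 3D keypoint.
    """
    rows = []
    for M, xy in zip(extrinsics, points_2d):
        rows.append(M[0] - M[2] * xy[0])   # Eq. 2: mu_p in R^4
        rows.append(M[1] - M[2] * xy[1])   # Eq. 3: v_p in R^4
    A = torch.stack(rows)                  # Eq. 4: (2P, 4) stacked constraints
    # Eq. 5: least-squares solution of A[:, :3] X = -A[:, 3] via the normal equations
    lhs = A[:, :3].T @ A[:, :3]
    rhs = A[:, :3].T @ (-A[:, 3])
    return torch.linalg.solve(lhs, rhs)
```

In practice the solve would be batched over all 68 keypoints; the loop form above simply mirrors the equations.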
After obtaining the "canonical" 3D landmark $\dot{\mathrm{L}}$, we project it onto the image planes of the remaining $Q = N - P$ views to obtain the projected landmarks $\tilde{\mathrm{L}}_{1,\dots,Q}$, shown as red points in Fig. 4, using the following equations:

$$
s = \mathbb{M}_{q}[:,:3]\, \dot{\mathrm{L}}^{(k)} + \mathbb{M}_{q}[:,3] \in \mathbb{R}^{3 \times 1}, \tag{6}
$$

$$
\tilde{\mathrm{L}}_{q}^{(k)} = \left[ \begin{array}{c} s[0] / s[2] \\ s[1] / s[2] \end{array} \right] \in \mathbb{R}^{2 \times 1}, \tag{7}
$$

where, in our case, $1 \leq q \leq Q$. Eq. 6 transforms the 3D landmark from the "canonical" space to the camera space of view $q$, and Eq. 7 transforms it from camera space to image space.

Self-Projection Consistency Loss. Since all $N$ views are rendered from one NeRF with different camera poses, the predicted landmarks $\hat{\mathrm{L}}_{1,\dots,Q}$ and the projected landmarks $\tilde{\mathrm{L}}_{1,\dots,Q}$ should be consistent. Therefore, we minimize the error between the predicted and projected landmarks:

$$
\mathcal{L}_{\text{Self-Cons}} = \sum_{q=1}^{Q} \| \hat{\mathrm{L}}_{q} - \tilde{\mathrm{L}}_{q} \|_{1}. \tag{8}
$$

Mesh Consistency Loss. Besides the self-projection consistency, all $N$ views also share one mesh topology in the canonical space. Therefore, we apply a mesh consistency loss in canonical space:

$$
\mathcal{L}_{\text{Mesh-Cons}} = \sum_{n=1}^{N} \| \hat{\mathrm{M}}_{n} - \dot{\mathrm{M}} \|_{2}, \tag{9}
$$

where $\hat{\mathrm{M}}_n$ is the predicted mesh of view $n$ in the canonical space, and $\dot{\mathrm{M}}$ is the ground truth mesh of the original reference image.

Multiview Landmark Loss. We also minimize the distance between the predicted 2D facial landmarks and the corresponding multi-view ground truth landmarks obtained in Sec. 3, which are denoted as yellow points in Fig. 4:

$$
\mathcal{L}_{\text{Multiview}} = \sum_{n=1}^{N} \| \hat{\mathrm{L}}_{n} - \mathrm{L}_{n} \|_{1}. \tag{10}
$$

We also incorporate the original loss of the baseline method, computed with the image and landmark pairs $\{I,\mathrm{L}\}_{1,\dots,M}$ from dataset $\mathcal{D}$, to stabilize our 3D-aware training. The overall loss is:

$$
\mathcal{L} = \lambda_{1} \mathcal{L}_{\text{Self-Cons}} + \lambda_{2} \mathcal{L}_{\text{Mesh-Cons}} + \lambda_{3} \mathcal{L}_{\text{Multiview}} + \mathcal{L}_{\text{original}}, \tag{11}
$$

where $\lambda_{1,2,3}$ are hyper-parameters that control the contribution of each component. We set $\lambda_{1,2,3}$ to 0.1 empirically.

Note that our training is a plug-in module and can be incorporated into any existing facial landmark detector easily. For different pretrained models, we only need to change $\mathcal{L}_{\text{original}}$, while the other loss components, calculated on our balanced synthetic dataset $\hat{\mathcal{D}}$, can be applied directly. We show this plug-in capability on top of different baseline methods (e.g., DAD-3DNet [34] and 3DDFA [22]), and demonstrate that our 3D-aware training indeed improves their performance (see Sec. 5).
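The projection of Eqs. 6-7 and the loss terms of Eqs. 8-10 are equally compact. Below is a minimal PyTorch sketch for a single multi-view training sample; the tensor shapes, the mesh representation, and the function names are illustrative assumptions rather than the authors' API:

```python
import torch

def project_to_view(extrinsic: torch.Tensor, canon_lmk: torch.Tensor) -> torch.Tensor:
    """Eqs. 6-7: project the (68, 3) canonical landmarks into one view."""
    # Eq. 6: rotate/translate into the camera space of view q.
    s = canon_lmk @ extrinsic[:, :3].T + extrinsic[:, 3]
    # Eq. 7: perspective divide to image coordinates.
    return s[:, :2] / s[:, 2:3]

def consistency_losses(pred_q, proj_q, pred_all, gt_all, pred_mesh, gt_mesh):
    """Eqs. 8-10 for one sample.

    pred_q, proj_q:   (Q, 68, 2) predictions on the Q held-out views and the
                      projections of the triangulated canonical landmarks.
    pred_all, gt_all: (N, 68, 2) predictions and synthetic ground truth on all N views.
    pred_mesh:        (N, V, 3) per-view meshes predicted in canonical space.
    gt_mesh:          (V, 3) ground-truth canonical mesh of the reference image.
    """
    l_self = (pred_q - proj_q).abs().sum()                # Eq. 8 (L1)
    l_mesh = (pred_mesh - gt_mesh).norm(dim=-1).sum()     # Eq. 9 (per-vertex L2; one plausible reading)
    l_multi = (pred_all - gt_all).abs().sum()             # Eq. 10 (L1)
    return l_self, l_mesh, l_multi

# Eq. 11, with lambda_1 = lambda_2 = lambda_3 = 0.1 as in the paper:
# loss = 0.1 * (l_self + l_mesh + l_multi) + loss_original
```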
Table 1. Facial landmark detection results (NME) on DAD-3DHeads [34], FaceScape [65], and MultiFace [64]. Lower values mean better results.

| Method | DAD-3DHeads | FaceScape | MultiFace |
| --- | --- | --- | --- |
| FAN [6] | 7.141 | 16.741 | 6.143 |
| Dlib [31] | 10.841 | 29.431 | 18.205 |
| 3DDFA-V2 [23] | 2.926 | 6.853 | 5.942 |
| 3DDFA [22] | 4.082 | 7.988 | 8.121 |
| 3DDFA+ | 3.784 | 7.425 | 7.305 |
| DAD-3DNet [34] | 2.599 | 6.681 | 5.786 |
| DAD-3DNet+ | 2.503 | 6.050 | 5.480 |
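For completeness, one iteration of the plug-in refinement in Algorithm 1 might be sketched as follows, reusing `triangulate_dlt` and `project_to_view` from the snippets above. The detector interface, the data handling, and `original_loss_fn` are placeholders, and the mesh-consistency term is omitted for brevity:

```python
import random
import torch

def refine_step(detector, optimizer, views, cams, gt_lmk, batch_D, original_loss_fn,
                P=4, lam=0.1):
    """One iteration of the 3D-aware plug-in refinement (cf. Algorithm 1).

    views:   (N, 3, H, W) images of one identity rendered from the NeRF.
    cams:    (N, 3, 4) extrinsics used for rendering (known by construction).
    gt_lmk:  (N, 68, 2) consistent synthetic ground-truth landmarks.
    batch_D: a batch from the original single-view dataset, consumed only by
             the baseline's own loss. All names here are placeholders.
    """
    pred = detector(views)                              # assumed to yield (N, 68, 2) landmarks
    n = views.shape[0]
    p_idx = random.sample(range(n), P)                  # views used for triangulation
    q_idx = [i for i in range(n) if i not in p_idx]     # remaining Q views

    # Eqs. 2-5: triangulate each keypoint from the P sampled views.
    canon = torch.stack([triangulate_dlt(cams[p_idx], pred[p_idx, k])
                         for k in range(pred.shape[1])])            # (68, 3)

    # Eqs. 6-7 and Eqs. 8, 10 on the held-out / all views.
    proj = torch.stack([project_to_view(cams[q], canon) for q in q_idx])
    l_self = (pred[q_idx] - proj).abs().sum()
    l_multi = (pred - gt_lmk).abs().sum()

    loss = lam * (l_self + l_multi) + original_loss_fn(detector, batch_D)  # Eq. 11
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return float(loss)
```

The key point is that no new network parameters are introduced; only the pretrained baseline weights are updated.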
+ +# 5. Experiments + +# 5.1. Experimental Settings + +Training Details. We implement our algorithm in Pytorch and adopt ADAM to optimize the baseline networks. We run our 3D-aware training for 100 epochs with a batch size of 4, and a learning rate of $1 \times 10^{-4}$ on each baseline network. As to computational cost, fine-tuning DAD-3DNet take about and 16.25 hours on 4 NVIDIA RTX A6000 GPUs. + +Dataset. Besides DAD-3DHeads, we use two additional datasets to conduct the evaluations. + +- DAD-3DHeads [34] is the state-of-the-art in-the-wild 3D head dataset, which contains dense, accurate annotations, and diverse facial appearances. It consists of 44,898 images collected from various sources (37,840 in the training set, 4,312 in the validation set, and 2,746 in the test set). +- FaceScape [65] is a large-scale high-quality lab-controlled 3D face dataset, which contains 18,760 examples, captured from 938 subjects and each with 20 specific expressions. +- MultiFace [64] is a new multi-view, high-resolution human face dataset collected from 13 identities for neural face rendering. + +Training and Testing Split. In all the experiments, we only refine the baseline models with the training set of our DAD-3DHeads-Syn and their original training dataset. We use the test sets of DAD-3DHeads-Syn and DAD-3DHeads [34], and use the full datasets of FaceScape [65] and MultiFace [63] for performance evaluation. All the comparison methods have not been trained on the split test sets. + +Evaluation Metrics. We evaluate the facial landmark distance by calculating the Normalized Mean Error (NME). We normalize the landmark error by dividing its image resolution instead of the eye distance [55], since all the test images are aligned with offline tools. We calculate the head + +pose error by the absolute distance of the Euler angle values. + +# 5.2. Quantitative Evaluation + +Landmark Detection Results. The quantitative landmark detection results on DAD-3DHeads [34], FaceScape [65], and MultiFace [64] are shown in Tab. 1. We can find that the DAD-3DNet+ refined by our 3D-aware multi-view consistency training achieves the best performance on all three datasets. Moreover, according to the results of 3DDFA [22], 3DDFA+, DAD-3DNet [34], and DAD-3DNet+, we find that after refinement, the new models (3DDFA+ and DAD-3DNet+) achieve much better results than the baseline models. For example, the detection error of DAD-3DNet [34] drops 0.631 and 0.306, a $9\%$ and $5\%$ improvement, on FaceScape and MultiFace datasets, respectively. Similarly, we improve the 3DDFA [22] by 0.298 ( $7\%$ ), 0.563 ( $7\%$ ), and 0.816 ( $10\%$ ) on DAD-3DHeads, FaceScape and MultiFace datasets, respectively. We attribute the improvement to our proposed 3D aware multi-view training. One interesting phenomenon is that all the methods perform better on DAD-3DHeads dataset than the other two lab-captured datasets. We attribute this to the extreme head pose and challenging facial expressions in the other two datasets. We plot the head pose distribution of DAD-3DHeads (see supplementary materials) and find that distribution of head pose is not as uniform as the other two lab-controlled datasets. + +Head Pose Estimation Results. Tab. 2 shows the head pose estimation error on DAD-3DHeads [34] and FaceScape [65]. Our DAD-3DNet+ achieves best performance in most metrics. 
Similar to the landmark results, we can also conclude that head pose detection accuracy of the baseline methods (3DDFA and DAD-3DNet) is improved by our 3D aware multi-view consistency (3DDFA+ and DAD-3DNet+). For example, after refinement, DAD-3DNet+ achieves $11.9\%$ and $18.8\%$ performance boosts in overall head pose error on DAD-3DHeads and FaceScape dataset, respectively. + +# 5.3. Qualitative Evaluation + +We fist show visual comparisons on images randomly sampled from DAD-3DHeads test set [34] in Fig. 5. The landmark predicted by our DAD-3DNet+ model fits the individual's face tighter than the other predictions. Furthermore, by comparing the third (3DDFA [22]) and forth columns (ours), we can see that refining model $(3\mathrm{DDFA}+)$ improves the landmark accuracy dramatically. Similar visual improvements can be found in sixth (DAD-3DNet) and seventh (DAD-3DNet+) columns as well. Comparing the sixth and seventh column, we can see that the refinement training drags and rotates the landmark in 3D space to better fit it to the individual's face surface. We attribute this abl + +![](images/7aac1a8c3b9941c1668256c37762e4d1d7cf20e416080d39a9115a187a821aa4.jpg) +Figure 5. The visual results of Dlib [31], FAN [5], 3DDFA [22], our refined 3DDFA+, 3DDFA-V2, DAD-3DNet [34], and our refined DAD-3DNet+ on images randomly sampled from DAD-3DHeads [34] testing set. We show the enlarged error region (while box) in the middle row. + +Table 2. Head pose estimation results (head pose error) on DAD-3DHeads [34], FaceScape [65]. Lower values mean better results. + +
| Method | Pitch (DAD-3DHeads) | Roll (DAD-3DHeads) | Yaw (DAD-3DHeads) | Overall (DAD-3DHeads) | Pitch (FaceScape) | Roll (FaceScape) | Yaw (FaceScape) | Overall (FaceScape) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| FAN [5] | 9.765 | 5.376 | 6.390 | 7.177 | 8.774 | 4.895 | 6.556 | 6.742 |
| Dlib [31] | 13.352 | 11.799 | 14.654 | 13.268 | 17.861 | 12.663 | 19.548 | 16.691 |
| 3DDFA-V2 [23] | 7.901 | 4.989 | 6.088 | 6.326 | 13.741 | 9.718 | 11.353 | 11.604 |
| 3DDFA [22] | 9.895 | 7.977 | 8.996 | 8.956 | 20.789 | 18.145 | 19.692 | 19.752 |
| 3DDFA+ | 9.195 | 6.792 | 8.692 | 8.226 | 20.996 | 16.426 | 19.054 | 18.826 |
| DAD-3DNet [34] | 8.274 | 4.666 | 9.206 | 7.382 | 15.851 | 9.676 | 18.346 | 14.624 |
| DAD-3DNet+ | 7.700 | 4.274 | 7.528 | 6.500 | 14.466 | 7.247 | 13.876 | 11.863 |
+ +ity to our 3D-aware multi-view consistency training, which lets the refined model gain the better sense in 3D space, and therefore, improve the landmark detection results. + +To further validate the improvement gained by the proposed 3D-aware multi-view consistency training, we show the visual results (Fig. 6) of 3DDFA [22], our refined 3DDFA+, DAD-3DNet [34], and our refined DAD-3DNet+ on images sampled from four different test sets. We can find that our proposed refinement improves the landmark detection results in the eye, mouth, and face contour regions, which usually contain more appearance dynamics than the other areas. + +# 5.4. Performance Improvement Analysis + +To systematically understand the source of improvement after refining the baseline methods (DAD-3DNet [34] and 3DDFA [22]) with our proposed 3D-aware multi-view consistency training, we further calculate and plot the landmark and head pose error improvements on DAD-3DHeads [34] (see Fig. 7). Instead of calculating the overall improved + +error score, we split all the testing images into different groups according to their head pose value and calculate the improved error score within each group. We can find that the improvement by our training gets more obvious as the head pose gets more challenging. For example, the landmark error improvement (Fig. 7 upper section) using our method built on top of 3DDFA [22] increases from 0.12 to 0.71. Similarly, the head pose estimation error (Fig. 7 lower section) improvement using our method built on top of DAD-3DNet [34] increases from 0.02 to 2.7. We also show the detection result visualization in Fig. 8. We can see that from left to right, as the head pose increases, the error of the DAD-3DNet+ (second row) is more stable than the error (first row) of the DAD-3DNet. Base on this trend, we conclude that our proposed 3D-aware multi-view consistency training provides a more significant improvement over the baselines on images with larger head pose. This verifies our hypothesis that multi-view consistency training enables the network to learn 3D-aware information, which benefits the detection results on images with large head pose. + +![](images/f2ac3d6ce01550466943c2f9f8af1f2f6a4d21af6e231534947de31734b3a21c.jpg) +Figure 6. The visual comparisons between baseline methods and the refined methods on four testing sets. The left column and upper row list the dataset and method names, respectively. $^+$ denotes the model that has been refined by our 3D-aware training. + +![](images/eb3cbcd9dc8c8a1441823c5b341c0f8f352ccce72c74df1480e3a0358cf7b24d.jpg) + +![](images/26af171f93b204cdcd7c84063eda237916b5601d8ebf9d2f080cc61ffedc438e.jpg) +Figure 7. The landmark (top) and head pose (bottom) error improvement over DAD-3DNet [34] and 3DDFA [22] on images from different head pose ranges. The solid and dotted lines indicate DAD-3DNet [34] vs. DAD-3DNet+ (ours) and 3DDFA [22] vs. 3DDFA+ (ours). + +# 5.5. Ablation Study + +We conduct ablation study on FaceScape [65] to verify the importance of main components of our novel design. As shown in Tab. 3, we calculate NME of landmark and MAE of pose estimation in these ablation experiments. Based on these numbers, we can see the performance degrades + +![](images/af87e6bdb2c2cb2d76c4ffc07ba3a5063e33a0cefd29dff5abab44fd06970664.jpg) +Figure 8. The error visualization of DAD-3DNet [34] and our DAD-3DNet+ on MultiFace [64] dataset. The white and green dots are the ground truth and predicted landmarks, respectively. 
We use the red line to show the error distance. From left to right, the head pose increases gradually. + +Table 3. Ablation Study on FaceScape [65]. The top 2 numbers are shown in bold. + +
| # | Component | NME ↓ | Pose ↓ |
| --- | --- | --- | --- |
| 1 | full model (P=4) | **6.050** | **11.863** |
| 2 | w/o $\mathcal{L}_{\text{Mesh-Cons}}$ | 6.168 | 12.327 |
| 3 | w/o $\mathcal{L}_{\text{Self-Cons}}$ | 6.541 | 13.623 |
| 4 | full model (P=8) | **6.048** | 11.923 |
| 5 | full model (P=16) | 6.098 | **11.902** |
| 6 | full model (P=32) | 6.139 | 11.912 |
+ +drastically when we remove $\mathcal{L}_{\mathrm{Self - Cons}}$ . Moreover, removing $\mathcal{L}_{\mathrm{Mesh - Cons}}$ negatively impacts the results, demonstrating its importance. Moreover, estimating the 3D landmarks in the world space using fewer views leads to better results. This is a significant advantage as it makes our fine-tuning process more efficient. + +# 6. Conclusion + +We propose 3D-aware multi-view consistency training, a new framework for improving deep-learning base landmark detection algorithms. Through a set of novel loss functions, we force the network to produce landmarks that are 3D consistent. We additionally introduce a novel dataset simulation pipeline to combine the merits of lab-controlled captures and in-the-wild collected images. The model refined by our method outperforms previous approaches in terms of landmark detection accuracy and head pose estimation accuracy. Admittedly, our work has some limitations. For example, our proposed training relies on the performance of the baseline method. If the pretrained baseline yield poor initial predictions, our DLT would fail to estimate reasonable canonical 3D landmark, affecting the performance of the proposed self-projection consistency loss. Investigating ways to reduce the reliance on the accuracy of the baseline methods would be an interesting future research. + +# References + +[1] Vitor Albiero, Xingyu Chen, Xi Yin, Guan Pang, and Tal Hassner. img2pose: Face alignment and detection via 6dof, face pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7617-7627, 2021. +[2] Slawomir Bak, Peter Carr, and Jean-Francois Lalonde. Domain adaptation through synthesis for unsupervised person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 189–205, 2018. +[3] Peter N Belhumeur, David W Jacobs, David J Kriegman, and Neeraj Kumar. Localizing parts of faces using a consensus of exemplars. IEEE transactions on pattern analysis and machine intelligence, 35(12):2930-2940, 2013. +[4] Adrian Bulat and Georgios Tzimiropoulos. Two-stage convolutional part heatmap regression for the 1st 3d face alignment in the wild (3dfaw) challenge. In European Conference on Computer Vision, pages 616-624. Springer, 2016. +[5] Adrian Bulat and Georgios Tzimiropoulos. Binarized convolutional landmark localizers for human pose estimation and face alignment with limited resources. In Proceedings of the IEEE International Conference on Computer Vision, pages 3706-3714, 2017. +[6] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017. +[7] Xavier P Burgos-Artizzu, Pietro Perona, and Piotr Dólar. Robust face landmark estimation under occlusion. In Proceedings of the IEEE international conference on computer vision, pages 1513-1520, 2013. +[8] Chen Cao, Yanlin Weng, Stephen Lin, and Kun Zhou. 3d shape regression for real-time facial animation. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013. +[9] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 
+[10] Dong Chen, Shaoqing Ren, Yichen Wei, Xudong Cao, and Jian Sun. Joint cascade face detection and alignment. In European conference on computer vision, pages 109-122. Springer, 2014. +[11] Lele Chen, Ross K Maddox, Zhiyao Duan, and Chenliang Xu. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7832-7841, 2019. +[12] Timothy F Cootes, Gareth J Edwards, and Christopher J Taylor. Active appearance models. In European conference on computer vision, pages 484-498. Springer, 1998. +[13] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE con + +ference on computer vision and pattern recognition, pages 5203-5212, 2020. +[14] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. +[15] Xuanyi Dong, Yan Yan, Wanli Ouyang, and Yi Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, June 2018. +[16] Xuanyi Dong, Yi Yang, Shih-En Wei, Xinshuo Weng, Yaser Sheikh, and Shouu-I Yu. Supervision by registration and triangulation for landmark detection. IEEE transactions on pattern analysis and machine intelligence, 43(10):3681-3694, 2020. +[17] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017. +[18] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018. +[19] Golnaz Ghiasi and Charless C Fowlkes. Occlusion coherence: Detecting and localizing occluded faces. arXiv preprint arXiv:1506.08347, 2015. +[20] Ralph Gross, Iain Matthews, Jeffrey Cohn, Takeo Kanade, and Simon Baker. Multi-pie. Image and vision computing, 28(5):807-813, 2010. +[21] Kuangxiao Gu, Yuqian Zhou, and Thomas Huang. Fnet: Landmark driven fetching and learning network for faithful talking facial animation synthesis. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 10861-10868, 2020. +[22] Jianzhu Guo, Xiangyu Zhu, and Zhen Lei. 3ddfa. https://github.com/cleardusk/3DDFA, 2018. +[23] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In European Conference on Computer Vision, pages 152-168. Springer, 2020. +[24] Xiaojie Guo, Siyuan Li, Jinke Yu, Jiawan Zhang, Jiayi Ma, Lin Ma, Wei Liu, and Haibin Ling. Pfld: A practical facial landmark detector. arXiv preprint arXiv:1902.10859, 2019. +[25] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003. +[26] Stefan Hinterstoisser, Vincent Lepetit, Paul Wohlhart, and Kurt Konolige. On pre-trained image features and synthetic images for deep learning. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0-0, 2018. +[27] Xinya Ji, Hang Zhou, Kaisiyuan Wang, Qianyi Wu, Wayne Wu, Feng Xu, and Xun Cao. 
Eamm: One-shot emotional talking face via audio-based emotion-aware motion model. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, 2022. + +[28] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017. +[29] Amin Jourabloo and Xiaoming Liu. Large-pose face alignment via cnn-based dense 3d model fitting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4188-4196, 2016. +[30] Ira Kemelmacher-Shlizerman and Ronen Basri. 3d face reconstruction from a single image using a single reference face shape. IEEE transactions on pattern analysis and machine intelligence, 33(2):394-405, 2010. +[31] Davis E. King. Dlib-ml: A machine learning toolkit. Journal of Machine Learning Research, 10:1755-1758, 2009. +[32] Martin Koestinger, Paul Wohlhart, Peter M Roth, and Horst Bischof. Annotated facial landmarks in the wild: A largescale, real-world database for facial landmark localization. In 2011 IEEE international conference on computer vision workshops (ICCV workshops), pages 2144-2151. IEEE, 2011. +[33] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6), 2017. +[34] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 20942–20952, 2022. +[35] Nikolaus Mayer, Eddy Ilg, Philipp Fischer, Caner Hazirbas, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. What makes good synthetic training data for learning disparity and optical flow estimation? International Journal of Computer Vision, 126(9):942-960, 2018. +[36] Kieron Messer, Jiri Matas, Josef Kittler, Juergen Luettin, Gilbert Maitre, et al. Xm2vtsdb: The extended m2vts database. In Second international conference on audio and video-based biometric person authentication, volume 964, pages 965-966. Citeseer, 1999. +[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. +[38] Erik Murphy-Chutorian and Mohan Manubhai Trivedi. Head pose estimation in computer vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 31(4):607-626, 2008. +[39] P Jonathon Phillips, Patrick J Flynn, Todd Scruggs, Kevin W Bowyer, Jin Chang, Kevin Hoffman, Joe Marques, Jaesik Min, and William Worek. Overview of the face recognition grand challenge. In 2005 IEEE computer society conference on computer vision and pattern recognition (CVPR'05), volume 1, pages 947-954. IEEE, 2005. +[40] Shengju Qian, Keqiang Sun, Wayne Wu, Chen Qian, and Jiaya Jia. Aggregation via separation: Boosting facial land + +mark detector with semi-supervised style translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10153-10163, 2019. +[41] Rajeev Ranjan, Vishal M Patel, and Rama Chellappa. 
Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE transactions on pattern analysis and machine intelligence, 41(1):121-135, 2017. +[42] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017. +[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on Graphics (TOG), 42(1):1-13, 2022. +[44] Andreas Rossler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. Faceforensics: Learning to detect manipulated facial images. In Proceedings of the IEEE/CVF international conference on computer vision, pages 1-11, 2019. +[45] Nataniel Ruiz, Samuel Schulter, and Manmohan Chandraker. Learning to simulate. In International Conference on Learning Representations, 2019. +[46] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013. +[47] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013. +[48] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. A semi-automatic methodology for facial landmark annotation. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 896-903, 2013. +[49] Jie Shen, Stefanos Zafeiriou, Grigoris G Chrysos, Jean Kossaifi, Georgios Tzimiropoulos, and Maja Pantic. The first facial landmark tracking in-the-wild challenge: Benchmark and results. In Proceedings of the IEEE international conference on computer vision workshops, pages 50-58, 2015. +[50] Ashish Shrivastava, Tomas Pfister, Oncel Tuzel, Joshua Susskind, Wenda Wang, and Russell Webb. Learning from simulated and unsupervised images through adversarial training. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2107-2116, 2017. +[51] Linsen Song, Wayne Wu, Chaoyou Fu, Chen Change Loy, and Ran He. Audio-driven dubbing for user generated contents via style-aware semi-parametric synthesis. IEEE Transactions on Circuits and Systems for Video Technology, 2022. +[52] Linsen Song, Wayne Wu, Chen Qian, Ran He, and Chen Change Loy. Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security, 17:585-598, 2022. + +[53] Yang Song, Jingwen Zhu, Dawei Li, Andy Wang, and Hairong Qi. Talking face generation by conditional recurrent adversarial network. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 919–925. International Joint Conferences on Artificial Intelligence Organization, 7 2019. +[54] Luuk Spreeuwers, Maikel Schils, and Raymond Veldhuis. Towards robust evaluation of face morphing detection. In 2018 26th European Signal Processing Conference (EU-SIPCO), pages 1027-1031. IEEE, 2018. +[55] Keqiang Sun, Wayne Wu, Tinghao Liu, Shuo Yang, Quan Wang, Qiang Zhou, Zuochang Ye, and Chen Qian. 
Fab: A robust facial landmark detection framework for motion-blurred videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5462-5471, 2019. +[56] Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep convolutional network cascade for facial point detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3476-3483, 2013. +[57] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020. +[58] Justus Thies, Michael Zollhofer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2387-2395, 2016. +[59] Boris van Breugel, Trent Kyono, Jeroen Berrevoets, and Michaela van der Schaar. Decaf: Generating fair synthetic data using causally-aware generative networks. Advances in Neural Information Processing Systems, 34:22221-22233, 2021. +[60] Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Jan Kautz, and Bryan Catanzaro. Few-shot video-to-video synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2019. +[61] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021. +[62] Erroll Wood, Tadas Baltrusaitis, Louis-Philippe Morency, Peter Robinson, and Andreas Bulling. Learning an appearance-based gaze estimator from one million synthesised images. In Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research & Applications, pages 131–138, 2016. +[63] Yue Wu, Zuoguan Wang, and Qiang Ji. Facial feature tracking under varying facial expressions and face poses based on restricted boltzmann machines. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3452-3459, 2013. +[64] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Timothy Godisart, Hyowon Ha, Alexander Hypes, Taylor Koska, + +Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouu-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022. +[65] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 601-610, 2020. +[66] Ran Yi, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Apdrawinggan: Generating artistic portrait drawings from face photos with hierarchical gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10743-10752, 2019. +[67] Ran Yi, Zipeng Ye, Ruoyu Fan, Yezhi Shu, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Animating portrait line drawings from a single face photo and a speech signal. 
In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022. +[68] Xi Yin, Xiang Yu, Kihyuk Sohn, Xiaoming Liu, and Manmohan Chandraker. Towards large-posed face frontalization in the wild. In In Proceeding of International Conference on Computer Vision, Venice, Italy, October 2017. +[69] Egor Zakharov, Aliaksandra Shysheya, Egor Burkov, and Victor Lempitsky. Few-shot adversarial learning of realistic neural talking head models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9459-9468, 2019. +[70] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. Df2net: A dense-fine-finer network for detailed 3d face reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2315-2324, 2019. +[71] Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, and Yu Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters, 23(10):1499-1503, 2016. +[72] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, and Peng Liu. A high-resolution spontaneous 3d dynamic facial expression database. In 2013 10th IEEE international conference and workshops on automatic face and gesture recognition (FG), pages 1-6. IEEE, 2013. +[73] Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3661–3670, 2021. +[74] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. Springer, 2014. +[75] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Learning deep representation for face alignment with auxiliary attributes. IEEE transactions on pattern analysis and machine intelligence, 38(5):918-930, 2015. + +[76] Aihua Zheng, Feixia Zhu, Hao Zhu, Mandi Luo, and Ran He. Talking face generation via learning semantic and temporal synchronous landmarks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3682-3689. IEEE, 2021. +[77] Erjin Zhou, Haoqiang Fan, Zhimin Cao, Yuning Jiang, and Qi Yin. Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In Proceedings of the IEEE international conference on computer vision workshops, pages 386-391, 2013. +[78] Hang Zhou, Yasheng Sun, Wayne Wu, Chen Change Loy, Xiaogang Wang, and Ziwei Liu. Pose-controllable talking face generation by implicitly modularized audio-visual representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4176-4186, 2021. +[79] Yang Zhou, Xintong Han, Eli Shechtman, Jose Echevarria, Evangelos Kalogerakis, and Dingzeyu Li. Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020. +[80] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European conference on computer vision, pages 592-608. Springer, 2020. +[81] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017. +[82] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016. +[83] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016. +[84] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1):78-92, 2017. +[85] Xiangxin Zhu and Deva Ramanan. Face detection, pose estimation, and landmark localization in the wild. In 2012 IEEE conference on computer vision and pattern recognition, pages 2879-2886. IEEE, 2012. \ No newline at end of file diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/images.zip b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..fe06f402e1277fb2d122ffe8011704f12ef8eb5f --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63f5b89f786c8bbcc2a9eefaae25f1b78212e2f695780b12cc28cdca47d0e5f1 +size 532250 diff --git a/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/layout.json b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..66c8019acf0303390a9d1bf98fb5fa3c4c295a93 --- /dev/null +++ b/2023/3D-Aware Facial Landmark Detection via Multi-View Consistent Training on Synthetic Data/layout.json @@ -0,0 +1,10291 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 143, + 103, + 451, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 103, + 451, + 140 + ], + "spans": [ + { + "bbox": [ + 143, + 103, + 451, + 140 + ], + "type": "text", + "content": "3D-aware Facial Landmark Detection via Multi-view Consistent Training on Synthetic Data" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": "Libing Zeng " + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Lele Chen" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Wentao Bao" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Zhong Li" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Yi Xu" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Junsong Yuan" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": 
"inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": ", Nima K. Kalantari" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": "Texas A&M University, " + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": "OPPO US Research Center, InnoPeak Technology, Inc, \n" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": "Michigan State University, " + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 56, + 160, + 536, + 205 + ], + "type": "text", + "content": "University at Buffalo" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 48, + 260, + 171, + 384 + ], + "blocks": [ + { + "bbox": [ + 48, + 260, + 171, + 384 + ], + "lines": [ + { + "bbox": [ + 48, + 260, + 171, + 384 + ], + "spans": [ + { + "bbox": [ + 48, + 260, + 171, + 384 + ], + "type": "image", + "image_path": "78e916daa4bd05091dac17e234910d011ec6556475c25fe43fb6d8e37bc3162e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 385, + 165, + 396 + ], + "lines": [ + { + "bbox": [ + 55, + 385, + 165, + 396 + ], + "spans": [ + { + "bbox": [ + 55, + 385, + 165, + 396 + ], + "type": "text", + "content": "(a) Multi-view Inconsistency" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 173, + 260, + 295, + 383 + ], + "blocks": [ + { + "bbox": [ + 173, + 260, + 295, + 383 + ], + "lines": [ + { + "bbox": [ + 173, + 260, + 295, + 383 + ], + "spans": [ + { + "bbox": [ + 173, + 260, + 295, + 383 + ], + "type": "image", + "image_path": "4919b56620ae21697831b195c4fa557750b5cc1fe3d2388aaea65720dc521e69.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 204, + 384, + 265, + 396 + ], + "lines": [ + { + "bbox": [ + 204, + 384, + 265, + 396 + ], + "spans": [ + { + "bbox": [ + 204, + 384, + 265, + 396 + ], + "type": "text", + "content": "(b) DAD-3DNet" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 297, + 260, + 419, + 383 + ], + "blocks": [ + { + "bbox": [ + 297, + 260, + 419, + 383 + ], + "lines": [ + { + "bbox": [ + 297, + 260, + 419, + 383 + ], + "spans": [ + { + "bbox": [ + 297, + 260, + 419, + 383 + ], + "type": "image", + "image_path": "57c47b59d38b7da15bb14f7590b3291a5ba60eda6fd9368720dcbec5a98be4e4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 312, + 384, + 404, + 396 + ], + "lines": [ + { + "bbox": [ + 312, + 384, + 404, + 396 + ], + "spans": [ + { + "bbox": [ + 312, + 384, + 404, + 396 + ], + "type": "text", + "content": "(c) DAD-3DNet+ (Ours)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 399, + 547, + 467 + ], + "lines": [ + { + "bbox": [ + 46, + 399, + 547, + 467 + ], + "spans": [ + { + "bbox": [ + 46, + 399, + 547, + 467 + ], + 
"type": "text", + "content": "Figure 1. We plot the landmark annotations labeled by different annotators with different colors in view #1 of (a). Accurate annotation of non-frontal faces with large angles like view #1 is challenging. This is a major problem since small differences between annotated landmarks in view #1, becomes substantially magnified when projected to view #2. Training a system on such datasets could lead to poor landmark detection accuracy, as shown in (b). We address this issue by proposing a 3D-aware optimization module that enforces multi-view consistency. We show the landmark detection improvement in (c). Magnified insets in (b) and (c) are shown in (d). After refined by the proposed 3D-aware learning, the detected facial landmark is better aligned with the identity." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 421, + 259, + 482, + 383 + ], + "blocks": [ + { + "bbox": [ + 421, + 259, + 482, + 383 + ], + "lines": [ + { + "bbox": [ + 421, + 259, + 482, + 383 + ], + "spans": [ + { + "bbox": [ + 421, + 259, + 482, + 383 + ], + "type": "image", + "image_path": "e682e867ccf468210ab3cdbd5f548fd4edc26a0e75ce8c0f95aa2f6c76a46a88.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 384, + 490, + 395 + ], + "lines": [ + { + "bbox": [ + 479, + 384, + 490, + 395 + ], + "spans": [ + { + "bbox": [ + 479, + 384, + 490, + 395 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 484, + 259, + 545, + 383 + ], + "blocks": [ + { + "bbox": [ + 484, + 259, + 545, + 383 + ], + "lines": [ + { + "bbox": [ + 484, + 259, + 545, + 383 + ], + "spans": [ + { + "bbox": [ + 484, + 259, + 545, + 383 + ], + "type": "image", + "image_path": "76baf03b0bfbec6487cbc3e3ca9a8d82f8dea50c47b0094375bfffb23984ec43.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 143, + 481, + 192, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 481, + 192, + 495 + ], + "spans": [ + { + "bbox": [ + 143, + 481, + 192, + 495 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 506, + 289, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 506, + 289, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 506, + 289, + 687 + ], + "type": "text", + "content": "Accurate facial landmark detection on wild images plays an essential role in human-computer interaction, entertainment, and medical applications. Existing approaches have limitations in enforcing 3D consistency while detecting 3D/2D facial landmarks due to the lack of multi-view in-the-wild training data. Fortunately, with the recent advances in generative visual models and neural rendering, we have witnessed rapid progress towards high quality 3D image synthesis. In this work, we leverage such approaches to construct a synthetic dataset and propose a novel multiview consistent learning strategy to improve 3D facial landmark detection accuracy on in-the-wild images. The proposed 3D-aware module can be plugged into any learning-based landmark detection algorithm to enhance its accuracy. 
We demonstrate the superiority of the proposed plug" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 483, + 545, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 483, + 545, + 507 + ], + "spans": [ + { + "bbox": [ + 305, + 483, + 545, + 507 + ], + "type": "text", + "content": "in module with extensive comparison against state-of-the-art methods on several real and synthetic datasets." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 536, + 387, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 387, + 549 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 387, + 549 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 559, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 559, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 559, + 547, + 715 + ], + "type": "text", + "content": "Accurate and precise facial landmark plays a significant role in computer vision and graphics applications, such as face morphing [54], facial reenactment [58], 3D face reconstruction [17, 18, 30], head pose estimation [38], face recognition [1, 10, 13, 19, 32, 41, 71], and face generation [11, 21, 60, 69]. In these applications, facial landmark detection provides great sparse representation to ease the burden of network convergence in different training stages and is often used as performance evaluation metric. For instance, as a facial prior, it provides good initialization for subsequent training [66, 67, 69, 76], good intermediate representation to bridge the gap between different modalities for content generation [11, 27, 51, 79], loss terms which reg-" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 693, + 287, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 287, + 715 + ], + "type": "text", + "content": "*This work was done when Libing Zeng and Wentao Bao were interns at OPPO US Research Center, InnoPeak Technology, Inc." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 318, + 758 + ], + "type": "text", + "content": "12747" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 95 + ], + "type": "text", + "content": "ularize the facial expression [11, 52], or evaluation metrics to measure the facial motion quality [53, 73, 78]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 286, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 286, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 286, + 215 + ], + "type": "text", + "content": "The aforementioned applications require the estimated facial landmarks to be accurate even with significantly varied facial appearance under different identities, facial expressions, and extreme head poses. Tremendous efforts have been devoted to address this problem [15, 22-24, 29, 34, 40, 56, 63, 74, 75, 77, 82, 84]. These approaches often rely on manually annotated large-scale lab-controlled or in-the-wild image datasets [4, 34] to handle various factors such as arbitrary facial expressions, head poses, illumination, facial occlusions, etc." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 216, + 286, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 216, + 286, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 286, + 407 + ], + "type": "text", + "content": "However, even with the high cost of human labeling, consistent and accurate manual annotation of landmarks remains challenging [22, 23, 34]. It is very difficult, if not impossible, to force a person to annotate the facial landmark keypoints at the same pixel locations for faces of different poses, let alone different annotators under different labeling environments. Such annotation inconsistency and inaccuracy in training images are often the killing factor to learn an accurate landmark localization model. This is particularly a major problem in non-frontal faces where annotation becomes extremely challenging. As shown in Fig. 1(a) a small annotation variation in view #1, results in a significant inaccuracy in view #2. This multi-view inconsistency and inaccuracy can ultimately lead to poor landmark detection accuracy, especially for facial images with extreme head pose." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 407, + 286, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 407, + 286, + 609 + ], + "spans": [ + { + "bbox": [ + 46, + 407, + 286, + 609 + ], + "type": "text", + "content": "To mitigate this annotation inconsistency and inaccuracy issue, we propose to learn facial landmark detection by enforcing multi-view consistency during training. Given the images of the same facial identity captured with different head poses, instead of detecting facial landmark at each separate facial image, we propose a multi-view consistency supervision to locate facial landmark in a holistic 3D-aware manner. To enforce multi-view consistency, we introduce self-projection consistency loss and multi-view landmark loss in training. 
We also propose an annotation generation procedure to exploit the merits of lab-controlled data (e.g., multi-view images, consistent annotations) and in-the-wild data (e.g., wide range of facial expressions, identities). Thanks to this synthetic data, our method does not rely on human annotation to obtain accurate facial landmark locations. Therefore, it alleviates the problem of learning from inaccurate and inconsistent annotations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 610, + 286, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 610, + 286, + 693 + ], + "spans": [ + { + "bbox": [ + 46, + 610, + 286, + 693 + ], + "type": "text", + "content": "We formulate our solution as a plug-in 3D-aware module, which can be incorporated into any facial landmark detector and can boost a pre-trained model with higher accuracy and multi-view consistency. We demonstrate the effectiveness of our approach through extensive experiments on both synthetic and real datasets. The main contributions of our work are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 701, + 286, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 701, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 701, + 286, + 712 + ], + "type": "text", + "content": "- We show, for the first time, how to combine the merits" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 324, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 324, + 72, + 545, + 133 + ], + "type": "text", + "content": "of lab-captured face image data (e.g., multi-view) and the in-the-wild face image datasets (e.g., appearance diversity). Using our proposed approach, we produce a large-scale synthetic, but realistic, multi-view face dataset, titled DAD-3DHeads-Syn." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 141, + 545, + 269 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 317, + 141, + 545, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 141, + 545, + 213 + ], + "spans": [ + { + "bbox": [ + 317, + 141, + 545, + 213 + ], + "type": "text", + "content": "- We propose a novel 3D-aware optimization module, which can be plugged into any learning-based facial landmark detection method. By refining an existing landmark detection algorithm using our optimization module, we are able to improve its accuracy and multi-view consistency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 223, + 545, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 223, + 545, + 269 + ], + "spans": [ + { + "bbox": [ + 317, + 223, + 545, + 269 + ], + "type": "text", + "content": "- We demonstrate the performance improvements of our module built on top of multiple baseline methods on simulated, lab-captured, and in-the-wild datasets." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 282, + 392, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 282, + 392, + 295 + ], + "spans": [ + { + "bbox": [ + 306, + 282, + 392, + 295 + ], + "type": "text", + "content": "2. 
Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 303, + 545, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 303, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 305, + 303, + 545, + 350 + ], + "type": "text", + "content": "In this section, we review face landmark datasets and detection algorithms that are most related to our approach. We also provide a brief review of data simulation tools related to our work." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 359, + 489, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 359, + 489, + 371 + ], + "spans": [ + { + "bbox": [ + 306, + 359, + 489, + 371 + ], + "type": "text", + "content": "2.1. Face Landmark Detection Dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 378, + 545, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 378, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 305, + 378, + 545, + 544 + ], + "type": "text", + "content": "Lab-controlled dataset. Datasets under \"controlled\" conditions [8, 20, 36, 39, 46, 48, 64, 65, 72] typically collect video/images from indoor scenarios with certain restrictions, e.g. pre-defined expressions, head poses, etc. For example, FaceScape dataset [65] contains 938 individuals and each with 20 expressions using an array of 68 cameras under controlled illumination and positions. Thus, it contains aligned and consistent multi-view images and facial landmark annotations. However, the identities, poses, and expressions are limited. In addition, the environment conditions are fully controlled. These result in limited generalization capability of models trained on this dataset. Moreover, the annotation workflow of such a dataset is expensive and hard to scale." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "type": "text", + "content": "In-the-wild dataset. The boom of internet image sharing has enabled the creation of many \"in-the-wild\" facial landmark datasets [3,7,32,49,85], collected from the web, to facilitate facial landmark detection research. However, manually annotating facial landmarks on in-the-wild images is a time-consuming process and not scalable. Zhu et al. [83] release 300W-LP by extending the original 300W dataset with synthetic images with extreme pose through image profiling of frontal pose images. However, the novel view images are generated by simply applying rotation matrix on the original images, which leads to limited view range and poor image quality. Meanwhile, 300W-LP lacks diversity in face appearance and expression because of the intrinsic limitations of 300W. Recently, Martyniuk et al. 
[34] introduce a" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12748" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 312 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 312 + ], + "type": "text", + "content": "new dataset, DAD-3DHeads, by proposing a novel annotation scheme. Specifically, their approach allows the annotator to adjust the landmarks by looking at how well the mesh, generated from the landmarks, fits the input image. The proposed scheme addresses the problems exhibited by existing labeling tools, such as \"guessing\" the positions of the correct landmarks for invisible parts of the head, thus enabling accurate annotations. DAD-3DHeads dataset contains 44,898 in-the-wild images, covering extreme facial expressions, poses, and challenging illuminations. However, the DAD-3DHeads still has some drawbacks. First, even with the mesh fitting guidance, the annotations can be inaccurate. As shown in Fig. 1 (a), even a small inaccuracy in one view could result in a significant inconsistency when projected to another view. This inconsistency could negatively affect the training of the detection network. Second, since the depth is estimated by FLAME [33], annotation accuracy is limited by the FLAME model. Third, this dataset lacks multi-view images, and thus cannot be used to enforce multi-view consistency." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 319, + 146, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 146, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 146, + 331 + ], + "type": "text", + "content": "2.2. Data Simulation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 338, + 289, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 338, + 289, + 542 + ], + "spans": [ + { + "bbox": [ + 46, + 338, + 289, + 542 + ], + "type": "text", + "content": "Simulation [26,28,35,42,44,45,50,59,61,62,70] is a useful tool in situations where training data for learning-based methods is expensive to annotate or even hard to acquire. For example, Zeng et al. [70] and Richardson et al. [42] use 3D Morphable Model (3DMM) to render training data with different lighting conditions, identities, expressions, and texture basis elements for reconstructing detailed facial geometry. However, the simulated images produced by these approaches lack realism and have severe domain gaps compared with real-world captures, limiting their usage. Bak et al. [2] adapt synthetic data using a CycleGAN [81] with a regularization term for preserving identities. Ayush et al. [57] use the images and latent code generated by StyleGAN [81] to train a controllable portrait image generation model. However, it is hard to control the attribute consistencies of images simulated by generative models, which limits the usage of the generated datasets." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 551, + 248, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 248, + 563 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 248, + 563 + ], + "type": "text", + "content": "2.3. Face Landmark Detection Algorithms" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 570, + 287, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 641 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 641 + ], + "type": "text", + "content": "Traditional facial landmark detection methods leverage either holistic facial appearance information [12], or the global facial shape patterns [31, 85]. They yield reasonable results for images captured in lab-controlled environments with frontal faces and good lighting, however the performance on most of in-the-wild images is inferior." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 712 + ], + "type": "text", + "content": "Recently, deep learning-based algorithms have made promising progress on 2D facial landmark localization [15, 22-24,29,34,40,56,63,74,75,77,82,84] in terms of robustness, generalizability, and accuracy. FAN [6] constructs, for the first time, a very strong baseline by combining a state-of-the-art residual block and a state-of-the-art architecture" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 306, + 72, + 547, + 228 + ], + "blocks": [ + { + "bbox": [ + 306, + 72, + 547, + 228 + ], + "lines": [ + { + "bbox": [ + 306, + 72, + 547, + 228 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 547, + 228 + ], + "type": "table", + "html": "
Dataset TypeLab-ControlledIn-the-wildOurs
Examples
In-the-wild×
Large Scale×
Balanced×
Multiview Consistent×
Annotation Consistent×
Scalable××
", + "image_path": "9d20c4e0d9a754a241e5e59e0edbe4ec44f58de125cc9cf3743f44d19e6e7017.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 319, + 285, + 536, + 355 + ], + "blocks": [ + { + "bbox": [ + 304, + 238, + 546, + 282 + ], + "lines": [ + { + "bbox": [ + 304, + 238, + 546, + 282 + ], + "spans": [ + { + "bbox": [ + 304, + 238, + 546, + 282 + ], + "type": "text", + "content": "Figure 2. The feature comparison of different type of datasets. For example, FaceScape [65] and MultiFace [64] are lab-controlled datasets, while 300W [47], AFLW2000 [68], and DAD-3DHeads [34] are in-the-wild datasets." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 319, + 285, + 536, + 355 + ], + "lines": [ + { + "bbox": [ + 319, + 285, + 536, + 355 + ], + "spans": [ + { + "bbox": [ + 319, + 285, + 536, + 355 + ], + "type": "image", + "image_path": "094409b388bb96123759f3b785734f8945c95749b6beea7c0ccb43087beaf6f2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 363, + 515, + 374 + ], + "lines": [ + { + "bbox": [ + 336, + 363, + 515, + 374 + ], + "spans": [ + { + "bbox": [ + 336, + 363, + 515, + 374 + ], + "type": "text", + "content": "Figure 3. The proposed data simulation pipeline." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 381, + 547, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 381, + 547, + 584 + ], + "spans": [ + { + "bbox": [ + 304, + 381, + 547, + 584 + ], + "type": "text", + "content": "for landmark localization and trains it on a very large yet synthetically expanded 2D facial landmark dataset. To address self-occlusion and large appearance variation, Zhu et al. [82] propose a cascaded convolutional neural network and optimized weighted parameter distance cost loss function to formulate the priority of 3DMM parameters during training instead of predicting facial landmark keypoints. To further address the problems of shape reconstruction and pose estimation simultaneously, Martyniuk et al. propose an end-to-end trained DAD-3DNet [34] to regress 3DMM parameters and recover the 3D head geometry with differential FLAME decoder. However, due to the intrinsic limitation of the manually annotated in-the-wild dataset, the detection results are affected by the annotation noise and the 3D inconsistency of the single view images. In this paper, we mainly focus on improving the performance of deep-learning based methods." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 595, + 545, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 595, + 545, + 622 + ], + "spans": [ + { + "bbox": [ + 305, + 595, + 545, + 622 + ], + "type": "text", + "content": "3. 
Balanced and Realistic Multi-view Face Dataset" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 713 + ], + "type": "text", + "content": "We believe there are five desired properties that a good facial landmark dataset should fulfill: (1) contain a full range of multi-view images; (2) bridge the domain gap between the dataset and the real-world captured images; (3) contain diverse facial appearance including different poses, expressions, illuminations, and identities; (4) have consistent and accurate annotations across the whole dataset; (5) be" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12749" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "easy to obtain and scalable. The existing datasets are either lab-controlled captures [64, 65] or in-the-wild collections [34, 47, 68]. Unfortunately, these datasets lack one or more desired attributes. In contrast, our dataset meets all of these criteria (Fig. 2)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 133, + 286, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 133, + 286, + 228 + ], + "spans": [ + { + "bbox": [ + 46, + 133, + 286, + 228 + ], + "type": "text", + "content": "Unlike previous graphics or generative model-based data synthesis approaches described in Sec. 2.2, we propose a novel facial dataset simulation scheme by leveraging Neural Radiance Field (NeRF) [37] to facilitate training a facial landmark detection network. Fig. 3 shows our dataset creation pipeline. We generate multiview images with consistent landmarks using a single in-the-wild image along with its annotated landmarks as input." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 229, + 286, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 229, + 286, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 229, + 286, + 409 + ], + "type": "text", + "content": "Specifically, we choose DAD-3DHeads [34] as our initial dataset since it contains images under a variety of extreme poses, facial expressions, challenging illuminations, and severe occlusion cases. Given an image and its landmarks from this dataset, our goal is to reconstruct multiview images with their corresponding landmarks. Inspired by GAN inversion [80], we first fit a latent code to each image in the DAD-3DHeads dataset using EG3D [9] as the decoder by following Pivotal Tuning Inversion (PTI) [43]. Note that EG3D GAN inversion requires the camera pose of the input image, which we estimate using Deep3DFace [14]. Then we can use EG3D to decode the optimized latent code into a NeRF. Next, we use volume rendering on the NeRF with 512 uniformly sampled camera views from a large view range, producing 512 multi-view images."
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 410, + 286, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 286, + 577 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 286, + 577 + ], + "type": "text", + "content": "To obtain the landmarks for each image, we start with the well-annotated ground truth 2D landmarks of the original images from the DAD-3DHeads dataset. Then we use the estimated camera pose of the input image to unproject the annotated landmarks to 3D space. At last, we project the 3D landmarks to the 512 sampled camera views to obtain landmark annotations on the simulated views. The simulated dataset not only inherits the merits of DAD-3DHeads (e.g., diverse identities, expressions, poses, and illuminations), but also comes with a lot of new features (e.g., balanced head pose, consistent annotation, and multi-view images). In total, there are 2,150,400 training pairs and 204,800 testing pairs in our extended dataset, called DAD-3DHeads-Syn." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 590, + 284, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 284, + 604 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 284, + 604 + ], + "type": "text", + "content": "4. 3D-Aware Multi-view Consistency Training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 610, + 114, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 114, + 622 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 114, + 622 + ], + "type": "text", + "content": "4.1. Overview" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 286, + 713 + ], + "type": "text", + "content": "The state-of-the-art landmark detectors [5, 34] can output reasonable results on in-the-wild images. However, we may observe that the predicted landmarks are floating on the face surface instead of fitting the face perfectly in many cases. We can easily verify whether the detected landmarks fit the face by projecting them to another view (see Fig. 1(a)). Armed with this observation of multi-view in" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 311, + 87, + 545, + 295 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 472, + 84 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 472, + 84 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 472, + 84 + ], + "type": "text", + "content": "Algorithm 1 3D-Aware Plug-in Module." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "lines": [ + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "spans": [ + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": "1: Input: pretrained detector " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " with weights " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " single-view images " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "I_{1,\\dots,M} \\in \\mathcal{D}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " along with ground truth landmark " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "L_{1,\\dots,M}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": ", paired " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " multi-view images " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "V_{1,\\dots,N} \\in \\hat{\\mathcal{D}}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " along with ground truth landmark " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "L_{1,\\dots,N}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": ". \n2: Output: detector " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " with updated weights " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\theta^{*}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " \n3: Initialization: set " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " to pre-trained weights \n4: Unfreeze " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " \n5: for number of iterations do \n6: Output predicted landmarks " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\hat{L}_{1,\\dots,N}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " for each view. 
\n7: Randomly sample " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " landmarks from them, " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "(1 < P \\leq N)" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": ". \n8: Cast the landmarks into world space and estimate the approximate 3D landmark " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\dot{L}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " using Eq. 2, 3, 4, 5 \n9: Project " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\dot{L}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " onto the image planes of remaining " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " views " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "(Q = N - P)" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " using Eq. 6, 7 \n10: Calculate Total Loss " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "text", + "content": " using Eq. 11 \n11: " + }, + { + "bbox": [ + 311, + 87, + 545, + 295 + ], + "type": "inline_equation", + "content": "\\theta^{*} \\gets Adam\\{\\mathcal{L}\\}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "algorithm" + }, + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "text", + "content": "consistency and inaccuracy, we propose a novel 3D-Aware training module " + }, + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "text", + "content": " to further improve the performance of baseline detection algorithm " + }, + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 318, + 545, + 354 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": "Given a facial landmark detection network " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "F_{\\theta}(\\cdot)" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": " pretrained on dataset " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": ", the proposed module " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": " further refines the network parameters " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": " by leveraging our simulated DAD-3DHeads-Syn dataset " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{D}}" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": " in addition to the original dataset " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": ". Our module " + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 354, + 545, + 414 + ], + "type": "text", + "content": " can be formulated as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 321, + 424, + 545, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 424, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 321, + 424, + 545, + 437 + ], + "type": "interline_equation", + "content": "F _ {\\theta^ {*}} \\leftarrow \\mathcal {R} \\left(F _ {\\theta}, X, V _ {1, \\dots , N}\\right), X \\in \\mathcal {D}, V _ {1, \\dots , N} \\in \\hat {\\mathcal {D}}, \\tag {1}", + "image_path": "f3adabfe7c11193efbe3e1260a684a71b4dc4252722d7c07c0b853fe1c1124bd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "spans": [ + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": " is the image batch sampled from " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "V_{1,\\dots,N}" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + 
"content": " multi-view images sampled from " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{D}}" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": ". We refine the network parameters " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": " through exploring 3D information among multi-view images and applying a novel projection consistency during the fine-tuning process. Our module " + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 304, + 448, + 545, + 546 + ], + "type": "text", + "content": " does not result in any new network parameters and can be plugged into any learning-based network. We show the training protocol in Alg. 1." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 554, + 495, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 554, + 495, + 567 + ], + "spans": [ + { + "bbox": [ + 306, + 554, + 495, + 567 + ], + "type": "text", + "content": "4.2. Multi-view Consistency Supervision" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": "We propose a novel multi-view supervision to force the baseline network to learn to be 3D consistent. To simplify notation, we ignore the batch dimension and fixed camera intrinsic matrix. For every training iteration, we randomly sample " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": " image and landmark pairs " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\{V,\\mathrm{L}\\}_{1,\\dots,N}" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{D}}" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": " image and landmark pairs " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\{I,\\mathrm{L}\\}_{1,\\dots,M}" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": " from initial dataset " + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "inline_equation", + "content": "\\mathcal{D}^*" + }, + { + "bbox": [ + 304, + 573, + 545, + 656 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "spans": [ + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "text", + "content": "We pass " + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "inline_equation", + "content": "V_{1,\\dots,N}" + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "text", + "content": " to the baseline network " + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "text", + "content": " to obtain predicted landmarks " + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{L}}_{1,\\dots,N}" + }, + { + "bbox": [ + 304, + 658, + 545, + 684 + ], + "type": "text", + "content": " which are shown with green" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "inline_equation", + "content": "^{*}\\mathcal{D}" + }, + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": " is DAD-3DHeads dataset when training DAD-3DNet and is AFLW2000-3D when training 3DDFA." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12750" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 70, + 276, + 201 + ], + "blocks": [ + { + "bbox": [ + 62, + 70, + 276, + 201 + ], + "lines": [ + { + "bbox": [ + 62, + 70, + 276, + 201 + ], + "spans": [ + { + "bbox": [ + 62, + 70, + 276, + 201 + ], + "type": "image", + "image_path": "0384782a59611be0512f68c552e09ccf4cbd403b35b27bc352572b02ffebd08f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "lines": [ + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "spans": [ + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": "Figure 4. Multi-view Consistency Supervision. Predicted landmarks " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{L}}_{1,\\dots,N}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": ", estimated 3D landmark " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\dot{\\mathbf{L}}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": ", projected landmarks " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{L}}_{1,\\dots,Q}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": ", and ground truth landmarks " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": " are denoted as green, blue, red, and yellow points respectively. 
The processes of calculating 3D landmark " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\dot{\\mathbf{L}}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": " and the projection procedure are shown as light blue and pink arrows, respectively. " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Self - Cons}}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Multiview}}" + }, + { + "bbox": [ + 46, + 208, + 289, + 286 + ], + "type": "text", + "content": " are represented as red and light green lines, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "spans": [ + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": "points in Fig. 4. We then randomly select " + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": " predicted landmarks " + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{L}}_{1,\\dots,P} \\in \\mathbb{R}^{P \\times 68 \\times 2}" + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{L}}_{1,\\dots,N}" + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": " to calculate the \"canonical\" 3D landmark " + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "inline_equation", + "content": "\\dot{\\mathrm{L}} \\in \\mathbb{R}^{68 \\times 3}" + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": ", as shown by the blue point in Fig. 4. 
We calculate each keypoint of the \"canonical\" 3D landmark " + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "inline_equation", + "content": "\\dot{\\mathrm{L}}^{(k)} \\in \\mathbb{R}^3, 1 \\leq k \\leq 68" + }, + { + "bbox": [ + 46, + 292, + 289, + 365 + ], + "type": "text", + "content": " through Direct Linear Transformation (DLT) [16, 25], as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 87, + 375, + 287, + 392 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 375, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 87, + 375, + 287, + 392 + ], + "type": "interline_equation", + "content": "\\mu_ {p} = \\mathbb {M} _ {p} [ 0,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 0 ] \\in \\mathbb {R} ^ {4}, \\qquad (2)", + "image_path": "cca6ae62598893a8a5c9c908b009e4000b492679bb38344022bb828b389b8af4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 88, + 396, + 287, + 414 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 396, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 88, + 396, + 287, + 414 + ], + "type": "interline_equation", + "content": "v _ {p} = \\mathbb {M} _ {p} [ 1,: ] - \\mathbb {M} _ {p} [ 2,: ] \\cdot \\hat {\\mathrm {L}} _ {p} ^ {k} [ 1 ] \\in \\mathbb {R} ^ {4}, \\qquad (3)", + "image_path": "613ed6e2bace32a3bd3c4906d1805d582386667b1e997004b657e32ece2574ad.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 420, + 287, + 436 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 420, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 53, + 420, + 287, + 436 + ], + "type": "interline_equation", + "content": "\\mathbf {A} = \\left[ \\mu_ {1} \\mid \\mu_ {2} \\mid \\dots \\mid \\mu_ {p} \\mid v _ {1} \\mid v _ {2} \\mid \\dots \\mid v _ {p} \\right] ^ {T} \\in \\mathbb {R} ^ {2 P \\times 4}, (4)", + "image_path": "44603bff39aea5c954c8e41eae980a845ba50a69b8c505507f8a7d371bdca2d6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 446, + 287, + 469 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 446, + 287, + 469 + ], + "spans": [ + { + "bbox": [ + 54, + 446, + 287, + 469 + ], + "type": "interline_equation", + "content": "\\dot {\\mathbf {L}} ^ {(k)} = \\left(\\mathbf {A} [:,: 3 ] ^ {T} \\mathbf {A} [:,: 3 ]\\right) ^ {- 1} \\mathbf {A} [:,: 3 ] ^ {T} (- \\mathbf {A} [:, 3 ]), \\tag {5}", + "image_path": "40e77f9d36a52705df40edde232acfdbe0fad4c96c003671ae8fb26b5c1df49a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "p, 1 \\leq p \\leq P" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ", is the index of views, and " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbb{M}_{1,\\dots,P}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " are the corresponding camera extrinsic matrices which are pre-defined for view synthesis during volume rendering (see Sec. 3). 
Moreover, " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbb{M}_p[i,:]" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " indicates the i-th row of " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbb{M}_p" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{A}[:,:i]" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " indicates columns 0 to " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{A}[:,i]" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": "-th column of " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ". By Eq. 2 and Eq. 3, we first calculate the projection constraints for " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\dot{\\mathrm{L}}^{(k)}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mu_p[:3] \\cdot \\dot{\\mathrm{L}}^{(k)} + \\mu_p[3] = 0" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": ", where ‘·’ indicates the dot product. Then we stack all of the constraints into " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\in \\mathbb{R}^{2P \\times 4}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " by Eq. 4. At last, we compute " + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "inline_equation", + "content": "\\dot{\\mathrm{L}}^{(k)}" + }, + { + "bbox": [ + 46, + 476, + 287, + 609 + ], + "type": "text", + "content": " with a least-squares approach (Eq. 5)."
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "text", + "content": "After obtaining the \"canonical\" 3D landmark " + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "inline_equation", + "content": "\\dot{\\mathrm{L}}" + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "text", + "content": ", we project it onto the image planes of the remaining " + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "inline_equation", + "content": "Q = N - P" + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "text", + "content": " views to obtain the projected landmark " + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathrm{L}}_{1,\\dots,Q}" + }, + { + "bbox": [ + 46, + 609, + 288, + 658 + ], + "type": "text", + "content": ", shown as red points in Fig. 4, by the following equations:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 88, + 666, + 287, + 682 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 666, + 287, + 682 + ], + "spans": [ + { + "bbox": [ + 88, + 666, + 287, + 682 + ], + "type": "interline_equation", + "content": "s = \\mathbb {M} _ {q} [:,: 3 ] \\dot {\\mathrm {L}} ^ {(k)} + \\mathbb {M} _ {q} [:, 3 ] \\in \\mathbb {R} ^ {3 \\times 1}, \\tag {6}", + "image_path": "b78dab2b51880350f13631d6eaef455879f376f8c6fb937bbe3bc7cbbfd52109.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 689, + 287, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 689, + 287, + 718 + ], + "spans": [ + { + "bbox": [ + 108, + 689, + 287, + 718 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathrm {L}} _ {q} ^ {(k)} = \\left[ \\begin{array}{c} s [ 0 ] / s [ 2 ] \\\\ s [ 1 ] / s [ 2 ] \\end{array} \\right] \\in \\mathbb {R} ^ {2 \\times 1}, \\tag {7}", + "image_path": "1c9a998e3d8071d9d3c6d7b1bd267950d92a5c9884d2618e3a1e57031ea50cfa.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "content": "where, in our case, " + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "1 \\leq q \\leq Q" + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "content": ". Eq. 6 transforms the 3D landmark from \"canonical\" space to the camera space of view " + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 306, + 72, + 545, + 108 + ], + "type": "text", + "content": ", and Eq. 7 transforms it from camera space to image space." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "spans": [ + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "text", + "content": "Self-Projection Consistency Loss. 
Since all " + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "text", + "content": " views are sampled from one NeRF with different camera views, the predicted landmarks " + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "inline_equation", + "content": "\\hat{\\mathrm{L}}_{1,\\dots,Q}" + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "text", + "content": " and the projected landmarks " + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathrm{L}}_{1,\\dots,Q}" + }, + { + "bbox": [ + 305, + 109, + 547, + 181 + ], + "type": "text", + "content": " should be consistent. Therefore, we propose to minimize the error between the predicted and projected landmarks as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 361, + 190, + 545, + 223 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 190, + 545, + 223 + ], + "spans": [ + { + "bbox": [ + 361, + 190, + 545, + 223 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {S e l f - C o n s}} = \\sum_ {q = 1} ^ {Q} \\| \\hat {\\mathrm {L}} _ {q} - \\tilde {\\mathrm {L}} _ {q} \\| _ {1}. \\tag {8}", + "image_path": "36e85a13c1f5fcb38ef4e515632dd84442d04d16c90405b8327991dac5c23566.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 234, + 545, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 234, + 545, + 283 + ], + "spans": [ + { + "bbox": [ + 305, + 234, + 545, + 283 + ], + "type": "text", + "content": "Mesh Consistency Loss* Besides the self-projection consistency, all the " + }, + { + "bbox": [ + 305, + 234, + 545, + 283 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 305, + 234, + 545, + 283 + ], + "type": "text", + "content": " views also share one mesh topology in the canonical space. 
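A one-line PyTorch sketch of the self-projection consistency loss in Eq. 8, assuming the predicted and projected landmarks are stored as (Q, K, 2) tensors (Q views, K landmarks per view):

```python
import torch

def self_consistency_loss(pred_lms: torch.Tensor, proj_lms: torch.Tensor) -> torch.Tensor:
    """Summed L1 error between predicted and projected landmarks (Eq. 8)."""
    return (pred_lms - proj_lms).abs().sum()
```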
Therefore, we apply a mesh consistency loss in canonical space calculated by:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 359, + 292, + 545, + 326 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 292, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 359, + 292, + 545, + 326 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {M e s h - C o n s}} = \\sum_ {n = 1} ^ {N} \\| \\hat {\\mathrm {M}} _ {n} - \\dot {\\mathrm {M}} \\| _ {2}, \\tag {9}", + "image_path": "bc6377396fadde1407fb2d05d7b9407f601f13f3da79a4cd3d6ef4b372a1891c.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{M}}_n" + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "text", + "content": " is the predicted mesh of view " + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "text", + "content": " in the canonical space, and " + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{M}}" + }, + { + "bbox": [ + 304, + 331, + 545, + 367 + ], + "type": "text", + "content": " is the ground truth mesh of the original reference image." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 369, + 545, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 369, + 545, + 428 + ], + "spans": [ + { + "bbox": [ + 305, + 369, + 545, + 428 + ], + "type": "text", + "content": "Multiview Landmark Loss. We also minimize the distance between the predicted 2D facial landmarks and the corresponding multi-view ground truth landmarks we obtained in Sec. 3, which are denoted as yellow points in Fig. 4. The loss can be formulated as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 361, + 432, + 545, + 465 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 432, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 361, + 432, + 545, + 465 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {M u l t i v e w}} = \\sum_ {q = 1} ^ {N} \\| \\hat {\\mathrm {L}} _ {q} - \\mathrm {L} _ {q} \\| _ {1}. \\tag {10}", + "image_path": "ac32f8cc318d89dde8c6814c5923ecb17665ab15766a2d5f4d471b7f4e2f94c7.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "spans": [ + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "text", + "content": "We also incorporate the original loss of the baseline method computed with the image and landmark pairs " + }, + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "inline_equation", + "content": "\\{I,L\\}_{1,\\dots ,M}" + }, + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "text", + "content": " from dataset " + }, + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 305, + 469, + 545, + 518 + ], + "type": "text", + "content": " to stabilize our 3D-aware training. 
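The two remaining supervision terms can be sketched the same way: the mesh consistency loss (Eq. 9) penalizes the L2 distance of each view's predicted mesh to the shared canonical mesh, and the multi-view landmark loss (Eq. 10) is a summed L1 error against the generated ground-truth landmarks. Tensor shapes (N views, V vertices, K landmarks) and the per-view flattening are assumptions.

```python
import torch

def mesh_consistency_loss(pred_meshes, canon_mesh):
    """pred_meshes: (N, V, 3) per-view meshes in canonical space; canon_mesh: (V, 3)."""
    diff = pred_meshes - canon_mesh                  # broadcast over the N views
    return diff.flatten(1).norm(p=2, dim=1).sum()    # sum of per-view L2 norms (Eq. 9)

def multiview_landmark_loss(pred_lms, gt_lms):
    """pred_lms, gt_lms: (N, K, 2) predicted vs. generated ground-truth 2D landmarks."""
    return (pred_lms - gt_lms).abs().sum()           # summed L1 error (Eq. 10)
```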
The overall loss is:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 528, + 545, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 528, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 309, + 528, + 545, + 552 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {1} \\mathcal {L} _ {\\text {S e l f - C o n s}} + \\lambda_ {2} \\mathcal {L} _ {\\text {M e s h - C o n s}} + \\lambda_ {3} \\mathcal {L} _ {\\text {M u l t i v i e w}} + \\mathcal {L} _ {\\text {o r i g i n a l}}, \\tag {11}", + "image_path": "395b083fc36248dc0aa3bf9e4b1e86357427f6bb6241d99229c4da7189cee9c7.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "spans": [ + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "inline_equation", + "content": "\\lambda_{1,2,3}" + }, + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "text", + "content": " are hyper parameters that control the contribution of each components. We set " + }, + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "inline_equation", + "content": "\\lambda_{1,2,3}" + }, + { + "bbox": [ + 305, + 552, + 545, + 576 + ], + "type": "text", + "content": " to 0.1 empirically." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "text", + "content": "Note that our training is a plug-in module and can be incorporated into any existing facial landmark detector easily. For different pretrained models, we just need to change " + }, + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{original}}" + }, + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "text", + "content": " while the other novel loss components calculated on our balanced synthetic dataset " + }, + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 304, + 577, + 545, + 685 + ], + "type": "text", + "content": " can be applied directly. We show this plug-in capability on top of different baseline methods (e.g., DAD-3DNet [34] and 3DDFA [22]), and demonstrate that our 3D-aware training indeed improves their performance (see Sec. 5)." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "*We can apply it depending on whether the baseline network outputs mesh. In our case, the 3DDFA [22] and DAD-3DNet [34] both do." 
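Combining the terms of Eq. 11 is then a weighted sum; the 0.1 weights come from the text, and original_loss is a stand-in for whatever objective the chosen baseline (e.g. DAD-3DNet or 3DDFA) already optimizes, not a real API.

```python
def total_loss(self_cons, mesh_cons, multiview, original_loss,
               lam1=0.1, lam2=0.1, lam3=0.1):
    """Overall objective of Eq. 11 with the empirically chosen lambda_1,2,3 = 0.1."""
    return lam1 * self_cons + lam2 * mesh_cons + lam3 * multiview + original_loss
```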
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12751" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 114, + 294, + 215 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 288, + 103 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 288, + 103 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 288, + 103 + ], + "type": "text", + "content": "Table 1. Facial landmark detection result (NME) on DAD-3DHeads [34], FaceScape [65], and MultiFace [64]. Lower values mean better results." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 114, + 294, + 215 + ], + "lines": [ + { + "bbox": [ + 47, + 114, + 294, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 114, + 294, + 215 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>DAD-3DHeads</td><td>FaceScape</td><td>MultiFace</td></tr>
<tr><td>FAN [6]</td><td>7.141</td><td>16.741</td><td>6.143</td></tr>
<tr><td>Dlib [31]</td><td>10.841</td><td>29.431</td><td>18.205</td></tr>
<tr><td>3DDFA-V2 [23]</td><td>2.926</td><td>6.853</td><td>5.942</td></tr>
<tr><td>3DDFA [22]</td><td>4.082</td><td>7.988</td><td>8.121</td></tr>
<tr><td>3DDFA+</td><td>3.784</td><td>7.425</td><td>7.305</td></tr>
<tr><td>DAD-3DNet [34]</td><td>2.599</td><td>6.681</td><td>5.786</td></tr>
<tr><td>DAD-3DNet+</td><td>2.503</td><td>6.050</td><td>5.480</td></tr></table>
", + "image_path": "3192f680ce4272f7693275b14bd050c2eda3ba4d5d296c85ec22eaa960b48365.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 223, + 128, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 223, + 128, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 223, + 128, + 236 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 243, + 174, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 243, + 174, + 256 + ], + "spans": [ + { + "bbox": [ + 47, + 243, + 174, + 256 + ], + "type": "text", + "content": "5.1. Experimental Settings" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 261, + 287, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 261, + 287, + 344 + ], + "spans": [ + { + "bbox": [ + 46, + 261, + 287, + 344 + ], + "type": "text", + "content": "Training Details. We implement our algorithm in Pytorch and adopt ADAM to optimize the baseline networks. We run our 3D-aware training for 100 epochs with a batch size of 4, and a learning rate of " + }, + { + "bbox": [ + 46, + 261, + 287, + 344 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 46, + 261, + 287, + 344 + ], + "type": "text", + "content": " on each baseline network. As to computational cost, fine-tuning DAD-3DNet take about and 16.25 hours on 4 NVIDIA RTX A6000 GPUs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 345, + 287, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 345, + 287, + 369 + ], + "spans": [ + { + "bbox": [ + 47, + 345, + 287, + 369 + ], + "type": "text", + "content": "Dataset. Besides DAD-3DHeads, we use two additional datasets to conduct the evaluations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 58, + 376, + 287, + 550 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 58, + 376, + 287, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 376, + 287, + 449 + ], + "spans": [ + { + "bbox": [ + 58, + 376, + 287, + 449 + ], + "type": "text", + "content": "- DAD-3DHeads [34] is the state-of-the-art in-the-wild 3D head dataset, which contains dense, accurate annotations, and diverse facial appearances. It consists of 44,898 images collected from various sources (37,840 in the training set, 4,312 in the validation set, and 2,746 in the test set)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 58, + 457, + 287, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 457, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 58, + 457, + 287, + 506 + ], + "type": "text", + "content": "- FaceScape [65] is a large-scale high-quality lab-controlled 3D face dataset, which contains 18,760 examples, captured from 938 subjects and each with 20 specific expressions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 58, + 514, + 287, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 514, + 287, + 550 + ], + "spans": [ + { + "bbox": [ + 58, + 514, + 287, + 550 + ], + "type": "text", + "content": "- MultiFace [64] is a new multi-view, high-resolution human face dataset collected from 13 identities for neural face rendering." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 46, + 557, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 557, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 557, + 287, + 652 + ], + "type": "text", + "content": "Training and Testing Split. In all the experiments, we only refine the baseline models with the training set of our DAD-3DHeads-Syn and their original training dataset. We use the test sets of DAD-3DHeads-Syn and DAD-3DHeads [34], and use the full datasets of FaceScape [65] and MultiFace [63] for performance evaluation. All the comparison methods have not been trained on the split test sets." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 714 + ], + "type": "text", + "content": "Evaluation Metrics. We evaluate the facial landmark distance by calculating the Normalized Mean Error (NME). We normalize the landmark error by dividing its image resolution instead of the eye distance [55], since all the test images are aligned with offline tools. We calculate the head" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "pose error by the absolute distance of the Euler angle values." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 105, + 441, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 105, + 441, + 118 + ], + "spans": [ + { + "bbox": [ + 306, + 105, + 441, + 118 + ], + "type": "text", + "content": "5.2. Quantitative Evaluation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": "Landmark Detection Results. The quantitative landmark detection results on DAD-3DHeads [34], FaceScape [65], and MultiFace [64] are shown in Tab. 1. We can find that the DAD-3DNet+ refined by our 3D-aware multi-view consistency training achieves the best performance on all three datasets. Moreover, according to the results of 3DDFA [22], 3DDFA+, DAD-3DNet [34], and DAD-3DNet+, we find that after refinement, the new models (3DDFA+ and DAD-3DNet+) achieve much better results than the baseline models. For example, the detection error of DAD-3DNet [34] drops 0.631 and 0.306, a " + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": " improvement, on FaceScape and MultiFace datasets, respectively. 
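A hedged NumPy sketch of the two metrics from Evaluation Metrics above: NME with the landmark error normalized by the image resolution (rather than inter-ocular distance), and head-pose error as the absolute difference of the Euler angles. The exact normalization and averaging conventions used by the authors may differ.

```python
import numpy as np

def nme(pred_lms, gt_lms, image_size):
    """pred_lms, gt_lms: (K, 2) landmark arrays in pixels; image_size: image resolution."""
    per_point = np.linalg.norm(pred_lms - gt_lms, axis=1)
    return per_point.mean() / image_size

def head_pose_error(pred_euler, gt_euler):
    """pred_euler, gt_euler: (3,) pitch/roll/yaw angles; mean absolute Euler-angle distance."""
    return np.abs(np.asarray(pred_euler) - np.asarray(gt_euler)).mean()
```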
Similarly, we improve the 3DDFA [22] by 0.298 (" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": "), 0.563 (" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": "), and 0.816 (" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 304, + 123, + 545, + 409 + ], + "type": "text", + "content": ") on DAD-3DHeads, FaceScape and MultiFace datasets, respectively. We attribute the improvement to our proposed 3D aware multi-view training. One interesting phenomenon is that all the methods perform better on DAD-3DHeads dataset than the other two lab-captured datasets. We attribute this to the extreme head pose and challenging facial expressions in the other two datasets. We plot the head pose distribution of DAD-3DHeads (see supplementary materials) and find that distribution of head pose is not as uniform as the other two lab-controlled datasets." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "text", + "content": "Head Pose Estimation Results. Tab. 2 shows the head pose estimation error on DAD-3DHeads [34] and FaceScape [65]. Our DAD-3DNet+ achieves best performance in most metrics. Similar to the landmark results, we can also conclude that head pose detection accuracy of the baseline methods (3DDFA and DAD-3DNet) is improved by our 3D aware multi-view consistency (3DDFA+ and DAD-3DNet+). For example, after refinement, DAD-3DNet+ achieves " + }, + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "inline_equation", + "content": "11.9\\%" + }, + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "inline_equation", + "content": "18.8\\%" + }, + { + "bbox": [ + 304, + 411, + 545, + 543 + ], + "type": "text", + "content": " performance boosts in overall head pose error on DAD-3DHeads and FaceScape dataset, respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 551, + 435, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 551, + 435, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 551, + 435, + 563 + ], + "type": "text", + "content": "5.3. Qualitative Evaluation" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": "We fist show visual comparisons on images randomly sampled from DAD-3DHeads test set [34] in Fig. 5. The landmark predicted by our DAD-3DNet+ model fits the individual's face tighter than the other predictions. Furthermore, by comparing the third (3DDFA [22]) and forth columns (ours), we can see that refining model " + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "inline_equation", + "content": "(3\\mathrm{DDFA}+)" + }, + { + "bbox": [ + 304, + 570, + 545, + 713 + ], + "type": "text", + "content": " improves the landmark accuracy dramatically. 
Similar visual improvements can be found in sixth (DAD-3DNet) and seventh (DAD-3DNet+) columns as well. Comparing the sixth and seventh column, we can see that the refinement training drags and rotates the landmark in 3D space to better fit it to the individual's face surface. We attribute this abl" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12752" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 62, + 520, + 255 + ], + "blocks": [ + { + "bbox": [ + 73, + 62, + 520, + 255 + ], + "lines": [ + { + "bbox": [ + 73, + 62, + 520, + 255 + ], + "spans": [ + { + "bbox": [ + 73, + 62, + 520, + 255 + ], + "type": "image", + "image_path": "7aac1a8c3b9941c1668256c37762e4d1d7cf20e416080d39a9115a187a821aa4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 265, + 548, + 299 + ], + "lines": [ + { + "bbox": [ + 46, + 265, + 548, + 299 + ], + "spans": [ + { + "bbox": [ + 46, + 265, + 548, + 299 + ], + "type": "text", + "content": "Figure 5. The visual results of Dlib [31], FAN [5], 3DDFA [22], our refined 3DDFA+, 3DDFA-V2, DAD-3DNet [34], and our refined DAD-3DNet+ on images randomly sampled from DAD-3DHeads [34] testing set. We show the enlarged error region (while box) in the middle row." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 113, + 325, + 480, + 446 + ], + "blocks": [ + { + "bbox": [ + 59, + 304, + 533, + 316 + ], + "lines": [ + { + "bbox": [ + 59, + 304, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 59, + 304, + 533, + 316 + ], + "type": "text", + "content": "Table 2. Head pose estimation results (head pose error) on DAD-3DHeads [34], FaceScape [65]. Lower values mean better results." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 113, + 325, + 480, + 446 + ], + "lines": [ + { + "bbox": [ + 113, + 325, + 480, + 446 + ], + "spans": [ + { + "bbox": [ + 113, + 325, + 480, + 446 + ], + "type": "table", + "html": "
<table><tr><td></td><td colspan=4>DAD-3DHeads</td><td colspan=4>FaceScape</td></tr>
<tr><td></td><td>Pitch</td><td>Roll</td><td>Yaw</td><td>Overall</td><td>Pitch</td><td>Roll</td><td>Yaw</td><td>Overall</td></tr>
<tr><td>FAN [5]</td><td>9.765</td><td>5.376</td><td>6.390</td><td>7.177</td><td>8.774</td><td>4.895</td><td>6.556</td><td>6.742</td></tr>
<tr><td>Dlib [31]</td><td>13.352</td><td>11.799</td><td>14.654</td><td>13.268</td><td>17.861</td><td>12.663</td><td>19.548</td><td>16.691</td></tr>
<tr><td>3DDFA-V2 [23]</td><td>7.901</td><td>4.989</td><td>6.088</td><td>6.326</td><td>13.741</td><td>9.718</td><td>11.353</td><td>11.604</td></tr>
<tr><td>3DDFA [22]</td><td>9.895</td><td>7.977</td><td>8.996</td><td>8.956</td><td>20.789</td><td>18.145</td><td>19.692</td><td>19.752</td></tr>
<tr><td>3DDFA+</td><td>9.195</td><td>6.792</td><td>8.692</td><td>8.226</td><td>20.996</td><td>16.426</td><td>19.054</td><td>18.826</td></tr>
<tr><td>DAD-3DNet [34]</td><td>8.274</td><td>4.666</td><td>9.206</td><td>7.382</td><td>15.851</td><td>9.676</td><td>18.346</td><td>14.624</td></tr>
<tr><td>DAD-3DNet+</td><td>7.700</td><td>4.274</td><td>7.528</td><td>6.500</td><td>14.466</td><td>7.247</td><td>13.876</td><td>11.863</td></tr></table>
", + "image_path": "dbeafcf7401a625fbde6bffab9671d2fac821f393c656c5087e752eca4ed2e7e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 466, + 288, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 466, + 288, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 466, + 288, + 502 + ], + "type": "text", + "content": "ity to our 3D-aware multi-view consistency training, which lets the refined model gain the better sense in 3D space, and therefore, improve the landmark detection results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 503, + 287, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 503, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 46, + 503, + 287, + 611 + ], + "type": "text", + "content": "To further validate the improvement gained by the proposed 3D-aware multi-view consistency training, we show the visual results (Fig. 6) of 3DDFA [22], our refined 3DDFA+, DAD-3DNet [34], and our refined DAD-3DNet+ on images sampled from four different test sets. We can find that our proposed refinement improves the landmark detection results in the eye, mouth, and face contour regions, which usually contain more appearance dynamics than the other areas." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 622, + 239, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 239, + 635 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 239, + 635 + ], + "type": "text", + "content": "5.4. Performance Improvement Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 715 + ], + "type": "text", + "content": "To systematically understand the source of improvement after refining the baseline methods (DAD-3DNet [34] and 3DDFA [22]) with our proposed 3D-aware multi-view consistency training, we further calculate and plot the landmark and head pose error improvements on DAD-3DHeads [34] (see Fig. 7). Instead of calculating the overall improved" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 466, + 547, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 547, + 706 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 547, + 706 + ], + "type": "text", + "content": "error score, we split all the testing images into different groups according to their head pose value and calculate the improved error score within each group. We can find that the improvement by our training gets more obvious as the head pose gets more challenging. For example, the landmark error improvement (Fig. 7 upper section) using our method built on top of 3DDFA [22] increases from 0.12 to 0.71. Similarly, the head pose estimation error (Fig. 7 lower section) improvement using our method built on top of DAD-3DNet [34] increases from 0.02 to 2.7. We also show the detection result visualization in Fig. 8. We can see that from left to right, as the head pose increases, the error of the DAD-3DNet+ (second row) is more stable than the error (first row) of the DAD-3DNet. Base on this trend, we conclude that our proposed 3D-aware multi-view consistency training provides a more significant improvement over the baselines on images with larger head pose. 
This verifies our hypothesis that multi-view consistency training enables the network to learn 3D-aware information, which benefits the detection results on images with large head pose." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12753" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 69, + 286, + 293 + ], + "blocks": [ + { + "bbox": [ + 49, + 69, + 286, + 293 + ], + "lines": [ + { + "bbox": [ + 49, + 69, + 286, + 293 + ], + "spans": [ + { + "bbox": [ + 49, + 69, + 286, + 293 + ], + "type": "image", + "image_path": "f2ac3d6ce01550466943c2f9f8af1f2f6a4d21af6e231534947de31734b3a21c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 302, + 287, + 347 + ], + "lines": [ + { + "bbox": [ + 46, + 302, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 46, + 302, + 287, + 347 + ], + "type": "text", + "content": "Figure 6. The visual comparisons between baseline methods and the refined methods on four testing sets. The left column and upper row list the dataset and method names, respectively. " + }, + { + "bbox": [ + 46, + 302, + 287, + 347 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 46, + 302, + 287, + 347 + ], + "type": "text", + "content": " denotes the model that has been refined by our 3D-aware training." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 61, + 355, + 269, + 456 + ], + "blocks": [ + { + "bbox": [ + 61, + 355, + 269, + 456 + ], + "lines": [ + { + "bbox": [ + 61, + 355, + 269, + 456 + ], + "spans": [ + { + "bbox": [ + 61, + 355, + 269, + 456 + ], + "type": "image", + "image_path": "eb3cbcd9dc8c8a1441823c5b341c0f8f352ccce72c74df1480e3a0358cf7b24d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 61, + 457, + 269, + 557 + ], + "blocks": [ + { + "bbox": [ + 61, + 457, + 269, + 557 + ], + "lines": [ + { + "bbox": [ + 61, + 457, + 269, + 557 + ], + "spans": [ + { + "bbox": [ + 61, + 457, + 269, + 557 + ], + "type": "image", + "image_path": "26af171f93b204cdcd7c84063eda237916b5601d8ebf9d2f080cc61ffedc438e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 563, + 287, + 618 + ], + "lines": [ + { + "bbox": [ + 46, + 563, + 287, + 618 + ], + "spans": [ + { + "bbox": [ + 46, + 563, + 287, + 618 + ], + "type": "text", + "content": "Figure 7. The landmark (top) and head pose (bottom) error improvement over DAD-3DNet [34] and 3DDFA [22] on images from different head pose ranges. The solid and dotted lines indicate DAD-3DNet [34] vs. DAD-3DNet+ (ours) and 3DDFA [22] vs. 3DDFA+ (ours)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 634, + 140, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 634, + 140, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 634, + 140, + 647 + ], + "type": "text", + "content": "5.5. 
Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "We conduct ablation study on FaceScape [65] to verify the importance of main components of our novel design. As shown in Tab. 3, we calculate NME of landmark and MAE of pose estimation in these ablation experiments. Based on these numbers, we can see the performance degrades" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 318, + 69, + 535, + 175 + ], + "blocks": [ + { + "bbox": [ + 318, + 69, + 535, + 175 + ], + "lines": [ + { + "bbox": [ + 318, + 69, + 535, + 175 + ], + "spans": [ + { + "bbox": [ + 318, + 69, + 535, + 175 + ], + "type": "image", + "image_path": "af87e6bdb2c2cb2d76c4ffc07ba3a5063e33a0cefd29dff5abab44fd06970664.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 182, + 545, + 238 + ], + "lines": [ + { + "bbox": [ + 305, + 182, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 305, + 182, + 545, + 238 + ], + "type": "text", + "content": "Figure 8. The error visualization of DAD-3DNet [34] and our DAD-3DNet+ on MultiFace [64] dataset. The white and green dots are the ground truth and predicted landmarks, respectively. We use the red line to show the error distance. From left to right, the head pose increases gradually." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 319, + 277, + 529, + 368 + ], + "blocks": [ + { + "bbox": [ + 306, + 246, + 545, + 268 + ], + "lines": [ + { + "bbox": [ + 306, + 246, + 545, + 268 + ], + "spans": [ + { + "bbox": [ + 306, + 246, + 545, + 268 + ], + "type": "text", + "content": "Table 3. Ablation Study on FaceScape [65]. The top 2 numbers are shown in bold." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 319, + 277, + 529, + 368 + ], + "lines": [ + { + "bbox": [ + 319, + 277, + 529, + 368 + ], + "spans": [ + { + "bbox": [ + 319, + 277, + 529, + 368 + ], + "type": "table", + "html": "
<table><tr><td></td><td>Component</td><td>NME ↓</td><td>Pose ↓</td></tr>
<tr><td>1</td><td>full model (P=4)</td><td>6.050</td><td>11.863</td></tr>
<tr><td>2</td><td>w/o L_Mesh-Cons</td><td>6.168</td><td>12.327</td></tr>
<tr><td>3</td><td>w/o L_Self-Cons</td><td>6.541</td><td>13.623</td></tr>
<tr><td>4</td><td>full model (P=8)</td><td>6.048</td><td>11.923</td></tr>
<tr><td>5</td><td>full model (P=16)</td><td>6.098</td><td>11.902</td></tr>
<tr><td>6</td><td>full model (P=32)</td><td>6.139</td><td>11.912</td></tr></table>
", + "image_path": "3d511c2eb7e9abe6ec1581aecd39c58ced7d79d7f0b3e753785a7674c09a6a1e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "text", + "content": "drastically when we remove " + }, + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Self - Cons}}" + }, + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "text", + "content": ". Moreover, removing " + }, + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{Mesh - Cons}}" + }, + { + "bbox": [ + 305, + 384, + 545, + 456 + ], + "type": "text", + "content": " negatively impacts the results, demonstrating its importance. Moreover, estimating the 3D landmarks in the world space using fewer views leads to better results. This is a significant advantage as it makes our fine-tuning process more efficient." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 484, + 378, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 484, + 378, + 497 + ], + "spans": [ + { + "bbox": [ + 306, + 484, + 378, + 497 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 713 + ], + "type": "text", + "content": "We propose 3D-aware multi-view consistency training, a new framework for improving deep-learning base landmark detection algorithms. Through a set of novel loss functions, we force the network to produce landmarks that are 3D consistent. We additionally introduce a novel dataset simulation pipeline to combine the merits of lab-controlled captures and in-the-wild collected images. The model refined by our method outperforms previous approaches in terms of landmark detection accuracy and head pose estimation accuracy. Admittedly, our work has some limitations. For example, our proposed training relies on the performance of the baseline method. If the pretrained baseline yield poor initial predictions, our DLT would fail to estimate reasonable canonical 3D landmark, affecting the performance of the proposed self-projection consistency loss. Investigating ways to reduce the reliance on the accuracy of the baseline methods would be an interesting future research." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12754" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 146 + ], + "type": "text", + "content": "[1] Vitor Albiero, Xingyu Chen, Xi Yin, Guan Pang, and Tal Hassner. img2pose: Face alignment and detection via 6dof, face pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7617-7627, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 192 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 192 + ], + "type": "text", + "content": "[2] Slawomir Bak, Peter Carr, and Jean-Francois Lalonde. Domain adaptation through synthesis for unsupervised person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 189–205, 2018." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 194, + 288, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 194, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 194, + 288, + 237 + ], + "type": "text", + "content": "[3] Peter N Belhumeur, David W Jacobs, David J Kriegman, and Neeraj Kumar. Localizing parts of faces using a consensus of exemplars. IEEE transactions on pattern analysis and machine intelligence, 35(12):2930-2940, 2013." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 239, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 288, + 282 + ], + "type": "text", + "content": "[4] Adrian Bulat and Georgios Tzimiropoulos. Two-stage convolutional part heatmap regression for the 1st 3d face alignment in the wild (3dfaw) challenge. In European Conference on Computer Vision, pages 616-624. Springer, 2016." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 285, + 288, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 285, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 285, + 288, + 338 + ], + "type": "text", + "content": "[5] Adrian Bulat and Georgios Tzimiropoulos. Binarized convolutional landmark localizers for human pose estimation and face alignment with limited resources. In Proceedings of the IEEE International Conference on Computer Vision, pages 3706-3714, 2017." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 340, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 340, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 340, + 288, + 396 + ], + "type": "text", + "content": "[6] Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In Proceedings of the IEEE International Conference on Computer Vision, pages 1021-1030, 2017." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 53, + 397, + 288, + 441 + ], + "type": "text", + "content": "[7] Xavier P Burgos-Artizzu, Pietro Perona, and Piotr Dólar. Robust face landmark estimation under occlusion. In Proceedings of the IEEE international conference on computer vision, pages 1513-1520, 2013." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 53, + 442, + 288, + 475 + ], + "type": "text", + "content": "[8] Chen Cao, Yanlin Weng, Stephen Lin, and Kun Zhou. 3d shape regression for real-time facial animation. ACM Transactions on Graphics (TOG), 32(4):1-10, 2013." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 476, + 288, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 476, + 288, + 542 + ], + "spans": [ + { + "bbox": [ + 53, + 476, + 288, + 542 + ], + "type": "text", + "content": "[9] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 287, + 588 + ], + "type": "text", + "content": "[10] Dong Chen, Shaoqing Ren, Yichen Wei, Xudong Cao, and Jian Sun. Joint cascade face detection and alignment. In European conference on computer vision, pages 109-122. Springer, 2014." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 590, + 287, + 644 + ], + "type": "text", + "content": "[11] Lele Chen, Ross K Maddox, Zhiyao Duan, and Chenliang Xu. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7832-7841, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 287, + 678 + ], + "type": "text", + "content": "[12] Timothy F Cootes, Gareth J Edwards, and Christopher J Taylor. Active appearance models. In European conference on computer vision, pages 484-498. Springer, 1998." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 713 + ], + "type": "text", + "content": "[13] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-level face localisation in the wild. In Proceedings of the IEEE con" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 74, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 326, + 74, + 545, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 74, + 545, + 94 + ], + "spans": [ + { + "bbox": [ + 326, + 74, + 545, + 94 + ], + "type": "text", + "content": "ference on computer vision and pattern recognition, pages 5203-5212, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 95, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 547, + 149 + ], + "type": "text", + "content": "[14] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 151, + 545, + 194 + ], + "type": "text", + "content": "[15] Xuanyi Dong, Yan Yan, Wanli Ouyang, and Yi Yang. Style aggregated network for facial landmark detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, June 2018." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 248 + ], + "type": "text", + "content": "[16] Xuanyi Dong, Yi Yang, Shih-En Wei, Xinshuo Weng, Yaser Sheikh, and Shouu-I Yu. Supervision by registration and triangulation for landmark detection. IEEE transactions on pattern analysis and machine intelligence, 43(10):3681-3694, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 307, + 250, + 545, + 293 + ], + "type": "text", + "content": "[17] Pengfei Dou, Shishir K Shah, and Ioannis A Kakadiaris. End-to-end 3d face reconstruction with deep neural networks. In proceedings of the IEEE conference on computer vision and pattern recognition, pages 5908-5917, 2017." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 348 + ], + "type": "text", + "content": "[18] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3d face reconstruction and dense alignment with position map regression network. In Proceedings of the European conference on computer vision (ECCV), pages 534-551, 2018." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 350, + 545, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 350, + 545, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 350, + 545, + 381 + ], + "type": "text", + "content": "[19] Golnaz Ghiasi and Charless C Fowlkes. Occlusion coherence: Detecting and localizing occluded faces. arXiv preprint arXiv:1506.08347, 2015." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 545, + 415 + ], + "type": "text", + "content": "[20] Ralph Gross, Iain Matthews, Jeffrey Cohn, Takeo Kanade, and Simon Baker. Multi-pie. Image and vision computing, 28(5):807-813, 2010." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "type": "text", + "content": "[21] Kuangxiao Gu, Yuqian Zhou, and Thomas Huang. Fnet: Landmark driven fetching and learning network for faithful talking facial animation synthesis. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 10861-10868, 2020." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 471, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 471, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 307, + 471, + 545, + 492 + ], + "type": "text", + "content": "[22] Jianzhu Guo, Xiangyu Zhu, and Zhen Lei. 3ddfa. https://github.com/cleardusk/3DDFA, 2018." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 494, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 494, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 494, + 545, + 536 + ], + "type": "text", + "content": "[23] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z Li. Towards fast, accurate and stable 3d dense face alignment. In European Conference on Computer Vision, pages 152-168. Springer, 2020." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 537, + 545, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 537, + 545, + 570 + ], + "spans": [ + { + "bbox": [ + 307, + 537, + 545, + 570 + ], + "type": "text", + "content": "[24] Xiaojie Guo, Siyuan Li, Jinke Yu, Jiawan Zhang, Jiayi Ma, Lin Ma, Wei Liu, and Haibin Ling. Pfld: A practical facial landmark detector. arXiv preprint arXiv:1902.10859, 2019." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 571, + 545, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 571, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 307, + 571, + 545, + 602 + ], + "type": "text", + "content": "[25] Richard Hartley and Andrew Zisserman. Multiple view geometry in computer vision. Cambridge university press, 2003." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 604, + 545, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 604, + 545, + 657 + ], + "spans": [ + { + "bbox": [ + 307, + 604, + 545, + 657 + ], + "type": "text", + "content": "[26] Stefan Hinterstoisser, Vincent Lepetit, Paul Wohlhart, and Kurt Konolige. On pre-trained image features and synthetic images for deep learning. 
In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pages 0-0, 2018." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 712 + ], + "type": "text", + "content": "[27] Xinya Ji, Hang Zhou, Kaisiyuan Wang, Qianyi Wu, Wayne Wu, Feng Xu, and Xun Cao. Eamm: One-shot emotional talking face via audio-based emotion-aware motion model. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, 2022." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12755" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[28] Justin Johnson, Bharath Hariharan, Laurens Van Der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2901–2910, 2017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 140, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 140, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 49, + 140, + 288, + 183 + ], + "type": "text", + "content": "[29] Amin Jourabloo and Xiaoming Liu. Large-pose face alignment via cnn-based dense 3d model fitting. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4188-4196, 2016." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 287, + 227 + ], + "type": "text", + "content": "[30] Ira Kemelmacher-Shlizerman and Ronen Basri. 3d face reconstruction from a single image using a single reference face shape. IEEE transactions on pattern analysis and machine intelligence, 33(2):394-405, 2010." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 228, + 287, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 287, + 250 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 287, + 250 + ], + "type": "text", + "content": "[31] Davis E. King. Dlib-ml: A machine learning toolkit. Journal of Machine Learning Research, 10:1755-1758, 2009." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 251, + 287, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 251, + 287, + 315 + ], + "spans": [ + { + "bbox": [ + 48, + 251, + 287, + 315 + ], + "type": "text", + "content": "[32] Martin Koestinger, Paul Wohlhart, Peter M Roth, and Horst Bischof. Annotated facial landmarks in the wild: A largescale, real-world database for facial landmark localization. 
In 2011 IEEE international conference on computer vision workshops (ICCV workshops), pages 2144-2151. IEEE, 2011." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "type": "text", + "content": "[33] Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), 36(6), 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 361, + 288, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 361, + 288, + 426 + ], + "spans": [ + { + "bbox": [ + 48, + 361, + 288, + 426 + ], + "type": "text", + "content": "[34] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. Dad-3heads: A large-scale dense, accurate and diverse dataset for 3d head alignment from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 20942–20952, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 426, + 288, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 288, + 482 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 288, + 482 + ], + "type": "text", + "content": "[35] Nikolaus Mayer, Eddy Ilg, Philipp Fischer, Caner Hazirbas, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. What makes good synthetic training data for learning disparity and optical flow estimation? International Journal of Computer Vision, 126(9):942-960, 2018." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 483, + 287, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 483, + 287, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 483, + 287, + 536 + ], + "type": "text", + "content": "[36] Kieron Messer, Jiri Matas, Josef Kittler, Juergen Luettin, Gilbert Maitre, et al. Xm2vtsdb: The extended m2vts database. In Second international conference on audio and video-based biometric person authentication, volume 964, pages 965-966. Citeseer, 1999." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 48, + 537, + 287, + 581 + ], + "type": "text", + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 582, + 287, + 624 + ], + "type": "text", + "content": "[38] Erik Murphy-Chutorian and Mohan Manubhai Trivedi. Head pose estimation in computer vision: A survey. IEEE transactions on pattern analysis and machine intelligence, 31(4):607-626, 2008." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 691 + ], + "type": "text", + "content": "[39] P Jonathon Phillips, Patrick J Flynn, Todd Scruggs, Kevin W Bowyer, Jin Chang, Kevin Hoffman, Joe Marques, Jaesik Min, and William Worek. Overview of the face recognition grand challenge. In 2005 IEEE computer society conference on computer vision and pattern recognition (CVPR'05), volume 1, pages 947-954. IEEE, 2005." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 692, + 287, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 287, + 714 + ], + "type": "text", + "content": "[40] Shengju Qian, Keqiang Sun, Wayne Wu, Chen Qian, and Jiaya Jia. Aggregation via separation: Boosting facial land" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "mark detector with semi-supervised style translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10153-10163, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 108, + 545, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 545, + 162 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 545, + 162 + ], + "type": "text", + "content": "[41] Rajeev Ranjan, Vishal M Patel, and Rama Chellappa. Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE transactions on pattern analysis and machine intelligence, 41(1):121-135, 2017." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 545, + 207 + ], + "type": "text", + "content": "[42] Elad Richardson, Matan Sela, Roy Or-El, and Ron Kimmel. Learning detailed face reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1259–1268, 2017." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 545, + 251 + ], + "type": "text", + "content": "[43] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Transactions on Graphics (TOG), 42(1):1-13, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 254, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 254, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 254, + 545, + 308 + ], + "type": "text", + "content": "[44] Andreas Rossler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. Faceforensics: Learning to detect manipulated facial images. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 1-11, 2019." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 310, + 545, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 310, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 308, + 310, + 545, + 342 + ], + "type": "text", + "content": "[45] Nataniel Ruiz, Samuel Schulter, and Manmohan Chandraker. Learning to simulate. In International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 344, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 344, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 344, + 545, + 399 + ], + "type": "text", + "content": "[46] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 399, + 545, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 455 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 455 + ], + "type": "text", + "content": "[47] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE international conference on computer vision workshops, pages 397-403, 2013." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 545, + 511 + ], + "type": "text", + "content": "[48] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. A semi-automatic methodology for facial landmark annotation. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 896-903, 2013." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 512, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 512, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 512, + 545, + 567 + ], + "type": "text", + "content": "[49] Jie Shen, Stefanos Zafeiriou, Grigoris G Chrysos, Jean Kossaifi, Georgios Tzimiropoulos, and Maja Pantic. The first facial landmark tracking in-the-wild challenge: Benchmark and results. In Proceedings of the IEEE international conference on computer vision workshops, pages 50-58, 2015." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 568, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 568, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 308, + 568, + 545, + 623 + ], + "type": "text", + "content": "[50] Ashish Shrivastava, Tomas Pfister, Oncel Tuzel, Joshua Susskind, Wenda Wang, and Russell Webb. Learning from simulated and unsupervised images through adversarial training. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2107-2116, 2017." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 669 + ], + "type": "text", + "content": "[51] Linsen Song, Wayne Wu, Chaoyou Fu, Chen Change Loy, and Ran He. Audio-driven dubbing for user generated contents via style-aware semi-parametric synthesis. IEEE Transactions on Circuits and Systems for Video Technology, 2022." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 670, + 545, + 713 + ], + "type": "text", + "content": "[52] Linsen Song, Wayne Wu, Chen Qian, Ran He, and Chen Change Loy. Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security, 17:585-598, 2022." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "12756" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[53] Yang Song, Jingwen Zhu, Dawei Li, Andy Wang, and Hairong Qi. Talking face generation by conditional recurrent adversarial network. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, pages 919–925. International Joint Conferences on Artificial Intelligence Organization, 7 2019." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 49, + 140, + 287, + 183 + ], + "type": "text", + "content": "[54] Luuk Spreeuwers, Maikel Schils, and Raymond Veldhuis. Towards robust evaluation of face morphing detection. In 2018 26th European Signal Processing Conference (EU-SIPCO), pages 1027-1031. IEEE, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 184, + 287, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 184, + 287, + 248 + ], + "spans": [ + { + "bbox": [ + 49, + 184, + 287, + 248 + ], + "type": "text", + "content": "[55] Keqiang Sun, Wayne Wu, Tinghao Liu, Shuo Yang, Quan Wang, Qiang Zhou, Zuochang Ye, and Chen Qian. Fab: A robust facial landmark detection framework for motion-blurred videos. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5462-5471, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 250, + 287, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 250, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 49, + 250, + 287, + 294 + ], + "type": "text", + "content": "[56] Yi Sun, Xiaogang Wang, and Xiaou Tang. Deep convolutional network cascade for facial point detection. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3476-3483, 2013." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 294, + 287, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 294, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 49, + 294, + 287, + 360 + ], + "type": "text", + "content": "[57] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 361, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 361, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 49, + 361, + 287, + 415 + ], + "type": "text", + "content": "[58] Justus Thies, Michael Zollhofer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of rgb videos. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2387-2395, 2016." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 415, + 287, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 415, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 49, + 415, + 287, + 459 + ], + "type": "text", + "content": "[59] Boris van Breugel, Trent Kyono, Jeroen Berrevoets, and Michaela van der Schaar. Decaf: Generating fair synthetic data using causally-aware generative networks. Advances in Neural Information Processing Systems, 34:22221-22233, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 459, + 287, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 459, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 49, + 459, + 287, + 503 + ], + "type": "text", + "content": "[60] Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Jan Kautz, and Bryan Catanzaro. Few-shot video-to-video synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 503, + 287, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 503, + 287, + 559 + ], + "spans": [ + { + "bbox": [ + 49, + 503, + 287, + 559 + ], + "type": "text", + "content": "[61] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J Cashman, and Jamie Shotton. Fake it till you make it: face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF international conference on computer vision, pages 3681-3691, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 559, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 559, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 49, + 559, + 287, + 624 + ], + "type": "text", + "content": "[62] Erroll Wood, Tadas Baltrusaitis, Louis-Philippe Morency, Peter Robinson, and Andreas Bulling. Learning an appearance-based gaze estimator from one million synthesised images. In Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research & Applications, pages 131–138, 2016." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 625, + 287, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 625, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 49, + 625, + 287, + 680 + ], + "type": "text", + "content": "[63] Yue Wu, Zuoguan Wang, and Qiang Ji. Facial feature tracking under varying facial expressions and face poses based on restricted boltzmann machines. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3452-3459, 2013." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 49, + 681, + 287, + 713 + ], + "type": "text", + "content": "[64] Cheng-hsin Wu, Ningyuan Zheng, Scott Ardisson, Rohan Bali, Danielle Belko, Eric Brockmeyer, Lucas Evans, Timothy Godisart, Hyowon Ha, Alexander Hypes, Taylor Koska," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 149 + ], + "type": "text", + "content": "Steven Krenn, Stephen Lombardi, Xiaomin Luo, Kevyn McPhail, Laura Millerschoen, Michal Perdoch, Mark Pitts, Alexander Richard, Jason Saragih, Junko Saragih, Takaaki Shiratori, Tomas Simon, Matt Stewart, Autumn Trimble, Xinshuo Weng, David Whitewolf, Chenglei Wu, Shouu-I Yu, and Yaser Sheikh. Multiface: A dataset for neural face rendering. In arXiv, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 545, + 206 + ], + "type": "text", + "content": "[65] Haotian Yang, Hao Zhu, Yanru Wang, Mingkai Huang, Qiu Shen, Ruigang Yang, and Xun Cao. Facescape: a large-scale high quality 3d face dataset and detailed riggable 3d face prediction. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 601-610, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 207, + 545, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 207, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 207, + 545, + 262 + ], + "type": "text", + "content": "[66] Ran Yi, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Apdrawinggan: Generating artistic portrait drawings from face photos with hierarchical gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10743-10752, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 263, + 545, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 263, + 545, + 308 + ], + "spans": [ + { + "bbox": [ + 308, + 263, + 545, + 308 + ], + "type": "text", + "content": "[67] Ran Yi, Zipeng Ye, Ruoyu Fan, Yezhi Shu, Yong-Jin Liu, Yu-Kun Lai, and Paul L Rosin. Animating portrait line drawings from a single face photo and a speech signal. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-8, 2022." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 308, + 545, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 545, + 353 + ], + "type": "text", + "content": "[68] Xi Yin, Xiang Yu, Kihyuk Sohn, Xiaoming Liu, and Manmohan Chandraker. Towards large-posed face frontalization in the wild. In In Proceeding of International Conference on Computer Vision, Venice, Italy, October 2017." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "spans": [ + { + "bbox": [ + 308, + 354, + 545, + 408 + ], + "type": "text", + "content": "[69] Egor Zakharov, Aliaksandra Shysheya, Egor Burkov, and Victor Lempitsky. Few-shot adversarial learning of realistic neural talking head models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9459-9468, 2019." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 410, + 545, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 410, + 545, + 454 + ], + "spans": [ + { + "bbox": [ + 308, + 410, + 545, + 454 + ], + "type": "text", + "content": "[70] Xiaoxing Zeng, Xiaojiang Peng, and Yu Qiao. Df2net: A dense-fine-finer network for detailed 3d face reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2315-2324, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 456, + 545, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 545, + 498 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 545, + 498 + ], + "type": "text", + "content": "[71] Kaipeng Zhang, Zhanpeng Zhang, Zhifeng Li, and Yu Qiao. Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters, 23(10):1499-1503, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 500, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 500, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 308, + 500, + 545, + 567 + ], + "type": "text", + "content": "[72] Xing Zhang, Lijun Yin, Jeffrey F Cohn, Shaun Canavan, Michael Reale, Andy Horowitz, and Peng Liu. A high-resolution spontaneous 3d dynamic facial expression database. In 2013 10th IEEE international conference and workshops on automatic face and gesture recognition (FG), pages 1-6. IEEE, 2013." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 567, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 567, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 308, + 567, + 545, + 623 + ], + "type": "text", + "content": "[73] Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3661–3670, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 624, + 545, + 668 + ], + "type": "text", + "content": "[74] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. 
Springer, 2014." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 713 + ], + "type": "text", + "content": "[75] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaou Tang. Learning deep representation for face alignment with auxiliary attributes. IEEE transactions on pattern analysis and machine intelligence, 38(5):918-930, 2015." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12757" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 575 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[76] Aihua Zheng, Feixia Zhu, Hao Zhu, Mandi Luo, and Ran He. Talking face generation via learning semantic and temporal synchronous landmarks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3682-3689. IEEE, 2021." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 288, + 184 + ], + "type": "text", + "content": "[77] Erjin Zhou, Haoqiang Fan, Zhimin Cao, Yuning Jiang, and Qi Yin. Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In Proceedings of the IEEE international conference on computer vision workshops, pages 386-391, 2013." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 185, + 288, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 185, + 288, + 250 + ], + "spans": [ + { + "bbox": [ + 48, + 185, + 288, + 250 + ], + "type": "text", + "content": "[78] Hang Zhou, Yasheng Sun, Wayne Wu, Chen Change Loy, Xiaogang Wang, and Ziwei Liu. Pose-controllable talking face generation by implicitly modularized audio-visual representation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4176-4186, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 251, + 288, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 251, + 288, + 296 + ], + "spans": [ + { + "bbox": [ + 48, + 251, + 288, + 296 + ], + "type": "text", + "content": "[79] Yang Zhou, Xintong Han, Eli Shechtman, Jose Echevarria, Evangelos Kalogerakis, and Dingzeyu Li. Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 297, + 288, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 297, + 288, + 340 + ], + "spans": [ + { + "bbox": [ + 48, + 297, + 288, + 340 + ], + "type": "text", + "content": "[80] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. 
In European conference on computer vision, pages 592-608. Springer, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 341, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 341, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 341, + 288, + 396 + ], + "type": "text", + "content": "[81] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision, pages 2223-2232, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 396, + 288, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 396, + 288, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 396, + 288, + 441 + ], + "type": "text", + "content": "[82] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 442, + 288, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 442, + 288, + 486 + ], + "spans": [ + { + "bbox": [ + 48, + 442, + 288, + 486 + ], + "type": "text", + "content": "[83] Xiangyu Zhu, Zhen Lei, Xiaoming Liu, Hailin Shi, and Stan Z Li. Face alignment across large poses: A 3d solution. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 146-155, 2016." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 487, + 288, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 487, + 288, + 530 + ], + "spans": [ + { + "bbox": [ + 48, + 487, + 288, + 530 + ], + "type": "text", + "content": "[84] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z Li. Face alignment in full pose range: A 3d total solution. IEEE transactions on pattern analysis and machine intelligence, 41(1):78-92, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 531, + 288, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 531, + 288, + 575 + ], + "spans": [ + { + "bbox": [ + 48, + 531, + 288, + 575 + ], + "type": "text", + "content": "[85] Xiangxin Zhu and Deva Ramanan. Face detection, pose estimation, and landmark localization in the wild. In 2012 IEEE conference on computer vision and pattern recognition, pages 2879-2886. IEEE, 2012." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "12758" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_content_list.json b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4249b1fceb00702748f798953decd67137e83cfd --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_content_list.json @@ -0,0 +1,1460 @@ +[ + { + "type": "text", + "text": "3D-Aware Multi-Class Image-to-Image Translation with NeRFs", + "text_level": 1, + "bbox": [ + 163, + 130, + 805, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Senmao Li $^{1}$ Joost van de Weijer $^{2}$ Yaxing Wang $^{1*}$", + "bbox": [ + 272, + 189, + 702, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fahad Shahbaz Khan $^{3,4}$ Meiqin Liu $^{5}$ Jian Yang $^{1}$", + "bbox": [ + 284, + 212, + 702, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ VCIP,CS, Nankai University, $^{2}$ Universitat Autònoma de Barcelona", + "bbox": [ + 217, + 234, + 751, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Mohamed bin Zayed University of AI, $^{4}$ Linkoping University, $^{5}$ Beijing Jiaotong University", + "bbox": [ + 120, + 258, + 846, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "senmaonk@gmail.com {yaxing,csjyang}@nankai.edu.cn joost@cvc.uab.es", + "bbox": [ + 192, + 285, + 776, + 300 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "fahad.khan@liu.se mqliu@bjtu.edu.cn", + "bbox": [ + 326, + 308, + 637, + 321 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/22fc17fea1f2d332d96b8f08d9e486c59a1987425629a67389d12cce8b964b1e.jpg", + "image_caption": [ + "Figure 1. 3D-aware I2I translation: given a view-consistent 3D scene (the input), our method maps it into a high-quality target-specific image. Our approach produces consistent results across viewpoints." + ], + "image_footnote": [], + "bbox": [ + 76, + 330, + 890, + 665 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 712, + 313, + 728 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in 3D-aware generative models (3D-aware GANs) combined with Neural Radiance Fields (NeRF) have achieved impressive results. However no prior works investigate 3D-aware GANs for 3D consistent multiclass image-to-image (3D-aware I2I) translation. Naively using 2D-I2I translation methods suffers from unrealistic shape/identity change. To perform 3D-aware multi-class I2I translation, we decouple this learning process into a multi-class 3D-aware GAN step and a 3D-aware I2I trans", + "bbox": [ + 75, + 743, + 470, + 878 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "lation step. 
In the first step, we propose two novel techniques: a new conditional architecture and an effective training strategy. In the second step, based on the well-trained multi-class 3D-aware GAN architecture, that preserves view-consistency, we construct a 3D-aware I2I translation system. To further reduce the view-consistency problems, we propose several new techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. In extensive experiments on two datasets, quantitative and qualitative results demonstrate that we successfully perform 3D-aware I2I translation with multi-view consistency. Code is", + "bbox": [ + 500, + 713, + 893, + 893 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*The corresponding author.", + "bbox": [ + 94, + 888, + 243, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "12652", + "bbox": [ + 480, + 945, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 78, + 135, + 209, + 150 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Radiance Fields (NeRF) have increasingly gained attention with their outstanding capacity to synthesize high-quality view-consistent images [31,39,66]. Benefiting from the adversarial mechanism [11], StyleNeRF [12] and concurrent works [4, 8, 44, 69] have successfully synthesized high-quality view-consistent, detailed 3D scenes by combining NeRF with StyleGAN-like generator design [22]. This recent progress in 3D-aware image synthesis has not yet been extended to 3D-aware I2I translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class (see Figure 1).", + "bbox": [ + 75, + 160, + 468, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A naive strategy is to use well-designed 2D-I2I translation methods [15, 16, 26, 28, 46, 63, 65, 70]. These methods, however, suffer from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video. Main target class characteristics, such as hairs, ears, and noses, are not geometrically realistic, leading to unrealistic results which are especially disturbing when applying I2I to translate videos. Also, these methods typically underestimate the viewpoint change and result in target videos with less viewpoint change than the source video. Another direction is to apply video-to-video synthesis methods [2, 3, 6, 30, 53]. These approaches, however, either rely heavily on labeled data or multi-view frames for each object. In this work, we assume that we only have access to single-view RGB data.", + "bbox": [ + 75, + 327, + 470, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To perform 3D-aware I2I translation, we extend the theory developed for 2D-I2I with recent developments in 3D-aware image synthesis. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. 
The former can synthesize view-consistent 3D scenes given a scene label, thereby addressing the 3D inconsistency problems we discussed for 2D-I2I. We will use this 3D-aware generative model to initialize our 3D-aware I2I model. It therefore inherits the capacity of synthesizing 3D consistent images. To train effectively a multi-class 3D-aware generative model (see Figure 2(b)), we provide a new training strategy consisting of: (1) training an unconditional 3D-aware generative model (i.e., StyleNeRF) and (2) partially initializing the multiclass 3D-aware generative model (i.e., multi-class StyleNeRF) with the weights learned from StyleNeRF. In the 3D-aware I2I translation step, we design a 3D-aware I2I translation architecture (Figure 2(f)) adapted from the trained multi-class StyleNeRF network. To be specific, we use the main network of the pretrained discriminator (Figure 2(b)) to initialize the encoder $E$ of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I gen", + "bbox": [ + 75, + 551, + 470, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "erator (Figure 2(f)). This initialization inherits the capacity of being sensitive to the view information.", + "bbox": [ + 498, + 90, + 890, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Directly using the constructed 3D-aware I2I translation model (Figure 2(f)), there still exists some view-consistency problem. This is because of the lack of multi-view consistency regularization, and the usage of the single-view image. Therefore, to address these problems we introduce several techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss.", + "bbox": [ + 496, + 121, + 893, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In sum, our work makes the following contributions:", + "bbox": [ + 517, + 242, + 874, + 257 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We are the first to explore 3D-aware multi-class I2I translation, which allows generating 3D consistent videos.", + "- We decouple 3D-aware I2I translation into two steps. First, we propose a multi-class StyleNeRF. To train this multi-class StyleNeRF effectively, we provide a new training strategy. The second step is the proposal of a 3D-aware I2I translation architecture.", + "- To further address the view-inconsistency problem of 3D-aware I2I translation, we propose several techniques: a U-net-like adaptor, a hierarchical representation constraint and a relative regularization loss.", + "- On extensive experiments, we considerably outperform existing 2D-I2I systems with our 3D-aware I2I method when evaluating temporal consistency." + ], + "bbox": [ + 500, + 265, + 890, + 503 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 500, + 517, + 650, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Neural Implicit Fields. Using neural implicit fields to represent 3D scenes has shown unprecedented quality. [37, 38, 43, 45, 48, 51] use 3D supervision to predict neural implicit fields. Recently, NeRF has shown powerful performance to neural implicit representations. 
NeRF and its variants [31, 39, 66] utilize a volume rendering technique for reconstructing a 3D scene as a combination of neural radiance and density fields to synthesize novel views.", + "bbox": [ + 496, + 545, + 890, + 666 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D-aware GANs Recent approaches [5, 9, 13, 19, 35, 40-42, 52, 62, 68] learn neural implicit representations without 3D or multi-view supervisions. Combined with the adversarial loss, these methods typically randomly sample viewpoints, render photorealistic 2D images, and finally optimize their 3D representations. StyleNeRF [12] and concurrent works [4,8,44,69] have successfully synthesized high-quality view-consistent, detailed 3D scenes with StyleGAN-like generator design [22]. In this paper, we investigate 3D-aware image-to-image (3D-aware I2I) translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class. We combine transfer learning of GANs [55, 60].", + "bbox": [ + 496, + 670, + 890, + 866 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "I2I translation. I2I translation with GAN [16, 57, 59, 61] has increasingly gained attention in computer vision. Based", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "available in 3DI2I.", + "bbox": [ + 76, + 90, + 207, + 104 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "12653", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "on the differences of the I2I translation task, recent works focus on paired I2I translation [10, 16, 71], unpaired I2I translation [1, 18, 24, 27, 32, 36, 46, 50, 56, 58, 63, 64, 70], diverse I2I translation [24, 32, 36, 46, 64, 70] and scalable I2I translation [7, 29, 65]. However, none of these approaches addresses the problem of 3D-aware I2I. For the 3D scenes represented by neural implicit fields, directly using these methods suffers from view-inconsistency.", + "bbox": [ + 75, + 90, + 472, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 229, + 168, + 244 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Problem setting. Our goal is to achieve 3D consistent multi-class I2I translation trained on single-view data only. The system is designed to translate a viewpoint-video consisting of multiple images (source domain) into a new, photorealistic viewpoint-video scene of a target class. Furthermore, the system should be able to handle multi-class target domains. We decouple our learning into a multi-class 3D-aware generative model step and a multi-class 3D-aware I2I translation step.", + "bbox": [ + 75, + 258, + 472, + 397 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Multi-class 3D-aware generative model", + "text_level": 1, + "bbox": [ + 76, + 407, + 413, + 424 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Let $\\mathcal{I}_{\\mathcal{R}\\mathcal{G}\\mathcal{B}} \\in \\mathbb{R}^{H \\times W \\times 3}$ be in the image domain. In this work, we aim to map a source image into a target sample conditioned on the target domain label $l \\in \\{1, \\dots, L\\}$ and a random noise vector $\\mathbf{z} \\in \\mathbb{R}^{\\mathbf{Z}}$ . 
Let vector $\\mathbf{x}$ and $\\mathbf{d}$ be 3D location and 2D viewing direction, respectively.", + "bbox": [ + 75, + 431, + 468, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unconditional 3D-aware generative model. StyleNeRF [12] introduces a 5D function (3D location $x$ and 2D viewing direction $d$ ) to predict the volume density $\\sigma$ and RGB color $c$ . Both $\\sigma$ and $c$ are further used to render an image. As shown on Figure 2(a) StyleNeRF consists of four subnetworks: a mapping network $M$ , a fully connected layer $F$ , a generator $G$ and a discriminator $D$ . The mapping network $M$ takes random noise $z$ as input, and outputs latent code $w$ , which is further fed into both the fully connected layer $F$ and generator $G$ . Given the 3D location $x$ , the 2D viewing direction $d$ and latent code $w$ , StyleNeRF renders the feature map $f$ :", + "bbox": [ + 75, + 512, + 472, + 696 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {f} (\\boldsymbol {r}) = \\int_ {0} ^ {\\infty} p (t) \\boldsymbol {c} (\\boldsymbol {r} (t), \\boldsymbol {d}) d t\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 708, + 328, + 739 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma (\\boldsymbol {r} (s)) d s\\right) \\cdot \\sigma_ {\\boldsymbol {w}} (\\boldsymbol {r} (t)) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 742, + 468, + 776 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {c}, \\sigma = F (\\boldsymbol {x}, \\boldsymbol {d}, \\boldsymbol {w}),\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 779, + 263, + 795 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\boldsymbol{r}(t) = \\boldsymbol{o} + t\\boldsymbol{d}$ ( $\\boldsymbol{o}$ is the camera origin) is a camera ray for each feature representation position. Generator $G$ takes as an input the representation $\\boldsymbol{f}$ and the latent code $\\boldsymbol{w}$ , and outputs view-consistent photo-realistic novel result $\\hat{I}_{RGB}$ . The discriminator $D$ is to distinguish real images $I_{RGB}$ from generated images $\\hat{I}_{RGB}$ .", + "bbox": [ + 76, + 809, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The fully objective of StyleNeRF is as following:", + "bbox": [ + 517, + 90, + 846, + 107 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {G} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {p} \\sim \\mathcal {P}} [ v (D (G (F (\\boldsymbol {z}, \\boldsymbol {x}, \\boldsymbol {d}), M (\\boldsymbol {z}))) ] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) + \\lambda \\| \\nabla D (I _ {R G B}) \\| ^ {2}) \\right] \\tag {2} \\\\ + \\beta \\cdot \\mathcal {L} _ {\\mathrm {N e R F - p a t h}} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 516, + 119, + 890, + 174 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $v(u) = -\\log (1 + \\exp (-u))$ , and $p_{\\mathrm{data}}$ is the data distribution. $\\mathcal{L}_{\\mathrm{NeRF - path}}$ is NeRF path regularization used in StyleNeRF. We also set $\\beta = 0.2$ and $\\lambda = 0.5$ following StyleNeRF.", + "bbox": [ + 496, + 185, + 893, + 246 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Conditional 3D-aware generative model. 
Figure 2(b) shows the proposed multi-class 3D-aware generative model (i.e., multi-class StyleNeRF). Compared to the StyleNeRF architecture (Figure 2(a)), we introduce two mapping networks: $M_{1}$ and $M_{2}$ . The mapping network $M_{1}$ outputs the latent code $\\boldsymbol{w}_{1}$ . While the mapping network $M_{2}$ takes as input the concatenated noise $\\boldsymbol{z}$ and class embedding $e_{l-th}$ , and outputs the latent code $\\boldsymbol{w}_{2}$ . The second mapping network $M_{2}$ aims to guide the generator $G$ to synthesize a class-specific image. Here we do not feed the latent code $\\boldsymbol{w}_{2}$ into NeRF's fully connected layer $F$ , since we expect $F$ to learn a class-agnostic feature representation, which contributes to perform multi-class 3D-aware I2I translation.", + "bbox": [ + 496, + 250, + 890, + 445 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To be able to train multi-class StyleNeRF we adapt the loss function. We require $D$ to address multiple adversarial classification tasks simultaneously, as in [33]. Specifically, given output $D \\in \\mathbb{R}^L$ , we locate the $l$ -th class response. Using the response for the $l$ -th class, we compute the adversarial loss and back-propagate gradients:", + "bbox": [ + 496, + 446, + 890, + 537 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {G} ^ {l} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {x} \\sim \\mathcal {P} _ {x}, \\boldsymbol {d} \\sim \\mathcal {P} _ {d}} \\left[ v (D (G (\\hat {I} _ {R G B})) _ {\\boldsymbol {l} - t h} \\right] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) _ {l - t h} + \\lambda \\| \\nabla D (I _ {R G B}) _ {l _ {t h}} \\| ^ {2}) \\right] \\\\ + \\beta \\cdot \\mathcal {L} _ {\\text {N e R F - p a t h}}. \\tag {3} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 506, + 549, + 890, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We initialize the multi-class StyleNeRF with the weights learned with the unconditional StyleNeRF (E.q. 2), since the training from scratch fails to convergence. Results of this are show in Figs. 7. To be specific, we directly copy the weights from the one learned from StyleNeRF for $M_{1}$ , $F$ and $G$ with the same parameter size. For the mapping network $M_{2}$ , we duplicate the weight from $M$ except for the first layer, which is trained from scratch because of the different parameter sizes. The discriminator is similarly initialized except for the last layer, which is a new convolution layer with $L$ output channels. Using the proposed initialization method, we successfully generate class-specific photorealistic high-resolution result.", + "bbox": [ + 496, + 626, + 890, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 3D-aware I2I translation", + "text_level": 1, + "bbox": [ + 500, + 830, + 725, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 2 (f) shows the 3D-aware I2I translation network at inference time. It consists of the encoder $E$ , the generator $G$ and two mapping networks $M_{1}$ and $M_{2}$ . 
Inspired", + "bbox": [ + 496, + 854, + 893, + 901 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "12654", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/be837d796fb2659dd32c9f9068e8e2b55d4d30650a14b68ad0f939ad6e8997d9.jpg", + "image_caption": [ + "Figure 2. Overview of our method. (a) We first train a 3D-aware generative mode (i.e., StyleNeRF) with single-view photos. (b) We extend StyleNerf to multi-class StyleNerf. We introduce an effective training strategy: initializing multi-class StyleNeRF with StyleNeRF. (c) The training of the proposed 3D-aware I2I translation. It consists of the encoder $E$ , the adaptor $A$ , the generator $G$ and two mapping networks $M_1$ and $M_2$ . We freeze all networks except for training the adaptor $A$ . The encoder is initialized by the main networks of the pretrained discriminator. We introduce several techniques to address the view-consistency problems: including a U-net-like adaptor $A$ , (d) relative regularization loss and (e) hierarchical representation constrain. (f) Usage of proposed model at inference time." + ], + "image_footnote": [], + "bbox": [ + 76, + 85, + 888, + 356 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "by DeepI2I [61], we use the pretrained discriminator (Figure 2(b)) to initialize the encoder $E$ of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I generator. To align the encoder with the generator, [61] introduces a Resnet-like adaptor network to communicate the encoder and decoder. The adaptor is trained without any real data. However, directly using these techniques for 3D-aware I2I translation still suffers from some view-consistency problems. Therefore, in the following, we introduce several designs to address this problem: a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss.", + "bbox": [ + 75, + 455, + 472, + 652 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "U-net-like adaptor. As shown in Figure 2(c), to overcome 3D-inconsistency in the results, we propose a U-net-like adaptor $A$ . This design contributes to preserve the spatial structure of the input feature. This has been used before for semantic segmentation tasks and label to image translation [17]. In this paper, we experimentally demonstrate that the U-net-like adaptor is effective to reduce the inconsistency.", + "bbox": [ + 75, + 662, + 468, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hierarchical representation constrain. As shown in Figure 2(e), given the noise $\\mathbf{z}$ , 3D location $\\mathbf{x}$ and 2D viewing direction $\\mathbf{d}$ the fully connected layer $F$ renders the 3D-consistent feature map $\\mathbf{f} = F(\\mathbf{x}, \\mathbf{d}, \\mathbf{w}_1) = F(\\mathbf{x}, \\mathbf{d}, M1(\\mathbf{z}))$ . We further extract the hierarchical representation $\\{G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k\\}$ as well as the synthesized image $\\hat{I}_{RGB} = G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)$ . Here $G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k$ is the", + "bbox": [ + 75, + 794, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$k$ -th $(k = m, \\dots, n, (n > m))$ ResBlock $^1$ output of the generator $G$ . 
We then take the generated image $\\hat{I}_{RGB}$ as input for the encoder $E$ : $E(\\hat{I}_{RGB})$ , which is fed into the adaptor network $A$ , that is $\\hat{\\pmb{f}} = A(E(\\hat{I}_{RGB}))$ . In this step, our loss is", + "bbox": [ + 496, + 455, + 890, + 530 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {A} = \\left\\| \\boldsymbol {f} - \\hat {\\boldsymbol {f}} \\right\\| _ {1}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 633, + 527, + 890, + 554 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the intermediate layers, we propose a hierarchical representation constrain. Given the output $\\hat{\\pmb{f}}$ and the latent codes (i.e., $\\pmb{w}_1$ and $\\pmb{w}_2$ )², we similarly collect the hierarchical feature $\\left\\{G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2)_k\\right\\}$ . The objective is", + "bbox": [ + 496, + 560, + 890, + 630 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {H} = \\sum_ {k} \\left\\| G (\\boldsymbol {f}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} - G (\\hat {\\boldsymbol {f}}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} \\right\\| _ {1}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 519, + 638, + 890, + 671 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this step, we freeze every network except for the U-net-like adaptor which is learned. Note that we do not access to any real data to train the adaptor, since we utilize the generated image with from the trained generator (Figure 2(b)).", + "bbox": [ + 496, + 684, + 890, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Relative regularization loss. We expect to input the consistency of the translated 3D scene with single-image regularization instead of the images from the consecutive views. We propose a relative regularization loss based on neighboring patches. We assume that neighboring patches", + "bbox": [ + 496, + 750, + 890, + 825 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1After each ResBlock the feature resolution is half of the previous one in the encoder, and two times in generator. In the generator, the last output is image.", + "bbox": [ + 500, + 837, + 890, + 873 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "$^{2}$ Both $\\pmb{w}1$ and $\\pmb{w}2$ are the ones used when generating image $\\hat{I}_{RGB}$ .", + "bbox": [ + 517, + 873, + 874, + 887 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3More precisely, that is the feature map in this paper.", + "bbox": [ + 517, + 887, + 797, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "12655", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8591a38870ce726a2a5b62f90cc739510cd31273962cadf8cac94b529abe50a1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset MethodCelebA-HQAFHQ
TC↓FID↓TC↓FID↓
*MUNIT30.24031.428.49741.5
*DRIT35.45252.125.34195.6
*MSGAN31.64133.134.23661.4
StarGANv210.25013.63.02516.1
Ours (3D)3.74322.32.06715.3
TC↓(unc)FID↓TC↓(unc)FID↓
†Liu et al. [34]13.31517.83.46220.0
StarGANv210.25012.23.0259.9
†Kunhee et al. [23]10.4626.73.24110.0
Ours (3D)3.74318.72.06711.4
", + "bbox": [ + 81, + 88, + 467, + 250 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "are equivalent to that on corresponding patches of two consecutive views. For example, when inputting multi-view consistent scene images, the position of eyes are consistently moving. The fully connected layers (i.e., NeRF mode) $F$ renders the view-consistent feature map $f$ , which finally decides the view-consistent reconstructed 3D scene. Thus, we expect the output $\\hat{f}$ of the adaptor $A$ to obtain the view-consistent property of the feature map $f$ .", + "bbox": [ + 75, + 310, + 468, + 431 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We randomly sample one vector from the feature map $\\pmb{f}$ (e.g., red square in (Figure 2(d))), denoted as $\\pmb{f}^{\\eta}$ . Then we sample the eight nearest neighboring vectors of $\\pmb{f}^{\\eta}$ (dark green square in Figure 2(d)), denoted by $\\pmb{f}^{\\eta, \\varepsilon}$ where $\\varepsilon = 1, \\dots, 8$ is the neighbor index. Similarly, we sample vectors $\\hat{\\pmb{f}}^{\\eta}$ and $\\hat{\\pmb{f}}^{\\eta, \\varepsilon}$ from the feature map $\\hat{\\pmb{f}}$ (red and dark green dash square in Figure 2(d)). We then compute the patch difference:", + "bbox": [ + 76, + 431, + 470, + 551 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} = \\boldsymbol {f} ^ {\\eta} \\ominus \\boldsymbol {f} ^ {\\eta , \\varepsilon}, d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} = \\hat {\\boldsymbol {f}} ^ {\\eta} \\ominus \\hat {\\boldsymbol {f}} ^ {\\eta , \\varepsilon}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 561, + 468, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\ominus$ represents vector subtraction. In order to preserve the consistency, we force these patch differences to be small:", + "bbox": [ + 75, + 594, + 468, + 635 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {R} = \\left\\| d _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} - d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} \\right\\| _ {1}. \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 633, + 468, + 661 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The underlying intuition is straightforward: the difference vectors of the same location should be most relevant in the latent space compared to other random pairs.", + "bbox": [ + 75, + 664, + 468, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The final objective is", + "bbox": [ + 96, + 710, + 238, + 724 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {H} + \\mathcal {L} _ {A} + \\mathcal {L} _ {R}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 736, + 468, + 753 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 773, + 209, + 790 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental setup", + "text_level": 1, + "bbox": [ + 76, + 797, + 264, + 815 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training details. We use the trained StyleNeRF to partially initialize our multi-class StyleNeRF architecture. We adapt the structure of the multi-class StyleNeRF to the 3D-aware I2I architecture. The proposed method is implemented in Pytorch [47]. 
We use Adam [25] with a batch size", + "bbox": [ + 75, + 824, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a7aff995da5b5ff66886b6a6eb7106846318f7b6ae9973f050081c13f9b404be.jpg", + "table_caption": [ + "Table 1. Comparison with baselines on TC and FID metrics.* denotes that we used the results provided by StarGANv2. † means that we used the pre-trained networks provided by authors." + ], + "table_footnote": [], + "table_body": "
Ini.Ada.Hrc.Rrl.TC↓FID↓
YNNN2.61223.8
YYNN2.32423.1
YYYN2.20416.1
YYYY2.06715.3
", + "bbox": [ + 500, + 87, + 888, + 143 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Impact of several components in the performance on AFHQ. The second row is the case where the 3D-aware I2I translation model is initialized by weights learned from the multi-class StylyNeRF. Then it is trained with a Resnet-based adaptor and $L_{1}$ loss between the representations $f$ and $\\hat{f}$ . The proposed techniques continuously improve the consistency and performance. Ini.: initialization method for multi-class StyleNeRF, Ada.: U-net-like adaptor, Hrc.: Hierarchical representation constrain, Rrl: Relative regularization loss.", + "bbox": [ + 496, + 154, + 893, + 280 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/eb275f460a0b3cfa2c4f46a236179adafb7a047d7edcab94d40fe236008a81fe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 297, + 888, + 470 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5352e8612629871db006d9a0389bb05f6f4aa4bd3b02af7470e50f420b899545.jpg", + "image_caption": [ + "Figure 3. (Top) Using a single mapping network which takes as input the concatenated class embedding and the noise. We find it fails to generate target-specific realistic image. (Bottom) we use two mapping networks without concatenating their outputs like the proposed method. This design fails to generate 3D-aware results." + ], + "image_footnote": [], + "bbox": [ + 501, + 474, + 890, + 647 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "of 64, using a learning rate of 0.0002. We use $2 \\times$ Quadro RTX 3090 GPUs (24 GB VRAM) to conduct all our experiments. We show the network details and more results on Supp. Mat..", + "bbox": [ + 496, + 743, + 890, + 805 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Datasets. Our experiments are conducted on the Animal Faces (AFHQ) [7] and CelebA-HQ [21] datasets. AFHQ contains 3 classes, each one has about 5000 images. In CelebA-HQ, we use gender as a class, with $\\sim 10\\mathrm{k}(10057)$ male and $\\sim 18\\mathrm{k}(17943)$ female images in the training set. In this paper, all images are resized to $256 \\times 256$ .", + "bbox": [ + 496, + 809, + 892, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "12656", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/362609a517965255f3860b4570bf070f137aeef444948dbcba8435065046331a.jpg", + "image_caption": [ + "Figure 4. Comparative results between the proposed method and StarGANv2. We observe that StarGANv2 suffers from underestimating viewpoint changes when changing the input viewpoint (first column). It also leads to identity change (third and fourth columns), and a geometrically unrealistic ear (last two columns)." + ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 893, + 392 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d7188db05ea4a7091eb376ac596223270013f449c014d143d1237e2075be336d.jpg", + "image_caption": [ + "Figure 5. The generated images of (top) $G(\\pmb{f}, \\pmb{w}_1, \\pmb{w}_2)$ and (bottom) $G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2)$ , which show that we correctly align the outputs of both the NeRF mode $F$ and the adaptor $A$ ." + ], + "image_footnote": [], + "bbox": [ + 78, + 450, + 468, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. 
We compare to MUNIT [15], DRIT [28], MSGAN [20], StarGANv2 [7], [23] and [34], all of which perform image-to-image translation.", + "bbox": [ + 75, + 804, + 468, + 851 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Measures. We employ the widely used metric for evaluation, namely Fréchet Inception Distance (FID) [14]. We also propose a new measure in which we", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "combine two metrics, one which measures the consistency between neighboring frames (which we want to be low), and another that measures the diversity over the whole video (which we would like to be high). We adopt a modified temporal loss (TL) [54]. This temporal loss computes the Frobenius difference between two frames to evaluate the video consistency. Only considering this measure would lead to high scores when neighboring frames in the generated video are all the same. For successful 3D-aware I2I translation, we expect the system to be sensitive to view changes in the source video and therefore combine low consecutive frame changes with high diversity over the video. Therefore, we propose to compute LPIPS [67] for each video (vLPIPS), which indicates the diversity of the generated video sequence. To evaluate both the consistency and the sensitiveness of the generated video, we propose a new temporal consistency metric (TC):", + "bbox": [ + 496, + 453, + 893, + 709 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT C = T L / v L P I P S. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 619, + 720, + 890, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Due to the small changes between two consecutive views, for each video we use frame interval 1, 2 and 4 in between to evaluate view-consistency. Note that a lower TC value is better.", + "bbox": [ + 496, + 747, + 893, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Quantitative and qualitative results.", + "text_level": 1, + "bbox": [ + 500, + 816, + 812, + 834 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate the performance of the proposed method on both the AFHQ animal and CelebA human face dataset. As reported in Table 1, in terms of TC the proposed method achieves the best score on two datasets. For example, we", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "12657", + "bbox": [ + 480, + 944, + 517, + 957 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/eaf356b46896241f4df6c80258727e840afe23a669cf43b2da75e5abcda410e1.jpg", + "image_caption": [ + "Figure 6. Interpolation between the dog and wildlife classes." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 467, + 398 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "have 3.743 TC on CelebA-HQ, which is better than StarGANv2 (10.250 TC). This indicates that our method dramatically improves consistency. As reported in Table 1 (up), across both datasets, the proposed method consistently outperforms the baselines with significant gains in terms of FID and LPIPS, except for StarGANv2 which obtains superior results. However, on AFHQ we achieve better FID score than StarGANv2. Kunhee et al. [23] reports the unconditional FID ((unc)FID) value which is computed between synthesized images and training samples instead of each class. As reported in Table 1 (bottom), We are able to achieve completing results on uncFID metrics. 
Note that while 2D I2I translation (e.g., StarGANv2) can obtain high-quality for each image, they cannot synthesize images of the same scene with 3D consistency, and suffers from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video.", + "bbox": [ + 76, + 429, + 468, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Figures 1,4, we perform 3D-aware I2I translation. When changing the input viewpoint (Figure 4 (first two columns)), the outputs of StarGANv2 do not maintain the correct head pose, and underestimate the pose changes with respect to the frontal view. To estimate that this is actually the case, we also compute the diversity (i.e., vLPIPS) in a single video sequence. For example, both StarGANv2 and our method are 0.032 and 0.101 on CelebA-HQ. This confirms that the diversity (due to pose changes) is lowest for StarGANv2. More clearly showing the limitations of standard I2I methods for 3D-aware I2I, we observe that StarGANv2 suffers from unrealistic changes when changing the viewpoint. For example, when translating the class cat to wildlife, the generated images changes from wolf to leop", + "bbox": [ + 76, + 688, + 468, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ard when varying the viewpoint (Figure 4 (third and fourth columns)). Also, the main target class characteristics, such as ears, are not geometrically realistic, leading to unrealistic 3D scene videos. Our method, however, eliminates these shortcomings and performs efficient high-resolution image translation with high 3D-consistency, which preserves the input image pose and changes the style of the output images. We show high-resolution images $(1024 \\times 1024)$ on Supp. Mat..", + "bbox": [ + 496, + 90, + 890, + 227 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation study", + "text_level": 1, + "bbox": [ + 500, + 241, + 648, + 257 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Conditional 3D-aware generative architecture In this experiment, we verify our network design by comparing it with two alternative network designs. As shown in Figure 3(up), we explore a naive strategy: using one mapping which takes as input the concatenated class embedding and the noise. In this way, the fully connected network $F$ outputs the class-specific latent code $w$ , which is fed into the fully connected network $F$ to output the class-specific representation $f$ . Here, both the latent code $w$ and the representation $f$ are decided by the same class. However, when handling 3D-aware multi-class I2I translation task, the feature representation $\\hat{f}$ is combined with the latent code $w$ from varying class embeddings, which leads to unrealistic image generation (Figure. 3(up)).", + "bbox": [ + 496, + 268, + 890, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Figure 3(bottom), we utilize two mapping networks without concatenating their outputs like the proposed method. This design guarantees that the output of the fully connected layers $F$ are class-agnostic. We experimentally observe that this model fails to handle 3D-aware generation.", + "bbox": [ + 496, + 482, + 890, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effective training strategy for multi-class 3D-aware generative model. We evaluate the proposed training strategy on AFHQ and CelebA-HQ datasets. 
We initialize the proposed multi-class 3D I2I architecture from scratch and the proposed method, respectively. As shown on Figure 7 (up), the model trained from scratch synthesizes unrealistic faces on CelebA-HQ dataset, and low quality cats on AFHQ. This is due to the style-based conditional generator which is hard to be optimized and causes mode collapse directly [49]. The proposed training strategy, however, manages to synthesize photo-realistic high-resolution images with high multi-view consistency. This training strategy first performs unconditional learning, which leads to satisfactory generative ability. Thus, we relax the difficulty of directly training the conditional model.", + "bbox": [ + 496, + 578, + 890, + 803 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Alignment and interpolation. Figure 5 exhibits the outputs of the generator when taking as input the feature representation $\\pmb{f}$ and $\\hat{\\pmb{f}}$ . This confirms that the proposed method successfully aligns the outputs of the fully connected layers $F$ and the adaptor $A$ . Figure 6 reports interpolation by freezing the input images while interpolating the class em", + "bbox": [ + 496, + 810, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "12658", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/755a42b84a4c04efaedb54b9602a7203a1c2ff6c438f6557270cb0b943f1f84d.jpg", + "image_caption": [ + "Figure 7. Qualitative results of multi-class StyleNeRF training from scratch (up) and from the proposed strategy (bottom)." + ], + "image_footnote": [], + "bbox": [ + 76, + 88, + 890, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "bedding between two classes. Our model still manages to preserve the view-consistency, and generate high quantity images with even given never seen class embeddings.", + "bbox": [ + 75, + 611, + 470, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Techniques for improving the view-consistency. We perform an ablation study on the impact of several design elements on the overall performance of the system, which includes the proposed initialization 3D-aware I2I translation model (Ini.), U-net-like adaptor (Ada.), hierarchical representation constrain (Hrc.) and relative regularization loss (Rrl.). We evaluate these four factors in Table 2. The results show that only using the proposed initialization (the second row of the Table 2) has already improved the view-consistency comparing to StarGANv2 (Table 1). Utilizing either U-net-like adaptor (Ada.) or hierarchical representation constrain (Hrc.) further leads to performance gains. Finally we are able to get the best score when further adding relative regularization loss (Rrl.) to the 3D-aware I2I translation model.", + "bbox": [ + 75, + 672, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 609, + 617, + 626 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper we first explore 3D-aware I2I translation. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. In the first step, we propose a new multi-class StyleNeRF architecture, and an effective training strategy. We design the 3D-aware I2I translation model with the well-optimized multi-class StyleNeRF model. It inherits the capacity of synthesizing 3D consistent images. 
In the second step, we propose several techniques to further reduce the view-consistency of the 3D-aware I2I translation.", + "bbox": [ + 496, + 643, + 890, + 796 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. We acknowledge the support from the Key Laboratory of Advanced Information Science and Network Technology of Beijing (XDXX2202), and the project supported by Youth Foundation (62202243). We acknowledge the Spanish Government funding for projects PID2019-104174GB-I00, TED2021-132513B-I00.", + "bbox": [ + 496, + 801, + 892, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "12659", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Kyungjune Baek, Yunjay Choi, Youngjung Uh, Jaejun Yoo, and Hyunjung Shim. Rethinking the truly unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14154-14163, 2021. 3", + "[2] Aayush Bansal, Shugao Ma, Deva Ramanan, and Yaser Sheikh. Recycle-gan: Unsupervised video retargeting. In Proceedings of the European conference on computer vision (ECCV), pages 119-135, 2018. 2", + "[3] Dina Bashkirova, Ben Usman, and Kate Saenko. Unsupervised video-to-video translation. arXiv preprint arXiv:1806.03698, 2018. 2", + "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2", + "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2", + "[6] Yang Chen, Yingwei Pan, Ting Yao, Xinmei Tian, and Tao Mei. Mocycle-gan: Unpaired video-to-video translation. In Proceedings of the 27th ACM International Conference on Multimedia, pages 647-655, 2019. 2", + "[7] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 3, 5, 6", + "[8] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10673-10683, 2022. 2", + "[9] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411. IEEE, 2017. 2", + "[10] Abel Gonzalez-Garcia, Joost van de Weijer, and Yoshua Bengio. Image-to-image translation for cross-domain disentanglement. In NeurIPS, pages 1294–1305, 2018. 3", + "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, pages 2672-2680, 2014. 2", + "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2, 3", + "[13] Paul Henderson, Vagia Tsiminaki, and Christoph H Lampert. 
Leveraging 2d data to learn textured 3d mesh generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7498-7507, 2020. 2", + "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a" + ], + "bbox": [ + 78, + 114, + 470, + 902 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "two time-scale update rule converge to a local nash equilibrium. In NeurIPS, pages 6626-6637, 2017. 6", + "[15] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In ECCV, pages 172-189, 2018. 2, 6", + "[16] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2, 3", + "[17] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, pages 1125-1134, 2017. 4", + "[18] Somi Jeong, Youngjung Kim, Eungbean Lee, and Kwanghoon Sohn. Memory-guided unsupervised image-to-image translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6558-6567, 2021. 3", + "[19] Danilo Jimenez Rezende, SM Eslami, Shakir Mohamed, Peter Battaglia, Max Jaderberg, and Nicolas Heess. Unsupervised learning of 3d structure from images. Advances in neural information processing systems, 29, 2016. 2", + "[20] Animesh Karnewar and Oliver Wang. *Msg-gan: Multi-scale gradients for generative adversarial networks*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7799–7808, 2020. 6", + "[21] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018. 5", + "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 2", + "[23] Kunhee Kim, Sanghun Park, Eunyeong Jeon, Taehun Kim, and Daijin Kim. A style-aware discriminator for controllable image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18239-18248, 2022. 5, 6, 7", + "[24] Taeksoo Kim, Moonsu Cha, Hyunsoo Kim, Jungkwon Lee, and Jiwon Kim. Learning to discover cross-domain relations with generative adversarial networks. In ICML, 2017. 3", + "[25] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. *ICLR*, 2014. 5", + "[26] Minsu Ko, Eunju Cha, Sungjoo Suh, Huijin Lee, Jae-Joon Han, Jinwoo Shin, and Bohyung Han. Self-supervised dense consistency regularization for image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18301-18310, June 2022. 2", + "[27] Héctor Laria, Yaxing Wang, Joost van de Weijer, and Bogdan Raducanu. Hyper-gan: Transferring unconditional to conditional gans with hypernetworks. arXiv preprint arXiv:2112.02219, 2021. 3", + "[28] Hsin-Ying Lee, Hung-Yu Tseng, Jia-Bin Huang, Maneesh Kumar Singh, and Ming-Hsuan Yang. Diverse imaged-to-image translation via disentangled representations. In ECCV, 2018. 2, 6", + "[29] Hsin-Ying Lee, Hung-Yu Tseng, Qi Mao, Jia-Bin Huang, Yu-Ding Lu, Maneesh Singh, and Ming-Hsuan Yang. Drit++: Diverse image-to-image translation via disentangled representations. IJCV, pages 1-16, 2020. 
3" + ], + "bbox": [ + 501, + 92, + 890, + 902 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "12660", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Kangning Liu, Shuhang Gu, Andrés Romero, and Radu Timofte. Unsupervised multimodal video-to-video translation via self-supervised learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1030–1040, 2021. 2", + "[31] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. NeurIPS, 2020. 2", + "[32] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. In NeurIPS, pages 700-708, 2017. 3", + "[33] Ming-Yu Liu, Xun Huang, Arun Mallya, Tero Karras, Timo Aila, Jaakko Lehtinen, and Jan Kautz. Few-shot unsupervised image-to-image translation. In CVPR, pages 10551-10560, 2019. 3", + "[34] Yahui Liu, Enver Sangineto, Yajing Chen, Linchao Bao, Haoxian Zhang, Nicu Sebe, Bruno Lepri, Wei Wang, and Marco De Nadai. Smoothing the disentangled latent style space for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10785-10794, 2021. 5, 6", + "[35] Sebastian Lunz, Yingzhen Li, Andrew Fitzgibbon, and Nate Kushman. Inverse graphics gan: Learning to generate 3d shapes from unstructured 2d data. arXiv preprint arXiv:2002.12674, 2020. 2", + "[36] Youssef Alami Mejjati, Christian Richardt, James Tompkin, Darren Cosker, and Kwang In Kim. Unsupervised attention-guided image-to-image translation. In NeurIPS, pages 3693-3703, 2018. 3", + "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[38] Mateusz Michalkiewicz, Jhony K. Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 2", + "[39] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. arXiv preprint arXiv:2003.08934, 2020. 2", + "[40] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7588–7597, 2019. 2", + "[41] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2", + "[42] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. arXiv preprint arXiv:1912.07372, 2019. 
2", + "[44] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2", + "[45] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2", + "[46] Taesung Park, Alexei A. Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for conditional image synthesis. In ECCV, 2020. 2, 3", + "[47] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5", + "[48] Songyou Peng, Michael Niemeyer, Lars M. Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. ArXiv, abs/2003.04618, 2020. 2", + "[49] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-10, 2022. 7", + "[50] Xuning Shao and Weidong Zhang. Spatchgan: A statistical feature based discriminator for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6546-6555, 2021. 3", + "[51] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems, pages 1119–1130, 2019. 2", + "[52] Ayush Tewari, Xingang Pan, Ohad Fried, Maneesh Agrawala, Christian Theobalt, et al. Disentangled3d: Learning a 3d generative model with disentangled geometry and appearance from monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1516-1525, 2022. 2", + "[53] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In NeurIPS, 2018. 2", + "[54] Wenjing Wang, Shuai Yang, Jizheng Xu, and Jiaying Liu. Consistent video style transfer via relaxation and regularization. IEEE Transactions on Image Processing, 29:9125-9139, 2020. 6", + "[55] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. Minegan: effective knowledge transfer from gans to target domains with few images. In CVPR, 2020. 2", + "[56] Yaxing Wang, Abel Gonzalez-Garcia, Joost van de Weijer, and Luis Herranz. SDIT: Scalable and diverse cross-domain image translation. In ACM MM, 2019. 3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "12661", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[57] Yaxing Wang, Salman Khan, Abel Gonzalez-Garcia, Joost van de Weijer, and Fahad Shahbaz Khan. Semi-supervised learning for few-shot image-to-image translation. In CVPR, 2020. 2", + "[58] Yaxing Wang, Hector Laria Mantecon, Joost van de Weijer, Laura Lopez-Fuentes, and Bogdan Raducanu. Transferi2i: Transfer learning for image-to-image translation from small datasets, 2021. 3", + "[59] Yaxing Wang, Joost van de Weijer, and Luis Herranz. Mix and match networks: encoder-decoder alignment for zeropair image translation. 
In CVPR, pages 5467-5476, 2018. 2", + "[60] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring gans: generating images from limited data. In ECCV, pages 218-234, 2018. 2", + "[61] Yaxing Wang, Lu Yu, and Joost van de Weijer. Deep2i: Enabling deep hierarchical image-to-image translation by transferring from gans. NeurIPS, 2020. 2, 4", + "[62] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 2", + "[63] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Unsupervised image-to-image translation with generative prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18332-18341, 2022. 2, 3", + "[64] Zili Yi, Hao Zhang, Ping Tan Gong, et al. Dualgan: Unsupervised dual learning for image-to-image translation. In ICCV, 2017. 3", + "[65] Xiaoming Yu, Yuanqi Chen, Shan Liu, Thomas Li, and Ge Li. Multi-mapping image-to-image translation via learning disentanglement. In NeurIPS, pages 2990-2999, 2019. 2, 3", + "[66] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[67] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6", + "[68] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 2", + "[69] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 2", + "[70] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV, pages 2223-2232, 2017. 2, 3" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 10 + }, + { + "type": "ref_text", + "text": "[71] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darryll, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. In NeurIPS, pages 465-476, 2017. 
3", + "bbox": [ + 501, + 90, + 890, + 147 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "12662", + "bbox": [ + 480, + 945, + 517, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_model.json b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6ef5f792b2500811faf6818c553b55123e003cd2 --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_model.json @@ -0,0 +1,2257 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.165, + 0.131, + 0.806, + 0.154 + ], + "angle": 0, + "content": "3D-Aware Multi-Class Image-to-Image Translation with NeRFs" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.19, + 0.704, + 0.21 + ], + "angle": 0, + "content": "Senmao Li\\(^{1}\\) Joost van de Weijer\\(^{2}\\) Yaxing Wang\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.285, + 0.213, + 0.704, + 0.233 + ], + "angle": 0, + "content": "Fahad Shahbaz Khan\\(^{3,4}\\) Meiqin Liu\\(^{5}\\) Jian Yang\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.236, + 0.753, + 0.255 + ], + "angle": 0, + "content": "\\(^{1}\\)VCIP,CS, Nankai University, \\(^{2}\\)Universitat Autònoma de Barcelona" + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.259, + 0.848, + 0.279 + ], + "angle": 0, + "content": "\\(^{3}\\)Mohamed bin Zayed University of AI, \\(^{4}\\)Linkoping University, \\(^{5}\\)Beijing Jiaotong University" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.286, + 0.777, + 0.301 + ], + "angle": 0, + "content": "senmaonk@gmail.com {yaxing,csjyang}@nankai.edu.cn joost@cvc.uab.es" + }, + { + "type": "text", + "bbox": [ + 0.327, + 0.309, + 0.638, + 0.323 + ], + "angle": 0, + "content": "fahad.khan@liu.se mqliu@bjtu.edu.cn" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.332, + 0.892, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.668, + 0.894, + 0.697 + ], + "angle": 0, + "content": "Figure 1. 3D-aware I2I translation: given a view-consistent 3D scene (the input), our method maps it into a high-quality target-specific image. Our approach produces consistent results across viewpoints." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.713, + 0.314, + 0.729 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.744, + 0.471, + 0.88 + ], + "angle": 0, + "content": "Recent advances in 3D-aware generative models (3D-aware GANs) combined with Neural Radiance Fields (NeRF) have achieved impressive results. However no prior works investigate 3D-aware GANs for 3D consistent multiclass image-to-image (3D-aware I2I) translation. Naively using 2D-I2I translation methods suffers from unrealistic shape/identity change. 
To perform 3D-aware multi-class I2I translation, we decouple this learning process into a multi-class 3D-aware GAN step and a 3D-aware I2I trans" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.714, + 0.894, + 0.895 + ], + "angle": 0, + "content": "lation step. In the first step, we propose two novel techniques: a new conditional architecture and an effective training strategy. In the second step, based on the well-trained multi-class 3D-aware GAN architecture, that preserves view-consistency, we construct a 3D-aware I2I translation system. To further reduce the view-consistency problems, we propose several new techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. In extensive experiments on two datasets, quantitative and qualitative results demonstrate that we successfully perform 3D-aware I2I translation with multi-view consistency. Code is" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.889, + 0.244, + 0.902 + ], + "angle": 0, + "content": "*The corresponding author." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12652" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.078, + 0.092, + 0.208, + 0.105 + ], + "angle": 0, + "content": "available in 3DI2I." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.136, + 0.21, + 0.151 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.161, + 0.47, + 0.327 + ], + "angle": 0, + "content": "Neural Radiance Fields (NeRF) have increasingly gained attention with their outstanding capacity to synthesize high-quality view-consistent images [31,39,66]. Benefiting from the adversarial mechanism [11], StyleNeRF [12] and concurrent works [4, 8, 44, 69] have successfully synthesized high-quality view-consistent, detailed 3D scenes by combining NeRF with StyleGAN-like generator design [22]. This recent progress in 3D-aware image synthesis has not yet been extended to 3D-aware I2I translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class (see Figure 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.328, + 0.471, + 0.552 + ], + "angle": 0, + "content": "A naive strategy is to use well-designed 2D-I2I translation methods [15, 16, 26, 28, 46, 63, 65, 70]. These methods, however, suffer from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video. Main target class characteristics, such as hairs, ears, and noses, are not geometrically realistic, leading to unrealistic results which are especially disturbing when applying I2I to translate videos. Also, these methods typically underestimate the viewpoint change and result in target videos with less viewpoint change than the source video. Another direction is to apply video-to-video synthesis methods [2, 3, 6, 30, 53]. These approaches, however, either rely heavily on labeled data or multi-view frames for each object. In this work, we assume that we only have access to single-view RGB data." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.553, + 0.471, + 0.903 + ], + "angle": 0, + "content": "To perform 3D-aware I2I translation, we extend the theory developed for 2D-I2I with recent developments in 3D-aware image synthesis. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. 
The former can synthesize view-consistent 3D scenes given a scene label, thereby addressing the 3D inconsistency problems we discussed for 2D-I2I. We will use this 3D-aware generative model to initialize our 3D-aware I2I model. It therefore inherits the capacity of synthesizing 3D consistent images. To train effectively a multi-class 3D-aware generative model (see Figure 2(b)), we provide a new training strategy consisting of: (1) training an unconditional 3D-aware generative model (i.e., StyleNeRF) and (2) partially initializing the multiclass 3D-aware generative model (i.e., multi-class StyleNeRF) with the weights learned from StyleNeRF. In the 3D-aware I2I translation step, we design a 3D-aware I2I translation architecture (Figure 2(f)) adapted from the trained multi-class StyleNeRF network. To be specific, we use the main network of the pretrained discriminator (Figure 2(b)) to initialize the encoder \\( E \\) of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I gen" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.121 + ], + "angle": 0, + "content": "erator (Figure 2(f)). This initialization inherits the capacity of being sensitive to the view information." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.122, + 0.894, + 0.242 + ], + "angle": 0, + "content": "Directly using the constructed 3D-aware I2I translation model (Figure 2(f)), there still exists some view-consistency problem. This is because of the lack of multi-view consistency regularization, and the usage of the single-view image. Therefore, to address these problems we introduce several techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.243, + 0.875, + 0.258 + ], + "angle": 0, + "content": "In sum, our work makes the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.266, + 0.891, + 0.296 + ], + "angle": 0, + "content": "- We are the first to explore 3D-aware multi-class I2I translation, which allows generating 3D consistent videos." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.306, + 0.892, + 0.38 + ], + "angle": 0, + "content": "- We decouple 3D-aware I2I translation into two steps. First, we propose a multi-class StyleNeRF. To train this multi-class StyleNeRF effectively, we provide a new training strategy. The second step is the proposal of a 3D-aware I2I translation architecture." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.39, + 0.892, + 0.451 + ], + "angle": 0, + "content": "- To further address the view-inconsistency problem of 3D-aware I2I translation, we propose several techniques: a U-net-like adaptor, a hierarchical representation constraint and a relative regularization loss." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.46, + 0.892, + 0.505 + ], + "angle": 0, + "content": "- On extensive experiments, we considerably outperform existing 2D-I2I systems with our 3D-aware I2I method when evaluating temporal consistency." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.266, + 0.892, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.518, + 0.651, + 0.533 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.546, + 0.892, + 0.667 + ], + "angle": 0, + "content": "Neural Implicit Fields. 
Using neural implicit fields to represent 3D scenes has shown unprecedented quality. [37, 38, 43, 45, 48, 51] use 3D supervision to predict neural implicit fields. Recently, NeRF has shown powerful performance to neural implicit representations. NeRF and its variants [31, 39, 66] utilize a volume rendering technique for reconstructing a 3D scene as a combination of neural radiance and density fields to synthesize novel views." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.671, + 0.892, + 0.867 + ], + "angle": 0, + "content": "3D-aware GANs Recent approaches [5, 9, 13, 19, 35, 40-42, 52, 62, 68] learn neural implicit representations without 3D or multi-view supervisions. Combined with the adversarial loss, these methods typically randomly sample viewpoints, render photorealistic 2D images, and finally optimize their 3D representations. StyleNeRF [12] and concurrent works [4,8,44,69] have successfully synthesized high-quality view-consistent, detailed 3D scenes with StyleGAN-like generator design [22]. In this paper, we investigate 3D-aware image-to-image (3D-aware I2I) translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class. We combine transfer learning of GANs [55, 60]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "I2I translation. I2I translation with GAN [16, 57, 59, 61] has increasingly gained attention in computer vision. Based" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12653" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.213 + ], + "angle": 0, + "content": "on the differences of the I2I translation task, recent works focus on paired I2I translation [10, 16, 71], unpaired I2I translation [1, 18, 24, 27, 32, 36, 46, 50, 56, 58, 63, 64, 70], diverse I2I translation [24, 32, 36, 46, 64, 70] and scalable I2I translation [7, 29, 65]. However, none of these approaches addresses the problem of 3D-aware I2I. For the 3D scenes represented by neural implicit fields, directly using these methods suffers from view-inconsistency." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.23, + 0.169, + 0.245 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.26, + 0.473, + 0.398 + ], + "angle": 0, + "content": "Problem setting. Our goal is to achieve 3D consistent multi-class I2I translation trained on single-view data only. The system is designed to translate a viewpoint-video consisting of multiple images (source domain) into a new, photorealistic viewpoint-video scene of a target class. Furthermore, the system should be able to handle multi-class target domains. We decouple our learning into a multi-class 3D-aware generative model step and a multi-class 3D-aware I2I translation step." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.409, + 0.414, + 0.425 + ], + "angle": 0, + "content": "3.1. Multi-class 3D-aware generative model" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.432, + 0.47, + 0.51 + ], + "angle": 0, + "content": "Let \\(\\mathcal{I}_{\\mathcal{R}\\mathcal{G}\\mathcal{B}} \\in \\mathbb{R}^{H \\times W \\times 3}\\) be in the image domain. In this work, we aim to map a source image into a target sample conditioned on the target domain label \\(l \\in \\{1, \\dots, L\\}\\) and a random noise vector \\(\\mathbf{z} \\in \\mathbb{R}^{\\mathbf{Z}}\\). 
Let vector \\(\\mathbf{x}\\) and \\(\\mathbf{d}\\) be 3D location and 2D viewing direction, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.513, + 0.473, + 0.697 + ], + "angle": 0, + "content": "Unconditional 3D-aware generative model. StyleNeRF [12] introduces a 5D function (3D location \\( x \\) and 2D viewing direction \\( d \\)) to predict the volume density \\( \\sigma \\) and RGB color \\( c \\). Both \\( \\sigma \\) and \\( c \\) are further used to render an image. As shown on Figure 2(a) StyleNeRF consists of four subnetworks: a mapping network \\( M \\), a fully connected layer \\( F \\), a generator \\( G \\) and a discriminator \\( D \\). The mapping network \\( M \\) takes random noise \\( z \\) as input, and outputs latent code \\( w \\), which is further fed into both the fully connected layer \\( F \\) and generator \\( G \\). Given the 3D location \\( x \\), the 2D viewing direction \\( d \\) and latent code \\( w \\), StyleNeRF renders the feature map \\( f \\):" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.709, + 0.329, + 0.741 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {f} (\\boldsymbol {r}) = \\int_ {0} ^ {\\infty} p (t) \\boldsymbol {c} (\\boldsymbol {r} (t), \\boldsymbol {d}) d t\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.743, + 0.469, + 0.777 + ], + "angle": 0, + "content": "\\[\np (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma (\\boldsymbol {r} (s)) d s\\right) \\cdot \\sigma_ {\\boldsymbol {w}} (\\boldsymbol {r} (t)) \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.78, + 0.264, + 0.796 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {c}, \\sigma = F (\\boldsymbol {x}, \\boldsymbol {d}, \\boldsymbol {w}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.473, + 0.903 + ], + "angle": 0, + "content": "where \\( \\boldsymbol{r}(t) = \\boldsymbol{o} + t\\boldsymbol{d} \\) (\\( \\boldsymbol{o} \\) is the camera origin) is a camera ray for each feature representation position. Generator \\( G \\) takes as an input the representation \\( \\boldsymbol{f} \\) and the latent code \\( \\boldsymbol{w} \\), and outputs view-consistent photo-realistic novel result \\( \\hat{I}_{RGB} \\). The discriminator \\( D \\) is to distinguish real images \\( I_{RGB} \\) from generated images \\( \\hat{I}_{RGB} \\)." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.848, + 0.108 + ], + "angle": 0, + "content": "The fully objective of StyleNeRF is as following:" + }, + { + "type": "equation", + "bbox": [ + 0.517, + 0.12, + 0.891, + 0.175 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {G} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {p} \\sim \\mathcal {P}} [ v (D (G (F (\\boldsymbol {z}, \\boldsymbol {x}, \\boldsymbol {d}), M (\\boldsymbol {z}))) ] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) + \\lambda \\| \\nabla D (I _ {R G B}) \\| ^ {2}) \\right] \\tag {2} \\\\ + \\beta \\cdot \\mathcal {L} _ {\\mathrm {N e R F - p a t h}} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.186, + 0.894, + 0.247 + ], + "angle": 0, + "content": "where \\( v(u) = -\\log (1 + \\exp (-u)) \\), and \\( p_{\\mathrm{data}} \\) is the data distribution. \\( \\mathcal{L}_{\\mathrm{NeRF - path}} \\) is NeRF path regularization used in StyleNeRF. We also set \\( \\beta = 0.2 \\) and \\( \\lambda = 0.5 \\) following StyleNeRF." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.25, + 0.892, + 0.446 + ], + "angle": 0, + "content": "Conditional 3D-aware generative model. Figure 2(b) shows the proposed multi-class 3D-aware generative model (i.e., multi-class StyleNeRF). Compared to the StyleNeRF architecture (Figure 2(a)), we introduce two mapping networks: \\( M_{1} \\) and \\( M_{2} \\). The mapping network \\( M_{1} \\) outputs the latent code \\( \\boldsymbol{w}_{1} \\). While the mapping network \\( M_{2} \\) takes as input the concatenated noise \\( \\boldsymbol{z} \\) and class embedding \\( e_{l-th} \\), and outputs the latent code \\( \\boldsymbol{w}_{2} \\). The second mapping network \\( M_{2} \\) aims to guide the generator \\( G \\) to synthesize a class-specific image. Here we do not feed the latent code \\( \\boldsymbol{w}_{2} \\) into NeRF's fully connected layer \\( F \\), since we expect \\( F \\) to learn a class-agnostic feature representation, which contributes to perform multi-class 3D-aware I2I translation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.447, + 0.892, + 0.538 + ], + "angle": 0, + "content": "To be able to train multi-class StyleNeRF we adapt the loss function. We require \\(D\\) to address multiple adversarial classification tasks simultaneously, as in [33]. Specifically, given output \\(D \\in \\mathbb{R}^L\\), we locate the \\(l\\)-th class response. Using the response for the \\(l\\)-th class, we compute the adversarial loss and back-propagate gradients:" + }, + { + "type": "equation", + "bbox": [ + 0.507, + 0.55, + 0.891, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {G} ^ {l} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {x} \\sim \\mathcal {P} _ {x}, \\boldsymbol {d} \\sim \\mathcal {P} _ {d}} \\left[ v (D (G (\\hat {I} _ {R G B})) _ {\\boldsymbol {l} - t h} \\right] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) _ {l - t h} + \\lambda \\| \\nabla D (I _ {R G B}) _ {l _ {t h}} \\| ^ {2}) \\right] \\\\ + \\beta \\cdot \\mathcal {L} _ {\\text {N e R F - p a t h}}. \\tag {3} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.627, + 0.892, + 0.822 + ], + "angle": 0, + "content": "We initialize the multi-class StyleNeRF with the weights learned with the unconditional StyleNeRF (E.q. 2), since the training from scratch fails to convergence. Results of this are show in Figs. 7. To be specific, we directly copy the weights from the one learned from StyleNeRF for \\( M_{1} \\), \\( F \\) and \\( G \\) with the same parameter size. For the mapping network \\( M_{2} \\), we duplicate the weight from \\( M \\) except for the first layer, which is trained from scratch because of the different parameter sizes. The discriminator is similarly initialized except for the last layer, which is a new convolution layer with \\( L \\) output channels. Using the proposed initialization method, we successfully generate class-specific photorealistic high-resolution result." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.727, + 0.847 + ], + "angle": 0, + "content": "3.2. 3D-aware I2I translation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Figure 2 (f) shows the 3D-aware I2I translation network at inference time. It consists of the encoder \\( E \\), the generator \\( G \\) and two mapping networks \\( M_{1} \\) and \\( M_{2} \\). 
Inspired" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12654" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.087, + 0.89, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.364, + 0.893, + 0.449 + ], + "angle": 0, + "content": "Figure 2. Overview of our method. (a) We first train a 3D-aware generative mode (i.e., StyleNeRF) with single-view photos. (b) We extend StyleNerf to multi-class StyleNerf. We introduce an effective training strategy: initializing multi-class StyleNeRF with StyleNeRF. (c) The training of the proposed 3D-aware I2I translation. It consists of the encoder \\( E \\), the adaptor \\( A \\), the generator \\( G \\) and two mapping networks \\( M_1 \\) and \\( M_2 \\). We freeze all networks except for training the adaptor \\( A \\). The encoder is initialized by the main networks of the pretrained discriminator. We introduce several techniques to address the view-consistency problems: including a U-net-like adaptor \\( A \\), (d) relative regularization loss and (e) hierarchical representation constrain. (f) Usage of proposed model at inference time." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.457, + 0.473, + 0.653 + ], + "angle": 0, + "content": "by DeepI2I [61], we use the pretrained discriminator (Figure 2(b)) to initialize the encoder \\( E \\) of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I generator. To align the encoder with the generator, [61] introduces a Resnet-like adaptor network to communicate the encoder and decoder. The adaptor is trained without any real data. However, directly using these techniques for 3D-aware I2I translation still suffers from some view-consistency problems. Therefore, in the following, we introduce several designs to address this problem: a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.664, + 0.47, + 0.786 + ], + "angle": 0, + "content": "U-net-like adaptor. As shown in Figure 2(c), to overcome 3D-inconsistency in the results, we propose a U-net-like adaptor \\( A \\). This design contributes to preserve the spatial structure of the input feature. This has been used before for semantic segmentation tasks and label to image translation [17]. In this paper, we experimentally demonstrate that the U-net-like adaptor is effective to reduce the inconsistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.795, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Hierarchical representation constrain. As shown in Figure 2(e), given the noise \\( \\mathbf{z} \\), 3D location \\( \\mathbf{x} \\) and 2D viewing direction \\( \\mathbf{d} \\) the fully connected layer \\( F \\) renders the 3D-consistent feature map \\( \\mathbf{f} = F(\\mathbf{x}, \\mathbf{d}, \\mathbf{w}_1) = F(\\mathbf{x}, \\mathbf{d}, M1(\\mathbf{z})) \\). We further extract the hierarchical representation \\( \\{G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k\\} \\) as well as the synthesized image \\( \\hat{I}_{RGB} = G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2) \\). 
Here \\( G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k \\) is the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.456, + 0.892, + 0.531 + ], + "angle": 0, + "content": "\\(k\\)-th \\((k = m, \\dots, n, (n > m))\\) ResBlock \\(^1\\) output of the generator \\(G\\). We then take the generated image \\(\\hat{I}_{RGB}\\) as input for the encoder \\(E\\): \\(E(\\hat{I}_{RGB})\\), which is fed into the adaptor network \\(A\\), that is \\(\\hat{\\pmb{f}} = A(E(\\hat{I}_{RGB}))\\). In this step, our loss is" + }, + { + "type": "equation", + "bbox": [ + 0.634, + 0.528, + 0.892, + 0.555 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {A} = \\left\\| \\boldsymbol {f} - \\hat {\\boldsymbol {f}} \\right\\| _ {1}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.561, + 0.892, + 0.631 + ], + "angle": 0, + "content": "For the intermediate layers, we propose a hierarchical representation constrain. Given the output \\(\\hat{\\pmb{f}}\\) and the latent codes (i.e., \\(\\pmb{w}_1\\) and \\(\\pmb{w}_2\\))², we similarly collect the hierarchical feature \\(\\left\\{G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2)_k\\right\\}\\). The objective is" + }, + { + "type": "equation", + "bbox": [ + 0.52, + 0.639, + 0.892, + 0.672 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {H} = \\sum_ {k} \\left\\| G (\\boldsymbol {f}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} - G (\\hat {\\boldsymbol {f}}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} \\right\\| _ {1}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.685, + 0.892, + 0.746 + ], + "angle": 0, + "content": "In this step, we freeze every network except for the U-net-like adaptor which is learned. Note that we do not access to any real data to train the adaptor, since we utilize the generated image with from the trained generator (Figure 2(b))." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.827 + ], + "angle": 0, + "content": "Relative regularization loss. We expect to input the consistency of the translated 3D scene with single-image regularization instead of the images from the consecutive views. We propose a relative regularization loss based on neighboring patches. We assume that neighboring patches" + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.838, + 0.892, + 0.874 + ], + "angle": 0, + "content": "1After each ResBlock the feature resolution is half of the previous one in the encoder, and two times in generator. In the generator, the last output is image." + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.874, + 0.875, + 0.888 + ], + "angle": 0, + "content": "\\(^{2}\\)Both \\(\\pmb{w}1\\) and \\(\\pmb{w}2\\) are the ones used when generating image \\(\\hat{I}_{RGB}\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.518, + 0.888, + 0.799, + 0.901 + ], + "angle": 0, + "content": "3More precisely, that is the feature map in this paper." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.838, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12655" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.082, + 0.089, + 0.468, + 0.25 + ], + "angle": 0, + "content": "
Dataset MethodCelebA-HQAFHQ
TC↓FID↓TC↓FID↓
*MUNIT30.24031.428.49741.5
*DRIT35.45252.125.34195.6
*MSGAN31.64133.134.23661.4
StarGANv210.25013.63.02516.1
Ours (3D)3.74322.32.06715.3
TC↓(unc)FID↓TC↓(unc)FID↓
†Liu et al. [34]13.31517.83.46220.0
StarGANv210.25012.23.0259.9
†Kunhee et al. [23]10.4626.73.24110.0
Ours (3D)3.74318.72.06711.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.26, + 0.47, + 0.304 + ], + "angle": 0, + "content": "Table 1. Comparison with baselines on TC and FID metrics.* denotes that we used the results provided by StarGANv2. † means that we used the pre-trained networks provided by authors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.311, + 0.47, + 0.432 + ], + "angle": 0, + "content": "are equivalent to that on corresponding patches of two consecutive views. For example, when inputting multi-view consistent scene images, the position of eyes are consistently moving. The fully connected layers (i.e., NeRF mode) \\( F \\) renders the view-consistent feature map \\( f \\), which finally decides the view-consistent reconstructed 3D scene. Thus, we expect the output \\( \\hat{f} \\) of the adaptor \\( A \\) to obtain the view-consistent property of the feature map \\( f \\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.432, + 0.471, + 0.552 + ], + "angle": 0, + "content": "We randomly sample one vector from the feature map \\(\\pmb{f}\\) (e.g., red square in (Figure 2(d))), denoted as \\(\\pmb{f}^{\\eta}\\). Then we sample the eight nearest neighboring vectors of \\(\\pmb{f}^{\\eta}\\) (dark green square in Figure 2(d)), denoted by \\(\\pmb{f}^{\\eta, \\varepsilon}\\) where \\(\\varepsilon = 1, \\dots, 8\\) is the neighbor index. Similarly, we sample vectors \\(\\hat{\\pmb{f}}^{\\eta}\\) and \\(\\hat{\\pmb{f}}^{\\eta, \\varepsilon}\\) from the feature map \\(\\hat{\\pmb{f}}\\) (red and dark green dash square in Figure 2(d)). We then compute the patch difference:" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.562, + 0.47, + 0.587 + ], + "angle": 0, + "content": "\\[\nd _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} = \\boldsymbol {f} ^ {\\eta} \\ominus \\boldsymbol {f} ^ {\\eta , \\varepsilon}, d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} = \\hat {\\boldsymbol {f}} ^ {\\eta} \\ominus \\hat {\\boldsymbol {f}} ^ {\\eta , \\varepsilon}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.595, + 0.47, + 0.636 + ], + "angle": 0, + "content": "where \\(\\ominus\\) represents vector subtraction. In order to preserve the consistency, we force these patch differences to be small:" + }, + { + "type": "equation", + "bbox": [ + 0.196, + 0.635, + 0.47, + 0.662 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {R} = \\left\\| d _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} - d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} \\right\\| _ {1}. \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.665, + 0.47, + 0.711 + ], + "angle": 0, + "content": "The underlying intuition is straightforward: the difference vectors of the same location should be most relevant in the latent space compared to other random pairs." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.711, + 0.24, + 0.726 + ], + "angle": 0, + "content": "The final objective is" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.737, + 0.47, + 0.754 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {H} + \\mathcal {L} _ {A} + \\mathcal {L} _ {R}. \\tag {8}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.774, + 0.21, + 0.791 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.798, + 0.265, + 0.816 + ], + "angle": 0, + "content": "4.1. Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Training details. 
We use the trained StyleNeRF to partially initialize our multi-class StyleNeRF architecture. We adapt the structure of the multi-class StyleNeRF to the 3D-aware I2I architecture. The proposed method is implemented in Pytorch [47]. We use Adam [25] with a batch size" + }, + { + "type": "table", + "bbox": [ + 0.501, + 0.088, + 0.89, + 0.144 + ], + "angle": 0, + "content": "
Ini.Ada.Hrc.Rrl.TC↓FID↓
YNNN2.61223.8
YYNN2.32423.1
YYYN2.20416.1
YYYY2.06715.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.155, + 0.894, + 0.281 + ], + "angle": 0, + "content": "Table 2. Impact of several components in the performance on AFHQ. The second row is the case where the 3D-aware I2I translation model is initialized by weights learned from the multi-class StylyNeRF. Then it is trained with a Resnet-based adaptor and \\( L_{1} \\) loss between the representations \\( f \\) and \\( \\hat{f} \\). The proposed techniques continuously improve the consistency and performance. Ini.: initialization method for multi-class StyleNeRF, Ada.: U-net-like adaptor, Hrc.: Hierarchical representation constrain, Rrl: Relative regularization loss." + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.298, + 0.89, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.476, + 0.891, + 0.648 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.658, + 0.892, + 0.73 + ], + "angle": 0, + "content": "Figure 3. (Top) Using a single mapping network which takes as input the concatenated class embedding and the noise. We find it fails to generate target-specific realistic image. (Bottom) we use two mapping networks without concatenating their outputs like the proposed method. This design fails to generate 3D-aware results." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.744, + 0.892, + 0.806 + ], + "angle": 0, + "content": "of 64, using a learning rate of 0.0002. We use \\(2 \\times\\) Quadro RTX 3090 GPUs (24 GB VRAM) to conduct all our experiments. We show the network details and more results on Supp. Mat.." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.893, + 0.902 + ], + "angle": 0, + "content": "Datasets. Our experiments are conducted on the Animal Faces (AFHQ) [7] and CelebA-HQ [21] datasets. AFHQ contains 3 classes, each one has about 5000 images. In CelebA-HQ, we use gender as a class, with \\(\\sim 10\\mathrm{k}(10057)\\) male and \\(\\sim 18\\mathrm{k}(17943)\\) female images in the training set. In this paper, all images are resized to \\(256 \\times 256\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "12656" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.895, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.403, + 0.893, + 0.445 + ], + "angle": 0, + "content": "Figure 4. Comparative results between the proposed method and StarGANv2. We observe that StarGANv2 suffers from underestimating viewpoint changes when changing the input viewpoint (first column). It also leads to identity change (third and fourth columns), and a geometrically unrealistic ear (last two columns)." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.452, + 0.47, + 0.75 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.759, + 0.47, + 0.802 + ], + "angle": 0, + "content": "Figure 5. The generated images of (top) \\( G(\\pmb{f}, \\pmb{w}_1, \\pmb{w}_2) \\) and (bottom) \\( G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2) \\), which show that we correctly align the outputs of both the NeRF mode \\( F \\) and the adaptor \\( A \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.805, + 0.47, + 0.852 + ], + "angle": 0, + "content": "Baselines. We compare to MUNIT [15], DRIT [28], MSGAN [20], StarGANv2 [7], [23] and [34], all of which perform image-to-image translation." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Evaluation Measures. We employ the widely used metric for evaluation, namely Fréchet Inception Distance (FID) [14]. We also propose a new measure in which we" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.895, + 0.71 + ], + "angle": 0, + "content": "combine two metrics, one which measures the consistency between neighboring frames (which we want to be low), and another that measures the diversity over the whole video (which we would like to be high). We adopt a modified temporal loss (TL) [54]. This temporal loss computes the Frobenius difference between two frames to evaluate the video consistency. Only considering this measure would lead to high scores when neighboring frames in the generated video are all the same. For successful 3D-aware I2I translation, we expect the system to be sensitive to view changes in the source video and therefore combine low consecutive frame changes with high diversity over the video. Therefore, we propose to compute LPIPS [67] for each video (vLPIPS), which indicates the diversity of the generated video sequence. To evaluate both the consistency and the sensitiveness of the generated video, we propose a new temporal consistency metric (TC):" + }, + { + "type": "equation", + "bbox": [ + 0.62, + 0.721, + 0.891, + 0.737 + ], + "angle": 0, + "content": "\\[\nT C = T L / v L P I P S. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.748, + 0.894, + 0.807 + ], + "angle": 0, + "content": "Due to the small changes between two consecutive views, for each video we use frame interval 1, 2 and 4 in between to evaluate view-consistency. Note that a lower TC value is better." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.818, + 0.813, + 0.835 + ], + "angle": 0, + "content": "4.2. Quantitative and qualitative results." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "We evaluate the performance of the proposed method on both the AFHQ animal and CelebA human face dataset. As reported in Table 1, in terms of TC the proposed method achieves the best score on two datasets. For example, we" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "12657" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.468, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.411, + 0.453, + 0.424 + ], + "angle": 0, + "content": "Figure 6. Interpolation between the dog and wildlife classes." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.43, + 0.47, + 0.687 + ], + "angle": 0, + "content": "have 3.743 TC on CelebA-HQ, which is better than StarGANv2 (10.250 TC). This indicates that our method dramatically improves consistency. As reported in Table 1 (up), across both datasets, the proposed method consistently outperforms the baselines with significant gains in terms of FID and LPIPS, except for StarGANv2 which obtains superior results. However, on AFHQ we achieve better FID score than StarGANv2. Kunhee et al. [23] reports the unconditional FID ((unc)FID) value which is computed between synthesized images and training samples instead of each class. As reported in Table 1 (bottom), We are able to achieve completing results on uncFID metrics. 
Note that while 2D I2I translation (e.g., StarGANv2) can obtain high-quality for each image, they cannot synthesize images of the same scene with 3D consistency, and suffers from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.689, + 0.47, + 0.902 + ], + "angle": 0, + "content": "In Figures 1,4, we perform 3D-aware I2I translation. When changing the input viewpoint (Figure 4 (first two columns)), the outputs of StarGANv2 do not maintain the correct head pose, and underestimate the pose changes with respect to the frontal view. To estimate that this is actually the case, we also compute the diversity (i.e., vLPIPS) in a single video sequence. For example, both StarGANv2 and our method are 0.032 and 0.101 on CelebA-HQ. This confirms that the diversity (due to pose changes) is lowest for StarGANv2. More clearly showing the limitations of standard I2I methods for 3D-aware I2I, we observe that StarGANv2 suffers from unrealistic changes when changing the viewpoint. For example, when translating the class cat to wildlife, the generated images changes from wolf to leop" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.228 + ], + "angle": 0, + "content": "ard when varying the viewpoint (Figure 4 (third and fourth columns)). Also, the main target class characteristics, such as ears, are not geometrically realistic, leading to unrealistic 3D scene videos. Our method, however, eliminates these shortcomings and performs efficient high-resolution image translation with high 3D-consistency, which preserves the input image pose and changes the style of the output images. We show high-resolution images \\((1024 \\times 1024)\\) on Supp. Mat.." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.242, + 0.65, + 0.258 + ], + "angle": 0, + "content": "4.3. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.27, + 0.892, + 0.481 + ], + "angle": 0, + "content": "Conditional 3D-aware generative architecture In this experiment, we verify our network design by comparing it with two alternative network designs. As shown in Figure 3(up), we explore a naive strategy: using one mapping which takes as input the concatenated class embedding and the noise. In this way, the fully connected network \\( F \\) outputs the class-specific latent code \\( w \\), which is fed into the fully connected network \\( F \\) to output the class-specific representation \\( f \\). Here, both the latent code \\( w \\) and the representation \\( f \\) are decided by the same class. However, when handling 3D-aware multi-class I2I translation task, the feature representation \\( \\hat{f} \\) is combined with the latent code \\( w \\) from varying class embeddings, which leads to unrealistic image generation (Figure. 3(up))." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.483, + 0.892, + 0.573 + ], + "angle": 0, + "content": "As shown in Figure 3(bottom), we utilize two mapping networks without concatenating their outputs like the proposed method. This design guarantees that the output of the fully connected layers \\( F \\) are class-agnostic. We experimentally observe that this model fails to handle 3D-aware generation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.804 + ], + "angle": 0, + "content": "Effective training strategy for multi-class 3D-aware generative model. We evaluate the proposed training strategy on AFHQ and CelebA-HQ datasets. 
We initialize the proposed multi-class 3D I2I architecture from scratch and the proposed method, respectively. As shown on Figure 7 (up), the model trained from scratch synthesizes unrealistic faces on CelebA-HQ dataset, and low quality cats on AFHQ. This is due to the style-based conditional generator which is hard to be optimized and causes mode collapse directly [49]. The proposed training strategy, however, manages to synthesize photo-realistic high-resolution images with high multi-view consistency. This training strategy first performs unconditional learning, which leads to satisfactory generative ability. Thus, we relax the difficulty of directly training the conditional model." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Alignment and interpolation. Figure 5 exhibits the outputs of the generator when taking as input the feature representation \\(\\pmb{f}\\) and \\(\\hat{\\pmb{f}}\\). This confirms that the proposed method successfully aligns the outputs of the fully connected layers \\(F\\) and the adaptor \\(A\\). Figure 6 reports interpolation by freezing the input images while interpolating the class em" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12658" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.078, + 0.089, + 0.892, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.586, + 0.844, + 0.601 + ], + "angle": 0, + "content": "Figure 7. Qualitative results of multi-class StyleNeRF training from scratch (up) and from the proposed strategy (bottom)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.612, + 0.471, + 0.658 + ], + "angle": 0, + "content": "bedding between two classes. Our model still manages to preserve the view-consistency, and generate high quantity images with even given never seen class embeddings." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Techniques for improving the view-consistency. We perform an ablation study on the impact of several design elements on the overall performance of the system, which includes the proposed initialization 3D-aware I2I translation model (Ini.), U-net-like adaptor (Ada.), hierarchical representation constrain (Hrc.) and relative regularization loss (Rrl.). We evaluate these four factors in Table 2. The results show that only using the proposed initialization (the second row of the Table 2) has already improved the view-consistency comparing to StarGANv2 (Table 1). Utilizing either U-net-like adaptor (Ada.) or hierarchical representation constrain (Hrc.) further leads to performance gains. Finally we are able to get the best score when further adding relative regularization loss (Rrl.) to the 3D-aware I2I translation model." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.611, + 0.619, + 0.627 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.892, + 0.797 + ], + "angle": 0, + "content": "In this paper we first explore 3D-aware I2I translation. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. In the first step, we propose a new multi-class StyleNeRF architecture, and an effective training strategy. We design the 3D-aware I2I translation model with the well-optimized multi-class StyleNeRF model. 
It inherits the capacity of synthesizing 3D consistent images. In the second step, we propose several techniques to further reduce the view-consistency of the 3D-aware I2I translation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.803, + 0.893, + 0.9 + ], + "angle": 0, + "content": "Acknowledgement. We acknowledge the support from the Key Laboratory of Advanced Information Science and Network Technology of Beijing (XDXX2202), and the project supported by Youth Foundation (62202243). We acknowledge the Spanish Government funding for projects PID2019-104174GB-I00, TED2021-132513B-I00." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12659" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Kyungjune Baek, Yunjay Choi, Youngjung Uh, Jaejun Yoo, and Hyunjung Shim. Rethinking the truly unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14154-14163, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.242 + ], + "angle": 0, + "content": "[2] Aayush Bansal, Shugao Ma, Deva Ramanan, and Yaser Sheikh. Recycle-gan: Unsupervised video retargeting. In Proceedings of the European conference on computer vision (ECCV), pages 119-135, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.242, + 0.472, + 0.282 + ], + "angle": 0, + "content": "[3] Dina Bashkirova, Ben Usman, and Kate Saenko. Unsupervised video-to-video translation. arXiv preprint arXiv:1806.03698, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.283, + 0.472, + 0.367 + ], + "angle": 0, + "content": "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.368, + 0.472, + 0.437 + ], + "angle": 0, + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.472, + 0.493 + ], + "angle": 0, + "content": "[6] Yang Chen, Yingwei Pan, Ting Yao, Xinmei Tian, and Tao Mei. Mocycle-gan: Unpaired video-to-video translation. In Proceedings of the 27th ACM International Conference on Multimedia, pages 647-655, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.472, + 0.535 + ], + "angle": 0, + "content": "[7] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.536, + 0.472, + 0.605 + ], + "angle": 0, + "content": "[8] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10673-10683, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.606, + 0.472, + 0.661 + ], + "angle": 0, + "content": "[9] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411. IEEE, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.662, + 0.472, + 0.704 + ], + "angle": 0, + "content": "[10] Abel Gonzalez-Garcia, Joost van de Weijer, and Yoshua Bengio. Image-to-image translation for cross-domain disentanglement. In NeurIPS, pages 1294–1305, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.705, + 0.472, + 0.761 + ], + "angle": 0, + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, pages 2672-2680, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.761, + 0.472, + 0.816 + ], + "angle": 0, + "content": "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.472, + 0.873 + ], + "angle": 0, + "content": "[13] Paul Henderson, Vagia Tsiminaki, and Christoph H Lampert. Leveraging 2d data to learn textured 3d mesh generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7498-7507, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.873, + 0.472, + 0.903 + ], + "angle": 0, + "content": "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "two time-scale update rule converge to a local nash equilibrium. In NeurIPS, pages 6626-6637, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.121, + 0.892, + 0.162 + ], + "angle": 0, + "content": "[15] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In ECCV, pages 172-189, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.163, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[16] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.892, + 0.247 + ], + "angle": 0, + "content": "[17] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, pages 1125-1134, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.247, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[18] Somi Jeong, Youngjung Kim, Eungbean Lee, and Kwanghoon Sohn. Memory-guided unsupervised image-to-image translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6558-6567, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.316, + 0.892, + 0.372 + ], + "angle": 0, + "content": "[19] Danilo Jimenez Rezende, SM Eslami, Shakir Mohamed, Peter Battaglia, Max Jaderberg, and Nicolas Heess. Unsupervised learning of 3d structure from images. 
Advances in neural information processing systems, 29, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.372, + 0.892, + 0.428 + ], + "angle": 0, + "content": "[20] Animesh Karnewar and Oliver Wang. *Msg-gan: Multi-scale gradients for generative adversarial networks*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7799–7808, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.429, + 0.892, + 0.469 + ], + "angle": 0, + "content": "[21] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.512 + ], + "angle": 0, + "content": "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.512, + 0.892, + 0.58 + ], + "angle": 0, + "content": "[23] Kunhee Kim, Sanghun Park, Eunyeong Jeon, Taehun Kim, and Daijin Kim. A style-aware discriminator for controllable image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18239-18248, 2022. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.58, + 0.892, + 0.623 + ], + "angle": 0, + "content": "[24] Taeksoo Kim, Moonsu Cha, Hyunsoo Kim, Jungkwon Lee, and Jiwon Kim. Learning to discover cross-domain relations with generative adversarial networks. In ICML, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.623, + 0.892, + 0.651 + ], + "angle": 0, + "content": "[25] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. *ICLR*, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.651, + 0.892, + 0.734 + ], + "angle": 0, + "content": "[26] Minsu Ko, Eunju Cha, Sungjoo Suh, Huijin Lee, Jae-Joon Han, Jinwoo Shin, and Bohyung Han. Self-supervised dense consistency regularization for image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18301-18310, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.735, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[27] Héctor Laria, Yaxing Wang, Joost van de Weijer, and Bogdan Raducanu. Hyper-gan: Transferring unconditional to conditional gans with hypernetworks. arXiv preprint arXiv:2112.02219, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.79, + 0.892, + 0.845 + ], + "angle": 0, + "content": "[28] Hsin-Ying Lee, Hung-Yu Tseng, Jia-Bin Huang, Maneesh Kumar Singh, and Ming-Hsuan Yang. Diverse imaged-to-image translation via disentangled representations. In ECCV, 2018. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.892, + 0.903 + ], + "angle": 0, + "content": "[29] Hsin-Ying Lee, Hung-Yu Tseng, Qi Mao, Jia-Bin Huang, Yu-Ding Lu, Maneesh Singh, and Ming-Hsuan Yang. Drit++: Diverse image-to-image translation via disentangled representations. IJCV, pages 1-16, 2020. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "12660" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[30] Kangning Liu, Shuhang Gu, Andrés Romero, and Radu Timofte. 
Unsupervised multimodal video-to-video translation via self-supervised learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1030–1040, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.163, + 0.469, + 0.204 + ], + "angle": 0, + "content": "[31] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. NeurIPS, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.469, + 0.247 + ], + "angle": 0, + "content": "[32] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. In NeurIPS, pages 700-708, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.25, + 0.469, + 0.304 + ], + "angle": 0, + "content": "[33] Ming-Yu Liu, Xun Huang, Arun Mallya, Tero Karras, Timo Aila, Jaakko Lehtinen, and Jan Kautz. Few-shot unsupervised image-to-image translation. In CVPR, pages 10551-10560, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.307, + 0.469, + 0.389 + ], + "angle": 0, + "content": "[34] Yahui Liu, Enver Sangineto, Yajing Chen, Linchao Bao, Haoxian Zhang, Nicu Sebe, Bruno Lepri, Wei Wang, and Marco De Nadai. Smoothing the disentangled latent style space for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10785-10794, 2021. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.392, + 0.469, + 0.446 + ], + "angle": 0, + "content": "[35] Sebastian Lunz, Yingzhen Li, Andrew Fitzgibbon, and Nate Kushman. Inverse graphics gan: Learning to generate 3d shapes from unstructured 2d data. arXiv preprint arXiv:2002.12674, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.449, + 0.469, + 0.503 + ], + "angle": 0, + "content": "[36] Youssef Alami Mejjati, Christian Richardt, James Tompkin, Darren Cosker, and Kwang In Kim. Unsupervised attention-guided image-to-image translation. In NeurIPS, pages 3693-3703, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.506, + 0.469, + 0.574 + ], + "angle": 0, + "content": "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.577, + 0.469, + 0.644 + ], + "angle": 0, + "content": "[38] Mateusz Michalkiewicz, Jhony K. Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.647, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[39] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. arXiv preprint arXiv:2003.08934, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.704, + 0.469, + 0.773 + ], + "angle": 0, + "content": "[40] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7588–7597, 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.776, + 0.469, + 0.829 + ], + "angle": 0, + "content": "[41] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.832, + 0.469, + 0.899 + ], + "angle": 0, + "content": "[42] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[43] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. arXiv preprint arXiv:1912.07372, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.149, + 0.892, + 0.231 + ], + "angle": 0, + "content": "[44] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.892, + 0.3 + ], + "angle": 0, + "content": "[45] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.302, + 0.892, + 0.342 + ], + "angle": 0, + "content": "[46] Taesung Park, Alexei A. Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for conditional image synthesis. In ECCV, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.344, + 0.892, + 0.399 + ], + "angle": 0, + "content": "[47] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.401, + 0.892, + 0.44 + ], + "angle": 0, + "content": "[48] Songyou Peng, Michael Niemeyer, Lars M. Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. ArXiv, abs/2003.04618, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.442, + 0.892, + 0.494 + ], + "angle": 0, + "content": "[49] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-10, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.497, + 0.892, + 0.552 + ], + "angle": 0, + "content": "[50] Xuning Shao and Weidong Zhang. Spatchgan: A statistical feature based discriminator for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6546-6555, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.554, + 0.892, + 0.62 + ], + "angle": 0, + "content": "[51] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. 
Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems, pages 1119–1130, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.705 + ], + "angle": 0, + "content": "[52] Ayush Tewari, Xingang Pan, Ohad Fried, Maneesh Agrawala, Christian Theobalt, et al. Disentangled3d: Learning a 3d generative model with disentangled geometry and appearance from monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1516-1525, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.707, + 0.892, + 0.747 + ], + "angle": 0, + "content": "[53] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In NeurIPS, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.749, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[54] Wenjing Wang, Shuai Yang, Jizheng Xu, and Jiaying Liu. Consistent video style transfer via relaxation and regularization. IEEE Transactions on Image Processing, 29:9125-9139, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.858 + ], + "angle": 0, + "content": "[55] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. Minegan: effective knowledge transfer from gans to target domains with few images. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[56] Yaxing Wang, Abel Gonzalez-Garcia, Joost van de Weijer, and Luis Herranz. SDIT: Scalable and diverse cross-domain image translation. In ACM MM, 2019. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.518, + 0.957 + ], + "angle": 0, + "content": "12661" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[57] Yaxing Wang, Salman Khan, Abel Gonzalez-Garcia, Joost van de Weijer, and Fahad Shahbaz Khan. Semi-supervised learning for few-shot image-to-image translation. In CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.15, + 0.468, + 0.204 + ], + "angle": 0, + "content": "[58] Yaxing Wang, Hector Laria Mantecon, Joost van de Weijer, Laura Lopez-Fuentes, and Bogdan Raducanu. Transferi2i: Transfer learning for image-to-image translation from small datasets, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.207, + 0.468, + 0.259 + ], + "angle": 0, + "content": "[59] Yaxing Wang, Joost van de Weijer, and Luis Herranz. Mix and match networks: encoder-decoder alignment for zeropair image translation. In CVPR, pages 5467-5476, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.264, + 0.468, + 0.318 + ], + "angle": 0, + "content": "[60] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring gans: generating images from limited data. In ECCV, pages 218-234, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.321, + 0.468, + 0.361 + ], + "angle": 0, + "content": "[61] Yaxing Wang, Lu Yu, and Joost van de Weijer. Deep2i: Enabling deep hierarchical image-to-image translation by transferring from gans. NeurIPS, 2020. 
2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.364, + 0.468, + 0.431 + ], + "angle": 0, + "content": "[62] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.434, + 0.468, + 0.501 + ], + "angle": 0, + "content": "[63] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Unsupervised image-to-image translation with generative prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18332-18341, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.505, + 0.468, + 0.544 + ], + "angle": 0, + "content": "[64] Zili Yi, Hao Zhang, Ping Tan Gong, et al. Dualgan: Unsupervised dual learning for image-to-image translation. In ICCV, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.548, + 0.468, + 0.589 + ], + "angle": 0, + "content": "[65] Xiaoming Yu, Yuanqi Chen, Shan Liu, Thomas Li, and Ge Li. Multi-mapping image-to-image translation via learning disentanglement. In NeurIPS, pages 2990-2999, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.591, + 0.468, + 0.631 + ], + "angle": 0, + "content": "[66] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.634, + 0.468, + 0.702 + ], + "angle": 0, + "content": "[67] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.705, + 0.468, + 0.785 + ], + "angle": 0, + "content": "[68] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.789, + 0.468, + 0.842 + ], + "angle": 0, + "content": "[69] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.846, + 0.468, + 0.899 + ], + "angle": 0, + "content": "[70] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV, pages 2223-2232, 2017. 2, 3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.148 + ], + "angle": 0, + "content": "[71] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darryll, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. In NeurIPS, pages 465-476, 2017. 
3" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.519, + 0.957 + ], + "angle": 0, + "content": "12662" + } + ] +] \ No newline at end of file diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_origin.pdf b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..008afebf75eaac10d39483b5061365342d439c2e --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/38da797f-7f59-48cd-af34-af72487f73d0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f89fbca22ee3704d5ae325dd695ffbd4babf1948b76d878ca1bb759128b4597 +size 2580578 diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/full.md b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/full.md new file mode 100644 index 0000000000000000000000000000000000000000..10b0391e76e5b9e2e716df6dda697d46e464bb56 --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/full.md @@ -0,0 +1,301 @@ +# 3D-Aware Multi-Class Image-to-Image Translation with NeRFs + +Senmao Li $^{1}$ Joost van de Weijer $^{2}$ Yaxing Wang $^{1*}$ + +Fahad Shahbaz Khan $^{3,4}$ Meiqin Liu $^{5}$ Jian Yang $^{1}$ + +$^{1}$ VCIP,CS, Nankai University, $^{2}$ Universitat Autònoma de Barcelona + +$^{3}$ Mohamed bin Zayed University of AI, $^{4}$ Linkoping University, $^{5}$ Beijing Jiaotong University + +senmaonk@gmail.com {yaxing,csjyang}@nankai.edu.cn joost@cvc.uab.es + +fahad.khan@liu.se mqliu@bjtu.edu.cn + +![](images/22fc17fea1f2d332d96b8f08d9e486c59a1987425629a67389d12cce8b964b1e.jpg) +Figure 1. 3D-aware I2I translation: given a view-consistent 3D scene (the input), our method maps it into a high-quality target-specific image. Our approach produces consistent results across viewpoints. + +# Abstract + +Recent advances in 3D-aware generative models (3D-aware GANs) combined with Neural Radiance Fields (NeRF) have achieved impressive results. However no prior works investigate 3D-aware GANs for 3D consistent multiclass image-to-image (3D-aware I2I) translation. Naively using 2D-I2I translation methods suffers from unrealistic shape/identity change. To perform 3D-aware multi-class I2I translation, we decouple this learning process into a multi-class 3D-aware GAN step and a 3D-aware I2I trans + +lation step. In the first step, we propose two novel techniques: a new conditional architecture and an effective training strategy. In the second step, based on the well-trained multi-class 3D-aware GAN architecture, that preserves view-consistency, we construct a 3D-aware I2I translation system. To further reduce the view-consistency problems, we propose several new techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. In extensive experiments on two datasets, quantitative and qualitative results demonstrate that we successfully perform 3D-aware I2I translation with multi-view consistency. Code is + +# 1. Introduction + +Neural Radiance Fields (NeRF) have increasingly gained attention with their outstanding capacity to synthesize high-quality view-consistent images [31,39,66]. 
Benefiting from the adversarial mechanism [11], StyleNeRF [12] and concurrent works [4, 8, 44, 69] have successfully synthesized high-quality view-consistent, detailed 3D scenes by combining NeRF with StyleGAN-like generator design [22]. This recent progress in 3D-aware image synthesis has not yet been extended to 3D-aware I2I translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class (see Figure 1). + +A naive strategy is to use well-designed 2D-I2I translation methods [15, 16, 26, 28, 46, 63, 65, 70]. These methods, however, suffer from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video. Main target class characteristics, such as hairs, ears, and noses, are not geometrically realistic, leading to unrealistic results which are especially disturbing when applying I2I to translate videos. Also, these methods typically underestimate the viewpoint change and result in target videos with less viewpoint change than the source video. Another direction is to apply video-to-video synthesis methods [2, 3, 6, 30, 53]. These approaches, however, either rely heavily on labeled data or multi-view frames for each object. In this work, we assume that we only have access to single-view RGB data. + +To perform 3D-aware I2I translation, we extend the theory developed for 2D-I2I with recent developments in 3D-aware image synthesis. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. The former can synthesize view-consistent 3D scenes given a scene label, thereby addressing the 3D inconsistency problems we discussed for 2D-I2I. We will use this 3D-aware generative model to initialize our 3D-aware I2I model. It therefore inherits the capacity of synthesizing 3D consistent images. To train effectively a multi-class 3D-aware generative model (see Figure 2(b)), we provide a new training strategy consisting of: (1) training an unconditional 3D-aware generative model (i.e., StyleNeRF) and (2) partially initializing the multiclass 3D-aware generative model (i.e., multi-class StyleNeRF) with the weights learned from StyleNeRF. In the 3D-aware I2I translation step, we design a 3D-aware I2I translation architecture (Figure 2(f)) adapted from the trained multi-class StyleNeRF network. To be specific, we use the main network of the pretrained discriminator (Figure 2(b)) to initialize the encoder $E$ of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I gen + +erator (Figure 2(f)). This initialization inherits the capacity of being sensitive to the view information. + +Directly using the constructed 3D-aware I2I translation model (Figure 2(f)), there still exists some view-consistency problem. This is because of the lack of multi-view consistency regularization, and the usage of the single-view image. Therefore, to address these problems we introduce several techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. + +In sum, our work makes the following contributions: + +- We are the first to explore 3D-aware multi-class I2I translation, which allows generating 3D consistent videos. +- We decouple 3D-aware I2I translation into two steps. First, we propose a multi-class StyleNeRF. 
To train this multi-class StyleNeRF effectively, we provide a new training strategy. The second step is the proposal of a 3D-aware I2I translation architecture. +- To further address the view-inconsistency problem of 3D-aware I2I translation, we propose several techniques: a U-net-like adaptor, a hierarchical representation constraint and a relative regularization loss. +- On extensive experiments, we considerably outperform existing 2D-I2I systems with our 3D-aware I2I method when evaluating temporal consistency. + +# 2. Related Works + +Neural Implicit Fields. Using neural implicit fields to represent 3D scenes has shown unprecedented quality. [37, 38, 43, 45, 48, 51] use 3D supervision to predict neural implicit fields. Recently, NeRF has shown powerful performance to neural implicit representations. NeRF and its variants [31, 39, 66] utilize a volume rendering technique for reconstructing a 3D scene as a combination of neural radiance and density fields to synthesize novel views. + +3D-aware GANs Recent approaches [5, 9, 13, 19, 35, 40-42, 52, 62, 68] learn neural implicit representations without 3D or multi-view supervisions. Combined with the adversarial loss, these methods typically randomly sample viewpoints, render photorealistic 2D images, and finally optimize their 3D representations. StyleNeRF [12] and concurrent works [4,8,44,69] have successfully synthesized high-quality view-consistent, detailed 3D scenes with StyleGAN-like generator design [22]. In this paper, we investigate 3D-aware image-to-image (3D-aware I2I) translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class. We combine transfer learning of GANs [55, 60]. + +I2I translation. I2I translation with GAN [16, 57, 59, 61] has increasingly gained attention in computer vision. Based + +on the differences of the I2I translation task, recent works focus on paired I2I translation [10, 16, 71], unpaired I2I translation [1, 18, 24, 27, 32, 36, 46, 50, 56, 58, 63, 64, 70], diverse I2I translation [24, 32, 36, 46, 64, 70] and scalable I2I translation [7, 29, 65]. However, none of these approaches addresses the problem of 3D-aware I2I. For the 3D scenes represented by neural implicit fields, directly using these methods suffers from view-inconsistency. + +# 3. Method + +Problem setting. Our goal is to achieve 3D consistent multi-class I2I translation trained on single-view data only. The system is designed to translate a viewpoint-video consisting of multiple images (source domain) into a new, photorealistic viewpoint-video scene of a target class. Furthermore, the system should be able to handle multi-class target domains. We decouple our learning into a multi-class 3D-aware generative model step and a multi-class 3D-aware I2I translation step. + +# 3.1. Multi-class 3D-aware generative model + +Let $\mathcal{I}_{\mathcal{R}\mathcal{G}\mathcal{B}} \in \mathbb{R}^{H \times W \times 3}$ be in the image domain. In this work, we aim to map a source image into a target sample conditioned on the target domain label $l \in \{1, \dots, L\}$ and a random noise vector $\mathbf{z} \in \mathbb{R}^{\mathbf{Z}}$ . Let vector $\mathbf{x}$ and $\mathbf{d}$ be 3D location and 2D viewing direction, respectively. + +Unconditional 3D-aware generative model. StyleNeRF [12] introduces a 5D function (3D location $x$ and 2D viewing direction $d$ ) to predict the volume density $\sigma$ and RGB color $c$ . Both $\sigma$ and $c$ are further used to render an image. 
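For illustration, below is a minimal PyTorch sketch of the discretized volume rendering (alpha compositing) that turns per-sample densities $\sigma$ and colors $c$ into one rendered value per camera ray. It is a generic NeRF-style quadrature with illustrative names only, not the released implementation; the continuous form actually used is given in Eq. (1) below.

```python
import torch

def render_ray(sigma: torch.Tensor, color: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
    """Composite per-sample (sigma, color) values along one camera ray.

    sigma:  (S,)   densities at S samples along the ray
    color:  (S, C) colors (or feature vectors) at the same samples
    deltas: (S,)   distances between consecutive samples
    Returns the rendered color/feature vector of shape (C,).
    """
    alpha = 1.0 - torch.exp(-sigma * deltas)              # per-sample opacity
    # Transmittance T_i: probability that the ray reaches sample i unoccluded
    trans = torch.cumprod(torch.cat([torch.ones(1), 1.0 - alpha + 1e-10]), dim=0)[:-1]
    weights = alpha * trans                               # contribution of each sample
    return (weights.unsqueeze(-1) * color).sum(dim=0)     # composite along the ray

# Toy usage: 64 samples along one ray, 3-channel color.
sigma = torch.rand(64)
color = torch.rand(64, 3)
deltas = torch.full((64,), 0.05)
pixel = render_ray(sigma, color, deltas)  # -> tensor of shape (3,)
```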
As shown on Figure 2(a) StyleNeRF consists of four subnetworks: a mapping network $M$ , a fully connected layer $F$ , a generator $G$ and a discriminator $D$ . The mapping network $M$ takes random noise $z$ as input, and outputs latent code $w$ , which is further fed into both the fully connected layer $F$ and generator $G$ . Given the 3D location $x$ , the 2D viewing direction $d$ and latent code $w$ , StyleNeRF renders the feature map $f$ : + +$$ +\boldsymbol {f} (\boldsymbol {r}) = \int_ {0} ^ {\infty} p (t) \boldsymbol {c} (\boldsymbol {r} (t), \boldsymbol {d}) d t +$$ + +$$ +p (t) = \exp \left(- \int_ {0} ^ {t} \sigma (\boldsymbol {r} (s)) d s\right) \cdot \sigma_ {\boldsymbol {w}} (\boldsymbol {r} (t)) \tag {1} +$$ + +$$ +\boldsymbol {c}, \sigma = F (\boldsymbol {x}, \boldsymbol {d}, \boldsymbol {w}), +$$ + +where $\boldsymbol{r}(t) = \boldsymbol{o} + t\boldsymbol{d}$ ( $\boldsymbol{o}$ is the camera origin) is a camera ray for each feature representation position. Generator $G$ takes as an input the representation $\boldsymbol{f}$ and the latent code $\boldsymbol{w}$ , and outputs view-consistent photo-realistic novel result $\hat{I}_{RGB}$ . The discriminator $D$ is to distinguish real images $I_{RGB}$ from generated images $\hat{I}_{RGB}$ . + +The fully objective of StyleNeRF is as following: + +$$ +\begin{array}{l} \mathcal {L} _ {G} = \mathbb {E} _ {\boldsymbol {z} \sim \mathcal {Z}, \boldsymbol {p} \sim \mathcal {P}} [ v (D (G (F (\boldsymbol {z}, \boldsymbol {x}, \boldsymbol {d}), M (\boldsymbol {z}))) ] \\ + \mathbb {E} _ {I _ {R G B} \sim p _ {\mathrm {d a t a}}} \left[ v (- D (I _ {R G B}) + \lambda \| \nabla D (I _ {R G B}) \| ^ {2}) \right] \tag {2} \\ + \beta \cdot \mathcal {L} _ {\mathrm {N e R F - p a t h}} \\ \end{array} +$$ + +where $v(u) = -\log (1 + \exp (-u))$ , and $p_{\mathrm{data}}$ is the data distribution. $\mathcal{L}_{\mathrm{NeRF - path}}$ is NeRF path regularization used in StyleNeRF. We also set $\beta = 0.2$ and $\lambda = 0.5$ following StyleNeRF. + +Conditional 3D-aware generative model. Figure 2(b) shows the proposed multi-class 3D-aware generative model (i.e., multi-class StyleNeRF). Compared to the StyleNeRF architecture (Figure 2(a)), we introduce two mapping networks: $M_{1}$ and $M_{2}$ . The mapping network $M_{1}$ outputs the latent code $\boldsymbol{w}_{1}$ . While the mapping network $M_{2}$ takes as input the concatenated noise $\boldsymbol{z}$ and class embedding $e_{l-th}$ , and outputs the latent code $\boldsymbol{w}_{2}$ . The second mapping network $M_{2}$ aims to guide the generator $G$ to synthesize a class-specific image. Here we do not feed the latent code $\boldsymbol{w}_{2}$ into NeRF's fully connected layer $F$ , since we expect $F$ to learn a class-agnostic feature representation, which contributes to perform multi-class 3D-aware I2I translation. + +To be able to train multi-class StyleNeRF we adapt the loss function. We require $D$ to address multiple adversarial classification tasks simultaneously, as in [33]. Specifically, given output $D \in \mathbb{R}^L$ , we locate the $l$ -th class response. 
Using the response for the $l$ -th class, we compute the adversarial loss and back-propagate gradients: + +$$ +\begin{array}{l} \mathcal {L} _ {G} ^ {l} = \mathbb {E} _ {\boldsymbol {z} \sim \mathcal {Z}, \boldsymbol {x} \sim \mathcal {P} _ {x}, \boldsymbol {d} \sim \mathcal {P} _ {d}} \left[ v (D (G (\hat {I} _ {R G B})) _ {\boldsymbol {l} - t h} \right] \\ + \mathbb {E} _ {I _ {R G B} \sim p _ {\mathrm {d a t a}}} \left[ v (- D (I _ {R G B}) _ {l - t h} + \lambda \| \nabla D (I _ {R G B}) _ {l _ {t h}} \| ^ {2}) \right] \\ + \beta \cdot \mathcal {L} _ {\text {N e R F - p a t h}}. \tag {3} \\ \end{array} +$$ + +We initialize the multi-class StyleNeRF with the weights learned with the unconditional StyleNeRF (E.q. 2), since the training from scratch fails to convergence. Results of this are show in Figs. 7. To be specific, we directly copy the weights from the one learned from StyleNeRF for $M_{1}$ , $F$ and $G$ with the same parameter size. For the mapping network $M_{2}$ , we duplicate the weight from $M$ except for the first layer, which is trained from scratch because of the different parameter sizes. The discriminator is similarly initialized except for the last layer, which is a new convolution layer with $L$ output channels. Using the proposed initialization method, we successfully generate class-specific photorealistic high-resolution result. + +# 3.2. 3D-aware I2I translation + +Figure 2 (f) shows the 3D-aware I2I translation network at inference time. It consists of the encoder $E$ , the generator $G$ and two mapping networks $M_{1}$ and $M_{2}$ . Inspired + +![](images/be837d796fb2659dd32c9f9068e8e2b55d4d30650a14b68ad0f939ad6e8997d9.jpg) +Figure 2. Overview of our method. (a) We first train a 3D-aware generative mode (i.e., StyleNeRF) with single-view photos. (b) We extend StyleNerf to multi-class StyleNerf. We introduce an effective training strategy: initializing multi-class StyleNeRF with StyleNeRF. (c) The training of the proposed 3D-aware I2I translation. It consists of the encoder $E$ , the adaptor $A$ , the generator $G$ and two mapping networks $M_1$ and $M_2$ . We freeze all networks except for training the adaptor $A$ . The encoder is initialized by the main networks of the pretrained discriminator. We introduce several techniques to address the view-consistency problems: including a U-net-like adaptor $A$ , (d) relative regularization loss and (e) hierarchical representation constrain. (f) Usage of proposed model at inference time. + +by DeepI2I [61], we use the pretrained discriminator (Figure 2(b)) to initialize the encoder $E$ of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I generator. To align the encoder with the generator, [61] introduces a Resnet-like adaptor network to communicate the encoder and decoder. The adaptor is trained without any real data. However, directly using these techniques for 3D-aware I2I translation still suffers from some view-consistency problems. Therefore, in the following, we introduce several designs to address this problem: a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. + +U-net-like adaptor. As shown in Figure 2(c), to overcome 3D-inconsistency in the results, we propose a U-net-like adaptor $A$ . This design contributes to preserve the spatial structure of the input feature. This has been used before for semantic segmentation tasks and label to image translation [17]. 
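For illustration, a minimal PyTorch sketch of what such a U-net-like adaptor $A$ could look like is given below. The depth, channel widths and names are illustrative assumptions rather than the exact architecture we train, but the skip connections convey how the spatial structure of the input feature map is preserved.

```python
import torch
import torch.nn as nn

class UNetAdaptor(nn.Module):
    """Toy U-net-like adaptor A: encoder features -> NeRF-space feature map f_hat.

    Skip connections carry high-resolution spatial information across the
    bottleneck, which is what preserves the spatial structure of the input.
    Channel widths and depth are illustrative only.
    """

    def __init__(self, in_ch: int = 256, mid_ch: int = 512, out_ch: int = 256):
        super().__init__()
        self.down1 = nn.Sequential(nn.Conv2d(in_ch, mid_ch, 3, stride=2, padding=1), nn.LeakyReLU(0.2))
        self.down2 = nn.Sequential(nn.Conv2d(mid_ch, mid_ch, 3, stride=2, padding=1), nn.LeakyReLU(0.2))
        self.up1 = nn.Sequential(nn.ConvTranspose2d(mid_ch, mid_ch, 4, stride=2, padding=1), nn.LeakyReLU(0.2))
        self.up2 = nn.Sequential(nn.ConvTranspose2d(mid_ch, out_ch, 4, stride=2, padding=1), nn.LeakyReLU(0.2))
        self.skip = nn.Conv2d(mid_ch, mid_ch, 1)           # 1x1 conv on the skip path
        self.head = nn.Conv2d(out_ch + in_ch, out_ch, 3, padding=1)

    def forward(self, e: torch.Tensor) -> torch.Tensor:
        d1 = self.down1(e)                   # 1/2 resolution
        d2 = self.down2(d1)                  # 1/4 resolution
        u1 = self.up1(d2) + self.skip(d1)    # skip connection at 1/2 resolution
        u2 = self.up2(u1)                    # back to input resolution
        return self.head(torch.cat([u2, e], dim=1))  # skip from the input itself

# Toy usage: a 256-channel, 32x32 encoder feature map.
f_hat = UNetAdaptor()(torch.randn(1, 256, 32, 32))  # -> (1, 256, 32, 32)
```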
In this paper, we experimentally demonstrate that the U-net-like adaptor is effective in reducing the inconsistency.

Hierarchical representation constraint. As shown in Figure 2(e), given the noise $\mathbf{z}$, the 3D location $\mathbf{x}$ and the 2D viewing direction $\mathbf{d}$, the fully connected layer $F$ renders the 3D-consistent feature map $\mathbf{f} = F(\mathbf{x}, \mathbf{d}, \mathbf{w}_1) = F(\mathbf{x}, \mathbf{d}, M_1(\mathbf{z}))$. We further extract the hierarchical representations $\{G(\mathbf{f}, \mathbf{w}_1, \mathbf{w}_2)_k\}$ as well as the synthesized image $\hat{I}_{RGB} = G(\mathbf{f}, \mathbf{w}_1, \mathbf{w}_2)$. Here $G(\mathbf{f}, \mathbf{w}_1, \mathbf{w}_2)_k$ is the $k$-th $(k = m, \dots, n,\ n > m)$ ResBlock$^{1}$ output of the generator $G$. We then take the generated image $\hat{I}_{RGB}$ as input to the encoder $E$, whose output $E(\hat{I}_{RGB})$ is fed into the adaptor network $A$, that is, $\hat{\pmb{f}} = A(E(\hat{I}_{RGB}))$. In this step, our loss is

$$
\mathcal{L}_{A} = \left\| \boldsymbol{f} - \hat{\boldsymbol{f}} \right\|_{1}. \tag{4}
$$

For the intermediate layers, we propose a hierarchical representation constraint. Given the output $\hat{\pmb{f}}$ and the latent codes (i.e., $\pmb{w}_1$ and $\pmb{w}_2$)$^{2}$, we similarly collect the hierarchical features $\left\{G(\hat{\pmb{f}}, \pmb{w}_1, \pmb{w}_2)_k\right\}$. The objective is

$$
\mathcal{L}_{H} = \sum_{k} \left\| G(\boldsymbol{f}, \boldsymbol{w}_{1}, \boldsymbol{w}_{2})_{k} - G(\hat{\boldsymbol{f}}, \boldsymbol{w}_{1}, \boldsymbol{w}_{2})_{k} \right\|_{1}. \tag{5}
$$

In this step, we freeze every network and train only the U-net-like adaptor. Note that we do not need access to any real data to train the adaptor, since we use the images generated by the trained generator (Figure 2(b)).

Relative regularization loss. We aim to impose consistency on the translated 3D scene using a single-image regularization rather than images from consecutive views. We propose a relative regularization loss based on neighboring patches. We assume that neighboring patches
| Method | CelebA-HQ TC↓ | CelebA-HQ FID↓ | AFHQ TC↓ | AFHQ FID↓ |
|---|---|---|---|---|
| *MUNIT | 30.240 | 31.4 | 28.497 | 41.5 |
| *DRIT | 35.452 | 52.1 | 25.341 | 95.6 |
| *MSGAN | 31.641 | 33.1 | 34.236 | 61.4 |
| StarGANv2 | 10.250 | 13.6 | 3.025 | 16.1 |
| Ours (3D) | 3.743 | 22.3 | 2.067 | 15.3 |

| Method | CelebA-HQ TC↓ | CelebA-HQ (unc)FID↓ | AFHQ TC↓ | AFHQ (unc)FID↓ |
|---|---|---|---|---|
| †Liu et al. [34] | 13.315 | 17.8 | 3.462 | 20.0 |
| StarGANv2 | 10.250 | 12.2 | 3.025 | 9.9 |
| †Kunhee et al. [23] | 10.462 | 6.7 | 3.241 | 10.0 |
| Ours (3D) | 3.743 | 18.7 | 2.067 | 11.4 |

Table 1. Comparison with baselines on TC and FID metrics. * denotes that we used the results provided by StarGANv2. † means that we used the pre-trained networks provided by the authors.
Relative regularization loss. We aim to impose consistency on the translated 3D scene with single-image regularization, instead of relying on images from consecutive views. We propose a relative regularization loss based on neighboring patches. We assume that the relations between neighboring patches are equivalent to those between the corresponding patches of two consecutive views. For example, when inputting multi-view consistent scene images, the positions of the eyes move consistently across views. The fully connected network (i.e., the NeRF model) $F$ renders the view-consistent feature map $f$, which ultimately determines the view-consistent reconstructed 3D scene. Thus, we expect the output $\hat{f}$ of the adaptor $A$ to inherit the view-consistent property of the feature map $f$.

We randomly sample one vector from the feature map $\pmb{f}$ (e.g., the red square in Figure 2(d)), denoted as $\pmb{f}^{\eta}$. Then we sample the eight nearest neighboring vectors of $\pmb{f}^{\eta}$ (dark green squares in Figure 2(d)), denoted by $\pmb{f}^{\eta, \varepsilon}$, where $\varepsilon = 1, \dots, 8$ is the neighbor index. Similarly, we sample vectors $\hat{\pmb{f}}^{\eta}$ and $\hat{\pmb{f}}^{\eta, \varepsilon}$ from the feature map $\hat{\pmb{f}}$ (red and dark green dashed squares in Figure 2(d)). We then compute the patch differences:

$$
d_{\boldsymbol{f}}^{\eta, \varepsilon} = \boldsymbol{f}^{\eta} \ominus \boldsymbol{f}^{\eta, \varepsilon}, \quad d_{\hat{\boldsymbol{f}}}^{\eta, \varepsilon} = \hat{\boldsymbol{f}}^{\eta} \ominus \hat{\boldsymbol{f}}^{\eta, \varepsilon}, \tag{6}
$$

where $\ominus$ represents vector subtraction. In order to preserve the consistency, we force these patch differences to be close to each other:

$$
\mathcal{L}_{R} = \left\| d_{\boldsymbol{f}}^{\eta, \varepsilon} - d_{\hat{\boldsymbol{f}}}^{\eta, \varepsilon} \right\|_{1}. \tag{7}
$$

The underlying intuition is straightforward: the difference vectors at the same location should be the most closely related in the latent space, compared to other random pairs.

The final objective is

$$
\mathcal{L} = \mathcal{L}_{H} + \mathcal{L}_{A} + \mathcal{L}_{R}. \tag{8}
$$

# 4. Experiments

# 4.1. Experimental setup

Training details. We use the trained StyleNeRF to partially initialize our multi-class StyleNeRF architecture. We adapt the structure of the multi-class StyleNeRF to the 3D-aware I2I architecture. The proposed method is implemented in PyTorch [47]. We use Adam [25] with a batch size of 64 and a learning rate of 0.0002. We use $2 \times$ Quadro RTX 3090 GPUs (24 GB VRAM) to conduct all our experiments. We show the network details and more results in the Supp. Mat.
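Returning to the relative regularization loss above (Eqs. 6-7), the following is a minimal PyTorch sketch of how it could be computed; this term is then added to $\mathcal{L}_A$ and $\mathcal{L}_H$ to form the final objective of Eq. (8). The (B, C, H, W) feature-map layout and the sampling of a single interior position per batch element are illustrative assumptions.

```python
import torch

def relative_regularization_loss(f, f_hat):
    """Relative regularization loss (Eqs. 6-7), sketched for (B, C, H, W) feature maps.

    A random position eta is sampled together with its eight nearest neighbours;
    the vector differences within f and within f_hat are forced to match.
    """
    B, C, H, W = f.shape
    # sample one interior position per batch element so that all 8 neighbours exist
    ys = torch.randint(1, H - 1, (B,))
    xs = torch.randint(1, W - 1, (B,))
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
    loss = 0.0
    for b in range(B):
        y, x = ys[b].item(), xs[b].item()
        center_f, center_fh = f[b, :, y, x], f_hat[b, :, y, x]
        for dy, dx in offsets:
            d_f = center_f - f[b, :, y + dy, x + dx]        # Eq. (6): patch difference in f
            d_fh = center_fh - f_hat[b, :, y + dy, x + dx]  # Eq. (6): patch difference in f_hat
            loss = loss + (d_f - d_fh).abs().mean()         # Eq. (7): L1 between the differences
    return loss / (B * len(offsets))
```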
| Ini. | Ada. | Hrc. | Rrl. | TC↓ | FID↓ |
|---|---|---|---|---|---|
| Y | N | N | N | 2.612 | 23.8 |
| Y | Y | N | N | 2.324 | 23.1 |
| Y | Y | Y | N | 2.204 | 16.1 |
| Y | Y | Y | Y | 2.067 | 15.3 |
Table 2. Impact of several components on the performance on AFHQ. The second row is the case where the 3D-aware I2I translation model is initialized with weights learned from the multi-class StyleNeRF; it is then trained with a ResNet-based adaptor and an $L_{1}$ loss between the representations $f$ and $\hat{f}$. The proposed techniques continuously improve the consistency and performance. Ini.: initialization method for multi-class StyleNeRF, Ada.: U-net-like adaptor, Hrc.: hierarchical representation constraint, Rrl.: relative regularization loss.

![](images/eb275f460a0b3cfa2c4f46a236179adafb7a047d7edcab94d40fe236008a81fe.jpg)

![](images/5352e8612629871db006d9a0389bb05f6f4aa4bd3b02af7470e50f420b899545.jpg)
Figure 3. (Top) Using a single mapping network which takes as input the concatenated class embedding and the noise. We find that it fails to generate target-specific realistic images. (Bottom) Using two mapping networks but, unlike the proposed method, without concatenating their outputs. This design fails to generate 3D-aware results.

Datasets. Our experiments are conducted on the Animal Faces (AFHQ) [7] and CelebA-HQ [21] datasets. AFHQ contains 3 classes, each with about 5000 images. In CelebA-HQ, we use gender as the class, with $\sim 10\mathrm{k}$ (10057) male and $\sim 18\mathrm{k}$ (17943) female images in the training set. In this paper, all images are resized to $256 \times 256$.

![](images/362609a517965255f3860b4570bf070f137aeef444948dbcba8435065046331a.jpg)
Figure 4. Comparative results between the proposed method and StarGANv2. We observe that StarGANv2 underestimates viewpoint changes when changing the input viewpoint (first column). It also leads to identity changes (third and fourth columns) and a geometrically unrealistic ear (last two columns).

![](images/d7188db05ea4a7091eb376ac596223270013f449c014d143d1237e2075be336d.jpg)
Figure 5. The generated images of (top) $G(\pmb{f}, \pmb{w}_1, \pmb{w}_2)$ and (bottom) $G(\hat{\pmb{f}}, \pmb{w}_1, \pmb{w}_2)$, which show that we correctly align the outputs of both the NeRF model $F$ and the adaptor $A$.

Baselines. We compare to MUNIT [15], DRIT [28], MSGAN [20], StarGANv2 [7], [23] and [34], all of which perform image-to-image translation.

Evaluation Measures. We employ the widely used Fréchet Inception Distance (FID) [14] for evaluation. We also propose a new measure that combines two metrics: one which measures the change between neighboring frames (which we want to be low), and another that measures the diversity over the whole video (which we would like to be high). We adopt a modified temporal loss (TL) [54]. This temporal loss computes the Frobenius difference between two frames to evaluate video consistency. Considering this measure alone would yield deceptively good scores when all neighboring frames in the generated video are identical. For successful 3D-aware I2I translation, we expect the system to be sensitive to view changes in the source video, and therefore combine low consecutive frame changes with high diversity over the video. Therefore, we propose to compute LPIPS [67] for each video (vLPIPS), which indicates the diversity of the generated video sequence. To evaluate both the consistency and the sensitivity of the generated video, we propose a new temporal consistency metric (TC):

$$
\mathrm{TC} = \mathrm{TL} / \mathrm{vLPIPS}. \tag{9}
$$

Due to the small changes between two consecutive views, for each video we use frame intervals of 1, 2 and 4 to evaluate view-consistency. Note that a lower TC value is better.
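The following is a minimal sketch of how TC could be computed for one generated video. It uses the lpips package for vLPIPS and a simple Frobenius-norm frame difference as a stand-in for the modified temporal loss of [54], whose exact definition is more involved, so this is illustrative only.

```python
import torch
import lpips  # pip install lpips

def temporal_consistency(frames, intervals=(1, 2, 4)):
    """TC = TL / vLPIPS (Eq. 9) for one video given as a (T, 3, H, W) tensor in [-1, 1].

    TL: mean Frobenius difference between frames at the given intervals (lower = more consistent).
    vLPIPS: mean LPIPS distance between the same frame pairs (higher = more diverse).
    """
    lpips_fn = lpips.LPIPS(net="alex")
    tl_vals, lp_vals = [], []
    for dt in intervals:
        a, b = frames[:-dt], frames[dt:]                       # frame pairs at interval dt
        tl_vals.append(torch.linalg.norm((a - b).flatten(1), dim=1).mean())
        with torch.no_grad():
            lp_vals.append(lpips_fn(a, b).mean())
    tl = torch.stack(tl_vals).mean()
    v_lpips = torch.stack(lp_vals).mean()
    return (tl / v_lpips).item()
```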
# 4.2. Quantitative and qualitative results

We evaluate the performance of the proposed method on both the AFHQ animal and the CelebA-HQ human face datasets. As reported in Table 1, in terms of TC the proposed method achieves the best score on both datasets. For example, we

![](images/eaf356b46896241f4df6c80258727e840afe23a669cf43b2da75e5abcda410e1.jpg)
Figure 6. Interpolation between the dog and wildlife classes.

have 3.743 TC on CelebA-HQ, which is better than StarGANv2 (10.250 TC). This indicates that our method dramatically improves consistency. As reported in Table 1 (top), across both datasets the proposed method consistently outperforms the baselines with significant gains in terms of FID and LPIPS, except for StarGANv2, which obtains superior results; however, on AFHQ we achieve a better FID score than StarGANv2. Kunhee et al. [23] report the unconditional FID ((unc)FID) value, which is computed between synthesized images and training samples instead of per class. As reported in Table 1 (bottom), we achieve competitive results on the (unc)FID metric. Note that while 2D I2I translation methods (e.g., StarGANv2) can obtain high quality for each individual image, they cannot synthesize images of the same scene with 3D consistency, and suffer from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video.

In Figures 1 and 4, we perform 3D-aware I2I translation. When changing the input viewpoint (Figure 4 (first two columns)), the outputs of StarGANv2 do not maintain the correct head pose, and underestimate the pose changes with respect to the frontal view. To verify that this is actually the case, we also compute the diversity (i.e., vLPIPS) within a single video sequence. For example, StarGANv2 and our method obtain 0.032 and 0.101, respectively, on CelebA-HQ. This confirms that the diversity (due to pose changes) is lowest for StarGANv2. More clearly showing the limitations of standard I2I methods for 3D-aware I2I, we observe that StarGANv2 suffers from unrealistic changes when changing the viewpoint. For example, when translating the class cat to wildlife, the generated images change from wolf to leopard when varying the viewpoint (Figure 4 (third and fourth columns)). Also, the main target class characteristics, such as ears, are not geometrically realistic, leading to unrealistic 3D scene videos. Our method, however, eliminates these shortcomings and performs efficient high-resolution image translation with high 3D-consistency, preserving the input image pose while changing the style of the output images. We show high-resolution images $(1024 \times 1024)$ in the Supp. Mat.

# 4.3. Ablation study

Conditional 3D-aware generative architecture. In this experiment, we verify our network design by comparing it with two alternative network designs. As shown in Figure 3 (top), we explore a naive strategy: using a single mapping network which takes as input the concatenated class embedding and the noise. In this way, the mapping network outputs the class-specific latent code $w$, which is fed into the fully connected network $F$ to output the class-specific representation $f$.
Here, both the latent code $w$ and the representation $f$ are determined by the same class. However, when handling the 3D-aware multi-class I2I translation task, the feature representation $\hat{f}$ is combined with latent codes $w$ from varying class embeddings, which leads to unrealistic image generation (Figure 3 (top)).

As shown in Figure 3 (bottom), we utilize two mapping networks but, unlike the proposed method, without concatenating their outputs. This design guarantees that the output of the fully connected layers $F$ is class-agnostic. We experimentally observe that this model fails to handle 3D-aware generation.

Effective training strategy for multi-class 3D-aware generative model. We evaluate the proposed training strategy on the AFHQ and CelebA-HQ datasets. We initialize the proposed multi-class 3D-aware architecture from scratch and with the proposed method, respectively. As shown in Figure 7 (top), the model trained from scratch synthesizes unrealistic faces on the CelebA-HQ dataset and low-quality cats on AFHQ. This is because the style-based conditional generator is hard to optimize and directly causes mode collapse [49]. The proposed training strategy, however, manages to synthesize photo-realistic high-resolution images with high multi-view consistency. This training strategy first performs unconditional learning, which leads to satisfactory generative ability. Thus, we reduce the difficulty of directly training the conditional model.

Alignment and interpolation. Figure 5 exhibits the outputs of the generator when taking as input the feature representations $\pmb{f}$ and $\hat{\pmb{f}}$. This confirms that the proposed method successfully aligns the outputs of the fully connected layers $F$ and the adaptor $A$. Figure 6 reports interpolation by freezing the input images while interpolating the class embedding between two classes. Our model still manages to preserve the view-consistency and generate high-quality images even when given never-seen class embeddings.

![](images/755a42b84a04c04efaedb54b9602a7203a1c2ff6c438f6557270cb0b943f1f84d.jpg)
Figure 7. Qualitative results of multi-class StyleNeRF training from scratch (top) and with the proposed strategy (bottom).

Techniques for improving the view-consistency. We perform an ablation study on the impact of several design elements on the overall performance of the system, which includes the proposed initialization of the 3D-aware I2I translation model (Ini.), the U-net-like adaptor (Ada.), the hierarchical representation constraint (Hrc.) and the relative regularization loss (Rrl.). We evaluate these four factors in Table 2. The results show that only using the proposed initialization (the second row of Table 2) already improves the view-consistency compared to StarGANv2 (Table 1). Utilizing either the U-net-like adaptor (Ada.) or the hierarchical representation constraint (Hrc.) leads to further performance gains. Finally, we obtain the best score when further adding the relative regularization loss (Rrl.) to the 3D-aware I2I translation model.

# 5. Conclusion

In this paper, we are the first to explore 3D-aware I2I translation. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. In the first step, we propose a new multi-class StyleNeRF architecture and an effective training strategy. We design the 3D-aware I2I translation model with the well-optimized multi-class StyleNeRF model, so it inherits the capacity of synthesizing 3D-consistent images.
In the second step, we propose several techniques to further reduce the view-inconsistency of the 3D-aware I2I translation.

Acknowledgement. We acknowledge the support from the Key Laboratory of Advanced Information Science and Network Technology of Beijing (XDXX2202), and the project supported by the Youth Foundation (62202243). We acknowledge Spanish Government funding for projects PID2019-104174GB-I00 and TED2021-132513B-I00.

# References

[1] Kyungjune Baek, Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Hyunjung Shim. Rethinking the truly unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14154-14163, 2021. 3
[2] Aayush Bansal, Shugao Ma, Deva Ramanan, and Yaser Sheikh. Recycle-gan: Unsupervised video retargeting. In Proceedings of the European conference on computer vision (ECCV), pages 119-135, 2018. 2
[3] Dina Bashkirova, Ben Usman, and Kate Saenko. Unsupervised video-to-video translation. arXiv preprint arXiv:1806.03698, 2018. 2
[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2
[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 2
[6] Yang Chen, Yingwei Pan, Ting Yao, Xinmei Tian, and Tao Mei. Mocycle-gan: Unpaired video-to-video translation. In Proceedings of the 27th ACM International Conference on Multimedia, pages 647-655, 2019. 2
[7] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 3, 5, 6
[8] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10673-10683, 2022. 2
[9] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411. IEEE, 2017. 2
[10] Abel Gonzalez-Garcia, Joost van de Weijer, and Yoshua Bengio. Image-to-image translation for cross-domain disentanglement. In NeurIPS, pages 1294-1305, 2018. 3
[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, pages 2672-2680, 2014. 2
[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 2, 3
[13] Paul Henderson, Vagia Tsiminaki, and Christoph H Lampert. Leveraging 2d data to learn textured 3d mesh generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7498-7507, 2020. 2
[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, pages 6626-6637, 2017. 6
[15] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation.
In ECCV, pages 172-189, 2018. 2, 6 +[16] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2, 3 +[17] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, pages 1125-1134, 2017. 4 +[18] Somi Jeong, Youngjung Kim, Eungbean Lee, and Kwanghoon Sohn. Memory-guided unsupervised image-to-image translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6558-6567, 2021. 3 +[19] Danilo Jimenez Rezende, SM Eslami, Shakir Mohamed, Peter Battaglia, Max Jaderberg, and Nicolas Heess. Unsupervised learning of 3d structure from images. Advances in neural information processing systems, 29, 2016. 2 +[20] Animesh Karnewar and Oliver Wang. *Msg-gan: Multi-scale gradients for generative adversarial networks*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7799–7808, 2020. 6 +[21] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018. 5 +[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 2 +[23] Kunhee Kim, Sanghun Park, Eunyeong Jeon, Taehun Kim, and Daijin Kim. A style-aware discriminator for controllable image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18239-18248, 2022. 5, 6, 7 +[24] Taeksoo Kim, Moonsu Cha, Hyunsoo Kim, Jungkwon Lee, and Jiwon Kim. Learning to discover cross-domain relations with generative adversarial networks. In ICML, 2017. 3 +[25] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. *ICLR*, 2014. 5 +[26] Minsu Ko, Eunju Cha, Sungjoo Suh, Huijin Lee, Jae-Joon Han, Jinwoo Shin, and Bohyung Han. Self-supervised dense consistency regularization for image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18301-18310, June 2022. 2 +[27] Héctor Laria, Yaxing Wang, Joost van de Weijer, and Bogdan Raducanu. Hyper-gan: Transferring unconditional to conditional gans with hypernetworks. arXiv preprint arXiv:2112.02219, 2021. 3 +[28] Hsin-Ying Lee, Hung-Yu Tseng, Jia-Bin Huang, Maneesh Kumar Singh, and Ming-Hsuan Yang. Diverse imaged-to-image translation via disentangled representations. In ECCV, 2018. 2, 6 +[29] Hsin-Ying Lee, Hung-Yu Tseng, Qi Mao, Jia-Bin Huang, Yu-Ding Lu, Maneesh Singh, and Ming-Hsuan Yang. Drit++: Diverse image-to-image translation via disentangled representations. IJCV, pages 1-16, 2020. 3 + +[30] Kangning Liu, Shuhang Gu, Andrés Romero, and Radu Timofte. Unsupervised multimodal video-to-video translation via self-supervised learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1030–1040, 2021. 2 +[31] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. NeurIPS, 2020. 2 +[32] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. In NeurIPS, pages 700-708, 2017. 3 +[33] Ming-Yu Liu, Xun Huang, Arun Mallya, Tero Karras, Timo Aila, Jaakko Lehtinen, and Jan Kautz. Few-shot unsupervised image-to-image translation. In CVPR, pages 10551-10560, 2019. 
3 +[34] Yahui Liu, Enver Sangineto, Yajing Chen, Linchao Bao, Haoxian Zhang, Nicu Sebe, Bruno Lepri, Wei Wang, and Marco De Nadai. Smoothing the disentangled latent style space for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10785-10794, 2021. 5, 6 +[35] Sebastian Lunz, Yingzhen Li, Andrew Fitzgibbon, and Nate Kushman. Inverse graphics gan: Learning to generate 3d shapes from unstructured 2d data. arXiv preprint arXiv:2002.12674, 2020. 2 +[36] Youssef Alami Mejjati, Christian Richardt, James Tompkin, Darren Cosker, and Kwang In Kim. Unsupervised attention-guided image-to-image translation. In NeurIPS, pages 3693-3703, 2018. 3 +[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[38] Mateusz Michalkiewicz, Jhony K. Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 2 +[39] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. arXiv preprint arXiv:2003.08934, 2020. 2 +[40] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7588–7597, 2019. 2 +[41] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2 +[42] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2 + +[43] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. arXiv preprint arXiv:1912.07372, 2019. 2 +[44] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 2 +[45] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2 +[46] Taesung Park, Alexei A. Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for conditional image synthesis. In ECCV, 2020. 2, 3 +[47] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5 +[48] Songyou Peng, Michael Niemeyer, Lars M. Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. ArXiv, abs/2003.04618, 2020. 2 +[49] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. 
In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-10, 2022. 7 +[50] Xuning Shao and Weidong Zhang. Spatchgan: A statistical feature based discriminator for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6546-6555, 2021. 3 +[51] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems, pages 1119–1130, 2019. 2 +[52] Ayush Tewari, Xingang Pan, Ohad Fried, Maneesh Agrawala, Christian Theobalt, et al. Disentangled3d: Learning a 3d generative model with disentangled geometry and appearance from monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1516-1525, 2022. 2 +[53] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In NeurIPS, 2018. 2 +[54] Wenjing Wang, Shuai Yang, Jizheng Xu, and Jiaying Liu. Consistent video style transfer via relaxation and regularization. IEEE Transactions on Image Processing, 29:9125-9139, 2020. 6 +[55] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. Minegan: effective knowledge transfer from gans to target domains with few images. In CVPR, 2020. 2 +[56] Yaxing Wang, Abel Gonzalez-Garcia, Joost van de Weijer, and Luis Herranz. SDIT: Scalable and diverse cross-domain image translation. In ACM MM, 2019. 3 + +[57] Yaxing Wang, Salman Khan, Abel Gonzalez-Garcia, Joost van de Weijer, and Fahad Shahbaz Khan. Semi-supervised learning for few-shot image-to-image translation. In CVPR, 2020. 2 +[58] Yaxing Wang, Hector Laria Mantecon, Joost van de Weijer, Laura Lopez-Fuentes, and Bogdan Raducanu. Transferi2i: Transfer learning for image-to-image translation from small datasets, 2021. 3 +[59] Yaxing Wang, Joost van de Weijer, and Luis Herranz. Mix and match networks: encoder-decoder alignment for zeropair image translation. In CVPR, pages 5467-5476, 2018. 2 +[60] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring gans: generating images from limited data. In ECCV, pages 218-234, 2018. 2 +[61] Yaxing Wang, Lu Yu, and Joost van de Weijer. Deep2i: Enabling deep hierarchical image-to-image translation by transferring from gans. NeurIPS, 2020. 2, 4 +[62] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 2 +[63] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Unsupervised image-to-image translation with generative prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18332-18341, 2022. 2, 3 +[64] Zili Yi, Hao Zhang, Ping Tan Gong, et al. Dualgan: Unsupervised dual learning for image-to-image translation. In ICCV, 2017. 3 +[65] Xiaoming Yu, Yuanqi Chen, Shan Liu, Thomas Li, and Ge Li. Multi-mapping image-to-image translation via learning disentanglement. In NeurIPS, pages 2990-2999, 2019. 2, 3 +[66] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[67] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. 
The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6 +[68] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 2 +[69] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 2 +[70] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV, pages 2223-2232, 2017. 2, 3 + +[71] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darryll, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. In NeurIPS, pages 465-476, 2017. 3 \ No newline at end of file diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/images.zip b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..36c03d4fc315057eff05439d55d7a190c2197228 --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:730049ff646980559f2e6cb850ec2025614c9cf6f24ee7bb68bf6e9857ef98d9 +size 1001715 diff --git a/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/layout.json b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0615eaf3137c6c4965b3bed15451125f96012dd8 --- /dev/null +++ b/2023/3D-Aware Multi-Class Image-to-Image Translation With NeRFs/layout.json @@ -0,0 +1,9752 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 100, + 103, + 493, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 103, + 493, + 121 + ], + "spans": [ + { + "bbox": [ + 100, + 103, + 493, + 121 + ], + "type": "text", + "content": "3D-Aware Multi-Class Image-to-Image Translation with NeRFs" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "spans": [ + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "text", + "content": "Senmao Li" + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "text", + "content": " Joost van de Weijer" + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "text", + "content": " Yaxing Wang" + }, + { + "bbox": [ + 167, + 150, + 430, + 166 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "spans": [ + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "text", + "content": "Fahad Shahbaz Khan" + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "inline_equation", + "content": "^{3,4}" + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": 
"text", + "content": " Meiqin Liu" + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "text", + "content": " Jian Yang" + }, + { + "bbox": [ + 174, + 168, + 430, + 184 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "spans": [ + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "type": "text", + "content": "VCIP,CS, Nankai University, " + }, + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 186, + 460, + 201 + ], + "type": "text", + "content": "Universitat Autònoma de Barcelona" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "spans": [ + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "text", + "content": "Mohamed bin Zayed University of AI, " + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "text", + "content": "Linkoping University, " + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 74, + 205, + 518, + 220 + ], + "type": "text", + "content": "Beijing Jiaotong University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 118, + 226, + 475, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 226, + 475, + 238 + ], + "spans": [ + { + "bbox": [ + 118, + 226, + 475, + 238 + ], + "type": "text", + "content": "senmaonk@gmail.com {yaxing,csjyang}@nankai.edu.cn joost@cvc.uab.es" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 200, + 244, + 390, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 244, + 390, + 255 + ], + "spans": [ + { + "bbox": [ + 200, + 244, + 390, + 255 + ], + "type": "text", + "content": "fahad.khan@liu.se mqliu@bjtu.edu.cn" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 47, + 262, + 545, + 527 + ], + "blocks": [ + { + "bbox": [ + 47, + 262, + 545, + 527 + ], + "lines": [ + { + "bbox": [ + 47, + 262, + 545, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 545, + 527 + ], + "type": "image", + "image_path": "22fc17fea1f2d332d96b8f08d9e486c59a1987425629a67389d12cce8b964b1e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 529, + 547, + 552 + ], + "lines": [ + { + "bbox": [ + 46, + 529, + 547, + 552 + ], + "spans": [ + { + "bbox": [ + 46, + 529, + 547, + 552 + ], + "type": "text", + "content": "Figure 1. 3D-aware I2I translation: given a view-consistent 3D scene (the input), our method maps it into a high-quality target-specific image. Our approach produces consistent results across viewpoints." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 564, + 192, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 564, + 192, + 577 + ], + "spans": [ + { + "bbox": [ + 143, + 564, + 192, + 577 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 589, + 288, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 589, + 288, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 589, + 288, + 696 + ], + "type": "text", + "content": "Recent advances in 3D-aware generative models (3D-aware GANs) combined with Neural Radiance Fields (NeRF) have achieved impressive results. However no prior works investigate 3D-aware GANs for 3D consistent multiclass image-to-image (3D-aware I2I) translation. Naively using 2D-I2I translation methods suffers from unrealistic shape/identity change. To perform 3D-aware multi-class I2I translation, we decouple this learning process into a multi-class 3D-aware GAN step and a 3D-aware I2I trans" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 565, + 547, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 565, + 547, + 708 + ], + "spans": [ + { + "bbox": [ + 306, + 565, + 547, + 708 + ], + "type": "text", + "content": "lation step. In the first step, we propose two novel techniques: a new conditional architecture and an effective training strategy. In the second step, based on the well-trained multi-class 3D-aware GAN architecture, that preserves view-consistency, we construct a 3D-aware I2I translation system. To further reduce the view-consistency problems, we propose several new techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss. In extensive experiments on two datasets, quantitative and qualitative results demonstrate that we successfully perform 3D-aware I2I translation with multi-view consistency. Code is" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 704, + 149, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 704, + 149, + 714 + ], + "spans": [ + { + "bbox": [ + 58, + 704, + 149, + 714 + ], + "type": "text", + "content": "*The corresponding author." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 294, + 749, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 758 + ], + "type": "text", + "content": "12652" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 107, + 128, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 128, + 119 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 128, + 119 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 127, + 287, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 127, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 127, + 287, + 258 + ], + "type": "text", + "content": "Neural Radiance Fields (NeRF) have increasingly gained attention with their outstanding capacity to synthesize high-quality view-consistent images [31,39,66]. Benefiting from the adversarial mechanism [11], StyleNeRF [12] and concurrent works [4, 8, 44, 69] have successfully synthesized high-quality view-consistent, detailed 3D scenes by combining NeRF with StyleGAN-like generator design [22]. This recent progress in 3D-aware image synthesis has not yet been extended to 3D-aware I2I translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class (see Figure 1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 259, + 288, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 259, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 46, + 259, + 288, + 437 + ], + "type": "text", + "content": "A naive strategy is to use well-designed 2D-I2I translation methods [15, 16, 26, 28, 46, 63, 65, 70]. These methods, however, suffer from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video. Main target class characteristics, such as hairs, ears, and noses, are not geometrically realistic, leading to unrealistic results which are especially disturbing when applying I2I to translate videos. Also, these methods typically underestimate the viewpoint change and result in target videos with less viewpoint change than the source video. Another direction is to apply video-to-video synthesis methods [2, 3, 6, 30, 53]. These approaches, however, either rely heavily on labeled data or multi-view frames for each object. In this work, we assume that we only have access to single-view RGB data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 437, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 437, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 437, + 288, + 715 + ], + "type": "text", + "content": "To perform 3D-aware I2I translation, we extend the theory developed for 2D-I2I with recent developments in 3D-aware image synthesis. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. The former can synthesize view-consistent 3D scenes given a scene label, thereby addressing the 3D inconsistency problems we discussed for 2D-I2I. We will use this 3D-aware generative model to initialize our 3D-aware I2I model. It therefore inherits the capacity of synthesizing 3D consistent images. 
To train effectively a multi-class 3D-aware generative model (see Figure 2(b)), we provide a new training strategy consisting of: (1) training an unconditional 3D-aware generative model (i.e., StyleNeRF) and (2) partially initializing the multiclass 3D-aware generative model (i.e., multi-class StyleNeRF) with the weights learned from StyleNeRF. In the 3D-aware I2I translation step, we design a 3D-aware I2I translation architecture (Figure 2(f)) adapted from the trained multi-class StyleNeRF network. To be specific, we use the main network of the pretrained discriminator (Figure 2(b)) to initialize the encoder " + }, + { + "bbox": [ + 46, + 437, + 288, + 715 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 437, + 288, + 715 + ], + "type": "text", + "content": " of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I gen" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 95 + ], + "type": "text", + "content": "erator (Figure 2(f)). This initialization inherits the capacity of being sensitive to the view information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 96, + 547, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 96, + 547, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 96, + 547, + 191 + ], + "type": "text", + "content": "Directly using the constructed 3D-aware I2I translation model (Figure 2(f)), there still exists some view-consistency problem. This is because of the lack of multi-view consistency regularization, and the usage of the single-view image. Therefore, to address these problems we introduce several techniques, including a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 192, + 535, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 192, + 535, + 204 + ], + "spans": [ + { + "bbox": [ + 317, + 192, + 535, + 204 + ], + "type": "text", + "content": "In sum, our work makes the following contributions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 210, + 545, + 399 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 306, + 210, + 545, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 210, + 545, + 234 + ], + "spans": [ + { + "bbox": [ + 306, + 210, + 545, + 234 + ], + "type": "text", + "content": "- We are the first to explore 3D-aware multi-class I2I translation, which allows generating 3D consistent videos." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 242, + 545, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 242, + 545, + 300 + ], + "spans": [ + { + "bbox": [ + 306, + 242, + 545, + 300 + ], + "type": "text", + "content": "- We decouple 3D-aware I2I translation into two steps. First, we propose a multi-class StyleNeRF. To train this multi-class StyleNeRF effectively, we provide a new training strategy. The second step is the proposal of a 3D-aware I2I translation architecture." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 308, + 545, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 308, + 545, + 357 + ], + "spans": [ + { + "bbox": [ + 306, + 308, + 545, + 357 + ], + "type": "text", + "content": "- To further address the view-inconsistency problem of 3D-aware I2I translation, we propose several techniques: a U-net-like adaptor, a hierarchical representation constraint and a relative regularization loss." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 364, + 545, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 364, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 306, + 364, + 545, + 399 + ], + "type": "text", + "content": "- On extensive experiments, we considerably outperform existing 2D-I2I systems with our 3D-aware I2I method when evaluating temporal consistency." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 410, + 398, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 410, + 398, + 422 + ], + "spans": [ + { + "bbox": [ + 306, + 410, + 398, + 422 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 432, + 545, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 432, + 545, + 528 + ], + "spans": [ + { + "bbox": [ + 304, + 432, + 545, + 528 + ], + "type": "text", + "content": "Neural Implicit Fields. Using neural implicit fields to represent 3D scenes has shown unprecedented quality. [37, 38, 43, 45, 48, 51] use 3D supervision to predict neural implicit fields. Recently, NeRF has shown powerful performance to neural implicit representations. NeRF and its variants [31, 39, 66] utilize a volume rendering technique for reconstructing a 3D scene as a combination of neural radiance and density fields to synthesize novel views." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 531, + 545, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 545, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 545, + 686 + ], + "type": "text", + "content": "3D-aware GANs Recent approaches [5, 9, 13, 19, 35, 40-42, 52, 62, 68] learn neural implicit representations without 3D or multi-view supervisions. Combined with the adversarial loss, these methods typically randomly sample viewpoints, render photorealistic 2D images, and finally optimize their 3D representations. StyleNeRF [12] and concurrent works [4,8,44,69] have successfully synthesized high-quality view-consistent, detailed 3D scenes with StyleGAN-like generator design [22]. In this paper, we investigate 3D-aware image-to-image (3D-aware I2I) translation, where the aim is to translate in a 3D-consistent manner from a source scene to a target scene of another class. We combine transfer learning of GANs [55, 60]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "I2I translation. I2I translation with GAN [16, 57, 59, 61] has increasingly gained attention in computer vision. 
Based" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 72, + 127, + 83 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 127, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 127, + 83 + ], + "type": "text", + "content": "available in 3DI2I." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12653" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 168 + ], + "type": "text", + "content": "on the differences of the I2I translation task, recent works focus on paired I2I translation [10, 16, 71], unpaired I2I translation [1, 18, 24, 27, 32, 36, 46, 50, 56, 58, 63, 64, 70], diverse I2I translation [24, 32, 36, 46, 64, 70] and scalable I2I translation [7, 29, 65]. However, none of these approaches addresses the problem of 3D-aware I2I. For the 3D scenes represented by neural implicit fields, directly using these methods suffers from view-inconsistency." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 182, + 103, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 103, + 194 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 103, + 194 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 205, + 289, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 289, + 315 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 289, + 315 + ], + "type": "text", + "content": "Problem setting. Our goal is to achieve 3D consistent multi-class I2I translation trained on single-view data only. The system is designed to translate a viewpoint-video consisting of multiple images (source domain) into a new, photorealistic viewpoint-video scene of a target class. Furthermore, the system should be able to handle multi-class target domains. We decouple our learning into a multi-class 3D-aware generative model step and a multi-class 3D-aware I2I translation step." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 323, + 253, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 323, + 253, + 336 + ], + "spans": [ + { + "bbox": [ + 47, + 323, + 253, + 336 + ], + "type": "text", + "content": "3.1. Multi-class 3D-aware generative model" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathcal{R}\\mathcal{G}\\mathcal{B}} \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": " be in the image domain. 
In this work, we aim to map a source image into a target sample conditioned on the target domain label " + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "inline_equation", + "content": "l \\in \\{1, \\dots, L\\}" + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": " and a random noise vector " + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^{\\mathbf{Z}}" + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": ". Let vector " + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 342, + 287, + 403 + ], + "type": "text", + "content": " be 3D location and 2D viewing direction, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "spans": [ + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": "Unconditional 3D-aware generative model. StyleNeRF [12] introduces a 5D function (3D location " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and 2D viewing direction " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ") to predict the volume density " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and RGB color " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ". Both " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " are further used to render an image. As shown on Figure 2(a) StyleNeRF consists of four subnetworks: a mapping network " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ", a fully connected layer " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ", a generator " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and a discriminator " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ". 
The mapping network " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " takes random noise " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " as input, and outputs latent code " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ", which is further fed into both the fully connected layer " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and generator " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ". Given the 3D location " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ", the 2D viewing direction " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": " and latent code " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ", StyleNeRF renders the feature map " + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 406, + 289, + 552 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 561, + 201, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 561, + 201, + 586 + ], + "spans": [ + { + "bbox": [ + 83, + 561, + 201, + 586 + ], + "type": "interline_equation", + "content": "\\boldsymbol {f} (\\boldsymbol {r}) = \\int_ {0} ^ {\\infty} p (t) \\boldsymbol {c} (\\boldsymbol {r} (t), \\boldsymbol {d}) d t", + "image_path": "26589583b7de65ae072504f78aff60faf3b71cdf93e7f072c3c1beb269fa1987.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 588, + 287, + 615 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 588, + 287, + 615 + ], + "spans": [ + { + "bbox": [ + 82, + 588, + 287, + 615 + ], + "type": "interline_equation", + "content": "p (t) = \\exp \\left(- \\int_ {0} ^ {t} \\sigma (\\boldsymbol {r} (s)) d s\\right) \\cdot \\sigma_ {\\boldsymbol {w}} (\\boldsymbol {r} (t)) \\tag {1}", + "image_path": "b45b7e065cbfa1c61ea776354ac5ca7214ce7c87d43d40c61c4d42d08256116d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 617, + 161, + 630 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 617, + 161, + 630 + ], + "spans": [ + { + "bbox": [ + 83, + 617, + 161, + 630 + ], + "type": "interline_equation", + "content": "\\boldsymbol {c}, \\sigma = F (\\boldsymbol {x}, \\boldsymbol {d}, \\boldsymbol {w}),", + "image_path": "31f5da5637eeb2a2c0eccf4df081ddb8099fe306c6aa0b8e304cf66e4f7bb7c1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ 
+ { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\boldsymbol{r}(t) = \\boldsymbol{o} + t\\boldsymbol{d}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\boldsymbol{o}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " is the camera origin) is a camera ray for each feature representation position. Generator " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " takes as an input the representation " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\boldsymbol{f}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " and the latent code " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": ", and outputs view-consistent photo-realistic novel result " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\hat{I}_{RGB}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": ". The discriminator " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " is to distinguish real images " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "I_{RGB}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": " from generated images " + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "inline_equation", + "content": "\\hat{I}_{RGB}" + }, + { + "bbox": [ + 47, + 641, + 289, + 715 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 72, + 518, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 518, + 85 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 518, + 85 + ], + "type": "text", + "content": "The fully objective of StyleNeRF is as following:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 95, + 545, + 138 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 95, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 316, + 95, + 545, + 138 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {G} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {p} \\sim \\mathcal {P}} [ v (D (G (F (\\boldsymbol {z}, \\boldsymbol {x}, \\boldsymbol {d}), M (\\boldsymbol {z}))) ] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) + \\lambda \\| \\nabla D (I _ {R G B}) \\| ^ {2}) \\right] \\tag {2} \\\\ + \\beta \\cdot \\mathcal {L} _ {\\mathrm {N e R F - p a t h}} \\\\ \\end{array}", + "image_path": "ac9e6969651ebecde34ffb64b958e4433dfbd166c870ea500f414588e111c9f6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "spans": [ + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "inline_equation", + "content": "v(u) = -\\log (1 + \\exp (-u))" + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "inline_equation", + "content": "p_{\\mathrm{data}}" + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": " is the data distribution. " + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{NeRF - path}}" + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": " is NeRF path regularization used in StyleNeRF. We also set " + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "inline_equation", + "content": "\\beta = 0.2" + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 304, + 147, + 547, + 195 + ], + "type": "text", + "content": " following StyleNeRF." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": "Conditional 3D-aware generative model. Figure 2(b) shows the proposed multi-class 3D-aware generative model (i.e., multi-class StyleNeRF). Compared to the StyleNeRF architecture (Figure 2(a)), we introduce two mapping networks: " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "M_{1}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "M_{2}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": ". 
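As a rough illustration of the adversarial part of Eq. (2), the sketch below writes v(u) = -log(1 + exp(-u)) via softplus and adds the λ‖∇D(I_RGB)‖² penalty on real images; the NeRF path regularizer β·L_NeRF-path is omitted. This is the standard non-saturating formulation we read out of the equation, not the authors' released code.

```python
import torch
import torch.nn.functional as F

def v(u):
    # v(u) = -log(1 + exp(-u)) from Eq. (2), i.e. log-sigmoid(u)
    return -F.softplus(-u)

def discriminator_loss(D, real, fake, lam=0.5):
    """Non-saturating D loss with a gradient penalty lambda * ||grad D(real)||^2.
    Sketch only; the NeRF path term beta * L_NeRF-path is left out."""
    real = real.detach().requires_grad_(True)
    d_real, d_fake = D(real), D(fake.detach())
    grad, = torch.autograd.grad(d_real.sum(), real, create_graph=True)
    penalty = grad.flatten(1).pow(2).sum(dim=1).mean()
    return F.softplus(-d_real).mean() + F.softplus(d_fake).mean() + lam * penalty

def generator_loss(D, fake):
    # The generator maximizes v(D(fake)), i.e. minimizes softplus(-D(fake)).
    return F.softplus(-D(fake)).mean()
```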
The mapping network " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "M_{1}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " outputs the latent code " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_{1}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": ". While the mapping network " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "M_{2}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " takes as input the concatenated noise " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\boldsymbol{z}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " and class embedding " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "e_{l-th}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": ", and outputs the latent code " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_{2}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": ". The second mapping network " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "M_{2}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " aims to guide the generator " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " to synthesize a class-specific image. Here we do not feed the latent code " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_{2}" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " into NeRF's fully connected layer " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": ", since we expect " + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 198, + 545, + 353 + ], + "type": "text", + "content": " to learn a class-agnostic feature representation, which contributes to perform multi-class 3D-aware I2I translation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "content": "To be able to train multi-class StyleNeRF we adapt the loss function. We require " + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "content": " to address multiple adversarial classification tasks simultaneously, as in [33]. 
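A small sketch of the two-mapping-network design described above, under assumed layer sizes (512-d codes, two-layer MLPs, names are ours): M1 consumes only the noise z, while M2 consumes z concatenated with a learned class embedding, so only w2 carries class information and F stays class-agnostic.

```python
import torch
import torch.nn as nn

class ConditionalMappings(nn.Module):
    """Sketch of the two mapping networks in Figure 2(b).
    M1: z -> w1 (fed to the NeRF MLP F and to the generator G).
    M2: [z, e_l] -> w2 (fed only to G, making the synthesis class-specific)."""
    def __init__(self, z_dim=512, w_dim=512, n_classes=3, emb_dim=512):
        super().__init__()
        self.embed = nn.Embedding(n_classes, emb_dim)  # class embedding e_{l-th}
        self.M1 = nn.Sequential(nn.Linear(z_dim, w_dim), nn.LeakyReLU(0.2),
                                nn.Linear(w_dim, w_dim))
        self.M2 = nn.Sequential(nn.Linear(z_dim + emb_dim, w_dim), nn.LeakyReLU(0.2),
                                nn.Linear(w_dim, w_dim))

    def forward(self, z, label):
        w1 = self.M1(z)                                           # class-agnostic code
        w2 = self.M2(torch.cat([z, self.embed(label)], dim=1))    # class-specific code
        return w1, w2
```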
Specifically, given output " + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "inline_equation", + "content": "D \\in \\mathbb{R}^L" + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "content": ", we locate the " + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "content": "-th class response. Using the response for the " + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 354, + 545, + 426 + ], + "type": "text", + "content": "-th class, we compute the adversarial loss and back-propagate gradients:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 435, + 545, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 545, + 495 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {G} ^ {l} = \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {Z}, \\boldsymbol {x} \\sim \\mathcal {P} _ {x}, \\boldsymbol {d} \\sim \\mathcal {P} _ {d}} \\left[ v (D (G (\\hat {I} _ {R G B})) _ {\\boldsymbol {l} - t h} \\right] \\\\ + \\mathbb {E} _ {I _ {R G B} \\sim p _ {\\mathrm {d a t a}}} \\left[ v (- D (I _ {R G B}) _ {l - t h} + \\lambda \\| \\nabla D (I _ {R G B}) _ {l _ {t h}} \\| ^ {2}) \\right] \\\\ + \\beta \\cdot \\mathcal {L} _ {\\text {N e R F - p a t h}}. \\tag {3} \\\\ \\end{array}", + "image_path": "b3198c10abeda94ac5061fc30a086b625263ede1697f64711add5879d83df9de.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": "We initialize the multi-class StyleNeRF with the weights learned with the unconditional StyleNeRF (E.q. 2), since the training from scratch fails to convergence. Results of this are show in Figs. 7. To be specific, we directly copy the weights from the one learned from StyleNeRF for " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "M_{1}" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": " with the same parameter size. For the mapping network " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "M_{2}" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": ", we duplicate the weight from " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": " except for the first layer, which is trained from scratch because of the different parameter sizes. 
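The initialization strategy can be sketched as a shape-matched weight copy: parameters whose names and shapes coincide with the pretrained unconditional StyleNeRF are copied over, while mismatched layers (such as the first layer of M2, which now also consumes the class embedding, or the final L-channel layer of D) keep their fresh initialization and are trained from scratch. The helper below is our reading of that procedure, not the authors' implementation.

```python
import torch.nn as nn

def init_from_unconditional(cond_module: nn.Module, uncond_module: nn.Module):
    """Copy every pretrained parameter whose name and shape match the
    multi-class model; return the names left at random initialization."""
    src = uncond_module.state_dict()
    dst = cond_module.state_dict()
    copied = {k: v for k, v in src.items() if k in dst and v.shape == dst[k].shape}
    dst.update(copied)
    cond_module.load_state_dict(dst)
    return sorted(set(dst) - set(copied))
```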
The discriminator is similarly initialized except for the last layer, which is a new convolution layer with " + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 496, + 545, + 651 + ], + "type": "text", + "content": " output channels. Using the proposed initialization method, we successfully generate class-specific photorealistic high-resolution result." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 658, + 444, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 444, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 444, + 670 + ], + "type": "text", + "content": "3.2. 3D-aware I2I translation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "content": "Figure 2 (f) shows the 3D-aware I2I translation network at inference time. It consists of the encoder " + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "content": ", the generator " + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "content": " and two mapping networks " + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "inline_equation", + "content": "M_{1}" + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "inline_equation", + "content": "M_{2}" + }, + { + "bbox": [ + 304, + 677, + 547, + 714 + ], + "type": "text", + "content": ". Inspired" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12654" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 68, + 544, + 282 + ], + "blocks": [ + { + "bbox": [ + 47, + 68, + 544, + 282 + ], + "lines": [ + { + "bbox": [ + 47, + 68, + 544, + 282 + ], + "spans": [ + { + "bbox": [ + 47, + 68, + 544, + 282 + ], + "type": "image", + "image_path": "be837d796fb2659dd32c9f9068e8e2b55d4d30650a14b68ad0f939ad6e8997d9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "lines": [ + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": "Figure 2. Overview of our method. (a) We first train a 3D-aware generative mode (i.e., StyleNeRF) with single-view photos. (b) We extend StyleNerf to multi-class StyleNerf. We introduce an effective training strategy: initializing multi-class StyleNeRF with StyleNeRF. (c) The training of the proposed 3D-aware I2I translation. 
It consists of the encoder " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": ", the adaptor " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": ", the generator " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": " and two mapping networks " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "M_1" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": ". We freeze all networks except for training the adaptor " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": ". The encoder is initialized by the main networks of the pretrained discriminator. We introduce several techniques to address the view-consistency problems: including a U-net-like adaptor " + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 288, + 546, + 355 + ], + "type": "text", + "content": ", (d) relative regularization loss and (e) hierarchical representation constrain. (f) Usage of proposed model at inference time." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 361, + 289, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 361, + 289, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 289, + 517 + ], + "type": "text", + "content": "by DeepI2I [61], we use the pretrained discriminator (Figure 2(b)) to initialize the encoder " + }, + { + "bbox": [ + 46, + 361, + 289, + 517 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 46, + 361, + 289, + 517 + ], + "type": "text", + "content": " of the 3D-aware I2I translation model (Figure 2(f)), and correspondingly, the pretrained generator (Figure 2(b)) to initialize the 3D-aware I2I generator. To align the encoder with the generator, [61] introduces a Resnet-like adaptor network to communicate the encoder and decoder. The adaptor is trained without any real data. However, directly using these techniques for 3D-aware I2I translation still suffers from some view-consistency problems. Therefore, in the following, we introduce several designs to address this problem: a U-net-like adaptor network design, a hierarchical representation constrain and a relative regularization loss." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 525, + 287, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 525, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 525, + 287, + 622 + ], + "type": "text", + "content": "U-net-like adaptor. 
As shown in Figure 2(c), to overcome 3D-inconsistency in the results, we propose a U-net-like adaptor " + }, + { + "bbox": [ + 46, + 525, + 287, + 622 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 525, + 287, + 622 + ], + "type": "text", + "content": ". This design contributes to preserve the spatial structure of the input feature. This has been used before for semantic segmentation tasks and label to image translation [17]. In this paper, we experimentally demonstrate that the U-net-like adaptor is effective to reduce the inconsistency." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": "Hierarchical representation constrain. As shown in Figure 2(e), given the noise " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ", 3D location " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " and 2D viewing direction " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " the fully connected layer " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " renders the 3D-consistent feature map " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathbf{f} = F(\\mathbf{x}, \\mathbf{d}, \\mathbf{w}_1) = F(\\mathbf{x}, \\mathbf{d}, M1(\\mathbf{z}))" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ". We further extract the hierarchical representation " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\{G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k\\}" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " as well as the synthesized image " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\hat{I}_{RGB} = G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": ". 
Here " + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "inline_equation", + "content": "G(\\mathbf{f}, \\mathbf{w}_1, \\mathbf{w}_2)_k" + }, + { + "bbox": [ + 46, + 629, + 288, + 715 + ], + "type": "text", + "content": " is the" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": "-th " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "(k = m, \\dots, n, (n > m))" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": " ResBlock " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": " output of the generator " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": ". We then take the generated image " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "\\hat{I}_{RGB}" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": " as input for the encoder " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "E(\\hat{I}_{RGB})" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": ", which is fed into the adaptor network " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": ", that is " + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}} = A(E(\\hat{I}_{RGB}))" + }, + { + "bbox": [ + 304, + 361, + 545, + 420 + ], + "type": "text", + "content": ". In this step, our loss is" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 388, + 418, + 545, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 388, + 418, + 545, + 439 + ], + "spans": [ + { + "bbox": [ + 388, + 418, + 545, + 439 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {A} = \\left\\| \\boldsymbol {f} - \\hat {\\boldsymbol {f}} \\right\\| _ {1}. \\tag {4}", + "image_path": "20d3187b3f1c7e359c545d7f56dea91988efd15535eb9739a5ec1a07a124f57b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "content": "For the intermediate layers, we propose a hierarchical representation constrain. 
Given the output " + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}}" + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "content": " and the latent codes (i.e., " + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\pmb{w}_1" + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\pmb{w}_2" + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "content": ")², we similarly collect the hierarchical feature " + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "inline_equation", + "content": "\\left\\{G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2)_k\\right\\}" + }, + { + "bbox": [ + 304, + 444, + 545, + 499 + ], + "type": "text", + "content": ". The objective is" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 506, + 545, + 532 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 506, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 318, + 506, + 545, + 532 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {H} = \\sum_ {k} \\left\\| G (\\boldsymbol {f}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} - G (\\hat {\\boldsymbol {f}}, \\boldsymbol {w} _ {1}, \\boldsymbol {w} _ {2}) _ {k} \\right\\| _ {1}. \\tag {5}", + "image_path": "528b69d162c7f54449b6692f7bfe3289d8856c9b2f20e3957252175fd2c62200.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "spans": [ + { + "bbox": [ + 304, + 542, + 545, + 590 + ], + "type": "text", + "content": "In this step, we freeze every network except for the U-net-like adaptor which is learned. Note that we do not access to any real data to train the adaptor, since we utilize the generated image with from the trained generator (Figure 2(b))." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 594, + 545, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 654 + ], + "type": "text", + "content": "Relative regularization loss. We expect to input the consistency of the translated 3D scene with single-image regularization instead of the images from the consecutive views. We propose a relative regularization loss based on neighboring patches. We assume that neighboring patches" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 663, + 545, + 692 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 663, + 545, + 692 + ], + "spans": [ + { + "bbox": [ + 306, + 663, + 545, + 692 + ], + "type": "text", + "content": "1After each ResBlock the feature resolution is half of the previous one in the encoder, and two times in generator. In the generator, the last output is image." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "spans": [ + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "text", + "content": "Both " + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "inline_equation", + "content": "\\pmb{w}1" + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "inline_equation", + "content": "\\pmb{w}2" + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "text", + "content": " are the ones used when generating image " + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "inline_equation", + "content": "\\hat{I}_{RGB}" + }, + { + "bbox": [ + 317, + 692, + 535, + 703 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 703, + 488, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 703, + 488, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 703, + 488, + 713 + ], + "type": "text", + "content": "3More precisely, that is the feature map in this paper." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12655" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 70, + 286, + 198 + ], + "blocks": [ + { + "bbox": [ + 50, + 70, + 286, + 198 + ], + "lines": [ + { + "bbox": [ + 50, + 70, + 286, + 198 + ], + "spans": [ + { + "bbox": [ + 50, + 70, + 286, + 198 + ], + "type": "table", + "html": "
<table>
  <tr><th rowspan="2">Dataset / Method</th><th colspan="2">CelebA-HQ</th><th colspan="2">AFHQ</th></tr>
  <tr><th>TC↓</th><th>FID↓</th><th>TC↓</th><th>FID↓</th></tr>
  <tr><td>*MUNIT</td><td>30.240</td><td>31.4</td><td>28.497</td><td>41.5</td></tr>
  <tr><td>*DRIT</td><td>35.452</td><td>52.1</td><td>25.341</td><td>95.6</td></tr>
  <tr><td>*MSGAN</td><td>31.641</td><td>33.1</td><td>34.236</td><td>61.4</td></tr>
  <tr><td>StarGANv2</td><td>10.250</td><td>13.6</td><td>3.025</td><td>16.1</td></tr>
  <tr><td>Ours (3D)</td><td>3.743</td><td>22.3</td><td>2.067</td><td>15.3</td></tr>
  <tr><th></th><th>TC↓</th><th>(unc)FID↓</th><th>TC↓</th><th>(unc)FID↓</th></tr>
  <tr><td>†Liu et al. [34]</td><td>13.315</td><td>17.8</td><td>3.462</td><td>20.0</td></tr>
  <tr><td>StarGANv2</td><td>10.250</td><td>12.2</td><td>3.025</td><td>9.9</td></tr>
  <tr><td>†Kunhee et al. [23]</td><td>10.462</td><td>6.7</td><td>3.241</td><td>10.0</td></tr>
  <tr><td>Ours (3D)</td><td>3.743</td><td>18.7</td><td>2.067</td><td>11.4</td></tr>
</table>
", + "image_path": "8591a38870ce726a2a5b62f90cc739510cd31273962cadf8cac94b529abe50a1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": "are equivalent to that on corresponding patches of two consecutive views. For example, when inputting multi-view consistent scene images, the position of eyes are consistently moving. The fully connected layers (i.e., NeRF mode) " + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": " renders the view-consistent feature map " + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": ", which finally decides the view-consistent reconstructed 3D scene. Thus, we expect the output " + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "inline_equation", + "content": "\\hat{f}" + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": " of the adaptor " + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": " to obtain the view-consistent property of the feature map " + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 46, + 246, + 287, + 342 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": "We randomly sample one vector from the feature map " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{f}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " (e.g., red square in (Figure 2(d))), denoted as " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{f}^{\\eta}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": ". Then we sample the eight nearest neighboring vectors of " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{f}^{\\eta}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " (dark green square in Figure 2(d)), denoted by " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{f}^{\\eta, \\varepsilon}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\varepsilon = 1, \\dots, 8" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " is the neighbor index. 
Similarly, we sample vectors " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}}^{\\eta}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}}^{\\eta, \\varepsilon}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " from the feature map " + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}}" + }, + { + "bbox": [ + 47, + 342, + 288, + 437 + ], + "type": "text", + "content": " (red and dark green dash square in Figure 2(d)). We then compute the patch difference:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 445, + 287, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 445, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 84, + 445, + 287, + 464 + ], + "type": "interline_equation", + "content": "d _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} = \\boldsymbol {f} ^ {\\eta} \\ominus \\boldsymbol {f} ^ {\\eta , \\varepsilon}, d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} = \\hat {\\boldsymbol {f}} ^ {\\eta} \\ominus \\hat {\\boldsymbol {f}} ^ {\\eta , \\varepsilon}, \\tag {6}", + "image_path": "cf13d6bc93b7da613d0e0925385f39b34fb21885871674878d6c31da827a52d0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 471, + 287, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 471, + 287, + 503 + ], + "spans": [ + { + "bbox": [ + 46, + 471, + 287, + 503 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 471, + 287, + 503 + ], + "type": "inline_equation", + "content": "\\ominus" + }, + { + "bbox": [ + 46, + 471, + 287, + 503 + ], + "type": "text", + "content": " represents vector subtraction. In order to preserve the consistency, we force these patch differences to be small:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 119, + 502, + 287, + 524 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 502, + 287, + 524 + ], + "spans": [ + { + "bbox": [ + 119, + 502, + 287, + 524 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {R} = \\left\\| d _ {\\boldsymbol {f}} ^ {\\eta , \\varepsilon} - d _ {\\hat {\\boldsymbol {f}}} ^ {\\eta , \\varepsilon} \\right\\| _ {1}. \\tag {7}", + "image_path": "35897b5cb70639c29b640264b6a9c008cf0205f0bad06fd4b0f01c9b6269e21a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 526, + 287, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 526, + 287, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 526, + 287, + 563 + ], + "type": "text", + "content": "The underlying intuition is straightforward: the difference vectors of the same location should be most relevant in the latent space compared to other random pairs." 
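Putting the adaptor losses together, the sketch below computes L_A (Eq. 4), L_H (Eq. 5) and L_R (Eq. 7) for one training step in which only the adaptor A is updated and all other networks are frozen. Tensor shapes, the number of sampled locations, and the restriction to horizontal neighbours (instead of all eight neighbours) are our simplifications for brevity.

```python
import torch
import torch.nn.functional as F

def adaptor_losses(f, f_hat, feats, feats_hat, n_pairs=256):
    """f, f_hat: (B, C, H, W) feature maps from F and from A(E(I_hat)).
    feats, feats_hat: lists of matching ResBlock outputs of G fed with f vs. f_hat."""
    l_a = F.l1_loss(f_hat, f)                                   # Eq. (4)
    l_h = sum(F.l1_loss(a, b) for a, b in zip(feats_hat, feats))  # Eq. (5)
    B, C, H, W = f.shape
    ys = torch.randint(0, H, (n_pairs,))
    xs = torch.randint(0, W - 1, (n_pairs,))                    # leave room for the right neighbour
    d_f = f[:, :, ys, xs] - f[:, :, ys, xs + 1]                 # f^eta ⊖ f^{eta,eps}, Eq. (6)
    d_fh = f_hat[:, :, ys, xs] - f_hat[:, :, ys, xs + 1]
    l_r = F.l1_loss(d_fh, d_f)                                  # Eq. (7)
    total = l_a + l_h + l_r                                     # summed into the final objective
    return total, {"L_A": l_a, "L_H": l_h, "L_R": l_r}
```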
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 563, + 146, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 563, + 146, + 574 + ], + "spans": [ + { + "bbox": [ + 59, + 563, + 146, + 574 + ], + "type": "text", + "content": "The final objective is" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 583, + 287, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 583, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 121, + 583, + 287, + 597 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {H} + \\mathcal {L} _ {A} + \\mathcal {L} _ {R}. \\tag {8}", + "image_path": "0d9402bea978a6ce6d47529fef2212427253b07b8b9949d59fa5993dd2d44273.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 613, + 128, + 626 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 613, + 128, + 626 + ], + "spans": [ + { + "bbox": [ + 47, + 613, + 128, + 626 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 632, + 162, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 162, + 646 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 162, + 646 + ], + "type": "text", + "content": "4.1. Experimental setup" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 288, + 715 + ], + "type": "text", + "content": "Training details. We use the trained StyleNeRF to partially initialize our multi-class StyleNeRF architecture. We adapt the structure of the multi-class StyleNeRF to the 3D-aware I2I architecture. The proposed method is implemented in Pytorch [47]. We use Adam [25] with a batch size" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 306, + 69, + 544, + 114 + ], + "blocks": [ + { + "bbox": [ + 46, + 205, + 287, + 240 + ], + "lines": [ + { + "bbox": [ + 46, + 205, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 287, + 240 + ], + "type": "text", + "content": "Table 1. Comparison with baselines on TC and FID metrics.* denotes that we used the results provided by StarGANv2. † means that we used the pre-trained networks provided by authors." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 306, + 69, + 544, + 114 + ], + "lines": [ + { + "bbox": [ + 306, + 69, + 544, + 114 + ], + "spans": [ + { + "bbox": [ + 306, + 69, + 544, + 114 + ], + "type": "table", + "html": "
<table>
  <tr><th>Ini.</th><th>Ada.</th><th>Hrc.</th><th>Rrl.</th><th>TC↓</th><th>FID↓</th></tr>
  <tr><td>Y</td><td>N</td><td>N</td><td>N</td><td>2.612</td><td>23.8</td></tr>
  <tr><td>Y</td><td>Y</td><td>N</td><td>N</td><td>2.324</td><td>23.1</td></tr>
  <tr><td>Y</td><td>Y</td><td>Y</td><td>N</td><td>2.204</td><td>16.1</td></tr>
  <tr><td>Y</td><td>Y</td><td>Y</td><td>Y</td><td>2.067</td><td>15.3</td></tr>
</table>
", + "image_path": "a7aff995da5b5ff66886b6a6eb7106846318f7b6ae9973f050081c13f9b404be.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "lines": [ + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "spans": [ + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "text", + "content": "Table 2. Impact of several components in the performance on AFHQ. The second row is the case where the 3D-aware I2I translation model is initialized by weights learned from the multi-class StylyNeRF. Then it is trained with a Resnet-based adaptor and " + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "text", + "content": " loss between the representations " + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "inline_equation", + "content": "\\hat{f}" + }, + { + "bbox": [ + 304, + 122, + 547, + 222 + ], + "type": "text", + "content": ". The proposed techniques continuously improve the consistency and performance. Ini.: initialization method for multi-class StyleNeRF, Ada.: U-net-like adaptor, Hrc.: Hierarchical representation constrain, Rrl: Relative regularization loss." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 306, + 236, + 544, + 373 + ], + "blocks": [ + { + "bbox": [ + 306, + 236, + 544, + 373 + ], + "lines": [ + { + "bbox": [ + 306, + 236, + 544, + 373 + ], + "spans": [ + { + "bbox": [ + 306, + 236, + 544, + 373 + ], + "type": "image", + "image_path": "eb275f460a0b3cfa2c4f46a236179adafb7a047d7edcab94d40fe236008a81fe.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 376, + 545, + 513 + ], + "blocks": [ + { + "bbox": [ + 307, + 376, + 545, + 513 + ], + "lines": [ + { + "bbox": [ + 307, + 376, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 545, + 513 + ], + "type": "image", + "image_path": "5352e8612629871db006d9a0389bb05f6f4aa4bd3b02af7470e50f420b899545.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 521, + 545, + 578 + ], + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 578 + ], + "type": "text", + "content": "Figure 3. (Top) Using a single mapping network which takes as input the concatenated class embedding and the noise. We find it fails to generate target-specific realistic image. (Bottom) we use two mapping networks without concatenating their outputs like the proposed method. This design fails to generate 3D-aware results." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 589, + 545, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 589, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 304, + 589, + 545, + 638 + ], + "type": "text", + "content": "of 64, using a learning rate of 0.0002. 
We use " + }, + { + "bbox": [ + 304, + 589, + 545, + 638 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 304, + 589, + 545, + 638 + ], + "type": "text", + "content": " Quadro RTX 3090 GPUs (24 GB VRAM) to conduct all our experiments. We show the network details and more results on Supp. Mat.." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "text", + "content": "Datasets. Our experiments are conducted on the Animal Faces (AFHQ) [7] and CelebA-HQ [21] datasets. AFHQ contains 3 classes, each one has about 5000 images. In CelebA-HQ, we use gender as a class, with " + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\sim 10\\mathrm{k}(10057)" + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "text", + "content": " male and " + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "inline_equation", + "content": "\\sim 18\\mathrm{k}(17943)" + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "text", + "content": " female images in the training set. In this paper, all images are resized to " + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 304, + 641, + 546, + 714 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "12656" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 547, + 311 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 311 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 311 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 311 + ], + "type": "image", + "image_path": "362609a517965255f3860b4570bf070f137aeef444948dbcba8435065046331a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 319, + 546, + 352 + ], + "lines": [ + { + "bbox": [ + 46, + 319, + 546, + 352 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 546, + 352 + ], + "type": "text", + "content": "Figure 4. Comparative results between the proposed method and StarGANv2. We observe that StarGANv2 suffers from underestimating viewpoint changes when changing the input viewpoint (first column). It also leads to identity change (third and fourth columns), and a geometrically unrealistic ear (last two columns)." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 48, + 357, + 287, + 594 + ], + "blocks": [ + { + "bbox": [ + 48, + 357, + 287, + 594 + ], + "lines": [ + { + "bbox": [ + 48, + 357, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 48, + 357, + 287, + 594 + ], + "type": "image", + "image_path": "d7188db05ea4a7091eb376ac596223270013f449c014d143d1237e2075be336d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "lines": [ + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "text", + "content": "Figure 5. The generated images of (top) " + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "inline_equation", + "content": "G(\\pmb{f}, \\pmb{w}_1, \\pmb{w}_2)" + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "text", + "content": " and (bottom) " + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "inline_equation", + "content": "G(\\hat{\\pmb{f}}, \\pmb{w}_1, \\pmb{w}_2)" + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "text", + "content": ", which show that we correctly align the outputs of both the NeRF mode " + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "text", + "content": " and the adaptor " + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 601, + 287, + 635 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 637, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 637, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 637, + 287, + 674 + ], + "type": "text", + "content": "Baselines. We compare to MUNIT [15], DRIT [28], MSGAN [20], StarGANv2 [7], [23] and [34], all of which perform image-to-image translation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Evaluation Measures. We employ the widely used metric for evaluation, namely Fréchet Inception Distance (FID) [14]. We also propose a new measure in which we" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 359, + 547, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 547, + 562 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 547, + 562 + ], + "type": "text", + "content": "combine two metrics, one which measures the consistency between neighboring frames (which we want to be low), and another that measures the diversity over the whole video (which we would like to be high). We adopt a modified temporal loss (TL) [54]. This temporal loss computes the Frobenius difference between two frames to evaluate the video consistency. Only considering this measure would lead to high scores when neighboring frames in the generated video are all the same. For successful 3D-aware I2I translation, we expect the system to be sensitive to view changes in the source video and therefore combine low consecutive frame changes with high diversity over the video. 
Therefore, we propose to compute LPIPS [67] for each video (vLPIPS), which indicates the diversity of the generated video sequence. To evaluate both the consistency and the sensitiveness of the generated video, we propose a new temporal consistency metric (TC):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 379, + 571, + 545, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 379, + 571, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 379, + 571, + 545, + 583 + ], + "type": "interline_equation", + "content": "T C = T L / v L P I P S. \\tag {9}", + "image_path": "097135a5693feede9f0305e951ba43250cccc4bc61a1f5769a4b073576970666.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 592, + 547, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 592, + 547, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 592, + 547, + 639 + ], + "type": "text", + "content": "Due to the small changes between two consecutive views, for each video we use frame interval 1, 2 and 4 in between to evaluate view-consistency. Note that a lower TC value is better." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 647, + 497, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 497, + 661 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 497, + 661 + ], + "type": "text", + "content": "4.2. Quantitative and qualitative results." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "We evaluate the performance of the proposed method on both the AFHQ animal and CelebA human face dataset. As reported in Table 1, in terms of TC the proposed method achieves the best score on two datasets. For example, we" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 758 + ], + "type": "text", + "content": "12657" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 286, + 316 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 286, + 316 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 286, + 316 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 286, + 316 + ], + "type": "image", + "image_path": "eaf356b46896241f4df6c80258727e840afe23a669cf43b2da75e5abcda410e1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 325, + 277, + 335 + ], + "lines": [ + { + "bbox": [ + 56, + 325, + 277, + 335 + ], + "spans": [ + { + "bbox": [ + 56, + 325, + 277, + 335 + ], + "type": "text", + "content": "Figure 6. Interpolation between the dog and wildlife classes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 340, + 287, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 340, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 47, + 340, + 287, + 544 + ], + "type": "text", + "content": "have 3.743 TC on CelebA-HQ, which is better than StarGANv2 (10.250 TC). This indicates that our method dramatically improves consistency. 
As reported in Table 1 (up), across both datasets, the proposed method consistently outperforms the baselines with significant gains in terms of FID and LPIPS, except for StarGANv2 which obtains superior results. However, on AFHQ we achieve better FID score than StarGANv2. Kunhee et al. [23] reports the unconditional FID ((unc)FID) value which is computed between synthesized images and training samples instead of each class. As reported in Table 1 (bottom), We are able to achieve completing results on uncFID metrics. Note that while 2D I2I translation (e.g., StarGANv2) can obtain high-quality for each image, they cannot synthesize images of the same scene with 3D consistency, and suffers from unrealistic shape/identity changes when changing the viewpoint, which are especially notable when looking at a video." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 545, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 287, + 714 + ], + "type": "text", + "content": "In Figures 1,4, we perform 3D-aware I2I translation. When changing the input viewpoint (Figure 4 (first two columns)), the outputs of StarGANv2 do not maintain the correct head pose, and underestimate the pose changes with respect to the frontal view. To estimate that this is actually the case, we also compute the diversity (i.e., vLPIPS) in a single video sequence. For example, both StarGANv2 and our method are 0.032 and 0.101 on CelebA-HQ. This confirms that the diversity (due to pose changes) is lowest for StarGANv2. More clearly showing the limitations of standard I2I methods for 3D-aware I2I, we observe that StarGANv2 suffers from unrealistic changes when changing the viewpoint. For example, when translating the class cat to wildlife, the generated images changes from wolf to leop" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": "ard when varying the viewpoint (Figure 4 (third and fourth columns)). Also, the main target class characteristics, such as ears, are not geometrically realistic, leading to unrealistic 3D scene videos. Our method, however, eliminates these shortcomings and performs efficient high-resolution image translation with high 3D-consistency, which preserves the input image pose and changes the style of the output images. We show high-resolution images " + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "inline_equation", + "content": "(1024 \\times 1024)" + }, + { + "bbox": [ + 304, + 72, + 545, + 180 + ], + "type": "text", + "content": " on Supp. Mat.." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 191, + 397, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 191, + 397, + 204 + ], + "spans": [ + { + "bbox": [ + 306, + 191, + 397, + 204 + ], + "type": "text", + "content": "4.3. Ablation study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": "Conditional 3D-aware generative architecture In this experiment, we verify our network design by comparing it with two alternative network designs. 
As shown in Figure 3(up), we explore a naive strategy: using one mapping which takes as input the concatenated class embedding and the noise. In this way, the fully connected network " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " outputs the class-specific latent code " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": ", which is fed into the fully connected network " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " to output the class-specific representation " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": ". Here, both the latent code " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " and the representation " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " are decided by the same class. However, when handling 3D-aware multi-class I2I translation task, the feature representation " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "\\hat{f}" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " is combined with the latent code " + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 213, + 545, + 380 + ], + "type": "text", + "content": " from varying class embeddings, which leads to unrealistic image generation (Figure. 3(up))." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 382, + 545, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 382, + 545, + 453 + ], + "spans": [ + { + "bbox": [ + 304, + 382, + 545, + 453 + ], + "type": "text", + "content": "As shown in Figure 3(bottom), we utilize two mapping networks without concatenating their outputs like the proposed method. This design guarantees that the output of the fully connected layers " + }, + { + "bbox": [ + 304, + 382, + 545, + 453 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 382, + 545, + 453 + ], + "type": "text", + "content": " are class-agnostic. We experimentally observe that this model fails to handle 3D-aware generation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 636 + ], + "type": "text", + "content": "Effective training strategy for multi-class 3D-aware generative model. We evaluate the proposed training strategy on AFHQ and CelebA-HQ datasets. We initialize the proposed multi-class 3D I2I architecture from scratch and the proposed method, respectively. As shown on Figure 7 (up), the model trained from scratch synthesizes unrealistic faces on CelebA-HQ dataset, and low quality cats on AFHQ. 
This is due to the style-based conditional generator which is hard to be optimized and causes mode collapse directly [49]. The proposed training strategy, however, manages to synthesize photo-realistic high-resolution images with high multi-view consistency. This training strategy first performs unconditional learning, which leads to satisfactory generative ability. Thus, we relax the difficulty of directly training the conditional model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": "Alignment and interpolation. Figure 5 exhibits the outputs of the generator when taking as input the feature representation " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\pmb{f}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{f}}" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ". This confirms that the proposed method successfully aligns the outputs of the fully connected layers " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": " and the adaptor " + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 642, + 545, + 713 + ], + "type": "text", + "content": ". Figure 6 reports interpolation by freezing the input images while interpolating the class em" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12658" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 70, + 545, + 456 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 456 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 456 + ], + "type": "image", + "image_path": "755a42b84a4c04efaedb54b9602a7203a1c2ff6c438f6557270cb0b943f1f84d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 75, + 464, + 516, + 475 + ], + "lines": [ + { + "bbox": [ + 75, + 464, + 516, + 475 + ], + "spans": [ + { + "bbox": [ + 75, + 464, + 516, + 475 + ], + "type": "text", + "content": "Figure 7. Qualitative results of multi-class StyleNeRF training from scratch (up) and from the proposed strategy (bottom)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 484, + 288, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 484, + 288, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 484, + 288, + 521 + ], + "type": "text", + "content": "bedding between two classes. Our model still manages to preserve the view-consistency, and generate high quantity images with even given never seen class embeddings." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "content": "Techniques for improving the view-consistency. We perform an ablation study on the impact of several design elements on the overall performance of the system, which includes the proposed initialization 3D-aware I2I translation model (Ini.), U-net-like adaptor (Ada.), hierarchical representation constrain (Hrc.) and relative regularization loss (Rrl.). We evaluate these four factors in Table 2. The results show that only using the proposed initialization (the second row of the Table 2) has already improved the view-consistency comparing to StarGANv2 (Table 1). Utilizing either U-net-like adaptor (Ada.) or hierarchical representation constrain (Hrc.) further leads to performance gains. Finally we are able to get the best score when further adding relative regularization loss (Rrl.) to the 3D-aware I2I translation model." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 306, + 483, + 378, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 483, + 378, + 496 + ], + "spans": [ + { + "bbox": [ + 306, + 483, + 378, + 496 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 510, + 545, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 545, + 631 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 545, + 631 + ], + "type": "text", + "content": "In this paper we first explore 3D-aware I2I translation. We decouple the learning process into a multi-class 3D-aware generative model step and a 3D-aware I2I translation step. In the first step, we propose a new multi-class StyleNeRF architecture, and an effective training strategy. We design the 3D-aware I2I translation model with the well-optimized multi-class StyleNeRF model. It inherits the capacity of synthesizing 3D consistent images. In the second step, we propose several techniques to further reduce the view-consistency of the 3D-aware I2I translation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 635, + 546, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 635, + 546, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 635, + 546, + 712 + ], + "type": "text", + "content": "Acknowledgement. We acknowledge the support from the Key Laboratory of Advanced Information Science and Network Technology of Beijing (XDXX2202), and the project supported by Youth Foundation (62202243). We acknowledge the Spanish Government funding for projects PID2019-104174GB-I00, TED2021-132513B-I00." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "12659" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Kyungjune Baek, Yunjay Choi, Youngjung Uh, Jaejun Yoo, and Hyunjung Shim. Rethinking the truly unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14154-14163, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 191 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 191 + ], + "type": "text", + "content": "[2] Aayush Bansal, Shugao Ma, Deva Ramanan, and Yaser Sheikh. Recycle-gan: Unsupervised video retargeting. In Proceedings of the European conference on computer vision (ECCV), pages 119-135, 2018. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 191, + 288, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 191, + 288, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 191, + 288, + 223 + ], + "type": "text", + "content": "[3] Dina Bashkirova, Ben Usman, and Kate Saenko. Unsupervised video-to-video translation. arXiv preprint arXiv:1806.03698, 2018. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 224, + 288, + 290 + ], + "type": "text", + "content": "[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16123-16133, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 291, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 291, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 291, + 288, + 346 + ], + "type": "text", + "content": "[5] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5799-5809, 2021. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 346, + 288, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 288, + 390 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 288, + 390 + ], + "type": "text", + "content": "[6] Yang Chen, Yingwei Pan, Ting Yao, Xinmei Tian, and Tao Mei. Mocycle-gan: Unpaired video-to-video translation. In Proceedings of the 27th ACM International Conference on Multimedia, pages 647-655, 2019. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 391, + 288, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 288, + 423 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 288, + 423 + ], + "type": "text", + "content": "[7] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, 2020. 3, 5, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 424, + 288, + 479 + ], + "type": "text", + "content": "[8] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10673-10683, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "spans": [ + { + "bbox": [ + 53, + 479, + 288, + 523 + ], + "type": "text", + "content": "[9] Matheus Gadelha, Subhransu Maji, and Rui Wang. 3d shape induction from 2d views of multiple objects. In 2017 International Conference on 3D Vision (3DV), pages 402-411. IEEE, 2017. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 288, + 557 + ], + "type": "text", + "content": "[10] Abel Gonzalez-Garcia, Joost van de Weijer, and Yoshua Bengio. Image-to-image translation for cross-domain disentanglement. In NeurIPS, pages 1294–1305, 2018. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 288, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 288, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 288, + 602 + ], + "type": "text", + "content": "[11] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, pages 2672-2680, 2014. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 288, + 646 + ], + "type": "text", + "content": "[12] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985, 2021. 
2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 691 + ], + "type": "text", + "content": "[13] Paul Henderson, Vagia Tsiminaki, and Christoph H Lampert. Leveraging 2d data to learn textured 3d mesh generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7498-7507, 2020. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 288, + 715 + ], + "type": "text", + "content": "[14] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 715 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "two time-scale update rule converge to a local nash equilibrium. In NeurIPS, pages 6626-6637, 2017. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 95, + 545, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 95, + 545, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 545, + 128 + ], + "type": "text", + "content": "[15] Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In ECCV, pages 172-189, 2018. 2, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 545, + 161 + ], + "type": "text", + "content": "[16] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017. 2, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 162, + 545, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 545, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 545, + 195 + ], + "type": "text", + "content": "[17] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, pages 1125-1134, 2017. 4" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 195, + 545, + 249 + ], + "type": "text", + "content": "[18] Somi Jeong, Youngjung Kim, Eungbean Lee, and Kwanghoon Sohn. Memory-guided unsupervised image-to-image translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6558-6567, 2021. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "spans": [ + { + "bbox": [ + 307, + 250, + 545, + 294 + ], + "type": "text", + "content": "[19] Danilo Jimenez Rezende, SM Eslami, Shakir Mohamed, Peter Battaglia, Max Jaderberg, and Nicolas Heess. Unsupervised learning of 3d structure from images. Advances in neural information processing systems, 29, 2016. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 545, + 338 + ], + "type": "text", + "content": "[20] Animesh Karnewar and Oliver Wang. *Msg-gan: Multi-scale gradients for generative adversarial networks*. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7799–7808, 2020. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 339, + 545, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 339, + 545, + 371 + ], + "spans": [ + { + "bbox": [ + 307, + 339, + 545, + 371 + ], + "type": "text", + "content": "[21] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. In ICLR, 2018. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 371, + 545, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 405 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 405 + ], + "type": "text", + "content": "[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 405, + 545, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 405, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 307, + 405, + 545, + 459 + ], + "type": "text", + "content": "[23] Kunhee Kim, Sanghun Park, Eunyeong Jeon, Taehun Kim, and Daijin Kim. A style-aware discriminator for controllable image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18239-18248, 2022. 5, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 459, + 545, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 459, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 307, + 459, + 545, + 493 + ], + "type": "text", + "content": "[24] Taeksoo Kim, Moonsu Cha, Hyunsoo Kim, Jungkwon Lee, and Jiwon Kim. Learning to discover cross-domain relations with generative adversarial networks. In ICML, 2017. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 493, + 545, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 545, + 515 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 545, + 515 + ], + "type": "text", + "content": "[25] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. *ICLR*, 2014. 
5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 515, + 545, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 515, + 545, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 515, + 545, + 581 + ], + "type": "text", + "content": "[26] Minsu Ko, Eunju Cha, Sungjoo Suh, Huijin Lee, Jae-Joon Han, Jinwoo Shin, and Bohyung Han. Self-supervised dense consistency regularization for image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18301-18310, June 2022. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 545, + 624 + ], + "type": "text", + "content": "[27] Héctor Laria, Yaxing Wang, Joost van de Weijer, and Bogdan Raducanu. Hyper-gan: Transferring unconditional to conditional gans with hypernetworks. arXiv preprint arXiv:2112.02219, 2021. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "spans": [ + { + "bbox": [ + 307, + 625, + 545, + 669 + ], + "type": "text", + "content": "[28] Hsin-Ying Lee, Hung-Yu Tseng, Jia-Bin Huang, Maneesh Kumar Singh, and Ming-Hsuan Yang. Diverse imaged-to-image translation via disentangled representations. In ECCV, 2018. 2, 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 670, + 545, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 545, + 715 + ], + "type": "text", + "content": "[29] Hsin-Ying Lee, Hung-Yu Tseng, Qi Mao, Jia-Bin Huang, Yu-Ding Lu, Maneesh Singh, and Ming-Hsuan Yang. Drit++: Diverse image-to-image translation via disentangled representations. IJCV, pages 1-16, 2020. 3" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "12660" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[30] Kangning Liu, Shuhang Gu, Andrés Romero, and Radu Timofte. Unsupervised multimodal video-to-video translation via self-supervised learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1030–1040, 2021. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 129, + 287, + 161 + ], + "type": "text", + "content": "[31] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. NeurIPS, 2020. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 287, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 287, + 195 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 287, + 195 + ], + "type": "text", + "content": "[32] Ming-Yu Liu, Thomas Breuel, and Jan Kautz. Unsupervised image-to-image translation networks. In NeurIPS, pages 700-708, 2017. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 198, + 287, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 198, + 287, + 240 + ], + "spans": [ + { + "bbox": [ + 49, + 198, + 287, + 240 + ], + "type": "text", + "content": "[33] Ming-Yu Liu, Xun Huang, Arun Mallya, Tero Karras, Timo Aila, Jaakko Lehtinen, and Jan Kautz. Few-shot unsupervised image-to-image translation. In CVPR, pages 10551-10560, 2019. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 243, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 243, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 49, + 243, + 287, + 308 + ], + "type": "text", + "content": "[34] Yahui Liu, Enver Sangineto, Yajing Chen, Linchao Bao, Haoxian Zhang, Nicu Sebe, Bruno Lepri, Wei Wang, and Marco De Nadai. Smoothing the disentangled latent style space for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10785-10794, 2021. 5, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 310, + 287, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 310, + 287, + 353 + ], + "spans": [ + { + "bbox": [ + 49, + 310, + 287, + 353 + ], + "type": "text", + "content": "[35] Sebastian Lunz, Yingzhen Li, Andrew Fitzgibbon, and Nate Kushman. Inverse graphics gan: Learning to generate 3d shapes from unstructured 2d data. arXiv preprint arXiv:2002.12674, 2020. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 355, + 287, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 355, + 287, + 398 + ], + "spans": [ + { + "bbox": [ + 49, + 355, + 287, + 398 + ], + "type": "text", + "content": "[36] Youssef Alami Mejjati, Christian Richardt, James Tompkin, Darren Cosker, and Kwang In Kim. Unsupervised attention-guided image-to-image translation. In NeurIPS, pages 3693-3703, 2018. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 400, + 287, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 400, + 287, + 454 + ], + "spans": [ + { + "bbox": [ + 49, + 400, + 287, + 454 + ], + "type": "text", + "content": "[37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 456, + 287, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 456, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 49, + 456, + 287, + 510 + ], + "type": "text", + "content": "[38] Mateusz Michalkiewicz, Jhony K. Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 
2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 512, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 512, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 49, + 512, + 287, + 555 + ], + "type": "text", + "content": "[39] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. arXiv preprint arXiv:2003.08934, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 557, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 557, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 49, + 557, + 287, + 612 + ], + "type": "text", + "content": "[40] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7588–7597, 2019. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 614, + 287, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 614, + 287, + 656 + ], + "spans": [ + { + "bbox": [ + 49, + 614, + 287, + 656 + ], + "type": "text", + "content": "[41] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 658, + 287, + 712 + ], + "type": "text", + "content": "[42] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[43] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. arXiv preprint arXiv:1912.07372, 2019. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 118, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 545, + 182 + ], + "type": "text", + "content": "[44] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13503–13513, 2022. 
2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 184, + 545, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 545, + 237 + ], + "type": "text", + "content": "[45] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. International Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 239, + 545, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 239, + 545, + 270 + ], + "spans": [ + { + "bbox": [ + 308, + 239, + 545, + 270 + ], + "type": "text", + "content": "[46] Taesung Park, Alexei A. Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for conditional image synthesis. In ECCV, 2020. 2, 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "spans": [ + { + "bbox": [ + 308, + 272, + 545, + 316 + ], + "type": "text", + "content": "[47] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017. 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 317, + 545, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 317, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 308, + 317, + 545, + 348 + ], + "type": "text", + "content": "[48] Songyou Peng, Michael Niemeyer, Lars M. Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. ArXiv, abs/2003.04618, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 350, + 545, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 350, + 545, + 391 + ], + "spans": [ + { + "bbox": [ + 308, + 350, + 545, + 391 + ], + "type": "text", + "content": "[49] Axel Sauer, Katja Schwarz, and Andreas Geiger. Styleganx1: Scaling stylegan to large diverse datasets. In ACM SIGGRAPH 2022 Conference Proceedings, pages 1-10, 2022. 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "spans": [ + { + "bbox": [ + 308, + 393, + 545, + 437 + ], + "type": "text", + "content": "[50] Xuning Shao and Weidong Zhang. Spatchgan: A statistical feature based discriminator for unsupervised image-to-image translation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6546-6555, 2021. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 438, + 545, + 491 + ], + "type": "text", + "content": "[51] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems, pages 1119–1130, 2019. 
2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 493, + 545, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 558 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 558 + ], + "type": "text", + "content": "[52] Ayush Tewari, Xingang Pan, Ohad Fried, Maneesh Agrawala, Christian Theobalt, et al. Disentangled3d: Learning a 3d generative model with disentangled geometry and appearance from monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1516-1525, 2022. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 559, + 545, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 559, + 545, + 591 + ], + "spans": [ + { + "bbox": [ + 308, + 559, + 545, + 591 + ], + "type": "text", + "content": "[53] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In NeurIPS, 2018. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 593, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 593, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 593, + 545, + 635 + ], + "type": "text", + "content": "[54] Wenjing Wang, Shuai Yang, Jizheng Xu, and Jiaying Liu. Consistent video style transfer via relaxation and regularization. IEEE Transactions on Image Processing, 29:9125-9139, 2020. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 636, + 545, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 679 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 679 + ], + "type": "text", + "content": "[55] Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. Minegan: effective knowledge transfer from gans to target domains with few images. In CVPR, 2020. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 712 + ], + "type": "text", + "content": "[56] Yaxing Wang, Abel Gonzalez-Garcia, Joost van de Weijer, and Luis Herranz. SDIT: Scalable and diverse cross-domain image translation. In ACM MM, 2019. 3" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12661" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[57] Yaxing Wang, Salman Khan, Abel Gonzalez-Garcia, Joost van de Weijer, and Fahad Shahbaz Khan. Semi-supervised learning for few-shot image-to-image translation. In CVPR, 2020. 
2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 118, + 286, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 118, + 286, + 161 + ], + "spans": [ + { + "bbox": [ + 49, + 118, + 286, + 161 + ], + "type": "text", + "content": "[58] Yaxing Wang, Hector Laria Mantecon, Joost van de Weijer, Laura Lopez-Fuentes, and Bogdan Raducanu. Transferi2i: Transfer learning for image-to-image translation from small datasets, 2021. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 286, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 286, + 205 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 286, + 205 + ], + "type": "text", + "content": "[59] Yaxing Wang, Joost van de Weijer, and Luis Herranz. Mix and match networks: encoder-decoder alignment for zeropair image translation. In CVPR, pages 5467-5476, 2018. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 209, + 286, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 209, + 286, + 251 + ], + "spans": [ + { + "bbox": [ + 49, + 209, + 286, + 251 + ], + "type": "text", + "content": "[60] Yaxing Wang, Chenshen Wu, Luis Herranz, Joost van de Weijer, Abel Gonzalez-Garcia, and Bogdan Raducanu. Transferring gans: generating images from limited data. In ECCV, pages 218-234, 2018. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 254, + 286, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 254, + 286, + 285 + ], + "spans": [ + { + "bbox": [ + 49, + 254, + 286, + 285 + ], + "type": "text", + "content": "[61] Yaxing Wang, Lu Yu, and Joost van de Weijer. Deep2i: Enabling deep hierarchical image-to-image translation by transferring from gans. NeurIPS, 2020. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 288, + 286, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 288, + 286, + 341 + ], + "spans": [ + { + "bbox": [ + 49, + 288, + 286, + 341 + ], + "type": "text", + "content": "[62] Yang Xue, Yuheng Li, Krishna Kumar Singh, and Yong Jae Lee. Giraffe hd: A high-resolution 3d-aware generative model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18440-18449, 2022. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 343, + 286, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 343, + 286, + 396 + ], + "spans": [ + { + "bbox": [ + 49, + 343, + 286, + 396 + ], + "type": "text", + "content": "[63] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Unsupervised image-to-image translation with generative prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18332-18341, 2022. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "spans": [ + { + "bbox": [ + 49, + 399, + 286, + 430 + ], + "type": "text", + "content": "[64] Zili Yi, Hao Zhang, Ping Tan Gong, et al. Dualgan: Unsupervised dual learning for image-to-image translation. In ICCV, 2017. 
3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 434, + 286, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 434, + 286, + 466 + ], + "spans": [ + { + "bbox": [ + 49, + 434, + 286, + 466 + ], + "type": "text", + "content": "[65] Xiaoming Yu, Yuanqi Chen, Shan Liu, Thomas Li, and Ge Li. Multi-mapping image-to-image translation via learning disentanglement. In NeurIPS, pages 2990-2999, 2019. 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 468, + 286, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 468, + 286, + 499 + ], + "spans": [ + { + "bbox": [ + 49, + 468, + 286, + 499 + ], + "type": "text", + "content": "[66] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 502, + 286, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 502, + 286, + 555 + ], + "spans": [ + { + "bbox": [ + 49, + 502, + 286, + 555 + ], + "type": "text", + "content": "[67] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 558, + 286, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 558, + 286, + 621 + ], + "spans": [ + { + "bbox": [ + 49, + 558, + 286, + 621 + ], + "type": "text", + "content": "[68] Xuanmeng Zhang, Zhedong Zheng, Daiheng Gao, Bang Zhang, Pan Pan, and Yi Yang. Multi-view consistent generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18450-18459, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 624, + 286, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 624, + 286, + 666 + ], + "spans": [ + { + "bbox": [ + 49, + 624, + 286, + 666 + ], + "type": "text", + "content": "[69] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 670, + 286, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 670, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 49, + 670, + 286, + 712 + ], + "type": "text", + "content": "[70] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV, pages 2223-2232, 2017. 2, 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 545, + 117 + ], + "type": "text", + "content": "[71] Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darryll, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. In NeurIPS, pages 465-476, 2017. 
3" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 317, + 757 + ], + "type": "text", + "content": "12662" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_content_list.json b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e5db61252d38ca5956736a1bcd42b187897c614b --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_content_list.json @@ -0,0 +1,1599 @@ +[ + { + "type": "text", + "text": "3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification", + "text_level": 1, + "bbox": [ + 241, + 130, + 727, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiazhao Zhang $^{1,2,*}$", + "bbox": [ + 197, + 202, + 344, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Liu Dai $^{3*}$", + "bbox": [ + 385, + 203, + 462, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fanpeng Meng", + "bbox": [ + 496, + 203, + 624, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qingnan Fan", + "bbox": [ + 666, + 203, + 777, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xuelin Chen", + "bbox": [ + 336, + 220, + 446, + 237 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kai Xu $^{6}$ He Wang $^{1\\dagger}$", + "bbox": [ + 488, + 220, + 686, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ CFCS, Peking University $^{2}$ Beijing Academy of Artificial Intelligence $^{3}$ CEIE, Tongji University", + "bbox": [ + 151, + 239, + 826, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Huazhong University of Science and Technology $^{5}$ Tencent AI Lab $^{6}$ National University of Defense Technology", + "bbox": [ + 104, + 257, + 880, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 308, + 313, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Object goal navigation (ObjectNav) in unseen environments is a fundamental task for Embodied AI. Agents in existing works learn ObjectNav policies based on 2D maps, scene graphs, or image sequences. Considering this task happens in 3D space, a 3D-aware agent can advance its ObjectNav capability via learning from fine-grained spatial information. However, leveraging 3D scene representation can be prohibitively unpractical for policy learning in this floor-level task, due to low sample efficiency and expensive computational cost. In this work, we propose a framework for the challenging 3D-aware ObjectNav based on two straightforward sub-policies. The two sub-policies, namely corner-guided exploration policy and category-aware identification policy, simultaneously perform by utilizing online fused 3D points as observation. 
Through extensive experiments, we show that this framework can dramatically improve the performance in ObjectNav through learning from 3D scene representation. Our framework achieves the best performance among all modular-based methods on the Matterport3D and Gibson datasets, while requiring (up to $30x$ ) less computational cost for training. The code will be released to benefit the community. $^1$", + "bbox": [ + 75, + 343, + 473, + 676 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 705, + 209, + 720 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As a vital task for intelligent embodied agents, object goal navigation (ObjectNav) [38, 49] requires an agent to find an object of a particular category in an unseen and unmapped scene. Existing works tackle this task through end-to-end reinforcement learning (RL) [27, 36, 47, 51] or modular-based methods [9, 14, 35]. End-to-end RL based methods take as input the image sequences and directly output low-level navigation actions, achieving competitive", + "bbox": [ + 75, + 734, + 468, + 854 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/05486a55480f98a35c0a2b1a5519d3fbfcf2490e19d38baddd71cb59cafcb114.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 306, + 570, + 371 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9c2e9dcd4bdeb316ff8e076f527473c4ae7ecbac964cf44db528c1f2d89ef3f8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 306, + 651, + 373 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8b67f6478be83602b9dc002185485f7ef7b29fac70a6bbcf1b3a95b9957cc908.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 653, + 306, + 733, + 373 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/84404b5677ea3732bb2936c13c479691cd6a6cdaaecbb7694e599d3d86305842.jpg", + "image_caption": [], + "image_footnote": [ + "c: cushion \ns: sofa \nt: chair" + ], + "bbox": [ + 738, + 306, + 821, + 372 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/805f0f28dc0b2ef5d21e412f70ee134fa5290359334ee6205cd3e74f8c18027c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 373, + 625, + 445 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/34688127bd9d4325f162524b96a33470eb72c31e5de3b2c39e4973b953280e1b.jpg", + "image_caption": [ + "Looking for a chair" + ], + "image_footnote": [], + "bbox": [ + 632, + 373, + 754, + 445 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/cba1cc57cf8037850b9b9e04a0237d740bc2ace04c8990eaf145cf36cfe9ee1a.jpg", + "image_caption": [ + "Figure 1. We present a 3D-aware ObjectNav framework along with simultaneous exploration and identification policies: $\\mathbf{A} \\rightarrow \\mathbf{B}$ , the agent was guided by an exploration policy to look for its target; $\\mathbf{B} \\rightarrow \\mathbf{C}$ , the agent consistently identified a target object and finally called STOP." + ], + "image_footnote": [], + "bbox": [ + 759, + 373, + 883, + 445 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "performance while suffering from lower sample efficiency and poor generalizability across datasets [3, 27]. 
Therefore, we favor modular-based methods, which usually contain the following modules: a semantic scene mapping module that aggregates the RGBD observations and the outputs from semantic segmentation networks to form a semantic scene map; an RL-based goal policy module that takes as input the semantic scene map and learns to online update a goal location; finally, a local path planning module that drives the agent to that goal. Under this design, the semantic accuracy and geometric structure of the scene map are crucial to the success of object goal navigation.", + "bbox": [ + 496, + 550, + 892, + 733 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We observe that the existing modular-based methods mainly construct 2D maps [8, 9], scene graphs [34, 56] or neural fields [43] as their scene maps. Given that objects lie in 3D space, these scene maps are inevitably deficient in leveraging 3D spatial information of the environment comprehensively and thus have been a bottleneck for further improving object goal navigation. In contrast, forming a 3D scene representation naturally offers more accurate, spatially dense and consistent semantic predictions than its 2D counterpart, as proved by [12, 31, 45]. Hence, if the agent could take advantage of the 3D scene understanding and", + "bbox": [ + 496, + 734, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Joint first authors", + "bbox": [ + 93, + 862, + 196, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author: hewang@pku.edu.cn", + "bbox": [ + 96, + 875, + 336, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1 Homepage: https://pku-epic.github.io/3D-Aware-ObjectNav/", + "bbox": [ + 96, + 887, + 421, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "6672", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "form a 3D semantic scene map, it is expected to advance the performance of ObjectNav.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, leveraging 3D scene representation would bring great challenges to ObjectNav policy learning. First, building and querying fine-grained 3D representation across a floor-level scene requires extensive computational cost, which can significantly slow down the training of RL [7,55]. Also, 3D scene representation induces considerably more complex and high-dimensional observations to the goal policy than its 2D counterpart, leading to a lower sample efficiency and hampering the navigation policy learning [22, 57]. 
As a result, it is demanding to design a framework to efficiently and effectively leverage powerful 3D information for ObjectNav.", + "bbox": [ + 75, + 125, + 468, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To tackle these challenges, we propose a novel framework composed of an online semantic point fusion module for 3D semantic scene mapping and two parallel policy networks in charge of scene exploration and object identification, along with a local path planning module. Our online semantic point fusion module extends a highly efficient online point construction algorithm [53] to enable online semantic fusion and spatial semantic consistency computation from captured RGBD sequences. This 3D scene construction empowers a comprehensive 3D scene understanding for ObjectNav. Moreover, compared to dense voxel-based methods [7, 55], our point-based fusion algorithm are more memory-efficient [40, 46] which makes it practically usable for floor-level navigation task. (See Figure 1)", + "bbox": [ + 75, + 308, + 468, + 520 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, to ease the learning of navigation policy, we further propose to factorize the navigation policy into two sub-policies, namely exploration and identification. The two policies simultaneously perform to roll out an exploration goal and an identified object goal (if exist), respectively. Then the input for the local path planning module will switch between these two goals, depending on whether there exists an identified target object. More specifically, we propose a corner-guided exploration policy which learns to predict a long-term discrete goal at one of the four corners of the bounding box of the scene. These corner goals efficiently drive the agent to perceive the surroundings and explore regions where the target object is possibly settled. And for identification, a category-aware identification policy is proposed to dynamically learn a discrete confidence threshold to identify the semantic predictions for each category. Both of these policies are trained by RL in low-dimensional discrete action space. Through experiments, the simultaneous two-policy mechanism and discrete action space design dramatically reduce the difficulty in learning for 3D-aware ObjectNav and achieve better performance than existing modular-based navigation strategies [26, 35].", + "bbox": [ + 75, + 522, + 468, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through extensive evaluation on the public benchmarks, we demonstrate that our method performs online 3D-aware", + "bbox": [ + 76, + 857, + 468, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ObjectNav at 15 FPS while achieving the state-of-the-art performance on navigation efficiency. 
Moreover, our method outperforms all other modular-based methods in both efficiency and success rate with up to $30\\mathrm{x}$ times less computational cost.", + "bbox": [ + 496, + 90, + 890, + 166 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions include:", + "bbox": [ + 517, + 169, + 732, + 183 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present the first 3D-aware framework for Object-Nav task.", + "- We build an online point-based construction and fusion algorithm for efficient and comprehensive understanding of floor-level 3D scene representation.", + "- We propose a simultaneous two-policy mechanism which mitigates the problem of low sample efficiency in 3D-aware ObjectNav policy learning." + ], + "bbox": [ + 517, + 198, + 890, + 338 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 352, + 640, + 369 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GoalNav with Visual Sequences. There are constantly emerging researches on object goal navigation. One line of recent works directly leverages RGBD sequences, called end-to-end RL methods [47], which tends to implicitly encode the environment and predict low-level actions. These works benefit from visual representation [29, 50], auxiliary task [51], and data augmentation [27], demonstrating strong results on object goal navigation benchmarks [1, 49]. However, aiming to learn all skills through one policy from scratch, e.g., avoiding collisions, exploration, and stopping, it's well known that end-to-end RL methods suffer from low sampling efficiency for training and limited generalizability when transferred to the real world [3, 35]. Instead, our work uses explicit map to represent the environment, which ensures our sample efficiency and also obtain more generalizability through a modular-based paradigm [1, 35].", + "bbox": [ + 496, + 380, + 890, + 623 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GoalNav with Explicit Scene Representations. To ease the burden of learning directly from visual sequences, another category of methods, called modular-based methods [8,9,15,17,32], use explicit representations as a proxy for robot observations. By leveraging explicit scene representations like scene graph [34, 56] or 2D top-down map [14,35], modular-based methods benefit from the modularity and shorter time horizons. They are considered to be more sample efficient and generalizable [14, 35]. Recent progress in modular-based methods has proposed a frontier-based exploration strategy [35], a hallucinate-driven semantic mapping method [14], and novel verification stage [26]. In contrast with prior map-based works, our method utilizes 3D spatial knowledge, including 3D point semantic prediction and consistency, enabling a more comprehensive understanding of the environment.", + "bbox": [ + 496, + 625, + 890, + 867 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Embodied AI tasks with 3D Scene Representation. There are considerable research leveraging 3D scene repre", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "6673", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/dea7bdcc4332163e8adff0f00b879c135946d2551d13dca93986c44d9af806c4.jpg", + "image_caption": [ + "Figure 2. An overview of our framework. 
We take in a posed RGB-D image at time step $t$ and perform point-based construction algorithm to online fuse a 3D scene representation $(\\mathcal{M}_{3D}^{(t)})$ , along with a $\\mathcal{M}_{2D}^{(t)}$ from semantics projection. Then, we simultaneously leverage two policies, including a corner-guided exploration policy $\\pi_e$ and category-aware identification policy $\\pi_f$ , to predict a discrete corner goal $g_e^{(t)}$ and a target goal $g_f^{(t)}$ (if exist) respectively. Finally, the local planning module will drive the agent to the given target goal $g_f^{(t)}$ (top priority) or the corner goal $g_e^{(t)}$ ." + ], + "image_footnote": [], + "bbox": [ + 81, + 85, + 893, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "sensation on certain embodied AI tasks, e.g., object grasping [5, 10], drawer opening [30, 44]. These works leverage various routes, including reinforcement learning [13], imitation learning [44], and supervised learning [5] with 3D scene representation, such as mesh, dense grids. However, most of these 3D-aware embodied AI tasks only perform in a limited space [10, 30, 44], e.g., near one table or drawer. Under large scale environments, such as floor-level scenes in ObjectNav, the existing methods would suffer from complex 3D observation and large computational costs. In this work, we propose a framework through leveraging a point-based construction module and two dedicatedly designed exploration and identification policies, to enable a 3D-aware agnet for ObjectNav.", + "bbox": [ + 75, + 373, + 472, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 604, + 169, + 619 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Task Definition and Method Overview", + "text_level": 1, + "bbox": [ + 76, + 632, + 405, + 647 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Object Goal Navigation Task. In an unknown environment, the Object Goal Navigation task requires the agent to navigate to an instance of the specified target category. For fair comparison, we follow the previous problem setting [38, 49]. As initialization, the agent is located randomly without access to a pre-built environment map, and provided with a target category ID. At each time step $t$ , the agent receives noiseless onboard sensor readings, including an egocentric RGB-D image and a 3-DoF pose (2D position and 1D orientation) relative to the starting of the episode. Then the agent estimates its action $a_{t} \\in \\mathcal{A}$ for movement in a discrete action space, consisting of move_forward, turn_left, turn_right and stop. Given a limited time budget of 500 steps, the agent terminates the movement until it is within 1 meter of an object of the specified category.", + "bbox": [ + 75, + 657, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Method Overview. Figure 2 provides an overview of the proposed 3D-aware ObjectNav method. Our method takes RGBD frames along with pose sensor readings as input, to online construct a point-based scene representation $\\mathcal{M}_{3D}$ (Sec. 3.2), which is further projected to construct a 2D semantic map $\\mathcal{M}_{2D}$ . Given the structured 3D points $\\mathcal{M}_{3D}$ and 2D map $\\mathcal{M}_{2D}$ , our framework simultaneously performs two complementary policies (Sec. 3.3), the exploration policy and identification policy at a fixed time cycle of 25 steps. 
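As a rough illustration of this loop, the following is a simplified sketch under the task definition above; the environment interface shown is hypothetical (not the Habitat API), and only the constants are taken from the text:

```python
ACTIONS = ("move_forward", "turn_left", "turn_right", "stop")
MAX_STEPS = 500        # episode time budget
GOAL_CYCLE = 25        # long-term goals are re-sampled every 25 steps
SUCCESS_RADIUS = 1.0   # meters; success requires stopping near a target instance

def run_episode(env, agent, target_category):
    obs = env.reset(target_category)             # egocentric RGB-D + 3-DoF pose
    for t in range(MAX_STEPS):
        if t % GOAL_CYCLE == 0:
            agent.resample_long_term_goals(obs)  # exploration / identification policies
        action = agent.act(obs)                  # chosen by the local planning module
        assert action in ACTIONS
        if action == "stop":
            break
        obs = env.step(action)
    return env.distance_to_goal_object() <= SUCCESS_RADIUS
```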
The exploration policy predicts a long-term discrete corner goal $g_{e}$ , to drive the agent to explore the surrounding environment. Meanwhile, the identification policy evaluates the 3D points $\\mathcal{M}_{3D}$ at each step and outputs a target object goal $g_{f}$ if its semantic prediction is confident and consistent. The $g_{f}$ will be set as the approaching target for the agent once it exists, otherwise the agent will navigate to the long-term corner goal $g_{e}$ . An underlying local planning module will navigate the agent towards the goal using analytical path planning.", + "bbox": [ + 496, + 373, + 890, + 660 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Navigation-Driven 3D Scene Construction", + "text_level": 1, + "bbox": [ + 500, + 676, + 864, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During navigation, the 3D-aware agent will constantly obtain new observations and incrementally build a fine-grained 3D scene representation, integrating spatial and semantic information to drive the agent. However, given that our agent is deployed for a floor-level GoalNav task, it is fairly challenging to construct and leverage 3D representation across the entire scene while keeping an acceptable computational cost. Accordingly in this section, we extend an online point-based construction algorithm [53] to online organize the 3D points and further empower semantic fusion and consistency estimation. This design is tailored for a comprehensive scene understanding of the ObjectNav agent, requiring little computational resources.", + "bbox": [ + 496, + 704, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "6674", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D Scene Representation. At time step $t$ , we represent the 3D scene $\\mathcal{M}_{3D}$ as the point clouds, denoted as $P^{(t)} = \\{(P_{l}^{(t)}, P_{s}^{(t)}, P_{c}^{(t)})\\} \\in \\mathbb{R}^{N^{(t)} \\times (M + 4)}$ , where $N^{(t)}$ is the point number. For each point $i$ , the $M + 4$ channels include the point position $P_{i,l}^{(t)} \\in \\mathbb{R}^3$ , point semantics $P_{i,s}^{(t)} \\in \\mathbb{R}^M$ and the point-wise spatial semantic consistency information $P_{i,c}^{(t)} \\in \\mathbb{R}^1$ .", + "bbox": [ + 76, + 90, + 472, + 210 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Online 3D Point Fusion Given a new captured posed RGB image $I_{c}^{(t)}$ and depth image $I_{d}^{(t)}$ at time step $t$ , the agent can obtain the point position $P_{l}^{(t)}$ by back-projecting all the depth images into the 3D world space via their corresponding poses. These points will be organized by a point-based construction algorithm [53]. Here, we briefly revisit this strategy.", + "bbox": [ + 75, + 212, + 468, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The construction algorithm dynamically allocates occupied 3D blocks $\\{\\mathcal{B}_k\\}$ along with their index $k$ maintained by a tree-based method [20]. Each block $\\mathcal{B}_k$ is defined by the boundary of constant length (10cm) along the X, Y and Z axes, e.g., $[X_{min}(B_k), X_{max}(B_k)]$ . And the points $p_{l,x} \\in [X_{min}(B_k), X_{max}(B_k)]$ (the same requirement holds for Y and Z axes) be recorded by the block $\\mathcal{B}_k$ . Given any 3D point $p_i$ , the algorithm can achieve efficient neighborhood retrieval with the corresponding block index $k$ . 
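A simplified sketch of such block-based indexing is given below (for illustration only; [53] maintains the block indices with a tree-based structure rather than the plain hash map used here, and all names are ours):

```python
import math
from collections import defaultdict

BLOCK_SIZE = 0.10  # 10 cm blocks along the X, Y and Z axes

def block_index(p):
    """Integer index of the block that contains 3D point p = (x, y, z)."""
    return tuple(math.floor(c / BLOCK_SIZE) for c in p)

class BlockMap:
    """Dynamically allocated blocks; each block records the points inside it."""
    def __init__(self):
        self.blocks = defaultdict(list)

    def insert(self, point_id, p):
        self.blocks[block_index(p)].append(point_id)

    def neighborhood(self, p):
        """Candidate neighbors of p: points in its block and the 26 adjacent blocks."""
        bx, by, bz = block_index(p)
        found = []
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                for dz in (-1, 0, 1):
                    found.extend(self.blocks.get((bx + dx, by + dy, bz + dz), []))
        return found
```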
Furthermore, a one-level octree $\\mathcal{O}_i$ for each point $p_i$ is constructed to obtain the fine-grained spatial information among points. Specifically, we connect each point with its nearest points in the eight quadrants of the Cartesian coordinate system (See Figure 3). Powered by this point-based construction strategy, given any point, we can efficiently query its neighboring points via block retrieval and the per-point octree. This algorithm for organizing 3D points can run at 15 FPS while requiring reasonable memory resources (about $\\sim 500$ MB for one entire scene). We provide a more detailed description in the supplemental material.", + "bbox": [ + 75, + 327, + 472, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Online Semantic Fusion. With an efficient reconstruction algorithm in hand, we can directly fuse temporal information, e.g., multi-view semantic predictions, to achieve more accurate and consistent scene understanding. Specifically, any point $p_i$ that has been captured by a sequence of RGBD frames $\\{I_c^{(t)}, I_d^{(t)}\\}$ could have multiple semantic predictions $\\{p_{i,s}^{(t)}(I_c^{(t)})\\}$ . We thus propose to online aggregate the multi-view 2D semantic predictions using a max-fusion mechanism to obtain the final 3D semantic prediction:", + "bbox": [ + 75, + 631, + 468, + 785 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {i, s} ^ {(t)} = \\mathcal {N} \\left(\\max \\left(\\left\\{p _ {i, s} ^ {(t)} \\left(I _ {c} ^ {(t)}\\right)\\right\\}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 163, + 785, + 468, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the max is performed on each semantic category, followed by a normalization $\\mathcal{N}$ to linearly scale the probability distribution. Note that alternatives for fusing semantic predictions do exist, e.g., 3D convolution [19, 24] and Bayesian updating [28]. However, directly applying 3D convolution to such a floor-level 3D representation would in-", + "bbox": [ + 75, + 810, + 470, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4dd993af23292078a7bce4b6271b787202a70db028a106be49d5965ab55ae618.jpg", + "image_caption": [ + "Active Navigation" + ], + "image_footnote": [], + "bbox": [ + 511, + 95, + 669, + 212 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/bf02ca1f0460dfe37dee844f9bfe3371d969607e17b802ed1edfd208fa4e82b4.jpg", + "image_caption": [ + "Online Organized 3D points", + "Figure 3. Illustration of online 3D point fusion. (Left) A robot takes multi-view observations during navigation. (Right) The points $p$ are organized by dynamically allocated blocks $\\mathcal{B}$ and per-point octrees $\\mathcal{O}$ , which can be used to query neighborhood points of any given point." + ], + "image_footnote": [], + "bbox": [ + 681, + 99, + 880, + 222 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "evitably lead to a huge rise of computational cost, especially in the context of learning-based policies. We find that maximizing the 2D semantic prediction can already achieve an impressive improvement in semantic accuracy (see Figure 8), with higher memory and time efficiency. Similar findings have also been reported and exploited in relevant works [7, 16].", + "bbox": [ + 496, + 339, + 890, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Spatial Semantic Consistency. 
Based on the fact that semantic label should remain consistent for all the points in a single object, we propose to calculate the spatial semantic consistency information $P_{c}^{(t)}$ as part of the navigation-driven 3D scene representation. To be specific, $P_{i,c}^{(t)}$ is computed as the maximum semantic KL-divergence between point $P_{i}^{(t)}$ and its octree $\\mathcal{O}(P_i^{(t)})$ :", + "bbox": [ + 496, + 448, + 892, + 561 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nP _ {i, c} ^ {(t)} = \\max \\left(\\left\\{K L \\left(P _ {i, s} ^ {(t)}, P _ {j, s} ^ {(t)}\\right) \\mid \\forall P _ {j} ^ {(t)} \\in \\mathcal {O} \\left(P _ {i} ^ {(t)}\\right) \\right\\}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 568, + 890, + 590 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $KL$ denotes the KL-divergence computation, which is a statistical distance that measures the semantic probability distribution between $P_{i,s}^{(t)}$ and $P_{j,s}^{(t)}$ . Note for point $P_{i}^{(t)}$ , if we count all its spatially close points as the neighbourhood $\\mathcal{N}(P_i^{(t)})$ , it could be time consuming to calculate Equation 2, and the spatially close points do not help relieve the issue of outlier points as mentioned above. Therefore, we use the pre-built octree $\\mathcal{O}_i$ to retrieve 8 nearest point in the quadrants of the Cartesian coordinate system.", + "bbox": [ + 496, + 595, + 893, + 743 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Simultaneous Exploration and Identification", + "text_level": 1, + "bbox": [ + 500, + 753, + 877, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the aggregated 3D information, we expect to empower a 3D-aware agent for the ObjectNav task. However, despite the efficient 3D scene representation, the agent still suffers from the complex and high-dimensional observations, leading to a lower sample efficiency in RL and hampering the navigation policy learning. Therefore, we leverage two complementary sub-policies: corner-guided exploration policy and category-aware identification policy. Each", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "6675", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0cbe640eabeedfa32b989c7e85b39f6829120654d087b08d75cbd6f2e406b8be.jpg", + "image_caption": [ + "SemExp", + "Figure 4. Illustration of exploration policy. (Left) Learning-based continuous global goal [9]; (Middle) Heuristic direction selection [26]; (Right, ours) Learning-based corner goal prediction." 
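As a compact reference for the per-point semantic fusion (Eq. 1) and spatial consistency (Eq. 2) introduced in Sec. 3.2, a minimal NumPy sketch is given below; the eps smoothing is our own numerical safeguard and all function names are illustrative:

```python
import numpy as np

def fuse_semantics(per_view_probs):
    """Eq. (1): per-category max over the V per-view distributions (V x M array),
    followed by linear re-normalization."""
    fused = per_view_probs.max(axis=0)
    return fused / fused.sum()

def kl_divergence(p, q, eps=1e-8):
    """KL(p || q) between two categorical distributions."""
    return float(np.sum(p * np.log((p + eps) / (q + eps))))

def spatial_consistency(p_i, octree_neighbors):
    """Eq. (2): maximum KL divergence to the (up to 8) octree neighbors of a point."""
    return max(kl_divergence(p_i, p_j) for p_j in octree_neighbors)
```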
+ ], + "image_footnote": [ + ": Goal location : Candidate corner goals to be chosen in order" + ], + "bbox": [ + 83, + 88, + 205, + 186 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/96a956ec95b60458f383cf3aaa15ed77b6ef496b3b4edf5cc28300e7e54ab015.jpg", + "image_caption": [ + "Stubborn" + ], + "image_footnote": [ + ": Candidate corner goals to be predicted based on learning (Ours)" + ], + "bbox": [ + 210, + 90, + 334, + 185 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b37b22dbdb379eff69a08afc35a576b4948cb1675fbd4bf6fbbbb4e91ad9cd31.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 341, + 90, + 464, + 185 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "policy learns to predict low-dimensional discrete actions and outputs a goal location to navigate the agent, resulting in a strong performance while requiring less training time. We will detail the two policies below.", + "bbox": [ + 75, + 291, + 468, + 351 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Observation Space. At each time step $t$ , both policies take fine-grained 3D observation $x_{3D}^{(t)} = \\{P^{(t)} \\in ((4 + m) \\times N)\\}$ based on 3D scene representation $\\mathcal{M}_{3D}$ . Here, the $N$ indicates the point number (we sample 4096 points) and the $m + 4$ channels are comprised of point position $p_l^{(t)} \\in \\mathbb{R}^3$ , fused semantic predictions $p_s^{(t)} \\in \\mathbb{R}^m$ and spatial semantic consistency $p_c^{(t)} \\in \\mathbb{R}^1$ . Following existing works [8, 9], we use an additional egocentric 2D map $\\mathcal{M}_{2D}$ for exploration policy and the local path planning module, which is directly obtained by a project-to-ground operation. More detailedly, for 2D observation $x_{2D}^{(t)} \\in ((2 + m) \\times M \\times M)$ from 2D map $\\mathcal{M}_{2D}$ , the first two channels represent obstacles and explored area, and the rest of the channels each corresponds to an object category. Here, $\\mathcal{M}_{2D}$ (in a resolution of $M = 240$ with $20\\mathrm{cm}$ grids) is constructed to give a large perception view of the scene, while 3D points perform as a fine-grained observation of objects. In addition to the scene representations, we also pass the goal object category index $o_{ID}$ as the side input to both policies.", + "bbox": [ + 75, + 354, + 468, + 652 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Corner-Guided Exploration Policy. The exploration policy attempts to guide the agent to explore and perceive the surrounding environment where it could access any instance of the target object category. We observe that existing learning-based exploration policies predict goal locations over the 2D map in continuous or large-dimensional discrete action space (Figure 4 Left), suffering from low sample efficiency. Therefore, we define a corner-guided exploration policy $g_{e} = \\pi_{e}(x_{3D}, x_{2D}, o_{ID}; \\theta_{e})$ that predicts a corner goal $g_{e}$ to drive the agent (Figure 4 Right). Here, the $\\theta_{e}$ indicates the parameters of the policy, and $g_{e}$ is one of the four pre-defined corner goals {Top Left, Top Right, Bottom Left, Bottom Right} of the 2D map.", + "bbox": [ + 75, + 655, + 468, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compared to predicting goals in a continuous or high-dimensional action space, learning to predict the four corner goals significantly reduces the learning difficulty. 
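For instance, a minimal mapping from the discrete exploration action to a corner goal on the $M \times M$ top-down map could look as follows (a sketch only; in practice the corner can equivalently be taken on the bounding box of the explored scene rather than the raw map extent):

```python
def corner_goal(action, map_size=240):
    """Map a discrete action in {0, 1, 2, 3} to a (row, col) corner goal on the
    M x M top-down map (M = 240 cells of 20 cm in our setup).
    0: top-left, 1: top-right, 2: bottom-left, 3: bottom-right."""
    assert action in (0, 1, 2, 3)
    row = 0 if action < 2 else map_size - 1
    col = 0 if action % 2 == 0 else map_size - 1
    return (row, col)
```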
More", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/8d59a1ce8c8b33399ddb1177464b126e5b36c9cafcde4a1457f6d17ebdfa405a.jpg", + "image_caption": [ + "(A)", + "Figure 5. Illustration of identification policy. From $\\mathrm{A} \\rightarrow \\mathrm{B}$ , fused points are filtered by the category-aware predicted threshold $\\tau$ . From $\\mathrm{B} \\rightarrow \\mathrm{C}$ , the policy further checks the spatial label consistency of the points and identifies the target goal." + ], + "image_footnote": [], + "bbox": [ + 501, + 88, + 620, + 179 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/3ce7fe35ee9e5c7a98f90bba72dcd394208b78781821d48a5b857660c17fa076.jpg", + "image_caption": [ + "(B)" + ], + "image_footnote": [], + "bbox": [ + 620, + 89, + 754, + 179 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/dfc8fd98c0ff490f699f7c6635eb637f8d33b5581d4630b8830c9e330e75ae9c.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 754, + 89, + 888, + 179 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "over, as noted by previous studies [4, 26], the corner-goal-based exploration strategy exhibits the capacity to achieve efficient exploration through avoiding back-and-forth pacing. Superior to using other heuristic corner goal exploration strategies (Figure 4 Middle), our agent can learn from the 3D scene priors to behave more intelligently. Demonstrations of our corner-guided exploration can be found in the attached video.", + "bbox": [ + 496, + 270, + 890, + 390 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Category-Aware Identification Policy. During navigation, the agent consistently makes semantic predictions to identify an instance of target object category. Most works [9, 14] simply use a preset hard confidence threshold for target identification. However, this strategy is inherently suboptimal due to the considerable variability in semantic prediction results across different categories and observation angles. As a result, a preset threshold would be unable to adequately adapt to the ever-changing nature of these scenarios. Also, it ignores the consistency of the semantic prediction in 3D space.", + "bbox": [ + 496, + 393, + 892, + 560 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To tackle this issues, we propose to leverage both dynamic confidence threshold and spatial semantic label consistency for target identification. We define a policy $s = \\pi_f(x_{3D}, o_{ID}; \\theta_f)$ which takes the 3D observation $x_{3D}$ and target category index $o_{ID}$ and outputs a threshold-indicating action $s \\in \\{0, 1, \\dots, 9\\}$ . And the dynamic threshold $\\tau$ can be obtained by:", + "bbox": [ + 496, + 563, + 890, + 669 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tau = \\tau_ {l o w} + s \\cdot \\frac {1 - \\tau_ {l o w}}{1 0}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 678, + 890, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the $\\tau_{low}$ is set to 0.5 in our implementation for a threshold range $\\tau \\in [0.5,0.95]$ . The $\\tau$ will be used to dynamically identify the points belonging to the target object (Figure 5 Middle). 
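In code form, the threshold of Eq. (3) and the resulting point filtering reduce to a few lines (an illustrative sketch only; names are placeholders):

```python
TAU_LOW = 0.5  # lower end of the dynamic threshold range

def dynamic_threshold(s):
    """Eq. (3): map the discrete action s in {0, ..., 9} to tau in [0.5, 0.95]."""
    return TAU_LOW + s * (1.0 - TAU_LOW) / 10.0

def candidate_target_points(points, fused_probs, target_id, s):
    """Figure 5, A -> B: keep points whose fused probability for the target
    category exceeds the predicted threshold."""
    tau = dynamic_threshold(s)
    return [p for p, probs in zip(points, fused_probs) if probs[target_id] > tau]
```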
It is worth mentioning that this policy also utilizes a low-dimensional discrete action space, which is fairly easy for the agent to learn.", + "bbox": [ + 496, + 715, + 890, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To obtain the final target goal $g_{f}$ , our method further checks the spatial semantic label consistency. Specifically, we use the points $\\{p_i | (p_i, p) \\in \\mathcal{O}_p\\}$ connected by the perpoint octree $\\mathcal{O}_p$ to approximately represent the 3D surface of the target object. Our insight is that the points along the target's surface should have consistent semantic", + "bbox": [ + 496, + 810, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "6676", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "labels. Therefore, we only identify those points who have at least 2-ring neighbors across the octrees $\\{p_i|(p_i,p_j)\\in \\mathcal{O}_{p_j}|(p_j,p)\\in \\mathcal{O}_p\\}$ as the target object goal $g_{f}$ (Figure 5 Right). See Figure 5 for visualized illustration and more details can be found in supplemental material.", + "bbox": [ + 75, + 90, + 468, + 167 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Local Planning Module. The goals $g_{e}$ and $g_{f}$ from two polices will be consistently updated during navigation. Our method will preferentially utilize the target goal $g_{f}$ if it exists, otherwise take the long-term corner goal $g_{e}$ to explore. To navigate to the given location, we use the Fast Marching Method [42] to analytically plan the shortest path from the agent location. The agent then takes deterministic actions to follow this path.", + "bbox": [ + 75, + 169, + 468, + 290 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Rewards. For the exploration policy, we share a similar reward design as [1, 51]. The agent receives a sparse success reward $r_{success} = 2.5$ , a slack reward $r_{slack} = 10^{-2}$ and an exploration reward $r_{explore}$ . The exploration reward is a dense reward, defined by the number of new inserted point $n_p^{new}$ as $r_{explore} = n_p^{new} \\times 10^{-3}$ . The slack reward and exploration reward encourage the agent to take the most effective direction to the unobserved area. And for the identification policy, we combine the same success reward and slack reward borrowed from the exploration policy.", + "bbox": [ + 75, + 292, + 470, + 445 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 76, + 465, + 209, + 483 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experiment Setup.", + "text_level": 1, + "bbox": [ + 76, + 493, + 256, + 510 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We perform experiments on the Matterport3D (MP3D) [6] and Gibson [48] datasets with the Habitat simulator [39]. Both Gibson and MP3D contain photorealistic 3D reconstructions of real-world environments. For Gibson, we use 25 train / 5 val scenes from the Gibson tiny split. And we follow the same setting as in [9, 35] where we consider 6 goal categories, including chair, couch, potted plant, bed, toilet and TV. For MP3D, we use the standard split of 61 train / 11 val scenes with Habitat ObjectNav dataset [38], which consists of 21 goal categories (the full list can be found in the supplemental material). Note that, the RGB-D and pose readings are noise-free from simulation (follow the definition of [1]). 
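Returning briefly to the reward design of Sec. 3.3, the dense exploration term is simple enough to state directly; in the sketch below we treat the slack term as a per-step penalty (a common convention -- the main text only specifies its magnitude), so the sign is an assumption:

```python
R_SUCCESS = 2.5   # sparse success reward
R_SLACK = 1e-2    # per-step slack term (magnitude as given in Sec. 3.3)

def exploration_reward(num_new_points):
    """Dense reward: 1e-3 per 3D point newly inserted by the fusion module."""
    return 1e-3 * num_new_points

def step_reward(num_new_points, success):
    # Assumed sign convention: the slack term discourages wasted steps.
    r = exploration_reward(num_new_points) - R_SLACK
    if success:
        r += R_SUCCESS
    return r
```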
Estimation of the pose from noisy sensor readings is out of the scope of this work and can be addressed, if necessary, by incorporating off-the-shelf robust odometry [52, 54].", + "bbox": [ + 75, + 518, + 468, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation Details. On MP3D, we use the same pretrained 2D semantic model RedNet [21] as [35,51]. On Gibson, we leverage a Mask R-CNN [18], which is trained on the COCO dataset [23]. For each frame, we randomly sample 512 points for point-based construction. Moreover, we use PointNet [33] and fully convolutional networks [25] to obtain the features of the 3D points and the 2D map, respectively. During training, we sample actions every 25 steps and use Proximal Policy Optimization (PPO) [41] for both ex", + "bbox": [ + 75, + 763, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1. ObjectNav validation results on Gibson and MP3D. Our method is trained with 5 seeds and we report the averaged performance. The best of all methods and the best of all modular-based methods are highlighted in bold and underlined, respectively. Note that Habitat-Web makes use of extra data.", + "bbox": [ + 498, + 88, + 893, + 157 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bc2a0d0e9cf1e617aa5de57cb0ac57139a98cec2e8d5419db7e9437bf719bb3a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
 
MethodGibson (val)Matterport3D (val)
SPL(%)↑ Succ.(%)↑ DTS(m)↓SPL(%)↑ Succ.(%)↑ DTS(m)↓
DD-PPO [47]10.715.03.241.88.06.90
Red-Rabbit [51]---7.934.6-
THDA [27]---11.128.45.58
Habitat-Web [36]---10.235.4-
FBE [37]28.364.31.787.222.76.70
ANS [8]34.967.11.669.227.35.80
L2M* [14]---11.032.15.12
SemExp* [9]39.671.71.3910.928.36.06
Stubborn* [26]---13.531.25.01
PONI [35]41.073.61.2512.131.85.10
Ours42.174.51.1614.634.04.74
", + "bbox": [ + 506, + 169, + 897, + 321 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/3a88834df89b0f622787e6bf92c0ccd9b8a0de45743748289a90e6a9a89925c8.jpg", + "table_caption": [ + "Table 2. ObjectNav validation results on MP3D-L2M [14]." + ], + "table_footnote": [], + "table_body": "
MP3D-L2M
MethodSPL(%) ↑SoftSPL(%) ↑Succ.(%) ↑DTS(m)↓
SemExp [9]16.5-28.14.848
L2M [14]14.820.034.83.669
Ours21.230.540.23.278
", + "bbox": [ + 506, + 349, + 897, + 430 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ploration and identification policies. More implementation details can be found in the supplemental material.", + "bbox": [ + 498, + 444, + 890, + 473 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation Metrics. Following existing works [2, 14, 35], we adopt the following evaluation metrics: 1) SPL: success weighted by path length. It measures the efficiency of the agent over oracle path length, which serves as the primary evaluation metric for Habitat Challenge [49]. 2) Success rate: the percentage of successful episodes 3) Soft SPL: a softer version of SPL measure the progress towards the goal (even with 0 success). 4) DTS: geodesic distance (in m) to the success at the end of the episode.", + "bbox": [ + 496, + 477, + 890, + 613 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We consider mainstream baselines in the ObjectNav task. For end-to-end RL methods, we cover DD-PPO [47], Red-Rabiit [51], THDA [27], and Habiat-Web [36]. For modular based methods, we cover FBE [37], ANS [8], L2M [14], SemExp [9], Stubborn [26] and PONI [35]. Note that, some works use additional data to improve the performance, e.g. Habitat-web leverages human demonstration trajectories, and THDA utilizes data augmentation. It is challenging to compare all the methods fairly. Therefore, we are particularly interested in the three most relevant baselines: SemExp, Stubborn, and PONI. These three methods share the same 2D semantic predictors [18, 21] as our method.", + "bbox": [ + 496, + 616, + 890, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Results", + "text_level": 1, + "bbox": [ + 500, + 828, + 591, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison on MP3D and Gibson. We evaluate our approach on MP3D (val) and Gibson (val) with other baselines, including end-to-end RL(rows 1 - 4) and modular", + "bbox": [ + 498, + 854, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6677", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bcb7f3863f2cabb911e2b285896d351ff791cfb97144ef3a5e95e7aa36afe1c4.jpg", + "image_caption": [ + "Figure 6. An qualitative visualization of the trajectory of the proposed method. We visualize an episode from MP3D where an agent is expected to find a bed. The semantic prediction $p_{s}$ and spatial semantic consistency $p_{c}$ of points are visualized on the left. During navigation, the agent can successfully dismiss the wrong prediction and approach and finally call stop around the target object." + ], + "image_footnote": [], + "bbox": [ + 122, + 85, + 851, + 378 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/14e75e202f1f9ffb964f33cea9680870e4fd824181d058ed829026bcc38b801b.jpg", + "table_caption": [ + "Table 3. Comparison of different exploration policies. Here, all methods share the same identification strategy from [9] for fair comparison." + ], + "table_footnote": [], + "table_body": "
MethodSPL(%)Succ.(%)DTS(m)
Learn Continuous Goal.11.128.66.354
Learn dense Grid Goal.12.729.55.635
Learn 8 corner goal.12.930.75.112
Heuristic. 4 corner goal.13.533.04.995
Learn 4 corner goal. (Ours)13.933.54.931
", + "bbox": [ + 84, + 494, + 457, + 595 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "based methods (rows 5 - 10). Note that, SemExp and Stubborn did not report the results on MP3D validation, while L2M uses a self-made dataset MP3D-L2M based on MP3D and tests fewer categories than what we do. We therefore faithfully provide the results, denoted with *, by evaluating with their public available code. The results are demonstrated in Table 1. On both datasets, our method achieves the state-of-the-art ObjetNav efficiency (SPL) among all methods (2.6% higher on Gibson dataset and 8.1% higher on MP3D). For the success rate, our method achieves the best results among all modular-based methods, showing comparable performance with additional annotation methods THAD [27] and Habitat-web [36]. Especially, compared with the modular-based methods, SemExp, Stubborn, and PONI, which share the same 2D semantic predictor [21] as ours, the results fairly demonstrate the superiority of our framework on both efficiency and success rate. We also provide the results validated on MP3D-L2M in Table 2.", + "bbox": [ + 75, + 609, + 468, + 881 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We also provide a qualitative visualization of MP3D", + "bbox": [ + 96, + 883, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/990517034afe38f74e48dc64e0e91f3423a07cd1e151a70544794d503b2e24ad.jpg", + "table_caption": [ + "Table 4. Comparison on different identification policies." + ], + "table_footnote": [], + "table_body": "
MethodTypeSPL(%)Succ.(%)DTS(m)
Repr.Thre.
Deterministic2D0.8512.830.15.151
3D0.8513.832.54.987
Learning (Ours)3D-14.634.04.749
", + "bbox": [ + 508, + 467, + 885, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "episodes in Figure 6. Here, our method online updates the semantic prediction and successfully dismisses the wrong target goal. For more qualitative results, please refer to the supplemental material.", + "bbox": [ + 496, + 577, + 890, + 637 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison on Exploration Policy. We conduct an experiment to verify the efficiency of our corner-guided exploration policy on MP3D. To remove the effect of the 2D semantic predictor and identification policy, all competitors share the same semantic predictor and the heuristic identification policy proposed in SemExp [9]. The results are reported in Table 3. Our corner-guided exploration policy outperforms the mainstream existing methods, including learning-based ones [8, 14] and heuristic ones [26]. Our findings indicate that the best performance is achieved through learning to predict discrete corner goals from the four corners of the scene. This suggests that the four-corner design, which benefits from a small, discrete action space, is already capable of efficiently guiding the agent in exploring the environment.", + "bbox": [ + 496, + 640, + 892, + 866 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comparison on Identification Policy. Another critical challenge in ObjectNav is how to properly identify an in-", + "bbox": [ + 498, + 869, + 890, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "6678", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2ce05c5b20cb1e350538777bac655512bd833d2c7528abbb36a22e5aeb2c4cd4.jpg", + "image_caption": [ + "Figure 7. A comparison of the predicted threshold distributions across different categories by our category-aware policy. We report the ratio of each predicted threshold." + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 459, + 250 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "stance of the target object category. Therefore, we evaluate our identification policy on MP3D along with other identifying strategies, including the 2D frame-based policy adopted in [9] and the 3D point-based strategies proposed in our approach. The results are shown in Table 4. We observe a performance improvement (rows 1 - 2) by simply leveraging the 3D point-based construction and fusion algorithm. This demonstrates that the multi-view observations provide more accurate semantic predictions, which effectively reduces false positives (see examples in Figure 8). Moreover, our category-aware identification policy, through predicting a dynamic threshold, demonstrates even better performance.", + "bbox": [ + 75, + 316, + 468, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further investigate the effect of our identification policy, we conduct a breakdown study in Figure 7 by plotting the distribution of predicted semantic confidence thresholds. Specifically, we plot the distribution of three different categories (table, cushion, plant). For a relatively easy-to-recognize category, such as table with a $52.6\\%$ success rate (SR), our policy predicts a broad threshold distribution. However, for more challenging categories, such as cushion ($36.9\\%$ SR) and plant ($16.1\\%$ SR), the policy tends to be more conservative by setting a higher threshold. 
The results demonstrate the category-aware characteristic of our identification policy which adapts well to different difficulty levels across categories.", + "bbox": [ + 75, + 517, + 468, + 714 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation Study. We also perform an ablation study to verify the effectiveness of different components of our method. The results are demonstrated in Table 5. The cooperation of the 2D top-down map and 3D points (row 4) shows significant improvement by incorporating extensive scene perception (in 2D) and fine-grained object perception (in 3D). Moreover, rows (3-4) and (4-5) proved the effectiveness of leveraging consistency information and the identification policy, respectively.", + "bbox": [ + 75, + 715, + 468, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Analysis of Computational Cost. Our framework is extremely memory efficient, which requires about 0.5GB for one scene, and can perform online construction and", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1359a4878b9c18cc729b6fbe12c2065e9b39efcf6ce8c0c82f5c7ee1b8c6c147.jpg", + "table_caption": [ + "Table 5. Ablation study of main components in our method. The pos. indicates the semantic predictions $p_{s}$ , KL indicates the spatial semantic consistency $p_{c}$ and the I. policy indicates the usage of the proposed identification policy." + ], + "table_footnote": [], + "table_body": "
2D map3D points Pos. KLI. PolicySPL(%)Succ.(%)DTS(m)
11.229.66.213
13.032.35.769
13.733.85.620
13.933.54.931
14.634.0
", + "bbox": [ + 508, + 142, + 861, + 252 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8013c6b051e07ee2ed1541e3ef95bc29eace7f1ff50d29a9bb79a210aaa71363.jpg", + "image_caption": [ + "Figure 8. Visualization of the results of online 3D point fusion." + ], + "image_footnote": [], + "bbox": [ + 532, + 266, + 861, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "semantic fusion at a frame rate of 15 FPS. Moreover, our method requires only 48 GPU hours to train a 3D-aware agent on MP3D dataset to achieve the SOTA performance among all modular-based methods. This is significantly faster (30x) than other existing reinforcement learning based methods [9, 51], and is comparable to supervised learning modular-based methods [35]", + "bbox": [ + 498, + 523, + 890, + 630 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 657, + 617, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we present a 3D-aware framework for object goal navigation. Our method is based on a 3D point-based construction algorithm to observe the 3D scenes and simultaneously perform exploration and identification policies to navigate the agent. Our method achieve SOTA performance among all modular-based methods, while requiring less training time. In the future, we would like to exploit this 3D-aware framework in other embodied AI tasks, e.g. mobile manipulation, robotic nurses.", + "bbox": [ + 496, + 686, + 890, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. We thank anonymous reviewers for their valuable suggestions. This work was supported by National Key Research and Development Program of China (2018AAA0102200), NSFC (62132021), and Beijing Academy of Artificial Intelligence (BAAI).", + "bbox": [ + 496, + 825, + 893, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "6679", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. ObjectNav Revisited: On Evaluation of Embodied Agents Navigating to Objects. In arXiv:2006.13171, 2020. 2, 6, 13", + "[2] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. Objectnav revisited: On evaluation of embodied agents navigating to objects. ArXiv, abs/2006.13171, 2020. 6", + "[3] Tommaso Campari, Paolo Eccher, Luciano Serafini, and Lamberto Ballan. Exploiting scene-specific features for object goal navigation. In European Conference on Computer Vision, pages 406-421. Springer, 2020. 1, 2", + "[4] Chao Cao, Hongbiao Zhu, Howie Choset, and Ji Zhang. Tare: A hierarchical framework for efficiently exploring complex 3d environments. In Robotics: Science and Systems, 2021. 5", + "[5] Hanwen Cao, Hao-Shu Fang, Wenhai Liu, and Cewu Lu. Suctionnet-1billion: A large-scale benchmark for suction grasping. IEEE Robotics and Automation Letters, 6(4):8718-8725, 2021. 3", + "[6] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. 
arXiv preprint arXiv:1709.06158, 2017. 6, 13", + "[7] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in Neural Information Processing Systems, 34:13086-13098, 2021. 2, 4", + "[8] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. arXiv preprint arXiv:2004.05155, 2020. 1, 2, 5, 6, 7, 13", + "[9] Devendra Singh Chaplot, Dhiraj Prakashchand Gandhi, Abhinav Gupta, and Russ R Salakhutdinov. Object goal navigation using goal-oriented semantic exploration. Advances in Neural Information Processing Systems, 33:4247-4258, 2020. 1, 2, 5, 6, 7, 8, 13", + "[10] Changhyun Choi, Wilko Schwarting, Joseph DelPreto, and Daniela Rus. Learning object grasping for soft robot hands. IEEE Robotics and Automation Letters, 3(3):2370-2377, 2018. 3", + "[11] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5556-5565, 2015. 14", + "[12] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In ECCV, 2018. 1", + "[13] Samir Yitzhak Gadre, Kiana Ehsani, and Shuran Song. Act the part: Learning interaction strategies for articulated object part discovery. ICCV, 2021. 3" + ], + "bbox": [ + 78, + 117, + 470, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Georgios Georgakis, Bernadette Bucher, Karl Schmeckpeper, Siddharth Singh, and Kostas Daniilidis. Learning to map for active semantic goal navigation. In International Conference on Learning Representations (ICLR), 2022. 1, 2, 5, 6, 7, 13", + "[15] Georgios Georgakis, Yimeng Li, and Jana Kosecka. Simultaneous mapping and target driven navigation. ArXiv, abs/1911.07980, 2019. 2", + "[16] Margarita Grinvald, Fadri Furrer, Tonci Novkovic, Jen Jen Chung, Cesar Cadena, Roland Siegwart, and Juan Nieto. Volumetric instance-aware semantic mapping and 3d object discovery. IEEE Robotics and Automation Letters, 4(3):3037-3044, 2019. 4", + "[17] Saurabh Gupta, Varun Tolani, James Davidson, Sergey Levine, Rahul Sukthankar, and Jitendra Malik. Cognitive mapping and planning for visual navigation. International Journal of Computer Vision, 128:1311-1330, 2017. 2", + "[18] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross B. Girshick. Mask r-cnn. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42:386-397, 2020. 6", + "[19] Shi-Sheng Huang, Ze-Yu Ma, Tai-Jiang Mu, Hongbo Fu, and Shi-Min Hu. Supervoxel convolution for online 3d semantic segmentation. ACM Transactions on Graphics (TOG), 40(3):1-15, 2021. 4", + "[20] Hosagrahar V Jagadish, Beng Chin Ooi, Kian-Lee Tan, Cui Yu, and Rui Zhang. idistance: An adaptive b+-tree based indexing method for nearest neighbor search. ACM Transactions on Database Systems (TODS), 30(2):364-397, 2005. 4", + "[21] Jindong Jiang, Lunan Zheng, Fei Luo, and Zhijun Zhang. Rednet: Residual encoder-decoder network for indoorrgb-d semantic segmentation. arXiv preprint arXiv:1806.01054, 2018. 6, 7", + "[22] Cheng Lin, Tingxiang Fan, Wenping Wang, and Matthias Nießner. Modeling 3d shapes by reinforcement learning. In ECCV, 2020. 2", + "[23] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C. Lawrence Zitnick. 
Microsoft coco: Common objects in context. In ECCV, 2014. 6", + "[24] Leyao Liu, Tian Zheng, Yun-Jou Lin, Kai Ni, and Lu Fang. Ins-conv: Incremental sparse convolution for online 3d segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18975–18984, 2022. 4", + "[25] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 6, 13", + "[26] Haokuan Luo, Albert Yue, Zhang-Wei Hong, and Pulkit Agrawal. Stubborn: A strong baseline for indoor object navigation. arXiv preprint arXiv:2203.07359, 2022. 2, 5, 6, 7, 13", + "[27] Oleksandr Maksymets, Vincent Cartillier, Aaron Gokaslan, Erik Wijmans, Wojciech Galuba, Stefan Lee, and Dhruv Batra. Thda: Treasure hunt data augmentation for semantic navigation. In Proceedings of the IEEE/CVF International" + ], + "bbox": [ + 501, + 92, + 893, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "6680", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision, pages 15374-15383, 2021. 1, 2, 6, 7", + "[28] John McCormac, Ankur Handa, Andrew Davison, and Stefan Leutenegger. Semantic fusion: Dense 3d semantic mapping with convolutional neural networks. In 2017 IEEE International Conference on Robotics and automation (ICRA), pages 4628-4635. IEEE, 2017. 4", + "[29] Arsalan Mousavian, Alexander Toshev, Marek Fiser, Jana Košecka, Ayzaan Wahid, and James Davidson. Visual representations for semantic target driven navigation. In 2019 International Conference on Robotics and Automation (ICRA), pages 8846-8852. IEEE, 2019. 2", + "[30] Tongzhou Mu, Zhan Ling, Fanbo Xiang, Derek Cathera Yang, Xuanlin Li, Stone Tao, Zhiao Huang, Zhiwei Jia, and Hao Su. Maniskill: Generalizable manipulation skill benchmark with large-scale demonstrations. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. 3", + "[31] Alexey Nekrasov, Jonas Schult, Or Litany, B. Leibe, and Francis Engelmann. Mix3d: Out-of-context data augmentation for 3d scenes. 2021 International Conference on 3D Vision (3DV), pages 116-125, 2021. 1", + "[32] Emilio Parisotto and Ruslan Salakhutdinov. Neural map: Structured memory for deep reinforcement learning. ArXiv, abs/1702.08360, 2018. 2", + "[33] C. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 77-85, 2017. 6, 13", + "[34] Yiding Qiu, Anwesan Pal, and Henrik I Christensen. Learning hierarchical relationships for object-goal navigation. arXiv preprint arXiv:2003.06749, 2020. 1, 2", + "[35] Santhosh Kumar Ramakrishnan, Devendra Singh Chaplot, Ziad Al-Halah, Jitendra Malik, and Kristen Grauman. Poni: Potential functions for objectgoal navigation with interaction-free learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18890-18900, 2022. 1, 2, 6, 8", + "[36] Ram Ramrakhya, Eric Undersander, Dhruv Batra, and Abhishek Das. Habitat-web: Learning embodied object-search strategies from human demonstrations at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5173-5183, 2022. 1, 6, 7", + "[37] IEEE Robotics. 
Proceedings 1997, IEEE international symposium on computational intelligence in robotics and automation cira'97 - towards new computational principles for robotics and automation, july 10-11, 1997, monterey, california, usa. In CIRA, 1997. 6", + "[38] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, et al. Habitat: A platform for embodied ai research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9339-9347, 2019. 1, 3, 6, 14", + "[39] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 6", + "[40] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2", + "[41] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. ArXiv, abs/1707.06347, 2017. 6", + "[42] James A. Sethian. Fast marching methods. SIAM Rev., 41:199-235, 1999. 6", + "[43] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv:2210.05663, 2022. 1", + "[44] Hao Shen, Weikang Wan, and He Wang. Learning category-level generalizable object manipulation policy via generative adversarial self-imitation learning from demonstrations. arXiv preprint arXiv:2203.02107, 2022. 3", + "[45] Thang Vu, Kookhoi Kim, Tung Minh Luu, Xuan Thanh Nguyen, and Chang-Dong Yoo. Softgroup for 3d instance segmentation on point clouds. ArXiv, abs/2203.01509, 2022. 1", + "[46] Thomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. Robotics: Science and Systems, 2015. 2", + "[47] Erik Wijmans, Abhishek Kadian, Ari Morcos, Stefan Lee, Irfan Essa, Devi Parikh, Manolis Savva, and Dhruv Batra. Dd-ppo: Learning near-perfect pointgoal navigators from 2.5 billion frames. arXiv preprint arXiv:1911.00357, 2019. 1, 2, 6", + "[48] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9068-9079, 2018. 6", + "[49] Karmesh Yadav, Santhosh Kumar Ramakrishnan, John Turner, Aaron Gokaslan, Oleksandr Maksymets, Rishabh Jain, Ram Ramrakhya, Angel X Chang, Alexander Clegg, Manolis Savva, Eric Undersander, Devendra Singh Chaplot, and Dhruv Batra. Habitat challenge 2022. https://aihabitat.org/challenge/2022/, 2022. 1, 2, 3, 6", + "[50] Wei Yang, Xiaolong Wang, Ali Farhadi, Abhinav Gupta, and Roozbeh Mottaghi. Visual semantic navigation using scene priors. arXiv preprint arXiv:1810.06543, 2018. 2", + "[51] Joel Ye, Dhruv Batra, Abhishek Das, and Erik Wijmans. Auxiliary tasks and exploration enable objectgoal navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16117-16126, 2021. 
1, 2, 6, 8", + "[52] Jiazhao Zhang, Yijie Tang, He Wang, and Kai Xu. Asro-dio: Active subspace random optimization based depth inertial odometry. IEEE Transactions on Robotics, 2022. 6", + "[53] Jiazhao Zhang, Chenyang Zhu, Lintao Zheng, and Kai Xu. Fusion-aware point convolution for online semantic 3d scene" + ], + "bbox": [ + 503, + 92, + 893, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "6681", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4534-4543, 2020. 2, 3, 4, 12", + "[54] Xiaoming Zhao, Harsh Agrawal, Dhruv Batra, and Alexander G. Schwing. The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 16107-16116, 2021. 6", + "[55] Lintao Zheng, Chenyang Zhu, Jiazhao Zhang, Hang Zhao, Hui Huang, Matthias Nießner, and Kai Xu. Active scene understanding via online semantic reconstruction. Computer Graphics Forum, 38, 2019. 2", + "[56] Fengda Zhu, Xiwen Liang, Yi Zhu, Qizhi Yu, Xiaojun Chang, and Xiaodan Liang. Soon: Scenario oriented object navigation with graph-based exploration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12689-12699, 2021. 1, 2", + "[57] Yuke Zhu, Roozbeh Mottaghi, Eric Kolve, Joseph J. Lim, Abhinav Kumar Gupta, Li Fei-Fei, and Ali Farhadi. Target-driven visual navigation in indoor scenes using deep reinforcement learning. 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 3357–3364, 2017. 2" + ], + "bbox": [ + 78, + 90, + 470, + 401 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "6682", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_model.json b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f9d0473b3a267463e6d51cdbf4d7af0e1abef947 --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_model.json @@ -0,0 +1,2411 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.243, + 0.131, + 0.728, + 0.177 + ], + "angle": 0, + "content": "3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.203, + 0.346, + 0.221 + ], + "angle": 0, + "content": "Jiazhao Zhang\\(^{1,2,*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.387, + 0.204, + 0.464, + 0.221 + ], + "angle": 0, + "content": "Liu Dai\\(^{3*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.204, + 0.625, + 0.222 + ], + "angle": 0, + "content": "Fanpeng Meng" + }, + { + "type": "text", + "bbox": [ + 0.668, + 0.204, + 0.779, + 0.222 + ], + "angle": 0, + "content": "Qingnan Fan" + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.222, + 0.447, + 0.238 + ], + "angle": 0, + "content": "Xuelin Chen" + }, + { + "type": "text", + "bbox": [ + 0.489, + 0.222, + 0.687, + 0.24 + ], + "angle": 0, + "content": "Kai Xu\\(^{6}\\) He Wang\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.153, + 0.24, + 0.827, + 0.257 + ], + "angle": 0, + "content": "\\(^{1}\\) CFCS, Peking University \\(^{2}\\) Beijing Academy of Artificial Intelligence \\(^{3}\\)CEIE, Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.258, + 0.882, + 0.275 + ], + "angle": 0, + "content": "\\(^{4}\\)Huazhong University of Science and Technology \\(^{5}\\)Tencent AI Lab \\(^{6}\\)National University of Defense Technology" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.309, + 0.314, + 0.325 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.344, + 0.474, + 0.678 + ], + "angle": 0, + "content": "Object goal navigation (ObjectNav) in unseen environments is a fundamental task for Embodied AI. Agents in existing works learn ObjectNav policies based on 2D maps, scene graphs, or image sequences. Considering this task happens in 3D space, a 3D-aware agent can advance its ObjectNav capability via learning from fine-grained spatial information. However, leveraging 3D scene representation can be prohibitively unpractical for policy learning in this floor-level task, due to low sample efficiency and expensive computational cost. In this work, we propose a framework for the challenging 3D-aware ObjectNav based on two straightforward sub-policies. The two sub-policies, namely corner-guided exploration policy and category-aware identification policy, simultaneously perform by utilizing online fused 3D points as observation. Through extensive experiments, we show that this framework can dramatically improve the performance in ObjectNav through learning from 3D scene representation. Our framework achieves the best performance among all modular-based methods on the Matterport3D and Gibson datasets, while requiring (up to \\(30x\\)) less computational cost for training. The code will be released to benefit the community.\\(^1\\)" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.706, + 0.21, + 0.722 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.47, + 0.856 + ], + "angle": 0, + "content": "As a vital task for intelligent embodied agents, object goal navigation (ObjectNav) [38, 49] requires an agent to find an object of a particular category in an unseen and unmapped scene. Existing works tackle this task through end-to-end reinforcement learning (RL) [27, 36, 47, 51] or modular-based methods [9, 14, 35]. 
End-to-end RL based methods take as input the image sequences and directly output low-level navigation actions, achieving competitive" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.307, + 0.571, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.583, + 0.308, + 0.653, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.655, + 0.307, + 0.734, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.307, + 0.822, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.84, + 0.31, + 0.881, + 0.333 + ], + "angle": 0, + "content": "c: cushion \ns: sofa \nt: chair" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.374, + 0.627, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.374, + 0.755, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.374, + 0.885, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.653, + 0.45, + 0.739, + 0.461 + ], + "angle": 0, + "content": "Looking for a chair" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.473, + 0.895, + 0.543 + ], + "angle": 0, + "content": "Figure 1. We present a 3D-aware ObjectNav framework along with simultaneous exploration and identification policies: \\(\\mathbf{A} \\rightarrow \\mathbf{B}\\), the agent was guided by an exploration policy to look for its target; \\(\\mathbf{B} \\rightarrow \\mathbf{C}\\), the agent consistently identified a target object and finally called STOP." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.551, + 0.893, + 0.734 + ], + "angle": 0, + "content": "performance while suffering from lower sample efficiency and poor generalizability across datasets [3, 27]. Therefore, we favor modular-based methods, which usually contain the following modules: a semantic scene mapping module that aggregates the RGBD observations and the outputs from semantic segmentation networks to form a semantic scene map; an RL-based goal policy module that takes as input the semantic scene map and learns to online update a goal location; finally, a local path planning module that drives the agent to that goal. Under this design, the semantic accuracy and geometric structure of the scene map are crucial to the success of object goal navigation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.903 + ], + "angle": 0, + "content": "We observe that the existing modular-based methods mainly construct 2D maps [8, 9], scene graphs [34, 56] or neural fields [43] as their scene maps. Given that objects lie in 3D space, these scene maps are inevitably deficient in leveraging 3D spatial information of the environment comprehensively and thus have been a bottleneck for further improving object goal navigation. In contrast, forming a 3D scene representation naturally offers more accurate, spatially dense and consistent semantic predictions than its 2D counterpart, as proved by [12, 31, 45]. 
Hence, if the agent could take advantage of the 3D scene understanding and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.094, + 0.863, + 0.197, + 0.875 + ], + "angle": 0, + "content": "*Joint first authors" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.876, + 0.338, + 0.888 + ], + "angle": 0, + "content": "† Corresponding author: hewang@pku.edu.cn" + }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.888, + 0.423, + 0.9 + ], + "angle": 0, + "content": "1 Homepage: https://pku-epic.github.io/3D-Aware-ObjectNav/" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.863, + 0.423, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6672" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "form a 3D semantic scene map, it is expected to advance the performance of ObjectNav." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.125, + 0.47, + 0.306 + ], + "angle": 0, + "content": "However, leveraging 3D scene representation would bring great challenges to ObjectNav policy learning. First, building and querying fine-grained 3D representation across a floor-level scene requires extensive computational cost, which can significantly slow down the training of RL [7,55]. Also, 3D scene representation induces considerably more complex and high-dimensional observations to the goal policy than its 2D counterpart, leading to a lower sample efficiency and hampering the navigation policy learning [22, 57]. As a result, it is demanding to design a framework to efficiently and effectively leverage powerful 3D information for ObjectNav." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.309, + 0.47, + 0.521 + ], + "angle": 0, + "content": "To tackle these challenges, we propose a novel framework composed of an online semantic point fusion module for 3D semantic scene mapping and two parallel policy networks in charge of scene exploration and object identification, along with a local path planning module. Our online semantic point fusion module extends a highly efficient online point construction algorithm [53] to enable online semantic fusion and spatial semantic consistency computation from captured RGBD sequences. This 3D scene construction empowers a comprehensive 3D scene understanding for ObjectNav. Moreover, compared to dense voxel-based methods [7, 55], our point-based fusion algorithm are more memory-efficient [40, 46] which makes it practically usable for floor-level navigation task. (See Figure 1)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.523, + 0.47, + 0.856 + ], + "angle": 0, + "content": "Moreover, to ease the learning of navigation policy, we further propose to factorize the navigation policy into two sub-policies, namely exploration and identification. The two policies simultaneously perform to roll out an exploration goal and an identified object goal (if exist), respectively. Then the input for the local path planning module will switch between these two goals, depending on whether there exists an identified target object. More specifically, we propose a corner-guided exploration policy which learns to predict a long-term discrete goal at one of the four corners of the bounding box of the scene. These corner goals efficiently drive the agent to perceive the surroundings and explore regions where the target object is possibly settled. 
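As a compact illustration of the goal-switching rule just described (the identification policy itself is detailed next), consider the minimal sketch below; the function name, the tuple-based goal type, and the example coordinates are illustrative assumptions, not the paper's released code.

```python
from typing import Optional, Tuple

Goal = Tuple[float, float]  # a 2D goal location on the map (assumed convention)

def select_goal(identified_goal: Optional[Goal], corner_goal: Goal) -> Goal:
    """Pick the goal handed to the local path planner at each step.

    The identified target goal (from the identification policy) always takes
    priority; otherwise the agent keeps heading to the long-term corner goal
    proposed by the exploration policy.
    """
    return identified_goal if identified_goal is not None else corner_goal

# Toy usage: no target found yet -> keep exploring; target found -> approach it.
print(select_goal(None, (3.0, 1.5)))        # (3.0, 1.5)
print(select_goal((0.8, 2.2), (3.0, 1.5)))  # (0.8, 2.2)
```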
And for identification, a category-aware identification policy is proposed to dynamically learn a discrete confidence threshold to identify the semantic predictions for each category. Both of these policies are trained by RL in low-dimensional discrete action space. Through experiments, the simultaneous two-policy mechanism and discrete action space design dramatically reduce the difficulty in learning for 3D-aware ObjectNav and achieve better performance than existing modular-based navigation strategies [26, 35]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.858, + 0.47, + 0.889 + ], + "angle": 0, + "content": "Through extensive evaluation on the public benchmarks, we demonstrate that our method performs online 3D-aware" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.167 + ], + "angle": 0, + "content": "ObjectNav at 15 FPS while achieving the state-of-the-art performance on navigation efficiency. Moreover, our method outperforms all other modular-based methods in both efficiency and success rate with up to \\(30\\mathrm{x}\\) times less computational cost." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.17, + 0.733, + 0.184 + ], + "angle": 0, + "content": "Our main contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.199, + 0.891, + 0.228 + ], + "angle": 0, + "content": "- We present the first 3D-aware framework for Object-Nav task." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.239, + 0.892, + 0.284 + ], + "angle": 0, + "content": "- We build an online point-based construction and fusion algorithm for efficient and comprehensive understanding of floor-level 3D scene representation." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.293, + 0.892, + 0.339 + ], + "angle": 0, + "content": "- We propose a simultaneous two-policy mechanism which mitigates the problem of low sample efficiency in 3D-aware ObjectNav policy learning." + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.199, + 0.892, + 0.339 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.353, + 0.642, + 0.37 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.381, + 0.892, + 0.624 + ], + "angle": 0, + "content": "GoalNav with Visual Sequences. There are constantly emerging researches on object goal navigation. One line of recent works directly leverages RGBD sequences, called end-to-end RL methods [47], which tends to implicitly encode the environment and predict low-level actions. These works benefit from visual representation [29, 50], auxiliary task [51], and data augmentation [27], demonstrating strong results on object goal navigation benchmarks [1, 49]. However, aiming to learn all skills through one policy from scratch, e.g., avoiding collisions, exploration, and stopping, it's well known that end-to-end RL methods suffer from low sampling efficiency for training and limited generalizability when transferred to the real world [3, 35]. Instead, our work uses explicit map to represent the environment, which ensures our sample efficiency and also obtain more generalizability through a modular-based paradigm [1, 35]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.626, + 0.892, + 0.868 + ], + "angle": 0, + "content": "GoalNav with Explicit Scene Representations. To ease the burden of learning directly from visual sequences, another category of methods, called modular-based methods [8,9,15,17,32], use explicit representations as a proxy for robot observations. 
By leveraging explicit scene representations like scene graphs [34, 56] or 2D top-down maps [14, 35], modular-based methods benefit from modularity and shorter time horizons. They are considered to be more sample efficient and generalizable [14, 35]. Recent progress in modular-based methods has proposed a frontier-based exploration strategy [35], a hallucinate-driven semantic mapping method [14], and a novel verification stage [26]. In contrast with prior map-based works, our method utilizes 3D spatial knowledge, including 3D point semantic prediction and consistency, enabling a more comprehensive understanding of the environment." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Embodied AI tasks with 3D Scene Representation. There is considerable research leveraging 3D scene repre" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6673" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.087, + 0.895, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.283, + 0.895, + 0.361 + ], + "angle": 0, + "content": "Figure 2. An overview of our framework. We take in a posed RGB-D image at time step \\( t \\) and perform a point-based construction algorithm to online fuse a 3D scene representation \\( \\mathcal{M}_{3D}^{(t)} \\), along with a 2D map \\( \\mathcal{M}_{2D}^{(t)} \\) obtained by semantic projection. Then, we simultaneously leverage two policies, a corner-guided exploration policy \\( \\pi_e \\) and a category-aware identification policy \\( \\pi_f \\), to predict a discrete corner goal \\( g_e^{(t)} \\) and a target goal \\( g_f^{(t)} \\) (if it exists), respectively. Finally, the local planning module drives the agent to the given target goal \\( g_f^{(t)} \\) (top priority) or the corner goal \\( g_e^{(t)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.374, + 0.473, + 0.587 + ], + "angle": 0, + "content": "sentation on certain embodied AI tasks, e.g., object grasping [5, 10] and drawer opening [30, 44]. These works follow various routes, including reinforcement learning [13], imitation learning [44], and supervised learning [5] with 3D scene representations such as meshes and dense grids. However, most of these 3D-aware embodied AI tasks are only performed in a limited space [10, 30, 44], e.g., near one table or drawer. In large-scale environments, such as the floor-level scenes in ObjectNav, the existing methods would suffer from complex 3D observations and large computational costs. In this work, we propose a framework that leverages a point-based construction module and two dedicated exploration and identification policies to enable a 3D-aware agent for ObjectNav." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.605, + 0.17, + 0.621 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.633, + 0.406, + 0.648 + ], + "angle": 0, + "content": "3.1. Task Definition and Method Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.659, + 0.473, + 0.903 + ], + "angle": 0, + "content": "Object Goal Navigation Task. In an unknown environment, the Object Goal Navigation task requires the agent to navigate to an instance of the specified target category. For a fair comparison, we follow the previous problem setting [38, 49]. 
As initialization, the agent is located randomly without access to a pre-built environment map, and provided with a target category ID. At each time step \\( t \\), the agent receives noiseless onboard sensor readings, including an egocentric RGB-D image and a 3-DoF pose (2D position and 1D orientation) relative to the starting of the episode. Then the agent estimates its action \\( a_{t} \\in \\mathcal{A} \\) for movement in a discrete action space, consisting of move_forward, turn_left, turn_right and stop. Given a limited time budget of 500 steps, the agent terminates the movement until it is within 1 meter of an object of the specified category." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.374, + 0.892, + 0.661 + ], + "angle": 0, + "content": "Method Overview. Figure 2 provides an overview of the proposed 3D-aware ObjectNav method. Our method takes RGBD frames along with pose sensor readings as input, to online construct a point-based scene representation \\(\\mathcal{M}_{3D}\\) (Sec. 3.2), which is further projected to construct a 2D semantic map \\(\\mathcal{M}_{2D}\\). Given the structured 3D points \\(\\mathcal{M}_{3D}\\) and 2D map \\(\\mathcal{M}_{2D}\\), our framework simultaneously performs two complementary policies (Sec. 3.3), the exploration policy and identification policy at a fixed time cycle of 25 steps. The exploration policy predicts a long-term discrete corner goal \\(g_{e}\\), to drive the agent to explore the surrounding environment. Meanwhile, the identification policy evaluates the 3D points \\(\\mathcal{M}_{3D}\\) at each step and outputs a target object goal \\(g_{f}\\) if its semantic prediction is confident and consistent. The \\(g_{f}\\) will be set as the approaching target for the agent once it exists, otherwise the agent will navigate to the long-term corner goal \\(g_{e}\\). An underlying local planning module will navigate the agent towards the goal using analytical path planning." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.678, + 0.866, + 0.695 + ], + "angle": 0, + "content": "3.2. Navigation-Driven 3D Scene Construction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.892, + 0.901 + ], + "angle": 0, + "content": "During navigation, the 3D-aware agent will constantly obtain new observations and incrementally build a fine-grained 3D scene representation, integrating spatial and semantic information to drive the agent. However, given that our agent is deployed for a floor-level GoalNav task, it is fairly challenging to construct and leverage 3D representation across the entire scene while keeping an acceptable computational cost. Accordingly in this section, we extend an online point-based construction algorithm [53] to online organize the 3D points and further empower semantic fusion and consistency estimation. This design is tailored for a comprehensive scene understanding of the ObjectNav agent, requiring little computational resources." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6674" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.212 + ], + "angle": 0, + "content": "3D Scene Representation. At time step \\(t\\), we represent the 3D scene \\(\\mathcal{M}_{3D}\\) as the point clouds, denoted as \\(P^{(t)} = \\{(P_{l}^{(t)}, P_{s}^{(t)}, P_{c}^{(t)})\\} \\in \\mathbb{R}^{N^{(t)} \\times (M + 4)}\\), where \\(N^{(t)}\\) is the point number. 
For each point \\(i\\), the \\(M + 4\\) channels include the point position \\(P_{i,l}^{(t)} \\in \\mathbb{R}^3\\), the point semantics \\(P_{i,s}^{(t)} \\in \\mathbb{R}^M\\), and the point-wise spatial semantic consistency information \\(P_{i,c}^{(t)} \\in \\mathbb{R}^1\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.213, + 0.47, + 0.326 + ], + "angle": 0, + "content": "Online 3D Point Fusion. Given a newly captured posed RGB image \\( I_{c}^{(t)} \\) and depth image \\( I_{d}^{(t)} \\) at time step \\( t \\), the agent obtains the point positions \\( P_{l}^{(t)} \\) by back-projecting the depth images into the 3D world space via their corresponding poses. These points are organized by a point-based construction algorithm [53]. Here, we briefly revisit this strategy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.328, + 0.473, + 0.63 + ], + "angle": 0, + "content": "The construction algorithm dynamically allocates occupied 3D blocks \\(\\{\\mathcal{B}_k\\}\\), whose indices \\(k\\) are maintained by a tree-based method [20]. Each block \\(\\mathcal{B}_k\\) is defined by a boundary of constant length (10cm) along the X, Y and Z axes, e.g., \\([X_{min}(B_k), X_{max}(B_k)]\\), and the points with \\(p_{l,x} \\in [X_{min}(B_k), X_{max}(B_k)]\\) (the same requirement holds for the Y and Z axes) are recorded by the block \\(\\mathcal{B}_k\\). Given any 3D point \\(p_i\\), the algorithm can therefore achieve efficient neighborhood retrieval via the corresponding block index \\(k\\). Furthermore, a one-level octree \\(\\mathcal{O}_i\\) is constructed for each point \\(p_i\\) to capture fine-grained spatial relations among points. Specifically, we connect each point with its nearest point in each of the eight quadrants of the Cartesian coordinate system (see Figure 3). Powered by this point-based construction strategy, given any point, we can efficiently query its neighboring points through block retrieval and the per-point octree. This algorithm for organizing 3D points runs at 15 FPS while requiring reasonable memory resources (about \\(\\sim 500\\) MB for one entire scene). We provide a more detailed description in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.632, + 0.47, + 0.786 + ], + "angle": 0, + "content": "Online Semantic Fusion. With an efficient reconstruction algorithm in hand, we can directly fuse temporal information, e.g., multi-view semantic predictions, to achieve a more accurate and consistent scene understanding. Specifically, any point \\( p_i \\) that has been captured by a sequence of RGBD frames \\( \\{I_c^{(t)}, I_d^{(t)}\\} \\) can have multiple semantic predictions \\( \\{p_{i,s}^{(t)}(I_c^{(t)})\\} \\). We thus propose to online aggregate the multi-view 2D semantic predictions using a max-fusion mechanism to obtain the final 3D semantic prediction:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.786, + 0.47, + 0.807 + ], + "angle": 0, + "content": "\\[\np_{i, s}^{(t)} = \\mathcal{N}\\left(\\max\\left(\\{p_{i, s}^{(t)}(I_{c}^{(t)})\\}\\right)\\right), \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.811, + 0.471, + 0.902 + ], + "angle": 0, + "content": "where the max is performed per semantic category, followed by a normalization \\(\\mathcal{N}\\) that linearly rescales the probability distribution. Note that alternatives for fusing semantic predictions do exist, e.g., 3D convolution [19, 24] and Bayesian updating [28]. 
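To make the max-fusion rule in Eq. (1) concrete, here is a minimal NumPy sketch under the assumption that the per-view class probabilities for one point are stacked into a single array; the function and variable names are illustrative, not the paper's implementation.

```python
import numpy as np

def max_fuse_semantics(per_view_probs: np.ndarray) -> np.ndarray:
    """Fuse multi-view 2D semantic predictions for one 3D point (cf. Eq. (1)).

    per_view_probs: (V, M) array with the M-class probability vector predicted
    for this point in each of the V views that observed it.
    Returns one (M,) fused and linearly re-normalized distribution.
    """
    fused = per_view_probs.max(axis=0)   # per-category max over the views
    return fused / fused.sum()           # normalization N(.)

# Toy usage: two views that disagree on a 3-class prediction.
views = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.6, 0.3]])
print(max_fuse_semantics(views))  # [0.4375 0.375  0.1875]
```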
However, directly conducting 3D convolution into such a floor-level 3D representation would in-" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.097, + 0.67, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.532, + 0.23, + 0.64, + 0.243 + ], + "angle": 0, + "content": "Active Navigation" + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.1, + 0.882, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.705, + 0.23, + 0.871, + 0.244 + ], + "angle": 0, + "content": "Online Organized 3D points" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.26, + 0.895, + 0.331 + ], + "angle": 0, + "content": "Figure 3. Illustration of online 3D point fusion. (Left) A robot takes multi-view observations during navigation. (Right) The points \\( p \\) are organized by dynamically allocated blocks \\( \\mathcal{B} \\) and perpoint octrees \\( \\mathcal{O} \\), which can be used to query neighborhood points of any given point." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.34, + 0.892, + 0.446 + ], + "angle": 0, + "content": "evitably lead to a huge rise of computational cost, especially in the context of learning-based policy. We find that maximizing the 2D semantic prediction can already achieve impressive improvement on semantic accuracy (see Figure 8), with higher memory efficiency and time efficiency. Similar findings have also been reported and exploited in relevant works [7, 16]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.449, + 0.893, + 0.562 + ], + "angle": 0, + "content": "Spatial Semantic Consistency. Based on the fact that semantic label should remain consistent for all the points in a single object, we propose to calculate the spatial semantic consistency information \\( P_{c}^{(t)} \\) as part of the navigation-driven 3D scene representation. To be specific, \\( P_{i,c}^{(t)} \\) is computed as the maximum semantic KL-divergence between point \\( P_{i}^{(t)} \\) and its octree \\( \\mathcal{O}(P_i^{(t)}) \\):" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.569, + 0.892, + 0.592 + ], + "angle": 0, + "content": "\\[\nP _ {i, c} ^ {(t)} = \\max \\left(\\left\\{K L \\left(P _ {i, s} ^ {(t)}, P _ {j, s} ^ {(t)}\\right) \\mid \\forall P _ {j} ^ {(t)} \\in \\mathcal {O} \\left(P _ {i} ^ {(t)}\\right) \\right\\}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.597, + 0.895, + 0.744 + ], + "angle": 0, + "content": "where \\(KL\\) denotes the KL-divergence computation, which is a statistical distance that measures the semantic probability distribution between \\(P_{i,s}^{(t)}\\) and \\(P_{j,s}^{(t)}\\). Note for point \\(P_{i}^{(t)}\\), if we count all its spatially close points as the neighbourhood \\(\\mathcal{N}(P_i^{(t)})\\), it could be time consuming to calculate Equation 2, and the spatially close points do not help relieve the issue of outlier points as mentioned above. Therefore, we use the pre-built octree \\(\\mathcal{O}_i\\) to retrieve 8 nearest point in the quadrants of the Cartesian coordinate system." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.754, + 0.878, + 0.77 + ], + "angle": 0, + "content": "3.3. Simultaneous Exploration and Identification" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "With the aggregated 3D information, we expect to empower a 3D-aware agent for the ObjectNav task. 
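Looking back at Eq. (2), the consistency score of a point can be read as "the worst semantic disagreement with any of its octree neighbors". The sketch below illustrates this under the assumption that the octree is given as a plain dictionary from point indices to (at most eight) neighbor indices; it is not the paper's code.

```python
import numpy as np

def kl(p: np.ndarray, q: np.ndarray, eps: float = 1e-8) -> float:
    """KL divergence between two categorical distributions."""
    p, q = p + eps, q + eps
    return float(np.sum(p * np.log(p / q)))

def consistency_scores(probs: np.ndarray, octree: dict) -> np.ndarray:
    """Eq. (2): for each point, the max KL divergence to its octree neighbors.

    probs:  (N, M) fused per-point semantic distributions.
    octree: {point index -> list of neighbor indices (<= 8, one per quadrant)}.
    """
    scores = np.zeros(len(probs))
    for i, neighbors in octree.items():
        if neighbors:
            scores[i] = max(kl(probs[i], probs[j]) for j in neighbors)
    return scores

# Toy usage: point 0 agrees with neighbor 1 but not with the outlier point 2.
probs = np.array([[0.8, 0.2], [0.75, 0.25], [0.1, 0.9]])
print(consistency_scores(probs, {0: [1, 2], 1: [0], 2: [0]}))
```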
However, despite the efficient 3D scene representation, the agent still suffers from the complex and high-dimensional observations, leading to a lower sample efficiency in RL and hampering the navigation policy learning. Therefore, we leverage two complementary sub-policies: corner-guided exploration policy and category-aware identification policy. Each" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6675" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.084, + 0.089, + 0.207, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.187, + 0.171, + 0.199 + ], + "angle": 0, + "content": "SemExp" + }, + { + "type": "image", + "bbox": [ + 0.211, + 0.091, + 0.335, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.241, + 0.187, + 0.302, + 0.198 + ], + "angle": 0, + "content": "Stubborn" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.091, + 0.465, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.387, + 0.187, + 0.419, + 0.196 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_footnote", + "bbox": [ + 0.113, + 0.207, + 0.432, + 0.217 + ], + "angle": 0, + "content": ": Goal location : Candidate corner goals to be chosen in order" + }, + { + "type": "image_footnote", + "bbox": [ + 0.114, + 0.219, + 0.429, + 0.23 + ], + "angle": 0, + "content": ": Candidate corner goals to be predicted based on learning (Ours)" + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.207, + 0.432, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.242, + 0.47, + 0.284 + ], + "angle": 0, + "content": "Figure 4. Illustration of exploration policy. (Left) Learning-based continuous global goal [9]; (Middle) Heuristic direction selection [26]; (Right, ours) Learning-based corner goal prediction." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.292, + 0.469, + 0.352 + ], + "angle": 0, + "content": "policy learns to predict low-dimensional discrete actions and outputs a goal location to navigate the agent, resulting in a strong performance while requiring less training time. We will detail the two policies below." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.355, + 0.47, + 0.653 + ], + "angle": 0, + "content": "Observation Space. At each time step \\( t \\), both policies take fine-grained 3D observation \\( x_{3D}^{(t)} = \\{P^{(t)} \\in ((4 + m) \\times N)\\} \\) based on 3D scene representation \\( \\mathcal{M}_{3D} \\). Here, the \\( N \\) indicates the point number (we sample 4096 points) and the \\( m + 4 \\) channels are comprised of point position \\( p_l^{(t)} \\in \\mathbb{R}^3 \\), fused semantic predictions \\( p_s^{(t)} \\in \\mathbb{R}^m \\) and spatial semantic consistency \\( p_c^{(t)} \\in \\mathbb{R}^1 \\). Following existing works [8, 9], we use an additional egocentric 2D map \\( \\mathcal{M}_{2D} \\) for exploration policy and the local path planning module, which is directly obtained by a project-to-ground operation. More detailedly, for 2D observation \\( x_{2D}^{(t)} \\in ((2 + m) \\times M \\times M) \\) from 2D map \\( \\mathcal{M}_{2D} \\), the first two channels represent obstacles and explored area, and the rest of the channels each corresponds to an object category. 
Here, \\( \\mathcal{M}_{2D} \\) (in a resolution of \\( M = 240 \\) with \\( 20\\mathrm{cm} \\) grids) is constructed to give a large perception view of the scene, while 3D points perform as a fine-grained observation of objects. In addition to the scene representations, we also pass the goal object category index \\( o_{ID} \\) as the side input to both policies." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.656, + 0.469, + 0.852 + ], + "angle": 0, + "content": "Corner-Guided Exploration Policy. The exploration policy attempts to guide the agent to explore and perceive the surrounding environment where it could access any instance of the target object category. We observe that existing learning-based exploration policies predict goal locations over the 2D map in continuous or large-dimensional discrete action space (Figure 4 Left), suffering from low sample efficiency. Therefore, we define a corner-guided exploration policy \\( g_{e} = \\pi_{e}(x_{3D}, x_{2D}, o_{ID}; \\theta_{e}) \\) that predicts a corner goal \\( g_{e} \\) to drive the agent (Figure 4 Right). Here, the \\( \\theta_{e} \\) indicates the parameters of the policy, and \\( g_{e} \\) is one of the four pre-defined corner goals {Top Left, Top Right, Bottom Left, Bottom Right} of the 2D map." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "Compared to predicting goals in a continuous or high-dimensional action space, learning to predict the four corner goals significantly reduces the learning difficulty. More" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.089, + 0.621, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.555, + 0.182, + 0.571, + 0.193 + ], + "angle": 0, + "content": "(A)" + }, + { + "type": "image", + "bbox": [ + 0.621, + 0.09, + 0.756, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.693, + 0.182, + 0.708, + 0.192 + ], + "angle": 0, + "content": "(B)" + }, + { + "type": "image", + "bbox": [ + 0.756, + 0.09, + 0.889, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.826, + 0.182, + 0.841, + 0.192 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.203, + 0.892, + 0.259 + ], + "angle": 0, + "content": "Figure 5. Illustration of identification policy. From \\(\\mathrm{A} \\rightarrow \\mathrm{B}\\), fused points are filtered by the category-aware predicted threshold \\(\\tau\\). From \\(\\mathrm{B} \\rightarrow \\mathrm{C}\\), the policy further checks the spatial label consistency of the points and identifies the target goal." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.271, + 0.892, + 0.391 + ], + "angle": 0, + "content": "over, as noted by previous studies [4, 26], the corner-goal-based exploration strategy exhibits the capacity to achieve efficient exploration through avoiding back-and-forth pacing. Superior to using other heuristic corner goal exploration strategies (Figure 4 Middle), our agent can learn from the 3D scene priors to behave more intelligently. Demonstrations of our corner-guided exploration can be found in the attached video." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.395, + 0.893, + 0.561 + ], + "angle": 0, + "content": "Category-Aware Identification Policy. During navigation, the agent consistently makes semantic predictions to identify an instance of target object category. 
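Returning briefly to the corner-guided exploration policy above: its action space can be thought of as a four-way lookup from a discrete action to a cell of the egocentric 2D map. The sketch below illustrates this; the (row, column) convention and the argmax-based action selection are assumptions for illustration only.

```python
import numpy as np

MAP_SIZE = 240  # egocentric 2D map resolution from the text (20 cm grid cells)

# The four pre-defined long-term goals of the exploration policy.
CORNERS = {
    0: (0, 0),                        # Top Left
    1: (0, MAP_SIZE - 1),             # Top Right
    2: (MAP_SIZE - 1, 0),             # Bottom Left
    3: (MAP_SIZE - 1, MAP_SIZE - 1),  # Bottom Right
}

def corner_goal(action: int) -> tuple:
    """Map the discrete exploration action (0-3) to a cell on the 2D map."""
    return CORNERS[action]

# Toy usage: the policy network outputs logits over the four corner actions.
logits = np.array([0.1, 1.3, -0.4, 0.2])
print(corner_goal(int(np.argmax(logits))))  # (0, 239): head to the Top Right corner
```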
Most works [9, 14] simply use a preset hard confidence threshold for target identification. However, this strategy is inherently suboptimal due to the considerable variability in semantic prediction results across different categories and observation angles. As a result, a preset threshold is unable to adequately adapt to the ever-changing nature of these scenarios. Also, it ignores the consistency of the semantic prediction in 3D space." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.564, + 0.892, + 0.67 + ], + "angle": 0, + "content": "To tackle these issues, we propose to leverage both a dynamic confidence threshold and spatial semantic label consistency for target identification. We define a policy \\( s = \\pi_f(x_{3D}, o_{ID}; \\theta_f) \\) which takes the 3D observation \\( x_{3D} \\) and the target category index \\( o_{ID} \\) and outputs a threshold-indicating action \\( s \\in \\{0, 1, \\dots, 9\\} \\). The dynamic threshold \\( \\tau \\) is then obtained by:" + }, + { + "type": "equation", + "bbox": [ + 0.612, + 0.679, + 0.892, + 0.71 + ], + "angle": 0, + "content": "\\[\n\\tau = \\tau_{low} + s \\cdot \\frac{1 - \\tau_{low}}{10}, \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.717, + 0.892, + 0.807 + ], + "angle": 0, + "content": "where \\(\\tau_{low}\\) is set to 0.5 in our implementation, giving a threshold range \\(\\tau \\in [0.5, 0.95]\\). The threshold \\(\\tau\\) is used to dynamically identify the points belonging to the target object (Figure 5 Middle). It is worth mentioning that this policy also utilizes a low-dimensional discrete action space, which is fairly easy for the agent to learn." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.811, + 0.892, + 0.9 + ], + "angle": 0, + "content": "To obtain the final target goal \\( g_{f} \\), our method further checks the spatial semantic label consistency. Specifically, we use the points \\( \\{p_i | (p_i, p) \\in \\mathcal{O}_p\\} \\) connected by the per-point octree \\( \\mathcal{O}_p \\) to approximately represent the 3D surface of the target object. Our insight is that the points along the target's surface should have consistent semantic" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "6676" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.47, + 0.168 + ], + "angle": 0, + "content": "labels. Therefore, we only identify those points that have at least 2-ring neighbors across the octrees \\(\\{p_i|(p_i,p_j)\\in \\mathcal{O}_{p_j}|(p_j,p)\\in \\mathcal{O}_p\\}\\) as the target object goal \\(g_{f}\\) (Figure 5 Right). See Figure 5 for a visual illustration; more details can be found in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.17, + 0.47, + 0.291 + ], + "angle": 0, + "content": "Local Planning Module. The goals \\( g_{e} \\) and \\( g_{f} \\) from the two policies are consistently updated during navigation. Our method preferentially utilizes the target goal \\( g_{f} \\) if it exists, and otherwise takes the long-term corner goal \\( g_{e} \\) to explore. To navigate to the given location, we use the Fast Marching Method [42] to analytically plan the shortest path from the agent location. The agent then takes deterministic actions to follow this path." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.294, + 0.471, + 0.446 + ], + "angle": 0, + "content": "Rewards. For the exploration policy, we share a similar reward design to [1, 51]. 
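Before detailing the reward terms, here is a minimal sketch of the identification rule above: the threshold of Eq. (3) followed by confidence filtering. Collapsing the 2-ring octree check into a scalar cut-off on the consistency score is a deliberate simplification, and all names and values below are illustrative assumptions rather than the paper's code.

```python
import numpy as np

def dynamic_threshold(s: int, tau_low: float = 0.5) -> float:
    """Eq. (3): map the discrete action s in {0,...,9} to a confidence threshold."""
    return tau_low + s * (1.0 - tau_low) / 10.0

def identify_target(points_xyz, target_probs, cons_scores, s, cons_cutoff=0.2):
    """Return candidate target points that are confident for the goal category
    and spatially consistent. The scalar cut-off on the KL-based consistency
    score stands in for the paper's 2-ring octree check."""
    tau = dynamic_threshold(s)
    keep = (target_probs >= tau) & (cons_scores <= cons_cutoff)
    return points_xyz[keep]

# Toy usage: s = 4 gives tau = 0.7, so only the second point survives.
xyz = np.array([[1.0, 0.0, 0.5], [1.1, 0.1, 0.5], [4.0, 2.0, 0.4]])
print(dynamic_threshold(4))                              # 0.7 (up to float rounding)
print(identify_target(xyz, np.array([0.65, 0.9, 0.95]),
                      np.array([0.05, 0.1, 0.8]), s=4))  # [[1.1 0.1 0.5]]
```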
The agent receives a sparse success reward \\( r_{success} = 2.5 \\), a slack reward \\( r_{slack} = 10^{-2} \\) and an exploration reward \\( r_{explore} \\). The exploration reward is a dense reward, defined by the number of new inserted point \\( n_p^{new} \\) as \\( r_{explore} = n_p^{new} \\times 10^{-3} \\). The slack reward and exploration reward encourage the agent to take the most effective direction to the unobserved area. And for the identification policy, we combine the same success reward and slack reward borrowed from the exploration policy." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.467, + 0.21, + 0.484 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.494, + 0.258, + 0.511 + ], + "angle": 0, + "content": "4.1. Experiment Setup." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.52, + 0.47, + 0.763 + ], + "angle": 0, + "content": "We perform experiments on the Matterport3D (MP3D) [6] and Gibson [48] datasets with the Habitat simulator [39]. Both Gibson and MP3D contain photorealistic 3D reconstructions of real-world environments. For Gibson, we use 25 train / 5 val scenes from the Gibson tiny split. And we follow the same setting as in [9, 35] where we consider 6 goal categories, including chair, couch, potted plant, bed, toilet and TV. For MP3D, we use the standard split of 61 train / 11 val scenes with Habitat ObjectNav dataset [38], which consists of 21 goal categories (the full list can be found in the supplemental material). Note that, the RGB-D and pose readings are noise-free from simulation (follow the definition of [1]). Estimation of the pose from noisy sensor readings is out of the scope of this work and can be addressed if necessary, by incorporating off-the-shelf robust odometry [52, 54]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.765, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Implementation Details. On MP3D, we use the same pretrained 2D semantic model RedNet [21] as [35,51]. On Gibson, we leverage a Mask R-CNN [18], which is trained with COCO dataset [23]. For each frame, we randomly sample 512 points for point-based construction. Moreover, we use PointNet [33] and fully convolutional networks [25] to obtain the feature of 3D points and the 2D map, respectively. During training, we sample actions every 25 steps and use the Proximal Policy Optimization (PPO) [41] for both ex" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.089, + 0.894, + 0.159 + ], + "angle": 0, + "content": "Table 1. ObjectNav validation results on Gibson and MP3D. Our method is trained with 5 seeds and report the averaged performance. The best of all methods and the best of all modular-based methods are highlighted in **bold** and **underline** colors, respectively. Note that Habitat-Web takes use of extra data." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.17, + 0.898, + 0.322 + ], + "angle": 0, + "content": "
Method | Gibson (val) | | | Matterport3D (val) | |
 | SPL(%)↑ | Succ.(%)↑ | DTS(m)↓ | SPL(%)↑ | Succ.(%)↑ | DTS(m)↓
DD-PPO [47] | 10.7 | 15.0 | 3.24 | 1.8 | 8.0 | 6.90
Red-Rabbit [51] | - | - | - | 7.9 | 34.6 | -
THDA [27] | - | - | - | 11.1 | 28.4 | 5.58
Habitat-Web [36] | - | - | - | 10.2 | 35.4 | -
FBE [37] | 28.3 | 64.3 | 1.78 | 7.2 | 22.7 | 6.70
ANS [8] | 34.9 | 67.1 | 1.66 | 9.2 | 27.3 | 5.80
L2M* [14] | - | - | - | 11.0 | 32.1 | 5.12
SemExp* [9] | 39.6 | 71.7 | 1.39 | 10.9 | 28.3 | 6.06
Stubborn* [26] | - | - | - | 13.5 | 31.2 | 5.01
PONI [35] | 41.0 | 73.6 | 1.25 | 12.1 | 31.8 | 5.10
Ours | 42.1 | 74.5 | 1.16 | 14.6 | 34.0 | 4.74
" + }, + { + "type": "table_caption", + "bbox": [ + 0.52, + 0.326, + 0.871, + 0.34 + ], + "angle": 0, + "content": "Table 2. ObjectNav validation results on MP3D-L2M [14]." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.351, + 0.898, + 0.431 + ], + "angle": 0, + "content": "
MP3D-L2M
Method | SPL(%) ↑ | SoftSPL(%) ↑ | Succ.(%) ↑ | DTS(m)↓
SemExp [9] | 16.5 | - | 28.1 | 4.848
L2M [14] | 14.8 | 20.0 | 34.8 | 3.669
Ours | 21.2 | 30.5 | 40.2 | 3.278
" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.445, + 0.892, + 0.474 + ], + "angle": 0, + "content": "ploration and identification policies. More implementation details can be found in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.614 + ], + "angle": 0, + "content": "Evaluation Metrics. Following existing works [2, 14, 35], we adopt the following evaluation metrics: 1) SPL: success weighted by path length. It measures the efficiency of the agent over oracle path length, which serves as the primary evaluation metric for Habitat Challenge [49]. 2) Success rate: the percentage of successful episodes 3) Soft SPL: a softer version of SPL measure the progress towards the goal (even with 0 success). 4) DTS: geodesic distance (in m) to the success at the end of the episode." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.617, + 0.892, + 0.813 + ], + "angle": 0, + "content": "Baselines. We consider mainstream baselines in the ObjectNav task. For end-to-end RL methods, we cover DD-PPO [47], Red-Rabiit [51], THDA [27], and Habiat-Web [36]. For modular based methods, we cover FBE [37], ANS [8], L2M [14], SemExp [9], Stubborn [26] and PONI [35]. Note that, some works use additional data to improve the performance, e.g. Habitat-web leverages human demonstration trajectories, and THDA utilizes data augmentation. It is challenging to compare all the methods fairly. Therefore, we are particularly interested in the three most relevant baselines: SemExp, Stubborn, and PONI. These three methods share the same 2D semantic predictors [18, 21] as our method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.829, + 0.593, + 0.844 + ], + "angle": 0, + "content": "4.2. Results" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Comparison on MP3D and Gibson. We evaluate our approach on MP3D (val) and Gibson (val) with other baselines, including end-to-end RL(rows 1 - 4) and modular" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6677" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.086, + 0.852, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.389, + 0.895, + 0.434 + ], + "angle": 0, + "content": "Figure 6. An qualitative visualization of the trajectory of the proposed method. We visualize an episode from MP3D where an agent is expected to find a bed. The semantic prediction \\( p_{s} \\) and spatial semantic consistency \\( p_{c} \\) of points are visualized on the left. During navigation, the agent can successfully dismiss the wrong prediction and approach and finally call stop around the target object." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.443, + 0.471, + 0.486 + ], + "angle": 0, + "content": "Table 3. Comparison of different exploration policies. Here, all methods share the same identification strategy from [9] for fair comparison." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.495, + 0.458, + 0.596 + ], + "angle": 0, + "content": "
Method | SPL(%) | Succ.(%) | DTS(m)
Learn continuous goal | 11.1 | 28.6 | 6.354
Learn dense grid goal | 12.7 | 29.5 | 5.635
Learn 8 corner goal | 12.9 | 30.7 | 5.112
Heuristic 4 corner goal | 13.5 | 33.0 | 4.995
Learn 4 corner goal (Ours) | 13.9 | 33.5 | 4.931
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.47, + 0.882 + ], + "angle": 0, + "content": "based methods (rows 5 - 10). Note that, SemExp and Stubborn did not report the results on MP3D validation, while L2M uses a self-made dataset MP3D-L2M based on MP3D and tests fewer categories than what we do. We therefore faithfully provide the results, denoted with *, by evaluating with their public available code. The results are demonstrated in Table 1. On both datasets, our method achieves the state-of-the-art ObjetNav efficiency (SPL) among all methods (2.6% higher on Gibson dataset and 8.1% higher on MP3D). For the success rate, our method achieves the best results among all modular-based methods, showing comparable performance with additional annotation methods THAD [27] and Habitat-web [36]. Especially, compared with the modular-based methods, SemExp, Stubborn, and PONI, which share the same 2D semantic predictor [21] as ours, the results fairly demonstrate the superiority of our framework on both efficiency and success rate. We also provide the results validated on MP3D-L2M in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.885, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We also provide a qualitative visualization of MP3D" + }, + { + "type": "table_caption", + "bbox": [ + 0.528, + 0.443, + 0.863, + 0.459 + ], + "angle": 0, + "content": "Table 4. Comparison on different identification policies." + }, + { + "type": "table", + "bbox": [ + 0.509, + 0.468, + 0.886, + 0.553 + ], + "angle": 0, + "content": "
Method | Repr. | Thre. | SPL(%) | Succ.(%) | DTS(m)
Deterministic | 2D | 0.85 | 12.8 | 30.1 | 5.151
Deterministic | 3D | 0.85 | 13.8 | 32.5 | 4.987
Learning (Ours) | 3D | - | 14.6 | 34.0 | 4.749
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.578, + 0.892, + 0.638 + ], + "angle": 0, + "content": "episodes in Figure 6. Here, our method online updates the semantic prediction and successfully dismisses the wrong target goal. For more qualitative results, please refer to the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.641, + 0.893, + 0.867 + ], + "angle": 0, + "content": "Comparison on Exploration Policy. We conduct an experiment to verify the efficiency of our corner-guided exploration policy on MP3D. To remove the effect of the 2D semantic predictor and identification policy, all competitors share the same semantic predictor and a heuristic identification policy proposed in SemExp [9]. The results are reported in Table 3. Our corner-guided exploration policy outperforms the mainstream existing methods, including learning-based ones [8, 14] and heuristic ones [26]. Our findings indicate that the best performance is achieved through learning to predict discrete corner goals from the four corners of the scene. This suggests that the four-corner design, which benefits from a small, discrete action space, is already capable of efficiently guiding the agent in exploring the environment." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.903 + ], + "angle": 0, + "content": "Comparison on Identification Policy. Another critical challenge in OjectNav is how to properly identify an in-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6678" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.46, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.263, + 0.471, + 0.306 + ], + "angle": 0, + "content": "Figure 7. An comparison of predicted threshold distribution between different categories by our category-aware policy. We report the ratio of the each predicted threshold." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.318, + 0.47, + 0.514 + ], + "angle": 0, + "content": "stance of target object category. Therefore, We evaluate our identification policy on MP3D along with other identifying strategies, including a 2D frame-based policy adopted in [9] and 3D point-based methods proposed by our approach. The results are shown in Table 4. We observe a performance improvement (rows 1 - 2) by simply leveraging 3D point-based construction and fusion algorithm. It can demonstrate that the multi-view observations provide more accurate semantic prediction, which effectively reduces false positive prediction (see examples in Figure 8). Moreover, our category-aware identification policy, through predicting dynamic threshold, demonstrates an even better performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.518, + 0.47, + 0.715 + ], + "angle": 0, + "content": "To further investigate the effect of our identification policy, We conduct a break down study in Figure 7 by plotting the distribution of predicted semantic confidence thresholds. Specifically, we plot the distribution of three different categories (table, cushion, plant). For a relatively easy-to-recognize category, such as table with \\(52.6\\%\\) success rate (SR), our policy predict a broad threshold distribution. However, for more challenging categories, such as cushion \\((36.9\\%)\\) SR) and plant \\((16.1\\%)\\) SR), the policy tends to be more conservative through setting a higher threshold. 
The results demonstrate the category-aware characteristic of our identification policy which adapts well to different difficulty levels across categories." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.717, + 0.47, + 0.853 + ], + "angle": 0, + "content": "Ablation Study. We also perform an ablation study to verify the effectiveness of different components of our method. The results are demonstrated in Table 5. The cooperation of the 2D top-down map and 3D points (row 4) shows significant improvement by incorporating extensive scene perception (in 2D) and fine-grained object perception (in 3D). Moreover, rows (3-4) and (4-5) proved the effectiveness of leveraging consistency information and the identification policy, respectively." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Analysis of Computational Cost. Our framework is extremely memory efficient, which requires about 0.5GB for one scene, and can perform online construction and" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.077, + 0.894, + 0.133 + ], + "angle": 0, + "content": "Table 5. Ablation study of main components in our method. The pos. indicates the semantic predictions \\( p_{s} \\), KL indicates the spatial semantic consistency \\( p_{c} \\) and the I. policy indicates the usage of the proposed identification policy." + }, + { + "type": "table", + "bbox": [ + 0.509, + 0.143, + 0.862, + 0.253 + ], + "angle": 0, + "content": "
2D map3D points Pos. KLI. PolicySPL(%)Succ.(%)DTS(m)
11.229.66.213
13.032.35.769
13.733.85.620
13.933.54.931
14.634.0
" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.267, + 0.862, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.498, + 0.882, + 0.512 + ], + "angle": 0, + "content": "Figure 8. Visualization of the results of online 3D point fusion." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.525, + 0.892, + 0.631 + ], + "angle": 0, + "content": "semantic fusion at a frame rate of 15 FPS. Moreover, our method requires only 48 GPU hours to train a 3D-aware agent on MP3D dataset to achieve the SOTA performance among all modular-based methods. This is significantly faster (30x) than other existing reinforcement learning based methods [9, 51], and is comparable to supervised learning modular-based methods [35]" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.658, + 0.619, + 0.673 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.687, + 0.892, + 0.822 + ], + "angle": 0, + "content": "In this work, we present a 3D-aware framework for object goal navigation. Our method is based on a 3D point-based construction algorithm to observe the 3D scenes and simultaneously perform exploration and identification policies to navigate the agent. Our method achieve SOTA performance among all modular-based methods, while requiring less training time. In the future, we would like to exploit this 3D-aware framework in other embodied AI tasks, e.g. mobile manipulation, robotic nurses." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Acknowledgements. We thank anonymous reviewers for their valuable suggestions. This work was supported by National Key Research and Development Program of China (2018AAA0102200), NSFC (62132021), and Beijing Academy of Artificial Intelligence (BAAI)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6679" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.118, + 0.47, + 0.187 + ], + "angle": 0, + "content": "[1] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. ObjectNav Revisited: On Evaluation of Embodied Agents Navigating to Objects. In arXiv:2006.13171, 2020. 2, 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.189, + 0.472, + 0.258 + ], + "angle": 0, + "content": "[2] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. Objectnav revisited: On evaluation of embodied agents navigating to objects. ArXiv, abs/2006.13171, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.261, + 0.472, + 0.316 + ], + "angle": 0, + "content": "[3] Tommaso Campari, Paolo Eccher, Luciano Serafini, and Lamberto Ballan. Exploiting scene-specific features for object goal navigation. In European Conference on Computer Vision, pages 406-421. Springer, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.318, + 0.472, + 0.373 + ], + "angle": 0, + "content": "[4] Chao Cao, Hongbiao Zhu, Howie Choset, and Ji Zhang. Tare: A hierarchical framework for efficiently exploring complex 3d environments. In Robotics: Science and Systems, 2021. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.375, + 0.472, + 0.43 + ], + "angle": 0, + "content": "[5] Hanwen Cao, Hao-Shu Fang, Wenhai Liu, and Cewu Lu. Suctionnet-1billion: A large-scale benchmark for suction grasping. IEEE Robotics and Automation Letters, 6(4):8718-8725, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.432, + 0.472, + 0.5 + ], + "angle": 0, + "content": "[6] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158, 2017. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.503, + 0.472, + 0.572 + ], + "angle": 0, + "content": "[7] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in Neural Information Processing Systems, 34:13086-13098, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.574, + 0.472, + 0.628 + ], + "angle": 0, + "content": "[8] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. arXiv preprint arXiv:2004.05155, 2020. 1, 2, 5, 6, 7, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.631, + 0.472, + 0.699 + ], + "angle": 0, + "content": "[9] Devendra Singh Chaplot, Dhiraj Prakashchand Gandhi, Abhinav Gupta, and Russ R Salakhutdinov. Object goal navigation using goal-oriented semantic exploration. Advances in Neural Information Processing Systems, 33:4247-4258, 2020. 1, 2, 5, 6, 7, 8, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.702, + 0.472, + 0.757 + ], + "angle": 0, + "content": "[10] Changhyun Choi, Wilko Schwarting, Joseph DelPreto, and Daniela Rus. Learning object grasping for soft robot hands. IEEE Robotics and Automation Letters, 3(3):2370-2377, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.759, + 0.472, + 0.814 + ], + "angle": 0, + "content": "[11] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5556-5565, 2015. 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.817, + 0.472, + 0.857 + ], + "angle": 0, + "content": "[12] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In ECCV, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.472, + 0.902 + ], + "angle": 0, + "content": "[13] Samir Yitzhak Gadre, Kiana Ehsani, and Shuran Song. Act the part: Learning interaction strategies for articulated object part discovery. ICCV, 2021. 3" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.118, + 0.472, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.161 + ], + "angle": 0, + "content": "[14] Georgios Georgakis, Bernadette Bucher, Karl Schmeckpeper, Siddharth Singh, and Kostas Daniilidis. Learning to map for active semantic goal navigation. In International Conference on Learning Representations (ICLR), 2022. 1, 2, 5, 6, 7, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.894, + 0.204 + ], + "angle": 0, + "content": "[15] Georgios Georgakis, Yimeng Li, and Jana Kosecka. Simultaneous mapping and target driven navigation. ArXiv, abs/1911.07980, 2019. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.207, + 0.894, + 0.275 + ], + "angle": 0, + "content": "[16] Margarita Grinvald, Fadri Furrer, Tonci Novkovic, Jen Jen Chung, Cesar Cadena, Roland Siegwart, and Juan Nieto. Volumetric instance-aware semantic mapping and 3d object discovery. IEEE Robotics and Automation Letters, 4(3):3037-3044, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.278, + 0.894, + 0.332 + ], + "angle": 0, + "content": "[17] Saurabh Gupta, Varun Tolani, James Davidson, Sergey Levine, Rahul Sukthankar, and Jitendra Malik. Cognitive mapping and planning for visual navigation. International Journal of Computer Vision, 128:1311-1330, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.894, + 0.375 + ], + "angle": 0, + "content": "[18] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross B. Girshick. Mask r-cnn. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42:386-397, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.377, + 0.894, + 0.432 + ], + "angle": 0, + "content": "[19] Shi-Sheng Huang, Ze-Yu Ma, Tai-Jiang Mu, Hongbo Fu, and Shi-Min Hu. Supervoxel convolution for online 3d semantic segmentation. ACM Transactions on Graphics (TOG), 40(3):1-15, 2021. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.434, + 0.894, + 0.501 + ], + "angle": 0, + "content": "[20] Hosagrahar V Jagadish, Beng Chin Ooi, Kian-Lee Tan, Cui Yu, and Rui Zhang. idistance: An adaptive b+-tree based indexing method for nearest neighbor search. ACM Transactions on Database Systems (TODS), 30(2):364-397, 2005. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.894, + 0.559 + ], + "angle": 0, + "content": "[21] Jindong Jiang, Lunan Zheng, Fei Luo, and Zhijun Zhang. Rednet: Residual encoder-decoder network for indoorrgb-d semantic segmentation. arXiv preprint arXiv:1806.01054, 2018. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.562, + 0.894, + 0.602 + ], + "angle": 0, + "content": "[22] Cheng Lin, Tingxiang Fan, Wenping Wang, and Matthias Nießner. Modeling 3d shapes by reinforcement learning. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.894, + 0.659 + ], + "angle": 0, + "content": "[23] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C. Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.661, + 0.894, + 0.73 + ], + "angle": 0, + "content": "[24] Leyao Liu, Tian Zheng, Yun-Jou Lin, Kai Ni, and Lu Fang. Ins-conv: Incremental sparse convolution for online 3d segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18975–18984, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.733, + 0.894, + 0.788 + ], + "angle": 0, + "content": "[25] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.894, + 0.842 + ], + "angle": 0, + "content": "[26] Haokuan Luo, Albert Yue, Zhang-Wei Hong, and Pulkit Agrawal. Stubborn: A strong baseline for indoor object navigation. arXiv preprint arXiv:2203.07359, 2022. 
2, 5, 6, 7, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.846, + 0.894, + 0.902 + ], + "angle": 0, + "content": "[27] Oleksandr Maksymets, Vincent Cartillier, Aaron Gokaslan, Erik Wijmans, Wojciech Galuba, Stefan Lee, and Dhruv Batra. Thda: Treasure hunt data augmentation for semantic navigation. In Proceedings of the IEEE/CVF International" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "6680" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.112, + 0.092, + 0.468, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision, pages 15374-15383, 2021. 1, 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.191 + ], + "angle": 0, + "content": "[28] John McCormac, Ankur Handa, Andrew Davison, and Stefan Leutenegger. Semantic fusion: Dense 3d semantic mapping with convolutional neural networks. In 2017 IEEE International Conference on Robotics and automation (ICRA), pages 4628-4635. IEEE, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.193, + 0.469, + 0.262 + ], + "angle": 0, + "content": "[29] Arsalan Mousavian, Alexander Toshev, Marek Fiser, Jana Košecka, Ayzaan Wahid, and James Davidson. Visual representations for semantic target driven navigation. In 2019 International Conference on Robotics and Automation (ICRA), pages 8846-8852. IEEE, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.469, + 0.346 + ], + "angle": 0, + "content": "[30] Tongzhou Mu, Zhan Ling, Fanbo Xiang, Derek Cathera Yang, Xuanlin Li, Stone Tao, Zhiao Huang, Zhiwei Jia, and Hao Su. Maniskill: Generalizable manipulation skill benchmark with large-scale demonstrations. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[31] Alexey Nekrasov, Jonas Schult, Or Litany, B. Leibe, and Francis Engelmann. Mix3d: Out-of-context data augmentation for 3d scenes. 2021 International Conference on 3D Vision (3DV), pages 116-125, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.468, + 0.446 + ], + "angle": 0, + "content": "[32] Emilio Parisotto and Ruslan Salakhutdinov. Neural map: Structured memory for deep reinforcement learning. ArXiv, abs/1702.08360, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.449, + 0.469, + 0.504 + ], + "angle": 0, + "content": "[33] C. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 77-85, 2017. 6, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.506, + 0.469, + 0.546 + ], + "angle": 0, + "content": "[34] Yiding Qiu, Anwesan Pal, and Henrik I Christensen. Learning hierarchical relationships for object-goal navigation. arXiv preprint arXiv:2003.06749, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.549, + 0.469, + 0.631 + ], + "angle": 0, + "content": "[35] Santhosh Kumar Ramakrishnan, Devendra Singh Chaplot, Ziad Al-Halah, Jitendra Malik, and Kristen Grauman. Poni: Potential functions for objectgoal navigation with interaction-free learning. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18890-18900, 2022. 1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[36] Ram Ramrakhya, Eric Undersander, Dhruv Batra, and Abhishek Das. Habitat-web: Learning embodied object-search strategies from human demonstrations at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5173-5183, 2022. 1, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.704, + 0.469, + 0.772 + ], + "angle": 0, + "content": "[37] IEEE Robotics. Proceedings 1997, IEEE international symposium on computational intelligence in robotics and automation cira'97 - towards new computational principles for robotics and automation, july 10-11, 1997, monterey, california, usa. In CIRA, 1997. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.775, + 0.469, + 0.857 + ], + "angle": 0, + "content": "[38] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, et al. Habitat: A platform for embodied ai research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9339-9347, 2019. 1, 3, 6, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.859, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[39] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.894, + 0.189 + ], + "angle": 0, + "content": "[40] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.191, + 0.892, + 0.231 + ], + "angle": 0, + "content": "[41] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. ArXiv, abs/1707.06347, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.892, + 0.258 + ], + "angle": 0, + "content": "[42] James A. Sethian. Fast marching methods. SIAM Rev., 41:199-235, 1999. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.261, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[43] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv:2210.05663, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[44] Hao Shen, Weikang Wan, and He Wang. Learning category-level generalizable object manipulation policy via generative adversarial self-imitation learning from demonstrations. arXiv preprint arXiv:2203.02107, 2022. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.373, + 0.892, + 0.425 + ], + "angle": 0, + "content": "[45] Thang Vu, Kookhoi Kim, Tung Minh Luu, Xuan Thanh Nguyen, and Chang-Dong Yoo. Softgroup for 3d instance segmentation on point clouds. ArXiv, abs/2203.01509, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.428, + 0.892, + 0.481 + ], + "angle": 0, + "content": "[46] Thomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. Robotics: Science and Systems, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.484, + 0.892, + 0.55 + ], + "angle": 0, + "content": "[47] Erik Wijmans, Abhishek Kadian, Ari Morcos, Stefan Lee, Irfan Essa, Devi Parikh, Manolis Savva, and Dhruv Batra. Dd-ppo: Learning near-perfect pointgoal navigators from 2.5 billion frames. arXiv preprint arXiv:1911.00357, 2019. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.553, + 0.892, + 0.621 + ], + "angle": 0, + "content": "[48] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9068-9079, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[49] Karmesh Yadav, Santhosh Kumar Ramakrishnan, John Turner, Aaron Gokaslan, Oleksandr Maksymets, Rishabh Jain, Ram Ramrakhya, Angel X Chang, Alexander Clegg, Manolis Savva, Eric Undersander, Devendra Singh Chaplot, and Dhruv Batra. Habitat challenge 2022. https://aihabitat.org/challenge/2022/, 2022. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.721, + 0.892, + 0.761 + ], + "angle": 0, + "content": "[50] Wei Yang, Xiaolong Wang, Ali Farhadi, Abhinav Gupta, and Roozbeh Mottaghi. Visual semantic navigation using scene priors. arXiv preprint arXiv:1810.06543, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.763, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[51] Joel Ye, Dhruv Batra, Abhishek Das, and Erik Wijmans. Auxiliary tasks and exploration enable objectgoal navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16117-16126, 2021. 1, 2, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.872 + ], + "angle": 0, + "content": "[52] Jiazhao Zhang, Yijie Tang, He Wang, and Kai Xu. Asro-dio: Active subspace random optimization based depth inertial odometry. IEEE Transactions on Robotics, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[53] Jiazhao Zhang, Chenyang Zhu, Lintao Zheng, and Kai Xu. Fusion-aware point convolution for online semantic 3d scene" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.894, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "6681" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4534-4543, 2020. 2, 3, 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[54] Xiaoming Zhao, Harsh Agrawal, Dhruv Batra, and Alexander G. Schwing. 
The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 16107-16116, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.261 + ], + "angle": 0, + "content": "[55] Lintao Zheng, Chenyang Zhu, Jiazhao Zhang, Hang Zhao, Hui Huang, Matthias Nießner, and Kai Xu. Active scene understanding via online semantic reconstruction. Computer Graphics Forum, 38, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.263, + 0.471, + 0.331 + ], + "angle": 0, + "content": "[56] Fengda Zhu, Xiwen Liang, Yi Zhu, Qizhi Yu, Xiaojun Chang, and Xiaodan Liang. Soon: Scenario oriented object navigation with graph-based exploration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12689-12699, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.333, + 0.47, + 0.402 + ], + "angle": 0, + "content": "[57] Yuke Zhu, Roozbeh Mottaghi, Eric Kolve, Joseph J. Lim, Abhinav Kumar Gupta, Li Fei-Fei, and Ali Farhadi. Target-driven visual navigation in indoor scenes using deep reinforcement learning. 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 3357–3364, 2017. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.402 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "6682" + } + ] +] \ No newline at end of file diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_origin.pdf b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e6c2c4e33a99b96f26760977c34f05f4c9673ebb --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/e3176243-c1cd-415f-8bca-116983524509_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:773ad6e052c6d9cfbc86aeb63e0b1f644fcadbf4b905ddfd58e1b57a60007569 +size 2703334 diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/full.md b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/full.md new file mode 100644 index 0000000000000000000000000000000000000000..66e7bb30b72116e1bde18e7f4e3e391a76c05987 --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/full.md @@ -0,0 +1,316 @@ +# 3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification + +Jiazhao Zhang $^{1,2,*}$ + +Liu Dai $^{3*}$ + +Fanpeng Meng + +Qingnan Fan + +Xuelin Chen + +Kai Xu $^{6}$ He Wang $^{1\dagger}$ + +$^{1}$ CFCS, Peking University $^{2}$ Beijing Academy of Artificial Intelligence $^{3}$ CEIE, Tongji University + +$^{4}$ Huazhong University of Science and Technology $^{5}$ Tencent AI Lab $^{6}$ National University of Defense Technology + +# Abstract + +Object goal navigation (ObjectNav) in unseen environments is a fundamental task for Embodied AI. Agents in existing works learn ObjectNav policies based on 2D maps, scene graphs, or image sequences. Considering this task happens in 3D space, a 3D-aware agent can advance its ObjectNav capability via learning from fine-grained spatial information. 
However, leveraging 3D scene representation can be prohibitively unpractical for policy learning in this floor-level task, due to low sample efficiency and expensive computational cost. In this work, we propose a framework for the challenging 3D-aware ObjectNav based on two straightforward sub-policies. The two sub-policies, namely corner-guided exploration policy and category-aware identification policy, simultaneously perform by utilizing online fused 3D points as observation. Through extensive experiments, we show that this framework can dramatically improve the performance in ObjectNav through learning from 3D scene representation. Our framework achieves the best performance among all modular-based methods on the Matterport3D and Gibson datasets, while requiring (up to $30x$ ) less computational cost for training. The code will be released to benefit the community. $^1$ + +# 1. Introduction + +As a vital task for intelligent embodied agents, object goal navigation (ObjectNav) [38, 49] requires an agent to find an object of a particular category in an unseen and unmapped scene. Existing works tackle this task through end-to-end reinforcement learning (RL) [27, 36, 47, 51] or modular-based methods [9, 14, 35]. End-to-end RL based methods take as input the image sequences and directly output low-level navigation actions, achieving competitive + +![](images/05486a55480f98a35c0a2b1a5519d3fbfcf2490e19d38baddd71cb59cafcb114.jpg) + +![](images/9c2e9dcd4bdeb316ff8e076f527473c4ae7ecbac964cf44db528c1f2d89ef3f8.jpg) + +![](images/8b67f6478be83602b9dc002185485f7ef7b29fac70a6bbcf1b3a95b9957cc908.jpg) + +![](images/84404b5677ea3732bb2936c13c479691cd6a6cdaaecbb7694e599d3d86305842.jpg) +c: cushion +s: sofa +t: chair + +![](images/805f0f28dc0b2ef5d21e412f70ee134fa5290359334ee6205cd3e74f8c18027c.jpg) + +![](images/34688127bd9d4325f162524b96a33470eb72c31e5de3b2c39e4973b953280e1b.jpg) +Looking for a chair + +![](images/cba1cc57cf8037850b9b9e04a0237d740bc2ace04c8990eaf145cf36cfe9ee1a.jpg) +Figure 1. We present a 3D-aware ObjectNav framework along with simultaneous exploration and identification policies: $\mathbf{A} \rightarrow \mathbf{B}$ , the agent was guided by an exploration policy to look for its target; $\mathbf{B} \rightarrow \mathbf{C}$ , the agent consistently identified a target object and finally called STOP. + +performance while suffering from lower sample efficiency and poor generalizability across datasets [3, 27]. Therefore, we favor modular-based methods, which usually contain the following modules: a semantic scene mapping module that aggregates the RGBD observations and the outputs from semantic segmentation networks to form a semantic scene map; an RL-based goal policy module that takes as input the semantic scene map and learns to online update a goal location; finally, a local path planning module that drives the agent to that goal. Under this design, the semantic accuracy and geometric structure of the scene map are crucial to the success of object goal navigation. + +We observe that the existing modular-based methods mainly construct 2D maps [8, 9], scene graphs [34, 56] or neural fields [43] as their scene maps. Given that objects lie in 3D space, these scene maps are inevitably deficient in leveraging 3D spatial information of the environment comprehensively and thus have been a bottleneck for further improving object goal navigation. 
In contrast, forming a 3D scene representation naturally offers more accurate, spatially dense and consistent semantic predictions than its 2D counterpart, as proved by [12, 31, 45]. Hence, if the agent could take advantage of the 3D scene understanding and + +form a 3D semantic scene map, it is expected to advance the performance of ObjectNav. + +However, leveraging 3D scene representation would bring great challenges to ObjectNav policy learning. First, building and querying fine-grained 3D representation across a floor-level scene requires extensive computational cost, which can significantly slow down the training of RL [7,55]. Also, 3D scene representation induces considerably more complex and high-dimensional observations to the goal policy than its 2D counterpart, leading to a lower sample efficiency and hampering the navigation policy learning [22, 57]. As a result, it is demanding to design a framework to efficiently and effectively leverage powerful 3D information for ObjectNav. + +To tackle these challenges, we propose a novel framework composed of an online semantic point fusion module for 3D semantic scene mapping and two parallel policy networks in charge of scene exploration and object identification, along with a local path planning module. Our online semantic point fusion module extends a highly efficient online point construction algorithm [53] to enable online semantic fusion and spatial semantic consistency computation from captured RGBD sequences. This 3D scene construction empowers a comprehensive 3D scene understanding for ObjectNav. Moreover, compared to dense voxel-based methods [7, 55], our point-based fusion algorithm are more memory-efficient [40, 46] which makes it practically usable for floor-level navigation task. (See Figure 1) + +Moreover, to ease the learning of navigation policy, we further propose to factorize the navigation policy into two sub-policies, namely exploration and identification. The two policies simultaneously perform to roll out an exploration goal and an identified object goal (if exist), respectively. Then the input for the local path planning module will switch between these two goals, depending on whether there exists an identified target object. More specifically, we propose a corner-guided exploration policy which learns to predict a long-term discrete goal at one of the four corners of the bounding box of the scene. These corner goals efficiently drive the agent to perceive the surroundings and explore regions where the target object is possibly settled. And for identification, a category-aware identification policy is proposed to dynamically learn a discrete confidence threshold to identify the semantic predictions for each category. Both of these policies are trained by RL in low-dimensional discrete action space. Through experiments, the simultaneous two-policy mechanism and discrete action space design dramatically reduce the difficulty in learning for 3D-aware ObjectNav and achieve better performance than existing modular-based navigation strategies [26, 35]. + +Through extensive evaluation on the public benchmarks, we demonstrate that our method performs online 3D-aware + +ObjectNav at 15 FPS while achieving the state-of-the-art performance on navigation efficiency. Moreover, our method outperforms all other modular-based methods in both efficiency and success rate with up to $30\mathrm{x}$ times less computational cost. + +Our main contributions include: + +- We present the first 3D-aware framework for Object-Nav task. 
+- We build an online point-based construction and fusion algorithm for efficient and comprehensive understanding of floor-level 3D scene representation. +- We propose a simultaneous two-policy mechanism which mitigates the problem of low sample efficiency in 3D-aware ObjectNav policy learning. + +# 2. Related Work + +GoalNav with Visual Sequences. There are constantly emerging researches on object goal navigation. One line of recent works directly leverages RGBD sequences, called end-to-end RL methods [47], which tends to implicitly encode the environment and predict low-level actions. These works benefit from visual representation [29, 50], auxiliary task [51], and data augmentation [27], demonstrating strong results on object goal navigation benchmarks [1, 49]. However, aiming to learn all skills through one policy from scratch, e.g., avoiding collisions, exploration, and stopping, it's well known that end-to-end RL methods suffer from low sampling efficiency for training and limited generalizability when transferred to the real world [3, 35]. Instead, our work uses explicit map to represent the environment, which ensures our sample efficiency and also obtain more generalizability through a modular-based paradigm [1, 35]. + +GoalNav with Explicit Scene Representations. To ease the burden of learning directly from visual sequences, another category of methods, called modular-based methods [8,9,15,17,32], use explicit representations as a proxy for robot observations. By leveraging explicit scene representations like scene graph [34, 56] or 2D top-down map [14,35], modular-based methods benefit from the modularity and shorter time horizons. They are considered to be more sample efficient and generalizable [14, 35]. Recent progress in modular-based methods has proposed a frontier-based exploration strategy [35], a hallucinate-driven semantic mapping method [14], and novel verification stage [26]. In contrast with prior map-based works, our method utilizes 3D spatial knowledge, including 3D point semantic prediction and consistency, enabling a more comprehensive understanding of the environment. + +Embodied AI tasks with 3D Scene Representation. There are considerable research leveraging 3D scene repre + +![](images/dea7bdcc4332163e8adff0f00b879c135946d2551d13dca93986c44d9af806c4.jpg) +Figure 2. An overview of our framework. We take in a posed RGB-D image at time step $t$ and perform point-based construction algorithm to online fuse a 3D scene representation $(\mathcal{M}_{3D}^{(t)})$ , along with a $\mathcal{M}_{2D}^{(t)}$ from semantics projection. Then, we simultaneously leverage two policies, including a corner-guided exploration policy $\pi_e$ and category-aware identification policy $\pi_f$ , to predict a discrete corner goal $g_e^{(t)}$ and a target goal $g_f^{(t)}$ (if exist) respectively. Finally, the local planning module will drive the agent to the given target goal $g_f^{(t)}$ (top priority) or the corner goal $g_e^{(t)}$ . + +sensation on certain embodied AI tasks, e.g., object grasping [5, 10], drawer opening [30, 44]. These works leverage various routes, including reinforcement learning [13], imitation learning [44], and supervised learning [5] with 3D scene representation, such as mesh, dense grids. However, most of these 3D-aware embodied AI tasks only perform in a limited space [10, 30, 44], e.g., near one table or drawer. 
Under large scale environments, such as floor-level scenes in ObjectNav, the existing methods would suffer from complex 3D observation and large computational costs. In this work, we propose a framework through leveraging a point-based construction module and two dedicatedly designed exploration and identification policies, to enable a 3D-aware agnet for ObjectNav. + +# 3. Method + +# 3.1. Task Definition and Method Overview + +Object Goal Navigation Task. In an unknown environment, the Object Goal Navigation task requires the agent to navigate to an instance of the specified target category. For fair comparison, we follow the previous problem setting [38, 49]. As initialization, the agent is located randomly without access to a pre-built environment map, and provided with a target category ID. At each time step $t$ , the agent receives noiseless onboard sensor readings, including an egocentric RGB-D image and a 3-DoF pose (2D position and 1D orientation) relative to the starting of the episode. Then the agent estimates its action $a_{t} \in \mathcal{A}$ for movement in a discrete action space, consisting of move_forward, turn_left, turn_right and stop. Given a limited time budget of 500 steps, the agent terminates the movement until it is within 1 meter of an object of the specified category. + +Method Overview. Figure 2 provides an overview of the proposed 3D-aware ObjectNav method. Our method takes RGBD frames along with pose sensor readings as input, to online construct a point-based scene representation $\mathcal{M}_{3D}$ (Sec. 3.2), which is further projected to construct a 2D semantic map $\mathcal{M}_{2D}$ . Given the structured 3D points $\mathcal{M}_{3D}$ and 2D map $\mathcal{M}_{2D}$ , our framework simultaneously performs two complementary policies (Sec. 3.3), the exploration policy and identification policy at a fixed time cycle of 25 steps. The exploration policy predicts a long-term discrete corner goal $g_{e}$ , to drive the agent to explore the surrounding environment. Meanwhile, the identification policy evaluates the 3D points $\mathcal{M}_{3D}$ at each step and outputs a target object goal $g_{f}$ if its semantic prediction is confident and consistent. The $g_{f}$ will be set as the approaching target for the agent once it exists, otherwise the agent will navigate to the long-term corner goal $g_{e}$ . An underlying local planning module will navigate the agent towards the goal using analytical path planning. + +# 3.2. Navigation-Driven 3D Scene Construction + +During navigation, the 3D-aware agent will constantly obtain new observations and incrementally build a fine-grained 3D scene representation, integrating spatial and semantic information to drive the agent. However, given that our agent is deployed for a floor-level GoalNav task, it is fairly challenging to construct and leverage 3D representation across the entire scene while keeping an acceptable computational cost. Accordingly in this section, we extend an online point-based construction algorithm [53] to online organize the 3D points and further empower semantic fusion and consistency estimation. This design is tailored for a comprehensive scene understanding of the ObjectNav agent, requiring little computational resources. + +3D Scene Representation. At time step $t$ , we represent the 3D scene $\mathcal{M}_{3D}$ as the point clouds, denoted as $P^{(t)} = \{(P_{l}^{(t)}, P_{s}^{(t)}, P_{c}^{(t)})\} \in \mathbb{R}^{N^{(t)} \times (M + 4)}$ , where $N^{(t)}$ is the point number. 
For each point $i$ , the $M + 4$ channels include the point position $P_{i,l}^{(t)} \in \mathbb{R}^3$ , point semantics $P_{i,s}^{(t)} \in \mathbb{R}^M$ and the point-wise spatial semantic consistency information $P_{i,c}^{(t)} \in \mathbb{R}^1$ . + +Online 3D Point Fusion Given a new captured posed RGB image $I_{c}^{(t)}$ and depth image $I_{d}^{(t)}$ at time step $t$ , the agent can obtain the point position $P_{l}^{(t)}$ by back-projecting all the depth images into the 3D world space via their corresponding poses. These points will be organized by a point-based construction algorithm [53]. Here, we briefly revisit this strategy. + +The construction algorithm dynamically allocates occupied 3D blocks $\{\mathcal{B}_k\}$ along with their index $k$ maintained by a tree-based method [20]. Each block $\mathcal{B}_k$ is defined by the boundary of constant length (10cm) along the X, Y and Z axes, e.g., $[X_{min}(B_k), X_{max}(B_k)]$ . And the points $p_{l,x} \in [X_{min}(B_k), X_{max}(B_k)]$ (the same requirement holds for Y and Z axes) be recorded by the block $\mathcal{B}_k$ . Given any 3D point $p_i$ , the algorithm can achieve efficient neighborhood retrieval with the corresponding block index $k$ . Furthermore, a one-level octree $\mathcal{O}_i$ for each point $p_i$ is constructed to obtain the fine-grained spatial information among points. Specifically, we connect each point with its nearest points in the eight quadrants of the Cartesian coordinate system (See Figure 3). Powered by this point-based construction strategy, give any point, we can efficiently querying this point with its neighbor points by blocks retrieval and octree. This algorithm for organizing 3D points can run at 15 FPS while requiring reasonable memory resources (about $\sim 500$ MB for one entire scene). We provide more detailed description in the supplemental material. + +Online Semantic Fusion. With an efficient reconstruction algorithm in hand, we can directly fuse temporal information, e.g., multi-view semantic predictions, to achieve more accurate and consistent scene understanding. Specifically, any point $p_i$ which has been captured by a sequence of RGBD frames $\{I_c^{(t)}, I_d^{(t)}\}$ could have multiple semantic predictions $\{p_{i,s}^{(t)}(I_c^{(t)})\}$ . We thus propose to online aggregate the multi-view 2D semantic predictions using a max-fusion mechanism to obtain the final 3D semantic prediction: + +$$ +p _ {i, s} ^ {(t)} = \mathcal {N} (\max (\{p _ {i, s} ^ {(t)} (I _ {c} ^ {(t)})) \})), \tag {1} +$$ + +where the max is performed on each semantic category, followed by a normalization $\mathcal{N}$ to linearly scale the probability distribution. Note that, the alternatives to fuse semantic predictions do exist, e.g. 3D convolution [19, 24], Bayesian updating [28]. However, directly conducting 3D convolution into such a floor-level 3D representation would in- + +![](images/4dd993af23292078a7bce4b6271b787202a70db028a106be49d5965ab55ae618.jpg) +Active Navigation + +![](images/bf02ca1f0460dfe37dee844f9bfe3371d969607e17b802ed1edfd208fa4e82b4.jpg) +Online Organized 3D points +Figure 3. Illustration of online 3D point fusion. (Left) A robot takes multi-view observations during navigation. (Right) The points $p$ are organized by dynamically allocated blocks $\mathcal{B}$ and perpoint octrees $\mathcal{O}$ , which can be used to query neighborhood points of any given point. + +evitably lead to a huge rise of computational cost, especially in the context of learning-based policy. 
We find that maximizing the 2D semantic prediction can already achieve impressive improvement on semantic accuracy (see Figure 8), with higher memory efficiency and time efficiency. Similar findings have also been reported and exploited in relevant works [7, 16]. + +Spatial Semantic Consistency. Based on the fact that semantic label should remain consistent for all the points in a single object, we propose to calculate the spatial semantic consistency information $P_{c}^{(t)}$ as part of the navigation-driven 3D scene representation. To be specific, $P_{i,c}^{(t)}$ is computed as the maximum semantic KL-divergence between point $P_{i}^{(t)}$ and its octree $\mathcal{O}(P_i^{(t)})$ : + +$$ +P _ {i, c} ^ {(t)} = \max \left(\left\{K L \left(P _ {i, s} ^ {(t)}, P _ {j, s} ^ {(t)}\right) \mid \forall P _ {j} ^ {(t)} \in \mathcal {O} \left(P _ {i} ^ {(t)}\right) \right\}\right), \tag {2} +$$ + +where $KL$ denotes the KL-divergence computation, which is a statistical distance that measures the semantic probability distribution between $P_{i,s}^{(t)}$ and $P_{j,s}^{(t)}$ . Note for point $P_{i}^{(t)}$ , if we count all its spatially close points as the neighbourhood $\mathcal{N}(P_i^{(t)})$ , it could be time consuming to calculate Equation 2, and the spatially close points do not help relieve the issue of outlier points as mentioned above. Therefore, we use the pre-built octree $\mathcal{O}_i$ to retrieve 8 nearest point in the quadrants of the Cartesian coordinate system. + +# 3.3. Simultaneous Exploration and Identification + +With the aggregated 3D information, we expect to empower a 3D-aware agent for the ObjectNav task. However, despite the efficient 3D scene representation, the agent still suffers from the complex and high-dimensional observations, leading to a lower sample efficiency in RL and hampering the navigation policy learning. Therefore, we leverage two complementary sub-policies: corner-guided exploration policy and category-aware identification policy. Each + +SemExp +Figure 4. Illustration of exploration policy. (Left) Learning-based continuous global goal [9]; (Middle) Heuristic direction selection [26]; (Right, ours) Learning-based corner goal prediction. +![](images/0cbe640eabeedfa32b989c7e85b39f6829120654d087b08d75cbd6f2e406b8be.jpg) +: Goal location : Candidate corner goals to be chosen in order + +Stubborn +![](images/96a956ec95b60458f383cf3aaa15ed77b6ef496b3b4edf5cc28300e7e54ab015.jpg) +: Candidate corner goals to be predicted based on learning (Ours) + +![](images/b37b22dbdb379eff69a08afc35a576b4948cb1675fbd4bf6fbbbb4e91ad9cd31.jpg) +Ours + +policy learns to predict low-dimensional discrete actions and outputs a goal location to navigate the agent, resulting in a strong performance while requiring less training time. We will detail the two policies below. + +Observation Space. At each time step $t$ , both policies take fine-grained 3D observation $x_{3D}^{(t)} = \{P^{(t)} \in ((4 + m) \times N)\}$ based on 3D scene representation $\mathcal{M}_{3D}$ . Here, the $N$ indicates the point number (we sample 4096 points) and the $m + 4$ channels are comprised of point position $p_l^{(t)} \in \mathbb{R}^3$ , fused semantic predictions $p_s^{(t)} \in \mathbb{R}^m$ and spatial semantic consistency $p_c^{(t)} \in \mathbb{R}^1$ . Following existing works [8, 9], we use an additional egocentric 2D map $\mathcal{M}_{2D}$ for exploration policy and the local path planning module, which is directly obtained by a project-to-ground operation. 
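For concreteness, the sketch below illustrates how these per-point quantities could be computed and assembled into the 3D observation: the 10 cm block key used for neighborhood retrieval, the max-fusion of Eq. (1), the KL-based consistency of Eq. (2) over the octree neighbors, and the sampled $(3 + m + 1)$-channel point features. This is only an illustrative re-implementation under stated assumptions (NumPy arrays, hypothetical helper names and layouts), not the released code.

```python
import numpy as np

def block_index(position, block_size=0.1):
    """Integer (x, y, z) key of the 10 cm block containing a 3D point,
    used for neighborhood retrieval (assumed indexing scheme)."""
    return tuple(np.floor(np.asarray(position) / block_size).astype(int))

def fuse_semantics(per_view_probs):
    """Eq. (1): per-category max over the views that observed the point,
    followed by renormalization. per_view_probs: (V, m) softmax outputs."""
    fused = np.asarray(per_view_probs).max(axis=0)
    return fused / fused.sum()

def semantic_consistency(point_probs, neighbor_probs, eps=1e-8):
    """Eq. (2): maximum KL divergence between a point's fused distribution
    and the distributions of its (up to 8) octree neighbors."""
    p = np.asarray(point_probs)[None, :] + eps
    q = np.asarray(neighbor_probs) + eps
    kl = (p * np.log(p / q)).sum(axis=1)
    return kl.max()

def build_3d_observation(positions, semantics, consistency, n_sample=4096, rng=None):
    """Stack position (3), fused semantics (m) and consistency (1) channels,
    then randomly sample a fixed-size point set as the policy input."""
    rng = rng or np.random.default_rng()
    feats = np.concatenate([positions, semantics, consistency[:, None]], axis=1)
    idx = rng.choice(len(feats), size=n_sample, replace=len(feats) < n_sample)
    return feats[idx]  # (n_sample, 3 + m + 1)
```

In the actual system these quantities are maintained incrementally on the online point map rather than recomputed over the full point set.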
More detailedly, for 2D observation $x_{2D}^{(t)} \in ((2 + m) \times M \times M)$ from 2D map $\mathcal{M}_{2D}$ , the first two channels represent obstacles and explored area, and the rest of the channels each corresponds to an object category. Here, $\mathcal{M}_{2D}$ (in a resolution of $M = 240$ with $20\mathrm{cm}$ grids) is constructed to give a large perception view of the scene, while 3D points perform as a fine-grained observation of objects. In addition to the scene representations, we also pass the goal object category index $o_{ID}$ as the side input to both policies. + +Corner-Guided Exploration Policy. The exploration policy attempts to guide the agent to explore and perceive the surrounding environment where it could access any instance of the target object category. We observe that existing learning-based exploration policies predict goal locations over the 2D map in continuous or large-dimensional discrete action space (Figure 4 Left), suffering from low sample efficiency. Therefore, we define a corner-guided exploration policy $g_{e} = \pi_{e}(x_{3D}, x_{2D}, o_{ID}; \theta_{e})$ that predicts a corner goal $g_{e}$ to drive the agent (Figure 4 Right). Here, the $\theta_{e}$ indicates the parameters of the policy, and $g_{e}$ is one of the four pre-defined corner goals {Top Left, Top Right, Bottom Left, Bottom Right} of the 2D map. + +Compared to predicting goals in a continuous or high-dimensional action space, learning to predict the four corner goals significantly reduces the learning difficulty. More + +![](images/8d59a1ce8c8b33399ddb1177464b126e5b36c9cafcde4a1457f6d17ebdfa405a.jpg) +(A) +Figure 5. Illustration of identification policy. From $\mathrm{A} \rightarrow \mathrm{B}$ , fused points are filtered by the category-aware predicted threshold $\tau$ . From $\mathrm{B} \rightarrow \mathrm{C}$ , the policy further checks the spatial label consistency of the points and identifies the target goal. + +![](images/3ce7fe35ee9e5c7a98f90bba72dcd394208b78781821d48a5b857660c17fa076.jpg) +(B) + +![](images/dfc8fd98c0ff490f699f7c6635eb637f8d33b5581d4630b8830c9e330e75ae9c.jpg) +(c) + +over, as noted by previous studies [4, 26], the corner-goal-based exploration strategy exhibits the capacity to achieve efficient exploration through avoiding back-and-forth pacing. Superior to using other heuristic corner goal exploration strategies (Figure 4 Middle), our agent can learn from the 3D scene priors to behave more intelligently. Demonstrations of our corner-guided exploration can be found in the attached video. + +Category-Aware Identification Policy. During navigation, the agent consistently makes semantic predictions to identify an instance of target object category. Most works [9, 14] simply use a preset hard confidence threshold for target identification. However, this strategy is inherently suboptimal due to the considerable variability in semantic prediction results across different categories and observation angles. As a result, a preset threshold would be unable to adequately adapt to the ever-changing nature of these scenarios. Also, it ignores the consistency of the semantic prediction in 3D space. + +To tackle this issues, we propose to leverage both dynamic confidence threshold and spatial semantic label consistency for target identification. We define a policy $s = \pi_f(x_{3D}, o_{ID}; \theta_f)$ which takes the 3D observation $x_{3D}$ and target category index $o_{ID}$ and outputs a threshold-indicating action $s \in \{0, 1, \dots, 9\}$ . 
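Both sub-policies therefore act in very small discrete action spaces, which is what keeps them sample-efficient to train. Purely as an illustration (the corner ordering and helper names are assumptions, and the threshold mapping mirrors Eq. (3) given next), decoding the two discrete actions could look like:

```python
def corner_goal(action, map_size=240):
    """Decode an exploration action in {0, 1, 2, 3} into one of the four
    corner cells of the egocentric 2D map (top-left, top-right,
    bottom-left, bottom-right); the ordering here is an assumption."""
    corners = [
        (0, 0),
        (0, map_size - 1),
        (map_size - 1, 0),
        (map_size - 1, map_size - 1),
    ]
    return corners[action]

def confidence_threshold(s, tau_low=0.5, n_bins=10):
    """Decode the identification action s in {0, ..., 9} into a confidence
    threshold (0.5 to 0.95 with the defaults), as in Eq. (3)."""
    return tau_low + s * (1.0 - tau_low) / n_bins
```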
And the dynamic threshold $\tau$ can be obtained by: + +$$ +\tau = \tau_ {l o w} + s \cdot \frac {1 - \tau_ {l o w}}{1 0}, \tag {3} +$$ + +where the $\tau_{low}$ is set to 0.5 in our implementation for a threshold range $\tau \in [0.5,0.95]$ . The $\tau$ will be used to dynamically identify the points belonging to the target object (Figure 5 Middle). It is worth mentioning that this policy also utilizes a low-dimensional discrete action space, which is fairly easy for the agent to learn. + +To obtain the final target goal $g_{f}$ , our method further checks the spatial semantic label consistency. Specifically, we use the points $\{p_i | (p_i, p) \in \mathcal{O}_p\}$ connected by the perpoint octree $\mathcal{O}_p$ to approximately represent the 3D surface of the target object. Our insight is that the points along the target's surface should have consistent semantic + +labels. Therefore, we only identify those points who have at least 2-ring neighbors across the octrees $\{p_i|(p_i,p_j)\in \mathcal{O}_{p_j}|(p_j,p)\in \mathcal{O}_p\}$ as the target object goal $g_{f}$ (Figure 5 Right). See Figure 5 for visualized illustration and more details can be found in supplemental material. + +Local Planning Module. The goals $g_{e}$ and $g_{f}$ from two polices will be consistently updated during navigation. Our method will preferentially utilize the target goal $g_{f}$ if it exists, otherwise take the long-term corner goal $g_{e}$ to explore. To navigate to the given location, we use the Fast Marching Method [42] to analytically plan the shortest path from the agent location. The agent then takes deterministic actions to follow this path. + +Rewards. For the exploration policy, we share a similar reward design as [1, 51]. The agent receives a sparse success reward $r_{success} = 2.5$ , a slack reward $r_{slack} = 10^{-2}$ and an exploration reward $r_{explore}$ . The exploration reward is a dense reward, defined by the number of new inserted point $n_p^{new}$ as $r_{explore} = n_p^{new} \times 10^{-3}$ . The slack reward and exploration reward encourage the agent to take the most effective direction to the unobserved area. And for the identification policy, we combine the same success reward and slack reward borrowed from the exploration policy. + +# 4. Experiments + +# 4.1. Experiment Setup. + +We perform experiments on the Matterport3D (MP3D) [6] and Gibson [48] datasets with the Habitat simulator [39]. Both Gibson and MP3D contain photorealistic 3D reconstructions of real-world environments. For Gibson, we use 25 train / 5 val scenes from the Gibson tiny split. And we follow the same setting as in [9, 35] where we consider 6 goal categories, including chair, couch, potted plant, bed, toilet and TV. For MP3D, we use the standard split of 61 train / 11 val scenes with Habitat ObjectNav dataset [38], which consists of 21 goal categories (the full list can be found in the supplemental material). Note that, the RGB-D and pose readings are noise-free from simulation (follow the definition of [1]). Estimation of the pose from noisy sensor readings is out of the scope of this work and can be addressed if necessary, by incorporating off-the-shelf robust odometry [52, 54]. + +Implementation Details. On MP3D, we use the same pretrained 2D semantic model RedNet [21] as [35,51]. On Gibson, we leverage a Mask R-CNN [18], which is trained with COCO dataset [23]. For each frame, we randomly sample 512 points for point-based construction. 
Moreover, we use PointNet [33] and fully convolutional networks [25] to extract features from the 3D points and the 2D map, respectively. During training, we sample actions every 25 steps and use Proximal Policy Optimization (PPO) [41] for both ex + +Table 1. ObjectNav validation results on Gibson and MP3D. Our method is trained with 5 seeds and we report the averaged performance. The best of all methods and the best among modular-based methods are shown in **bold** and underlined, respectively. Note that Habitat-Web makes use of extra data. + +
| Method | Gibson SPL(%)↑ | Gibson Succ.(%)↑ | Gibson DTS(m)↓ | MP3D SPL(%)↑ | MP3D Succ.(%)↑ | MP3D DTS(m)↓ |
|---|---|---|---|---|---|---|
| DD-PPO [47] | 10.7 | 15.0 | 3.24 | 1.8 | 8.0 | 6.90 |
| Red-Rabbit [51] | - | - | - | 7.9 | 34.6 | - |
| THDA [27] | - | - | - | 11.1 | 28.4 | 5.58 |
| Habitat-Web [36] | - | - | - | 10.2 | 35.4 | - |
| FBE [37] | 28.3 | 64.3 | 1.78 | 7.2 | 22.7 | 6.70 |
| ANS [8] | 34.9 | 67.1 | 1.66 | 9.2 | 27.3 | 5.80 |
| L2M* [14] | - | - | - | 11.0 | 32.1 | 5.12 |
| SemExp* [9] | 39.6 | 71.7 | 1.39 | 10.9 | 28.3 | 6.06 |
| Stubborn* [26] | - | - | - | 13.5 | 31.2 | 5.01 |
| PONI [35] | 41.0 | 73.6 | 1.25 | 12.1 | 31.8 | 5.10 |
| Ours | 42.1 | 74.5 | 1.16 | 14.6 | 34.0 | 4.74 |
+ +Table 2. ObjectNav validation results on MP3D-L2M [14]. + +
| Method | SPL(%)↑ | SoftSPL(%)↑ | Succ.(%)↑ | DTS(m)↓ |
|---|---|---|---|---|
| SemExp [9] | 16.5 | - | 28.1 | 4.848 |
| L2M [14] | 14.8 | 20.0 | 34.8 | 3.669 |
| Ours | 21.2 | 30.5 | 40.2 | 3.278 |
+ +ploration and identification policies. More implementation details can be found in the supplemental material. + +Evaluation Metrics. Following existing works [2, 14, 35], we adopt the following evaluation metrics: 1) SPL: success weighted by path length. It measures the efficiency of the agent over oracle path length, which serves as the primary evaluation metric for Habitat Challenge [49]. 2) Success rate: the percentage of successful episodes 3) Soft SPL: a softer version of SPL measure the progress towards the goal (even with 0 success). 4) DTS: geodesic distance (in m) to the success at the end of the episode. + +Baselines. We consider mainstream baselines in the ObjectNav task. For end-to-end RL methods, we cover DD-PPO [47], Red-Rabiit [51], THDA [27], and Habiat-Web [36]. For modular based methods, we cover FBE [37], ANS [8], L2M [14], SemExp [9], Stubborn [26] and PONI [35]. Note that, some works use additional data to improve the performance, e.g. Habitat-web leverages human demonstration trajectories, and THDA utilizes data augmentation. It is challenging to compare all the methods fairly. Therefore, we are particularly interested in the three most relevant baselines: SemExp, Stubborn, and PONI. These three methods share the same 2D semantic predictors [18, 21] as our method. + +# 4.2. Results + +Comparison on MP3D and Gibson. We evaluate our approach on MP3D (val) and Gibson (val) with other baselines, including end-to-end RL(rows 1 - 4) and modular + +![](images/bcb7f3863f2cabb911e2b285896d351ff791cfb97144ef3a5e95e7aa36afe1c4.jpg) +Figure 6. An qualitative visualization of the trajectory of the proposed method. We visualize an episode from MP3D where an agent is expected to find a bed. The semantic prediction $p_{s}$ and spatial semantic consistency $p_{c}$ of points are visualized on the left. During navigation, the agent can successfully dismiss the wrong prediction and approach and finally call stop around the target object. + +Table 3. Comparison of different exploration policies. Here, all methods share the same identification strategy from [9] for fair comparison. + +
| Method | SPL(%) | Succ.(%) | DTS(m) |
|---|---|---|---|
| Learn continuous goal | 11.1 | 28.6 | 6.354 |
| Learn dense grid goal | 12.7 | 29.5 | 5.635 |
| Learn 8 corner goal | 12.9 | 30.7 | 5.112 |
| Heuristic 4 corner goal | 13.5 | 33.0 | 4.995 |
| Learn 4 corner goal (Ours) | 13.9 | 33.5 | 4.931 |
+ +based methods (rows 5 - 10). Note that, SemExp and Stubborn did not report the results on MP3D validation, while L2M uses a self-made dataset MP3D-L2M based on MP3D and tests fewer categories than what we do. We therefore faithfully provide the results, denoted with *, by evaluating with their public available code. The results are demonstrated in Table 1. On both datasets, our method achieves the state-of-the-art ObjetNav efficiency (SPL) among all methods (2.6% higher on Gibson dataset and 8.1% higher on MP3D). For the success rate, our method achieves the best results among all modular-based methods, showing comparable performance with additional annotation methods THAD [27] and Habitat-web [36]. Especially, compared with the modular-based methods, SemExp, Stubborn, and PONI, which share the same 2D semantic predictor [21] as ours, the results fairly demonstrate the superiority of our framework on both efficiency and success rate. We also provide the results validated on MP3D-L2M in Table 2. + +We also provide a qualitative visualization of MP3D + +Table 4. Comparison on different identification policies. + +
| Method | Repr. | Thre. | SPL(%) | Succ.(%) | DTS(m) |
|---|---|---|---|---|---|
| Deterministic | 2D | 0.85 | 12.8 | 30.1 | 5.151 |
| Deterministic | 3D | 0.85 | 13.8 | 32.5 | 4.987 |
| Learning (Ours) | 3D | - | 14.6 | 34.0 | 4.749 |
+ +episodes in Figure 6. Here, our method online updates the semantic prediction and successfully dismisses the wrong target goal. For more qualitative results, please refer to the supplemental material. + +Comparison on Exploration Policy. We conduct an experiment to verify the efficiency of our corner-guided exploration policy on MP3D. To remove the effect of the 2D semantic predictor and identification policy, all competitors share the same semantic predictor and a heuristic identification policy proposed in SemExp [9]. The results are reported in Table 3. Our corner-guided exploration policy outperforms the mainstream existing methods, including learning-based ones [8, 14] and heuristic ones [26]. Our findings indicate that the best performance is achieved through learning to predict discrete corner goals from the four corners of the scene. This suggests that the four-corner design, which benefits from a small, discrete action space, is already capable of efficiently guiding the agent in exploring the environment. + +Comparison on Identification Policy. Another critical challenge in OjectNav is how to properly identify an in- + +![](images/2ce05c5b20cb1e350538777bac655512bd833d2c7528abbb36a22e5aeb2c4cd4.jpg) +Figure 7. An comparison of predicted threshold distribution between different categories by our category-aware policy. We report the ratio of the each predicted threshold. + +stance of target object category. Therefore, We evaluate our identification policy on MP3D along with other identifying strategies, including a 2D frame-based policy adopted in [9] and 3D point-based methods proposed by our approach. The results are shown in Table 4. We observe a performance improvement (rows 1 - 2) by simply leveraging 3D point-based construction and fusion algorithm. It can demonstrate that the multi-view observations provide more accurate semantic prediction, which effectively reduces false positive prediction (see examples in Figure 8). Moreover, our category-aware identification policy, through predicting dynamic threshold, demonstrates an even better performance. + +To further investigate the effect of our identification policy, We conduct a break down study in Figure 7 by plotting the distribution of predicted semantic confidence thresholds. Specifically, we plot the distribution of three different categories (table, cushion, plant). For a relatively easy-to-recognize category, such as table with $52.6\%$ success rate (SR), our policy predict a broad threshold distribution. However, for more challenging categories, such as cushion $(36.9\%)$ SR) and plant $(16.1\%)$ SR), the policy tends to be more conservative through setting a higher threshold. The results demonstrate the category-aware characteristic of our identification policy which adapts well to different difficulty levels across categories. + +Ablation Study. We also perform an ablation study to verify the effectiveness of different components of our method. The results are demonstrated in Table 5. The cooperation of the 2D top-down map and 3D points (row 4) shows significant improvement by incorporating extensive scene perception (in 2D) and fine-grained object perception (in 3D). Moreover, rows (3-4) and (4-5) proved the effectiveness of leveraging consistency information and the identification policy, respectively. + +Analysis of Computational Cost. Our framework is extremely memory efficient, which requires about 0.5GB for one scene, and can perform online construction and + +Table 5. 
Ablation study of the main components of our method. Pos. denotes the semantic predictions $p_{s}$, KL denotes the spatial semantic consistency $p_{c}$, and I. Policy denotes the use of the proposed identification policy. + +
| 2D map | 3D points Pos. | 3D points KL | I. Policy | SPL(%) | Succ.(%) | DTS(m) |
|---|---|---|---|---|---|---|
|  |  |  |  | 11.2 | 29.6 | 6.213 |
|  |  |  |  | 13.0 | 32.3 | 5.769 |
|  |  |  |  | 13.7 | 33.8 | 5.620 |
|  |  |  |  | 13.9 | 33.5 | 4.931 |
|  |  |  |  | 14.6 | 34.0 |  |
+ +![](images/8013c6b051e07ee2ed1541e3ef95bc29eace7f1ff50d29a9bb79a210aaa71363.jpg) +Figure 8. Visualization of the results of online 3D point fusion. + +semantic fusion at a frame rate of 15 FPS. Moreover, our method requires only 48 GPU hours to train a 3D-aware agent on MP3D dataset to achieve the SOTA performance among all modular-based methods. This is significantly faster (30x) than other existing reinforcement learning based methods [9, 51], and is comparable to supervised learning modular-based methods [35] + +# 5. Conclusion + +In this work, we present a 3D-aware framework for object goal navigation. Our method is based on a 3D point-based construction algorithm to observe the 3D scenes and simultaneously perform exploration and identification policies to navigate the agent. Our method achieve SOTA performance among all modular-based methods, while requiring less training time. In the future, we would like to exploit this 3D-aware framework in other embodied AI tasks, e.g. mobile manipulation, robotic nurses. + +Acknowledgements. We thank anonymous reviewers for their valuable suggestions. This work was supported by National Key Research and Development Program of China (2018AAA0102200), NSFC (62132021), and Beijing Academy of Artificial Intelligence (BAAI). + +# References + +[1] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. ObjectNav Revisited: On Evaluation of Embodied Agents Navigating to Objects. In arXiv:2006.13171, 2020. 2, 6, 13 +[2] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. Objectnav revisited: On evaluation of embodied agents navigating to objects. ArXiv, abs/2006.13171, 2020. 6 +[3] Tommaso Campari, Paolo Eccher, Luciano Serafini, and Lamberto Ballan. Exploiting scene-specific features for object goal navigation. In European Conference on Computer Vision, pages 406-421. Springer, 2020. 1, 2 +[4] Chao Cao, Hongbiao Zhu, Howie Choset, and Ji Zhang. Tare: A hierarchical framework for efficiently exploring complex 3d environments. In Robotics: Science and Systems, 2021. 5 +[5] Hanwen Cao, Hao-Shu Fang, Wenhai Liu, and Cewu Lu. Suctionnet-1billion: A large-scale benchmark for suction grasping. IEEE Robotics and Automation Letters, 6(4):8718-8725, 2021. 3 +[6] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158, 2017. 6, 13 +[7] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in Neural Information Processing Systems, 34:13086-13098, 2021. 2, 4 +[8] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. arXiv preprint arXiv:2004.05155, 2020. 1, 2, 5, 6, 7, 13 +[9] Devendra Singh Chaplot, Dhiraj Prakashchand Gandhi, Abhinav Gupta, and Russ R Salakhutdinov. Object goal navigation using goal-oriented semantic exploration. Advances in Neural Information Processing Systems, 33:4247-4258, 2020. 1, 2, 5, 6, 7, 8, 13 +[10] Changhyun Choi, Wilko Schwarting, Joseph DelPreto, and Daniela Rus. Learning object grasping for soft robot hands. IEEE Robotics and Automation Letters, 3(3):2370-2377, 2018. 
3 +[11] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5556-5565, 2015. 14 +[12] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In ECCV, 2018. 1 +[13] Samir Yitzhak Gadre, Kiana Ehsani, and Shuran Song. Act the part: Learning interaction strategies for articulated object part discovery. ICCV, 2021. 3 + +[14] Georgios Georgakis, Bernadette Bucher, Karl Schmeckpeper, Siddharth Singh, and Kostas Daniilidis. Learning to map for active semantic goal navigation. In International Conference on Learning Representations (ICLR), 2022. 1, 2, 5, 6, 7, 13 +[15] Georgios Georgakis, Yimeng Li, and Jana Kosecka. Simultaneous mapping and target driven navigation. ArXiv, abs/1911.07980, 2019. 2 +[16] Margarita Grinvald, Fadri Furrer, Tonci Novkovic, Jen Jen Chung, Cesar Cadena, Roland Siegwart, and Juan Nieto. Volumetric instance-aware semantic mapping and 3d object discovery. IEEE Robotics and Automation Letters, 4(3):3037-3044, 2019. 4 +[17] Saurabh Gupta, Varun Tolani, James Davidson, Sergey Levine, Rahul Sukthankar, and Jitendra Malik. Cognitive mapping and planning for visual navigation. International Journal of Computer Vision, 128:1311-1330, 2017. 2 +[18] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross B. Girshick. Mask r-cnn. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42:386-397, 2020. 6 +[19] Shi-Sheng Huang, Ze-Yu Ma, Tai-Jiang Mu, Hongbo Fu, and Shi-Min Hu. Supervoxel convolution for online 3d semantic segmentation. ACM Transactions on Graphics (TOG), 40(3):1-15, 2021. 4 +[20] Hosagrahar V Jagadish, Beng Chin Ooi, Kian-Lee Tan, Cui Yu, and Rui Zhang. idistance: An adaptive b+-tree based indexing method for nearest neighbor search. ACM Transactions on Database Systems (TODS), 30(2):364-397, 2005. 4 +[21] Jindong Jiang, Lunan Zheng, Fei Luo, and Zhijun Zhang. Rednet: Residual encoder-decoder network for indoorrgb-d semantic segmentation. arXiv preprint arXiv:1806.01054, 2018. 6, 7 +[22] Cheng Lin, Tingxiang Fan, Wenping Wang, and Matthias Nießner. Modeling 3d shapes by reinforcement learning. In ECCV, 2020. 2 +[23] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C. Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 6 +[24] Leyao Liu, Tian Zheng, Yun-Jou Lin, Kai Ni, and Lu Fang. Ins-conv: Incremental sparse convolution for online 3d segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18975–18984, 2022. 4 +[25] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 6, 13 +[26] Haokuan Luo, Albert Yue, Zhang-Wei Hong, and Pulkit Agrawal. Stubborn: A strong baseline for indoor object navigation. arXiv preprint arXiv:2203.07359, 2022. 2, 5, 6, 7, 13 +[27] Oleksandr Maksymets, Vincent Cartillier, Aaron Gokaslan, Erik Wijmans, Wojciech Galuba, Stefan Lee, and Dhruv Batra. Thda: Treasure hunt data augmentation for semantic navigation. In Proceedings of the IEEE/CVF International + +Conference on Computer Vision, pages 15374-15383, 2021. 1, 2, 6, 7 +[28] John McCormac, Ankur Handa, Andrew Davison, and Stefan Leutenegger. Semantic fusion: Dense 3d semantic mapping with convolutional neural networks. 
In 2017 IEEE International Conference on Robotics and automation (ICRA), pages 4628-4635. IEEE, 2017. 4 +[29] Arsalan Mousavian, Alexander Toshev, Marek Fiser, Jana Košecka, Ayzaan Wahid, and James Davidson. Visual representations for semantic target driven navigation. In 2019 International Conference on Robotics and Automation (ICRA), pages 8846-8852. IEEE, 2019. 2 +[30] Tongzhou Mu, Zhan Ling, Fanbo Xiang, Derek Cathera Yang, Xuanlin Li, Stone Tao, Zhiao Huang, Zhiwei Jia, and Hao Su. Maniskill: Generalizable manipulation skill benchmark with large-scale demonstrations. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. 3 +[31] Alexey Nekrasov, Jonas Schult, Or Litany, B. Leibe, and Francis Engelmann. Mix3d: Out-of-context data augmentation for 3d scenes. 2021 International Conference on 3D Vision (3DV), pages 116-125, 2021. 1 +[32] Emilio Parisotto and Ruslan Salakhutdinov. Neural map: Structured memory for deep reinforcement learning. ArXiv, abs/1702.08360, 2018. 2 +[33] C. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 77-85, 2017. 6, 13 +[34] Yiding Qiu, Anwesan Pal, and Henrik I Christensen. Learning hierarchical relationships for object-goal navigation. arXiv preprint arXiv:2003.06749, 2020. 1, 2 +[35] Santhosh Kumar Ramakrishnan, Devendra Singh Chaplot, Ziad Al-Halah, Jitendra Malik, and Kristen Grauman. Poni: Potential functions for objectgoal navigation with interaction-free learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18890-18900, 2022. 1, 2, 6, 8 +[36] Ram Ramrakhya, Eric Undersander, Dhruv Batra, and Abhishek Das. Habitat-web: Learning embodied object-search strategies from human demonstrations at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5173-5183, 2022. 1, 6, 7 +[37] IEEE Robotics. Proceedings 1997, IEEE international symposium on computational intelligence in robotics and automation cira'97 - towards new computational principles for robotics and automation, july 10-11, 1997, monterey, california, usa. In CIRA, 1997. 6 +[38] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, et al. Habitat: A platform for embodied ai research. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9339-9347, 2019. 1, 3, 6, 14 +[39] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv + +Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 6 +[40] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2 +[41] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. ArXiv, abs/1707.06347, 2017. 6 +[42] James A. Sethian. Fast marching methods. SIAM Rev., 41:199-235, 1999. 6 +[43] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. 
Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv:2210.05663, 2022. 1 +[44] Hao Shen, Weikang Wan, and He Wang. Learning category-level generalizable object manipulation policy via generative adversarial self-imitation learning from demonstrations. arXiv preprint arXiv:2203.02107, 2022. 3 +[45] Thang Vu, Kookhoi Kim, Tung Minh Luu, Xuan Thanh Nguyen, and Chang-Dong Yoo. Softgroup for 3d instance segmentation on point clouds. ArXiv, abs/2203.01509, 2022. 1 +[46] Thomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. Robotics: Science and Systems, 2015. 2 +[47] Erik Wijmans, Abhishek Kadian, Ari Morcos, Stefan Lee, Irfan Essa, Devi Parikh, Manolis Savva, and Dhruv Batra. Dd-ppo: Learning near-perfect pointgoal navigators from 2.5 billion frames. arXiv preprint arXiv:1911.00357, 2019. 1, 2, 6 +[48] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9068-9079, 2018. 6 +[49] Karmesh Yadav, Santhosh Kumar Ramakrishnan, John Turner, Aaron Gokaslan, Oleksandr Maksymets, Rishabh Jain, Ram Ramrakhya, Angel X Chang, Alexander Clegg, Manolis Savva, Eric Undersander, Devendra Singh Chaplot, and Dhruv Batra. Habitat challenge 2022. https://aihabitat.org/challenge/2022/, 2022. 1, 2, 3, 6 +[50] Wei Yang, Xiaolong Wang, Ali Farhadi, Abhinav Gupta, and Roozbeh Mottaghi. Visual semantic navigation using scene priors. arXiv preprint arXiv:1810.06543, 2018. 2 +[51] Joel Ye, Dhruv Batra, Abhishek Das, and Erik Wijmans. Auxiliary tasks and exploration enable objectgoal navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16117-16126, 2021. 1, 2, 6, 8 +[52] Jiazhao Zhang, Yijie Tang, He Wang, and Kai Xu. Asro-dio: Active subspace random optimization based depth inertial odometry. IEEE Transactions on Robotics, 2022. 6 +[53] Jiazhao Zhang, Chenyang Zhu, Lintao Zheng, and Kai Xu. Fusion-aware point convolution for online semantic 3d scene + +segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4534-4543, 2020. 2, 3, 4, 12 +[54] Xiaoming Zhao, Harsh Agrawal, Dhruv Batra, and Alexander G. Schwing. The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 16107-16116, 2021. 6 +[55] Lintao Zheng, Chenyang Zhu, Jiazhao Zhang, Hang Zhao, Hui Huang, Matthias Nießner, and Kai Xu. Active scene understanding via online semantic reconstruction. Computer Graphics Forum, 38, 2019. 2 +[56] Fengda Zhu, Xiwen Liang, Yi Zhu, Qizhi Yu, Xiaojun Chang, and Xiaodan Liang. Soon: Scenario oriented object navigation with graph-based exploration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12689-12699, 2021. 1, 2 +[57] Yuke Zhu, Roozbeh Mottaghi, Eric Kolve, Joseph J. Lim, Abhinav Kumar Gupta, Li Fei-Fei, and Ali Farhadi. Target-driven visual navigation in indoor scenes using deep reinforcement learning. 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 3357–3364, 2017. 
2 \ No newline at end of file diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/images.zip b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..6d54fc84fca375e1541154734df4f3b0b379120b --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f48b7c1609864593ecdf098a0029a5afa18c4b275ae2a4d0439e3404b2037c54 +size 548889 diff --git a/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/layout.json b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..353f187507a18424a2ed1ecde46184539c5596f0 --- /dev/null +++ b/2023/3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification/layout.json @@ -0,0 +1,9884 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 148, + 103, + 445, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 103, + 445, + 140 + ], + "spans": [ + { + "bbox": [ + 148, + 103, + 445, + 140 + ], + "type": "text", + "content": "3D-Aware Object Goal Navigation via Simultaneous Exploration and Identification" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 160, + 211, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 160, + 211, + 175 + ], + "spans": [ + { + "bbox": [ + 121, + 160, + 211, + 175 + ], + "type": "text", + "content": "Jiazhao Zhang" + }, + { + "bbox": [ + 121, + 160, + 211, + 175 + ], + "type": "inline_equation", + "content": "^{1,2,*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 236, + 161, + 283, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 161, + 283, + 175 + ], + "spans": [ + { + "bbox": [ + 236, + 161, + 283, + 175 + ], + "type": "text", + "content": "Liu Dai" + }, + { + "bbox": [ + 236, + 161, + 283, + 175 + ], + "type": "inline_equation", + "content": "^{3*}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 161, + 382, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 161, + 382, + 175 + ], + "spans": [ + { + "bbox": [ + 304, + 161, + 382, + 175 + ], + "type": "text", + "content": "Fanpeng Meng" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 408, + 161, + 476, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 161, + 476, + 175 + ], + "spans": [ + { + "bbox": [ + 408, + 161, + 476, + 175 + ], + "type": "text", + "content": "Qingnan Fan" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 206, + 175, + 273, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 175, + 273, + 188 + ], + "spans": [ + { + "bbox": [ + 206, + 175, + 273, + 188 + ], + "type": "text", + "content": "Xuelin Chen" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 299, + 175, + 420, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 175, + 420, + 190 + ], + "spans": [ + { + "bbox": [ + 299, + 175, + 420, + 190 + ], + "type": "text", + "content": "Kai Xu" + }, + { + "bbox": [ + 299, + 175, + 420, + 190 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 299, + 175, + 420, + 190 + ], + "type": "text", + "content": " He Wang" + }, + { + "bbox": [ + 299, + 175, + 
420, + 190 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "text", + "content": " CFCS, Peking University " + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "text", + "content": " Beijing Academy of Artificial Intelligence " + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 93, + 190, + 506, + 203 + ], + "type": "text", + "content": "CEIE, Tongji University" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "spans": [ + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "text", + "content": "Huazhong University of Science and Technology " + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "text", + "content": "Tencent AI Lab " + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 64, + 204, + 539, + 217 + ], + "type": "text", + "content": "National University of Defense Technology" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 143, + 244, + 192, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 244, + 192, + 257 + ], + "spans": [ + { + "bbox": [ + 143, + 244, + 192, + 257 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "spans": [ + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "type": "text", + "content": "Object goal navigation (ObjectNav) in unseen environments is a fundamental task for Embodied AI. Agents in existing works learn ObjectNav policies based on 2D maps, scene graphs, or image sequences. Considering this task happens in 3D space, a 3D-aware agent can advance its ObjectNav capability via learning from fine-grained spatial information. However, leveraging 3D scene representation can be prohibitively unpractical for policy learning in this floor-level task, due to low sample efficiency and expensive computational cost. In this work, we propose a framework for the challenging 3D-aware ObjectNav based on two straightforward sub-policies. The two sub-policies, namely corner-guided exploration policy and category-aware identification policy, simultaneously perform by utilizing online fused 3D points as observation. Through extensive experiments, we show that this framework can dramatically improve the performance in ObjectNav through learning from 3D scene representation. 
Our framework achieves the best performance among all modular-based methods on the Matterport3D and Gibson datasets, while requiring (up to " + }, + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "type": "inline_equation", + "content": "30x" + }, + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "type": "text", + "content": ") less computational cost for training. The code will be released to benefit the community." + }, + { + "bbox": [ + 46, + 272, + 290, + 536 + ], + "type": "inline_equation", + "content": "^1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 559, + 128, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 559, + 128, + 571 + ], + "spans": [ + { + "bbox": [ + 47, + 559, + 128, + 571 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 582, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 287, + 677 + ], + "type": "text", + "content": "As a vital task for intelligent embodied agents, object goal navigation (ObjectNav) [38, 49] requires an agent to find an object of a particular category in an unseen and unmapped scene. Existing works tackle this task through end-to-end reinforcement learning (RL) [27, 36, 47, 51] or modular-based methods [9, 14, 35]. End-to-end RL based methods take as input the image sequences and directly output low-level navigation actions, achieving competitive" + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 308, + 243, + 349, + 294 + ], + "blocks": [ + { + "bbox": [ + 308, + 243, + 349, + 294 + ], + "lines": [ + { + "bbox": [ + 308, + 243, + 349, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 243, + 349, + 294 + ], + "type": "image", + "image_path": "05486a55480f98a35c0a2b1a5519d3fbfcf2490e19d38baddd71cb59cafcb114.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 356, + 243, + 399, + 296 + ], + "blocks": [ + { + "bbox": [ + 356, + 243, + 399, + 296 + ], + "lines": [ + { + "bbox": [ + 356, + 243, + 399, + 296 + ], + "spans": [ + { + "bbox": [ + 356, + 243, + 399, + 296 + ], + "type": "image", + "image_path": "9c2e9dcd4bdeb316ff8e076f527473c4ae7ecbac964cf44db528c1f2d89ef3f8.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 400, + 243, + 449, + 296 + ], + "blocks": [ + { + "bbox": [ + 400, + 243, + 449, + 296 + ], + "lines": [ + { + "bbox": [ + 400, + 243, + 449, + 296 + ], + "spans": [ + { + "bbox": [ + 400, + 243, + 449, + 296 + ], + "type": "image", + "image_path": "8b67f6478be83602b9dc002185485f7ef7b29fac70a6bbcf1b3a95b9957cc908.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 452, + 243, + 503, + 295 + ], + "blocks": [ + { + "bbox": [ + 452, + 243, + 503, + 295 + ], + "lines": [ + { + "bbox": [ + 452, + 243, + 503, + 295 + ], + "spans": [ + { + "bbox": [ + 452, + 243, + 503, + 295 + ], + "type": "image", + "image_path": "84404b5677ea3732bb2936c13c479691cd6a6cdaaecbb7694e599d3d86305842.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 514, + 245, + 539, + 263 + ], + "lines": [ + { + "bbox": [ + 514, + 245, + 539, + 263 + ], + "spans": [ + { + "bbox": [ + 514, + 245, + 539, + 263 + ], + "type": "text", + 
"content": "c: cushion \ns: sofa \nt: chair" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 308, + 296, + 383, + 353 + ], + "blocks": [ + { + "bbox": [ + 308, + 296, + 383, + 353 + ], + "lines": [ + { + "bbox": [ + 308, + 296, + 383, + 353 + ], + "spans": [ + { + "bbox": [ + 308, + 296, + 383, + 353 + ], + "type": "image", + "image_path": "805f0f28dc0b2ef5d21e412f70ee134fa5290359334ee6205cd3e74f8c18027c.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 387, + 296, + 462, + 353 + ], + "blocks": [ + { + "bbox": [ + 387, + 296, + 462, + 353 + ], + "lines": [ + { + "bbox": [ + 387, + 296, + 462, + 353 + ], + "spans": [ + { + "bbox": [ + 387, + 296, + 462, + 353 + ], + "type": "image", + "image_path": "34688127bd9d4325f162524b96a33470eb72c31e5de3b2c39e4973b953280e1b.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 399, + 356, + 452, + 365 + ], + "lines": [ + { + "bbox": [ + 399, + 356, + 452, + 365 + ], + "spans": [ + { + "bbox": [ + 399, + 356, + 452, + 365 + ], + "type": "text", + "content": "Looking for a chair" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 465, + 296, + 541, + 353 + ], + "blocks": [ + { + "bbox": [ + 465, + 296, + 541, + 353 + ], + "lines": [ + { + "bbox": [ + 465, + 296, + 541, + 353 + ], + "spans": [ + { + "bbox": [ + 465, + 296, + 541, + 353 + ], + "type": "image", + "image_path": "cba1cc57cf8037850b9b9e04a0237d740bc2ace04c8990eaf145cf36cfe9ee1a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "lines": [ + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "spans": [ + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "type": "text", + "content": "Figure 1. We present a 3D-aware ObjectNav framework along with simultaneous exploration and identification policies: " + }, + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "type": "inline_equation", + "content": "\\mathbf{A} \\rightarrow \\mathbf{B}" + }, + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "type": "text", + "content": ", the agent was guided by an exploration policy to look for its target; " + }, + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "type": "inline_equation", + "content": "\\mathbf{B} \\rightarrow \\mathbf{C}" + }, + { + "bbox": [ + 304, + 374, + 547, + 430 + ], + "type": "text", + "content": ", the agent consistently identified a target object and finally called STOP." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 436, + 546, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 436, + 546, + 581 + ], + "spans": [ + { + "bbox": [ + 304, + 436, + 546, + 581 + ], + "type": "text", + "content": "performance while suffering from lower sample efficiency and poor generalizability across datasets [3, 27]. 
Therefore, we favor modular-based methods, which usually contain the following modules: a semantic scene mapping module that aggregates the RGBD observations and the outputs from semantic segmentation networks to form a semantic scene map; an RL-based goal policy module that takes as input the semantic scene map and learns to online update a goal location; finally, a local path planning module that drives the agent to that goal. Under this design, the semantic accuracy and geometric structure of the scene map are crucial to the success of object goal navigation." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 547, + 715 + ], + "type": "text", + "content": "We observe that the existing modular-based methods mainly construct 2D maps [8, 9], scene graphs [34, 56] or neural fields [43] as their scene maps. Given that objects lie in 3D space, these scene maps are inevitably deficient in leveraging 3D spatial information of the environment comprehensively and thus have been a bottleneck for further improving object goal navigation. In contrast, forming a 3D scene representation naturally offers more accurate, spatially dense and consistent semantic predictions than its 2D counterpart, as proved by [12, 31, 45]. Hence, if the agent could take advantage of the 3D scene understanding and" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 683, + 120, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 683, + 120, + 693 + ], + "spans": [ + { + "bbox": [ + 57, + 683, + 120, + 693 + ], + "type": "text", + "content": "*Joint first authors" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 59, + 693, + 206, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 693, + 206, + 703 + ], + "spans": [ + { + "bbox": [ + 59, + 693, + 206, + 703 + ], + "type": "text", + "content": "† Corresponding author: hewang@pku.edu.cn" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 59, + 703, + 258, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 703, + 258, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 703, + 258, + 712 + ], + "type": "text", + "content": "1 Homepage: https://pku-epic.github.io/3D-Aware-ObjectNav/" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6672" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 287, + 96 + ], + "type": "text", + "content": "form a 3D semantic scene map, it is expected to advance the performance of ObjectNav." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 99, + 287, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 99, + 287, + 242 + ], + "spans": [ + { + "bbox": [ + 46, + 99, + 287, + 242 + ], + "type": "text", + "content": "However, leveraging 3D scene representation would bring great challenges to ObjectNav policy learning. First, building and querying fine-grained 3D representation across a floor-level scene requires extensive computational cost, which can significantly slow down the training of RL [7,55]. Also, 3D scene representation induces considerably more complex and high-dimensional observations to the goal policy than its 2D counterpart, leading to a lower sample efficiency and hampering the navigation policy learning [22, 57]. As a result, it is demanding to design a framework to efficiently and effectively leverage powerful 3D information for ObjectNav." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 244, + 287, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 244, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 46, + 244, + 287, + 412 + ], + "type": "text", + "content": "To tackle these challenges, we propose a novel framework composed of an online semantic point fusion module for 3D semantic scene mapping and two parallel policy networks in charge of scene exploration and object identification, along with a local path planning module. Our online semantic point fusion module extends a highly efficient online point construction algorithm [53] to enable online semantic fusion and spatial semantic consistency computation from captured RGBD sequences. This 3D scene construction empowers a comprehensive 3D scene understanding for ObjectNav. 
Moreover, compared to dense voxel-based methods [7, 55], our point-based fusion algorithm are more memory-efficient [40, 46] which makes it practically usable for floor-level navigation task. (See Figure 1)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 414, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 414, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 414, + 287, + 677 + ], + "type": "text", + "content": "Moreover, to ease the learning of navigation policy, we further propose to factorize the navigation policy into two sub-policies, namely exploration and identification. The two policies simultaneously perform to roll out an exploration goal and an identified object goal (if exist), respectively. Then the input for the local path planning module will switch between these two goals, depending on whether there exists an identified target object. More specifically, we propose a corner-guided exploration policy which learns to predict a long-term discrete goal at one of the four corners of the bounding box of the scene. These corner goals efficiently drive the agent to perceive the surroundings and explore regions where the target object is possibly settled. And for identification, a category-aware identification policy is proposed to dynamically learn a discrete confidence threshold to identify the semantic predictions for each category. Both of these policies are trained by RL in low-dimensional discrete action space. Through experiments, the simultaneous two-policy mechanism and discrete action space design dramatically reduce the difficulty in learning for 3D-aware ObjectNav and achieve better performance than existing modular-based navigation strategies [26, 35]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 679, + 287, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 679, + 287, + 704 + ], + "spans": [ + { + "bbox": [ + 47, + 679, + 287, + 704 + ], + "type": "text", + "content": "Through extensive evaluation on the public benchmarks, we demonstrate that our method performs online 3D-aware" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "text", + "content": "ObjectNav at 15 FPS while achieving the state-of-the-art performance on navigation efficiency. Moreover, our method outperforms all other modular-based methods in both efficiency and success rate with up to " + }, + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "inline_equation", + "content": "30\\mathrm{x}" + }, + { + "bbox": [ + 304, + 72, + 545, + 132 + ], + "type": "text", + "content": " times less computational cost." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 134, + 448, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 134, + 448, + 145 + ], + "spans": [ + { + "bbox": [ + 317, + 134, + 448, + 145 + ], + "type": "text", + "content": "Our main contributions include:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 157, + 545, + 268 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 317, + 157, + 545, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 157, + 545, + 180 + ], + "spans": [ + { + "bbox": [ + 317, + 157, + 545, + 180 + ], + "type": "text", + "content": "- We present the first 3D-aware framework for Object-Nav task." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 189, + 545, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 189, + 545, + 224 + ], + "spans": [ + { + "bbox": [ + 317, + 189, + 545, + 224 + ], + "type": "text", + "content": "- We build an online point-based construction and fusion algorithm for efficient and comprehensive understanding of floor-level 3D scene representation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 232, + 545, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 232, + 545, + 268 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 545, + 268 + ], + "type": "text", + "content": "- We propose a simultaneous two-policy mechanism which mitigates the problem of low sample efficiency in 3D-aware ObjectNav policy learning." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 279, + 392, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 279, + 392, + 293 + ], + "spans": [ + { + "bbox": [ + 306, + 279, + 392, + 293 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 301, + 545, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 301, + 545, + 494 + ], + "spans": [ + { + "bbox": [ + 304, + 301, + 545, + 494 + ], + "type": "text", + "content": "GoalNav with Visual Sequences. There are constantly emerging researches on object goal navigation. One line of recent works directly leverages RGBD sequences, called end-to-end RL methods [47], which tends to implicitly encode the environment and predict low-level actions. These works benefit from visual representation [29, 50], auxiliary task [51], and data augmentation [27], demonstrating strong results on object goal navigation benchmarks [1, 49]. However, aiming to learn all skills through one policy from scratch, e.g., avoiding collisions, exploration, and stopping, it's well known that end-to-end RL methods suffer from low sampling efficiency for training and limited generalizability when transferred to the real world [3, 35]. Instead, our work uses explicit map to represent the environment, which ensures our sample efficiency and also obtain more generalizability through a modular-based paradigm [1, 35]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 495, + 545, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 545, + 687 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 545, + 687 + ], + "type": "text", + "content": "GoalNav with Explicit Scene Representations. 
To ease the burden of learning directly from visual sequences, another category of methods, called modular-based methods [8,9,15,17,32], use explicit representations as a proxy for robot observations. By leveraging explicit scene representations like scene graph [34, 56] or 2D top-down map [14,35], modular-based methods benefit from the modularity and shorter time horizons. They are considered to be more sample efficient and generalizable [14, 35]. Recent progress in modular-based methods has proposed a frontier-based exploration strategy [35], a hallucinate-driven semantic mapping method [14], and novel verification stage [26]. In contrast with prior map-based works, our method utilizes 3D spatial knowledge, including 3D point semantic prediction and consistency, enabling a more comprehensive understanding of the environment." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Embodied AI tasks with 3D Scene Representation. There are considerable research leveraging 3D scene repre" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6673" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 68, + 547, + 216 + ], + "blocks": [ + { + "bbox": [ + 50, + 68, + 547, + 216 + ], + "lines": [ + { + "bbox": [ + 50, + 68, + 547, + 216 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 547, + 216 + ], + "type": "image", + "image_path": "dea7bdcc4332163e8adff0f00b879c135946d2551d13dca93986c44d9af806c4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "lines": [ + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": "Figure 2. An overview of our framework. We take in a posed RGB-D image at time step " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " and perform point-based construction algorithm to online fuse a 3D scene representation " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "(\\mathcal{M}_{3D}^{(t)})" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": ", along with a " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}^{(t)}" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " from semantics projection. 
Then, we simultaneously leverage two policies, including a corner-guided exploration policy " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "\\pi_e" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " and category-aware identification policy " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "\\pi_f" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": ", to predict a discrete corner goal " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "g_e^{(t)}" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " and a target goal " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "g_f^{(t)}" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " (if exist) respectively. Finally, the local planning module will drive the agent to the given target goal " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "g_f^{(t)}" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": " (top priority) or the corner goal " + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "inline_equation", + "content": "g_e^{(t)}" + }, + { + "bbox": [ + 46, + 224, + 547, + 285 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 296, + 289, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 296, + 289, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 296, + 289, + 464 + ], + "type": "text", + "content": "sensation on certain embodied AI tasks, e.g., object grasping [5, 10], drawer opening [30, 44]. These works leverage various routes, including reinforcement learning [13], imitation learning [44], and supervised learning [5] with 3D scene representation, such as mesh, dense grids. However, most of these 3D-aware embodied AI tasks only perform in a limited space [10, 30, 44], e.g., near one table or drawer. Under large scale environments, such as floor-level scenes in ObjectNav, the existing methods would suffer from complex 3D observation and large computational costs. In this work, we propose a framework through leveraging a point-based construction module and two dedicatedly designed exploration and identification policies, to enable a 3D-aware agnet for ObjectNav." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 479, + 104, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 104, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 104, + 491 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 501, + 248, + 513 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 501, + 248, + 513 + ], + "spans": [ + { + "bbox": [ + 47, + 501, + 248, + 513 + ], + "type": "text", + "content": "3.1. Task Definition and Method Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "text", + "content": "Object Goal Navigation Task. 
In an unknown environment, the Object Goal Navigation task requires the agent to navigate to an instance of the specified target category. For fair comparison, we follow the previous problem setting [38, 49]. As initialization, the agent is located randomly without access to a pre-built environment map, and provided with a target category ID. At each time step " + }, + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "text", + "content": ", the agent receives noiseless onboard sensor readings, including an egocentric RGB-D image and a 3-DoF pose (2D position and 1D orientation) relative to the starting of the episode. Then the agent estimates its action " + }, + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "inline_equation", + "content": "a_{t} \\in \\mathcal{A}" + }, + { + "bbox": [ + 46, + 521, + 289, + 715 + ], + "type": "text", + "content": " for movement in a discrete action space, consisting of move_forward, turn_left, turn_right and stop. Given a limited time budget of 500 steps, the agent terminates the movement until it is within 1 meter of an object of the specified category." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "spans": [ + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": "Method Overview. Figure 2 provides an overview of the proposed 3D-aware ObjectNav method. Our method takes RGBD frames along with pose sensor readings as input, to online construct a point-based scene representation " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{3D}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": " (Sec. 3.2), which is further projected to construct a 2D semantic map " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": ". Given the structured 3D points " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{3D}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": " and 2D map " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": ", our framework simultaneously performs two complementary policies (Sec. 3.3), the exploration policy and identification policy at a fixed time cycle of 25 steps. The exploration policy predicts a long-term discrete corner goal " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": ", to drive the agent to explore the surrounding environment. 
Meanwhile, the identification policy evaluates the 3D points " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{3D}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": " at each step and outputs a target object goal " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": " if its semantic prediction is confident and consistent. The " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": " will be set as the approaching target for the agent once it exists, otherwise the agent will navigate to the long-term corner goal " + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 304, + 296, + 545, + 523 + ], + "type": "text", + "content": ". An underlying local planning module will navigate the agent towards the goal using analytical path planning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 536, + 529, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 536, + 529, + 550 + ], + "spans": [ + { + "bbox": [ + 306, + 536, + 529, + 550 + ], + "type": "text", + "content": "3.2. Navigation-Driven 3D Scene Construction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 713 + ], + "type": "text", + "content": "During navigation, the 3D-aware agent will constantly obtain new observations and incrementally build a fine-grained 3D scene representation, integrating spatial and semantic information to drive the agent. However, given that our agent is deployed for a floor-level GoalNav task, it is fairly challenging to construct and leverage 3D representation across the entire scene while keeping an acceptable computational cost. Accordingly in this section, we extend an online point-based construction algorithm [53] to online organize the 3D points and further empower semantic fusion and consistency estimation. This design is tailored for a comprehensive scene understanding of the ObjectNav agent, requiring little computational resources." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6674" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": "3D Scene Representation. 
At time step " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": ", we represent the 3D scene " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{3D}" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " as the point clouds, denoted as " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "P^{(t)} = \\{(P_{l}^{(t)}, P_{s}^{(t)}, P_{c}^{(t)})\\} \\in \\mathbb{R}^{N^{(t)} \\times (M + 4)}" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "N^{(t)}" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " is the point number. For each point " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "M + 4" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " channels include the point position " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "P_{i,l}^{(t)} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": ", point semantics " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "P_{i,s}^{(t)} \\in \\mathbb{R}^M" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": " and the point-wise spatial semantic consistency information " + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "inline_equation", + "content": "P_{i,c}^{(t)} \\in \\mathbb{R}^1" + }, + { + "bbox": [ + 47, + 72, + 289, + 167 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "content": "Online 3D Point Fusion Given a new captured posed RGB image " + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "inline_equation", + "content": "I_{c}^{(t)}" + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "content": " and depth image " + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "inline_equation", + "content": "I_{d}^{(t)}" + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "content": " at time step " + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "content": ", the agent can obtain the point position " + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "inline_equation", + "content": "P_{l}^{(t)}" + }, + { + "bbox": [ + 46, + 168, + 287, + 258 + ], + "type": "text", + "content": " by back-projecting all the depth images into the 3D world space via their corresponding poses. These points will be organized by a point-based construction algorithm [53]. Here, we briefly revisit this strategy." 
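For concreteness, the back-projection that produces the point positions P_l^{(t)} from a depth frame can be sketched as follows. This is a minimal illustration rather than the authors' implementation; the pinhole intrinsics (fx, fy, cx, cy) and the 4x4 camera-to-world pose are assumed inputs provided by the simulator, and the function name backproject_depth is hypothetical.

```python
import numpy as np

def backproject_depth(depth, fx, fy, cx, cy, cam_to_world):
    """depth: (H, W) array in meters; cam_to_world: (4, 4) camera-to-world pose."""
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))   # pixel coordinates
    z = depth.reshape(-1)
    valid = z > 0                                    # drop missing depth readings
    x = (u.reshape(-1) - cx) / fx * z                # camera-frame X
    y = (v.reshape(-1) - cy) / fy * z                # camera-frame Y
    pts_cam = np.stack([x, y, z, np.ones_like(z)], axis=1)[valid]
    pts_world = (cam_to_world @ pts_cam.T).T[:, :3]  # transform into world frame
    return pts_world                                 # (N, 3) point positions P_l
```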
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": "The construction algorithm dynamically allocates occupied 3D blocks " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "\\{\\mathcal{B}_k\\}" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " along with their index " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " maintained by a tree-based method [20]. Each block " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_k" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " is defined by the boundary of constant length (10cm) along the X, Y and Z axes, e.g., " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "[X_{min}(B_k), X_{max}(B_k)]" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": ". And the points " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "p_{l,x} \\in [X_{min}(B_k), X_{max}(B_k)]" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " (the same requirement holds for Y and Z axes) be recorded by the block " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_k" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": ". Given any 3D point " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": ", the algorithm can achieve efficient neighborhood retrieval with the corresponding block index " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": ". Furthermore, a one-level octree " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_i" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " for each point " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " is constructed to obtain the fine-grained spatial information among points. Specifically, we connect each point with its nearest points in the eight quadrants of the Cartesian coordinate system (See Figure 3). Powered by this point-based construction strategy, give any point, we can efficiently querying this point with its neighbor points by blocks retrieval and octree. This algorithm for organizing 3D points can run at 15 FPS while requiring reasonable memory resources (about " + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "inline_equation", + "content": "\\sim 500" + }, + { + "bbox": [ + 46, + 259, + 289, + 498 + ], + "type": "text", + "content": " MB for one entire scene). We provide more detailed description in the supplemental material." 
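To make the block-based organization above concrete, the sketch below buckets points into dynamically allocated 10 cm blocks keyed by integer block coordinates, so that neighborhood retrieval only inspects adjacent blocks. It is a simplified stand-in for the construction algorithm of [53] (which additionally maintains a per-point one-level octree over the eight quadrants); the class name BlockIndex and the fixed search radius are illustrative assumptions.

```python
from collections import defaultdict
import numpy as np

BLOCK = 0.10  # block edge length in meters

class BlockIndex:
    def __init__(self):
        self.blocks = defaultdict(list)   # block key -> indices of contained points
        self.points = []                  # stored point positions

    def insert(self, p):
        p = np.asarray(p, dtype=float)
        idx = len(self.points)
        self.points.append(p)
        key = tuple(np.floor(p / BLOCK).astype(int))
        self.blocks[key].append(idx)      # allocate the block lazily on first use
        return idx

    def neighbors(self, p, radius=BLOCK):
        """Indices of stored points within `radius` of p, checking only nearby blocks."""
        p = np.asarray(p, dtype=float)
        key = np.floor(p / BLOCK).astype(int)
        candidates = []
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                for dz in (-1, 0, 1):
                    candidates += self.blocks.get((key[0] + dx, key[1] + dy, key[2] + dz), [])
        return [i for i in candidates if np.linalg.norm(self.points[i] - p) <= radius]
```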
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "spans": [ + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "text", + "content": "Online Semantic Fusion. With an efficient reconstruction algorithm in hand, we can directly fuse temporal information, e.g., multi-view semantic predictions, to achieve more accurate and consistent scene understanding. Specifically, any point " + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "text", + "content": " which has been captured by a sequence of RGBD frames " + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\{I_c^{(t)}, I_d^{(t)}\\}" + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "text", + "content": " could have multiple semantic predictions " + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "inline_equation", + "content": "\\{p_{i,s}^{(t)}(I_c^{(t)})\\}" + }, + { + "bbox": [ + 46, + 500, + 287, + 622 + ], + "type": "text", + "content": ". We thus propose to online aggregate the multi-view 2D semantic predictions using a max-fusion mechanism to obtain the final 3D semantic prediction:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 100, + 622, + 287, + 639 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 622, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 100, + 622, + 287, + 639 + ], + "type": "interline_equation", + "content": "p _ {i, s} ^ {(t)} = \\mathcal {N} (\\max (\\{p _ {i, s} ^ {(t)} (I _ {c} ^ {(t)})) \\})), \\tag {1}", + "image_path": "18ea30a3fc84679d4f2cb77549e8190c5cbed246f2bf8b90dc2d832921986d09.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": "where the max is performed on each semantic category, followed by a normalization " + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 46, + 642, + 288, + 714 + ], + "type": "text", + "content": " to linearly scale the probability distribution. Note that, the alternatives to fuse semantic predictions do exist, e.g. 3D convolution [19, 24], Bayesian updating [28]. 
However, directly conducting 3D convolution into such a floor-level 3D representation would in-" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 313, + 76, + 410, + 168 + ], + "blocks": [ + { + "bbox": [ + 313, + 76, + 410, + 168 + ], + "lines": [ + { + "bbox": [ + 313, + 76, + 410, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 76, + 410, + 168 + ], + "type": "image", + "image_path": "4dd993af23292078a7bce4b6271b787202a70db028a106be49d5965ab55ae618.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 325, + 182, + 391, + 192 + ], + "lines": [ + { + "bbox": [ + 325, + 182, + 391, + 192 + ], + "spans": [ + { + "bbox": [ + 325, + 182, + 391, + 192 + ], + "type": "text", + "content": "Active Navigation" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 417, + 79, + 539, + 176 + ], + "blocks": [ + { + "bbox": [ + 417, + 79, + 539, + 176 + ], + "lines": [ + { + "bbox": [ + 417, + 79, + 539, + 176 + ], + "spans": [ + { + "bbox": [ + 417, + 79, + 539, + 176 + ], + "type": "image", + "image_path": "bf02ca1f0460dfe37dee844f9bfe3371d969607e17b802ed1edfd208fa4e82b4.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 431, + 182, + 533, + 193 + ], + "lines": [ + { + "bbox": [ + 431, + 182, + 533, + 193 + ], + "spans": [ + { + "bbox": [ + 431, + 182, + 533, + 193 + ], + "type": "text", + "content": "Online Organized 3D points" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "lines": [ + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "text", + "content": "Figure 3. Illustration of online 3D point fusion. (Left) A robot takes multi-view observations during navigation. (Right) The points " + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "text", + "content": " are organized by dynamically allocated blocks " + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "text", + "content": " and perpoint octrees " + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "inline_equation", + "content": "\\mathcal{O}" + }, + { + "bbox": [ + 304, + 205, + 547, + 262 + ], + "type": "text", + "content": ", which can be used to query neighborhood points of any given point." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 269, + 545, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 269, + 545, + 353 + ], + "spans": [ + { + "bbox": [ + 304, + 269, + 545, + 353 + ], + "type": "text", + "content": "evitably lead to a huge rise of computational cost, especially in the context of learning-based policy. We find that maximizing the 2D semantic prediction can already achieve impressive improvement on semantic accuracy (see Figure 8), with higher memory efficiency and time efficiency. Similar findings have also been reported and exploited in relevant works [7, 16]." 
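A minimal sketch of the max-fusion of Eq. (1): for a point observed in several frames, the per-category maximum of the 2D semantic probabilities is taken over views and then linearly rescaled to sum to one (the normalization N). The array shapes and the example values are assumptions for illustration only.

```python
import numpy as np

def fuse_semantics(per_view_probs):
    """per_view_probs: (V, M) array, one M-way class distribution per observing view."""
    fused = per_view_probs.max(axis=0)   # category-wise max over the V views
    return fused / fused.sum()           # normalization N: rescale to a distribution

# Example: two views that disagree between the first two categories.
views = np.array([[0.7, 0.2, 0.1],
                  [0.3, 0.6, 0.1]])
print(fuse_semantics(views))             # fused per-point semantics P_s
```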
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "content": "Spatial Semantic Consistency. Based on the fact that semantic label should remain consistent for all the points in a single object, we propose to calculate the spatial semantic consistency information " + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "inline_equation", + "content": "P_{c}^{(t)}" + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "content": " as part of the navigation-driven 3D scene representation. To be specific, " + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "inline_equation", + "content": "P_{i,c}^{(t)}" + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "content": " is computed as the maximum semantic KL-divergence between point " + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "inline_equation", + "content": "P_{i}^{(t)}" + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "content": " and its octree " + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "inline_equation", + "content": "\\mathcal{O}(P_i^{(t)})" + }, + { + "bbox": [ + 304, + 355, + 546, + 445 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 450, + 545, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 450, + 545, + 468 + ], + "spans": [ + { + "bbox": [ + 315, + 450, + 545, + 468 + ], + "type": "interline_equation", + "content": "P _ {i, c} ^ {(t)} = \\max \\left(\\left\\{K L \\left(P _ {i, s} ^ {(t)}, P _ {j, s} ^ {(t)}\\right) \\mid \\forall P _ {j} ^ {(t)} \\in \\mathcal {O} \\left(P _ {i} ^ {(t)}\\right) \\right\\}\\right), \\tag {2}", + "image_path": "b8110dbafcd4ff91c5ab6a2a95e33485ffdfd5956c2a76c5b9a8838081b6be6d.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "spans": [ + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "KL" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": " denotes the KL-divergence computation, which is a statistical distance that measures the semantic probability distribution between " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "P_{i,s}^{(t)}" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "P_{j,s}^{(t)}" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": ". 
Note for point " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "P_{i}^{(t)}" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": ", if we count all its spatially close points as the neighbourhood " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(P_i^{(t)})" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": ", it could be time consuming to calculate Equation 2, and the spatially close points do not help relieve the issue of outlier points as mentioned above. Therefore, we use the pre-built octree " + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_i" + }, + { + "bbox": [ + 304, + 472, + 547, + 589 + ], + "type": "text", + "content": " to retrieve 8 nearest point in the quadrants of the Cartesian coordinate system." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 597, + 537, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 597, + 537, + 609 + ], + "spans": [ + { + "bbox": [ + 306, + 597, + 537, + 609 + ], + "type": "text", + "content": "3.3. Simultaneous Exploration and Identification" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "With the aggregated 3D information, we expect to empower a 3D-aware agent for the ObjectNav task. However, despite the efficient 3D scene representation, the agent still suffers from the complex and high-dimensional observations, leading to a lower sample efficiency in RL and hampering the navigation policy learning. Therefore, we leverage two complementary sub-policies: corner-guided exploration policy and category-aware identification policy. 
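As a concrete reading of Equation 2, the sketch below computes the per-point spatial semantic consistency as the maximum KL divergence between a point's fused class distribution and those of its retrieved neighbours (e.g. the 8 octree neighbours mentioned above). The function names and the `neighbour_idx` structure are assumptions for illustration, not the authors' code.

```python
import numpy as np

def kl_div(p, q, eps=1e-8):
    """KL(p || q) between two discrete semantic distributions."""
    p = np.clip(p, eps, 1.0)
    q = np.clip(q, eps, 1.0)
    return float(np.sum(p * np.log(p / q)))

def semantic_consistency(sem_probs, neighbour_idx):
    """Per-point consistency P_c (Eq. 2): the maximum KL divergence between a
    point's semantic distribution and those of its neighbours.

    sem_probs:     (N, m) array of per-point class probabilities p_s
    neighbour_idx: list of N index sequences (e.g. 8 octree neighbours each)
    """
    consistency = np.zeros(len(sem_probs))
    for i, nbrs in enumerate(neighbour_idx):
        if len(nbrs) == 0:
            continue
        consistency[i] = max(kl_div(sem_probs[i], sem_probs[j]) for j in nbrs)
    return consistency
```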
Each" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6675" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 70, + 126, + 148 + ], + "blocks": [ + { + "bbox": [ + 51, + 70, + 126, + 148 + ], + "lines": [ + { + "bbox": [ + 51, + 70, + 126, + 148 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 126, + 148 + ], + "type": "image", + "image_path": "0cbe640eabeedfa32b989c7e85b39f6829120654d087b08d75cbd6f2e406b8be.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 71, + 148, + 104, + 157 + ], + "lines": [ + { + "bbox": [ + 71, + 148, + 104, + 157 + ], + "spans": [ + { + "bbox": [ + 71, + 148, + 104, + 157 + ], + "type": "text", + "content": "SemExp" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 69, + 163, + 264, + 171 + ], + "lines": [ + { + "bbox": [ + 69, + 163, + 264, + 171 + ], + "spans": [ + { + "bbox": [ + 69, + 163, + 264, + 171 + ], + "type": "text", + "content": ": Goal location : Candidate corner goals to be chosen in order" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 46, + 191, + 287, + 224 + ], + "lines": [ + { + "bbox": [ + 46, + 191, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 46, + 191, + 287, + 224 + ], + "type": "text", + "content": "Figure 4. Illustration of exploration policy. (Left) Learning-based continuous global goal [9]; (Middle) Heuristic direction selection [26]; (Right, ours) Learning-based corner goal prediction." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 129, + 72, + 205, + 147 + ], + "blocks": [ + { + "bbox": [ + 129, + 72, + 205, + 147 + ], + "lines": [ + { + "bbox": [ + 129, + 72, + 205, + 147 + ], + "spans": [ + { + "bbox": [ + 129, + 72, + 205, + 147 + ], + "type": "image", + "image_path": "96a956ec95b60458f383cf3aaa15ed77b6ef496b3b4edf5cc28300e7e54ab015.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 147, + 148, + 184, + 156 + ], + "lines": [ + { + "bbox": [ + 147, + 148, + 184, + 156 + ], + "spans": [ + { + "bbox": [ + 147, + 148, + 184, + 156 + ], + "type": "text", + "content": "Stubborn" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 69, + 173, + 262, + 182 + ], + "lines": [ + { + "bbox": [ + 69, + 173, + 262, + 182 + ], + "spans": [ + { + "bbox": [ + 69, + 173, + 262, + 182 + ], + "type": "text", + "content": ": Candidate corner goals to be predicted based on learning (Ours)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 209, + 72, + 284, + 147 + ], + "blocks": [ + { + "bbox": [ + 209, + 72, + 284, + 147 + ], + "lines": [ + { + "bbox": [ + 209, + 72, + 284, + 147 + ], + "spans": [ + { + "bbox": [ + 209, + 72, + 284, + 147 + ], + "type": "image", + "image_path": "b37b22dbdb379eff69a08afc35a576b4948cb1675fbd4bf6fbbbb4e91ad9cd31.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 148, + 256, + 155 + ], + "lines": [ + { + "bbox": [ + 236, + 148, + 256, + 155 + ], + "spans": [ + { + "bbox": [ + 236, + 148, + 256, + 155 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 231, + 287, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 231, + 287, + 278 + ], + "spans": [ + { + "bbox": [ + 46, + 231, + 287, + 278 + ], + "type": "text", + "content": "policy learns to predict low-dimensional discrete actions and outputs a goal location to navigate the agent, resulting in a strong performance while requiring less training time. We will detail the two policies below." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "spans": [ + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": "Observation Space. At each time step " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": ", both policies take fine-grained 3D observation " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "x_{3D}^{(t)} = \\{P^{(t)} \\in ((4 + m) \\times N)\\}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " based on 3D scene representation " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{3D}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": ". 
Here, the " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " indicates the point number (we sample 4096 points) and the " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "m + 4" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " channels are comprised of point position " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "p_l^{(t)} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": ", fused semantic predictions " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "p_s^{(t)} \\in \\mathbb{R}^m" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " and spatial semantic consistency " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "p_c^{(t)} \\in \\mathbb{R}^1" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": ". Following existing works [8, 9], we use an additional egocentric 2D map " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " for exploration policy and the local path planning module, which is directly obtained by a project-to-ground operation. More detailedly, for 2D observation " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "x_{2D}^{(t)} \\in ((2 + m) \\times M \\times M)" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " from 2D map " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": ", the first two channels represent obstacles and explored area, and the rest of the channels each corresponds to an object category. Here, " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{2D}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " (in a resolution of " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "M = 240" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "20\\mathrm{cm}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " grids) is constructed to give a large perception view of the scene, while 3D points perform as a fine-grained observation of objects. In addition to the scene representations, we also pass the goal object category index " + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "inline_equation", + "content": "o_{ID}" + }, + { + "bbox": [ + 46, + 281, + 287, + 517 + ], + "type": "text", + "content": " as the side input to both policies." 
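To make the observation shapes above concrete, the following sketch packs the sampled 3D points into the ((4 + m) x N) tensor and passes the ((2 + m) x M x M) egocentric map through unchanged. All names (`build_observations`, argument names) are hypothetical, and the sampling scheme is only an assumption consistent with the description.

```python
import numpy as np

def build_observations(points_xyz, points_sem, points_cons, ego_map, goal_id,
                       n_points=4096):
    """Assemble x_3D, x_2D and the goal index described above (a sketch).

    points_xyz:  (P, 3) fused point positions p_l
    points_sem:  (P, m) fused semantic predictions p_s
    points_cons: (P, 1) spatial semantic consistency p_c
    ego_map:     (2 + m, M, M) egocentric map (obstacles, explored, per class)
    goal_id:     integer index of the target category
    """
    # Sample a fixed number of points so that x_3D is ((4 + m) x N).
    idx = np.random.choice(len(points_xyz), n_points,
                           replace=len(points_xyz) < n_points)
    x_3d = np.concatenate([points_xyz[idx], points_sem[idx], points_cons[idx]],
                          axis=1).T            # -> ((4 + m), N)
    x_2d = ego_map                             # -> ((2 + m), M, M), e.g. M = 240
    return x_3d, x_2d, goal_id
```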
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "content": "Corner-Guided Exploration Policy. The exploration policy attempts to guide the agent to explore and perceive the surrounding environment where it could access any instance of the target object category. We observe that existing learning-based exploration policies predict goal locations over the 2D map in continuous or large-dimensional discrete action space (Figure 4 Left), suffering from low sample efficiency. Therefore, we define a corner-guided exploration policy " + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "inline_equation", + "content": "g_{e} = \\pi_{e}(x_{3D}, x_{2D}, o_{ID}; \\theta_{e})" + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "content": " that predicts a corner goal " + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "content": " to drive the agent (Figure 4 Right). Here, the " + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "inline_equation", + "content": "\\theta_{e}" + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "content": " indicates the parameters of the policy, and " + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 46, + 519, + 287, + 674 + ], + "type": "text", + "content": " is one of the four pre-defined corner goals {Top Left, Top Right, Bottom Left, Bottom Right} of the 2D map." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Compared to predicting goals in a continuous or high-dimensional action space, learning to predict the four corner goals significantly reduces the learning difficulty. More" + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 380, + 142 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 380, + 142 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 380, + 142 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 380, + 142 + ], + "type": "image", + "image_path": "8d59a1ce8c8b33399ddb1177464b126e5b36c9cafcde4a1457f6d17ebdfa405a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 339, + 144, + 349, + 152 + ], + "lines": [ + { + "bbox": [ + 339, + 144, + 349, + 152 + ], + "spans": [ + { + "bbox": [ + 339, + 144, + 349, + 152 + ], + "type": "text", + "content": "(A)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "lines": [ + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "text", + "content": "Figure 5. Illustration of identification policy. 
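Returning to the corner-guided exploration policy above, the discrete action it predicts can be mapped to a goal cell on the egocentric 2D map as in the sketch below; the (row, col) indexing and the function name are assumptions for illustration.

```python
def corner_goal(action, map_size=240):
    """Map a discrete exploration action in {0, 1, 2, 3} to one of the four
    pre-defined corner goals of the M x M egocentric map (a sketch)."""
    last = map_size - 1
    corners = {
        0: (0, 0),        # Top Left
        1: (0, last),     # Top Right
        2: (last, 0),     # Bottom Left
        3: (last, last),  # Bottom Right
    }
    return corners[action]
```

Restricting the output to four cells keeps the policy's action space tiny, which is the sample-efficiency argument made above.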
From " + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "inline_equation", + "content": "\\mathrm{A} \\rightarrow \\mathrm{B}" + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "text", + "content": ", fused points are filtered by the category-aware predicted threshold " + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "text", + "content": ". From " + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "inline_equation", + "content": "\\mathrm{B} \\rightarrow \\mathrm{C}" + }, + { + "bbox": [ + 304, + 160, + 545, + 205 + ], + "type": "text", + "content": ", the policy further checks the spatial label consistency of the points and identifies the target goal." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 380, + 71, + 462, + 142 + ], + "blocks": [ + { + "bbox": [ + 380, + 71, + 462, + 142 + ], + "lines": [ + { + "bbox": [ + 380, + 71, + 462, + 142 + ], + "spans": [ + { + "bbox": [ + 380, + 71, + 462, + 142 + ], + "type": "image", + "image_path": "3ce7fe35ee9e5c7a98f90bba72dcd394208b78781821d48a5b857660c17fa076.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 424, + 144, + 433, + 152 + ], + "lines": [ + { + "bbox": [ + 424, + 144, + 433, + 152 + ], + "spans": [ + { + "bbox": [ + 424, + 144, + 433, + 152 + ], + "type": "text", + "content": "(B)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 462, + 71, + 544, + 142 + ], + "blocks": [ + { + "bbox": [ + 462, + 71, + 544, + 142 + ], + "lines": [ + { + "bbox": [ + 462, + 71, + 544, + 142 + ], + "spans": [ + { + "bbox": [ + 462, + 71, + 544, + 142 + ], + "type": "image", + "image_path": "dfc8fd98c0ff490f699f7c6635eb637f8d33b5581d4630b8830c9e330e75ae9c.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 505, + 144, + 514, + 152 + ], + "lines": [ + { + "bbox": [ + 505, + 144, + 514, + 152 + ], + "spans": [ + { + "bbox": [ + 505, + 144, + 514, + 152 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 214, + 545, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 214, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 304, + 214, + 545, + 309 + ], + "type": "text", + "content": "over, as noted by previous studies [4, 26], the corner-goal-based exploration strategy exhibits the capacity to achieve efficient exploration through avoiding back-and-forth pacing. Superior to using other heuristic corner goal exploration strategies (Figure 4 Middle), our agent can learn from the 3D scene priors to behave more intelligently. Demonstrations of our corner-guided exploration can be found in the attached video." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 312, + 546, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 312, + 546, + 444 + ], + "spans": [ + { + "bbox": [ + 304, + 312, + 546, + 444 + ], + "type": "text", + "content": "Category-Aware Identification Policy. During navigation, the agent consistently makes semantic predictions to identify an instance of target object category. 
Most works [9, 14] simply use a preset hard confidence threshold for target identification. However, this strategy is inherently suboptimal due to the considerable variability in semantic prediction results across different categories and observation angles. As a result, a preset threshold would be unable to adequately adapt to the ever-changing nature of these scenarios. Also, it ignores the consistency of the semantic prediction in 3D space." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "spans": [ + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": "To tackle this issues, we propose to leverage both dynamic confidence threshold and spatial semantic label consistency for target identification. We define a policy " + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "inline_equation", + "content": "s = \\pi_f(x_{3D}, o_{ID}; \\theta_f)" + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": " which takes the 3D observation " + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "inline_equation", + "content": "x_{3D}" + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": " and target category index " + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "inline_equation", + "content": "o_{ID}" + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": " and outputs a threshold-indicating action " + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "inline_equation", + "content": "s \\in \\{0, 1, \\dots, 9\\}" + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": ". And the dynamic threshold " + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 446, + 545, + 530 + ], + "type": "text", + "content": " can be obtained by:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 374, + 537, + 545, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 537, + 545, + 562 + ], + "spans": [ + { + "bbox": [ + 374, + 537, + 545, + 562 + ], + "type": "interline_equation", + "content": "\\tau = \\tau_ {l o w} + s \\cdot \\frac {1 - \\tau_ {l o w}}{1 0}, \\tag {3}", + "image_path": "e9a7449ee07261fcbb0dedf6c8e414ff6951d00ce1dbcb7d03b37552d558e005.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": "where the " + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\tau_{low}" + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": " is set to 0.5 in our implementation for a threshold range " + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\tau \\in [0.5,0.95]" + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": " will be used to dynamically identify the points belonging to the target object (Figure 5 Middle). 
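A minimal sketch of Equation 3 and the resulting point filtering, assuming per-point class probabilities are available as a NumPy array (function names are hypothetical):

```python
import numpy as np

def dynamic_threshold(s, tau_low=0.5):
    """Eq. 3: map the discrete action s in {0, ..., 9} to a confidence
    threshold tau in [tau_low, 0.95] when tau_low = 0.5."""
    assert 0 <= s <= 9
    return tau_low + s * (1.0 - tau_low) / 10.0

def filter_target_points(sem_probs, goal_id, s):
    """Keep only points whose fused probability for the goal category exceeds
    the dynamically predicted threshold (Figure 5, A -> B)."""
    tau = dynamic_threshold(s)
    return np.where(sem_probs[:, goal_id] > tau)[0]
```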
It is worth mentioning that this policy also utilizes a low-dimensional discrete action space, which is fairly easy for the agent to learn." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": "To obtain the final target goal " + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": ", our method further checks the spatial semantic label consistency. Specifically, we use the points " + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\{p_i | (p_i, p) \\in \\mathcal{O}_p\\}" + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": " connected by the perpoint octree " + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "inline_equation", + "content": "\\mathcal{O}_p" + }, + { + "bbox": [ + 304, + 642, + 545, + 712 + ], + "type": "text", + "content": " to approximately represent the 3D surface of the target object. Our insight is that the points along the target's surface should have consistent semantic" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6676" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": "labels. Therefore, we only identify those points who have at least 2-ring neighbors across the octrees " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\{p_i|(p_i,p_j)\\in \\mathcal{O}_{p_j}|(p_j,p)\\in \\mathcal{O}_p\\}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " as the target object goal " + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 46, + 72, + 287, + 133 + ], + "type": "text", + "content": " (Figure 5 Right). See Figure 5 for visualized illustration and more details can be found in supplemental material." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "spans": [ + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "content": "Local Planning Module. The goals " + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "content": " from two polices will be consistently updated during navigation. 
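The spatial label-consistency check that turns the filtered points into the final target goal can be approximated as below: a candidate point is kept only if points reachable through two hops of octree neighbours also carry the goal label. This is a sketch of one plausible reading of the 2-ring criterion above; the `min_ring2` parameter and all names are assumptions, not taken from the paper.

```python
import numpy as np

def consistent_target_points(labels, neighbour_idx, goal_id, min_ring2=2):
    """Sketch of the spatial label-consistency check (Figure 5, B -> C):
    keep a candidate only if at least `min_ring2` points in its 2-ring
    neighbourhood (neighbours of neighbours) share the goal label."""
    keep = []
    for i in np.where(labels == goal_id)[0]:
        ring2 = {k for j in neighbour_idx[i] for k in neighbour_idx[j] if k != i}
        if sum(labels[k] == goal_id for k in ring2) >= min_ring2:
            keep.append(i)
    return np.array(keep, dtype=int)
```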
Our method will preferentially utilize the target goal " + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "inline_equation", + "content": "g_{f}" + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "content": " if it exists, otherwise take the long-term corner goal " + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "inline_equation", + "content": "g_{e}" + }, + { + "bbox": [ + 46, + 134, + 287, + 230 + ], + "type": "text", + "content": " to explore. To navigate to the given location, we use the Fast Marching Method [42] to analytically plan the shortest path from the agent location. The agent then takes deterministic actions to follow this path." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": "Rewards. For the exploration policy, we share a similar reward design as [1, 51]. The agent receives a sparse success reward " + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "inline_equation", + "content": "r_{success} = 2.5" + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": ", a slack reward " + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "inline_equation", + "content": "r_{slack} = 10^{-2}" + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": " and an exploration reward " + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "inline_equation", + "content": "r_{explore}" + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": ". The exploration reward is a dense reward, defined by the number of new inserted point " + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "inline_equation", + "content": "n_p^{new}" + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "inline_equation", + "content": "r_{explore} = n_p^{new} \\times 10^{-3}" + }, + { + "bbox": [ + 46, + 232, + 288, + 353 + ], + "type": "text", + "content": ". The slack reward and exploration reward encourage the agent to take the most effective direction to the unobserved area. And for the identification policy, we combine the same success reward and slack reward borrowed from the exploration policy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 369, + 128, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 369, + 128, + 383 + ], + "spans": [ + { + "bbox": [ + 47, + 369, + 128, + 383 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 391, + 157, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 391, + 157, + 404 + ], + "spans": [ + { + "bbox": [ + 47, + 391, + 157, + 404 + ], + "type": "text", + "content": "4.1. Experiment Setup." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 411, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 411, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 411, + 287, + 604 + ], + "type": "text", + "content": "We perform experiments on the Matterport3D (MP3D) [6] and Gibson [48] datasets with the Habitat simulator [39]. Both Gibson and MP3D contain photorealistic 3D reconstructions of real-world environments. 
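The exploration reward described above combines a sparse success bonus, a per-step slack term, and a dense term proportional to the number of newly inserted points. A minimal sketch follows; the sign of the slack term is an assumption (the text gives only its magnitude, 10^-2), and the function name is hypothetical.

```python
def exploration_reward(n_new_points, success, r_success=2.5,
                       r_slack=-1e-2, scale_explore=1e-3):
    """Per-step reward sketch for the exploration policy:
    r = r_slack + n_new_points * 1e-3, plus r_success on success."""
    r = r_slack + n_new_points * scale_explore   # slack sign assumed negative
    if success:
        r += r_success
    return r
```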
For Gibson, we use 25 train / 5 val scenes from the Gibson tiny split. And we follow the same setting as in [9, 35] where we consider 6 goal categories, including chair, couch, potted plant, bed, toilet and TV. For MP3D, we use the standard split of 61 train / 11 val scenes with Habitat ObjectNav dataset [38], which consists of 21 goal categories (the full list can be found in the supplemental material). Note that, the RGB-D and pose readings are noise-free from simulation (follow the definition of [1]). Estimation of the pose from noisy sensor readings is out of the scope of this work and can be addressed if necessary, by incorporating off-the-shelf robust odometry [52, 54]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 714 + ], + "type": "text", + "content": "Implementation Details. On MP3D, we use the same pretrained 2D semantic model RedNet [21] as [35,51]. On Gibson, we leverage a Mask R-CNN [18], which is trained with COCO dataset [23]. For each frame, we randomly sample 512 points for point-based construction. Moreover, we use PointNet [33] and fully convolutional networks [25] to obtain the feature of 3D points and the 2D map, respectively. During training, we sample actions every 25 steps and use the Proximal Policy Optimization (PPO) [41] for both ex" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 70, + 547, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 70, + 547, + 125 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 547, + 125 + ], + "type": "text", + "content": "Table 1. ObjectNav validation results on Gibson and MP3D. Our method is trained with 5 seeds and report the averaged performance. The best of all methods and the best of all modular-based methods are highlighted in **bold** and **underline** colors, respectively. Note that Habitat-Web takes use of extra data." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 310, + 134, + 549, + 255 + ], + "blocks": [ + { + "bbox": [ + 310, + 134, + 549, + 255 + ], + "lines": [ + { + "bbox": [ + 310, + 134, + 549, + 255 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 549, + 255 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Method</th><th colspan="3">Gibson (val)</th><th colspan="3">Matterport3D (val)</th></tr>
<tr><th>SPL(%)↑</th><th>Succ.(%)↑</th><th>DTS(m)↓</th><th>SPL(%)↑</th><th>Succ.(%)↑</th><th>DTS(m)↓</th></tr>
<tr><td>DD-PPO [47]</td><td>10.7</td><td>15.0</td><td>3.24</td><td>1.8</td><td>8.0</td><td>6.90</td></tr>
<tr><td>Red-Rabbit [51]</td><td>-</td><td>-</td><td>-</td><td>7.9</td><td>34.6</td><td>-</td></tr>
<tr><td>THDA [27]</td><td>-</td><td>-</td><td>-</td><td>11.1</td><td>28.4</td><td>5.58</td></tr>
<tr><td>Habitat-Web [36]</td><td>-</td><td>-</td><td>-</td><td>10.2</td><td>35.4</td><td>-</td></tr>
<tr><td>FBE [37]</td><td>28.3</td><td>64.3</td><td>1.78</td><td>7.2</td><td>22.7</td><td>6.70</td></tr>
<tr><td>ANS [8]</td><td>34.9</td><td>67.1</td><td>1.66</td><td>9.2</td><td>27.3</td><td>5.80</td></tr>
<tr><td>L2M* [14]</td><td>-</td><td>-</td><td>-</td><td>11.0</td><td>32.1</td><td>5.12</td></tr>
<tr><td>SemExp* [9]</td><td>39.6</td><td>71.7</td><td>1.39</td><td>10.9</td><td>28.3</td><td>6.06</td></tr>
<tr><td>Stubborn* [26]</td><td>-</td><td>-</td><td>-</td><td>13.5</td><td>31.2</td><td>5.01</td></tr>
<tr><td>PONI [35]</td><td>41.0</td><td>73.6</td><td>1.25</td><td>12.1</td><td>31.8</td><td>5.10</td></tr>
<tr><td>Ours</td><td>42.1</td><td>74.5</td><td>1.16</td><td>14.6</td><td>34.0</td><td>4.74</td></tr>
</table>
", + "image_path": "bc2a0d0e9cf1e617aa5de57cb0ac57139a98cec2e8d5419db7e9437bf719bb3a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 277, + 549, + 341 + ], + "blocks": [ + { + "bbox": [ + 318, + 258, + 533, + 269 + ], + "lines": [ + { + "bbox": [ + 318, + 258, + 533, + 269 + ], + "spans": [ + { + "bbox": [ + 318, + 258, + 533, + 269 + ], + "type": "text", + "content": "Table 2. ObjectNav validation results on MP3D-L2M [14]." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 277, + 549, + 341 + ], + "lines": [ + { + "bbox": [ + 310, + 277, + 549, + 341 + ], + "spans": [ + { + "bbox": [ + 310, + 277, + 549, + 341 + ], + "type": "table", + "html": "
<table>
<tr><th></th><th colspan="4">MP3D-L2M</th></tr>
<tr><th>Method</th><th>SPL(%)↑</th><th>SoftSPL(%)↑</th><th>Succ.(%)↑</th><th>DTS(m)↓</th></tr>
<tr><td>SemExp [9]</td><td>16.5</td><td>-</td><td>28.1</td><td>4.848</td></tr>
<tr><td>L2M [14]</td><td>14.8</td><td>20.0</td><td>34.8</td><td>3.669</td></tr>
<tr><td>Ours</td><td>21.2</td><td>30.5</td><td>40.2</td><td>3.278</td></tr>
</table>
", + "image_path": "3a88834df89b0f622787e6bf92c0ccd9b8a0de45743748289a90e6a9a89925c8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 352, + 545, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 352, + 545, + 375 + ], + "spans": [ + { + "bbox": [ + 305, + 352, + 545, + 375 + ], + "type": "text", + "content": "ploration and identification policies. More implementation details can be found in the supplemental material." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 378, + 545, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 486 + ], + "type": "text", + "content": "Evaluation Metrics. Following existing works [2, 14, 35], we adopt the following evaluation metrics: 1) SPL: success weighted by path length. It measures the efficiency of the agent over oracle path length, which serves as the primary evaluation metric for Habitat Challenge [49]. 2) Success rate: the percentage of successful episodes 3) Soft SPL: a softer version of SPL measure the progress towards the goal (even with 0 success). 4) DTS: geodesic distance (in m) to the success at the end of the episode." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 488, + 545, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 488, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 304, + 488, + 545, + 643 + ], + "type": "text", + "content": "Baselines. We consider mainstream baselines in the ObjectNav task. For end-to-end RL methods, we cover DD-PPO [47], Red-Rabiit [51], THDA [27], and Habiat-Web [36]. For modular based methods, we cover FBE [37], ANS [8], L2M [14], SemExp [9], Stubborn [26] and PONI [35]. Note that, some works use additional data to improve the performance, e.g. Habitat-web leverages human demonstration trajectories, and THDA utilizes data augmentation. It is challenging to compare all the methods fairly. Therefore, we are particularly interested in the three most relevant baselines: SemExp, Stubborn, and PONI. These three methods share the same 2D semantic predictors [18, 21] as our method." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 656, + 362, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 656, + 362, + 668 + ], + "spans": [ + { + "bbox": [ + 306, + 656, + 362, + 668 + ], + "type": "text", + "content": "4.2. Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 677, + 545, + 713 + ], + "type": "text", + "content": "Comparison on MP3D and Gibson. 
We evaluate our approach on MP3D (val) and Gibson (val) with other baselines, including end-to-end RL(rows 1 - 4) and modular" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6677" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 68, + 521, + 300 + ], + "blocks": [ + { + "bbox": [ + 75, + 68, + 521, + 300 + ], + "lines": [ + { + "bbox": [ + 75, + 68, + 521, + 300 + ], + "spans": [ + { + "bbox": [ + 75, + 68, + 521, + 300 + ], + "type": "image", + "image_path": "bcb7f3863f2cabb911e2b285896d351ff791cfb97144ef3a5e95e7aa36afe1c4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "lines": [ + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "spans": [ + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "type": "text", + "content": "Figure 6. An qualitative visualization of the trajectory of the proposed method. We visualize an episode from MP3D where an agent is expected to find a bed. The semantic prediction " + }, + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "type": "inline_equation", + "content": "p_{s}" + }, + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "type": "text", + "content": " and spatial semantic consistency " + }, + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "type": "inline_equation", + "content": "p_{c}" + }, + { + "bbox": [ + 46, + 308, + 547, + 343 + ], + "type": "text", + "content": " of points are visualized on the left. During navigation, the agent can successfully dismiss the wrong prediction and approach and finally call stop around the target object." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 392, + 280, + 472 + ], + "blocks": [ + { + "bbox": [ + 47, + 350, + 288, + 384 + ], + "lines": [ + { + "bbox": [ + 47, + 350, + 288, + 384 + ], + "spans": [ + { + "bbox": [ + 47, + 350, + 288, + 384 + ], + "type": "text", + "content": "Table 3. Comparison of different exploration policies. Here, all methods share the same identification strategy from [9] for fair comparison." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 392, + 280, + 472 + ], + "lines": [ + { + "bbox": [ + 52, + 392, + 280, + 472 + ], + "spans": [ + { + "bbox": [ + 52, + 392, + 280, + 472 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>SPL(%)</th><th>Succ.(%)</th><th>DTS(m)</th></tr>
<tr><td>Learn continuous goal</td><td>11.1</td><td>28.6</td><td>6.354</td></tr>
<tr><td>Learn dense grid goal</td><td>12.7</td><td>29.5</td><td>5.635</td></tr>
<tr><td>Learn 8 corner goals</td><td>12.9</td><td>30.7</td><td>5.112</td></tr>
<tr><td>Heuristic 4 corner goals</td><td>13.5</td><td>33.0</td><td>4.995</td></tr>
<tr><td>Learn 4 corner goals (Ours)</td><td>13.9</td><td>33.5</td><td>4.931</td></tr>
</table>
", + "image_path": "14e75e202f1f9ffb964f33cea9680870e4fd824181d058ed829026bcc38b801b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 483, + 287, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 287, + 698 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 287, + 698 + ], + "type": "text", + "content": "based methods (rows 5 - 10). Note that, SemExp and Stubborn did not report the results on MP3D validation, while L2M uses a self-made dataset MP3D-L2M based on MP3D and tests fewer categories than what we do. We therefore faithfully provide the results, denoted with *, by evaluating with their public available code. The results are demonstrated in Table 1. On both datasets, our method achieves the state-of-the-art ObjetNav efficiency (SPL) among all methods (2.6% higher on Gibson dataset and 8.1% higher on MP3D). For the success rate, our method achieves the best results among all modular-based methods, showing comparable performance with additional annotation methods THAD [27] and Habitat-web [36]. Especially, compared with the modular-based methods, SemExp, Stubborn, and PONI, which share the same 2D semantic predictor [21] as ours, the results fairly demonstrate the superiority of our framework on both efficiency and success rate. We also provide the results validated on MP3D-L2M in Table 2." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 700, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 700, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 59, + 700, + 287, + 713 + ], + "type": "text", + "content": "We also provide a qualitative visualization of MP3D" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 311, + 370, + 542, + 437 + ], + "blocks": [ + { + "bbox": [ + 323, + 350, + 528, + 363 + ], + "lines": [ + { + "bbox": [ + 323, + 350, + 528, + 363 + ], + "spans": [ + { + "bbox": [ + 323, + 350, + 528, + 363 + ], + "type": "text", + "content": "Table 4. Comparison on different identification policies." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 311, + 370, + 542, + 437 + ], + "lines": [ + { + "bbox": [ + 311, + 370, + 542, + 437 + ], + "spans": [ + { + "bbox": [ + 311, + 370, + 542, + 437 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Method</th><th colspan="2">Type</th><th rowspan="2">SPL(%)</th><th rowspan="2">Succ.(%)</th><th rowspan="2">DTS(m)</th></tr>
<tr><th>Repr.</th><th>Thre.</th></tr>
<tr><td rowspan="2">Deterministic</td><td>2D</td><td>0.85</td><td>12.8</td><td>30.1</td><td>5.151</td></tr>
<tr><td>3D</td><td>0.85</td><td>13.8</td><td>32.5</td><td>4.987</td></tr>
<tr><td>Learning (Ours)</td><td>3D</td><td>-</td><td>14.6</td><td>34.0</td><td>4.749</td></tr>
</table>
", + "image_path": "990517034afe38f74e48dc64e0e91f3423a07cd1e151a70544794d503b2e24ad.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 457, + 545, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 457, + 545, + 505 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 545, + 505 + ], + "type": "text", + "content": "episodes in Figure 6. Here, our method online updates the semantic prediction and successfully dismisses the wrong target goal. For more qualitative results, please refer to the supplemental material." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 507, + 546, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 507, + 546, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 507, + 546, + 686 + ], + "type": "text", + "content": "Comparison on Exploration Policy. We conduct an experiment to verify the efficiency of our corner-guided exploration policy on MP3D. To remove the effect of the 2D semantic predictor and identification policy, all competitors share the same semantic predictor and a heuristic identification policy proposed in SemExp [9]. The results are reported in Table 3. Our corner-guided exploration policy outperforms the mainstream existing methods, including learning-based ones [8, 14] and heuristic ones [26]. Our findings indicate that the best performance is achieved through learning to predict discrete corner goals from the four corners of the scene. This suggests that the four-corner design, which benefits from a small, discrete action space, is already capable of efficiently guiding the agent in exploring the environment." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 715 + ], + "type": "text", + "content": "Comparison on Identification Policy. Another critical challenge in OjectNav is how to properly identify an in-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6678" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 281, + 198 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 281, + 198 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 281, + 198 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 281, + 198 + ], + "type": "image", + "image_path": "2ce05c5b20cb1e350538777bac655512bd833d2c7528abbb36a22e5aeb2c4cd4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 208, + 288, + 242 + ], + "lines": [ + { + "bbox": [ + 46, + 208, + 288, + 242 + ], + "spans": [ + { + "bbox": [ + 46, + 208, + 288, + 242 + ], + "type": "text", + "content": "Figure 7. An comparison of predicted threshold distribution between different categories by our category-aware policy. We report the ratio of the each predicted threshold." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 251, + 287, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 287, + 407 + ], + "type": "text", + "content": "stance of target object category. Therefore, We evaluate our identification policy on MP3D along with other identifying strategies, including a 2D frame-based policy adopted in [9] and 3D point-based methods proposed by our approach. The results are shown in Table 4. We observe a performance improvement (rows 1 - 2) by simply leveraging 3D point-based construction and fusion algorithm. It can demonstrate that the multi-view observations provide more accurate semantic prediction, which effectively reduces false positive prediction (see examples in Figure 8). Moreover, our category-aware identification policy, through predicting dynamic threshold, demonstrates an even better performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "spans": [ + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "text", + "content": "To further investigate the effect of our identification policy, We conduct a break down study in Figure 7 by plotting the distribution of predicted semantic confidence thresholds. Specifically, we plot the distribution of three different categories (table, cushion, plant). For a relatively easy-to-recognize category, such as table with " + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "inline_equation", + "content": "52.6\\%" + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "text", + "content": " success rate (SR), our policy predict a broad threshold distribution. However, for more challenging categories, such as cushion " + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "inline_equation", + "content": "(36.9\\%)" + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "text", + "content": " SR) and plant " + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "inline_equation", + "content": "(16.1\\%)" + }, + { + "bbox": [ + 46, + 410, + 287, + 566 + ], + "type": "text", + "content": " SR), the policy tends to be more conservative through setting a higher threshold. The results demonstrate the category-aware characteristic of our identification policy which adapts well to different difficulty levels across categories." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 567, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 567, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 567, + 287, + 675 + ], + "type": "text", + "content": "Ablation Study. We also perform an ablation study to verify the effectiveness of different components of our method. The results are demonstrated in Table 5. The cooperation of the 2D top-down map and 3D points (row 4) shows significant improvement by incorporating extensive scene perception (in 2D) and fine-grained object perception (in 3D). Moreover, rows (3-4) and (4-5) proved the effectiveness of leveraging consistency information and the identification policy, respectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Analysis of Computational Cost. Our framework is extremely memory efficient, which requires about 0.5GB for one scene, and can perform online construction and" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 311, + 113, + 527, + 200 + ], + "blocks": [ + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "lines": [ + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "type": "text", + "content": "Table 5. Ablation study of main components in our method. The pos. indicates the semantic predictions " + }, + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "type": "inline_equation", + "content": "p_{s}" + }, + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "type": "text", + "content": ", KL indicates the spatial semantic consistency " + }, + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "type": "inline_equation", + "content": "p_{c}" + }, + { + "bbox": [ + 305, + 60, + 547, + 105 + ], + "type": "text", + "content": " and the I. policy indicates the usage of the proposed identification policy." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 311, + 113, + 527, + 200 + ], + "lines": [ + { + "bbox": [ + 311, + 113, + 527, + 200 + ], + "spans": [ + { + "bbox": [ + 311, + 113, + 527, + 200 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">2D map</th><th colspan="2">3D points</th><th rowspan="2">I. Policy</th><th rowspan="2">SPL(%)</th><th rowspan="2">Succ.(%)</th><th rowspan="2">DTS(m)</th></tr>
<tr><th>Pos.</th><th>KL</th></tr>
<tr><td></td><td></td><td></td><td></td><td>11.2</td><td>29.6</td><td>6.213</td></tr>
<tr><td></td><td></td><td></td><td></td><td>13.0</td><td>32.3</td><td>5.769</td></tr>
<tr><td></td><td></td><td></td><td></td><td>13.7</td><td>33.8</td><td>5.620</td></tr>
<tr><td></td><td></td><td></td><td></td><td>13.9</td><td>33.5</td><td>4.931</td></tr>
<tr><td></td><td></td><td></td><td></td><td>14.6</td><td>34.0</td><td></td></tr>
</table>
", + "image_path": "1359a4878b9c18cc729b6fbe12c2065e9b39efcf6ce8c0c82f5c7ee1b8c6c147.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 326, + 211, + 527, + 384 + ], + "blocks": [ + { + "bbox": [ + 326, + 211, + 527, + 384 + ], + "lines": [ + { + "bbox": [ + 326, + 211, + 527, + 384 + ], + "spans": [ + { + "bbox": [ + 326, + 211, + 527, + 384 + ], + "type": "image", + "image_path": "8013c6b051e07ee2ed1541e3ef95bc29eace7f1ff50d29a9bb79a210aaa71363.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 394, + 539, + 405 + ], + "lines": [ + { + "bbox": [ + 310, + 394, + 539, + 405 + ], + "spans": [ + { + "bbox": [ + 310, + 394, + 539, + 405 + ], + "type": "text", + "content": "Figure 8. Visualization of the results of online 3D point fusion." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 415, + 545, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 415, + 545, + 499 + ], + "spans": [ + { + "bbox": [ + 305, + 415, + 545, + 499 + ], + "type": "text", + "content": "semantic fusion at a frame rate of 15 FPS. Moreover, our method requires only 48 GPU hours to train a 3D-aware agent on MP3D dataset to achieve the SOTA performance among all modular-based methods. This is significantly faster (30x) than other existing reinforcement learning based methods [9, 51], and is comparable to supervised learning modular-based methods [35]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 521, + 378, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 521, + 378, + 533 + ], + "spans": [ + { + "bbox": [ + 306, + 521, + 378, + 533 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 544, + 545, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 651 + ], + "type": "text", + "content": "In this work, we present a 3D-aware framework for object goal navigation. Our method is based on a 3D point-based construction algorithm to observe the 3D scenes and simultaneously perform exploration and identification policies to navigate the agent. Our method achieve SOTA performance among all modular-based methods, while requiring less training time. In the future, we would like to exploit this 3D-aware framework in other embodied AI tasks, e.g. mobile manipulation, robotic nurses." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 713 + ], + "type": "text", + "content": "Acknowledgements. We thank anonymous reviewers for their valuable suggestions. This work was supported by National Key Research and Development Program of China (2018AAA0102200), NSFC (62132021), and Beijing Academy of Artificial Intelligence (BAAI)." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6679" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 93, + 288, + 714 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 93, + 287, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 93, + 287, + 148 + ], + "spans": [ + { + "bbox": [ + 53, + 93, + 287, + 148 + ], + "type": "text", + "content": "[1] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. ObjectNav Revisited: On Evaluation of Embodied Agents Navigating to Objects. In arXiv:2006.13171, 2020. 2, 6, 13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 149, + 288, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 149, + 288, + 204 + ], + "spans": [ + { + "bbox": [ + 53, + 149, + 288, + 204 + ], + "type": "text", + "content": "[2] Dhruv Batra, Aaron Gokaslan, Aniruddha Kembhavi, Oleksandr Maksymets, Roozbeh Mottaghi, Manolis Savva, Alexander Toshev, and Erik Wijmans. Objectnav revisited: On evaluation of embodied agents navigating to objects. ArXiv, abs/2006.13171, 2020. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 206, + 288, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 206, + 288, + 250 + ], + "spans": [ + { + "bbox": [ + 53, + 206, + 288, + 250 + ], + "type": "text", + "content": "[3] Tommaso Campari, Paolo Eccher, Luciano Serafini, and Lamberto Ballan. Exploiting scene-specific features for object goal navigation. In European Conference on Computer Vision, pages 406-421. Springer, 2020. 1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 288, + 295 + ], + "type": "text", + "content": "[4] Chao Cao, Hongbiao Zhu, Howie Choset, and Ji Zhang. Tare: A hierarchical framework for efficiently exploring complex 3d environments. In Robotics: Science and Systems, 2021. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 297, + 288, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 297, + 288, + 340 + ], + "spans": [ + { + "bbox": [ + 53, + 297, + 288, + 340 + ], + "type": "text", + "content": "[5] Hanwen Cao, Hao-Shu Fang, Wenhai Liu, and Cewu Lu. Suctionnet-1billion: A large-scale benchmark for suction grasping. IEEE Robotics and Automation Letters, 6(4):8718-8725, 2021. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 342, + 288, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 342, + 288, + 396 + ], + "spans": [ + { + "bbox": [ + 53, + 342, + 288, + 396 + ], + "type": "text", + "content": "[6] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158, 2017. 6, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 398, + 288, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 398, + 288, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 398, + 288, + 453 + ], + "type": "text", + "content": "[7] Devendra Singh Chaplot, Murtaza Dalal, Saurabh Gupta, Jitendra Malik, and Russ R Salakhutdinov. Seal: Self-supervised embodied active learning using exploration and 3d consistency. Advances in Neural Information Processing Systems, 34:13086-13098, 2021. 2, 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 454, + 288, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 454, + 288, + 497 + ], + "spans": [ + { + "bbox": [ + 53, + 454, + 288, + 497 + ], + "type": "text", + "content": "[8] Devendra Singh Chaplot, Dhiraj Gandhi, Saurabh Gupta, Abhinav Gupta, and Ruslan Salakhutdinov. Learning to explore using active neural slam. arXiv preprint arXiv:2004.05155, 2020. 1, 2, 5, 6, 7, 13" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 499, + 288, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 499, + 288, + 553 + ], + "spans": [ + { + "bbox": [ + 53, + 499, + 288, + 553 + ], + "type": "text", + "content": "[9] Devendra Singh Chaplot, Dhiraj Prakashchand Gandhi, Abhinav Gupta, and Russ R Salakhutdinov. Object goal navigation using goal-oriented semantic exploration. Advances in Neural Information Processing Systems, 33:4247-4258, 2020. 1, 2, 5, 6, 7, 8, 13" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 555, + 288, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 555, + 288, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 555, + 288, + 599 + ], + "type": "text", + "content": "[10] Changhyun Choi, Wilko Schwarting, Joseph DelPreto, and Daniela Rus. Learning object grasping for soft robot hands. IEEE Robotics and Automation Letters, 3(3):2370-2377, 2018. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 601, + 288, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 288, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 288, + 644 + ], + "type": "text", + "content": "[11] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5556-5565, 2015. 14" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 678 + ], + "type": "text", + "content": "[12] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In ECCV, 2018. 
1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 288, + 714 + ], + "type": "text", + "content": "[13] Samir Yitzhak Gadre, Kiana Ehsani, and Shuran Song. Act the part: Learning interaction strategies for articulated object part discovery. ICCV, 2021. 3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 714 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 547, + 127 + ], + "type": "text", + "content": "[14] Georgios Georgakis, Bernadette Bucher, Karl Schmeckpeper, Siddharth Singh, and Kostas Daniilidis. Learning to map for active semantic goal navigation. In International Conference on Learning Representations (ICLR), 2022. 1, 2, 5, 6, 7, 13" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 547, + 161 + ], + "type": "text", + "content": "[15] Georgios Georgakis, Yimeng Li, and Jana Kosecka. Simultaneous mapping and target driven navigation. ArXiv, abs/1911.07980, 2019. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 163, + 547, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 163, + 547, + 217 + ], + "spans": [ + { + "bbox": [ + 307, + 163, + 547, + 217 + ], + "type": "text", + "content": "[16] Margarita Grinvald, Fadri Furrer, Tonci Novkovic, Jen Jen Chung, Cesar Cadena, Roland Siegwart, and Juan Nieto. Volumetric instance-aware semantic mapping and 3d object discovery. IEEE Robotics and Automation Letters, 4(3):3037-3044, 2019. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 220, + 547, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 220, + 547, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 220, + 547, + 262 + ], + "type": "text", + "content": "[17] Saurabh Gupta, Varun Tolani, James Davidson, Sergey Levine, Rahul Sukthankar, and Jitendra Malik. Cognitive mapping and planning for visual navigation. International Journal of Computer Vision, 128:1311-1330, 2017. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 547, + 297 + ], + "type": "text", + "content": "[18] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross B. Girshick. Mask r-cnn. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42:386-397, 2020. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 298, + 547, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 298, + 547, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 298, + 547, + 342 + ], + "type": "text", + "content": "[19] Shi-Sheng Huang, Ze-Yu Ma, Tai-Jiang Mu, Hongbo Fu, and Shi-Min Hu. Supervoxel convolution for online 3d semantic segmentation. ACM Transactions on Graphics (TOG), 40(3):1-15, 2021. 
4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 343, + 547, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 343, + 547, + 396 + ], + "spans": [ + { + "bbox": [ + 307, + 343, + 547, + 396 + ], + "type": "text", + "content": "[20] Hosagrahar V Jagadish, Beng Chin Ooi, Kian-Lee Tan, Cui Yu, and Rui Zhang. idistance: An adaptive b+-tree based indexing method for nearest neighbor search. ACM Transactions on Database Systems (TODS), 30(2):364-397, 2005. 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 547, + 442 + ], + "type": "text", + "content": "[21] Jindong Jiang, Lunan Zheng, Fei Luo, and Zhijun Zhang. Rednet: Residual encoder-decoder network for indoorrgb-d semantic segmentation. arXiv preprint arXiv:1806.01054, 2018. 6, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 445, + 547, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 445, + 547, + 476 + ], + "spans": [ + { + "bbox": [ + 307, + 445, + 547, + 476 + ], + "type": "text", + "content": "[22] Cheng Lin, Tingxiang Fan, Wenping Wang, and Matthias Nießner. Modeling 3d shapes by reinforcement learning. In ECCV, 2020. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 478, + 547, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 547, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 547, + 521 + ], + "type": "text", + "content": "[23] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C. Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 523, + 547, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 523, + 547, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 523, + 547, + 578 + ], + "type": "text", + "content": "[24] Leyao Liu, Tian Zheng, Yun-Jou Lin, Kai Ni, and Lu Fang. Ins-conv: Incremental sparse convolution for online 3d segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18975–18984, 2022. 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 580, + 547, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 547, + 624 + ], + "type": "text", + "content": "[25] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 6, 13" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 624, + 547, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 547, + 666 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 547, + 666 + ], + "type": "text", + "content": "[26] Haokuan Luo, Albert Yue, Zhang-Wei Hong, and Pulkit Agrawal. Stubborn: A strong baseline for indoor object navigation. arXiv preprint arXiv:2203.07359, 2022. 
2, 5, 6, 7, 13" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 670, + 547, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 670, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 670, + 547, + 714 + ], + "type": "text", + "content": "[27] Oleksandr Maksymets, Vincent Cartillier, Aaron Gokaslan, Erik Wijmans, Wojciech Galuba, Stefan Lee, and Dhruv Batra. Thda: Treasure hunt data augmentation for semantic navigation. In Proceedings of the IEEE/CVF International" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "6680" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 68, + 72, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 286, + 95 + ], + "type": "text", + "content": "Conference on Computer Vision, pages 15374-15383, 2021. 1, 2, 6, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 151 + ], + "type": "text", + "content": "[28] John McCormac, Ankur Handa, Andrew Davison, and Stefan Leutenegger. Semantic fusion: Dense 3d semantic mapping with convolutional neural networks. In 2017 IEEE International Conference on Robotics and automation (ICRA), pages 4628-4635. IEEE, 2017. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 152, + 287, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 152, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 152, + 287, + 207 + ], + "type": "text", + "content": "[29] Arsalan Mousavian, Alexander Toshev, Marek Fiser, Jana Košecka, Ayzaan Wahid, and James Davidson. Visual representations for semantic target driven navigation. In 2019 International Conference on Robotics and Automation (ICRA), pages 8846-8852. IEEE, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 274 + ], + "type": "text", + "content": "[30] Tongzhou Mu, Zhan Ling, Fanbo Xiang, Derek Cathera Yang, Xuanlin Li, Stone Tao, Zhiao Huang, Zhiwei Jia, and Hao Su. Maniskill: Generalizable manipulation skill benchmark with large-scale demonstrations. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 276, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 319 + ], + "type": "text", + "content": "[31] Alexey Nekrasov, Jonas Schult, Or Litany, B. Leibe, and Francis Engelmann. Mix3d: Out-of-context data augmentation for 3d scenes. 2021 International Conference on 3D Vision (3DV), pages 116-125, 2021. 
1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 321, + 286, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 286, + 353 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 286, + 353 + ], + "type": "text", + "content": "[32] Emilio Parisotto and Ruslan Salakhutdinov. Neural map: Structured memory for deep reinforcement learning. ArXiv, abs/1702.08360, 2018. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 355, + 287, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 355, + 287, + 399 + ], + "spans": [ + { + "bbox": [ + 48, + 355, + 287, + 399 + ], + "type": "text", + "content": "[33] C. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 77-85, 2017. 6, 13" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 400, + 287, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 400, + 287, + 432 + ], + "spans": [ + { + "bbox": [ + 48, + 400, + 287, + 432 + ], + "type": "text", + "content": "[34] Yiding Qiu, Anwesan Pal, and Henrik I Christensen. Learning hierarchical relationships for object-goal navigation. arXiv preprint arXiv:2003.06749, 2020. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 434, + 287, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 434, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 434, + 287, + 499 + ], + "type": "text", + "content": "[35] Santhosh Kumar Ramakrishnan, Devendra Singh Chaplot, Ziad Al-Halah, Jitendra Malik, and Kristen Grauman. Poni: Potential functions for objectgoal navigation with interaction-free learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18890-18900, 2022. 1, 2, 6, 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 501, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 287, + 555 + ], + "type": "text", + "content": "[36] Ram Ramrakhya, Eric Undersander, Dhruv Batra, and Abhishek Das. Habitat-web: Learning embodied object-search strategies from human demonstrations at scale. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5173-5183, 2022. 1, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 557, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 557, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 557, + 287, + 611 + ], + "type": "text", + "content": "[37] IEEE Robotics. Proceedings 1997, IEEE international symposium on computational intelligence in robotics and automation cira'97 - towards new computational principles for robotics and automation, july 10-11, 1997, monterey, california, usa. In CIRA, 1997. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 678 + ], + "type": "text", + "content": "[38] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, et al. Habitat: A platform for embodied ai research. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9339-9347, 2019. 1, 3, 6, 14" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[39] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 547, + 712 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 106 + ], + "type": "text", + "content": "Batra. Habitat: A Platform for Embodied AI Research. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 547, + 149 + ], + "type": "text", + "content": "[40] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 151, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 151, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 308, + 151, + 545, + 182 + ], + "type": "text", + "content": "[41] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. ArXiv, abs/1707.06347, 2017. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 184, + 545, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 545, + 204 + ], + "type": "text", + "content": "[42] James A. Sethian. Fast marching methods. SIAM Rev., 41:199-235, 1999. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 206, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 206, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 206, + 545, + 249 + ], + "type": "text", + "content": "[43] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv:2210.05663, 2022. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "type": "text", + "content": "[44] Hao Shen, Weikang Wan, and He Wang. Learning category-level generalizable object manipulation policy via generative adversarial self-imitation learning from demonstrations. arXiv preprint arXiv:2203.02107, 2022. 
3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 295, + 545, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 295, + 545, + 336 + ], + "spans": [ + { + "bbox": [ + 308, + 295, + 545, + 336 + ], + "type": "text", + "content": "[45] Thang Vu, Kookhoi Kim, Tung Minh Luu, Xuan Thanh Nguyen, and Chang-Dong Yoo. Softgroup for 3d instance segmentation on point clouds. ArXiv, abs/2203.01509, 2022. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 338, + 545, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 338, + 545, + 380 + ], + "spans": [ + { + "bbox": [ + 308, + 338, + 545, + 380 + ], + "type": "text", + "content": "[46] Thomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. Robotics: Science and Systems, 2015. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 383, + 545, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 383, + 545, + 435 + ], + "spans": [ + { + "bbox": [ + 308, + 383, + 545, + 435 + ], + "type": "text", + "content": "[47] Erik Wijmans, Abhishek Kadian, Ari Morcos, Stefan Lee, Irfan Essa, Devi Parikh, Manolis Savva, and Dhruv Batra. Dd-ppo: Learning near-perfect pointgoal navigators from 2.5 billion frames. arXiv preprint arXiv:1911.00357, 2019. 1, 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 437, + 545, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 437, + 545, + 491 + ], + "spans": [ + { + "bbox": [ + 308, + 437, + 545, + 491 + ], + "type": "text", + "content": "[48] Fei Xia, Amir R Zamir, Zhiyang He, Alexander Sax, Jitendra Malik, and Silvio Savarese. Gibson env: Real-world perception for embodied agents. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9068-9079, 2018. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 493, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 568 + ], + "type": "text", + "content": "[49] Karmesh Yadav, Santhosh Kumar Ramakrishnan, John Turner, Aaron Gokaslan, Oleksandr Maksymets, Rishabh Jain, Ram Ramrakhya, Angel X Chang, Alexander Clegg, Manolis Savva, Eric Undersander, Devendra Singh Chaplot, and Dhruv Batra. Habitat challenge 2022. https://aihabitat.org/challenge/2022/, 2022. 1, 2, 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "spans": [ + { + "bbox": [ + 308, + 571, + 545, + 602 + ], + "type": "text", + "content": "[50] Wei Yang, Xiaolong Wang, Ali Farhadi, Abhinav Gupta, and Roozbeh Mottaghi. Visual semantic navigation using scene priors. arXiv preprint arXiv:1810.06543, 2018. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 604, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 604, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 604, + 545, + 656 + ], + "type": "text", + "content": "[51] Joel Ye, Dhruv Batra, Abhishek Das, and Erik Wijmans. Auxiliary tasks and exploration enable objectgoal navigation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16117-16126, 2021. 
1, 2, 6, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 658, + 545, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 690 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 690 + ], + "type": "text", + "content": "[52] Jiazhao Zhang, Yijie Tang, He Wang, and Kai Xu. Asro-dio: Active subspace random optimization based depth inertial odometry. IEEE Transactions on Robotics, 2022. 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 712 + ], + "type": "text", + "content": "[53] Jiazhao Zhang, Chenyang Zhu, Lintao Zheng, and Kai Xu. Fusion-aware point convolution for online semantic 3d scene" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "6681" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 318 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4534-4543, 2020. 2, 3, 4, 12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 162 + ], + "type": "text", + "content": "[54] Xiaoming Zhao, Harsh Agrawal, Dhruv Batra, and Alexander G. Schwing. The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 16107-16116, 2021. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 206 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 206 + ], + "type": "text", + "content": "[55] Lintao Zheng, Chenyang Zhu, Jiazhao Zhang, Hang Zhao, Hui Huang, Matthias Nießner, and Kai Xu. Active scene understanding via online semantic reconstruction. Computer Graphics Forum, 38, 2019. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "spans": [ + { + "bbox": [ + 48, + 208, + 288, + 262 + ], + "type": "text", + "content": "[56] Fengda Zhu, Xiwen Liang, Yi Zhu, Qizhi Yu, Xiaojun Chang, and Xiaodan Liang. Soon: Scenario oriented object navigation with graph-based exploration. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12689-12699, 2021. 
1, 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "spans": [ + { + "bbox": [ + 48, + 263, + 287, + 318 + ], + "type": "text", + "content": "[57] Yuke Zhu, Roozbeh Mottaghi, Eric Kolve, Joseph J. Lim, Abhinav Kumar Gupta, Li Fei-Fei, and Ali Farhadi. Target-driven visual navigation in indoor scenes using deep reinforcement learning. 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 3357–3364, 2017. 2" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6682" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_content_list.json b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f69c12fd80719e45fdfc9189f6968353f42bfd28 --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_content_list.json @@ -0,0 +1,1466 @@ +[ + { + "type": "text", + "text": "3D-POP - An automated annotation approach to facilitate markerless 2D-3D tracking of freely moving birds with marker-based motion capture", + "text_level": 1, + "bbox": [ + 99, + 130, + 870, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hemal Naik $^{1234*}$ , Alex Hoi Hang Chan $^{12*}$ , Junran Yang $^{2}$ , Mathilde Delacoux $^{12}$ , Iain D. Couzin $^{123}$ , Fumihiro Kano $^{12\\dagger}$ , Máté Nagy $^{12356\\dagger}$", + "bbox": [ + 187, + 202, + 807, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Dept. of Collective Behavior and Dept. of Ecology of Animal Societies, Max Planck Institute of Animal Behavior, $^{2}$ Dept. of Biology, University of Konstanz, $^{3}$ Centre for the Advanced Study of Collective Behaviour, University of Konstanz, $^{4}$ Computer Aided Medial Procedures, Informatik Department, Technische Universität München, $^{5}$ Dept. of Biological Physics, Eötvös Loránd University, $^{6}$ MTA-ELTE 'Lendület' Collective Behaviour Research Group, Hungarian Academy of Sciences. *,† contributed equally. 
Full affiliation available in supplementary {hnaik, icouzin}@ab.mpg.de, nagymate@hal.elte.hu, {hoi-hang.chan, junran.yang, mathilde.delacoux, fumihiro.kano}@uni-konstanz.de", + "bbox": [ + 84, + 257, + 906, + 382 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 414, + 313, + 430 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in machine learning and computer vision are revolutionizing the field of animal behavior by enabling researchers to track the poses and locations of freely moving animals without any marker attachment. However, large datasets of annotated images of animals for markerless pose tracking, especially high-resolution images taken from multiple angles with accurate 3D annotations, are still scant. Here, we propose a method that uses a motion capture (mo-cap) system to obtain a large amount of annotated data on animal movement and posture (2D and 3D) in a semi-automatic manner. Our method is novel in that it extracts the 3D positions of morphological keypoints (e.g. eyes, beak, tail) in reference to the positions of markers attached to the animals. Using this method, we obtained, and offer here, a new dataset - 3D-POP with approximately 300k annotated frames (4 million instances) in the form of videos having groups of one to ten freely moving birds from 4 different camera views in a $3.6m \\times 4.2m$ area. 3D-POP is the first dataset of flocking birds with accurate keypoint annotations in 2D and 3D along with bounding box and individual identities and will facilitate the development of solutions for problems of 2D to 3D markerless pose, trajectory tracking, and identification in birds.", + "bbox": [ + 75, + 448, + 473, + 795 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 828, + 209, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Computer vision and machine learning are revolutionizing many facets of conventional research methods. For example, dataset-driven machine learning methods have", + "bbox": [ + 75, + 854, + 470, + 902 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/758ebd48f70c97be1422d5f815f4cb2e52889e544453b77b72dff3274f52851e.jpg", + "image_caption": [ + "Figure 1. Definition of morphological keypoints offered in the 3D-POP dataset" + ], + "image_footnote": [], + "bbox": [ + 560, + 414, + 794, + 559 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "demonstrated remarkable success in the field of animal behavior, in tasks related to object detection [40, 41], tracking and individual identification [11, 25, 43], species recognition [40], 2D pose estimation [14, 29] and 3D pose estimation [2, 4]. These automatic methods not only reduce the required labor and errors associated with manual coding of behaviours [6, 39] but also facilitate long-term continuous monitoring of animal behavior in both indoor (lab) [32, 36] and outdoor (wild) settings [12, 37]. Engineering and robotics experts use the data on animal locomotion to reverse-engineer the key mechanisms underlying behaviors and movements of animals [18, 20]. 
The development of new techniques critically depends on the quality of publicly-available datasets with accurate annotations.", + "bbox": [ + 496, + 625, + 893, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Creating large datasets with animals is particularly difficult because every species has distinct morphology, and also because it is generally challenging to film freely moving animals in a controlled environment. It is thus important for", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21274", + "bbox": [ + 478, + 924, + 519, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "datasets to include a wide range of species and behaviors to maximize the practical application of machine learning methods for animal tracking. Although animals have been included in many popular image datasets collected from the internet such as ImageNet [9] and COCO [26], those datasets have not fulfilled more specific needs of animal behavior researchers. Hence, recently several datasets have been created with a focus on animal behavior research, such as species classification [12, 37, 40, 41, 48], behavioral classification [27, 31, 48] and posture tracking [2, 10, 15, 24, 47].", + "bbox": [ + 75, + 90, + 472, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The most common approach for creating datasets of animals is through manual annotations in the image space (2D). As a result, most solutions to single / multiple animal detection, tracking, or pose estimation problems are limited to the 2D space [14, 25], or use 2D image projections to validate the results of 3D predictions without ground truth [2, 4]. For nonhuman animals, a dataset similar to Human 3.6M [16] is necessary to develop solutions for problems on 2D/3D tracking and posture prediction with a range of constraints, such as single or multiviews, single or multi-individual, and tracking using single frame or temporal consistency. More recently, marker-based motion-capture technology has been used to create 3D datasets for rats [10] and dogs [22] with one individual. The application of mo-cap for animal behaviour studies has also increased in popularity, such as studying flight kinematics [23] and gaze behavior in a freely moving group [17, 21]. It is clear that datasets with mo-cap will not only enhance the size of the dataset but also improve the accuracy of annotations, thus providing a large 2D/3D ground truth dataset for the animal position, posture, and identity tracking. However, despite its potential, researchers have only begun using mo-cap for behavior studies, and further work is required in terms of method development and dataset collection.", + "bbox": [ + 75, + 244, + 472, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We propose a new mo-cap-based approach to create large-scale datasets with a bird species (homing pigeons, Columba livia), and provide a complete code base for further applications to other species. 
Along with 2D-3D posture, the dataset also offers annotations for 2D-3D movement trajectories (position) with ground truth on identities for up to 18 individuals. We overcame the unique challenge of needing to attach reflective markers on desired but often inaccessible morphological keypoints on animal bodies and instead determined the relative 3D position of these keypoints to markers attached on accessible parts of the animal (Figure 1).", + "bbox": [ + 75, + 609, + 472, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The method enables a large amount of training data to be generated in a semi-automatic manner with minimal time investment and human labor. Moreover, by tracking freely-moving animals in a relatively large area (3.6m x 4.2m), we were able to track a variety of naturalistic behaviors in a flock consisting of up to 10 individuals under realistic experimental conditions. Finally, we demonstrate through a", + "bbox": [ + 75, + 795, + 472, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "series of experiments that our method is consistent and the CNN models trained on our dataset are able to predict the postures of birds with no markers attached to their bodies.", + "bbox": [ + 496, + 90, + 890, + 137 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. State of the Art", + "text_level": 1, + "bbox": [ + 498, + 151, + 653, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. 2D posture", + "text_level": 1, + "bbox": [ + 500, + 176, + 620, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Animal Kingdom [31] is by far the largest dataset with 50 hours of video annotations that include 850 species of varied taxa (fish, birds, mammals, etc.), focusing on a generalizable solution for 2D pose estimation and activity recognition for a single individual. Other notable datasets contain images instead of videos and focus on capturing variations in terms of specific taxa e.g. mammals [48], birds [41] and monkeys [47], or specific species e.g. zebras [14], all of these focus on solving problems for a single individual recorded from a single viewpoint.", + "bbox": [ + 496, + 200, + 890, + 351 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Datasets based on single animal-based solutions are sufficient for some cases and rely on detection-based top-down approaches for extending the method for tracking the posture of multiple individuals [42]. There are few datasets that offer posture annotations for multiple individuals [2,24,25]. The problem of tracking multiple individuals is often simplified by placing the cameras above the animals, which minimizes occlusions [25, 43]. Tracking multiple individuals from side views may require multiple views, which may be important to resolve occlusions when animals interact in 3D spaces e.g. Cowbird dataset [2].", + "bbox": [ + 496, + 352, + 890, + 517 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The existing datasets have motivated the development of various methods for posture estimation. 
However, reliance on manual annotations limits the complexity of datasets in terms of the number of viewpoints or the number of individuals, especially for video sequences.", + "bbox": [ + 496, + 518, + 890, + 594 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2.3D posture", + "text_level": 1, + "bbox": [ + 500, + 604, + 620, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Datasets with ground truth on 3D posture are relatively difficult to obtain with a group of animals. One popular method for obtaining 3D ground truth posture is the triangulation of 2D postures using multiple views to record animals. Acinoset [20] (leopard in wild), Fly3D [15] (fly in a lab) and OpenMonkeyStudio [3] (macaque in a lab) use triangulation-based approaches to provide 3D posture of single individuals. The images for these datasets are also annotated manually and, therefore, the accuracy of the computed 3D pose depends on the quality of annotation and calibration.", + "bbox": [ + 496, + 628, + 890, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An alternative approach is to use marker-based mo-cap with a skeleton tracking feature as used with humans [16]. Kearney et al. [22] used motion capture to generate 3D ground truth for dogs and combine their approach with depth sensors (RGB-D) with the aim of designing markerless tracking based on RGBD sensors (63 to 82 markers). Dunn et al. [10] offered Rat 7M dataset using mo-cap with", + "bbox": [ + 496, + 795, + 890, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21275", + "bbox": [ + 478, + 924, + 519, + 936 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/375a0910fff9275b9e7bdb91e8372ce7b300417ddcc58685e7a2ca73f7c22987.jpg", + "image_caption": [ + "Figure 2. Illustration showing the experimental setup, with different defined coordinate systems, including the Vicon global coordinate system $(\\mathrm{O_{Vicon}})$ , the camera coordinate system for each RGB camera $(\\mathrm{O_{Camera}})$ , and the head $(\\mathrm{O_{head}})$ and backpack $(\\mathrm{O_{backpack}})$ coordinate system for each pigeon subject A) Detailed floor plan for data collection. B) Pigeon subject, with corresponding head and backpack markers and coordinate systems" + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 728, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RGB cameras and 20 markers. These datasets are useful for solving posture problems for a single individual from multiple views and offer the option of using temporal consistency.", + "bbox": [ + 75, + 393, + 468, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, Marshall et al. published PAIR-R24M [27], the first dataset with 3D ground truth with more than one animal, a pair of rats, using the approach of Dunn et al. [10]. Motion capture systems offer the huge advantage of creating millions of annotations in an automatic manner with high accuracy and low noise. The skeleton tracking feature with the mo-cap is primarily designed for tracking human posture and relies on a large number of markers. Additionally, the marker patterns have to be unique (at least partially) for maintaining the identity of each individual. 
Marker placement is a also limitation for smaller species and wild animals that lack the tolerance for having markers placed on specific locations of their body.", + "bbox": [ + 75, + 458, + 468, + 654 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lack of ground truth in 3D posture had led to innovative work of predicting 3D posture using 2D keypoints and silhouettes [2,4] or using synthetic datasets [5] or toys [49]. These approaches are promising but lack quantitative evaluation for robust practical applications. Computer vision literature on 3D posture problems mainly focuses on extracting as much of detail as possible. For animal behavior experiments information required from videos is always defined in the context of the experiment. It is worth noting that for birds, tracking the head and body orientations could be sufficient to quantify many key behaviors in ground-foraging contexts, such as feeding (pecking ground), preening, vigilance (head scanning), courtship (head bowing), or walking. Measuring the head direction in 3D also allows gaze reconstruction [17, 21], to be applied in the study of social cognition and collective behavior.", + "bbox": [ + 75, + 659, + 470, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Multi-object tracking with identity", + "text_level": 1, + "bbox": [ + 500, + 393, + 803, + 410 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Identity recognition is a critical problem to solve in the context of biological studies, especially when tracking the behavior of multiple interacting individuals over long periods of time. Tracking and identification of multiple individuals in large groups are especially exciting to quantify group-level behaviors like social networks [44, 46], dominance, or leadership [30].", + "bbox": [ + 496, + 417, + 890, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For indoor experiments, the number of individuals is often controlled and the identification problem is linked with the tracking of animals [25, 43]. The task of tracking and identification is often resolved together using markers [14, 30] or marker-less [7, 11, 43] methods. The existing solutions perform well with specific perspectives (top-down view) and thus often fail to resolve cases of occlusion. Robust evaluation of simultaneous identification and tracking methods is difficult because true ground truth for identities is often not available in datasets with multiple animals or available for only a very short duration [25].", + "bbox": [ + 496, + 523, + 892, + 688 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "There are many good datasets available to independently solve problems of posture estimation, detection, tracking, and identification. Very few datasets offer the possibility of solving all of these problems simultaneously in realistic experimental scenarios.", + "bbox": [ + 496, + 689, + 890, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to fill this gap with our contribution of a semi-automatic method for producing new datasets with animals. Our dataset, 3D-POP, includes video recordings of 18 unique pigeons in various group sizes (1,2,5,10) from multiple views. We offer ground truth for identity, 2D-3D trajectories, and 2D-3D posture mapping for all individuals across the entire dataset (300K frames). 
The dataset also consists of annotations for object detection in the form of bounding boxes.", + "bbox": [ + 496, + 763, + 893, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21276", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methods", + "text_level": 1, + "bbox": [ + 76, + 90, + 174, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 114, + 266, + 132 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The dataset was collected from pigeons moving on a jute fabric $(3.6\\mathrm{m}\\times 4.2\\mathrm{m})$ onto which we evenly scattered grains to encourage the birds to feed in that area (Figure 2A). This feeding area was located inside a large enclosure equipped with a mo-cap system $(15\\mathrm{m}\\times 7\\mathrm{m}\\times 4\\mathrm{m})$ . This mo-cap system consists of 30 motion capture cameras (12 Vicon Vero 2.2, 18 Vicon Vantage-5 cameras; $100\\mathrm{Hz}$ ) and can track the 3D positions of reflective markers with submillimeter precision. At the corners of the feeding area, we placed 4 high-resolution (4K) Sony action cameras (rx0ii, $30\\mathrm{Hz}$ , $3840\\mathrm{x}2160\\mathrm{p}$ ) mounted on standard tripods and also an Arduino-based synchronization box which flashes RGB and infrared LED lights every 5 seconds (Figure 2). Details on the synchronization and calibration of RGB cameras are provided in the supplementary text.", + "bbox": [ + 75, + 138, + 472, + 367 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Animal Subjects", + "text_level": 1, + "bbox": [ + 76, + 377, + 238, + 393 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Eighteen pigeons (Columba livia) were subjected to this study over 6 experimental days. Each day 10 pigeons were randomly selected from the population. Four $6.4\\mathrm{mm}$ reflective markers were attached to each subject's head, and four $9.5\\mathrm{mm}$ markers were attached to a customized backpack worn by each subject (Figure 2B). Generally, pigeons tolerate markers on the head with minimal effects on their behavior and habituate quickly to backpacks. Backpacks are also widely used for bird studies in behavioral ecology [1, 45]. The four $9.5\\mathrm{mm}$ backpack markers had a unique geometric configuration to track the individual identities of each bird throughout each recording. Each day we performed up to 11 trials in the following order: 1 pigeon (4 trials), a pair of pigeons (4 trials), a flock of 5 pigeons (2 trials), and a flock of 10 pigeons (1 trial). It took approximately 1 hour to perform all trials each day. The total frames and duration of samples over the course of the experiment are described in Table 1. An additional session was recorded with birds without attaching any markers to validate the results of models trained on annotated data having birds with markers (see 5.2).", + "bbox": [ + 75, + 401, + 472, + 720 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Data annotation pipeline", + "text_level": 1, + "bbox": [ + 76, + 729, + 303, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Annotation principle", + "text_level": 1, + "bbox": [ + 76, + 753, + 276, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The movement of all features on a rigid body can be tracked simultaneously in a 3D space by computing 6-DOF pose of the rigid object. 
We use this principle to achieve annotations for keypoint features that are rigidly attached to the head and body of the bird. The four markers attached to the head and body (using a backpack) of each pigeon are used to compute 6-DOF pose of these body parts using the mo-cap system.", + "bbox": [ + 75, + 779, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By assuming that the head and body are rigid bodies in the case of walking or standing birds, we designed a pipeline to annotate the position of features on the head and body (beak, eyes, shoulder, and tail, etc.) in a few frames to compute their 3D location with respect to marker positions. Once computed, the relationship between markers and features does not change during the sequences and this ensures that 6-DOF pose of head and body for any frame can be used to project 3D positions of keypoint features onto the image space to obtain 2D annotations.", + "bbox": [ + 496, + 90, + 890, + 241 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "All keypoints defined for the head lie on the skull of the bird (Figure 1). The rigidity assumption is valid for these keypoints as they are rigidly placed on the skull. The keypoints chosen for the body lie actually on the rib cage and shoulders and exhibit a limited range of motion independent of each other. The rigidity assumption for the body is a reasonable assumption for the annotation pipeline if the birds do not move their wings and body(see 5.3).", + "bbox": [ + 496, + 242, + 890, + 363 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.2 Manual annotation", + "text_level": 1, + "bbox": [ + 500, + 381, + 687, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "6-DOF (Degrees of freedom) tracking of the head and body is used to create a bounding box around the bird and crop the image of the focal individual for annotation. For each individual pigeon, 9 morphological keypoints (Figure 1) are annotated on 5-10 frames from all available view angles. Ideally, four frames (1 per view) is sufficient, but all keypoints are rarely visible within a single instance. Moreover, multiple measurements (3-5 frames per view) improve the robustness of computed 3D keypoint positions. The position of each keypoint is first triangulated using sparse bundle adjustment (in the camera coordinate system), then the relative position of the keypoint is computed with respect to the markers (in the coordinate system of the body part). Finally, all resultant 3D positions of keypoints are averaged and stored as a template file. This process is repeated for each bird on each recording day.", + "bbox": [ + 496, + 405, + 890, + 647 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.3 Annotation propagation", + "text_level": 1, + "bbox": [ + 500, + 665, + 720, + 681 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this final step, the ground truth data is generated for each recording using 3D keypoint positions computed in the previous step. The 3D positions of the keypoint features are transferred to the global coordinate system using 6-DOF pose. Next, keypoints are transferred to the coordinate system of each camera and projected to the image space (using calibration parameters). Bounding box annotations for object detection or tracking tasks are derived from keypoint projections. We determined that keypoints with the minimum and maximum x-y pixel values with an offset of 60 pixels are sufficient to define a bounding box. 
Finally, the 6-DOF tracking with the mo-cap system maintains the identity of each bird and this is also stored with 2D-3D information for the entire sequence.", + "bbox": [ + 496, + 688, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21277", + "bbox": [ + 478, + 924, + 517, + 936 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/54357dc3df76102af12910c863b8f637d25b8102652d76372d97efea7d8a9073.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 90, + 441, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f64204444059d167046dc366c9d8f8f7035b3da1384b6b6cbaf7d5ce1dcd7572.jpg", + "image_caption": [ + "Figure 3. Semi-automated annotation pipeline based on 6DOF tracking and RGB images. A) Input 6-DOF tracking data for head and backpack coordinate systems, and multi-view RGB videos. B) Manually annotate all visible keypoints from all views. C) Triangulate 3D position of all keypoints in the head and backpack coordinate system, assuming that keypoints and tracked markers are a rigid body. D) Apply across trials to get keypoints across all individuals" + ], + "image_footnote": [], + "bbox": [ + 450, + 92, + 635, + 224 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/565ce7a02cc250114da9de41f8e952692d71ed24c4c190ee9df77c6fe249a50b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 229, + 336, + 366 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/92f1ea26da7b6c833a6c0c2d36f0ade6337eac663eb3bc1a9bb9a5ece2d0eef9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 229, + 635, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. The 3D-POP dataset", + "text_level": 1, + "bbox": [ + 76, + 388, + 272, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Dataset Description", + "text_level": 1, + "bbox": [ + 76, + 422, + 264, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We present 3D-POP (3D Postures of Pigeons), a dataset that provides accurate ground truth for 3D keypoints, 2D keypoints, bounding boxes, and individual identities. The dataset includes RGB images from four high-resolution cameras (4K) and up to 6 hours of recordings divided into 57 sequences of 1,2,5, and 10 pigeons behaving naturalistically (Table 1). The dataset contains 3D coordinates (unfiltered) and 6-DOF pose obtained from the mo-cap facility along with calibration parameters. We also provide a total of 1 hour of recording (11 sequences) with pigeons (group size:1,2,5,11) without any markers on their body. These videos are provided for users to test the practical effectiveness of markerless solutions without the influence of markers. For realistic assessment, we show that a model trained with our dataset is able to infer keypoints on videos with pigeons without markers (see 5.2). Download the dataset here: https://doi.org/10.17617/3.HPBBC7", + "bbox": [ + 75, + 454, + 470, + 710 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/875ae1e6202967f3fd3e76afbae73e2ae3fb4e12ae675230a7bb62601272e49c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
No. \nindividualsAnnotated \nframesVideo length \n(min)
195,51355
2135,547119
544,24085
1020,32191
", + "bbox": [ + 102, + 750, + 444, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Dataset Summary: Total number of labeled frames with ground truth data for different group sizes.", + "bbox": [ + 76, + 867, + 467, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2.Customization", + "text_level": 1, + "bbox": [ + 500, + 391, + 648, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We release 3D-POPAP (3D-POP Annotation Pipeline) to manipulate the annotations of the dataset (Download: https://github.com/alexhang212/Dataset-3DPOP). As explained earlier, our use of the 6-DOF tracking decouples the keypoint annotations from the positions of markers used for mo-cap. Due to this design of the annotation approach, we can offer a unique dataset with the ability to easily add new 2D/3D keypoint annotations. The feature of keypoint modification is relevant for future work because defining the posture of birds is a difficult problem and depends on the final application. As of now, there are no datasets available with ground truth on the 3D posture of birds. The lack of ground truth has motivated novel ideas for solving the 3D reconstruction of bird pose using 2D annotations (silhouette and keypoints [2]). Among the available 2D datasets with birds, different numbers of keypoints are selected to define pose e.g. CUB-200: 15 [41], Cowbird dataset: 12 [2] and Animal Kingdom: 23 [31].", + "bbox": [ + 496, + 414, + 890, + 685 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To the best of our knowledge, the use of posture in behavior studies with birds is still limited and pose definition may rely completely on the nature of the study. Our inspiration for keypoint definition is inspired by gaze studies [17, 21] for which 9 keypoint-based posture sufficiently provides gaze direction with body and head orientation.", + "bbox": [ + 496, + 686, + 890, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Dataset Validation", + "text_level": 1, + "bbox": [ + 500, + 786, + 678, + 801 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The annotations in 3D-POP are obtained automatically, and therefore we designed three different tests to validate the accuracy and consistency of the annotations. The first test compares the accuracy of the 3D features computed with our method and the method presented by Kano et al. [21].", + "bbox": [ + 496, + 809, + 890, + 898 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21278", + "bbox": [ + 478, + 925, + 517, + 936 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The second test measures the consistency of the 3D/2D annotations across the dataset. This test is required to identify errors in annotation introduced by erroneous 3D mo-cap tracking due to occlusion, rapid movement of the birds, or calibration and synchronization errors of the cameras. It is important to perform this test because manually checking millions of annotations is not practical. Finally, the third test checks the variation in the 3D pose captured in all sequences. This test shows that the dataset is not biased to specific types of motion or poses.", + "bbox": [ + 75, + 90, + 472, + 243 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.1 Accuracy", + "text_level": 1, + "bbox": [ + 76, + 260, + 197, + 276 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Kano et al. [21] use a calibration method to measure the 3D position of eyes w.r.t. mo-cap markers. 
This process involves a custom camera rig, made of 4 separate webcams that capture the head of each pigeon before data collection. We replicated this process to compute the ground truth 3D position of eyes and beak. Further, we compared the ground truth with the 3D position of the same features computed with our approach.", + "bbox": [ + 75, + 284, + 468, + 404 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We obtained root mean squared errors (RMSE) for all three features (Beak: $5.0\\mathrm{mm}$ , Left eye: $5.0\\mathrm{mm}$ , Right Eye: $4.9\\mathrm{mm}$ ), which is sufficient for pigeons considering that the diameter of the eyes is typically $6 - 7\\mathrm{mm}$ [8]. This method provides an approximation of the accuracy for a few features only, and a better method is required to test the accuracy of 3D features measured on the body. It should be noted that our method has comparable accuracy and alleviates the need of using dedicated calibrations rigs and thus saves time.", + "bbox": [ + 75, + 405, + 470, + 555 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.2 Consistency and outlier detection", + "text_level": 1, + "bbox": [ + 76, + 574, + 362, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It is reasonable to assume that a small portion of the mo-cap sequences contains tracking errors and will produce inaccurate 6-DOF poses for body parts. As a result, the annotation for all keypoints associated with the relevant body parts is likely to be wrong. We know that models trained with large datasets with small noise still generalize to a solution [34]. Yet, it is important to identify and remove these sequences from the dataset. Keeping this in mind, we design a consistency check with the intuition that a well-trained model for keypoint detector will predict 2D features with reasonable accuracies for all frames. Therefore, a comparison between predicted keypoints and propagated keypoints is likely to show very large errors for all keypoints (of the same body part), especially for frames with faulty mo-cap tracking (Figure 5). We use this idea to automatically determine the consistency of the annotations throughout the trial.", + "bbox": [ + 75, + 598, + 468, + 852 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We trained a state-of-the-art 2D keypoint detection model (DLC [28]) on 15177 images with a ResNet50 backbone for 30,000 iterations with the adam optimizer. There", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fd1a912a877c2f4038eca05d2c01378a7b1c8da5d5601e3574699424331cadab.jpg", + "image_caption": [ + "Figure 4. Distribution of Euclidean distances (px) between model predictions of a trained DLC model and annotations, after outlier frames were filtered. Frequency shown in the y-axis and and only points of up to $10\\mathrm{px}$ error is shown on the x-axis. A) Head keypoints B) Backpack keypoints" + ], + "image_footnote": [], + "bbox": [ + 498, + 88, + 702, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f7695623f4510f25d39da8525995954a0856cdfcee318b0aa9d07e8028261444.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 88, + 901, + 212 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "is no reliable method available for tracking the posture of multiple birds simultaneously, therefore we use a top-down approach and train on single individual data using bounding box annotations. 
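The consistency test described here reduces to measuring how far the detector's 2D predictions fall from the propagated annotations, and then screening those per-keypoint distances with a generalized ESD test (introduced just below). A minimal sketch of that computation follows; the array shapes, synthetic data, and significance level are illustrative assumptions rather than the authors' code.

```python
import numpy as np
from scipy import stats

def keypoint_errors(pred, gt):
    """Per-frame, per-keypoint Euclidean distance in pixels.
    pred, gt: (n_frames, n_keypoints, 2) arrays of predicted and propagated keypoints."""
    return np.linalg.norm(pred - gt, axis=-1)

def rmse_per_keypoint(err):
    """Root mean squared 2D error per keypoint (the quantity reported in Table 2)."""
    return np.sqrt(np.nanmean(err ** 2, axis=0))

def gesd_outliers(x, max_outliers, alpha=0.05):
    """Generalized ESD test (Rosner 1983): indices of values flagged as outliers."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    keep, keep_idx = x.copy(), np.arange(n)
    candidates, n_out = [], 0
    for i in range(1, max_outliers + 1):
        dev = np.abs(keep - keep.mean())
        j = int(dev.argmax())
        r_i = dev[j] / keep.std(ddof=1)
        p = 1.0 - alpha / (2.0 * (n - i + 1))
        t = stats.t.ppf(p, n - i - 1)
        lam_i = (n - i) * t / np.sqrt((n - i - 1 + t ** 2) * (n - i + 1))
        candidates.append(keep_idx[j])
        if r_i > lam_i:
            n_out = i  # number of outliers = largest i with R_i > lambda_i
        keep = np.delete(keep, j)
        keep_idx = np.delete(keep_idx, j)
    return candidates[:n_out]

# toy data standing in for one trial: 500 frames, 9 keypoints
rng = np.random.default_rng(0)
gt = rng.uniform(0, 2000, size=(500, 9, 2))    # propagated annotations
pred = gt + rng.normal(0, 3, size=gt.shape)    # detector predictions
err = keypoint_errors(pred, gt)

# flag outliers per keypoint, then drop frames with more than one flagged keypoint
bad = np.zeros(err.shape, dtype=bool)
for k in range(err.shape[1]):
    idx = gesd_outliers(err[:, k], max_outliers=int(0.2 * err.shape[0]))
    bad[idx, k] = True
dropped_frames = np.flatnonzero(bad.sum(axis=1) > 1)
print(rmse_per_keypoint(err), len(dropped_frames))
```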
The training data excludes highly occluded frames with $>30\\%$ overlap with another bounding box to avoid sequences that have multiple individuals in the bounding box due to close proximity. GESD outlier analysis [35] is used for each keypoint independently setting the expected outliers at $20\\%$ of the dataset. The frames having more than 1 outlier keypoint are filtered out as we expect a higher number of outliers in case of erroneous annotations (explained above).", + "bbox": [ + 496, + 325, + 890, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Using this method we filtered out $2.9\\%$ of the overall dataset, which lowered the average Euclidean distance between annotation and predictions (see Table 2). We used the filtered training data and retrained a model (14,722 images, 30,000 iterations, ResNet50 backbone, adam optimizer), but obtained similar errors compared to the previous model (see Table 2). The consistency check reveals that the annotations are largely consistent with model predictions, with a typical error of 2-3 pixels for head features and 3-4 px for body features (See Figure 4). Figure 5 shows visual examples of outlier frames where mo-cap errors are likely due to behaviors such as flying or occlusions.", + "bbox": [ + 496, + 508, + 892, + 688 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The outlier filtering method introduces artificial gaps in the dataset. We computed the number of dropped frames and found that $96.1\\%$ of gaps are less than 30 frames (1 second) in length (see supplementary). Researchers in need of continuous temporal data can use gap-free segments or use interpolation to fill small gaps. For sake of completeness, we have included automatically rejected frames in the dataset.", + "bbox": [ + 496, + 689, + 893, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.3 Pose variation", + "text_level": 1, + "bbox": [ + 500, + 830, + 653, + 845 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We then compute the number of unique poses that each pigeon exhibit to understand the heterogeneity of pose present in the 3D-POP dataset. It is difficult to compute pose vari", + "bbox": [ + 496, + 854, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "21279", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/35abb15dcf3dc7c21b60c8974fde2ec8f7de791de5dde6688471023b94164896.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
RMSEMethod (px)BeakNoseLeft EyeRight EyeLeft ShoulderRight ShoulderTop KeelBottom KeelTail
RMSEBeforeFiltering10.17.97.57.58.48.79.49.98.8
RMSEAfterFiltering8.16.05.95.97.98.29.19.58.2
RMSEAfterRetraining8.46.56.46.38.08.29.19.58.4
", + "bbox": [ + 99, + 88, + 870, + 167 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2. Root mean squared 2D Euclidean error (px) of each keypoint with different data subsets and trained DLC 2D keypoint models. BeforeFiltering: Error of model trained on the full dataset with inference on frames before outliers were filtered. AfterFiltering: Errors of the model trained on the full dataset with inference on frames after outliers were filtered. AfterRetraining: Errors of the model trained on the filtered dataset with inference on frames after outliers were filtered", + "bbox": [ + 75, + 191, + 893, + 247 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/5702b9cb3eea2757ebc6f9cb99c1911395e52598807abb2cf93e7fa2705b19ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 133, + 260, + 419, + 404 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2f17d349fc139697d58bb5cae57dc3e2127496b2a8b36e5d7dc676ed7ed8eefc.jpg", + "image_caption": [ + "Figure 5. Example frames that are filtered automatically by the outlier analysis, with descriptions of the cause of annotation inaccuracy. Green labels represent annotations, and red labels represent prediction from the trained DLC 2D keypoint detection model." + ], + "image_footnote": [], + "bbox": [ + 133, + 412, + 415, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ation directly using the 6-DOF pose defined by markers because the coordinate system is not defined in a standardized way for each pigeon. To create a standardized comparison, we compute two planes defined by keypoint features to represent the alignment of the head and body in 3D space. The head plane is computed using three points (beak and eyes) and the body plane is computed using three points (shoulders and tail). In this manner, the orientations of all planes representing the pose of all individuals are defined using the same features and can be compared in a unified coordinate system. We use the normal of the planes to compute the angles with the canonical coordinate system (See supplementary). It is assumed that a degree of change in rotational angles of either head or body corresponds to a new pose. We found a total of 74,924 unique orientations of the head", + "bbox": [ + 75, + 672, + 468, + 900 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and 14,191 unique orientations of the body, and the combined 1.8 million unique poses present in the dataset. A graphical representation of the range of poses is provided in the supplementary material.", + "bbox": [ + 498, + 273, + 890, + 335 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 349, + 633, + 366 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.1. Marker-based + Markerless Hybrid Approach", + "text_level": 1, + "bbox": [ + 500, + 375, + 890, + 390 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The first experiment shows that markerless tracking algorithm trained on 3D-POP is able to solve 3D tracking for cases when mo-cap fails to track markers. The solution is useful as an increasing number of pre-existing experimental setups are designed to use marker-based mocap technologies for biological studies [10, 17, 21, 23, 38]. 
A hybrid tracking solution, that uses markerless tracking to fill the gaps of the mo-cap system has many potential applications for future behavior studies.", + "bbox": [ + 496, + 398, + 890, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We chose a 5 min sequence with a single individual and artificially removed $25\\%$ of mo-cap tracking data. The gaps are randomly introduced for a duration of 30-90 frames (1-3 seconds), to mimic tracking loss. We used the 2D keypoint DLC model (see 4.3) to detect keypoints from all 4 camera views and triangulate the results with sparse bundle adjustment. We compared the result with the ground truth and achieved avg. RMS error of $9.2 \\, \\mathrm{mm}$ (details in supplementary). A simple linear interpolation-based approach to fill gaps resulted in avg. RMS error of $52.1 \\, \\mathrm{mm}$ . The proposed solution is a viable application because biologists are likely to keep using motion-tracking technology until a robust solution is designed for markerless 3D tracking. However, we acknowledge that better solutions can be designed for a hybrid approach using temporal consistency in the future [20].", + "bbox": [ + 496, + 534, + 892, + 762 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Markerless Bird Tracking", + "text_level": 1, + "bbox": [ + 500, + 771, + 736, + 787 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This experiment shows that models trained with our dataset can be directly used to track birds without any markers attached to their bodies. This experiment works as a \"sanity check\" to ensure that models trained with 3D-POP dataset are not biased toward the presence of markers. The test also demonstrates the potential contribution of our method toward developing a complete markerless solution", + "bbox": [ + 496, + 794, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21280", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7d0f23d632aac8b761c77cd70dce5c6079009734f995b5bcc70f6444ee823ebd.jpg", + "image_caption": [ + "Figure 6. Pictures show that the 2D keypoint detection algorithm trained with the 3D-POP dataset can make predictions on videos with pigeons without any markers attached to the body." + ], + "image_footnote": [], + "bbox": [ + 81, + 74, + 465, + 175 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "for 3D tracking, posture estimation, and identification.", + "bbox": [ + 76, + 244, + 434, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Using a pre-trained object detection model (YOLOv5s [33]), we extracted the bounding box of a pigeon from a single individual sequence. We then used the 2D keypoint DLC model (see 4.3) to predict keypoints from the sequence. The models generalize well to the images of pigeons without markers (see Figure 6, supplementary video). The result is qualitatively checked, but sufficient to prove our claim. The same solution can be easily extended to multiple pigeon trials by designing a top-down approach (using YOLO) until better solutions are developed using 3D-POP.", + "bbox": [ + 75, + 260, + 468, + 411 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Manual Validation", + "text_level": 1, + "bbox": [ + 76, + 421, + 256, + 435 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This experiment demonstrates the validity of our assumption that keypoints on the body (shoulder, keel, etc.) behave like points on a rigid body. 
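The hybrid experiment in Sec. 5.1 above triangulates the DLC detections from the four calibrated views; the paper uses sparse bundle adjustment for this step. As a simplified stand-in, a linear DLT triangulation of a single keypoint could look like the sketch below, where the camera matrices and noise-free toy values are assumptions for illustration only.

```python
import numpy as np

def triangulate_dlt(proj_mats, points_2d):
    """Linear (DLT) triangulation of one keypoint observed in several views.
    proj_mats: list of (3, 4) projection matrices P = K @ [R | t];
    points_2d: list of (u, v) pixel detections, one per view."""
    A = []
    for P, (u, v) in zip(proj_mats, points_2d):
        A.append(u * P[2] - P[0])
        A.append(v * P[2] - P[1])
    _, _, vt = np.linalg.svd(np.asarray(A))
    X = vt[-1]
    return X[:3] / X[3]  # de-homogenize

def project(P, X):
    """Project a 3D point with a 3x4 projection matrix."""
    x = P @ np.append(X, 1.0)
    return x[:2] / x[2]

# toy two-camera rig (the dataset uses four views), 20 cm baseline
K = np.array([[1000.0, 0.0, 960.0], [0.0, 1000.0, 540.0], [0.0, 0.0, 1.0]])
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K @ np.hstack([np.eye(3), np.array([[-0.2], [0.0], [0.0]])])
X_true = np.array([0.10, -0.05, 2.00])
X_hat = triangulate_dlt([P1, P2], [project(P1, X_true), project(P2, X_true)])
print(X_hat)  # ~[0.10, -0.05, 2.00]
```

Bundle adjustment additionally refines the camera parameters and copes better with noisy detections than this closed-form solve, which is why the paper relies on it rather than plain DLT.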
We selected 1000 frames randomly and manually annotated keypoints for the body part. We compared the manual annotations with automatic ground truth annotations using PCK05 and PCK10 (percentage correct keypoint within $5\\%$ and $10\\%$ of bounding box width) metrics. We report an average PCK05 of $66\\%$ and PCK10 of $94\\%$ across all keypoints on the body (Table 3). We also visually quantified that only $2.8\\%$ of the frames are cases where birds are moving their wings, thus the simplified skeletal representation of the body is valid in over $97\\%$ of the dataset.", + "bbox": [ + 75, + 445, + 468, + 640 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/78b2e21077ce8e0bf65431da9caa304aa20224d3a000acfb3b2753cd23b77da1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MetricLeft ShoulderRight ShoulderTop KeelBottom KeelTail
PCK050.780.750.580.570.60
PCK100.980.980.940.890.92
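Read concretely, the PCK values above count how often the automatic annotation falls within a fraction of the bounding-box width of the manual one. A small sketch with hypothetical array names and toy data:

```python
import numpy as np

def pck(auto_xy, manual_xy, bbox_widths, thresh=0.05):
    """Percentage of Correct Keypoints for one keypoint type:
    fraction of frames where the automatic annotation lies within
    `thresh` * bounding-box width of the manual annotation.
    auto_xy, manual_xy: (n, 2); bbox_widths: (n,)."""
    dist = np.linalg.norm(np.asarray(auto_xy) - np.asarray(manual_xy), axis=1)
    return float(np.mean(dist <= thresh * np.asarray(bbox_widths)))

# toy example: 1000 frames of one body keypoint
rng = np.random.default_rng(1)
manual = rng.uniform(0, 2000, size=(1000, 2))
auto = manual + rng.normal(0, 8, size=manual.shape)
widths = rng.uniform(150, 300, size=1000)
print(pck(auto, manual, widths, 0.05), pck(auto, manual, widths, 0.10))
```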
", + "bbox": [ + 78, + 651, + 468, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3. PCK errors per body keypoint between manual annotation and 3DPOP annotation. PCK is defined as the percentage of points that are within $5\\%$ and $10\\%$ of the bounding box width", + "bbox": [ + 76, + 753, + 468, + 796 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Limitations and Future work", + "text_level": 1, + "bbox": [ + 76, + 829, + 344, + 845 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The annotation method presented in the paper largely relies on the assumption that the head and body mostly behave as rigid bodies. This assumption does not hold for certain", + "bbox": [ + 76, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "body parts such as the neck, tail end, or feet and limits the selection of keypoints at these body parts. For similar reasons, the proposed approach will not support annotation for flying birds or birds that change the shape of body parts while performing certain behaviors e.g. courtship [19].", + "bbox": [ + 496, + 90, + 890, + 167 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our approach inherently depends on the tracking accuracy of the mo-cap system. Users must maintain mo-cap systems regularly calibrated for consistent results. Another possible source of error in the annotation pipeline is video camera calibration and its temporal synchronization with the mo-cap system. We do show that our outlier detection method is effective at identifying noisy annotations, however, noise can still be present in the dataset. Finally, since the dataset was curated semi-automatically in an existing motion tracking setup, the data we provide is limited to an indoor environment.", + "bbox": [ + 496, + 167, + 890, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We have improved the existing state of the art for multi-animal tracking by adding complexity in the form of the number of individuals and camera views. In the future, we intend to develop lifting-based approaches [13, 15] to learn the 2D-3D mapping obtained in the 3D-POP dataset to track birds in outdoor environments.", + "bbox": [ + 496, + 332, + 890, + 421 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 436, + 617, + 452 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we introduced a novel method to use a mop-cap system for generating large-scale datasets with multiple animals. We demonstrate that our semi-automated method offers an alternative for generating high-quality datasets with animals without manual effort. We offer 3D-POP, the first dataset with ground truth for 3D posture prediction and identity tracking in birds, which is extremely difficult to achieve even with manual labor. 3D-POP dataset offers an opportunity for the vision community to work on a complex set of vision problems relevant to achieving markerless tracking of birds in indoor and outdoor environments. At the same time, our method will motivate biologists to create new datasets as they have access to and work with different types of animals.", + "bbox": [ + 496, + 462, + 890, + 674 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8. 
Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 686, + 686, + 704 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy - EXC 2117 (ID: 422037984). The Ethical Committee of Baden-Württemberg approved all the experiments (Regierungspräsidium Freiburg, Referat 35, License Number: 35-9185.81/G-19/107). M.N. acknowledges additional support from the Hungarian Academy of Sciences (grant no. 95152) and Eötvös Loránd University. I.C. also acknowledge Office of Naval Research (grant ONR, N00014-19-1-2556), Horizon Europe Marie Sklodowska-Curie Actions (860949) and the Max Planck Society.", + "bbox": [ + 496, + 712, + 890, + 878 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21281", + "bbox": [ + 478, + 925, + 517, + 936 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Gustavo Alarcón-Nieto, Jacob M Graving, James A Klarevas-Irby, Adriana A Maldonado-Chaparro, Inge Mueller, and Damien R Farine. An automated barcode tracking system for behavioural studies in birds. Methods in Ecology and Evolution, 9(6):1536-1547, 2018. 4", + "[2] Marc Badger, Yufu Wang, Adarsh Modh, Ammon Perkes, Nikos Kolotouros, Bernd G Pfrommer, Marc F Schmidt, and Kostas Daniilidis. 3d bird reconstruction: a dataset, model, and shape recovery from a single view. In European Conference on Computer Vision, pages 1-17. Springer, 2020. 1, 2, 3, 5", + "[3] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with openmonkeystudio. Nature communications, 11(1):1-12, 2020. 2", + "[4] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures Great and SMAL: Recovering the Shape and Motion of Animals from Video. In C.V. Jawahar, Hongdong Li, Greg Mori, and Konrad Schindler, editors, Computer Vision – ACCV 2018, Lecture Notes in Computer Science, pages 3–19, Cham, 2019. Springer International Publishing. 1, 2, 3", + "[5] Luis A Bolanos, Dongsheng Xiao, Nancy L Ford, Jeff M LeDue, Pankaj K Gupta, Carlos Doebeli, Hao Hu, Helge Rhodin, and Timothy H Murphy. A three-dimensional virtual mouse generates synthetic training data for behavioral analysis. Nature methods, 18(4):378-381, 2021. 3", + "[6] Marek L. Borowiec, Rebecca B. Dikow, Paul B. Frandsen, Alexander McKeeken, Gabriele Valentini, and Alexander E. White. Deep learning as a tool for ecology and evolution. Methods in Ecology and Evolution, 13(8):1640-1660, 2022. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/2041-210X.13901.1", + "[7] Katarzyna Bozek, Laetitia Hebert, Yoann Portugal, Alexander S Mikheyev, and Greg J Stephens. Markerless tracking of an entire honey bee colony. Nature communications, 12(1):1-13, 2021. 3", + "[8] Ray D Chard and Ralph H Gundlach. The structure of the eye of the homing pigeon. Journal of Comparative Psychology, 25(2):249, 1938. 6", + "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, June 2009. ISSN: 1063-6919. 
2", + "[10] Timothy W Dunn, Jesse D Marshall, Kyle S Severson, Diego E Aldarondo, David GC Hildebrand, Selmaan N Chettih, William L Wang, Amanda J Gellis, David E Carlson, Dmitriy Aronov, et al. Geometric deep learning enables 3d kinematic profiling across species and environments. Nature methods, 18(5):564-573, 2021. 2, 3, 7", + "[11] André C Ferreira, Liliana R Silva, Francesco Renna, Hanja B Brandl, Julien P Renoult, Damien R Farine, Rita Covas, and" + ], + "bbox": [ + 78, + 116, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Claire Doutrelant. Deep learning-based methods for individual recognition in small birds. Methods in Ecology and Evolution, 11(9):1072-1085, 2020. 1, 3", + "[12] Crystal Gagne, Jyoti Kini, Daniel Smith, and Mubarak Shah. Florida wildlife camera trap dataset. arXiv preprint arXiv:2106.12628, 2021. 1, 2", + "[13] Adam Gosztolai, Semih Günel, Victor Lobato-Ríos, Marco Pietro Abrate, Daniel Morales, Helge Rhodin, Pascal Fua, and Pavan Ramdya. LiftPose3D, a deep learning-based approach for transforming two-dimensional to three-dimensional poses in laboratory animals. Nature Methods, 18(8):975–981, Aug. 2021. Number: 8 Publisher: Nature Publishing Group. 8", + "[14] Jacob M Graving, Daniel Chae, Hemal Naik, Liang Li, Benjamin Koger, Blair R Costelloe, and Iain D Couzin. DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning. eLife, 8:e47994, Oct. 2019. Publisher: eLife Sciences Publications, Ltd. 1, 2, 3", + "[15] Semih Günel, Helge Rhodin, Daniel Morales, João Campagnolo, Pavan Ramdya, and Pascal Fua. Deepfly3d, a deep learning-based approach for 3d limb and appendage tracking in tethered, adult drosophila. *Elite*, 8:e48571, 2019. 2, 8", + "[16] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, jul 2014. 2", + "[17] Akihiro Itahara and Fumihiro Kano. \"Corvid Tracking Studio\": A custom-built motion capture system to track head movements of corvids. Japanese Journal of Animal Psychology, pages 72-1, 2022. 2, 3, 5, 7", + "[18] Noah T Jafferis, E Farrell Helbling, Michael Karpelson, and Robert J Wood. Untethered flight of an insect-sized flapping-wing microscale aerial vehicle. Nature, 570(7762):491-495, 2019. 1", + "[19] Judith Janisch, Elisa Perinot, Leonida Fusani, and Cliodhna Quigley. Deciphering choreographies of elaborate courtship displays of golden-collared manakins using markerless motion capture. Ethology, 127(7):550-562, 2021. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/eth.13161.8", + "[20] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13901-13908. IEEE, 2021. 1, 2, 7", + "[21] Fumihiro Kano, Hemal Naik, Góksel Keskin, Iain D. Couzin, and Mate Nagy. Head-tracking of freely-behaving pigeons in a motion-capture system reveals the selective use of visual field regions. Scientific Reports, 12(1):19113, Nov 2022. 2, 3, 5, 6, 7", + "[22] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. Rgbd-dog: Predicting canine pose from rgbd sensors. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8336-8345, 2020. 2" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21282", + "bbox": [ + 478, + 925, + 519, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[23] Marco KleinHeerenbrink, Lydia A France, Caroline H Brighton, and Graham K Taylor. Optimization of avian perching manoeuvres. Nature, 607(7917):91-96, 2022. 2, 7", + "[24] Rollyn Labuguen, Jumpei Matsumoto, Salvador Blanco Negrete, Hiroshi Nishimaru, Hisao Nishijo, Masahiko Takada, Yasuhiro Go, Ken-ichi Inoue, and Tomohiro Shibata. Macaquepose: A novel \"in the wild\" macaque monkey pose dataset for markerless motion capture. Frontiers in behavioral neuroscience, 14:581154, 2021. 2", + "[25] Jessy Lauer, Mu Zhou, Shaokai Ye, William Menegas, Steffen Schneider, Tanmay Nath, Mohammed Mostafizur Rahman, Valentina Di Santo, Daniel Soberanes, Guoping Feng, Venkatesh N. Murthy, George Lauder, Catherine Dulac, Mackenzie Weygandt Mathis, and Alexander Mathis. Multi-animal pose estimation, identification and tracking with DeepLabCut. Nature Methods, 19(4):496-504, Apr. 2022. Number: 4 Publisher: Nature Publishing Group. 1, 2, 3", + "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dolkar, and C. Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuyte-laars, editors, Computer Vision – ECCV 2014, Lecture Notes in Computer Science, pages 740–755, Cham, 2014. Springer International Publishing. 2", + "[27] Jesse D. Marshall, Ugne Klibaite, Amanda Gellis, Diego E. Aldarondo, Bence P. Ölveczky, and Timothy W. Dunn. The PAIR-R24M Dataset for Multi-animal 3D Pose Estimation. Technical report, bioRxiv, Nov. 2021. Section: New Results Type: article. 2, 3", + "[28] Alexander Mathis, Pranav Mamidanna, Kevin M. Cury, Taiga Abe, Venkatesh N. Murthy, Mackenzie Weygandt Mathis, and Matthias Bethge. DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience, 21(9):1281-1289, Sept. 2018. Number: 9 Publisher: Nature Publishing Group. 6", + "[29] Mackenzie Weygandt Mathis and Alexander Mathis. Deep learning tools for the measurement of animal behavior in neuroscience. Current Opinion in Neurobiology, 60:1-11, Feb. 2020. 1", + "[30] Máté Nagy, Gábor Vásárhelyi, Benjamin Pettit, Isabella Roberts-Mariani, Tamás Vicsek, and Dora Biro. Context-dependent hierarchies in pigeons. Proceedings of the National Academy of Sciences, 110(32):13049-13054, 2013. 3", + "[31] Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, and Jun Liu. Animal kingdom: A large and diverse dataset for animal behavior understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19023-19034, 2022. 2, 5", + "[32] Ali Nourizonoz, Robert Zimmermann, Chun Lum Andy Ho, Sebastien Pellat, Yannick Ormen, Clément Prévost-Solie, Gilles Reymond, Fabien Pifferi, Fabienne Aujard, Anthony Herrel, et al. Etholoop: automated closed-loop neuroethology in naturalistic environments. Nature methods, 17(10):1052-1059, 2020. 1", + "[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object de" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tection. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 8", + "[34] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise. arXiv preprint arXiv:1705.10694, 2017. 6", + "[35] Bernard Rosner. Percentage points for a generalized ESD many-outlier procedure. Technometrics, 25(2):165-172, 1983. 6", + "[36] John R Stowers, Maximilian Hofbauer, Renaud Bastien, Johannes Griessner, Peter Higgins, Sarfarazhussain Farooqui, Ruth M Fischer, Karin Nowikovsky, Wulf Haubensak, Iain D Couzin, et al. Virtual reality for freely moving animals. Nature methods, 14(10):995-1002, 2017. 1", + "[37] Alexandra Swanson, Margaret Kosmala, Chris Lintott, Robert Simpson, Arfon Smith, and Craig Packer. Snapshot serengeti, high-frequency annotated camera trap images of 40 mammalian species in an african savanna. Scientific data, 2(1):1-14, 2015. 1, 2", + "[38] Leslie M. Theunissen and Nikolaus F. Troje. Head Stabilization in the Pigeon: Role of Vision to Correct for Translational and Rotational Disturbances. Frontiers in Neuroscience, 11, 2017. 7", + "[39] Devis Tuia, Benjamin Kellenberger, Sara Beery, Blair R. Costelloe, Silvia Zuffi, Benjamin Risse, Alexander Mathis, Mackenzie W. Mathis, Frank van Langevelde, Tilo Burghardt, Roland Kays, Holger Klinck, Martin Wikelski, Iain D. Couzin, Grant van Horn, Margaret C. Crofoot, Charles V. Stewart, and Tanya Berger-Wolf. Perspectives in machine learning for wildlife conservation. Nature Communications, 13(1):792, Feb. 2022. Number: 1 Publisher: Nature Publishing Group. 1", + "[40] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8769-8778, 2018. 1, 2", + "[41] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 Dataset, July 2011. Issue: 2010-001 Num Pages: 8 Number: 2010-001 Place: Pasadena, CA Publisher: California Institute of Technology. 1, 2, 5", + "[42] Urs Waldmann, Hemal Naik, Nagy Mate, Fumihiro Kano, Iain D Couzin, Oliver Deussen, and Bastian Goldlücke. I-mpptet: Interactive multi-pigeon pose estimation and tracking. In DAGM German Conference on Pattern Recognition, pages 513-528. Springer, 2022. 2", + "[43] Tristan Walter and Iain D Couzin. Trex, a fast multi-animal tracking system with markerless identification, and 2d estimation of posture and visual fields. *Elife*, 10:e64000, 2021. 1, 2, 3", + "[44] Hal Whitehead. Analysing animal social structure. Animal behaviour, 53(5):1053-1067, 1997. 3", + "[45] Jessie L Williamson and Christopher C Witt. A lightweight backpack harness for tracking hummingbirds. Journal of Avian Biology, 52(9), 2021. 4" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21283", + "bbox": [ + 478, + 926, + 519, + 936 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[46] Shiting Xiao, Yufu Wang, Ammon Perkes, Bernd Pfrommer, Marc Schmidt, Kostas Daniilidis, and Marc Badger. Multiview tracking, re-id, and social network analysis of a flock of visually similar birds in an outdoor aviary. arXiv preprint arXiv:2212.00266, 2022. 3", + "[47] Yuan Yao, Praneet Bala, Abhiraj Mohan, Eliza Bliss-Moreau, Kristine Coleman, Sienna M. Freeman, Christopher J. 
Machado, Jessica Raper, Jan Zimmermann, Benjamin Y. Hayden, and Hyun Soo Park. OpenMonkeyChallenge: Dataset and Benchmark Challenges for Pose Estimation of Non-human Primates. International Journal of Computer Vision, Oct. 2022. 2", + "[48] Hang Yu, Yufei Xu, Jing Zhang, Wei Zhao, Ziyu Guan, and Dacheng Tao. Ap-10k: A benchmark for animal pose estimation in the wild, 2021. 2", + "[49] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3d menagerie: Modeling the 3d shape and pose of animals. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6365-6373, 2017. 3" + ], + "bbox": [ + 78, + 90, + 468, + 371 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21284", + "bbox": [ + 478, + 926, + 519, + 936 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_model.json b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4f858de15095a7a9cc5d5a5f17eb91bc48e341d9 --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_model.json @@ -0,0 +1,1982 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.1, + 0.131, + 0.872, + 0.177 + ], + "angle": 0, + "content": "3D-POP - An automated annotation approach to facilitate markerless 2D-3D tracking of freely moving birds with marker-based motion capture" + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.203, + 0.808, + 0.24 + ], + "angle": 0, + "content": "Hemal Naik\\(^{1234*}\\), Alex Hoi Hang Chan\\(^{12*}\\), Junran Yang\\(^{2}\\), Mathilde Delacoux\\(^{12}\\), Iain D. Couzin\\(^{123}\\), Fumihiro Kano\\(^{12\\dagger}\\), Máté Nagy\\(^{12356\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.258, + 0.908, + 0.383 + ], + "angle": 0, + "content": "\\(^{1}\\)Dept. of Collective Behavior and Dept. of Ecology of Animal Societies, Max Planck Institute of Animal Behavior, \\(^{2}\\)Dept. of Biology, University of Konstanz, \\(^{3}\\)Centre for the Advanced Study of Collective Behaviour, University of Konstanz, \\(^{4}\\)Computer Aided Medial Procedures, Informatik Department, Technische Universität München, \\(^{5}\\)Dept. of Biological Physics, Eötvös Loránd University, \\(^{6}\\)MTA-ELTE 'Lendület' Collective Behaviour Research Group, Hungarian Academy of Sciences. *,† contributed equally. 
Full affiliation available in supplementary {hnaik, icouzin}@ab.mpg.de, nagymate@hal.elte.hu, {hoi-hang.chan, junran.yang, mathilde.delacoux, fumihiro.kano}@uni-konstanz.de" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.415, + 0.314, + 0.431 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.449, + 0.474, + 0.796 + ], + "angle": 0, + "content": "Recent advances in machine learning and computer vision are revolutionizing the field of animal behavior by enabling researchers to track the poses and locations of freely moving animals without any marker attachment. However, large datasets of annotated images of animals for markerless pose tracking, especially high-resolution images taken from multiple angles with accurate 3D annotations, are still scant. Here, we propose a method that uses a motion capture (mo-cap) system to obtain a large amount of annotated data on animal movement and posture (2D and 3D) in a semi-automatic manner. Our method is novel in that it extracts the 3D positions of morphological keypoints (e.g. eyes, beak, tail) in reference to the positions of markers attached to the animals. Using this method, we obtained, and offer here, a new dataset - 3D-POP with approximately 300k annotated frames (4 million instances) in the form of videos having groups of one to ten freely moving birds from 4 different camera views in a \\(3.6m \\times 4.2m\\) area. 3D-POP is the first dataset of flocking birds with accurate keypoint annotations in 2D and 3D along with bounding box and individual identities and will facilitate the development of solutions for problems of 2D to 3D markerless pose, trajectory tracking, and identification in birds." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.829, + 0.21, + 0.845 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Computer vision and machine learning are revolutionizing many facets of conventional research methods. For example, dataset-driven machine learning methods have" + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.415, + 0.795, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.563, + 0.892, + 0.59 + ], + "angle": 0, + "content": "Figure 1. Definition of morphological keypoints offered in the 3D-POP dataset" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.625, + 0.895, + 0.838 + ], + "angle": 0, + "content": "demonstrated remarkable success in the field of animal behavior, in tasks related to object detection [40, 41], tracking and individual identification [11, 25, 43], species recognition [40], 2D pose estimation [14, 29] and 3D pose estimation [2, 4]. These automatic methods not only reduce the required labor and errors associated with manual coding of behaviours [6, 39] but also facilitate long-term continuous monitoring of animal behavior in both indoor (lab) [32, 36] and outdoor (wild) settings [12, 37]. Engineering and robotics experts use the data on animal locomotion to reverse-engineer the key mechanisms underlying behaviors and movements of animals [18, 20]. The development of new techniques critically depends on the quality of publicly-available datasets with accurate annotations." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.895, + 0.901 + ], + "angle": 0, + "content": "Creating large datasets with animals is particularly difficult because every species has distinct morphology, and also because it is generally challenging to film freely moving animals in a controlled environment. It is thus important for" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.925, + 0.521, + 0.937 + ], + "angle": 0, + "content": "21274" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.244 + ], + "angle": 0, + "content": "datasets to include a wide range of species and behaviors to maximize the practical application of machine learning methods for animal tracking. Although animals have been included in many popular image datasets collected from the internet such as ImageNet [9] and COCO [26], those datasets have not fulfilled more specific needs of animal behavior researchers. Hence, recently several datasets have been created with a focus on animal behavior research, such as species classification [12, 37, 40, 41, 48], behavioral classification [27, 31, 48] and posture tracking [2, 10, 15, 24, 47]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.246, + 0.473, + 0.608 + ], + "angle": 0, + "content": "The most common approach for creating datasets of animals is through manual annotations in the image space (2D). As a result, most solutions to single / multiple animal detection, tracking, or pose estimation problems are limited to the 2D space [14, 25], or use 2D image projections to validate the results of 3D predictions without ground truth [2, 4]. For nonhuman animals, a dataset similar to Human 3.6M [16] is necessary to develop solutions for problems on 2D/3D tracking and posture prediction with a range of constraints, such as single or multiviews, single or multi-individual, and tracking using single frame or temporal consistency. More recently, marker-based motion-capture technology has been used to create 3D datasets for rats [10] and dogs [22] with one individual. The application of mo-cap for animal behaviour studies has also increased in popularity, such as studying flight kinematics [23] and gaze behavior in a freely moving group [17, 21]. It is clear that datasets with mo-cap will not only enhance the size of the dataset but also improve the accuracy of annotations, thus providing a large 2D/3D ground truth dataset for the animal position, posture, and identity tracking. However, despite its potential, researchers have only begun using mo-cap for behavior studies, and further work is required in terms of method development and dataset collection." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.611, + 0.473, + 0.793 + ], + "angle": 0, + "content": "We propose a new mo-cap-based approach to create large-scale datasets with a bird species (homing pigeons, Columba livia), and provide a complete code base for further applications to other species. Along with 2D-3D posture, the dataset also offers annotations for 2D-3D movement trajectories (position) with ground truth on identities for up to 18 individuals. We overcame the unique challenge of needing to attach reflective markers on desired but often inaccessible morphological keypoints on animal bodies and instead determined the relative 3D position of these keypoints to markers attached on accessible parts of the animal (Figure 1)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.473, + 0.903 + ], + "angle": 0, + "content": "The method enables a large amount of training data to be generated in a semi-automatic manner with minimal time investment and human labor. Moreover, by tracking freely-moving animals in a relatively large area (3.6m x 4.2m), we were able to track a variety of naturalistic behaviors in a flock consisting of up to 10 individuals under realistic experimental conditions. Finally, we demonstrate through a" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.138 + ], + "angle": 0, + "content": "series of experiments that our method is consistent and the CNN models trained on our dataset are able to predict the postures of birds with no markers attached to their bodies." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.152, + 0.655, + 0.168 + ], + "angle": 0, + "content": "2. State of the Art" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.177, + 0.622, + 0.193 + ], + "angle": 0, + "content": "2.1. 2D posture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.201, + 0.892, + 0.352 + ], + "angle": 0, + "content": "Animal Kingdom [31] is by far the largest dataset with 50 hours of video annotations that include 850 species of varied taxa (fish, birds, mammals, etc.), focusing on a generalizable solution for 2D pose estimation and activity recognition for a single individual. Other notable datasets contain images instead of videos and focus on capturing variations in terms of specific taxa e.g. mammals [48], birds [41] and monkeys [47], or specific species e.g. zebras [14], all of these focus on solving problems for a single individual recorded from a single viewpoint." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.353, + 0.892, + 0.518 + ], + "angle": 0, + "content": "Datasets based on single animal-based solutions are sufficient for some cases and rely on detection-based top-down approaches for extending the method for tracking the posture of multiple individuals [42]. There are few datasets that offer posture annotations for multiple individuals [2,24,25]. The problem of tracking multiple individuals is often simplified by placing the cameras above the animals, which minimizes occlusions [25, 43]. Tracking multiple individuals from side views may require multiple views, which may be important to resolve occlusions when animals interact in 3D spaces e.g. Cowbird dataset [2]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.519, + 0.892, + 0.595 + ], + "angle": 0, + "content": "The existing datasets have motivated the development of various methods for posture estimation. However, reliance on manual annotations limits the complexity of datasets in terms of the number of viewpoints or the number of individuals, especially for video sequences." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.605, + 0.622, + 0.621 + ], + "angle": 0, + "content": "2.2.3D posture" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.794 + ], + "angle": 0, + "content": "Datasets with ground truth on 3D posture are relatively difficult to obtain with a group of animals. One popular method for obtaining 3D ground truth posture is the triangulation of 2D postures using multiple views to record animals. Acinoset [20] (leopard in wild), Fly3D [15] (fly in a lab) and OpenMonkeyStudio [3] (macaque in a lab) use triangulation-based approaches to provide 3D posture of single individuals. 
The images for these datasets are also annotated manually and, therefore, the accuracy of the computed 3D pose depends on the quality of annotation and calibration." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.892, + 0.902 + ], + "angle": 0, + "content": "An alternative approach is to use marker-based mo-cap with a skeleton tracking feature as used with humans [16]. Kearney et al. [22] used motion capture to generate 3D ground truth for dogs and combine their approach with depth sensors (RGB-D) with the aim of designing markerless tracking based on RGBD sensors (63 to 82 markers). Dunn et al. [10] offered Rat 7M dataset using mo-cap with" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.925, + 0.521, + 0.937 + ], + "angle": 0, + "content": "21275" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.73, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.74, + 0.105, + 0.895, + 0.369 + ], + "angle": 0, + "content": "Figure 2. Illustration showing the experimental setup, with different defined coordinate systems, including the Vicon global coordinate system \\((\\mathrm{O_{Vicon}})\\), the camera coordinate system for each RGB camera \\((\\mathrm{O_{Camera}})\\), and the head \\((\\mathrm{O_{head}})\\) and backpack \\((\\mathrm{O_{backpack}})\\) coordinate system for each pigeon subject A) Detailed floor plan for data collection. B) Pigeon subject, with corresponding head and backpack markers and coordinate systems" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.395, + 0.47, + 0.456 + ], + "angle": 0, + "content": "RGB cameras and 20 markers. These datasets are useful for solving posture problems for a single individual from multiple views and offer the option of using temporal consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.47, + 0.655 + ], + "angle": 0, + "content": "Recently, Marshall et al. published PAIR-R24M [27], the first dataset with 3D ground truth with more than one animal, a pair of rats, using the approach of Dunn et al. [10]. Motion capture systems offer the huge advantage of creating millions of annotations in an automatic manner with high accuracy and low noise. The skeleton tracking feature with the mo-cap is primarily designed for tracking human posture and relies on a large number of markers. Additionally, the marker patterns have to be unique (at least partially) for maintaining the identity of each individual. Marker placement is a also limitation for smaller species and wild animals that lack the tolerance for having markers placed on specific locations of their body." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Lack of ground truth in 3D posture had led to innovative work of predicting 3D posture using 2D keypoints and silhouettes [2,4] or using synthetic datasets [5] or toys [49]. These approaches are promising but lack quantitative evaluation for robust practical applications. Computer vision literature on 3D posture problems mainly focuses on extracting as much of detail as possible. For animal behavior experiments information required from videos is always defined in the context of the experiment. It is worth noting that for birds, tracking the head and body orientations could be sufficient to quantify many key behaviors in ground-foraging contexts, such as feeding (pecking ground), preening, vigilance (head scanning), courtship (head bowing), or walking. 
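To make the preceding point concrete: with a few rigid keypoints, head or body orientation is a couple of lines of linear algebra. The sketch below derives a head-plane normal from the beak and the two eyes and measures its tilt against the vertical, in the spirit of the pose-variation analysis in Sec. 4.3.3; the toy coordinates and names are assumptions for illustration.

```python
import numpy as np

def plane_normal(p1, p2, p3):
    """Unit normal of the plane through three 3D keypoints."""
    n = np.cross(np.asarray(p2) - np.asarray(p1), np.asarray(p3) - np.asarray(p1))
    return n / np.linalg.norm(n)

def angle_to_axis(normal, axis=(0.0, 0.0, 1.0)):
    """Angle (degrees) between a plane normal and a canonical axis."""
    axis = np.asarray(axis) / np.linalg.norm(axis)
    return float(np.degrees(np.arccos(np.clip(np.dot(normal, axis), -1.0, 1.0))))

# toy head keypoints (metres): beak, left eye, right eye
beak = np.array([0.05, 0.0, 0.30])
l_eye = np.array([0.0, 0.015, 0.31])
r_eye = np.array([0.0, -0.015, 0.31])
head_normal = plane_normal(beak, l_eye, r_eye)
print(angle_to_axis(head_normal))  # tilt of the head plane relative to vertical
```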
Measuring the head direction in 3D also allows gaze reconstruction [17, 21], to be applied in the study of social cognition and collective behavior." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.394, + 0.804, + 0.411 + ], + "angle": 0, + "content": "2.3. Multi-object tracking with identity" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.418, + 0.892, + 0.522 + ], + "angle": 0, + "content": "Identity recognition is a critical problem to solve in the context of biological studies, especially when tracking the behavior of multiple interacting individuals over long periods of time. Tracking and identification of multiple individuals in large groups are especially exciting to quantify group-level behaviors like social networks [44, 46], dominance, or leadership [30]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.893, + 0.689 + ], + "angle": 0, + "content": "For indoor experiments, the number of individuals is often controlled and the identification problem is linked with the tracking of animals [25, 43]. The task of tracking and identification is often resolved together using markers [14, 30] or marker-less [7, 11, 43] methods. The existing solutions perform well with specific perspectives (top-down view) and thus often fail to resolve cases of occlusion. Robust evaluation of simultaneous identification and tracking methods is difficult because true ground truth for identities is often not available in datasets with multiple animals or available for only a very short duration [25]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.764 + ], + "angle": 0, + "content": "There are many good datasets available to independently solve problems of posture estimation, detection, tracking, and identification. Very few datasets offer the possibility of solving all of these problems simultaneously in realistic experimental scenarios." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We aim to fill this gap with our contribution of a semi-automatic method for producing new datasets with animals. Our dataset, 3D-POP, includes video recordings of 18 unique pigeons in various group sizes (1,2,5,10) from multiple views. We offer ground truth for identity, 2D-3D trajectories, and 2D-3D posture mapping for all individuals across the entire dataset (300K frames). The dataset also consists of annotations for object detection in the form of bounding boxes." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.521, + 0.938 + ], + "angle": 0, + "content": "21276" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.091, + 0.176, + 0.105 + ], + "angle": 0, + "content": "3. Methods" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.115, + 0.267, + 0.133 + ], + "angle": 0, + "content": "3.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.14, + 0.473, + 0.368 + ], + "angle": 0, + "content": "The dataset was collected from pigeons moving on a jute fabric \\((3.6\\mathrm{m}\\times 4.2\\mathrm{m})\\) onto which we evenly scattered grains to encourage the birds to feed in that area (Figure 2A). This feeding area was located inside a large enclosure equipped with a mo-cap system \\((15\\mathrm{m}\\times 7\\mathrm{m}\\times 4\\mathrm{m})\\). This mo-cap system consists of 30 motion capture cameras (12 Vicon Vero 2.2, 18 Vicon Vantage-5 cameras; \\(100\\mathrm{Hz}\\)) and can track the 3D positions of reflective markers with submillimeter precision. 
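Since the mo-cap samples at 100 Hz while the RGB cameras described next record at 30 Hz, each video frame must be paired with the temporally closest 6-DOF sample once both streams share a clock (the paper synchronizes them with an LED flash box; the details are in the supplementary). A minimal nearest-timestamp matching sketch, assuming both streams already carry timestamps in seconds:

```python
import numpy as np

def nearest_mocap_sample(frame_ts, mocap_ts):
    """For every video frame timestamp, index of the closest mo-cap sample.
    frame_ts: (n_frames,), mocap_ts: (n_samples,), both sorted, in seconds."""
    idx = np.searchsorted(mocap_ts, frame_ts)
    idx = np.clip(idx, 1, len(mocap_ts) - 1)
    left_closer = (frame_ts - mocap_ts[idx - 1]) < (mocap_ts[idx] - frame_ts)
    return np.where(left_closer, idx - 1, idx)

# toy streams: 30 Hz video, 100 Hz mo-cap, common clock
frame_ts = np.arange(0, 10, 1 / 30)
mocap_ts = np.arange(0, 10, 1 / 100)
match = nearest_mocap_sample(frame_ts, mocap_ts)
print(match[:5])  # mo-cap indices paired with the first five frames
```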
At the corners of the feeding area, we placed 4 high-resolution (4K) Sony action cameras (rx0ii, \\(30\\mathrm{Hz}\\), \\(3840\\mathrm{x}2160\\mathrm{p}\\)) mounted on standard tripods and also an Arduino-based synchronization box which flashes RGB and infrared LED lights every 5 seconds (Figure 2). Details on the synchronization and calibration of RGB cameras are provided in the supplementary text." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.378, + 0.24, + 0.394 + ], + "angle": 0, + "content": "3.2. Animal Subjects" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.402, + 0.473, + 0.721 + ], + "angle": 0, + "content": "Eighteen pigeons (Columba livia) were subjected to this study over 6 experimental days. Each day 10 pigeons were randomly selected from the population. Four \\(6.4\\mathrm{mm}\\) reflective markers were attached to each subject's head, and four \\(9.5\\mathrm{mm}\\) markers were attached to a customized backpack worn by each subject (Figure 2B). Generally, pigeons tolerate markers on the head with minimal effects on their behavior and habituate quickly to backpacks. Backpacks are also widely used for bird studies in behavioral ecology [1, 45]. The four \\(9.5\\mathrm{mm}\\) backpack markers had a unique geometric configuration to track the individual identities of each bird throughout each recording. Each day we performed up to 11 trials in the following order: 1 pigeon (4 trials), a pair of pigeons (4 trials), a flock of 5 pigeons (2 trials), and a flock of 10 pigeons (1 trial). It took approximately 1 hour to perform all trials each day. The total frames and duration of samples over the course of the experiment are described in Table 1. An additional session was recorded with birds without attaching any markers to validate the results of models trained on annotated data having birds with markers (see 5.2)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.731, + 0.304, + 0.748 + ], + "angle": 0, + "content": "3.3. Data annotation pipeline" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.755, + 0.277, + 0.77 + ], + "angle": 0, + "content": "3.3.1 Annotation principle" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.471, + 0.903 + ], + "angle": 0, + "content": "The movement of all features on a rigid body can be tracked simultaneously in a 3D space by computing 6-DOF pose of the rigid object. We use this principle to achieve annotations for keypoint features that are rigidly attached to the head and body of the bird. The four markers attached to the head and body (using a backpack) of each pigeon are used to compute 6-DOF pose of these body parts using the mo-cap system." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.242 + ], + "angle": 0, + "content": "By assuming that the head and body are rigid bodies in the case of walking or standing birds, we designed a pipeline to annotate the position of features on the head and body (beak, eyes, shoulder, and tail, etc.) in a few frames to compute their 3D location with respect to marker positions. Once computed, the relationship between markers and features does not change during the sequences and this ensures that 6-DOF pose of head and body for any frame can be used to project 3D positions of keypoint features onto the image space to obtain 2D annotations." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.243, + 0.892, + 0.364 + ], + "angle": 0, + "content": "All keypoints defined for the head lie on the skull of the bird (Figure 1). 
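The annotation principle amounts to a pair of rigid-body transforms: a keypoint expressed once in the head or backpack marker frame is moved into world coordinates by the tracked 6-DOF pose of every frame and then projected into each calibrated view, with a padded bounding box derived from the projections (the paper pads by 60 pixels). A minimal numpy sketch, assuming an undistorted pinhole camera and hypothetical names:

```python
import numpy as np

def world_to_part(p_world, R, t):
    """Express a triangulated keypoint in the marker (head/backpack) frame,
    given that body part's 6-DOF pose (R: 3x3, t: (3,)) in world coordinates."""
    return (np.asarray(p_world) - t) @ R  # equivalent to R.T @ (p - t)

def part_to_world(p_part, R, t):
    """Propagate stored keypoints (n, 3) into world coordinates for one frame."""
    return np.asarray(p_part) @ R.T + t

def project_pinhole(p_world, K, R_cam, t_cam):
    """Project world-space keypoints (n, 3) into one RGB view, no distortion."""
    p_cam = np.asarray(p_world) @ R_cam.T + t_cam
    uv = p_cam[:, :2] / p_cam[:, 2:3]
    return uv @ K[:2, :2].T + K[:2, 2]

def bbox_from_keypoints(uv, offset=60):
    """Bounding box around the projected keypoints, padded by `offset` pixels."""
    (x0, y0), (x1, y1) = uv.min(axis=0) - offset, uv.max(axis=0) + offset
    return x0, y0, x1, y1

# toy frame: identity head pose 2 m in front of a camera looking along +z
K = np.array([[1800.0, 0.0, 1920.0], [0.0, 1800.0, 1080.0], [0.0, 0.0, 1.0]])
head_keypoints = np.array([[0.05, 0.0, 0.0], [0.0, 0.015, 0.01], [0.0, -0.015, 0.01]])
world = part_to_world(head_keypoints, np.eye(3), np.array([0.0, 0.0, 2.0]))
uv = project_pinhole(world, K, np.eye(3), np.zeros(3))
print(bbox_from_keypoints(uv))
```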
The rigidity assumption is valid for these keypoints as they are rigidly placed on the skull. The keypoints chosen for the body lie actually on the rib cage and shoulders and exhibit a limited range of motion independent of each other. The rigidity assumption for the body is a reasonable assumption for the annotation pipeline if the birds do not move their wings and body(see 5.3)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.382, + 0.688, + 0.396 + ], + "angle": 0, + "content": "3.3.2 Manual annotation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.406, + 0.892, + 0.648 + ], + "angle": 0, + "content": "6-DOF (Degrees of freedom) tracking of the head and body is used to create a bounding box around the bird and crop the image of the focal individual for annotation. For each individual pigeon, 9 morphological keypoints (Figure 1) are annotated on 5-10 frames from all available view angles. Ideally, four frames (1 per view) is sufficient, but all keypoints are rarely visible within a single instance. Moreover, multiple measurements (3-5 frames per view) improve the robustness of computed 3D keypoint positions. The position of each keypoint is first triangulated using sparse bundle adjustment (in the camera coordinate system), then the relative position of the keypoint is computed with respect to the markers (in the coordinate system of the body part). Finally, all resultant 3D positions of keypoints are averaged and stored as a template file. This process is repeated for each bird on each recording day." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.666, + 0.722, + 0.682 + ], + "angle": 0, + "content": "3.3.3 Annotation propagation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In this final step, the ground truth data is generated for each recording using 3D keypoint positions computed in the previous step. The 3D positions of the keypoint features are transferred to the global coordinate system using 6-DOF pose. Next, keypoints are transferred to the coordinate system of each camera and projected to the image space (using calibration parameters). Bounding box annotations for object detection or tracking tasks are derived from keypoint projections. We determined that keypoints with the minimum and maximum x-y pixel values with an offset of 60 pixels are sufficient to define a bounding box. Finally, the 6-DOF tracking with the mo-cap system maintains the identity of each bird and this is also stored with 2D-3D information for the entire sequence." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.925, + 0.519, + 0.937 + ], + "angle": 0, + "content": "21277" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.119, + 0.092, + 0.442, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.093, + 0.637, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.231, + 0.338, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.231, + 0.637, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.185, + 0.892, + 0.364 + ], + "angle": 0, + "content": "Figure 3. Semi-automated annotation pipeline based on 6DOF tracking and RGB images. A) Input 6-DOF tracking data for head and backpack coordinate systems, and multi-view RGB videos. B) Manually annotate all visible keypoints from all views. 
C) Triangulate 3D position of all keypoints in the head and backpack coordinate system, assuming that keypoints and tracked markers are a rigid body. D) Apply across trials to get keypoints across all individuals" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.39, + 0.274, + 0.406 + ], + "angle": 0, + "content": "4. The 3D-POP dataset" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.424, + 0.265, + 0.44 + ], + "angle": 0, + "content": "4.1. Dataset Description" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.455, + 0.471, + 0.712 + ], + "angle": 0, + "content": "We present 3D-POP (3D Postures of Pigeons), a dataset that provides accurate ground truth for 3D keypoints, 2D keypoints, bounding boxes, and individual identities. The dataset includes RGB images from four high-resolution cameras (4K) and up to 6 hours of recordings divided into 57 sequences of 1,2,5, and 10 pigeons behaving naturalistically (Table 1). The dataset contains 3D coordinates (unfiltered) and 6-DOF pose obtained from the mo-cap facility along with calibration parameters. We also provide a total of 1 hour of recording (11 sequences) with pigeons (group size:1,2,5,11) without any markers on their body. These videos are provided for users to test the practical effectiveness of markerless solutions without the influence of markers. For realistic assessment, we show that a model trained with our dataset is able to infer keypoints on videos with pigeons without markers (see 5.2). Download the dataset here: https://doi.org/10.17617/3.HPBBC7" + }, + { + "type": "table", + "bbox": [ + 0.104, + 0.751, + 0.445, + 0.845 + ], + "angle": 0, + "content": "
| No. individuals | Annotated frames | Video length (min) |
|---|---|---|
| 1 | 95,513 | 55 |
| 2 | 135,547 | 119 |
| 5 | 44,240 | 85 |
| 10 | 20,321 | 91 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.868, + 0.468, + 0.897 + ], + "angle": 0, + "content": "Table 1. Dataset Summary: Total number of labeled frames with ground truth data for different group sizes." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.392, + 0.649, + 0.406 + ], + "angle": 0, + "content": "4.2.Customization" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.415, + 0.892, + 0.686 + ], + "angle": 0, + "content": "We release 3D-POPAP (3D-POP Annotation Pipeline) to manipulate the annotations of the dataset (Download: https://github.com/alexhang212/Dataset-3DPOP). As explained earlier, our use of the 6-DOF tracking decouples the keypoint annotations from the positions of markers used for mo-cap. Due to this design of the annotation approach, we can offer a unique dataset with the ability to easily add new 2D/3D keypoint annotations. The feature of keypoint modification is relevant for future work because defining the posture of birds is a difficult problem and depends on the final application. As of now, there are no datasets available with ground truth on the 3D posture of birds. The lack of ground truth has motivated novel ideas for solving the 3D reconstruction of bird pose using 2D annotations (silhouette and keypoints [2]). Among the available 2D datasets with birds, different numbers of keypoints are selected to define pose e.g. CUB-200: 15 [41], Cowbird dataset: 12 [2] and Animal Kingdom: 23 [31]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.687, + 0.891, + 0.779 + ], + "angle": 0, + "content": "To the best of our knowledge, the use of posture in behavior studies with birds is still limited and pose definition may rely completely on the nature of the study. Our inspiration for keypoint definition is inspired by gaze studies [17, 21] for which 9 keypoint-based posture sufficiently provides gaze direction with body and head orientation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.787, + 0.679, + 0.802 + ], + "angle": 0, + "content": "4.3. Dataset Validation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.81, + 0.892, + 0.9 + ], + "angle": 0, + "content": "The annotations in 3D-POP are obtained automatically, and therefore we designed three different tests to validate the accuracy and consistency of the annotations. The first test compares the accuracy of the 3D features computed with our method and the method presented by Kano et al. [21]." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.519, + 0.938 + ], + "angle": 0, + "content": "21278" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.244 + ], + "angle": 0, + "content": "The second test measures the consistency of the 3D/2D annotations across the dataset. This test is required to identify errors in annotation introduced by erroneous 3D mo-cap tracking due to occlusion, rapid movement of the birds, or calibration and synchronization errors of the cameras. It is important to perform this test because manually checking millions of annotations is not practical. Finally, the third test checks the variation in the 3D pose captured in all sequences. This test shows that the dataset is not biased to specific types of motion or poses." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.261, + 0.199, + 0.277 + ], + "angle": 0, + "content": "4.3.1 Accuracy" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.285, + 0.47, + 0.405 + ], + "angle": 0, + "content": "Kano et al. 
[21] use a calibration method to measure the 3D position of eyes w.r.t. mo-cap markers. This process involves a custom camera rig, made of 4 separate webcams that capture the head of each pigeon before data collection. We replicated this process to compute the ground truth 3D position of eyes and beak. Further, we compared the ground truth with the 3D position of the same features computed with our approach." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.406, + 0.471, + 0.556 + ], + "angle": 0, + "content": "We obtained root mean squared errors (RMSE) for all three features (Beak: \\(5.0\\mathrm{mm}\\), Left eye: \\(5.0\\mathrm{mm}\\), Right Eye: \\(4.9\\mathrm{mm}\\)), which is sufficient for pigeons considering that the diameter of the eyes is typically \\(6 - 7\\mathrm{mm}\\) [8]. This method provides an approximation of the accuracy for a few features only, and a better method is required to test the accuracy of 3D features measured on the body. It should be noted that our method has comparable accuracy and alleviates the need of using dedicated calibrations rigs and thus saves time." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.575, + 0.364, + 0.59 + ], + "angle": 0, + "content": "4.3.2 Consistency and outlier detection" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.853 + ], + "angle": 0, + "content": "It is reasonable to assume that a small portion of the mo-cap sequences contains tracking errors and will produce inaccurate 6-DOF poses for body parts. As a result, the annotation for all keypoints associated with the relevant body parts is likely to be wrong. We know that models trained with large datasets with small noise still generalize to a solution [34]. Yet, it is important to identify and remove these sequences from the dataset. Keeping this in mind, we design a consistency check with the intuition that a well-trained model for keypoint detector will predict 2D features with reasonable accuracies for all frames. Therefore, a comparison between predicted keypoints and propagated keypoints is likely to show very large errors for all keypoints (of the same body part), especially for frames with faulty mo-cap tracking (Figure 5). We use this idea to automatically determine the consistency of the annotations throughout the trial." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "We trained a state-of-the-art 2D keypoint detection model (DLC [28]) on 15177 images with a ResNet50 backbone for 30,000 iterations with the adam optimizer. There" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.089, + 0.703, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.089, + 0.903, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.23, + 0.895, + 0.302 + ], + "angle": 0, + "content": "Figure 4. Distribution of Euclidean distances (px) between model predictions of a trained DLC model and annotations, after outlier frames were filtered. Frequency shown in the y-axis and and only points of up to \\(10\\mathrm{px}\\) error is shown on the x-axis. A) Head keypoints B) Backpack keypoints" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.327, + 0.892, + 0.508 + ], + "angle": 0, + "content": "is no reliable method available for tracking the posture of multiple birds simultaneously, therefore we use a top-down approach and train on single individual data using bounding box annotations. 
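As a rough illustration of the consistency check described above (comparing keypoints predicted by the trained model against the propagated annotations), the sketch below computes per-frame, per-keypoint pixel errors. The arrays are synthetic, and the fixed threshold merely stands in for the GESD-based criterion used in the actual analysis.

```python
import numpy as np

def keypoint_errors(pred, annot):
    """Euclidean distance (px) between model predictions and propagated
    annotations for every frame and keypoint.

    pred, annot: (n_frames, n_keypoints, 2) 2D image coordinates
    returns: (n_frames, n_keypoints) distances
    """
    return np.linalg.norm(pred - annot, axis=-1)

# Synthetic stand-in data: 1000 frames, 9 keypoints, ~3 px model noise
rng = np.random.default_rng(0)
annot = rng.uniform(0, 2160, size=(1000, 9, 2))
pred = annot + rng.normal(scale=3.0, size=annot.shape)

err = keypoint_errors(pred, annot)
# Frames where more than one keypoint is far from the propagated
# annotation are candidates for faulty mo-cap tracking
# (illustrative threshold only).
suspect = (err > 10.0).sum(axis=1) > 1
print("flagged frames:", int(suspect.sum()))
```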
The training data excludes highly occluded frames with \\(>30\\%\\) overlap with another bounding box to avoid sequences that have multiple individuals in the bounding box due to close proximity. GESD outlier analysis [35] is used for each keypoint independently setting the expected outliers at \\(20\\%\\) of the dataset. The frames having more than 1 outlier keypoint are filtered out as we expect a higher number of outliers in case of erroneous annotations (explained above)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.893, + 0.689 + ], + "angle": 0, + "content": "Using this method we filtered out \\(2.9\\%\\) of the overall dataset, which lowered the average Euclidean distance between annotation and predictions (see Table 2). We used the filtered training data and retrained a model (14,722 images, 30,000 iterations, ResNet50 backbone, adam optimizer), but obtained similar errors compared to the previous model (see Table 2). The consistency check reveals that the annotations are largely consistent with model predictions, with a typical error of 2-3 pixels for head features and 3-4 px for body features (See Figure 4). Figure 5 shows visual examples of outlier frames where mo-cap errors are likely due to behaviors such as flying or occlusions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.894, + 0.81 + ], + "angle": 0, + "content": "The outlier filtering method introduces artificial gaps in the dataset. We computed the number of dropped frames and found that \\(96.1\\%\\) of gaps are less than 30 frames (1 second) in length (see supplementary). Researchers in need of continuous temporal data can use gap-free segments or use interpolation to fill small gaps. For sake of completeness, we have included automatically rejected frames in the dataset." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.832, + 0.655, + 0.846 + ], + "angle": 0, + "content": "4.3.3 Pose variation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.893, + 0.901 + ], + "angle": 0, + "content": "We then compute the number of unique poses that each pigeon exhibit to understand the heterogeneity of pose present in the 3D-POP dataset. It is difficult to compute pose vari" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.521, + 0.938 + ], + "angle": 0, + "content": "21279" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.1, + 0.089, + 0.872, + 0.169 + ], + "angle": 0, + "content": "
| Method (RMSE, px) | Beak | Nose | Left Eye | Right Eye | Left Shoulder | Right Shoulder | Top Keel | Bottom Keel | Tail |
|---|---|---|---|---|---|---|---|---|---|
| BeforeFiltering | 10.1 | 7.9 | 7.5 | 7.5 | 8.4 | 8.7 | 9.4 | 9.9 | 8.8 |
| AfterFiltering | 8.1 | 6.0 | 5.9 | 5.9 | 7.9 | 8.2 | 9.1 | 9.5 | 8.2 |
| AfterRetraining | 8.4 | 6.5 | 6.4 | 6.3 | 8.0 | 8.2 | 9.1 | 9.5 | 8.4 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.192, + 0.894, + 0.248 + ], + "angle": 0, + "content": "Table 2. Root mean squared 2D Euclidean error (px) of each keypoint with different data subsets and trained DLC 2D keypoint models. BeforeFiltering: Error of model trained on the full dataset with inference on frames before outliers were filtered. AfterFiltering: Errors of the model trained on the full dataset with inference on frames after outliers were filtered. AfterRetraining: Errors of the model trained on the filtered dataset with inference on frames after outliers were filtered" + }, + { + "type": "image", + "bbox": [ + 0.134, + 0.261, + 0.42, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.134, + 0.413, + 0.416, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.577, + 0.472, + 0.649 + ], + "angle": 0, + "content": "Figure 5. Example frames that are filtered automatically by the outlier analysis, with descriptions of the cause of annotation inaccuracy. Green labels represent annotations, and red labels represent prediction from the trained DLC 2D keypoint detection model." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.674, + 0.47, + 0.901 + ], + "angle": 0, + "content": "ation directly using the 6-DOF pose defined by markers because the coordinate system is not defined in a standardized way for each pigeon. To create a standardized comparison, we compute two planes defined by keypoint features to represent the alignment of the head and body in 3D space. The head plane is computed using three points (beak and eyes) and the body plane is computed using three points (shoulders and tail). In this manner, the orientations of all planes representing the pose of all individuals are defined using the same features and can be compared in a unified coordinate system. We use the normal of the planes to compute the angles with the canonical coordinate system (See supplementary). It is assumed that a degree of change in rotational angles of either head or body corresponds to a new pose. We found a total of 74,924 unique orientations of the head" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.275, + 0.892, + 0.337 + ], + "angle": 0, + "content": "and 14,191 unique orientations of the body, and the combined 1.8 million unique poses present in the dataset. A graphical representation of the range of poses is provided in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.35, + 0.634, + 0.367 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.375, + 0.892, + 0.391 + ], + "angle": 0, + "content": "5.1. Marker-based + Markerless Hybrid Approach" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.892, + 0.534 + ], + "angle": 0, + "content": "The first experiment shows that markerless tracking algorithm trained on 3D-POP is able to solve 3D tracking for cases when mo-cap fails to track markers. The solution is useful as an increasing number of pre-existing experimental setups are designed to use marker-based mocap technologies for biological studies [10, 17, 21, 23, 38]. A hybrid tracking solution, that uses markerless tracking to fill the gaps of the mo-cap system has many potential applications for future behavior studies." 
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.535, + 0.893, + 0.763 + ], + "angle": 0, + "content": "We chose a 5 min sequence with a single individual and artificially removed \\(25\\%\\) of mo-cap tracking data. The gaps are randomly introduced for a duration of 30-90 frames (1-3 seconds), to mimic tracking loss. We used the 2D keypoint DLC model (see 4.3) to detect keypoints from all 4 camera views and triangulate the results with sparse bundle adjustment. We compared the result with the ground truth and achieved avg. RMS error of \\(9.2 \\, \\mathrm{mm}\\) (details in supplementary). A simple linear interpolation-based approach to fill gaps resulted in avg. RMS error of \\(52.1 \\, \\mathrm{mm}\\). The proposed solution is a viable application because biologists are likely to keep using motion-tracking technology until a robust solution is designed for markerless 3D tracking. However, we acknowledge that better solutions can be designed for a hybrid approach using temporal consistency in the future [20]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.772, + 0.738, + 0.789 + ], + "angle": 0, + "content": "5.2. Markerless Bird Tracking" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.892, + 0.901 + ], + "angle": 0, + "content": "This experiment shows that models trained with our dataset can be directly used to track birds without any markers attached to their bodies. This experiment works as a \"sanity check\" to ensure that models trained with 3D-POP dataset are not biased toward the presence of markers. The test also demonstrates the potential contribution of our method toward developing a complete markerless solution" + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.521, + 0.938 + ], + "angle": 0, + "content": "21280" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.075, + 0.467, + 0.176 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.177, + 0.47, + 0.219 + ], + "angle": 0, + "content": "Figure 6. Pictures show that the 2D keypoint detection algorithm trained with the 3D-POP dataset can make predictions on videos with pigeons without any markers attached to the body." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.246, + 0.436, + 0.26 + ], + "angle": 0, + "content": "for 3D tracking, posture estimation, and identification." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.261, + 0.47, + 0.412 + ], + "angle": 0, + "content": "Using a pre-trained object detection model (YOLOv5s [33]), we extracted the bounding box of a pigeon from a single individual sequence. We then used the 2D keypoint DLC model (see 4.3) to predict keypoints from the sequence. The models generalize well to the images of pigeons without markers (see Figure 6, supplementary video). The result is qualitatively checked, but sufficient to prove our claim. The same solution can be easily extended to multiple pigeon trials by designing a top-down approach (using YOLO) until better solutions are developed using 3D-POP." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.422, + 0.258, + 0.436 + ], + "angle": 0, + "content": "5.3. Manual Validation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.446, + 0.47, + 0.641 + ], + "angle": 0, + "content": "This experiment demonstrates the validity of our assumption that keypoints on the body (shoulder, keel, etc.) behave like points on a rigid body. We selected 1000 frames randomly and manually annotated keypoints for the body part. 
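The comparison reported next is based on the PCK metric (percentage of correct keypoints within a fraction of the bounding-box width). Purely as an illustration of how PCK@0.05 and PCK@0.10 can be computed, with synthetic, hypothetical arrays:

```python
import numpy as np

def pck(pred, gt, bbox_width, frac):
    """PCK@frac: fraction of keypoints whose prediction lies within
    frac * bounding-box width of the reference annotation.

    pred, gt: (n, 2) 2D keypoint coordinates in pixels
    bbox_width: (n,) bounding-box width for each sample
    """
    dist = np.linalg.norm(pred - gt, axis=-1)
    return float(np.mean(dist < frac * bbox_width))

# Synthetic stand-in data for one body keypoint over 1000 frames
rng = np.random.default_rng(1)
gt = rng.uniform(0, 2160, size=(1000, 2))
pred = gt + rng.normal(scale=8.0, size=gt.shape)
width = np.full(1000, 200.0)
print(pck(pred, gt, width, 0.05), pck(pred, gt, width, 0.10))
```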
We compared the manual annotations with automatic ground truth annotations using PCK05 and PCK10 (percentage correct keypoint within \\(5\\%\\) and \\(10\\%\\) of bounding box width) metrics. We report an average PCK05 of \\(66\\%\\) and PCK10 of \\(94\\%\\) across all keypoints on the body (Table 3). We also visually quantified that only \\(2.8\\%\\) of the frames are cases where birds are moving their wings, thus the simplified skeletal representation of the body is valid in over \\(97\\%\\) of the dataset." + }, + { + "type": "table", + "bbox": [ + 0.08, + 0.652, + 0.469, + 0.732 + ], + "angle": 0, + "content": "
| Metric | Left Shoulder | Right Shoulder | Top Keel | Bottom Keel | Tail |
|---|---|---|---|---|---|
| PCK05 | 0.78 | 0.75 | 0.58 | 0.57 | 0.60 |
| PCK10 | 0.98 | 0.98 | 0.94 | 0.89 | 0.92 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.755, + 0.469, + 0.797 + ], + "angle": 0, + "content": "Table 3. PCK errors per body keypoint between manual annotation and 3DPOP annotation. PCK is defined as the percentage of points that are within \\(5\\%\\) and \\(10\\%\\) of the bounding box width" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.83, + 0.345, + 0.846 + ], + "angle": 0, + "content": "6. Limitations and Future work" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The annotation method presented in the paper largely relies on the assumption that the head and body mostly behave as rigid bodies. This assumption does not hold for certain" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.168 + ], + "angle": 0, + "content": "body parts such as the neck, tail end, or feet and limits the selection of keypoints at these body parts. For similar reasons, the proposed approach will not support annotation for flying birds or birds that change the shape of body parts while performing certain behaviors e.g. courtship [19]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.168, + 0.892, + 0.332 + ], + "angle": 0, + "content": "Our approach inherently depends on the tracking accuracy of the mo-cap system. Users must maintain mo-cap systems regularly calibrated for consistent results. Another possible source of error in the annotation pipeline is video camera calibration and its temporal synchronization with the mo-cap system. We do show that our outlier detection method is effective at identifying noisy annotations, however, noise can still be present in the dataset. Finally, since the dataset was curated semi-automatically in an existing motion tracking setup, the data we provide is limited to an indoor environment." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.892, + 0.422 + ], + "angle": 0, + "content": "We have improved the existing state of the art for multi-animal tracking by adding complexity in the form of the number of individuals and camera views. In the future, we intend to develop lifting-based approaches [13, 15] to learn the 2D-3D mapping obtained in the 3D-POP dataset to track birds in outdoor environments." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.437, + 0.619, + 0.453 + ], + "angle": 0, + "content": "7. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.463, + 0.892, + 0.675 + ], + "angle": 0, + "content": "In this paper, we introduced a novel method to use a mop-cap system for generating large-scale datasets with multiple animals. We demonstrate that our semi-automated method offers an alternative for generating high-quality datasets with animals without manual effort. We offer 3D-POP, the first dataset with ground truth for 3D posture prediction and identity tracking in birds, which is extremely difficult to achieve even with manual labor. 3D-POP dataset offers an opportunity for the vision community to work on a complex set of vision problems relevant to achieving markerless tracking of birds in indoor and outdoor environments. At the same time, our method will motivate biologists to create new datasets as they have access to and work with different types of animals." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.688, + 0.687, + 0.705 + ], + "angle": 0, + "content": "8. 
Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.713, + 0.892, + 0.879 + ], + "angle": 0, + "content": "Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy - EXC 2117 (ID: 422037984). The Ethical Committee of Baden-Württemberg approved all the experiments (Regierungspräsidium Freiburg, Referat 35, License Number: 35-9185.81/G-19/107). M.N. acknowledges additional support from the Hungarian Academy of Sciences (grant no. 95152) and Eötvös Loránd University. I.C. also acknowledge Office of Naval Research (grant ONR, N00014-19-1-2556), Horizon Europe Marie Sklodowska-Curie Actions (860949) and the Max Planck Society." + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.518, + 0.938 + ], + "angle": 0, + "content": "21281" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.186 + ], + "angle": 0, + "content": "[1] Gustavo Alarcón-Nieto, Jacob M Graving, James A Klarevas-Irby, Adriana A Maldonado-Chaparro, Inge Mueller, and Damien R Farine. An automated barcode tracking system for behavioural studies in birds. Methods in Ecology and Evolution, 9(6):1536-1547, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.188, + 0.472, + 0.27 + ], + "angle": 0, + "content": "[2] Marc Badger, Yufu Wang, Adarsh Modh, Ammon Perkes, Nikos Kolotouros, Bernd G Pfrommer, Marc F Schmidt, and Kostas Daniilidis. 3d bird reconstruction: a dataset, model, and shape recovery from a single view. In European Conference on Computer Vision, pages 1-17. Springer, 2020. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.273, + 0.47, + 0.342 + ], + "angle": 0, + "content": "[3] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with openmonkeystudio. Nature communications, 11(1):1-12, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.345, + 0.47, + 0.442 + ], + "angle": 0, + "content": "[4] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures Great and SMAL: Recovering the Shape and Motion of Animals from Video. In C.V. Jawahar, Hongdong Li, Greg Mori, and Konrad Schindler, editors, Computer Vision – ACCV 2018, Lecture Notes in Computer Science, pages 3–19, Cham, 2019. Springer International Publishing. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.444, + 0.47, + 0.514 + ], + "angle": 0, + "content": "[5] Luis A Bolanos, Dongsheng Xiao, Nancy L Ford, Jeff M LeDue, Pankaj K Gupta, Carlos Doebeli, Hao Hu, Helge Rhodin, and Timothy H Murphy. A three-dimensional virtual mouse generates synthetic training data for behavioral analysis. Nature methods, 18(4):378-381, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.516, + 0.47, + 0.612 + ], + "angle": 0, + "content": "[6] Marek L. Borowiec, Rebecca B. Dikow, Paul B. Frandsen, Alexander McKeeken, Gabriele Valentini, and Alexander E. White. Deep learning as a tool for ecology and evolution. Methods in Ecology and Evolution, 13(8):1640-1660, 2022. 
_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/2041-210X.13901.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.615, + 0.47, + 0.67 + ], + "angle": 0, + "content": "[7] Katarzyna Bozek, Laetitia Hebert, Yoann Portugal, Alexander S Mikheyev, and Greg J Stephens. Markerless tracking of an entire honey bee colony. Nature communications, 12(1):1-13, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.673, + 0.47, + 0.713 + ], + "angle": 0, + "content": "[8] Ray D Chard and Ralph H Gundlach. The structure of the eye of the homing pigeon. Journal of Comparative Psychology, 25(2):249, 1938. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.716, + 0.47, + 0.785 + ], + "angle": 0, + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, June 2009. ISSN: 1063-6919. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.788, + 0.47, + 0.87 + ], + "angle": 0, + "content": "[10] Timothy W Dunn, Jesse D Marshall, Kyle S Severson, Diego E Aldarondo, David GC Hildebrand, Selmaan N Chettih, William L Wang, Amanda J Gellis, David E Carlson, Dmitriy Aronov, et al. Geometric deep learning enables 3d kinematic profiling across species and environments. Nature methods, 18(5):564-573, 2021. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[11] André C Ferreira, Liliana R Silva, Francesco Renna, Hanja B Brandl, Julien P Renoult, Damien R Farine, Rita Covas, and" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "Claire Doutrelant. Deep learning-based methods for individual recognition in small birds. Methods in Ecology and Evolution, 11(9):1072-1085, 2020. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[12] Crystal Gagne, Jyoti Kini, Daniel Smith, and Mubarak Shah. Florida wildlife camera trap dataset. arXiv preprint arXiv:2106.12628, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.277 + ], + "angle": 0, + "content": "[13] Adam Gosztolai, Semih Günel, Victor Lobato-Ríos, Marco Pietro Abrate, Daniel Morales, Helge Rhodin, Pascal Fua, and Pavan Ramdya. LiftPose3D, a deep learning-based approach for transforming two-dimensional to three-dimensional poses in laboratory animals. Nature Methods, 18(8):975–981, Aug. 2021. Number: 8 Publisher: Nature Publishing Group. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.278, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[14] Jacob M Graving, Daniel Chae, Hemal Naik, Liang Li, Benjamin Koger, Blair R Costelloe, and Iain D Couzin. DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning. eLife, 8:e47994, Oct. 2019. Publisher: eLife Sciences Publications, Ltd. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.349, + 0.892, + 0.404 + ], + "angle": 0, + "content": "[15] Semih Günel, Helge Rhodin, Daniel Morales, João Campagnolo, Pavan Ramdya, and Pascal Fua. Deepfly3d, a deep learning-based approach for 3d limb and appendage tracking in tethered, adult drosophila. *Elite*, 8:e48571, 2019. 
2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.406, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[16] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, jul 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.477, + 0.892, + 0.533 + ], + "angle": 0, + "content": "[17] Akihiro Itahara and Fumihiro Kano. \"Corvid Tracking Studio\": A custom-built motion capture system to track head movements of corvids. Japanese Journal of Animal Psychology, pages 72-1, 2022. 2, 3, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.534, + 0.892, + 0.588 + ], + "angle": 0, + "content": "[18] Noah T Jafferis, E Farrell Helbling, Michael Karpelson, and Robert J Wood. Untethered flight of an insect-sized flapping-wing microscale aerial vehicle. Nature, 570(7762):491-495, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.591, + 0.892, + 0.673 + ], + "angle": 0, + "content": "[19] Judith Janisch, Elisa Perinot, Leonida Fusani, and Cliodhna Quigley. Deciphering choreographies of elaborate courtship displays of golden-collared manakins using markerless motion capture. Ethology, 127(7):550-562, 2021. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/eth.13161.8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.676, + 0.892, + 0.759 + ], + "angle": 0, + "content": "[20] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13901-13908. IEEE, 2021. 1, 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.761, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[21] Fumihiro Kano, Hemal Naik, Góksel Keskin, Iain D. Couzin, and Mate Nagy. Head-tracking of freely-behaving pigeons in a motion-capture system reveals the selective use of visual field regions. Scientific Reports, 12(1):19113, Nov 2022. 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.832, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[22] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. Rgbd-dog: Predicting canine pose from rgbd sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8336-8345, 2020. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.926, + 0.521, + 0.938 + ], + "angle": 0, + "content": "21282" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[23] Marco KleinHeerenbrink, Lydia A France, Caroline H Brighton, and Graham K Taylor. Optimization of avian perching manoeuvres. Nature, 607(7917):91-96, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.231 + ], + "angle": 0, + "content": "[24] Rollyn Labuguen, Jumpei Matsumoto, Salvador Blanco Negrete, Hiroshi Nishimaru, Hisao Nishijo, Masahiko Takada, Yasuhiro Go, Ken-ichi Inoue, and Tomohiro Shibata. Macaquepose: A novel \"in the wild\" macaque monkey pose dataset for markerless motion capture. Frontiers in behavioral neuroscience, 14:581154, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.471, + 0.355 + ], + "angle": 0, + "content": "[25] Jessy Lauer, Mu Zhou, Shaokai Ye, William Menegas, Steffen Schneider, Tanmay Nath, Mohammed Mostafizur Rahman, Valentina Di Santo, Daniel Soberanes, Guoping Feng, Venkatesh N. Murthy, George Lauder, Catherine Dulac, Mackenzie Weygandt Mathis, and Alexander Mathis. Multi-animal pose estimation, identification and tracking with DeepLabCut. Nature Methods, 19(4):496-504, Apr. 2022. Number: 4 Publisher: Nature Publishing Group. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.357, + 0.47, + 0.453 + ], + "angle": 0, + "content": "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dolkar, and C. Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuyte-laars, editors, Computer Vision – ECCV 2014, Lecture Notes in Computer Science, pages 740–755, Cham, 2014. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.455, + 0.469, + 0.523 + ], + "angle": 0, + "content": "[27] Jesse D. Marshall, Ugne Klibaite, Amanda Gellis, Diego E. Aldarondo, Bence P. Ölveczky, and Timothy W. Dunn. The PAIR-R24M Dataset for Multi-animal 3D Pose Estimation. Technical report, bioRxiv, Nov. 2021. Section: New Results Type: article. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.525, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[28] Alexander Mathis, Pranav Mamidanna, Kevin M. Cury, Taiga Abe, Venkatesh N. Murthy, Mackenzie Weygandt Mathis, and Matthias Bethge. DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience, 21(9):1281-1289, Sept. 2018. Number: 9 Publisher: Nature Publishing Group. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.608, + 0.469, + 0.661 + ], + "angle": 0, + "content": "[29] Mackenzie Weygandt Mathis and Alexander Mathis. Deep learning tools for the measurement of animal behavior in neuroscience. Current Opinion in Neurobiology, 60:1-11, Feb. 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.664, + 0.469, + 0.718 + ], + "angle": 0, + "content": "[30] Máté Nagy, Gábor Vásárhelyi, Benjamin Pettit, Isabella Roberts-Mariani, Tamás Vicsek, and Dora Biro. Context-dependent hierarchies in pigeons. Proceedings of the National Academy of Sciences, 110(32):13049-13054, 2013. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.469, + 0.789 + ], + "angle": 0, + "content": "[31] Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, and Jun Liu. Animal kingdom: A large and diverse dataset for animal behavior understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19023-19034, 2022. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[32] Ali Nourizonoz, Robert Zimmermann, Chun Lum Andy Ho, Sebastien Pellat, Yannick Ormen, Clément Prévost-Solie, Gilles Reymond, Fabien Pifferi, Fabienne Aujard, Anthony Herrel, et al. Etholoop: automated closed-loop neuroethology in naturalistic environments. Nature methods, 17(10):1052-1059, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. 
You only look once: Unified, real-time object de" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "tection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.124, + 0.892, + 0.165 + ], + "angle": 0, + "content": "[34] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise. arXiv preprint arXiv:1705.10694, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.168, + 0.892, + 0.207 + ], + "angle": 0, + "content": "[35] Bernard Rosner. Percentage points for a generalized ESD many-outlier procedure. Technometrics, 25(2):165-172, 1983. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.212, + 0.892, + 0.28 + ], + "angle": 0, + "content": "[36] John R Stowers, Maximilian Hofbauer, Renaud Bastien, Johannes Griessner, Peter Higgins, Sarfarazhussain Farooqui, Ruth M Fischer, Karin Nowikovsky, Wulf Haubensak, Iain D Couzin, et al. Virtual reality for freely moving animals. Nature methods, 14(10):995-1002, 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.284, + 0.892, + 0.352 + ], + "angle": 0, + "content": "[37] Alexandra Swanson, Margaret Kosmala, Chris Lintott, Robert Simpson, Arfon Smith, and Craig Packer. Snapshot serengeti, high-frequency annotated camera trap images of 40 mammalian species in an african savanna. Scientific data, 2(1):1-14, 2015. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.356, + 0.892, + 0.409 + ], + "angle": 0, + "content": "[38] Leslie M. Theunissen and Nikolaus F. Troje. Head Stabilization in the Pigeon: Role of Vision to Correct for Translational and Rotational Disturbances. Frontiers in Neuroscience, 11, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.414, + 0.892, + 0.538 + ], + "angle": 0, + "content": "[39] Devis Tuia, Benjamin Kellenberger, Sara Beery, Blair R. Costelloe, Silvia Zuffi, Benjamin Risse, Alexander Mathis, Mackenzie W. Mathis, Frank van Langevelde, Tilo Burghardt, Roland Kays, Holger Klinck, Martin Wikelski, Iain D. Couzin, Grant van Horn, Margaret C. Crofoot, Charles V. Stewart, and Tanya Berger-Wolf. Perspectives in machine learning for wildlife conservation. Nature Communications, 13(1):792, Feb. 2022. Number: 1 Publisher: Nature Publishing Group. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.541, + 0.892, + 0.623 + ], + "angle": 0, + "content": "[40] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8769-8778, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.627, + 0.892, + 0.696 + ], + "angle": 0, + "content": "[41] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 Dataset, July 2011. Issue: 2010-001 Num Pages: 8 Number: 2010-001 Place: Pasadena, CA Publisher: California Institute of Technology. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.699, + 0.892, + 0.768 + ], + "angle": 0, + "content": "[42] Urs Waldmann, Hemal Naik, Nagy Mate, Fumihiro Kano, Iain D Couzin, Oliver Deussen, and Bastian Goldlücke. 
I-mpptet: Interactive multi-pigeon pose estimation and tracking. In DAGM German Conference on Pattern Recognition, pages 513-528. Springer, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.771, + 0.892, + 0.826 + ], + "angle": 0, + "content": "[43] Tristan Walter and Iain D Couzin. Trex, a fast multi-animal tracking system with markerless identification, and 2d estimation of posture and visual fields. *Elife*, 10:e64000, 2021. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.829, + 0.892, + 0.856 + ], + "angle": 0, + "content": "[44] Hal Whitehead. Analysing animal social structure. Animal behaviour, 53(5):1053-1067, 1997. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[45] Jessie L Williamson and Christopher C Witt. A lightweight backpack harness for tracking hummingbirds. Journal of Avian Biology, 52(9), 2021. 4" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.479, + 0.927, + 0.52, + 0.938 + ], + "angle": 0, + "content": "21283" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.161 + ], + "angle": 0, + "content": "[46] Shiting Xiao, Yufu Wang, Ammon Perkes, Bernd Pfrommer, Marc Schmidt, Kostas Daniilidis, and Marc Badger. Multiview tracking, re-id, and social network analysis of a flock of visually similar birds in an outdoor aviary. arXiv preprint arXiv:2212.00266, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.163, + 0.47, + 0.26 + ], + "angle": 0, + "content": "[47] Yuan Yao, Praneet Bala, Abhiraj Mohan, Eliza Bliss-Moreau, Kristine Coleman, Sienna M. Freeman, Christopher J. Machado, Jessica Raper, Jan Zimmermann, Benjamin Y. Hayden, and Hyun Soo Park. OpenMonkeyChallenge: Dataset and Benchmark Challenges for Pose Estimation of Non-human Primates. International Journal of Computer Vision, Oct. 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.261, + 0.47, + 0.302 + ], + "angle": 0, + "content": "[48] Hang Yu, Yufei Xu, Jing Zhang, Wei Zhao, Ziyu Guan, and Dacheng Tao. Ap-10k: A benchmark for animal pose estimation in the wild, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.304, + 0.47, + 0.372 + ], + "angle": 0, + "content": "[49] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3d menagerie: Modeling the 3d shape and pose of animals. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6365-6373, 2017. 
3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.927, + 0.52, + 0.938 + ], + "angle": 0, + "content": "21284" + } + ] +] \ No newline at end of file diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_origin.pdf b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2b3e81b494c7b3425e5449d196e89c86974d4ee6 --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/5371de19-661e-4e67-a303-36ffc7847ea6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bffa8fdd9d6dc8908df6424f0d20d91dd7dd39e177e0100e24eeb31222bfaf08 +size 817725 diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/full.md b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/full.md new file mode 100644 index 0000000000000000000000000000000000000000..387adb84ada3276ded1bcd123617ec94ca8d366d --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/full.md @@ -0,0 +1,270 @@ +# 3D-POP - An automated annotation approach to facilitate markerless 2D-3D tracking of freely moving birds with marker-based motion capture + +Hemal Naik $^{1234*}$ , Alex Hoi Hang Chan $^{12*}$ , Junran Yang $^{2}$ , Mathilde Delacoux $^{12}$ , Iain D. Couzin $^{123}$ , Fumihiro Kano $^{12\dagger}$ , Máté Nagy $^{12356\dagger}$ + +$^{1}$ Dept. of Collective Behavior and Dept. of Ecology of Animal Societies, Max Planck Institute of Animal Behavior, $^{2}$ Dept. of Biology, University of Konstanz, $^{3}$ Centre for the Advanced Study of Collective Behaviour, University of Konstanz, $^{4}$ Computer Aided Medial Procedures, Informatik Department, Technische Universität München, $^{5}$ Dept. of Biological Physics, Eötvös Loránd University, $^{6}$ MTA-ELTE 'Lendület' Collective Behaviour Research Group, Hungarian Academy of Sciences. *,† contributed equally. Full affiliation available in supplementary {hnaik, icouzin}@ab.mpg.de, nagymate@hal.elte.hu, {hoi-hang.chan, junran.yang, mathilde.delacoux, fumihiro.kano}@uni-konstanz.de + +# Abstract + +Recent advances in machine learning and computer vision are revolutionizing the field of animal behavior by enabling researchers to track the poses and locations of freely moving animals without any marker attachment. However, large datasets of annotated images of animals for markerless pose tracking, especially high-resolution images taken from multiple angles with accurate 3D annotations, are still scant. Here, we propose a method that uses a motion capture (mo-cap) system to obtain a large amount of annotated data on animal movement and posture (2D and 3D) in a semi-automatic manner. Our method is novel in that it extracts the 3D positions of morphological keypoints (e.g. 
eyes, beak, tail) in reference to the positions of markers attached to the animals. Using this method, we obtained, and offer here, a new dataset - 3D-POP with approximately 300k annotated frames (4 million instances) in the form of videos having groups of one to ten freely moving birds from 4 different camera views in a $3.6m \times 4.2m$ area. 3D-POP is the first dataset of flocking birds with accurate keypoint annotations in 2D and 3D along with bounding box and individual identities and will facilitate the development of solutions for problems of 2D to 3D markerless pose, trajectory tracking, and identification in birds. + +# 1. Introduction + +Computer vision and machine learning are revolutionizing many facets of conventional research methods. For example, dataset-driven machine learning methods have + +![](images/758ebd48f70c97be1422d5f815f4cb2e52889e544453b77b72dff3274f52851e.jpg) +Figure 1. Definition of morphological keypoints offered in the 3D-POP dataset + +demonstrated remarkable success in the field of animal behavior, in tasks related to object detection [40, 41], tracking and individual identification [11, 25, 43], species recognition [40], 2D pose estimation [14, 29] and 3D pose estimation [2, 4]. These automatic methods not only reduce the required labor and errors associated with manual coding of behaviours [6, 39] but also facilitate long-term continuous monitoring of animal behavior in both indoor (lab) [32, 36] and outdoor (wild) settings [12, 37]. Engineering and robotics experts use the data on animal locomotion to reverse-engineer the key mechanisms underlying behaviors and movements of animals [18, 20]. The development of new techniques critically depends on the quality of publicly-available datasets with accurate annotations. + +Creating large datasets with animals is particularly difficult because every species has distinct morphology, and also because it is generally challenging to film freely moving animals in a controlled environment. It is thus important for + +datasets to include a wide range of species and behaviors to maximize the practical application of machine learning methods for animal tracking. Although animals have been included in many popular image datasets collected from the internet such as ImageNet [9] and COCO [26], those datasets have not fulfilled more specific needs of animal behavior researchers. Hence, recently several datasets have been created with a focus on animal behavior research, such as species classification [12, 37, 40, 41, 48], behavioral classification [27, 31, 48] and posture tracking [2, 10, 15, 24, 47]. + +The most common approach for creating datasets of animals is through manual annotations in the image space (2D). As a result, most solutions to single / multiple animal detection, tracking, or pose estimation problems are limited to the 2D space [14, 25], or use 2D image projections to validate the results of 3D predictions without ground truth [2, 4]. For nonhuman animals, a dataset similar to Human 3.6M [16] is necessary to develop solutions for problems on 2D/3D tracking and posture prediction with a range of constraints, such as single or multiviews, single or multi-individual, and tracking using single frame or temporal consistency. More recently, marker-based motion-capture technology has been used to create 3D datasets for rats [10] and dogs [22] with one individual. 
The application of mo-cap for animal behaviour studies has also increased in popularity, such as studying flight kinematics [23] and gaze behavior in a freely moving group [17, 21]. It is clear that datasets with mo-cap will not only enhance the size of the dataset but also improve the accuracy of annotations, thus providing a large 2D/3D ground truth dataset for the animal position, posture, and identity tracking. However, despite its potential, researchers have only begun using mo-cap for behavior studies, and further work is required in terms of method development and dataset collection. + +We propose a new mo-cap-based approach to create large-scale datasets with a bird species (homing pigeons, Columba livia), and provide a complete code base for further applications to other species. Along with 2D-3D posture, the dataset also offers annotations for 2D-3D movement trajectories (position) with ground truth on identities for up to 18 individuals. We overcame the unique challenge of needing to attach reflective markers on desired but often inaccessible morphological keypoints on animal bodies and instead determined the relative 3D position of these keypoints to markers attached on accessible parts of the animal (Figure 1). + +The method enables a large amount of training data to be generated in a semi-automatic manner with minimal time investment and human labor. Moreover, by tracking freely-moving animals in a relatively large area (3.6m x 4.2m), we were able to track a variety of naturalistic behaviors in a flock consisting of up to 10 individuals under realistic experimental conditions. Finally, we demonstrate through a + +series of experiments that our method is consistent and the CNN models trained on our dataset are able to predict the postures of birds with no markers attached to their bodies. + +# 2. State of the Art + +# 2.1. 2D posture + +Animal Kingdom [31] is by far the largest dataset with 50 hours of video annotations that include 850 species of varied taxa (fish, birds, mammals, etc.), focusing on a generalizable solution for 2D pose estimation and activity recognition for a single individual. Other notable datasets contain images instead of videos and focus on capturing variations in terms of specific taxa e.g. mammals [48], birds [41] and monkeys [47], or specific species e.g. zebras [14], all of these focus on solving problems for a single individual recorded from a single viewpoint. + +Datasets based on single animal-based solutions are sufficient for some cases and rely on detection-based top-down approaches for extending the method for tracking the posture of multiple individuals [42]. There are few datasets that offer posture annotations for multiple individuals [2,24,25]. The problem of tracking multiple individuals is often simplified by placing the cameras above the animals, which minimizes occlusions [25, 43]. Tracking multiple individuals from side views may require multiple views, which may be important to resolve occlusions when animals interact in 3D spaces e.g. Cowbird dataset [2]. + +The existing datasets have motivated the development of various methods for posture estimation. However, reliance on manual annotations limits the complexity of datasets in terms of the number of viewpoints or the number of individuals, especially for video sequences. + +# 2.2.3D posture + +Datasets with ground truth on 3D posture are relatively difficult to obtain with a group of animals. 
One popular method for obtaining 3D ground truth posture is the triangulation of 2D postures using multiple views to record animals. Acinoset [20] (leopard in wild), Fly3D [15] (fly in a lab) and OpenMonkeyStudio [3] (macaque in a lab) use triangulation-based approaches to provide 3D posture of single individuals. The images for these datasets are also annotated manually and, therefore, the accuracy of the computed 3D pose depends on the quality of annotation and calibration. + +An alternative approach is to use marker-based mo-cap with a skeleton tracking feature as used with humans [16]. Kearney et al. [22] used motion capture to generate 3D ground truth for dogs and combine their approach with depth sensors (RGB-D) with the aim of designing markerless tracking based on RGBD sensors (63 to 82 markers). Dunn et al. [10] offered Rat 7M dataset using mo-cap with + +![](images/375a0910fff9275b9e7bdb91e8372ce7b300417ddcc58685e7a2ca73f7c22987.jpg) +Figure 2. Illustration showing the experimental setup, with different defined coordinate systems, including the Vicon global coordinate system $(\mathrm{O_{Vicon}})$ , the camera coordinate system for each RGB camera $(\mathrm{O_{Camera}})$ , and the head $(\mathrm{O_{head}})$ and backpack $(\mathrm{O_{backpack}})$ coordinate system for each pigeon subject A) Detailed floor plan for data collection. B) Pigeon subject, with corresponding head and backpack markers and coordinate systems + +RGB cameras and 20 markers. These datasets are useful for solving posture problems for a single individual from multiple views and offer the option of using temporal consistency. + +Recently, Marshall et al. published PAIR-R24M [27], the first dataset with 3D ground truth with more than one animal, a pair of rats, using the approach of Dunn et al. [10]. Motion capture systems offer the huge advantage of creating millions of annotations in an automatic manner with high accuracy and low noise. The skeleton tracking feature with the mo-cap is primarily designed for tracking human posture and relies on a large number of markers. Additionally, the marker patterns have to be unique (at least partially) for maintaining the identity of each individual. Marker placement is a also limitation for smaller species and wild animals that lack the tolerance for having markers placed on specific locations of their body. + +Lack of ground truth in 3D posture had led to innovative work of predicting 3D posture using 2D keypoints and silhouettes [2,4] or using synthetic datasets [5] or toys [49]. These approaches are promising but lack quantitative evaluation for robust practical applications. Computer vision literature on 3D posture problems mainly focuses on extracting as much of detail as possible. For animal behavior experiments information required from videos is always defined in the context of the experiment. It is worth noting that for birds, tracking the head and body orientations could be sufficient to quantify many key behaviors in ground-foraging contexts, such as feeding (pecking ground), preening, vigilance (head scanning), courtship (head bowing), or walking. Measuring the head direction in 3D also allows gaze reconstruction [17, 21], to be applied in the study of social cognition and collective behavior. + +# 2.3. Multi-object tracking with identity + +Identity recognition is a critical problem to solve in the context of biological studies, especially when tracking the behavior of multiple interacting individuals over long periods of time. 
Tracking and identification of multiple individuals in large groups are especially exciting to quantify group-level behaviors like social networks [44, 46], dominance, or leadership [30]. + +For indoor experiments, the number of individuals is often controlled and the identification problem is linked with the tracking of animals [25, 43]. The task of tracking and identification is often resolved together using markers [14, 30] or marker-less [7, 11, 43] methods. The existing solutions perform well with specific perspectives (top-down view) and thus often fail to resolve cases of occlusion. Robust evaluation of simultaneous identification and tracking methods is difficult because true ground truth for identities is often not available in datasets with multiple animals or available for only a very short duration [25]. + +There are many good datasets available to independently solve problems of posture estimation, detection, tracking, and identification. Very few datasets offer the possibility of solving all of these problems simultaneously in realistic experimental scenarios. + +We aim to fill this gap with our contribution of a semi-automatic method for producing new datasets with animals. Our dataset, 3D-POP, includes video recordings of 18 unique pigeons in various group sizes (1,2,5,10) from multiple views. We offer ground truth for identity, 2D-3D trajectories, and 2D-3D posture mapping for all individuals across the entire dataset (300K frames). The dataset also consists of annotations for object detection in the form of bounding boxes. + +# 3. Methods + +# 3.1. Experimental Setup + +The dataset was collected from pigeons moving on a jute fabric $(3.6\mathrm{m}\times 4.2\mathrm{m})$ onto which we evenly scattered grains to encourage the birds to feed in that area (Figure 2A). This feeding area was located inside a large enclosure equipped with a mo-cap system $(15\mathrm{m}\times 7\mathrm{m}\times 4\mathrm{m})$ . This mo-cap system consists of 30 motion capture cameras (12 Vicon Vero 2.2, 18 Vicon Vantage-5 cameras; $100\mathrm{Hz}$ ) and can track the 3D positions of reflective markers with submillimeter precision. At the corners of the feeding area, we placed 4 high-resolution (4K) Sony action cameras (rx0ii, $30\mathrm{Hz}$ , $3840\mathrm{x}2160\mathrm{p}$ ) mounted on standard tripods and also an Arduino-based synchronization box which flashes RGB and infrared LED lights every 5 seconds (Figure 2). Details on the synchronization and calibration of RGB cameras are provided in the supplementary text. + +# 3.2. Animal Subjects + +Eighteen pigeons (Columba livia) were subjected to this study over 6 experimental days. Each day 10 pigeons were randomly selected from the population. Four $6.4\mathrm{mm}$ reflective markers were attached to each subject's head, and four $9.5\mathrm{mm}$ markers were attached to a customized backpack worn by each subject (Figure 2B). Generally, pigeons tolerate markers on the head with minimal effects on their behavior and habituate quickly to backpacks. Backpacks are also widely used for bird studies in behavioral ecology [1, 45]. The four $9.5\mathrm{mm}$ backpack markers had a unique geometric configuration to track the individual identities of each bird throughout each recording. Each day we performed up to 11 trials in the following order: 1 pigeon (4 trials), a pair of pigeons (4 trials), a flock of 5 pigeons (2 trials), and a flock of 10 pigeons (1 trial). It took approximately 1 hour to perform all trials each day. 
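The exact synchronization procedure for the RGB cameras and the mo-cap system is given in the supplementary text; as a simple illustration of the mapping that such a shared sync signal enables between the 30 Hz video and the 100 Hz mo-cap streams (all indices hypothetical):

```python
def video_to_mocap_index(frame_idx, flash_frame, flash_sample,
                         video_fps=30.0, mocap_rate=100.0):
    """Map a 30 Hz video frame index to the nearest 100 Hz mo-cap sample,
    given one synchronization event (LED flash) seen in both streams.

    flash_frame:  video frame index at which the flash is visible
    flash_sample: mo-cap sample index of the same flash
    """
    seconds_since_flash = (frame_idx - flash_frame) / video_fps
    return round(flash_sample + seconds_since_flash * mocap_rate)

# Hypothetical example: flash at video frame 90 and mo-cap sample 300
print(video_to_mocap_index(240, flash_frame=90, flash_sample=300))  # 800
```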
The total number of frames and the duration of the samples over the course of the experiment are given in Table 1. An additional session was recorded with birds without any markers attached, to validate models trained on the annotated data of birds carrying markers (see 5.2).

# 3.3. Data annotation pipeline

# 3.3.1 Annotation principle

The movement of all features on a rigid body can be tracked simultaneously in 3D space by computing the 6-DOF pose of the rigid object. We use this principle to generate annotations for keypoint features that are rigidly attached to the head and body of the bird. The four markers attached to the head and the body (via a backpack) of each pigeon are used to compute the 6-DOF pose of these body parts with the mo-cap system.

By assuming that the head and body are rigid bodies in the case of walking or standing birds, we designed a pipeline in which the positions of features on the head and body (beak, eyes, shoulders, tail, etc.) are annotated in a few frames and their 3D locations are computed with respect to the marker positions. Once computed, the relationship between markers and features does not change during the sequences, which ensures that the 6-DOF pose of the head and body in any frame can be used to project the 3D keypoint positions onto the image space to obtain 2D annotations.

All keypoints defined for the head lie on the skull of the bird (Figure 1). The rigidity assumption is valid for these keypoints as they are rigidly placed on the skull. The keypoints chosen for the body actually lie on the rib cage and shoulders and exhibit only a limited range of motion relative to each other. The rigidity assumption for the body is therefore reasonable for the annotation pipeline as long as the birds do not move their wings and body (see 5.3).

# 3.3.2 Manual annotation

6-DOF (degrees of freedom) tracking of the head and body is used to create a bounding box around the bird and crop the image of the focal individual for annotation. For each individual pigeon, 9 morphological keypoints (Figure 1) are annotated on 5-10 frames from all available view angles. Ideally, four frames (1 per view) would be sufficient, but all keypoints are rarely visible within a single instance. Moreover, multiple measurements (3-5 frames per view) improve the robustness of the computed 3D keypoint positions. The position of each keypoint is first triangulated using sparse bundle adjustment (in the camera coordinate system), then the relative position of the keypoint is computed with respect to the markers (in the coordinate system of the body part). Finally, all resulting 3D positions of the keypoints are averaged and stored as a template file. This process is repeated for each bird on each recording day.

# 3.3.3 Annotation propagation

In this final step, the ground-truth data is generated for each recording using the 3D keypoint positions computed in the previous step. The 3D positions of the keypoint features are transferred to the global coordinate system using the 6-DOF pose. Next, the keypoints are transferred to the coordinate system of each camera and projected to the image space (using the calibration parameters). Bounding-box annotations for object detection or tracking tasks are derived from the keypoint projections: we determined that the minimum and maximum x-y pixel values of the projected keypoints, padded with an offset of 60 pixels, are sufficient to define a bounding box. A minimal sketch of this propagation step is given below.
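To make the propagation step concrete, the following sketch shows how per-frame 2D annotations and bounding boxes could be derived from a keypoint template and a 6-DOF body-part pose. It is illustrative only, not the released 3D-POPAP code; the function and variable names, and the use of OpenCV's `projectPoints` as the camera model, are our own assumptions.

```python
import numpy as np
import cv2

def propagate_keypoints(R_part, t_part, kpts_local, rvec_cam, tvec_cam, K, dist):
    """Project the keypoint template of one body part into one camera view.

    R_part, t_part : 6-DOF pose of the head/backpack marker frame in the
                     global (mo-cap) coordinate system (3x3 rotation, 3-vector)
    kpts_local     : (n, 3) keypoint template expressed in that marker frame
    rvec_cam, tvec_cam, K, dist : extrinsics and intrinsics of one RGB camera
                     (OpenCV convention, from the calibration parameters)
    Returns the (n, 3) global keypoints and the (n, 2) image-space annotations.
    """
    kpts_global = kpts_local @ R_part.T + t_part          # marker frame -> global
    kpts_2d, _ = cv2.projectPoints(kpts_global.astype(np.float64),
                                   rvec_cam, tvec_cam, K, dist)
    return kpts_global, kpts_2d.reshape(-1, 2)

def bbox_from_keypoints(kpts_2d, offset=60):
    """Bounding box from the min/max of the projected keypoints, padded by `offset` px."""
    x_min, y_min = kpts_2d.min(axis=0) - offset
    x_max, y_max = kpts_2d.max(axis=0) + offset
    return float(x_min), float(y_min), float(x_max), float(y_max)
```

Repeating such a projection over every frame, camera, and individual yields the 2D keypoint, bounding-box, and identity annotations described above.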
Finally, the 6-DOF tracking with the mo-cap system maintains the identity of each bird, and this identity is also stored with the 2D-3D information for the entire sequence.

![](images/54357dc3df76102af12910c863b8f637d25b8102652d76372d97efea7d8a9073.jpg)

![](images/f64204444059d167046dc366c9d8f8f7035b3da1384b6b6cbaf7d5ce1dcd7572.jpg)
Figure 3. Semi-automated annotation pipeline based on 6-DOF tracking and RGB images. A) Input 6-DOF tracking data for the head and backpack coordinate systems, and multi-view RGB videos. B) Manually annotate all visible keypoints from all views. C) Triangulate the 3D position of all keypoints in the head and backpack coordinate systems, assuming that keypoints and tracked markers form a rigid body. D) Apply across trials to obtain keypoints for all individuals.

![](images/565ce7a02cc250114da9de41f8e952692d71ed24c4c190ee9df77c6fe249a50b.jpg)

![](images/92f1ea26da7b6c833a6c0c2d36f0ade6337eac663eb3bc1a9bb9a5ece2d0eef9.jpg)

# 4. The 3D-POP dataset

# 4.1. Dataset Description

We present 3D-POP (3D Postures of Pigeons), a dataset that provides accurate ground truth for 3D keypoints, 2D keypoints, bounding boxes, and individual identities. The dataset includes RGB images from four high-resolution (4K) cameras and up to 6 hours of recordings divided into 57 sequences of 1, 2, 5, and 10 pigeons behaving naturalistically (Table 1). The dataset contains the 3D coordinates (unfiltered) and 6-DOF poses obtained from the mo-cap facility, along with the calibration parameters. We also provide a total of 1 hour of recordings (11 sequences) of pigeons (group sizes: 1, 2, 5, 11) without any markers on their bodies. These videos are provided for users to test the practical effectiveness of markerless solutions without the influence of markers. For a realistic assessment, we show that a model trained with our dataset is able to infer keypoints on videos of pigeons without markers (see 5.2). Download the dataset here: https://doi.org/10.17617/3.HPBBC7
| No. individuals | Annotated frames | Video length (min) |
| --- | --- | --- |
| 1 | 95,513 | 55 |
| 2 | 135,547 | 119 |
| 5 | 44,240 | 85 |
| 10 | 20,321 | 91 |
Table 1. Dataset summary: total number of labeled frames with ground-truth data for different group sizes.

# 4.2. Customization

We release 3D-POPAP (3D-POP Annotation Pipeline) to manipulate the annotations of the dataset (download: https://github.com/alexhang212/Dataset-3DPOP). As explained earlier, our use of 6-DOF tracking decouples the keypoint annotations from the positions of the markers used for mo-cap. Owing to this design, we can offer a unique dataset with the ability to easily add new 2D/3D keypoint annotations. This keypoint-modification feature is relevant for future work because defining the posture of birds is a difficult problem and depends on the final application. As of now, there are no datasets available with ground truth on the 3D posture of birds. The lack of ground truth has motivated novel ideas for 3D reconstruction of bird pose using 2D annotations (silhouette and keypoints [2]). Among the available 2D datasets with birds, different numbers of keypoints are selected to define pose, e.g. CUB-200: 15 [41], Cowbird dataset: 12 [2], and Animal Kingdom: 23 [31].

To the best of our knowledge, the use of posture in behavior studies with birds is still limited, and the pose definition may depend entirely on the nature of the study. Our keypoint definition is inspired by gaze studies [17, 21], for which a 9-keypoint posture is sufficient to provide gaze direction together with body and head orientation.

# 4.3. Dataset Validation

The annotations in 3D-POP are obtained automatically, and we therefore designed three different tests to validate the accuracy and consistency of the annotations. The first test compares the accuracy of the 3D features computed with our method against the method presented by Kano et al. [21].

The second test measures the consistency of the 3D/2D annotations across the dataset. This test is required to identify annotation errors introduced by erroneous 3D mo-cap tracking due to occlusion, rapid movement of the birds, or calibration and synchronization errors of the cameras. It is important to perform this test because manually checking millions of annotations is not practical. Finally, the third test checks the variation in the 3D poses captured in all sequences. This test shows that the dataset is not biased toward specific types of motion or poses.

# 4.3.1 Accuracy

Kano et al. [21] use a calibration method to measure the 3D position of the eyes w.r.t. the mo-cap markers. This process involves a custom camera rig, made of 4 separate webcams, that captures the head of each pigeon before data collection. We replicated this process to compute ground-truth 3D positions of the eyes and beak, and compared them with the 3D positions of the same features computed with our approach.

We obtained root mean squared errors (RMSE) for all three features (beak: $5.0\mathrm{mm}$, left eye: $5.0\mathrm{mm}$, right eye: $4.9\mathrm{mm}$), which is sufficient for pigeons considering that the diameter of the eyes is typically $6 - 7\mathrm{mm}$ [8]. This method provides an approximation of the accuracy for a few features only, and a better method is required to test the accuracy of 3D features measured on the body. It should be noted that our method has comparable accuracy while alleviating the need for dedicated calibration rigs, thus saving time.
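For reference, the RMSE used in this comparison is the standard root mean squared Euclidean distance between matched 3D points. The short sketch below is illustrative only; the array names (e.g. `beak_rig`, `beak_ours`) are hypothetical placeholders for positions obtained with the calibration-rig method and with our template-based method.

```python
import numpy as np

def rmse_3d(a, b):
    """Root mean squared Euclidean distance (same unit as the input, e.g. mm)
    between two matched sets of 3D points of shape (n, 3)."""
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.sqrt(np.mean(np.sum((a - b) ** 2, axis=1))))

# Example usage (hypothetical arrays):
# rmse_beak = rmse_3d(beak_rig, beak_ours)   # ~5.0 mm in our validation
```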
# 4.3.2 Consistency and outlier detection

It is reasonable to assume that a small portion of the mo-cap sequences contains tracking errors that produce inaccurate 6-DOF poses for the body parts. As a result, the annotations for all keypoints associated with the affected body part are likely to be wrong. Models trained on large datasets with a small amount of label noise are known to still generalize well [34]; nevertheless, it is important to identify and remove such sequences from the dataset. Keeping this in mind, we designed a consistency check based on the intuition that a well-trained keypoint detector will predict 2D features with reasonable accuracy for all frames. A comparison between predicted keypoints and propagated keypoints is therefore likely to show very large errors for all keypoints of the same body part in frames with faulty mo-cap tracking (Figure 5). We use this idea to automatically assess the consistency of the annotations throughout each trial.

We trained a state-of-the-art 2D keypoint detection model (DLC [28]) on 15,177 images with a ResNet50 backbone for 30,000 iterations with the Adam optimizer.

![](images/fd1a912a877c2f4038eca05d2c01378a7b1c8da5d5601e3574699424331cadab.jpg)
Figure 4. Distribution of Euclidean distances (px) between the predictions of a trained DLC model and the annotations, after outlier frames were filtered. Frequency is shown on the y-axis, and only points with up to $10\mathrm{px}$ error are shown on the x-axis. A) Head keypoints. B) Backpack keypoints.

![](images/f7695623f4510f25d39da8525995954a0856cdfcee318b0aa9d07e8028261444.jpg)

There is no reliable method available for tracking the posture of multiple birds simultaneously, so we use a top-down approach and train on single-individual data using the bounding-box annotations. The training data excludes highly occluded frames with $>30\%$ overlap with another bounding box, to avoid crops that contain multiple individuals in close proximity. GESD outlier analysis [35] is applied to each keypoint independently, with the expected number of outliers set to $20\%$ of the dataset. Frames with more than 1 outlier keypoint are filtered out, as we expect a higher number of outliers in the case of erroneous annotations (explained above); a minimal sketch of this filtering step is given at the end of this subsection.

Using this method, we filtered out $2.9\%$ of the overall dataset, which lowered the average Euclidean distance between annotations and predictions (see Table 2). We then retrained a model on the filtered training data (14,722 images, 30,000 iterations, ResNet50 backbone, Adam optimizer), but obtained errors similar to the previous model (see Table 2). The consistency check reveals that the annotations are largely consistent with the model predictions, with a typical error of 2-3 px for head features and 3-4 px for body features (see Figure 4). Figure 5 shows visual examples of outlier frames in which mo-cap errors are likely due to behaviors such as flying or occlusions.

The outlier-filtering method introduces artificial gaps in the dataset. We computed the number of dropped frames and found that $96.1\%$ of the gaps are shorter than 30 frames (1 second) (see supplementary). Researchers in need of continuous temporal data can use gap-free segments or interpolate across small gaps. For the sake of completeness, the automatically rejected frames are also included in the dataset.
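The sketch below illustrates the kind of filtering described above: a generalized ESD (Rosner) test applied independently to the per-keypoint error series, followed by rejection of frames with more than one flagged keypoint. It is a minimal re-implementation for illustration, not the code used to build 3D-POP; the `errors` array and the helper names are assumptions.

```python
import numpy as np
from scipy import stats

def gesd_outliers(x, max_outliers, alpha=0.05):
    """Indices of outliers in a 1-D sample according to the generalized ESD
    (Rosner) test, checking for up to `max_outliers` outliers."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    work, work_idx = x.copy(), np.arange(n)
    removed, R, lam = [], [], []
    for i in range(1, max_outliers + 1):
        if len(work) < 3:
            break
        std = work.std(ddof=1)
        if std == 0:
            break
        dev = np.abs(work - work.mean())
        j = int(np.argmax(dev))
        R.append(dev[j] / std)                      # test statistic R_i
        p = 1.0 - alpha / (2.0 * (n - i + 1))       # critical value lambda_i
        t = stats.t.ppf(p, n - i - 1)
        lam.append((n - i) * t / np.sqrt((n - i - 1 + t**2) * (n - i + 1)))
        removed.append(int(work_idx[j]))
        work, work_idx = np.delete(work, j), np.delete(work_idx, j)
    n_out = max([i for i in range(1, len(R) + 1) if R[i - 1] > lam[i - 1]],
                default=0)
    return removed[:n_out]

def rejected_frames(errors, alpha=0.05):
    """errors: (n_frames, n_keypoints) Euclidean distances between detector
    predictions and propagated annotations. Returns frame indices to drop."""
    n_frames, n_kpts = errors.shape
    flags = np.zeros_like(errors, dtype=bool)
    for k in range(n_kpts):
        idx = gesd_outliers(errors[:, k], max_outliers=int(0.2 * n_frames), alpha=alpha)
        flags[idx, k] = True
    return np.where(flags.sum(axis=1) > 1)[0]       # >1 outlier keypoint -> reject
```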
# 4.3.3 Pose variation

We then compute the number of unique poses that each pigeon exhibits to understand the heterogeneity of poses present in the 3D-POP dataset.
| RMSE (px) | Beak | Nose | Left Eye | Right Eye | Left Shoulder | Right Shoulder | Top Keel | Bottom Keel | Tail |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Before filtering | 10.1 | 7.9 | 7.5 | 7.5 | 8.4 | 8.7 | 9.4 | 9.9 | 8.8 |
| After filtering | 8.1 | 6.0 | 5.9 | 5.9 | 7.9 | 8.2 | 9.1 | 9.5 | 8.2 |
| After retraining | 8.4 | 6.5 | 6.4 | 6.3 | 8.0 | 8.2 | 9.1 | 9.5 | 8.4 |
Table 2. Root mean squared 2D Euclidean error (px) of each keypoint with different data subsets and trained DLC 2D keypoint models. Before filtering: errors of the model trained on the full dataset, with inference on frames before outliers were filtered. After filtering: errors of the model trained on the full dataset, with inference on frames after outliers were filtered. After retraining: errors of the model trained on the filtered dataset, with inference on frames after outliers were filtered.

![](images/5702b9cb3eea2757ebc6f9cb99c1911395e52598807abb2cf93e7fa2705b19ba.jpg)

![](images/2f17d349fc139697d58bb5cae57dc3e2127496b2a8b36e5d7dc676ed7ed8eefc.jpg)
Figure 5. Example frames that are filtered automatically by the outlier analysis, with descriptions of the cause of annotation inaccuracy. Green labels represent annotations, and red labels represent predictions from the trained DLC 2D keypoint detection model.

It is difficult to compute pose variation directly using the 6-DOF pose defined by the markers because the coordinate system is not defined in a standardized way for each pigeon. To create a standardized comparison, we compute two planes defined by keypoint features to represent the alignment of the head and body in 3D space. The head plane is computed using three points (beak and eyes), and the body plane is computed using three points (shoulders and tail). In this manner, the orientations of the planes representing the poses of all individuals are defined using the same features and can be compared in a unified coordinate system. We use the normals of the planes to compute the angles with respect to the canonical coordinate system (see supplementary). We assume that a change of one degree in the rotational angles of either the head or the body corresponds to a new pose. We found a total of 74,924 unique orientations of the head and 14,191 unique orientations of the body, and a combined 1.8 million unique poses present in the dataset. A graphical representation of the range of poses is provided in the supplementary material.

# 5. Experiments

# 5.1. Marker-based + Markerless Hybrid Approach

The first experiment shows that a markerless tracking algorithm trained on 3D-POP is able to provide 3D tracking in cases where the mo-cap system fails to track markers. The solution is useful as an increasing number of pre-existing experimental setups are designed to use marker-based mo-cap technologies for biological studies [10, 17, 21, 23, 38]. A hybrid tracking solution that uses markerless tracking to fill the gaps of the mo-cap system has many potential applications for future behavior studies.

We chose a 5 min sequence with a single individual and artificially removed $25\%$ of the mo-cap tracking data. The gaps were randomly introduced with durations of 30-90 frames (1-3 seconds) to mimic tracking loss. We used the 2D keypoint DLC model (see 4.3) to detect keypoints from all 4 camera views and triangulated the results with sparse bundle adjustment. We compared the result with the ground truth and achieved an average RMS error of $9.2 \, \mathrm{mm}$ (details in supplementary). A simple linear-interpolation approach to fill the gaps resulted in an average RMS error of $52.1 \, \mathrm{mm}$. The proposed solution is a viable application because biologists are likely to keep using motion-tracking technology until a robust markerless 3D tracking solution is available. However, we acknowledge that better hybrid solutions that exploit temporal consistency can be designed in the future [20].
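For reference, the following is a minimal sketch of how missing 3D positions could be recovered from the four camera views: keypoints predicted in each view are triangulated linearly from the calibrated projection matrices. This simple DLT triangulation stands in for the sparse-bundle-adjustment step used in the experiment; the function and variable names are assumptions, not part of the released code.

```python
import numpy as np

def triangulate_dlt(proj_mats, points_2d):
    """Linear (DLT) triangulation of one 3D point from two or more views.

    proj_mats : list of 3x4 camera projection matrices (K [R | t])
    points_2d : list of matching (u, v) pixel coordinates, one per view
    Returns the 3D point in the global (mo-cap) coordinate system.
    """
    rows = []
    for P, (u, v) in zip(proj_mats, points_2d):
        rows.append(u * P[2] - P[0])      # each view contributes two equations
        rows.append(v * P[2] - P[1])
    _, _, vt = np.linalg.svd(np.asarray(rows))
    X = vt[-1]                            # null-space solution (homogeneous)
    return X[:3] / X[3]

# For every frame in a mo-cap gap, the detected 2D keypoints from all four
# views would be passed to triangulate_dlt to fill in the missing 3D data.
```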
# 5.2. Markerless Bird Tracking

This experiment shows that models trained with our dataset can be directly used to track birds without any markers attached to their bodies. It serves as a "sanity check" to ensure that models trained with the 3D-POP dataset are not biased toward the presence of markers. The test also demonstrates the potential contribution of our method toward developing a complete markerless solution for 3D tracking, posture estimation, and identification.

![](images/7d0f23d632aac8b761c77cd70dce5c6079009734f995b5bcc70f6444ee823ebd.jpg)
Figure 6. Pictures show that the 2D keypoint detection algorithm trained with the 3D-POP dataset can make predictions on videos of pigeons without any markers attached to the body.

Using a pre-trained object detection model (YOLOv5s [33]), we extracted the bounding box of a pigeon from a single-individual sequence. We then used the 2D keypoint DLC model (see 4.3) to predict keypoints in the sequence. The models generalize well to images of pigeons without markers (see Figure 6, supplementary video). The result is checked only qualitatively, but it is sufficient to support our claim. The same solution can easily be extended to trials with multiple pigeons by designing a top-down approach (using YOLO) until better solutions are developed using 3D-POP.

# 5.3. Manual Validation

This experiment demonstrates the validity of our assumption that keypoints on the body (shoulders, keel, etc.) behave like points on a rigid body. We randomly selected 1,000 frames and manually annotated the body keypoints. We compared the manual annotations with the automatic ground-truth annotations using the PCK05 and PCK10 metrics (percentage of correct keypoints within $5\%$ and $10\%$ of the bounding box width); a sketch of this metric is given below, and per-keypoint results are reported in Table 3. We report an average PCK05 of $66\%$ and PCK10 of $94\%$ across all keypoints on the body. We also visually quantified that only $2.8\%$ of the frames are cases where birds are moving their wings; thus, the simplified skeletal representation of the body is valid in over $97\%$ of the dataset.
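The sketch below shows how such a PCK value could be computed; it is illustrative only, and the array names (`manual_xy`, `auto_xy`, `widths`) are hypothetical placeholders for the manual annotations, the propagated annotations, and the per-instance bounding-box widths.

```python
import numpy as np

def pck(pred, ref, bbox_widths, thresh=0.05):
    """Fraction of keypoints whose 2D error is below thresh * bounding-box width.

    pred, ref    : (n, 2) arrays of pixel coordinates for one keypoint
    bbox_widths  : (n,) array of bounding-box widths for the same instances
    """
    err = np.linalg.norm(np.asarray(pred, float) - np.asarray(ref, float), axis=1)
    return float(np.mean(err < thresh * np.asarray(bbox_widths, float)))

# PCK05 / PCK10 for one body keypoint (hypothetical arrays):
# pck05 = pck(manual_xy, auto_xy, widths, thresh=0.05)
# pck10 = pck(manual_xy, auto_xy, widths, thresh=0.10)
```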
| Metric | Left Shoulder | Right Shoulder | Top Keel | Bottom Keel | Tail |
| --- | --- | --- | --- | --- | --- |
| PCK05 | 0.78 | 0.75 | 0.58 | 0.57 | 0.60 |
| PCK10 | 0.98 | 0.98 | 0.94 | 0.89 | 0.92 |
Table 3. PCK per body keypoint between the manual annotations and the 3D-POP annotations. PCK is defined as the percentage of points that are within $5\%$ (PCK05) and $10\%$ (PCK10) of the bounding box width.

# 6. Limitations and Future work

The annotation method presented in the paper relies largely on the assumption that the head and body mostly behave as rigid bodies. This assumption does not hold for certain body parts such as the neck, tail end, or feet, and limits the selection of keypoints at these body parts. For similar reasons, the proposed approach will not support annotation of flying birds or of birds that change the shape of body parts while performing certain behaviors, e.g. courtship [19].

Our approach inherently depends on the tracking accuracy of the mo-cap system, and users must keep the mo-cap system regularly calibrated for consistent results. Another possible source of error in the annotation pipeline is the video camera calibration and its temporal synchronization with the mo-cap system. We show that our outlier detection method is effective at identifying noisy annotations; however, some noise can still be present in the dataset. Finally, since the dataset was curated semi-automatically in an existing motion-tracking setup, the data we provide is limited to an indoor environment.

We have improved on the existing state of the art for multi-animal tracking by adding complexity in terms of the number of individuals and camera views. In the future, we intend to develop lifting-based approaches [13, 15] that learn the 2D-3D mapping provided in the 3D-POP dataset to track birds in outdoor environments.

# 7. Conclusion

In this paper, we introduced a novel method that uses a mo-cap system to generate large-scale datasets with multiple animals. We demonstrate that our semi-automated method offers an alternative for generating high-quality animal datasets with minimal manual effort. We offer 3D-POP, the first dataset with ground truth for 3D posture prediction and identity tracking in birds, which would be extremely difficult to achieve even with manual labor. The 3D-POP dataset offers an opportunity for the vision community to work on a complex set of vision problems relevant to achieving markerless tracking of birds in indoor and outdoor environments. At the same time, our method will motivate biologists to create new datasets, as they have access to and work with different types of animals.

# 8. Acknowledgements

Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy - EXC 2117 (ID: 422037984). The Ethical Committee of Baden-Württemberg approved all the experiments (Regierungspräsidium Freiburg, Referat 35, License Number: 35-9185.81/G-19/107). M.N. acknowledges additional support from the Hungarian Academy of Sciences (grant no. 95152) and Eötvös Loránd University. I.C. also acknowledges the Office of Naval Research (grant ONR, N00014-19-1-2556), Horizon Europe Marie Sklodowska-Curie Actions (860949) and the Max Planck Society.

# References

[1] Gustavo Alarcón-Nieto, Jacob M Graving, James A Klarevas-Irby, Adriana A Maldonado-Chaparro, Inge Mueller, and Damien R Farine. An automated barcode tracking system for behavioural studies in birds. Methods in Ecology and Evolution, 9(6):1536-1547, 2018. 4
[2] Marc Badger, Yufu Wang, Adarsh Modh, Ammon Perkes, Nikos Kolotouros, Bernd G Pfrommer, Marc F Schmidt, and Kostas Daniilidis. 3d bird reconstruction: a dataset, model, and shape recovery from a single view.
In European Conference on Computer Vision, pages 1-17. Springer, 2020. 1, 2, 3, 5 +[3] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with openmonkeystudio. Nature communications, 11(1):1-12, 2020. 2 +[4] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures Great and SMAL: Recovering the Shape and Motion of Animals from Video. In C.V. Jawahar, Hongdong Li, Greg Mori, and Konrad Schindler, editors, Computer Vision – ACCV 2018, Lecture Notes in Computer Science, pages 3–19, Cham, 2019. Springer International Publishing. 1, 2, 3 +[5] Luis A Bolanos, Dongsheng Xiao, Nancy L Ford, Jeff M LeDue, Pankaj K Gupta, Carlos Doebeli, Hao Hu, Helge Rhodin, and Timothy H Murphy. A three-dimensional virtual mouse generates synthetic training data for behavioral analysis. Nature methods, 18(4):378-381, 2021. 3 +[6] Marek L. Borowiec, Rebecca B. Dikow, Paul B. Frandsen, Alexander McKeeken, Gabriele Valentini, and Alexander E. White. Deep learning as a tool for ecology and evolution. Methods in Ecology and Evolution, 13(8):1640-1660, 2022. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/2041-210X.13901.1 +[7] Katarzyna Bozek, Laetitia Hebert, Yoann Portugal, Alexander S Mikheyev, and Greg J Stephens. Markerless tracking of an entire honey bee colony. Nature communications, 12(1):1-13, 2021. 3 +[8] Ray D Chard and Ralph H Gundlach. The structure of the eye of the homing pigeon. Journal of Comparative Psychology, 25(2):249, 1938. 6 +[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, June 2009. ISSN: 1063-6919. 2 +[10] Timothy W Dunn, Jesse D Marshall, Kyle S Severson, Diego E Aldarondo, David GC Hildebrand, Selmaan N Chettih, William L Wang, Amanda J Gellis, David E Carlson, Dmitriy Aronov, et al. Geometric deep learning enables 3d kinematic profiling across species and environments. Nature methods, 18(5):564-573, 2021. 2, 3, 7 +[11] André C Ferreira, Liliana R Silva, Francesco Renna, Hanja B Brandl, Julien P Renoult, Damien R Farine, Rita Covas, and + +Claire Doutrelant. Deep learning-based methods for individual recognition in small birds. Methods in Ecology and Evolution, 11(9):1072-1085, 2020. 1, 3 +[12] Crystal Gagne, Jyoti Kini, Daniel Smith, and Mubarak Shah. Florida wildlife camera trap dataset. arXiv preprint arXiv:2106.12628, 2021. 1, 2 +[13] Adam Gosztolai, Semih Günel, Victor Lobato-Ríos, Marco Pietro Abrate, Daniel Morales, Helge Rhodin, Pascal Fua, and Pavan Ramdya. LiftPose3D, a deep learning-based approach for transforming two-dimensional to three-dimensional poses in laboratory animals. Nature Methods, 18(8):975–981, Aug. 2021. Number: 8 Publisher: Nature Publishing Group. 8 +[14] Jacob M Graving, Daniel Chae, Hemal Naik, Liang Li, Benjamin Koger, Blair R Costelloe, and Iain D Couzin. DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning. eLife, 8:e47994, Oct. 2019. Publisher: eLife Sciences Publications, Ltd. 1, 2, 3 +[15] Semih Günel, Helge Rhodin, Daniel Morales, João Campagnolo, Pavan Ramdya, and Pascal Fua. Deepfly3d, a deep learning-based approach for 3d limb and appendage tracking in tethered, adult drosophila. *Elite*, 8:e48571, 2019. 2, 8 +[16] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. 
Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, jul 2014. 2 +[17] Akihiro Itahara and Fumihiro Kano. "Corvid Tracking Studio": A custom-built motion capture system to track head movements of corvids. Japanese Journal of Animal Psychology, pages 72-1, 2022. 2, 3, 5, 7 +[18] Noah T Jafferis, E Farrell Helbling, Michael Karpelson, and Robert J Wood. Untethered flight of an insect-sized flapping-wing microscale aerial vehicle. Nature, 570(7762):491-495, 2019. 1 +[19] Judith Janisch, Elisa Perinot, Leonida Fusani, and Cliodhna Quigley. Deciphering choreographies of elaborate courtship displays of golden-collared manakins using markerless motion capture. Ethology, 127(7):550-562, 2021. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/eth.13161.8 +[20] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13901-13908. IEEE, 2021. 1, 2, 7 +[21] Fumihiro Kano, Hemal Naik, Góksel Keskin, Iain D. Couzin, and Mate Nagy. Head-tracking of freely-behaving pigeons in a motion-capture system reveals the selective use of visual field regions. Scientific Reports, 12(1):19113, Nov 2022. 2, 3, 5, 6, 7 +[22] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. Rgbd-dog: Predicting canine pose from rgbd sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8336-8345, 2020. 2 + +[23] Marco KleinHeerenbrink, Lydia A France, Caroline H Brighton, and Graham K Taylor. Optimization of avian perching manoeuvres. Nature, 607(7917):91-96, 2022. 2, 7 +[24] Rollyn Labuguen, Jumpei Matsumoto, Salvador Blanco Negrete, Hiroshi Nishimaru, Hisao Nishijo, Masahiko Takada, Yasuhiro Go, Ken-ichi Inoue, and Tomohiro Shibata. Macaquepose: A novel "in the wild" macaque monkey pose dataset for markerless motion capture. Frontiers in behavioral neuroscience, 14:581154, 2021. 2 +[25] Jessy Lauer, Mu Zhou, Shaokai Ye, William Menegas, Steffen Schneider, Tanmay Nath, Mohammed Mostafizur Rahman, Valentina Di Santo, Daniel Soberanes, Guoping Feng, Venkatesh N. Murthy, George Lauder, Catherine Dulac, Mackenzie Weygandt Mathis, and Alexander Mathis. Multi-animal pose estimation, identification and tracking with DeepLabCut. Nature Methods, 19(4):496-504, Apr. 2022. Number: 4 Publisher: Nature Publishing Group. 1, 2, 3 +[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dolkar, and C. Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuyte-laars, editors, Computer Vision – ECCV 2014, Lecture Notes in Computer Science, pages 740–755, Cham, 2014. Springer International Publishing. 2 +[27] Jesse D. Marshall, Ugne Klibaite, Amanda Gellis, Diego E. Aldarondo, Bence P. Ölveczky, and Timothy W. Dunn. The PAIR-R24M Dataset for Multi-animal 3D Pose Estimation. Technical report, bioRxiv, Nov. 2021. Section: New Results Type: article. 2, 3 +[28] Alexander Mathis, Pranav Mamidanna, Kevin M. Cury, Taiga Abe, Venkatesh N. Murthy, Mackenzie Weygandt Mathis, and Matthias Bethge. DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience, 21(9):1281-1289, Sept. 
2018. Number: 9 Publisher: Nature Publishing Group. 6 +[29] Mackenzie Weygandt Mathis and Alexander Mathis. Deep learning tools for the measurement of animal behavior in neuroscience. Current Opinion in Neurobiology, 60:1-11, Feb. 2020. 1 +[30] Máté Nagy, Gábor Vásárhelyi, Benjamin Pettit, Isabella Roberts-Mariani, Tamás Vicsek, and Dora Biro. Context-dependent hierarchies in pigeons. Proceedings of the National Academy of Sciences, 110(32):13049-13054, 2013. 3 +[31] Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, and Jun Liu. Animal kingdom: A large and diverse dataset for animal behavior understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19023-19034, 2022. 2, 5 +[32] Ali Nourizonoz, Robert Zimmermann, Chun Lum Andy Ho, Sebastien Pellat, Yannick Ormen, Clément Prévost-Solie, Gilles Reymond, Fabien Pifferi, Fabienne Aujard, Anthony Herrel, et al. Etholoop: automated closed-loop neuroethology in naturalistic environments. Nature methods, 17(10):1052-1059, 2020. 1 +[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object de + +tection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 8 +[34] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise. arXiv preprint arXiv:1705.10694, 2017. 6 +[35] Bernard Rosner. Percentage points for a generalized ESD many-outlier procedure. Technometrics, 25(2):165-172, 1983. 6 +[36] John R Stowers, Maximilian Hofbauer, Renaud Bastien, Johannes Griessner, Peter Higgins, Sarfarazhussain Farooqui, Ruth M Fischer, Karin Nowikovsky, Wulf Haubensak, Iain D Couzin, et al. Virtual reality for freely moving animals. Nature methods, 14(10):995-1002, 2017. 1 +[37] Alexandra Swanson, Margaret Kosmala, Chris Lintott, Robert Simpson, Arfon Smith, and Craig Packer. Snapshot serengeti, high-frequency annotated camera trap images of 40 mammalian species in an african savanna. Scientific data, 2(1):1-14, 2015. 1, 2 +[38] Leslie M. Theunissen and Nikolaus F. Troje. Head Stabilization in the Pigeon: Role of Vision to Correct for Translational and Rotational Disturbances. Frontiers in Neuroscience, 11, 2017. 7 +[39] Devis Tuia, Benjamin Kellenberger, Sara Beery, Blair R. Costelloe, Silvia Zuffi, Benjamin Risse, Alexander Mathis, Mackenzie W. Mathis, Frank van Langevelde, Tilo Burghardt, Roland Kays, Holger Klinck, Martin Wikelski, Iain D. Couzin, Grant van Horn, Margaret C. Crofoot, Charles V. Stewart, and Tanya Berger-Wolf. Perspectives in machine learning for wildlife conservation. Nature Communications, 13(1):792, Feb. 2022. Number: 1 Publisher: Nature Publishing Group. 1 +[40] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8769-8778, 2018. 1, 2 +[41] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 Dataset, July 2011. Issue: 2010-001 Num Pages: 8 Number: 2010-001 Place: Pasadena, CA Publisher: California Institute of Technology. 1, 2, 5 +[42] Urs Waldmann, Hemal Naik, Nagy Mate, Fumihiro Kano, Iain D Couzin, Oliver Deussen, and Bastian Goldlücke. I-mpptet: Interactive multi-pigeon pose estimation and tracking. 
In DAGM German Conference on Pattern Recognition, pages 513-528. Springer, 2022. 2 +[43] Tristan Walter and Iain D Couzin. Trex, a fast multi-animal tracking system with markerless identification, and 2d estimation of posture and visual fields. *Elife*, 10:e64000, 2021. 1, 2, 3 +[44] Hal Whitehead. Analysing animal social structure. Animal behaviour, 53(5):1053-1067, 1997. 3 +[45] Jessie L Williamson and Christopher C Witt. A lightweight backpack harness for tracking hummingbirds. Journal of Avian Biology, 52(9), 2021. 4 + +[46] Shiting Xiao, Yufu Wang, Ammon Perkes, Bernd Pfrommer, Marc Schmidt, Kostas Daniilidis, and Marc Badger. Multiview tracking, re-id, and social network analysis of a flock of visually similar birds in an outdoor aviary. arXiv preprint arXiv:2212.00266, 2022. 3 +[47] Yuan Yao, Praneet Bala, Abhiraj Mohan, Eliza Bliss-Moreau, Kristine Coleman, Sienna M. Freeman, Christopher J. Machado, Jessica Raper, Jan Zimmermann, Benjamin Y. Hayden, and Hyun Soo Park. OpenMonkeyChallenge: Dataset and Benchmark Challenges for Pose Estimation of Non-human Primates. International Journal of Computer Vision, Oct. 2022. 2 +[48] Hang Yu, Yufei Xu, Jing Zhang, Wei Zhao, Ziyu Guan, and Dacheng Tao. Ap-10k: A benchmark for animal pose estimation in the wild, 2021. 2 +[49] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3d menagerie: Modeling the 3d shape and pose of animals. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6365-6373, 2017. 3 \ No newline at end of file diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/images.zip b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b50a291f08ab1077dbbeb68955620c5c37ed1cfe --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:849cf8073032f699435187e39612e9501a76310a3e5e88ba8703655074ec3893 +size 294363 diff --git a/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/layout.json b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..16db60857c3351b5c4295c54090768efa13a48bb --- /dev/null +++ b/2023/3D-POP - An Automated Annotation Approach to Facilitate Markerless 2D-3D Tracking of Freely Moving Birds With Marker-Based Motion Capture/layout.json @@ -0,0 +1,6977 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 61, + 103, + 533, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 103, + 533, + 140 + ], + "spans": [ + { + "bbox": [ + 61, + 103, + 533, + 140 + ], + "type": "text", + "content": "3D-POP - An automated annotation approach to facilitate markerless 2D-3D tracking of freely moving birds with marker-based motion capture" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "spans": [ + { + "bbox": 
[ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": "Hemal Naik" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{1234*}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Alex Hoi Hang Chan" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{12*}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Junran Yang" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Mathilde Delacoux" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Iain D. Couzin" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{123}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Fumihiro Kano" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{12\\dagger}" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "text", + "content": ", Máté Nagy" + }, + { + "bbox": [ + 115, + 160, + 494, + 190 + ], + "type": "inline_equation", + "content": "^{12356\\dagger}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "spans": [ + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "Dept. of Collective Behavior and Dept. of Ecology of Animal Societies, Max Planck Institute of Animal Behavior, " + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "Dept. of Biology, University of Konstanz, " + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "Centre for the Advanced Study of Collective Behaviour, University of Konstanz, " + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "Computer Aided Medial Procedures, Informatik Department, Technische Universität München, " + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "Dept. of Biological Physics, Eötvös Loránd University, " + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 52, + 204, + 555, + 303 + ], + "type": "text", + "content": "MTA-ELTE 'Lendület' Collective Behaviour Research Group, Hungarian Academy of Sciences. *,† contributed equally. 
Full affiliation available in supplementary {hnaik, icouzin}@ab.mpg.de, nagymate@hal.elte.hu, {hoi-hang.chan, junran.yang, mathilde.delacoux, fumihiro.kano}@uni-konstanz.de" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 328, + 192, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 328, + 192, + 341 + ], + "spans": [ + { + "bbox": [ + 143, + 328, + 192, + 341 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 355, + 290, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 355, + 290, + 630 + ], + "spans": [ + { + "bbox": [ + 46, + 355, + 290, + 630 + ], + "type": "text", + "content": "Recent advances in machine learning and computer vision are revolutionizing the field of animal behavior by enabling researchers to track the poses and locations of freely moving animals without any marker attachment. However, large datasets of annotated images of animals for markerless pose tracking, especially high-resolution images taken from multiple angles with accurate 3D annotations, are still scant. Here, we propose a method that uses a motion capture (mo-cap) system to obtain a large amount of annotated data on animal movement and posture (2D and 3D) in a semi-automatic manner. Our method is novel in that it extracts the 3D positions of morphological keypoints (e.g. eyes, beak, tail) in reference to the positions of markers attached to the animals. Using this method, we obtained, and offer here, a new dataset - 3D-POP with approximately 300k annotated frames (4 million instances) in the form of videos having groups of one to ten freely moving birds from 4 different camera views in a " + }, + { + "bbox": [ + 46, + 355, + 290, + 630 + ], + "type": "inline_equation", + "content": "3.6m \\times 4.2m" + }, + { + "bbox": [ + 46, + 355, + 290, + 630 + ], + "type": "text", + "content": " area. 3D-POP is the first dataset of flocking birds with accurate keypoint annotations in 2D and 3D along with bounding box and individual identities and will facilitate the development of solutions for problems of 2D to 3D markerless pose, trajectory tracking, and identification in birds." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 656, + 128, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 656, + 128, + 669 + ], + "spans": [ + { + "bbox": [ + 47, + 656, + 128, + 669 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 715 + ], + "type": "text", + "content": "Computer vision and machine learning are revolutionizing many facets of conventional research methods. 
For example, dataset-driven machine learning methods have" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 343, + 328, + 486, + 443 + ], + "blocks": [ + { + "bbox": [ + 343, + 328, + 486, + 443 + ], + "lines": [ + { + "bbox": [ + 343, + 328, + 486, + 443 + ], + "spans": [ + { + "bbox": [ + 343, + 328, + 486, + 443 + ], + "type": "image", + "image_path": "758ebd48f70c97be1422d5f815f4cb2e52889e544453b77b72dff3274f52851e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 445, + 545, + 467 + ], + "lines": [ + { + "bbox": [ + 305, + 445, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 445, + 545, + 467 + ], + "type": "text", + "content": "Figure 1. Definition of morphological keypoints offered in the 3D-POP dataset" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 495, + 547, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 547, + 663 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 547, + 663 + ], + "type": "text", + "content": "demonstrated remarkable success in the field of animal behavior, in tasks related to object detection [40, 41], tracking and individual identification [11, 25, 43], species recognition [40], 2D pose estimation [14, 29] and 3D pose estimation [2, 4]. These automatic methods not only reduce the required labor and errors associated with manual coding of behaviours [6, 39] but also facilitate long-term continuous monitoring of animal behavior in both indoor (lab) [32, 36] and outdoor (wild) settings [12, 37]. Engineering and robotics experts use the data on animal locomotion to reverse-engineer the key mechanisms underlying behaviors and movements of animals [18, 20]. The development of new techniques critically depends on the quality of publicly-available datasets with accurate annotations." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "Creating large datasets with animals is particularly difficult because every species has distinct morphology, and also because it is generally challenging to film freely moving animals in a controlled environment. It is thus important for" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "text", + "content": "21274" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "content": "datasets to include a wide range of species and behaviors to maximize the practical application of machine learning methods for animal tracking. Although animals have been included in many popular image datasets collected from the internet such as ImageNet [9] and COCO [26], those datasets have not fulfilled more specific needs of animal behavior researchers. Hence, recently several datasets have been created with a focus on animal behavior research, such as species classification [12, 37, 40, 41, 48], behavioral classification [27, 31, 48] and posture tracking [2, 10, 15, 24, 47]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 194, + 289, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 194, + 289, + 481 + ], + "spans": [ + { + "bbox": [ + 46, + 194, + 289, + 481 + ], + "type": "text", + "content": "The most common approach for creating datasets of animals is through manual annotations in the image space (2D). As a result, most solutions to single / multiple animal detection, tracking, or pose estimation problems are limited to the 2D space [14, 25], or use 2D image projections to validate the results of 3D predictions without ground truth [2, 4]. For nonhuman animals, a dataset similar to Human 3.6M [16] is necessary to develop solutions for problems on 2D/3D tracking and posture prediction with a range of constraints, such as single or multiviews, single or multi-individual, and tracking using single frame or temporal consistency. More recently, marker-based motion-capture technology has been used to create 3D datasets for rats [10] and dogs [22] with one individual. The application of mo-cap for animal behaviour studies has also increased in popularity, such as studying flight kinematics [23] and gaze behavior in a freely moving group [17, 21]. It is clear that datasets with mo-cap will not only enhance the size of the dataset but also improve the accuracy of annotations, thus providing a large 2D/3D ground truth dataset for the animal position, posture, and identity tracking. However, despite its potential, researchers have only begun using mo-cap for behavior studies, and further work is required in terms of method development and dataset collection." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 483, + 289, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 483, + 289, + 628 + ], + "spans": [ + { + "bbox": [ + 46, + 483, + 289, + 628 + ], + "type": "text", + "content": "We propose a new mo-cap-based approach to create large-scale datasets with a bird species (homing pigeons, Columba livia), and provide a complete code base for further applications to other species. Along with 2D-3D posture, the dataset also offers annotations for 2D-3D movement trajectories (position) with ground truth on identities for up to 18 individuals. 
We overcame the unique challenge of needing to attach reflective markers on desired but often inaccessible morphological keypoints on animal bodies and instead determined the relative 3D position of these keypoints to markers attached on accessible parts of the animal (Figure 1)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 289, + 715 + ], + "type": "text", + "content": "The method enables a large amount of training data to be generated in a semi-automatic manner with minimal time investment and human labor. Moreover, by tracking freely-moving animals in a relatively large area (3.6m x 4.2m), we were able to track a variety of naturalistic behaviors in a flock consisting of up to 10 individuals under realistic experimental conditions. Finally, we demonstrate through a" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 109 + ], + "type": "text", + "content": "series of experiments that our method is consistent and the CNN models trained on our dataset are able to predict the postures of birds with no markers attached to their bodies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 120, + 400, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 120, + 400, + 133 + ], + "spans": [ + { + "bbox": [ + 305, + 120, + 400, + 133 + ], + "type": "text", + "content": "2. State of the Art" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 140, + 380, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 140, + 380, + 152 + ], + "spans": [ + { + "bbox": [ + 306, + 140, + 380, + 152 + ], + "type": "text", + "content": "2.1. 2D posture" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 159, + 545, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 159, + 545, + 278 + ], + "spans": [ + { + "bbox": [ + 304, + 159, + 545, + 278 + ], + "type": "text", + "content": "Animal Kingdom [31] is by far the largest dataset with 50 hours of video annotations that include 850 species of varied taxa (fish, birds, mammals, etc.), focusing on a generalizable solution for 2D pose estimation and activity recognition for a single individual. Other notable datasets contain images instead of videos and focus on capturing variations in terms of specific taxa e.g. mammals [48], birds [41] and monkeys [47], or specific species e.g. zebras [14], all of these focus on solving problems for a single individual recorded from a single viewpoint." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 279, + 545, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 279, + 545, + 410 + ], + "spans": [ + { + "bbox": [ + 304, + 279, + 545, + 410 + ], + "type": "text", + "content": "Datasets based on single animal-based solutions are sufficient for some cases and rely on detection-based top-down approaches for extending the method for tracking the posture of multiple individuals [42]. There are few datasets that offer posture annotations for multiple individuals [2,24,25]. The problem of tracking multiple individuals is often simplified by placing the cameras above the animals, which minimizes occlusions [25, 43]. 
Tracking multiple individuals from side views may require multiple views, which may be important to resolve occlusions when animals interact in 3D spaces e.g. Cowbird dataset [2]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 411, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 411, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 411, + 545, + 471 + ], + "type": "text", + "content": "The existing datasets have motivated the development of various methods for posture estimation. However, reliance on manual annotations limits the complexity of datasets in terms of the number of viewpoints or the number of individuals, especially for video sequences." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 479, + 380, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 479, + 380, + 491 + ], + "spans": [ + { + "bbox": [ + 306, + 479, + 380, + 491 + ], + "type": "text", + "content": "2.2.3D posture" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 628 + ], + "type": "text", + "content": "Datasets with ground truth on 3D posture are relatively difficult to obtain with a group of animals. One popular method for obtaining 3D ground truth posture is the triangulation of 2D postures using multiple views to record animals. Acinoset [20] (leopard in wild), Fly3D [15] (fly in a lab) and OpenMonkeyStudio [3] (macaque in a lab) use triangulation-based approaches to provide 3D posture of single individuals. The images for these datasets are also annotated manually and, therefore, the accuracy of the computed 3D pose depends on the quality of annotation and calibration." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 545, + 714 + ], + "type": "text", + "content": "An alternative approach is to use marker-based mo-cap with a skeleton tracking feature as used with humans [16]. Kearney et al. [22] used motion capture to generate 3D ground truth for dogs and combine their approach with depth sensors (RGB-D) with the aim of designing markerless tracking based on RGBD sensors (63 to 82 markers). Dunn et al. 
[10] offered Rat 7M dataset using mo-cap with" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 732, + 318, + 742 + ], + "type": "text", + "content": "21275" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 446, + 289 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 446, + 289 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 446, + 289 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 446, + 289 + ], + "type": "image", + "image_path": "375a0910fff9275b9e7bdb91e8372ce7b300417ddcc58685e7a2ca73f7c22987.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "lines": [ + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "spans": [ + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "text", + "content": "Figure 2. Illustration showing the experimental setup, with different defined coordinate systems, including the Vicon global coordinate system " + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "inline_equation", + "content": "(\\mathrm{O_{Vicon}})" + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "text", + "content": ", the camera coordinate system for each RGB camera " + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "inline_equation", + "content": "(\\mathrm{O_{Camera}})" + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "text", + "content": ", and the head " + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "inline_equation", + "content": "(\\mathrm{O_{head}})" + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "text", + "content": " and backpack " + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "inline_equation", + "content": "(\\mathrm{O_{backpack}})" + }, + { + "bbox": [ + 452, + 83, + 547, + 292 + ], + "type": "text", + "content": " coordinate system for each pigeon subject A) Detailed floor plan for data collection. B) Pigeon subject, with corresponding head and backpack markers and coordinate systems" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 312, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 312, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 312, + 287, + 361 + ], + "type": "text", + "content": "RGB cameras and 20 markers. These datasets are useful for solving posture problems for a single individual from multiple views and offer the option of using temporal consistency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 363, + 287, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 287, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 287, + 518 + ], + "type": "text", + "content": "Recently, Marshall et al. published PAIR-R24M [27], the first dataset with 3D ground truth with more than one animal, a pair of rats, using the approach of Dunn et al. [10]. Motion capture systems offer the huge advantage of creating millions of annotations in an automatic manner with high accuracy and low noise. The skeleton tracking feature with the mo-cap is primarily designed for tracking human posture and relies on a large number of markers. 
Additionally, the marker patterns have to be unique (at least partially) for maintaining the identity of each individual. Marker placement is a also limitation for smaller species and wild animals that lack the tolerance for having markers placed on specific locations of their body." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 288, + 713 + ], + "type": "text", + "content": "Lack of ground truth in 3D posture had led to innovative work of predicting 3D posture using 2D keypoints and silhouettes [2,4] or using synthetic datasets [5] or toys [49]. These approaches are promising but lack quantitative evaluation for robust practical applications. Computer vision literature on 3D posture problems mainly focuses on extracting as much of detail as possible. For animal behavior experiments information required from videos is always defined in the context of the experiment. It is worth noting that for birds, tracking the head and body orientations could be sufficient to quantify many key behaviors in ground-foraging contexts, such as feeding (pecking ground), preening, vigilance (head scanning), courtship (head bowing), or walking. Measuring the head direction in 3D also allows gaze reconstruction [17, 21], to be applied in the study of social cognition and collective behavior." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 312, + 492, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 312, + 492, + 325 + ], + "spans": [ + { + "bbox": [ + 306, + 312, + 492, + 325 + ], + "type": "text", + "content": "2.3. Multi-object tracking with identity" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 331, + 545, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 331, + 545, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 331, + 545, + 413 + ], + "type": "text", + "content": "Identity recognition is a critical problem to solve in the context of biological studies, especially when tracking the behavior of multiple interacting individuals over long periods of time. Tracking and identification of multiple individuals in large groups are especially exciting to quantify group-level behaviors like social networks [44, 46], dominance, or leadership [30]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 415, + 546, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 546, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 546, + 545 + ], + "type": "text", + "content": "For indoor experiments, the number of individuals is often controlled and the identification problem is linked with the tracking of animals [25, 43]. The task of tracking and identification is often resolved together using markers [14, 30] or marker-less [7, 11, 43] methods. The existing solutions perform well with specific perspectives (top-down view) and thus often fail to resolve cases of occlusion. Robust evaluation of simultaneous identification and tracking methods is difficult because true ground truth for identities is often not available in datasets with multiple animals or available for only a very short duration [25]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 546, + 545, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 545, + 605 + ], + "type": "text", + "content": "There are many good datasets available to independently solve problems of posture estimation, detection, tracking, and identification. Very few datasets offer the possibility of solving all of these problems simultaneously in realistic experimental scenarios." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 713 + ], + "type": "text", + "content": "We aim to fill this gap with our contribution of a semi-automatic method for producing new datasets with animals. Our dataset, 3D-POP, includes video recordings of 18 unique pigeons in various group sizes (1,2,5,10) from multiple views. We offer ground truth for identity, 2D-3D trajectories, and 2D-3D posture mapping for all individuals across the entire dataset (300K frames). The dataset also consists of annotations for object detection in the form of bounding boxes." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "21276" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 107, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 107, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 107, + 83 + ], + "type": "text", + "content": "3. Methods" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 163, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 163, + 105 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 163, + 105 + ], + "type": "text", + "content": "3.1. Experimental Setup" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": "The dataset was collected from pigeons moving on a jute fabric " + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "inline_equation", + "content": "(3.6\\mathrm{m}\\times 4.2\\mathrm{m})" + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": " onto which we evenly scattered grains to encourage the birds to feed in that area (Figure 2A). This feeding area was located inside a large enclosure equipped with a mo-cap system " + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "inline_equation", + "content": "(15\\mathrm{m}\\times 7\\mathrm{m}\\times 4\\mathrm{m})" + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": ". 
This mo-cap system consists of 30 motion capture cameras (12 Vicon Vero 2.2, 18 Vicon Vantage-5 cameras; " + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "inline_equation", + "content": "100\\mathrm{Hz}" + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": ") and can track the 3D positions of reflective markers with submillimeter precision. At the corners of the feeding area, we placed 4 high-resolution (4K) Sony action cameras (rx0ii, " + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "inline_equation", + "content": "30\\mathrm{Hz}" + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "inline_equation", + "content": "3840\\mathrm{x}2160\\mathrm{p}" + }, + { + "bbox": [ + 46, + 110, + 289, + 291 + ], + "type": "text", + "content": ") mounted on standard tripods and also an Arduino-based synchronization box which flashes RGB and infrared LED lights every 5 seconds (Figure 2). Details on the synchronization and calibration of RGB cameras are provided in the supplementary text." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 299, + 146, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 146, + 312 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 146, + 312 + ], + "type": "text", + "content": "3.2. Animal Subjects" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "spans": [ + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "text", + "content": "Eighteen pigeons (Columba livia) were subjected to this study over 6 experimental days. Each day 10 pigeons were randomly selected from the population. Four " + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "inline_equation", + "content": "6.4\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "text", + "content": " reflective markers were attached to each subject's head, and four " + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "inline_equation", + "content": "9.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "text", + "content": " markers were attached to a customized backpack worn by each subject (Figure 2B). Generally, pigeons tolerate markers on the head with minimal effects on their behavior and habituate quickly to backpacks. Backpacks are also widely used for bird studies in behavioral ecology [1, 45]. The four " + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "inline_equation", + "content": "9.5\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 318, + 289, + 571 + ], + "type": "text", + "content": " backpack markers had a unique geometric configuration to track the individual identities of each bird throughout each recording. Each day we performed up to 11 trials in the following order: 1 pigeon (4 trials), a pair of pigeons (4 trials), a flock of 5 pigeons (2 trials), and a flock of 10 pigeons (1 trial). It took approximately 1 hour to perform all trials each day. The total frames and duration of samples over the course of the experiment are described in Table 1. An additional session was recorded with birds without attaching any markers to validate the results of models trained on annotated data having birds with markers (see 5.2)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 578, + 186, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 578, + 186, + 592 + ], + "spans": [ + { + "bbox": [ + 47, + 578, + 186, + 592 + ], + "type": "text", + "content": "3.3. Data annotation pipeline" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 597, + 169, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 169, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 169, + 609 + ], + "type": "text", + "content": "3.3.1 Annotation principle" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 288, + 715 + ], + "type": "text", + "content": "The movement of all features on a rigid body can be tracked simultaneously in a 3D space by computing 6-DOF pose of the rigid object. We use this principle to achieve annotations for keypoint features that are rigidly attached to the head and body of the bird. The four markers attached to the head and body (using a backpack) of each pigeon are used to compute 6-DOF pose of these body parts using the mo-cap system." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 191 + ], + "type": "text", + "content": "By assuming that the head and body are rigid bodies in the case of walking or standing birds, we designed a pipeline to annotate the position of features on the head and body (beak, eyes, shoulder, and tail, etc.) in a few frames to compute their 3D location with respect to marker positions. Once computed, the relationship between markers and features does not change during the sequences and this ensures that 6-DOF pose of head and body for any frame can be used to project 3D positions of keypoint features onto the image space to obtain 2D annotations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 192, + 545, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 192, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 545, + 288 + ], + "type": "text", + "content": "All keypoints defined for the head lie on the skull of the bird (Figure 1). The rigidity assumption is valid for these keypoints as they are rigidly placed on the skull. The keypoints chosen for the body lie actually on the rib cage and shoulders and exhibit a limited range of motion independent of each other. The rigidity assumption for the body is a reasonable assumption for the annotation pipeline if the birds do not move their wings and body(see 5.3)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 302, + 421, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 302, + 421, + 313 + ], + "spans": [ + { + "bbox": [ + 306, + 302, + 421, + 313 + ], + "type": "text", + "content": "3.3.2 Manual annotation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 321, + 545, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 321, + 545, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 321, + 545, + 513 + ], + "type": "text", + "content": "6-DOF (Degrees of freedom) tracking of the head and body is used to create a bounding box around the bird and crop the image of the focal individual for annotation. For each individual pigeon, 9 morphological keypoints (Figure 1) are annotated on 5-10 frames from all available view angles. Ideally, four frames (1 per view) is sufficient, but all keypoints are rarely visible within a single instance. Moreover, multiple measurements (3-5 frames per view) improve the robustness of computed 3D keypoint positions. The position of each keypoint is first triangulated using sparse bundle adjustment (in the camera coordinate system), then the relative position of the keypoint is computed with respect to the markers (in the coordinate system of the body part). Finally, all resultant 3D positions of keypoints are averaged and stored as a template file. This process is repeated for each bird on each recording day." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 527, + 441, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 527, + 441, + 540 + ], + "spans": [ + { + "bbox": [ + 306, + 527, + 441, + 540 + ], + "type": "text", + "content": "3.3.3 Annotation propagation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 713 + ], + "type": "text", + "content": "In this final step, the ground truth data is generated for each recording using 3D keypoint positions computed in the previous step. The 3D positions of the keypoint features are transferred to the global coordinate system using 6-DOF pose. Next, keypoints are transferred to the coordinate system of each camera and projected to the image space (using calibration parameters). Bounding box annotations for object detection or tracking tasks are derived from keypoint projections. We determined that keypoints with the minimum and maximum x-y pixel values with an offset of 60 pixels are sufficient to define a bounding box. Finally, the 6-DOF tracking with the mo-cap system maintains the identity of each bird and this is also stored with 2D-3D information for the entire sequence." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 732, + 317, + 742 + ], + "type": "text", + "content": "21277" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 72, + 72, + 270, + 178 + ], + "blocks": [ + { + "bbox": [ + 72, + 72, + 270, + 178 + ], + "lines": [ + { + "bbox": [ + 72, + 72, + 270, + 178 + ], + "spans": [ + { + "bbox": [ + 72, + 72, + 270, + 178 + ], + "type": "image", + "image_path": "54357dc3df76102af12910c863b8f637d25b8102652d76372d97efea7d8a9073.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 276, + 73, + 389, + 178 + ], + "blocks": [ + { + "bbox": [ + 276, + 73, + 389, + 178 + ], + "lines": [ + { + "bbox": [ + 276, + 73, + 389, + 178 + ], + "spans": [ + { + "bbox": [ + 276, + 73, + 389, + 178 + ], + "type": "image", + "image_path": "f64204444059d167046dc366c9d8f8f7035b3da1384b6b6cbaf7d5ce1dcd7572.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 146, + 545, + 288 + ], + "lines": [ + { + "bbox": [ + 405, + 146, + 545, + 288 + ], + "spans": [ + { + "bbox": [ + 405, + 146, + 545, + 288 + ], + "type": "text", + "content": "Figure 3. Semi-automated annotation pipeline based on 6DOF tracking and RGB images. A) Input 6-DOF tracking data for head and backpack coordinate systems, and multi-view RGB videos. B) Manually annotate all visible keypoints from all views. C) Triangulate 3D position of all keypoints in the head and backpack coordinate system, assuming that keypoints and tracked markers are a rigid body. D) Apply across trials to get keypoints across all individuals" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 72, + 182, + 206, + 290 + ], + "blocks": [ + { + "bbox": [ + 72, + 182, + 206, + 290 + ], + "lines": [ + { + "bbox": [ + 72, + 182, + 206, + 290 + ], + "spans": [ + { + "bbox": [ + 72, + 182, + 206, + 290 + ], + "type": "image", + "image_path": "565ce7a02cc250114da9de41f8e952692d71ed24c4c190ee9df77c6fe249a50b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 213, + 182, + 389, + 290 + ], + "blocks": [ + { + "bbox": [ + 213, + 182, + 389, + 290 + ], + "lines": [ + { + "bbox": [ + 213, + 182, + 389, + 290 + ], + "spans": [ + { + "bbox": [ + 213, + 182, + 389, + 290 + ], + "type": "image", + "image_path": "92f1ea26da7b6c833a6c0c2d36f0ade6337eac663eb3bc1a9bb9a5ece2d0eef9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 308, + 167, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 167, + 321 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 167, + 321 + ], + "type": "text", + "content": "4. The 3D-POP dataset" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 335, + 162, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 162, + 348 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 162, + 348 + ], + "type": "text", + "content": "4.1. 
Dataset Description" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 360, + 288, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 360, + 288, + 563 + ], + "spans": [ + { + "bbox": [ + 46, + 360, + 288, + 563 + ], + "type": "text", + "content": "We present 3D-POP (3D Postures of Pigeons), a dataset that provides accurate ground truth for 3D keypoints, 2D keypoints, bounding boxes, and individual identities. The dataset includes RGB images from four high-resolution cameras (4K) and up to 6 hours of recordings divided into 57 sequences of 1,2,5, and 10 pigeons behaving naturalistically (Table 1). The dataset contains 3D coordinates (unfiltered) and 6-DOF pose obtained from the mo-cap facility along with calibration parameters. We also provide a total of 1 hour of recording (11 sequences) with pigeons (group size:1,2,5,11) without any markers on their body. These videos are provided for users to test the practical effectiveness of markerless solutions without the influence of markers. For realistic assessment, we show that a model trained with our dataset is able to infer keypoints on videos with pigeons without markers (see 5.2). Download the dataset here: https://doi.org/10.17617/3.HPBBC7" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 63, + 594, + 272, + 669 + ], + "blocks": [ + { + "bbox": [ + 63, + 594, + 272, + 669 + ], + "lines": [ + { + "bbox": [ + 63, + 594, + 272, + 669 + ], + "spans": [ + { + "bbox": [ + 63, + 594, + 272, + 669 + ], + "type": "table", + "html": "
<table><tr><td>No. individuals</td><td>Annotated frames</td><td>Video length (min)</td></tr>
<tr><td>1</td><td>95,513</td><td>55</td></tr>
<tr><td>2</td><td>135,547</td><td>119</td></tr>
<tr><td>5</td><td>44,240</td><td>85</td></tr>
<tr><td>10</td><td>20,321</td><td>91</td></tr></table>
", + "image_path": "875ae1e6202967f3fd3e76afbae73e2ae3fb4e12ae675230a7bb62601272e49c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 687, + 286, + 710 + ], + "lines": [ + { + "bbox": [ + 47, + 687, + 286, + 710 + ], + "spans": [ + { + "bbox": [ + 47, + 687, + 286, + 710 + ], + "type": "text", + "content": "Table 1. Dataset Summary: Total number of labeled frames with ground truth data for different group sizes." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 306, + 310, + 397, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 310, + 397, + 321 + ], + "spans": [ + { + "bbox": [ + 306, + 310, + 397, + 321 + ], + "type": "text", + "content": "4.2.Customization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 328, + 545, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 328, + 545, + 543 + ], + "spans": [ + { + "bbox": [ + 304, + 328, + 545, + 543 + ], + "type": "text", + "content": "We release 3D-POPAP (3D-POP Annotation Pipeline) to manipulate the annotations of the dataset (Download: https://github.com/alexhang212/Dataset-3DPOP). As explained earlier, our use of the 6-DOF tracking decouples the keypoint annotations from the positions of markers used for mo-cap. Due to this design of the annotation approach, we can offer a unique dataset with the ability to easily add new 2D/3D keypoint annotations. The feature of keypoint modification is relevant for future work because defining the posture of birds is a difficult problem and depends on the final application. As of now, there are no datasets available with ground truth on the 3D posture of birds. The lack of ground truth has motivated novel ideas for solving the 3D reconstruction of bird pose using 2D annotations (silhouette and keypoints [2]). Among the available 2D datasets with birds, different numbers of keypoints are selected to define pose e.g. CUB-200: 15 [41], Cowbird dataset: 12 [2] and Animal Kingdom: 23 [31]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 544, + 545, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 544, + 545, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 544, + 545, + 616 + ], + "type": "text", + "content": "To the best of our knowledge, the use of posture in behavior studies with birds is still limited and pose definition may rely completely on the nature of the study. Our inspiration for keypoint definition is inspired by gaze studies [17, 21] for which 9 keypoint-based posture sufficiently provides gaze direction with body and head orientation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 623, + 415, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 623, + 415, + 635 + ], + "spans": [ + { + "bbox": [ + 306, + 623, + 415, + 635 + ], + "type": "text", + "content": "4.3. Dataset Validation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 641, + 545, + 712 + ], + "type": "text", + "content": "The annotations in 3D-POP are obtained automatically, and therefore we designed three different tests to validate the accuracy and consistency of the annotations. 
The first test compares the accuracy of the 3D features computed with our method and the method presented by Kano et al. [21]." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "type": "text", + "content": "21278" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 193 + ], + "type": "text", + "content": "The second test measures the consistency of the 3D/2D annotations across the dataset. This test is required to identify errors in annotation introduced by erroneous 3D mo-cap tracking due to occlusion, rapid movement of the birds, or calibration and synchronization errors of the cameras. It is important to perform this test because manually checking millions of annotations is not practical. Finally, the third test checks the variation in the 3D pose captured in all sequences. This test shows that the dataset is not biased to specific types of motion or poses." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 206, + 121, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 206, + 121, + 219 + ], + "spans": [ + { + "bbox": [ + 47, + 206, + 121, + 219 + ], + "type": "text", + "content": "4.3.1 Accuracy" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 225, + 287, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 225, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 46, + 225, + 287, + 320 + ], + "type": "text", + "content": "Kano et al. [21] use a calibration method to measure the 3D position of eyes w.r.t. mo-cap markers. This process involves a custom camera rig, made of 4 separate webcams that capture the head of each pigeon before data collection. We replicated this process to compute the ground truth 3D position of eyes and beak. Further, we compared the ground truth with the 3D position of the same features computed with our approach." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "spans": [ + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "content": "We obtained root mean squared errors (RMSE) for all three features (Beak: " + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "inline_equation", + "content": "5.0\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "content": ", Left eye: " + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "inline_equation", + "content": "5.0\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "content": ", Right Eye: " + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "inline_equation", + "content": "4.9\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "content": "), which is sufficient for pigeons considering that the diameter of the eyes is typically " + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "inline_equation", + "content": "6 - 7\\mathrm{mm}" + }, + { + "bbox": [ + 46, + 321, + 288, + 440 + ], + "type": "text", + "content": " [8]. This method provides an approximation of the accuracy for a few features only, and a better method is required to test the accuracy of 3D features measured on the body. It should be noted that our method has comparable accuracy and alleviates the need of using dedicated calibrations rigs and thus saves time." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 455, + 222, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 455, + 222, + 467 + ], + "spans": [ + { + "bbox": [ + 47, + 455, + 222, + 467 + ], + "type": "text", + "content": "4.3.2 Consistency and outlier detection" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 474, + 287, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 675 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 675 + ], + "type": "text", + "content": "It is reasonable to assume that a small portion of the mo-cap sequences contains tracking errors and will produce inaccurate 6-DOF poses for body parts. As a result, the annotation for all keypoints associated with the relevant body parts is likely to be wrong. We know that models trained with large datasets with small noise still generalize to a solution [34]. Yet, it is important to identify and remove these sequences from the dataset. Keeping this in mind, we design a consistency check with the intuition that a well-trained model for keypoint detector will predict 2D features with reasonable accuracies for all frames. Therefore, a comparison between predicted keypoints and propagated keypoints is likely to show very large errors for all keypoints (of the same body part), especially for frames with faulty mo-cap tracking (Figure 5). We use this idea to automatically determine the consistency of the annotations throughout the trial." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "We trained a state-of-the-art 2D keypoint detection model (DLC [28]) on 15177 images with a ResNet50 backbone for 30,000 iterations with the adam optimizer. 
There" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 305, + 70, + 430, + 168 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 430, + 168 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 430, + 168 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 430, + 168 + ], + "type": "image", + "image_path": "fd1a912a877c2f4038eca05d2c01378a7b1c8da5d5601e3574699424331cadab.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 182, + 547, + 239 + ], + "lines": [ + { + "bbox": [ + 304, + 182, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 547, + 239 + ], + "type": "text", + "content": "Figure 4. Distribution of Euclidean distances (px) between model predictions of a trained DLC model and annotations, after outlier frames were filtered. Frequency shown in the y-axis and and only points of up to " + }, + { + "bbox": [ + 304, + 182, + 547, + 239 + ], + "type": "inline_equation", + "content": "10\\mathrm{px}" + }, + { + "bbox": [ + 304, + 182, + 547, + 239 + ], + "type": "text", + "content": " error is shown on the x-axis. A) Head keypoints B) Backpack keypoints" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 432, + 70, + 552, + 168 + ], + "blocks": [ + { + "bbox": [ + 432, + 70, + 552, + 168 + ], + "lines": [ + { + "bbox": [ + 432, + 70, + 552, + 168 + ], + "spans": [ + { + "bbox": [ + 432, + 70, + 552, + 168 + ], + "type": "image", + "image_path": "f7695623f4510f25d39da8525995954a0856cdfcee318b0aa9d07e8028261444.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "spans": [ + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "text", + "content": "is no reliable method available for tracking the posture of multiple birds simultaneously, therefore we use a top-down approach and train on single individual data using bounding box annotations. The training data excludes highly occluded frames with " + }, + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "inline_equation", + "content": ">30\\%" + }, + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "text", + "content": " overlap with another bounding box to avoid sequences that have multiple individuals in the bounding box due to close proximity. GESD outlier analysis [35] is used for each keypoint independently setting the expected outliers at " + }, + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 304, + 258, + 545, + 402 + ], + "type": "text", + "content": " of the dataset. The frames having more than 1 outlier keypoint are filtered out as we expect a higher number of outliers in case of erroneous annotations (explained above)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 403, + 546, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 546, + 545 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 546, + 545 + ], + "type": "text", + "content": "Using this method we filtered out " + }, + { + "bbox": [ + 304, + 403, + 546, + 545 + ], + "type": "inline_equation", + "content": "2.9\\%" + }, + { + "bbox": [ + 304, + 403, + 546, + 545 + ], + "type": "text", + "content": " of the overall dataset, which lowered the average Euclidean distance between annotation and predictions (see Table 2). We used the filtered training data and retrained a model (14,722 images, 30,000 iterations, ResNet50 backbone, adam optimizer), but obtained similar errors compared to the previous model (see Table 2). The consistency check reveals that the annotations are largely consistent with model predictions, with a typical error of 2-3 pixels for head features and 3-4 px for body features (See Figure 4). Figure 5 shows visual examples of outlier frames where mo-cap errors are likely due to behaviors such as flying or occlusions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 546, + 547, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 547, + 641 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 547, + 641 + ], + "type": "text", + "content": "The outlier filtering method introduces artificial gaps in the dataset. We computed the number of dropped frames and found that " + }, + { + "bbox": [ + 304, + 546, + 547, + 641 + ], + "type": "inline_equation", + "content": "96.1\\%" + }, + { + "bbox": [ + 304, + 546, + 547, + 641 + ], + "type": "text", + "content": " of gaps are less than 30 frames (1 second) in length (see supplementary). Researchers in need of continuous temporal data can use gap-free segments or use interpolation to fill small gaps. For sake of completeness, we have included automatically rejected frames in the dataset." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 658, + 400, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 658, + 400, + 670 + ], + "spans": [ + { + "bbox": [ + 306, + 658, + 400, + 670 + ], + "type": "text", + "content": "4.3.3 Pose variation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 546, + 713 + ], + "type": "text", + "content": "We then compute the number of unique poses that each pigeon exhibit to understand the heterogeneity of pose present in the 3D-POP dataset. It is difficult to compute pose vari" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "21279" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 70, + 533, + 133 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 533, + 133 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 533, + 133 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 533, + 133 + ], + "type": "table", + "html": "
<table><tr><td>RMSE</td><td>Method (px)</td><td>Beak</td><td>Nose</td><td>Left Eye</td><td>Right Eye</td><td>Left Shoulder</td><td>Right Shoulder</td><td>Top Keel</td><td>Bottom Keel</td><td>Tail</td></tr>
<tr><td>RMSE</td><td>BeforeFiltering</td><td>10.1</td><td>7.9</td><td>7.5</td><td>7.5</td><td>8.4</td><td>8.7</td><td>9.4</td><td>9.9</td><td>8.8</td></tr>
<tr><td>RMSE</td><td>AfterFiltering</td><td>8.1</td><td>6.0</td><td>5.9</td><td>5.9</td><td>7.9</td><td>8.2</td><td>9.1</td><td>9.5</td><td>8.2</td></tr>
<tr><td>RMSE</td><td>AfterRetraining</td><td>8.4</td><td>6.5</td><td>6.4</td><td>6.3</td><td>8.0</td><td>8.2</td><td>9.1</td><td>9.5</td><td>8.4</td></tr></table>
", + "image_path": "35abb15dcf3dc7c21b60c8974fde2ec8f7de791de5dde6688471023b94164896.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 152, + 547, + 196 + ], + "lines": [ + { + "bbox": [ + 46, + 152, + 547, + 196 + ], + "spans": [ + { + "bbox": [ + 46, + 152, + 547, + 196 + ], + "type": "text", + "content": "Table 2. Root mean squared 2D Euclidean error (px) of each keypoint with different data subsets and trained DLC 2D keypoint models. BeforeFiltering: Error of model trained on the full dataset with inference on frames before outliers were filtered. AfterFiltering: Errors of the model trained on the full dataset with inference on frames after outliers were filtered. AfterRetraining: Errors of the model trained on the filtered dataset with inference on frames after outliers were filtered" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 82, + 206, + 257, + 320 + ], + "blocks": [ + { + "bbox": [ + 82, + 206, + 257, + 320 + ], + "lines": [ + { + "bbox": [ + 82, + 206, + 257, + 320 + ], + "spans": [ + { + "bbox": [ + 82, + 206, + 257, + 320 + ], + "type": "image", + "image_path": "5702b9cb3eea2757ebc6f9cb99c1911395e52598807abb2cf93e7fa2705b19ba.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 82, + 327, + 254, + 443 + ], + "blocks": [ + { + "bbox": [ + 82, + 327, + 254, + 443 + ], + "lines": [ + { + "bbox": [ + 82, + 327, + 254, + 443 + ], + "spans": [ + { + "bbox": [ + 82, + 327, + 254, + 443 + ], + "type": "image", + "image_path": "2f17d349fc139697d58bb5cae57dc3e2127496b2a8b36e5d7dc676ed7ed8eefc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 456, + 288, + 514 + ], + "lines": [ + { + "bbox": [ + 46, + 456, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 288, + 514 + ], + "type": "text", + "content": "Figure 5. Example frames that are filtered automatically by the outlier analysis, with descriptions of the cause of annotation inaccuracy. Green labels represent annotations, and red labels represent prediction from the trained DLC 2D keypoint detection model." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 713 + ], + "type": "text", + "content": "ation directly using the 6-DOF pose defined by markers because the coordinate system is not defined in a standardized way for each pigeon. To create a standardized comparison, we compute two planes defined by keypoint features to represent the alignment of the head and body in 3D space. The head plane is computed using three points (beak and eyes) and the body plane is computed using three points (shoulders and tail). In this manner, the orientations of all planes representing the pose of all individuals are defined using the same features and can be compared in a unified coordinate system. We use the normal of the planes to compute the angles with the canonical coordinate system (See supplementary). It is assumed that a degree of change in rotational angles of either head or body corresponds to a new pose. 
We found a total of 74,924 unique orientations of the head" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 217, + 545, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 217, + 545, + 266 + ], + "spans": [ + { + "bbox": [ + 305, + 217, + 545, + 266 + ], + "type": "text", + "content": "and 14,191 unique orientations of the body, and the combined 1.8 million unique poses present in the dataset. A graphical representation of the range of poses is provided in the supplementary material." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 277, + 388, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 277, + 388, + 290 + ], + "spans": [ + { + "bbox": [ + 306, + 277, + 388, + 290 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 297, + 545, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 297, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 306, + 297, + 545, + 309 + ], + "type": "text", + "content": "5.1. Marker-based + Markerless Hybrid Approach" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 316, + 545, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 545, + 422 + ], + "type": "text", + "content": "The first experiment shows that markerless tracking algorithm trained on 3D-POP is able to solve 3D tracking for cases when mo-cap fails to track markers. The solution is useful as an increasing number of pre-existing experimental setups are designed to use marker-based mocap technologies for biological studies [10, 17, 21, 23, 38]. A hybrid tracking solution, that uses markerless tracking to fill the gaps of the mo-cap system has many potential applications for future behavior studies." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "spans": [ + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "text", + "content": "We chose a 5 min sequence with a single individual and artificially removed " + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "text", + "content": " of mo-cap tracking data. The gaps are randomly introduced for a duration of 30-90 frames (1-3 seconds), to mimic tracking loss. We used the 2D keypoint DLC model (see 4.3) to detect keypoints from all 4 camera views and triangulate the results with sparse bundle adjustment. We compared the result with the ground truth and achieved avg. RMS error of " + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "inline_equation", + "content": "9.2 \\, \\mathrm{mm}" + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "text", + "content": " (details in supplementary). A simple linear interpolation-based approach to fill gaps resulted in avg. RMS error of " + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "inline_equation", + "content": "52.1 \\, \\mathrm{mm}" + }, + { + "bbox": [ + 304, + 423, + 546, + 604 + ], + "type": "text", + "content": ". The proposed solution is a viable application because biologists are likely to keep using motion-tracking technology until a robust solution is designed for markerless 3D tracking. 
However, we acknowledge that better solutions can be designed for a hybrid approach using temporal consistency in the future [20]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 611, + 451, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 611, + 451, + 624 + ], + "spans": [ + { + "bbox": [ + 306, + 611, + 451, + 624 + ], + "type": "text", + "content": "5.2. Markerless Bird Tracking" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 545, + 713 + ], + "type": "text", + "content": "This experiment shows that models trained with our dataset can be directly used to track birds without any markers attached to their bodies. This experiment works as a \"sanity check\" to ensure that models trained with 3D-POP dataset are not biased toward the presence of markers. The test also demonstrates the potential contribution of our method toward developing a complete markerless solution" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "21280" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 59, + 285, + 139 + ], + "blocks": [ + { + "bbox": [ + 50, + 59, + 285, + 139 + ], + "lines": [ + { + "bbox": [ + 50, + 59, + 285, + 139 + ], + "spans": [ + { + "bbox": [ + 50, + 59, + 285, + 139 + ], + "type": "image", + "image_path": "7d0f23d632aac8b761c77cd70dce5c6079009734f995b5bcc70f6444ee823ebd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 140, + 287, + 173 + ], + "lines": [ + { + "bbox": [ + 47, + 140, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 287, + 173 + ], + "type": "text", + "content": "Figure 6. Pictures show that the 2D keypoint detection algorithm trained with the 3D-POP dataset can make predictions on videos with pigeons without any markers attached to the body." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 194, + 266, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 194, + 266, + 205 + ], + "spans": [ + { + "bbox": [ + 47, + 194, + 266, + 205 + ], + "type": "text", + "content": "for 3D tracking, posture estimation, and identification." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 206, + 287, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 206, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 206, + 287, + 326 + ], + "type": "text", + "content": "Using a pre-trained object detection model (YOLOv5s [33]), we extracted the bounding box of a pigeon from a single individual sequence. We then used the 2D keypoint DLC model (see 4.3) to predict keypoints from the sequence. The models generalize well to the images of pigeons without markers (see Figure 6, supplementary video). The result is qualitatively checked, but sufficient to prove our claim. 
The same solution can be easily extended to multiple pigeon trials by designing a top-down approach (using YOLO) until better solutions are developed using 3D-POP." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 334, + 157, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 334, + 157, + 345 + ], + "spans": [ + { + "bbox": [ + 47, + 334, + 157, + 345 + ], + "type": "text", + "content": "5.3. Manual Validation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "spans": [ + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": "This experiment demonstrates the validity of our assumption that keypoints on the body (shoulder, keel, etc.) behave like points on a rigid body. We selected 1000 frames randomly and manually annotated keypoints for the body part. We compared the manual annotations with automatic ground truth annotations using PCK05 and PCK10 (percentage correct keypoint within " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " of bounding box width) metrics. We report an average PCK05 of " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "66\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " and PCK10 of " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "94\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " across all keypoints on the body (Table 3). We also visually quantified that only " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "2.8\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " of the frames are cases where birds are moving their wings, thus the simplified skeletal representation of the body is valid in over " + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "inline_equation", + "content": "97\\%" + }, + { + "bbox": [ + 46, + 353, + 287, + 507 + ], + "type": "text", + "content": " of the dataset." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 48, + 516, + 287, + 579 + ], + "blocks": [ + { + "bbox": [ + 48, + 516, + 287, + 579 + ], + "lines": [ + { + "bbox": [ + 48, + 516, + 287, + 579 + ], + "spans": [ + { + "bbox": [ + 48, + 516, + 287, + 579 + ], + "type": "table", + "html": "
<table><tr><td>Metric</td><td>Left Shoulder</td><td>Right Shoulder</td><td>Top Keel</td><td>Bottom Keel</td><td>Tail</td></tr>
<tr><td>PCK05</td><td>0.78</td><td>0.75</td><td>0.58</td><td>0.57</td><td>0.60</td></tr>
<tr><td>PCK10</td><td>0.98</td><td>0.98</td><td>0.94</td><td>0.89</td><td>0.92</td></tr></table>
", + "image_path": "78b2e21077ce8e0bf65431da9caa304aa20224d3a000acfb3b2753cd23b77da1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "lines": [ + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "type": "text", + "content": "Table 3. PCK errors per body keypoint between manual annotation and 3DPOP annotation. PCK is defined as the percentage of points that are within " + }, + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 47, + 597, + 287, + 631 + ], + "type": "text", + "content": " of the bounding box width" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 47, + 657, + 211, + 670 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 657, + 211, + 670 + ], + "spans": [ + { + "bbox": [ + 47, + 657, + 211, + 670 + ], + "type": "text", + "content": "6. Limitations and Future work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 287, + 713 + ], + "type": "text", + "content": "The annotation method presented in the paper largely relies on the assumption that the head and body mostly behave as rigid bodies. This assumption does not hold for certain" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 133 + ], + "type": "text", + "content": "body parts such as the neck, tail end, or feet and limits the selection of keypoints at these body parts. For similar reasons, the proposed approach will not support annotation for flying birds or birds that change the shape of body parts while performing certain behaviors e.g. courtship [19]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 133, + 545, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 133, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 304, + 133, + 545, + 262 + ], + "type": "text", + "content": "Our approach inherently depends on the tracking accuracy of the mo-cap system. Users must maintain mo-cap systems regularly calibrated for consistent results. Another possible source of error in the annotation pipeline is video camera calibration and its temporal synchronization with the mo-cap system. We do show that our outlier detection method is effective at identifying noisy annotations, however, noise can still be present in the dataset. Finally, since the dataset was curated semi-automatically in an existing motion tracking setup, the data we provide is limited to an indoor environment." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 263, + 545, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 545, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 545, + 334 + ], + "type": "text", + "content": "We have improved the existing state of the art for multi-animal tracking by adding complexity in the form of the number of individuals and camera views. In the future, we intend to develop lifting-based approaches [13, 15] to learn the 2D-3D mapping obtained in the 3D-POP dataset to track birds in outdoor environments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 346, + 378, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 346, + 378, + 358 + ], + "spans": [ + { + "bbox": [ + 306, + 346, + 378, + 358 + ], + "type": "text", + "content": "7. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 545, + 534 + ], + "type": "text", + "content": "In this paper, we introduced a novel method to use a mop-cap system for generating large-scale datasets with multiple animals. We demonstrate that our semi-automated method offers an alternative for generating high-quality datasets with animals without manual effort. We offer 3D-POP, the first dataset with ground truth for 3D posture prediction and identity tracking in birds, which is extremely difficult to achieve even with manual labor. 3D-POP dataset offers an opportunity for the vision community to work on a complex set of vision problems relevant to achieving markerless tracking of birds in indoor and outdoor environments. At the same time, our method will motivate biologists to create new datasets as they have access to and work with different types of animals." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 544, + 420, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 420, + 558 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 420, + 558 + ], + "type": "text", + "content": "8. Acknowledgements" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 564, + 545, + 696 + ], + "type": "text", + "content": "Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy - EXC 2117 (ID: 422037984). The Ethical Committee of Baden-Württemberg approved all the experiments (Regierungspräsidium Freiburg, Referat 35, License Number: 35-9185.81/G-19/107). M.N. acknowledges additional support from the Hungarian Academy of Sciences (grant no. 95152) and Eötvös Loránd University. I.C. also acknowledge Office of Naval Research (grant ONR, N00014-19-1-2556), Horizon Europe Marie Sklodowska-Curie Actions (860949) and the Max Planck Society." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 317, + 742 + ], + "type": "text", + "content": "21281" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 147 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 147 + ], + "type": "text", + "content": "[1] Gustavo Alarcón-Nieto, Jacob M Graving, James A Klarevas-Irby, Adriana A Maldonado-Chaparro, Inge Mueller, and Damien R Farine. An automated barcode tracking system for behavioural studies in birds. Methods in Ecology and Evolution, 9(6):1536-1547, 2018. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 213 + ], + "type": "text", + "content": "[2] Marc Badger, Yufu Wang, Adarsh Modh, Ammon Perkes, Nikos Kolotouros, Bernd G Pfrommer, Marc F Schmidt, and Kostas Daniilidis. 3d bird reconstruction: a dataset, model, and shape recovery from a single view. In European Conference on Computer Vision, pages 1-17. Springer, 2020. 1, 2, 3, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 216, + 287, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 216, + 287, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 216, + 287, + 270 + ], + "type": "text", + "content": "[3] Praneet C Bala, Benjamin R Eisenreich, Seng Bum Michael Yoo, Benjamin Y Hayden, Hyun Soo Park, and Jan Zimmermann. Automated markerless pose estimation in freely moving macaques with openmonkeystudio. Nature communications, 11(1):1-12, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 273, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 273, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 273, + 287, + 350 + ], + "type": "text", + "content": "[4] Benjamin Biggs, Thomas Roddick, Andrew Fitzgibbon, and Roberto Cipolla. Creatures Great and SMAL: Recovering the Shape and Motion of Animals from Video. In C.V. Jawahar, Hongdong Li, Greg Mori, and Konrad Schindler, editors, Computer Vision – ACCV 2018, Lecture Notes in Computer Science, pages 3–19, Cham, 2019. Springer International Publishing. 1, 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 351, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 351, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 53, + 351, + 287, + 407 + ], + "type": "text", + "content": "[5] Luis A Bolanos, Dongsheng Xiao, Nancy L Ford, Jeff M LeDue, Pankaj K Gupta, Carlos Doebeli, Hao Hu, Helge Rhodin, and Timothy H Murphy. A three-dimensional virtual mouse generates synthetic training data for behavioral analysis. 
Nature methods, 18(4):378-381, 2021. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 408, + 287, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 408, + 287, + 484 + ], + "spans": [ + { + "bbox": [ + 53, + 408, + 287, + 484 + ], + "type": "text", + "content": "[6] Marek L. Borowiec, Rebecca B. Dikow, Paul B. Frandsen, Alexander McKeeken, Gabriele Valentini, and Alexander E. White. Deep learning as a tool for ecology and evolution. Methods in Ecology and Evolution, 13(8):1640-1660, 2022. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/2041-210X.13901.1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 487, + 287, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 487, + 287, + 530 + ], + "spans": [ + { + "bbox": [ + 53, + 487, + 287, + 530 + ], + "type": "text", + "content": "[7] Katarzyna Bozek, Laetitia Hebert, Yoann Portugal, Alexander S Mikheyev, and Greg J Stephens. Markerless tracking of an entire honey bee colony. Nature communications, 12(1):1-13, 2021. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 533, + 287, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 533, + 287, + 564 + ], + "spans": [ + { + "bbox": [ + 53, + 533, + 287, + 564 + ], + "type": "text", + "content": "[8] Ray D Chard and Ralph H Gundlach. The structure of the eye of the homing pigeon. Journal of Comparative Psychology, 25(2):249, 1938. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 567, + 287, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 567, + 287, + 621 + ], + "spans": [ + { + "bbox": [ + 53, + 567, + 287, + 621 + ], + "type": "text", + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, June 2009. ISSN: 1063-6919. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 624, + 287, + 689 + ], + "type": "text", + "content": "[10] Timothy W Dunn, Jesse D Marshall, Kyle S Severson, Diego E Aldarondo, David GC Hildebrand, Selmaan N Chettih, William L Wang, Amanda J Gellis, David E Carlson, Dmitriy Aronov, et al. Geometric deep learning enables 3d kinematic profiling across species and environments. Nature methods, 18(5):564-573, 2021. 2, 3, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[11] André C Ferreira, Liliana R Silva, Francesco Renna, Hanja B Brandl, Julien P Renoult, Damien R Farine, Rita Covas, and" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 106 + ], + "type": "text", + "content": "Claire Doutrelant. Deep learning-based methods for individual recognition in small birds. Methods in Ecology and Evolution, 11(9):1072-1085, 2020. 
1, 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "text", + "content": "[12] Crystal Gagne, Jyoti Kini, Daniel Smith, and Mubarak Shah. Florida wildlife camera trap dataset. arXiv preprint arXiv:2106.12628, 2021. 1, 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 141, + 545, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 219 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 219 + ], + "type": "text", + "content": "[13] Adam Gosztolai, Semih Günel, Victor Lobato-Ríos, Marco Pietro Abrate, Daniel Morales, Helge Rhodin, Pascal Fua, and Pavan Ramdya. LiftPose3D, a deep learning-based approach for transforming two-dimensional to three-dimensional poses in laboratory animals. Nature Methods, 18(8):975–981, Aug. 2021. Number: 8 Publisher: Nature Publishing Group. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 220, + 545, + 274 + ], + "type": "text", + "content": "[14] Jacob M Graving, Daniel Chae, Hemal Naik, Liang Li, Benjamin Koger, Blair R Costelloe, and Iain D Couzin. DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning. eLife, 8:e47994, Oct. 2019. Publisher: eLife Sciences Publications, Ltd. 1, 2, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 308, + 276, + 545, + 319 + ], + "type": "text", + "content": "[15] Semih Günel, Helge Rhodin, Daniel Morales, João Campagnolo, Pavan Ramdya, and Pascal Fua. Deepfly3d, a deep learning-based approach for 3d limb and appendage tracking in tethered, adult drosophila. *Elite*, 8:e48571, 2019. 2, 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 321, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 321, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 321, + 545, + 376 + ], + "type": "text", + "content": "[16] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(7):1325-1339, jul 2014. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 377, + 545, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 377, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 308, + 377, + 545, + 422 + ], + "type": "text", + "content": "[17] Akihiro Itahara and Fumihiro Kano. \"Corvid Tracking Studio\": A custom-built motion capture system to track head movements of corvids. Japanese Journal of Animal Psychology, pages 72-1, 2022. 2, 3, 5, 7" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 422, + 545, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 422, + 545, + 465 + ], + "spans": [ + { + "bbox": [ + 308, + 422, + 545, + 465 + ], + "type": "text", + "content": "[18] Noah T Jafferis, E Farrell Helbling, Michael Karpelson, and Robert J Wood. 
Untethered flight of an insect-sized flapping-wing microscale aerial vehicle. Nature, 570(7762):491-495, 2019. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 468, + 545, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 468, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 308, + 468, + 545, + 533 + ], + "type": "text", + "content": "[19] Judith Janisch, Elisa Perinot, Leonida Fusani, and Cliodhna Quigley. Deciphering choreographies of elaborate courtship displays of golden-collared manakins using markerless motion capture. Ethology, 127(7):550-562, 2021. _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/eth.13161.8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 535, + 545, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 545, + 601 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 545, + 601 + ], + "type": "text", + "content": "[20] Daniel Joska, Liam Clark, Naoya Muramatsu, Ricardo Jericevich, Fred Nicolls, Alexander Mathis, Mackenzie W Mathis, and Amir Patel. Acinoset: a 3d pose estimation dataset and baseline models for cheetahs in the wild. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 13901-13908. IEEE, 2021. 1, 2, 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 308, + 602, + 545, + 656 + ], + "type": "text", + "content": "[21] Fumihiro Kano, Hemal Naik, Góksel Keskin, Iain D. Couzin, and Mate Nagy. Head-tracking of freely-behaving pigeons in a motion-capture system reveals the selective use of visual field regions. Scientific Reports, 12(1):19113, Nov 2022. 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 658, + 545, + 713 + ], + "type": "text", + "content": "[22] Sinead Kearney, Wenbin Li, Martin Parsons, Kwang In Kim, and Darren Cosker. Rgbd-dog: Predicting canine pose from rgbd sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8336-8345, 2020. 2" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 733, + 318, + 742 + ], + "type": "text", + "content": "21282" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[23] Marco KleinHeerenbrink, Lydia A France, Caroline H Brighton, and Graham K Taylor. Optimization of avian perching manoeuvres. Nature, 607(7917):91-96, 2022. 
2, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 182 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 182 + ], + "type": "text", + "content": "[24] Rollyn Labuguen, Jumpei Matsumoto, Salvador Blanco Negrete, Hiroshi Nishimaru, Hisao Nishijo, Masahiko Takada, Yasuhiro Go, Ken-ichi Inoue, and Tomohiro Shibata. Macaquepose: A novel \"in the wild\" macaque monkey pose dataset for markerless motion capture. Frontiers in behavioral neuroscience, 14:581154, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 184, + 288, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 288, + 281 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 288, + 281 + ], + "type": "text", + "content": "[25] Jessy Lauer, Mu Zhou, Shaokai Ye, William Menegas, Steffen Schneider, Tanmay Nath, Mohammed Mostafizur Rahman, Valentina Di Santo, Daniel Soberanes, Guoping Feng, Venkatesh N. Murthy, George Lauder, Catherine Dulac, Mackenzie Weygandt Mathis, and Alexander Mathis. Multi-animal pose estimation, identification and tracking with DeepLabCut. Nature Methods, 19(4):496-504, Apr. 2022. Number: 4 Publisher: Nature Publishing Group. 1, 2, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 282, + 287, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 282, + 287, + 358 + ], + "spans": [ + { + "bbox": [ + 48, + 282, + 287, + 358 + ], + "type": "text", + "content": "[26] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dolkar, and C. Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In David Fleet, Tomas Pajdla, Bernt Schiele, and Tinne Tuyte-laars, editors, Computer Vision – ECCV 2014, Lecture Notes in Computer Science, pages 740–755, Cham, 2014. Springer International Publishing. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 360, + 287, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 360, + 287, + 414 + ], + "spans": [ + { + "bbox": [ + 48, + 360, + 287, + 414 + ], + "type": "text", + "content": "[27] Jesse D. Marshall, Ugne Klibaite, Amanda Gellis, Diego E. Aldarondo, Bence P. Ölveczky, and Timothy W. Dunn. The PAIR-R24M Dataset for Multi-animal 3D Pose Estimation. Technical report, bioRxiv, Nov. 2021. Section: New Results Type: article. 2, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 415, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 287, + 480 + ], + "type": "text", + "content": "[28] Alexander Mathis, Pranav Mamidanna, Kevin M. Cury, Taiga Abe, Venkatesh N. Murthy, Mackenzie Weygandt Mathis, and Matthias Bethge. DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience, 21(9):1281-1289, Sept. 2018. Number: 9 Publisher: Nature Publishing Group. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 481, + 287, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 481, + 287, + 523 + ], + "spans": [ + { + "bbox": [ + 48, + 481, + 287, + 523 + ], + "type": "text", + "content": "[29] Mackenzie Weygandt Mathis and Alexander Mathis. Deep learning tools for the measurement of animal behavior in neuroscience. Current Opinion in Neurobiology, 60:1-11, Feb. 2020. 
1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 525, + 287, + 568 + ], + "type": "text", + "content": "[30] Máté Nagy, Gábor Vásárhelyi, Benjamin Pettit, Isabella Roberts-Mariani, Tamás Vicsek, and Dora Biro. Context-dependent hierarchies in pigeons. Proceedings of the National Academy of Sciences, 110(32):13049-13054, 2013. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "text", + "content": "[31] Xun Long Ng, Kian Eng Ong, Qichen Zheng, Yun Ni, Si Yong Yeo, and Jun Liu. Animal kingdom: A large and diverse dataset for animal behavior understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19023-19034, 2022. 2, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 689 + ], + "type": "text", + "content": "[32] Ali Nourizonoz, Robert Zimmermann, Chun Lum Andy Ho, Sebastien Pellat, Yannick Ormen, Clément Prévost-Solie, Gilles Reymond, Fabien Pifferi, Fabienne Aujard, Anthony Herrel, et al. Etholoop: automated closed-loop neuroethology in naturalistic environments. Nature methods, 17(10):1052-1059, 2020. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object de" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 95 + ], + "type": "text", + "content": "tection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 8" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 98, + 545, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 98, + 545, + 130 + ], + "spans": [ + { + "bbox": [ + 307, + 98, + 545, + 130 + ], + "type": "text", + "content": "[34] David Rolnick, Andreas Veit, Serge Belongie, and Nir Shavit. Deep learning is robust to massive label noise. arXiv preprint arXiv:1705.10694, 2017. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 133, + 545, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 133, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 307, + 133, + 545, + 163 + ], + "type": "text", + "content": "[35] Bernard Rosner. Percentage points for a generalized ESD many-outlier procedure. Technometrics, 25(2):165-172, 1983. 
6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 167, + 545, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 167, + 545, + 221 + ], + "spans": [ + { + "bbox": [ + 307, + 167, + 545, + 221 + ], + "type": "text", + "content": "[36] John R Stowers, Maximilian Hofbauer, Renaud Bastien, Johannes Griessner, Peter Higgins, Sarfarazhussain Farooqui, Ruth M Fischer, Karin Nowikovsky, Wulf Haubensak, Iain D Couzin, et al. Virtual reality for freely moving animals. Nature methods, 14(10):995-1002, 2017. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 224, + 545, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 224, + 545, + 278 + ], + "spans": [ + { + "bbox": [ + 307, + 224, + 545, + 278 + ], + "type": "text", + "content": "[37] Alexandra Swanson, Margaret Kosmala, Chris Lintott, Robert Simpson, Arfon Smith, and Craig Packer. Snapshot serengeti, high-frequency annotated camera trap images of 40 mammalian species in an african savanna. Scientific data, 2(1):1-14, 2015. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 281, + 545, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 281, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 307, + 281, + 545, + 323 + ], + "type": "text", + "content": "[38] Leslie M. Theunissen and Nikolaus F. Troje. Head Stabilization in the Pigeon: Role of Vision to Correct for Translational and Rotational Disturbances. Frontiers in Neuroscience, 11, 2017. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 327, + 545, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 545, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 545, + 426 + ], + "type": "text", + "content": "[39] Devis Tuia, Benjamin Kellenberger, Sara Beery, Blair R. Costelloe, Silvia Zuffi, Benjamin Risse, Alexander Mathis, Mackenzie W. Mathis, Frank van Langevelde, Tilo Burghardt, Roland Kays, Holger Klinck, Martin Wikelski, Iain D. Couzin, Grant van Horn, Margaret C. Crofoot, Charles V. Stewart, and Tanya Berger-Wolf. Perspectives in machine learning for wildlife conservation. Nature Communications, 13(1):792, Feb. 2022. Number: 1 Publisher: Nature Publishing Group. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 428, + 545, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 428, + 545, + 493 + ], + "spans": [ + { + "bbox": [ + 307, + 428, + 545, + 493 + ], + "type": "text", + "content": "[40] Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. The inaturalist species classification and detection dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8769-8778, 2018. 1, 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 496, + 545, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 496, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 307, + 496, + 545, + 551 + ], + "type": "text", + "content": "[41] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 Dataset, July 2011. Issue: 2010-001 Num Pages: 8 Number: 2010-001 Place: Pasadena, CA Publisher: California Institute of Technology. 
1, 2, 5" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 553, + 545, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 553, + 545, + 608 + ], + "spans": [ + { + "bbox": [ + 307, + 553, + 545, + 608 + ], + "type": "text", + "content": "[42] Urs Waldmann, Hemal Naik, Nagy Mate, Fumihiro Kano, Iain D Couzin, Oliver Deussen, and Bastian Goldlücke. I-mpptet: Interactive multi-pigeon pose estimation and tracking. In DAGM German Conference on Pattern Recognition, pages 513-528. Springer, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 610, + 545, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 610, + 545, + 654 + ], + "spans": [ + { + "bbox": [ + 307, + 610, + 545, + 654 + ], + "type": "text", + "content": "[43] Tristan Walter and Iain D Couzin. Trex, a fast multi-animal tracking system with markerless identification, and 2d estimation of posture and visual fields. *Elife*, 10:e64000, 2021. 1, 2, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 656, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 656, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 656, + 545, + 677 + ], + "type": "text", + "content": "[44] Hal Whitehead. Analysing animal social structure. Animal behaviour, 53(5):1053-1067, 1997. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[45] Jessie L Williamson and Christopher C Witt. A lightweight backpack harness for tracking hummingbirds. Journal of Avian Biology, 52(9), 2021. 4" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "type": "text", + "content": "21283" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 294 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 127 + ], + "type": "text", + "content": "[46] Shiting Xiao, Yufu Wang, Ammon Perkes, Bernd Pfrommer, Marc Schmidt, Kostas Daniilidis, and Marc Badger. Multiview tracking, re-id, and social network analysis of a flock of visually similar birds in an outdoor aviary. arXiv preprint arXiv:2212.00266, 2022. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 129, + 287, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 129, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 129, + 287, + 205 + ], + "type": "text", + "content": "[47] Yuan Yao, Praneet Bala, Abhiraj Mohan, Eliza Bliss-Moreau, Kristine Coleman, Sienna M. Freeman, Christopher J. Machado, Jessica Raper, Jan Zimmermann, Benjamin Y. Hayden, and Hyun Soo Park. OpenMonkeyChallenge: Dataset and Benchmark Challenges for Pose Estimation of Non-human Primates. International Journal of Computer Vision, Oct. 2022. 
2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 206, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 206, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 287, + 239 + ], + "type": "text", + "content": "[48] Hang Yu, Yufei Xu, Jing Zhang, Wei Zhao, Ziyu Guan, and Dacheng Tao. Ap-10k: A benchmark for animal pose estimation in the wild, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 240, + 287, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 240, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 48, + 240, + 287, + 294 + ], + "type": "text", + "content": "[49] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3d menagerie: Modeling the 3d shape and pose of animals. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6365-6373, 2017. 3" + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "spans": [ + { + "bbox": [ + 293, + 734, + 318, + 742 + ], + "type": "text", + "content": "21284" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_content_list.json b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..de7b0f2db7957904ca954b687677895e9e2ee943 --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_content_list.json @@ -0,0 +1,1317 @@ +[ + { + "type": "text", + "text": "3DAvatarGAN: Bridging Domains for Personalized Editable Avatars", + "text_level": 1, + "bbox": [ + 138, + 130, + 831, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rameen Abdal $^{\\dagger 1}$ Hsin-Ying Lee $^{2}$ Peihao Zhu $^{\\dagger 1}$ Minglei Chai $^{2}$ Aliaksandr Siarohin $^{2}$ \nPeter Wonka $^{1}$ Sergey Tulyakov $^{2}$ $^{1}$ KAUST $^{2}$ Snap Inc.", + "bbox": [ + 114, + 178, + 854, + 268 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/df16d596e57fb3925b8f10971a3cd7e1f29201be7f32e2232fd0034c6fca15b4.jpg", + "image_caption": [ + "Figure 1. Editable 3D avatars. We present 3DAvatarGAN, a 3D GAN able to produce and edit personalized 3D avatars from a single photograph (real or generated). Our method distills information from a 2D-GAN trained on 2D artistic datasets like Caricatures, Pixar toons, Cartoons, Comics etc. and requires no camera annotations." + ], + "image_footnote": [], + "bbox": [ + 81, + 285, + 883, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 622, + 313, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Modern 3D-GANs synthesize geometry and texture by training on large-scale datasets with a consistent structure. Training such models on stylized, artistic data, with often unknown, highly variable geometry, and camera information has not yet been shown possible. 
Can we train a 3D GAN on such artistic data, while maintaining multi-view consistency and texture quality? To this end, we propose an adaptation framework, where the source domain is a pre-trained 3D-GAN, while the target domain is a 2D-GAN trained on artistic datasets. We, then, distill the knowledge from a 2D generator to the source 3D generator. To do that, we first propose an optimization-based method to align the distributions of camera parameters across domains. Second, we propose regularizations necessary to learn high-quality texture, while avoiding degenerate geometric solutions, such as flat shapes. Third, we show", + "bbox": [ + 75, + 659, + 473, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "a deformation-based technique for modeling exaggerated geometry of artistic domains, enabling—as a byproduct—personalized geometric editing. Finally, we propose a novel inversion method for 3D-GANs linking the latent spaces of the source and the target domains. Our contributions—for the first time—allow for the generation, editing, and animation of personalized artistic 3D avatars on artistic datasets. Project Page: https://rameenabdal.github.io/3DAvatarGAN", + "bbox": [ + 496, + 623, + 895, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 500, + 758, + 632, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Photo-realistic portrait face generation is an iconic application demonstrating the capability of generative models especially GANs [28,30,31]. A recent development has witnessed an advancement from straightforwardly synthesizing 2D images to learning 3D structures without 3D supervision, referred to as 3D-GANs [10,41,55,64]. Such training", + "bbox": [ + 496, + 782, + 893, + 876 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Part of the work was done during an internship at Snap Inc.", + "bbox": [ + 524, + 886, + 846, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "4552", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "is feasible with the datasets containing objects with highly consistent geometry, enabling a 3D-GAN to learn a distribution of shapes and textures. In contrast, artistically stylized datasets [25, 65] have arbitrary exaggerations of both geometry and texture, for example, the nose, cheeks, and eyes can be arbitrarily drawn, depending on the style of the artist as well as on the features of the subject, see Fig. 1. Training a 3D-GAN on such data becomes problematic due to the challenge of learning such an arbitrary distribution of geometry and texture. In our experiments (Sec. 5.1), 3D-GANs [10] generate flat geometry and become 2D-GANs essentially. A natural question arises, whether a 3D-GAN can synthesize consistent novel views of images belonging to artistically stylized domains, such as the ones in Fig. 
1.", + "bbox": [ + 75, + 90, + 472, + 303 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose a domain-adaption framework that allows us to answer the question positively. Specifically, we fine-tune a pre-trained 3D-GAN using a 2D-GAN trained on a target domain. Despite being well explored for 2D-GANs [25, 65], existing domain adaptation techniques are not directly applicable to 3D-GANs, due to the nature of 3D data and characteristics of 3D generators.", + "bbox": [ + 75, + 304, + 472, + 411 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The geometry and texture of stylized 2D datasets can be arbitrarily exaggerated depending on the context, artist, and production requirements. Due to this, no reliable way to estimate camera parameters for each image exists, whether using an off-the-shelf pose detector [72] or a manual labeling effort. To enable the training of 3D-GANs on such challenging datasets, we propose three contributions. ① An optimization-based method to align distributions of camera parameters between domains. ② Texture, depth, and geometry regularizations to avoid degenerate, flat solutions and ensure high visual quality. Furthermore, we redesign the discriminator training to make it compatible with our task. We then propose ③ a Thin Plate Spline (TPS) 3D deformation module operating on a tri-plane representation to allow for certain large and sometimes extreme geometric deformations, which are so typical in artistic domains.", + "bbox": [ + 75, + 412, + 472, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The proposed adaptation framework enables the training of 3D-GANs on complex and challenging artistic data. The previous success of domain adaptation in 2D-GANs unleashed a number of exciting applications in the content creation area [25, 65]. Given a single image such methods first find a latent code corresponding to it using GAN inversion, followed by latent editing producing the desired effect in the image space. Compared to 2D-GANs, the latent space of 3D-GANs is more entangled, making it more challenging to link the latent spaces between domains, rendering the existing inversion and editing techniques not directly applicable. Hence, we take a step further and explore the use of our approach to 3D artistic avatar generation and editing. Our final contribution to enable such applications is (4) a new inversion method for coupled 3D-GANs.", + "bbox": [ + 75, + 656, + 472, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, the proposed domain-adaption framework", + "bbox": [ + 96, + 885, + 470, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "allows us to train 3D-GANs on challenging artistic datasets with exaggerated geometry and texture. We call our method 3DAvatarGAN as it—for the first time—offers generation, editing, and animation of personalized stylized, artistic avatars obtained from a single image. Our results (See Sec. 5.2) show the high-quality 3D avatars possible by our method compared to the naive fine-tuning.", + "bbox": [ + 496, + 90, + 893, + 198 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 210, + 640, + 226 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GANs and Semantic Image Editing. Generative adversarial Networks (GANs) [19, 47] are one popular type of generative model, especially for smaller high-quality datasets such as FFHQ [32], AFHQ [14], and LSUN objects [67]. 
For these datasets, StyleGAN [28, 30, 32] can be considered as the current state-of-the-art GAN [27, 28, 30, 32, 33]. The disentangled latent space learned by StyleGAN has been shown to exhibit semantic properties conducive to semantic image editing [1, 3, 16, 22, 36, 44, 51, 56, 62]. CLIP [46] based image editing [2, 17, 44] and domain transfer [15, 70] are another set of works enabled by StyleGAN.", + "bbox": [ + 496, + 234, + 893, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "GAN Inversion. Algorithms to project existing images into a GAN latent space are a prerequisite for GAN-based image editing. There are mainly two types of methods to enable such a projection: optimization-based methods [1,13,57,71] and encoder-based methods [5,7,48,58,69]. On top of both streams of methods, the generator weights can be further modified after obtaining initial inversion results [49].", + "bbox": [ + 496, + 402, + 893, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Learning 3D-GANs with 2D Data. Previously, some approaches attempt to extract 3D structure from pre-trained 2D-GANs [42, 52]. Recently, inspired by Neural Radiance Field (NeRF) [9, 37, 43, 68], novel GAN architectures have been proposed to combine implicit or explicit 3D representations with neural rendering techniques [11, 12, 20, 39-41, 50, 53, 55, 63, 64]. In our work, we build on EG3D [11] which has current state-of-the-art results for human faces trained on the FFHQ dataset.", + "bbox": [ + 496, + 508, + 893, + 643 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Avatars and GANs. To generate new results in an artistic domain (e.g. anime or cartoons), a promising technique is to fine-tune an existing GAN pre-trained on photographs, e.g. [45, 54, 60]. Data augmentation and freezing lower layers of the discriminator are useful tools when fine-tuning a 2D-GAN [28, 38]. One branch of methods [18, 44, 70] investigates domain adaptation if only a few examples or only text descriptions are available. While others focus on matching the distribution of artistic datasets with diverse shapes and styles. Our work also falls in this domain. Among previous efforts, StyleCariGAN [25] proposes invertible modules in the generator to train and generate caricatures from real images. DualStyleGAN [65] learns two mapping networks in StyleGAN to control the style and structure of the new domain. Some works are trained on 3D data or require heavy labeling/engineering [21, 26, 66] and use 3D morphable models to map 2D images of carica", + "bbox": [ + 496, + 643, + 895, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "4553", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/516e7d18529441246367b2a8888fc2a1c37173cbf2bdf5c251b7f98194755e06.jpg", + "image_caption": [ + "Naive Fine-Tuning", + "Figure 2. Comparison with naive fine-tuning. Comparison of generated 3D avatars with a naively fine-tuned generator $\\mathrm{G}_{\\mathrm{base}}$ (left sub-figures) versus our generator $\\mathrm{G}_{\\mathrm{t}}$ (right sub-figures). The corresponding sub-figures show comparisons in terms of texture quality (top two rows) and geometry (bottom two rows). See Sec. 5.1 for details." 
+ ], + "image_footnote": [], + "bbox": [ + 80, + 88, + 274, + 243 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f9574c015b5d4a9f970cef245e38b45f1237a686dcc4b322c4f7ab095c2acd23.jpg", + "image_caption": [ + "Our Method" + ], + "image_footnote": [], + "bbox": [ + 274, + 88, + 468, + 243 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tures to 3D models. However, such models fail to model the hair, teeth, neck, and clothes and suffer in texture quality. In this work, we are the first to tackle the problem of domain adaption of 3D-GANs and to produce fully controllable 3D Avatars. We employ 2D to 3D domain adaptation and distillation and make use of synthetic 2D data from StyleCariGAN [25] and DualStyleGAN [65].", + "bbox": [ + 75, + 375, + 467, + 481 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Domain Adaptation for 3D-GANs", + "text_level": 1, + "bbox": [ + 76, + 518, + 382, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The goal of domain adaptation for 3D-GANs is to adapt (both texture and geometry) to a particular style defined by a 2D dataset (Caricature, Anime, Pixar toons, Comic, and Cartoons [24, 25, 65] in our case). In contrast to 2D-StyleGAN-based fine-tuning methods that are conceptually simpler [29, 45], fine-tuning a 3D-GAN on 2D data introduces challenges in addition to domain differences, especially on maintaining the texture quality while preserving the geometry. Moreover, for these datasets, there is no explicit shape and camera information. We define the domain adaptation task as follows: Given a prior 3D-GAN i.e. EG3D $(\\mathrm{G_s})$ of source domain $(T_{\\mathrm{s}})$ , we aim to produce a 3D Avatar GAN $(\\mathrm{G_t})$ of the target domain $(T_{\\mathrm{t}})$ while maintaining the semantic, style, and geometric properties of $\\mathrm{G_s}$ , and at the same time preserving the identity of the subject between the domains $(T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}})$ . Refer to Fig. 4 in supplementary for the pipeline figure. We represent $\\mathrm{G}_{2\\mathrm{D}}$ as a teacher 2D-GAN used for knowledge distillation fine-tuned on the above datasets. Note that as $T_{\\mathrm{t}}$ is not assumed to contain camera parameter annotations, the training scheme must suppress artifacts such as low-quality texture under different views and flat geometry (See Fig. 2). In the following, we discuss the details of our method.", + "bbox": [ + 75, + 553, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. How to align the cameras?", + "text_level": 1, + "bbox": [ + 500, + 90, + 738, + 106 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Selecting appropriate ranges for camera parameters is of paramount importance for high-fidelity geometry and texture detail. Typically, such parameters are empirically estimated, directly computed from the dataset using an off-the-shelf pose detector [10], or learned during training [8]. In domains we aim to bridge, such as caricatures for which a 3D model may not even exist, directly estimating the camera distribution is problematic and, hence, is not assumed by our method. Instead, we find it essential to ensure that the camera parameter distribution is consistent across the source and target domains. For the target domain, we use StyleGAN2 trained on FFHQ, fine-tuned on artistic datasets [25, 65]. 
Assuming that the intrinsic parameters of all the cameras are the same, we aim to match the distribution of extrinsic camera parameters of $G_{\\mathrm{s}}$ and $G_{2\\mathrm{D}}$ and train our final $G_{\\mathrm{t}}$ using it (see illustration in Fig. 2 of the supplementary materials). To this end, we define an optimization-based method to match the sought distributions. The first step is to identify a canonical pose image in $G_{2\\mathrm{D}}$ , where the yaw, pitch, and roll parameters are zero. According to Karras et al., [31], the image corresponding to the mean latent code satisfies this property. Let $\\theta$ , $\\phi$ be the camera Euler angles in a spherical coordinate system, $r$ , $c$ be the radius of the sphere and camera lookat point, and $M$ be a function that converts these parameters into the camera-to-world matrix. Let $I_{\\mathrm{s}}(w,\\theta ,\\phi ,c,r) = G_{\\mathrm{s}}(w,M(\\theta ,\\phi ,c,r))$ and $I_{2\\mathrm{D}}(w) = G_{2\\mathrm{D}}(w)$ represent an arbitrary image generated by $G_{\\mathrm{s}}$ and $G_{2\\mathrm{D}}$ , respectively, given the $w$ code variable. Let $k_{\\mathrm{d}}$ be the face key-points detected by the detector $K_{\\mathrm{d}}$ [72], then", + "bbox": [ + 496, + 114, + 890, + 568 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left(c ^ {\\prime}, r ^ {\\prime}\\right) := \\underset {(c, r)} {\\arg \\min } \\mathrm {L} _ {\\mathrm {k d}} \\left(I _ {\\mathrm {s}} \\left(w _ {\\text {a v g}} ^ {\\prime}, 0, 0, c, r\\right), I _ {2 \\mathrm {D}} \\left(w _ {\\text {a v g}}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 580, + 890, + 621 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathrm{L_{kd}}(I_1,I_2) = \\| k_{\\mathrm{d}}(I_1) - k_{\\mathrm{d}}(I_2)\\| _1$ and $w_{\\mathrm{avg}}$ and $w_{\\mathrm{avg}}^{\\prime}$ are the mean $w$ latent codes of $\\mathrm{G}_{2\\mathrm{D}}$ and $\\mathrm{G_s}$ , respectively. In our results, $r^\\prime$ is determined to be 2.7 and $c^{\\prime}$ is approximately [0.0, 0.05, 0.17]. The next step is to determine a safe range of the $\\theta$ and $\\phi$ parameters. Following prior works, StyleFlow [3] and FreeStyleGAN [35] (see Fig.5 of the paper), we set these parameters as $\\theta^{\\prime}\\in [-0.45,0.45]$ and $\\phi^{\\prime}\\in [-0.35,0.35]$ in radians.", + "bbox": [ + 496, + 621, + 890, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. What loss functions and regularizers to use?", + "text_level": 1, + "bbox": [ + 500, + 755, + 872, + 771 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Next, although the camera systems are aligned, the given dataset may not stem from a consistent 3D model, e.g., in the case of caricatures or cartoons. This entics the generator $G_{t}$ to converge to an easier degenerate solution with a flat geometry. Hence, to benefit from the geometric prior of $G_{s}$ , another important step is to design the loss functions and regularizers for a selected set of parameters to update in $G_{t}$ . Next, we discuss these design choices:", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "4554", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/be9123cd168c87d431112c83004391d76550d1a56bad9d921aced8ac8a8ee51f.jpg", + "image_caption": [ + "Figure 3. Domain adaptation. 
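A minimal PyTorch sketch of the camera-alignment optimization described above (Eq. 1). Here `render_source`, `generate_2d`, and `detect_keypoints` are hypothetical stand-ins for the source 3D generator G_s, the 2D target generator G_2D, and the keypoint detector K_d; both the renderer and the detector are assumed differentiable (otherwise a gradient-free search over c and r could be substituted).

```python
import torch

def align_cameras(render_source, generate_2d, detect_keypoints,
                  w_avg_src, w_avg_2d, steps=500, lr=1e-2):
    # Optimize the camera look-at point c and radius r so that the canonical-pose
    # render of the source 3D-GAN matches the mean image of the 2D target
    # generator in face-keypoint space (Eq. 1).
    c = torch.zeros(3, requires_grad=True)      # camera look-at point
    r = torch.tensor(2.7, requires_grad=True)   # camera radius, rough initial guess
    target_kp = detect_keypoints(generate_2d(w_avg_2d)).detach()
    opt = torch.optim.Adam([c, r], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        # yaw = pitch = 0 corresponds to the canonical pose of the mean latent code
        img = render_source(w_avg_src, 0.0, 0.0, c, r)
        loss = torch.nn.functional.l1_loss(detect_keypoints(img), target_kp)
        loss.backward()
        opt.step()
    return c.detach(), r.detach()
```

The recovered (c, r) would then be reused as the fixed camera distribution when fine-tuning the target generator, so that the source and target domains share one camera system.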
Domain adaptation results of images from source domain $T_{\\mathrm{s}}$ (top row in each sub-figure) to target domain $T_{\\mathrm{t}}$ . Rows two to five show corresponding 3D avatar results from different viewpoints." + ], + "image_footnote": [], + "bbox": [ + 86, + 92, + 367, + 431 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d715cafb2b6af2c7094e88f5dff4bbf7e8fb8e731426e3ca0f77d84460d8c3a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 92, + 627, + 431 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6fe6fc94d25497f665769f6248b26f4df3aedbb4db51ea523686a25d8e882e8e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 93, + 890, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Loss Functions. To ensure texture quality and diversity, we resort to the adversarial loss used to fine-tune GANs as our main loss function. We use the standard non-saturating loss to train the generator and discriminator networks used in EG3D [11]. We also perform lazy density regularization to ensure consistency of the density values in the final finetuned model $\\mathrm{G}_{\\mathrm{t}}$ .", + "bbox": [ + 75, + 479, + 468, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Texture Regularization. Since the texture can be entangled with the geometry information, determining which layers to update is important. To make use of the fine-style information encoded in later layers, it is essential to update the $tRGB$ layer parameters (outputting tri-plane features) before the neural rendering stage. $tRGB$ are convolutional layers that transform feature maps to 3 channels at each resolution (96 channels in triplanes). Moreover, since the network has to adapt to a color distribution of $T_{t}$ , it is essential to update the decoder (MLP layers) of the neural rendering pipeline as well. Given the EG3D architecture, we also update the super-resolution layer parameters to ensure the coherency between the low-resolution and high-resolution outputs seen by the discriminator D.", + "bbox": [ + 75, + 592, + 468, + 803 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Geometry Regularization. In order to allow the network to learn the structure distribution of $T_{\\mathrm{t}}$ and at the same time ensure properties of $\\mathcal{W}$ and $S$ latent spaces are preserved, we update the earlier layers with regularization. This also encourages the latent spaces of $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ to be easily linked. Essentially, we update the deviation parameter $\\Delta s$ from the", + "bbox": [ + 75, + 809, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$s$ activations of the $S$ space [62]. The $s$ activations are predicted by $\\mathrm{A}(w)$ , where $\\mathrm{A}$ is the learned affine function in EG3D. The $s$ activations scale the kernels of a particular layer. In order to preserve the identity as well as geometry such that the optimization of $\\Delta s$ does not deviate too far away from the original domain $T_{\\mathrm{s}}$ , we introduce a regularizer given by", + "bbox": [ + 496, + 479, + 890, + 585 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR (\\Delta s) := \\| \\Delta s \\| _ {1}. 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 632, + 585, + 890, + 602 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that we apply $\\mathrm{R}(\\Delta s)$ regularization in a lazy manner, i.e., with density regularization. Interestingly, after training, we can interpolate between $s$ and $s + \\Delta s$ parameters to interpolate between the geometries of samples in $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ (See Fig. 5).", + "bbox": [ + 496, + 609, + 890, + 684 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Depth Regularization. Next, we observe that even though the above design choice produces better geometry for $T_{\\mathrm{t}}$ , some samples from $G_{\\mathrm{t}}$ can still lead to flatter geometry, and it is hard to detect these cases. We found that the problem is related to the relative depth of the background to the foreground. To circumvent this problem, we use an additional regularization where we encourage the average background depth of $G_{\\mathrm{t}}$ to be similar to $G_{\\mathrm{s}}$ . Let $S_{\\mathrm{b}}$ be a face background segmentation network [34]. We first compute the average background depth of the samples given by $G_{\\mathrm{s}}$ . This average depth is given by", + "bbox": [ + 496, + 685, + 892, + 851 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\na _ {\\mathrm {d}} := \\frac {1}{M} \\sum_ {n = 1} ^ {M} \\left(\\frac {1}{N _ {n}} \\| D _ {n} \\odot \\mathrm {S} _ {\\mathrm {b}} (I _ {n}) \\| _ {F} ^ {2}\\right). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 862, + 890, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4555", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/8dff4311d2c0df9713a53db9cc71a323be40d2dafbf0424e7022191e653e146c.jpg", + "image_caption": [ + "Figure 4. 3D avatars from real images. Projection of real images on the 3D avatar generators." + ], + "image_footnote": [], + "bbox": [ + 102, + 90, + 447, + 431 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $D_{n}$ is the depth map of the image $I_{n}$ sampled from $G_{\\mathrm{s}}$ , $\\odot$ represents the Hadamard product, $M$ is the number of the sampled images, and $N_{n}$ is the number of background pixels in $I_{n}$ . Finally, regularization is defined as:", + "bbox": [ + 76, + 470, + 468, + 531 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {R} (D) := \\left\\| a _ {\\mathrm {d}} \\cdot J - \\left(D _ {\\mathrm {t}} \\odot \\mathrm {S} _ {\\mathrm {b}} \\left(I _ {\\mathrm {t}}\\right)\\right) \\right\\| _ {F}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 143, + 540, + 468, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $D_{\\mathrm{t}}$ is the depth map of the image $I_{\\mathrm{t}}$ sampled from $\\mathbf{G}_{\\mathrm{t}}$ and $J$ is the matrix of ones having the same spatial dimensions as $D_{\\mathrm{t}}$ .", + "bbox": [ + 76, + 566, + 468, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. What discriminator to use?", + "text_level": 1, + "bbox": [ + 76, + 619, + 323, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given that the data in $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ is not paired and $T_{\\mathrm{t}}$ is not assumed to contain camera parameter annotations, the choice of the discriminator (D) used for this task is also a critical design choice. 
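A minimal sketch of the geometry and depth regularizers in Eqs. (2)-(4), assuming `delta_s` holds the learned S-space offsets and `segment_background` is a hypothetical stand-in for the face-background segmentation network S_b returning a soft mask that is 1 on background pixels.

```python
import torch

def geometry_reg(delta_s):
    # Eq. (2): L1 penalty keeping the S-space offsets small, so the adapted
    # generator stays close to the geometry prior of the source domain.
    return delta_s.abs().sum()

def avg_background_depth(depths, images, segment_background):
    # Eq. (3): average squared background depth over samples from the source G_s.
    vals = []
    for depth, img in zip(depths, images):
        mask = segment_background(img)
        n_bg = mask.sum().clamp(min=1.0)          # number of background pixels
        vals.append(((depth * mask) ** 2).sum() / n_bg)
    return torch.stack(vals).mean()

def depth_reg(depth_t, image_t, a_d, segment_background):
    # Eq. (4): push the masked background depth of a target-domain sample towards
    # the pre-computed source average a_d to discourage flat degenerate geometry.
    mask = segment_background(image_t)
    return torch.norm(a_d - depth_t * mask)       # Frobenius norm over the map
```

Both regularizers are described as being applied lazily, i.e. only every few iterations alongside the density regularization.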
Essentially, we use the unconditional version of the dual discriminator proposed in EG3D, and hence, we do not condition the discriminator on the camera information. As a result, during the training, $G_{\\mathrm{t}}$ generates arbitrary images with pose using $\\mathrm{M}(\\theta', \\phi', c', r')$ , and the discriminator discriminates these images using arbitrary images from $T_{\\mathrm{t}}$ . We train the discriminator from scratch and in order to adapt $T_{\\mathrm{s}} \\rightarrow T_{\\mathrm{t}}$ , we use the StyleGAN-ADA [28] training scheme and use R1 regularization.", + "bbox": [ + 76, + 642, + 468, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. How to incorporate larger geometric deformations between domains?", + "text_level": 1, + "bbox": [ + 76, + 832, + 468, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "While the regularizers are used to limit the geometric changes when adapting from $T_{\\mathrm{s}}$ to $T_{\\mathrm{t}}$ , modeling large ge", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ometric deformations, e.g., in the caricature dataset is another challenge. One choice to edit the geometry is to use the properties of tri-plane features learned by EG3D. We start out by analyzing these three planes in $\\mathrm{G_s}$ . We observe that the frontal plane encodes most of the information required to render the final image. To quantify this, we sample images and depth maps from $\\mathrm{G_s}$ and swap the front and the other planes from two random images. Then we compare the difference in RGB values of the images and the Chamfer distance of the depth maps. While swapping the frontal tri-planes, the final images are completely swapped, and the Chamfer distance changes by $80\\sim 90\\%$ matching the swapped image depth map. In the case of the other two planes, the RGB image is not much affected and the Chamfer distance of the depth maps is reduced by only $20\\sim 30\\%$ in most cases.", + "bbox": [ + 496, + 90, + 890, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the analysis, we focus to manipulate the $2D$ front plane features to learn additional deformation or exaggerations. We learn a TPS (Thin Plate Spline) [61] network on top of the front plane. Our TPS network is conditioned both on the front plane features as well as the $\\mathcal{W}$ space to enable multiple transformations. The architecture of the module is similar to the standard StyleGAN2 layer with an MLP appended at the end to predict the control points that transform the features. Hence, as a byproduct, we also enable 3D-geometry editing guided by the learned latent space. We train this module separately after $G_{\\mathrm{t}}$ has been trained. We find that joint training is unstable due to exploding gradients arising from the large domain gap between $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ in the initial stages. 
Formally, we define this transformation as:", + "bbox": [ + 496, + 332, + 892, + 544 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {T} (w, f) := \\Delta c, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 640, + 554, + 890, + 570 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where, $w$ is the latent code, $f$ is the front plane, and $c$ are the control points.", + "bbox": [ + 496, + 580, + 890, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $c_{\\mathrm{I}}$ be the initial control points producing an identity transformation, $(c_{1}, c_{2})$ be the control points corresponding to front planes $(f_{1}, f_{2})$ sampled using $\\mathcal{W}$ codes $(w_{1}, w_{2})$ , respectively, and $(c_{1}', c_{2}')$ be points with $(w_{1}, w_{2})$ swapped in the TPS module. To regularize and encourage the module to learn different deformations, we have", + "bbox": [ + 496, + 611, + 890, + 700 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {R} \\left(\\mathrm {T} _ {1}\\right) := \\alpha \\sum_ {n = 1} ^ {2} \\| c _ {I} - c _ {n} \\| _ {1} - \\beta \\| c _ {1} - c _ {2} \\| _ {1} - \\sigma \\| c _ {1} ^ {\\prime} - c _ {2} ^ {\\prime} \\| _ {1}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 710, + 895, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use initial control point regularization to regularize large deviations in the control points which would otherwise explode. Additionally, to learn extreme exaggerations in $T_{\\mathrm{t}}$ and 'in expectation', conform to the target distribution in the dataset, we add an additional loss term. Let $S(I)$ be the soft-argmax output of the face segmentation network [34] given an image $I$ and assuming that $S$ generalizes to caricatures, then", + "bbox": [ + 496, + 763, + 890, + 883 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {R} \\left(\\mathrm {T} _ {2}\\right) := \\| \\mathrm {S} \\left(\\mathrm {G} _ {\\mathrm {t}} (w)\\right), \\mathrm {S} \\left(I _ {\\mathrm {t}}\\right) \\| _ {1} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 883, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "4556", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b54b53f3e6f87b4fc83c381fce3ac23524a5f98093017d653f4f9af65eb27f18.jpg", + "image_caption": [ + "Figure 5. Interpolation of $\\Delta s$ . Geometric deformation using the interpolation of learned $\\Delta s$ parameters." + ], + "image_footnote": [], + "bbox": [ + 80, + 89, + 467, + 200 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Eq. 6, Eq. 7, and adversarial training loss are used to train the $TPS$ module. We adopt gradient clipping to make sure that the training does not diverge. See the illustrations in Fig. 3 and Fig. 4 of the supplementary materials.", + "bbox": [ + 76, + 253, + 467, + 315 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Personalized Avatar Generation and Editing", + "text_level": 1, + "bbox": [ + 76, + 330, + 467, + 347 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Although 3D domain adaptation adapts $T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}}$ , it is still a challenge to effectively link the latent spaces of $\\mathrm{G_s}$ and $\\mathrm{G_t}$ to generate personalized 3D avatars using a single photograph as the reference image. 
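A minimal sketch of the control-point regularizer in Eq. (6), assuming `tps(w, f)` is a hypothetical stand-in for the deformation module T that returns the control-point offsets Δc, and `c_identity` are the control points of the identity transformation.

```python
import torch

def tps_control_point_reg(tps, c_identity, w1, w2, f1, f2,
                          alpha=1.0, beta=1.0, sigma=1.0):
    # Eq. (6): keep predicted control points close to the identity transformation
    # while encouraging different samples (and swapped latent codes) to produce
    # distinct deformations.
    c1 = c_identity + tps(w1, f1)
    c2 = c_identity + tps(w2, f2)
    c1_swapped = c_identity + tps(w2, f1)   # same front plane, swapped w code
    c2_swapped = c_identity + tps(w1, f2)
    return (alpha * ((c_identity - c1).abs().sum() + (c_identity - c2).abs().sum())
            - beta * (c1 - c2).abs().sum()
            - sigma * (c1_swapped - c2_swapped).abs().sum())
```

In training, this term would be combined with the segmentation-matching loss of Eq. (7) and the adversarial loss, with gradient clipping to keep the control points from diverging.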
Particularly, the challenge arises due to the discrepancy in the coupled latent spaces when dealing with the projection of real photographs on 3D generators. Moreover, one would like to edit and animate these 3D avatars.", + "bbox": [ + 75, + 357, + 467, + 474 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Projection. The task is to project a real image into the latent space of $\\mathrm{G_s}$ , transfer the latent to $\\mathrm{G_t}$ , and further optimize it to construct a 3D avatar. First, we use an optimization-based method to find the $w$ code that minimizes the similarity between the generated and the real image in $\\mathrm{G_s}$ . To achieve this, the first step is to align the cameras. We follow the steps mentioned in Sec. 3.1 for this step. Next, we use pixel-wise MSE loss and LPIPS loss to project the image into $\\mathrm{G_s}$ [1]. Additionally, to preserve the identity of the subject, we use attribute classifiers e.g. caricature dataset [24] provides the coupled attribute information of real images and caricatures. We use such attribute classifier [24,25] in a post-hoc manner as we notice that such networks can affect the texture in the target domain and could degenerate to narrow style outputs if applied during training. Moreover, such networks may not be available for all target domains. To avoid overfitting into $\\mathrm{G_s}$ and encourage the easier transfer of the optimized latent code to $\\mathrm{G_t}$ , we use $\\mathcal{W}$ space optimization for this step. Finally, we initialize this $w$ code for $\\mathrm{G_t}$ and use additional attribute classifier loss [25] for $T_{\\mathrm{t}}$ domain along with Depth regularization $\\mathrm{R}(D)$ (Eq. 4). As an approximation, we assume that attribute classifier [24, 25] generalizes across all domains. We use $\\mathcal{W} / \\mathcal{W} +$ space optimization to control the quality and diversity of the outputs. See Algorithm 1 in supplementary for the description.", + "bbox": [ + 75, + 477, + 467, + 853 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Editing and Animation. Since our 3D domain adaptation is designed to preserve the properties of $\\mathcal{W}$ and $S$ spaces, we can perform semantic edits via InterFaceGAN [51],", + "bbox": [ + 76, + 854, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "GANSpace [22], StyleSpace [62] etc., and geometric edits using TPS (Sec. 3.4) and $\\Delta s$ interpolation (Sec. 3.2). To perform video editing, we design an encoder for EG3D based on $e4e$ [58] to encode videos and transfer the edits from $\\mathrm{G_s}$ to $\\mathrm{G_t}$ based on the $w$ codes [4,6,59]. We leave a more fine-grained approach for video processing as future work.", + "bbox": [ + 496, + 90, + 890, + 196 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Results", + "text_level": 1, + "bbox": [ + 500, + 213, + 586, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Quantitative Results", + "text_level": 1, + "bbox": [ + 500, + 239, + 692, + 255 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we consider three important evaluations to verify the quality of the texture, geometry, and identity preservation in the new domain using the Caricature, Cartoons, and Pixar toons datasets. We evaluate the ablation of our design choices in the supplementary materials. 
In the evaluation, let $\\mathrm{G}_{\\mathrm{base}}$ be the baseline naive fine-tuning method which is trained with all the parameters using the losses in EG3D fine-tuned from FFHQ trained prior $\\mathrm{G}_{\\mathrm{s}}$ . Note here we still align the cameras in $\\mathrm{G}_{\\mathrm{base}}$ using the method defined in Sec. 3.1 and use adaptive discriminator [28] with R1 regularization for a fair comparison.", + "bbox": [ + 496, + 263, + 890, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Texture Quality. To verify the quality of the texture, diversity of samples as well as to some extent, the geometry in the target domain $T_{\\mathrm{t}}$ , we compare the FID [23] scores using $\\mathrm{G}_{\\mathrm{base}}$ and $\\mathrm{G}_{\\mathrm{t}}$ in Table 1. Note that in the case of Caricatures, we report two scores i.e. with and without using the attribute classifier loss in the training as discussed in Sec. 4. Notice that our method outperforms the naive baseline method by a huge margin in some cases, especially in Caricatures and Cartoons. We attribute these differences to the mode collapse prone training of $\\mathrm{G}_{\\mathrm{base}}$ which is correlated with flat geometry degenerate solution. We show visual results of the flat geometries learned by $\\mathrm{G}_{\\mathrm{base}}$ and comparison in Fig. 2.", + "bbox": [ + 496, + 431, + 890, + 627 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Geometric Quality. To quantify the flat geometries, in Table 2, we show three scores that help us understand such degenerate solutions. Here we consider coupled depth maps generated from sampling in the domains $T_{\\mathrm{s}}$ ( $\\mathrm{G_s}$ ) and $T_{\\mathrm{t}}$ ( $\\mathrm{G_t}$ and $\\mathrm{G}_{\\mathrm{base}}$ ). First, we compute the expectation of the absolute mean differences ( $M_{\\mathrm{d}}$ ) of the corresponding foreground depth maps sampled from $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ . We also compute the expectation of the absolute standard deviation differences ( $S_{\\mathrm{d}}$ ) for the same setting. Here, we assume that the flatter geometries have a large difference in the depth maps as compared to the prior as indicated by $M_{\\mathrm{d}}$ . Moreover, $S_{\\mathrm{d}}$ computes the distance in the distribution of the depth values, where a larger difference indicates a narrow distribution, and hence a flatter geometry. We also notice that the flat geometry is correlated with the generator learning diverse poses when images are rendered under standard canonical camera parameters i.e. $\\mathrm{M}(0,0,c,r)$ . We hypothesize in the case of the flatter geometries, the model learns to pose in", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "4557", + "bbox": [ + 480, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/81649d86574bde316f083bf9578b5c3b43f0db2eb221bbdfe805561260412bd4.jpg", + "image_caption": [ + "Figure 6. Deformations using TPS. Geometric edits using our proposed TPS (Thin Plate Spline) module learned on the frontal tri-plane features. Each sub-figure shows a 3D avatar and three examples of TPS deformations sampled from the learned 3D deformation space." + ], + "image_footnote": [], + "bbox": [ + 80, + 87, + 893, + 208 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b75b22dfc573fbc5daadb15ad48e78d01c25f88687fe0be3a16c2d82bb359035.jpg", + "table_caption": [ + "Table 1. FID Computation. 
FID (Frechet Inception Distance) between the 2D dataset and the samples generated by the fine-tuned 3D GAN using the baseline $(\\mathrm{G_{base}})$ and Ours $(\\mathrm{G_t})$. '*' represents the score with the inclusion of the attribute classifier loss discussed in Sec. 4." + ], + "table_footnote": [], + "table_body": "
Method | Caricatures | Cartoons | Pixar Toons
Gbase | 67.8 | 79.0 | 15.1
Gt (Ours) | 19.4/20.2* | 12.8 | 12.4
", + "bbox": [ + 135, + 339, + 411, + 386 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/443cb2fcc18dcfe0d24843254017e56f49ff46e89b5977e915d18286a9385580.jpg", + "table_caption": [ + "Table 2. Geometry Evaluation. Comparing the geometry using baseline method $(\\mathrm{G}_{\\mathrm{base}})$ and Ours $(\\mathrm{G}_{\\mathrm{t}})$ . For the definition of $M_{\\mathrm{d}}$ , $S_{\\mathrm{d}}$ and $\\mathrm{R}(\\mathrm{T}_2)$ , refer to Sec. 5.1." + ], + "table_footnote": [], + "table_body": "
Metric | Method | Caricatures | Cartoons | Pixar
Md ↓ | Gbase | 0.47 | 0.21 | 0.29
Md ↓ | Gt (Ours) | 0.21 | 0.13 | 0.13
Sd ↓ | Gbase | 0.22 | 0.14 | 0.15
Sd ↓ | Gt (Ours) | 0.15 | 0.10 | 0.09
R(T2) ↓ | Gbase | 2.99 | 3.39 | 4.01
R(T2) ↓ | Gt (Ours) | 2.27 | 1.62 | 1.56
", + "bbox": [ + 117, + 455, + 429, + 555 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6fbead03a24f92fee3853dded50ba16618c8bf076ffa0a9ac7367b45c29a9db6.jpg", + "table_caption": [ + "Table 3. Identity Preservation. Identity preservation using baseline $(\\mathrm{G}_{\\mathrm{base}})$ and Ours $(\\mathrm{G}_{\\mathrm{t}})$ ." + ], + "table_footnote": [], + "table_body": "
Method | Caricatures | Cartoons | Pixar Toons
Gbase | 1.28 | 0.92 | 0.85
Gt (Ours) | 0.87 | 0.81 | 0.73
", + "bbox": [ + 133, + 612, + 413, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "formation in the earlier layers instead of being camera view-dependent. To quantify this, since pose information may not be available for some domains (e.g. cartoons), we compute the $\\mathrm{R}(\\mathrm{T}_2)$ scores between corresponding images in the domain $T_{\\mathrm{s}}$ ( $\\mathrm{G}_{\\mathrm{s}}$ ) and $T_{\\mathrm{t}}$ ( $\\mathrm{G}_{\\mathrm{t}}$ and $\\mathrm{G}_{\\mathrm{base}}$ ). Note that these scores are computed without the TPS module. Our scores are lower in all three metrics, hence, validating that our method avoids the degenerate solution and preserves the geometric distribution of the prior. For discussion on the TPS module and ablations refer to the supplementary materials.", + "bbox": [ + 75, + 672, + 468, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Identity Preservation. Identity preservation score is another important evaluation to check the quality of latent space linking between $\\mathrm{G_s}$ and $\\mathrm{G_t}$ . In Table 3, we compute the attribute loss (BCE loss) between the domains $T_{\\mathrm{s}}$ and $T_{\\mathrm{t}}$ using the attribute classifiers [24, 25]. Note that our method", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ffcb362871dc7ba10eb393f7d821dc10ff50e964ee7a1b5456194ee10c11b4a4.jpg", + "image_caption": [ + "Figure 7. Local edits. Local edits performed on the 3D avatars using the $S$ space." + ], + "image_footnote": [], + "bbox": [ + 542, + 257, + 852, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "is able to preserve the identity better across the domains.", + "bbox": [ + 500, + 710, + 872, + 727 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Qualitative Results", + "text_level": 1, + "bbox": [ + 500, + 739, + 684, + 756 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For qualitative results, we show the results of the domain adaptation, as well as the personalized edits (geometric and semantic), performed on the resultant 3D avatars. First, in order to show the quality of domain adaptation, identity preservation, and geometric consistency, in Fig. 3, we show results from $\\mathrm{G}_{\\mathrm{s}}$ and corresponding results from 3D avatar generator $\\mathrm{G}_{\\mathrm{t}}$ trained on Caricatures, Pixar toons, Cartoons, and Comic domains. Next, in order to show that the method generalizes to real images, we use the method described in", + "bbox": [ + 496, + 763, + 893, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "4558", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1f8079284bc58efc40c07e2aaf6929d07f1e41440083f4cf8015ce8654f1cec.jpg", + "image_caption": [ + "Figure 8. 3D avatar animation. Animation of 3D avatars generated using a driving video encoded in source domain $T_{s}$ and applied to samples in target domain $T_{t}$ . The top row shows the driving video and the subsequent rows show generated animations using a random Caricature or Pixar toon. The head pose is changed in each frame of the generated animation to show 3D consistency." + ], + "image_footnote": [], + "bbox": [ + 158, + 88, + 810, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Sec. 4 to project and transfer the latent code from $\\mathrm{G_s}$ to $\\mathrm{G_t}$ to produce the 3D avatars. In Fig. 
4, we show our results of real to 3D avatar transfer. Notice the quality both in terms of texture as well as geometry for both these results achieved by our method. Next, we show geometric and semantic edits possible to produce personalized 3D avatars:", + "bbox": [ + 75, + 500, + 470, + 590 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Geometry Edits. We show two types of geometric edits i.e. $\\Delta s$ interpolation (Sec. 3.2) and deformation using $TPS$ (Sec. 3.4). First, in Fig. 5, we show the geometry interpolation by interpolating between original $s$ activations of $\\mathrm{G_s}$ and learned $\\Delta s$ parameters. In Fig. 6, we show some additional exaggerations in caricatures using the learned 3D deformation latent space of $TPS$ module.", + "bbox": [ + 75, + 599, + 468, + 704 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Semantic Edits and Animation. Since in our method, we encourage the latent regularization to preserve the properties of the latent space learned by the $\\mathrm{G}_{\\mathrm{s}}$ generator, in Fig. 7 we show $S$ space edits performed on the 3D avatars. Notice the quality of edits in terms of locality and adaptability. Additionally, we can edit semantics like hair as opposed to 3D morphable model based methods. In Fig. 8, thanks to the latent space semantics preservation ensured by our method, we can perform some video edits to create a coherent animation based on the difference of $w$ codes of video encoded in $\\mathrm{G}_{\\mathrm{s}}$ (Sec. 4) and applied to layers $7 - 10$ in $\\mathrm{G}_{\\mathrm{t}}$ . Notice the quality of expressions, identity preservation, and 3D consistency across each identity in each row.", + "bbox": [ + 75, + 705, + 470, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 498, + 619, + 513 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We tackled two open research problems in this paper. In the first part, we proposed the first domain adaptation method for 3D-GANs to the best of our knowledge. This part yields two linked EG3D generators, one in the photorealistic source domain of faces, and another EG3D generator in an artistic target domain. As possible target domains, we show results for cartoons, caricatures, and Comics. In the second part, we built on domain adaptation to create 3D avatars in an artistic domain that can be edited and animated. Our framework consists of multiple technical components introduced in this paper. First, we propose a technique for camera space estimation for artistic domains. Second, we introduce a set of regularizers and loss functions that can regularize the fine-tuning of EG3D in such a way that enough of the 3D structure and geometry of the original model is kept, while the distinguishing attributes of the artistic domain, such as textures and colors and local geometric deformations can still be learned. Third, we introduce a geometric deformation module that can reintroduce larger geometric deformations in a controlled manner. These larger geometric deformations can interact and cooperate with EG3D so that semantic edits are still possible. 
Finally, we propose an embedding algorithm that is especially suitable for two linked EG3D generator networks.", + "bbox": [ + 496, + 523, + 890, + 888 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "4559", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, Seoul, Korea, 2019. IEEE. 2, 6", + "[2] Rameen Abdal, Peihao Zhu, John Femiani, Niloy Mitra, and Peter Wonka. Clip2stylegan: Unsupervised extraction of stylegan edit directions. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, New York, NY, USA, 2022. Association for Computing Machinery. 2", + "[3] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. ACM Trans. Graph., 40(3), may 2021. 2, 3", + "[4] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Video2stylegan: Disentangling local and global variations in a video, 2022. 6", + "[5] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2021. 2", + "[6] Yuval Alaluf, Or Patashnik, Zongze Wu, Asif Zamir, Eli Shechtman, Dani Lischinski, and Daniel Cohen-Or. Third time's the charm? image and video editing with stylegan3. CoRR, abs/2201.13433, 2022. 6", + "[7] Yuval Alaluf, Omer Tov, Ron Mokady, Rimon Gal, and Amit H. Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. CoRR, abs/2111.15666, 2021. 2", + "[8] Anonymous. 3d generation on imagenet. In Open Review, 2023. 3", + "[9] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2", + "[10] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 1, 2, 3", + "[11] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks, 2021. 2, 4", + "[12] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 2", + "[13] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In IEEE Conference on Computer Vision and Pattern Recognition, 2022. 
2" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[14] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2", + "[15] Min Jin Chong and David A. Forsyth. Jojogan: One shot face stylization. CoRR, abs/2112.11641, 2021. 2", + "[16] Min Jin Chong, Hsin-Ying Lee, and David Forsyth. Stylegan of all trades: Image manipulation with only pretrained stylegan. arXiv preprint arXiv:2111.01619, 2021. 2", + "[17] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946, 2021. 2", + "[18] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators, 2021. 2", + "[19] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks, 2014. 2", + "[20] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylererf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations, 2022. 2", + "[21] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 2", + "[22] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. arXiv preprint arXiv:2004.02546, 2020. 2, 6", + "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6", + "[24] Jing Huo, Wenbin Li, Yinghuan Shi, Yang Gao, and Hujun Yin. Webcaricature: a benchmark for caricature recognition. In British Machine Vision Conference, 2018. 3, 6, 7", + "[25] Wonjong Jang, Gwangjin Ju, Yucheol Jung, Jiaolong Yang, Xin Tong, and Seungyong Lee. Stylecarigan: Caricature generation via stylegan feature map modulation. 40(4), 2021. 2, 3, 6, 7", + "[26] Yucheol Jung, Wonjong Jang, Soongjin Kim, Jiaolong Yang, Xin Tong, and Seungyong Lee. Deep deformable 3d caricatures with learned shape control. In Special Interest Group on Computer Graphics and Interactive Techniques Conference Proceedings. ACM, aug 2022. 2", + "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation, 2017. 2", + "[28] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In Proc. NeurIPS, 2020. 1, 2, 5, 6", + "[29] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. arXiv preprint arXiv:2006.06676, 2020.3" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "4560", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[30] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. 
Alias-free generative adversarial networks, 2021. 1, 2", + "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4401-4410, 2019. 1, 3", + "[32] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based generator architecture for generative adversarial networks. IEEE transactions on pattern analysis and machine intelligence, 43(12):4217-4228, Dec. 2021. 2", + "[33] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In Proc. CVPR, 2020. 2", + "[34] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 4, 5", + "[35] Thomas Leimkuhler and George Drettakis. Freestylegan: Free-view editable portrait rendering with the camera manifold. 40(6), 2021. 3", + "[36] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2", + "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2", + "[38] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning gans, 2020. 2", + "[39] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2", + "[40] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2", + "[41] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. StyleSDF: High-Resolution 3D-Consistent Image and Geometry Generation. arXiv preprint arXiv:2112.11427, 2021. 1, 2", + "[42] Xingang Pan, Bo Dai, Ziwei Liu, Chen Change Loy, and Ping Luo. Do 2d gans know 3d shape? unsupervised 3d shape reconstruction from 2d image gans. arXiv preprint arXiv:2011.00844, 2020. 2", + "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Deformable neural radiance fields. arXiv preprint arXiv:2011.12948, 2020. 2", + "[44] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery, 2021. 2" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[45] Justin N. M. Pinkney and Doron Adler. Resolution dependent gan interpolation for controllable image synthesis between domains, 2020. 2, 3", + "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 2", + "[47] Alec Radford, Luke Metz, and Soumith Chintala. 
Unsupervised representation learning with deep convolutional generative adversarial networks, 2015. 2", + "[48] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. arXiv preprint arXiv:2008.00951, 2020. 2", + "[49] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 2", + "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2", + "[51] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2, 6", + "[52] Yichun Shi, Divyansh Aggarwal, and Anil K Jain. Lifting 2d stylegan for 3d-aware face generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6258-6266, 2021. 2", + "[53] Ivan Skorokhodov, Aliaksandr Siarohin, Yinghao Xu, Jian Ren, Hsin-Ying Lee, Peter Wonka, and Sergey Tulyakov. 3d generation on imagenet. In International Conference on Learning Representations (ICLR), 2023. 2", + "[54] Guoxian Song, Linjie Luo, Jing Liu, Wan-Chun Ma, Chunpong Lai, Chuanxia Zheng, and Tat-Jen Cham. Agilegan: Stylizing portraits by inversion-consistent transfer learning. ACM Trans. Graph., 40(4), jul 2021. 2", + "[55] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis, 2022. 1, 2", + "[56] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020. 2", + "[57] Ayush Tewari, Mohamed Elgharib, Mallikarjun BR, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zöllhofer, and Christian Theobalt. Pie: Portrait image embedding for semantic control. volume 39, December 2020. 2", + "[58] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. arXiv preprint arXiv:2102.02766, 2021. 2, 6" + ], + "bbox": [ + 501, + 92, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "4561", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[59] Rotem Tzaban, Ron Mokady, Rinon Gal, Amit H. Bermano, and Daniel Cohen-Or. *Stitch it in time: Gan-based facial editing of real videos. CoRR*, abs/2201.08361, 2022. 6", + "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Cross-domain and disentangled face manipulation with 3d guidance. IEEE Transactions on Visualization and Computer Graphics, 2022. 2", + "[61] WarBean. tps-stn-pytorch. https://github.com/WarBean/tps_stn_pytorch.5", + "[62] Zongze Wu, Dani Lischinski, and Eli Shechtman. Stylespace analysis: Disentangled controls for stylegan image generation. arXiv preprint arXiv:2011.12799, 2020. 
2, 4, 6", + "[63] Yinghao Xu, Menglei Chai, Zifan Shi, Sida Peng, Ivan Skorokhodov, Aliaksandr Siarohin, Ceyuan Yang, Yujun Shen, Hsin-Ying Lee, Bolei Zhou, et al. Discoscene: Spatially disentangled generative radiance fields for controllable 3d-aware scene synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, 2023. 2", + "[64] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. arXiv preprint arXiv:2112.10759, 2021. 1, 2", + "[65] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In CVPR, 2022. 2, 3", + "[66] Zipeng Ye, Mengfei Xia, Yanan Sun, Ran Yi, Minjing Yu, Juyong Zhang, Yu-Kun Lai, and Yong-Jin Liu. 3d-CariGAN: An end-to-end solution to 3d caricature generation from normal face photos. IEEE Transactions on Visualization and Computer Graphics, pages 1-1, 2021. 2", + "[67] Fisher Yu, Yinda Zhang, Shuran Song, Ari Seff, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015. 2", + "[68] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2", + "[69] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision, pages 592-608. Springer, 2020. 2", + "[70] Peihao Zhu, Rameen Abdal, John Femiani, and Peter Wonka. Mind the gap: Domain gap control for single shot domain adaptation for generative adversarial networks. In International Conference on Learning Representations, 2022. 2", + "[71] Peihao Zhu, Rameen Abdal, Yipeng Qin, John Femiani, and Peter Wonka. Improved stylegan embedding: Where are the good latents?, 2020. 2", + "[72] zllrunning. face-parsing.pytorch. https://github.com/zllrunning/face-parsing.PyTorch.2,3" + ], + "bbox": [ + 78, + 90, + 468, + 800 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "4562", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_model.json b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9496efd3718d52c835147405c19fb4c51a712946 --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_model.json @@ -0,0 +1,2114 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.0, + 0.812, + 0.045 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.139, + 0.131, + 0.833, + 0.154 + ], + "angle": 0, + "content": "3DAvatarGAN: Bridging Domains for Personalized Editable Avatars" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.179, + 0.856, + 0.27 + ], + "angle": 0, + "content": "Rameen Abdal\\(^{\\dagger 1}\\) Hsin-Ying Lee\\(^{2}\\) Peihao Zhu\\(^{\\dagger 1}\\) Minglei Chai\\(^{2}\\) Aliaksandr Siarohin\\(^{2}\\) \nPeter Wonka\\(^{1}\\) Sergey Tulyakov\\(^{2}\\) \n\\(^{1}\\)KAUST \\(^{2}\\)Snap Inc." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.286, + 0.885, + 0.558 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.568, + 0.893, + 0.611 + ], + "angle": 0, + "content": "Figure 1. Editable 3D avatars. We present 3DAvatarGAN, a 3D GAN able to produce and edit personalized 3D avatars from a single photograph (real or generated). Our method distills information from a 2D-GAN trained on 2D artistic datasets like Caricatures, Pixar toons, Cartoons, Comics etc. and requires no camera annotations." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.623, + 0.314, + 0.639 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.474, + 0.902 + ], + "angle": 0, + "content": "Modern 3D-GANs synthesize geometry and texture by training on large-scale datasets with a consistent structure. Training such models on stylized, artistic data, with often unknown, highly variable geometry, and camera information has not yet been shown possible. Can we train a 3D GAN on such artistic data, while maintaining multi-view consistency and texture quality? To this end, we propose an adaptation framework, where the source domain is a pre-trained 3D-GAN, while the target domain is a 2D-GAN trained on artistic datasets. We, then, distill the knowledge from a 2D generator to the source 3D generator. To do that, we first propose an optimization-based method to align the distributions of camera parameters across domains. Second, we propose regularizations necessary to learn high-quality texture, while avoiding degenerate geometric solutions, such as flat shapes. Third, we show" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.624, + 0.896, + 0.748 + ], + "angle": 0, + "content": "a deformation-based technique for modeling exaggerated geometry of artistic domains, enabling—as a byproduct—personalized geometric editing. Finally, we propose a novel inversion method for 3D-GANs linking the latent spaces of the source and the target domains. Our contributions—for the first time—allow for the generation, editing, and animation of personalized artistic 3D avatars on artistic datasets. Project Page: https://rameenabdal.github.io/3DAvatarGAN" + }, + { + "type": "title", + "bbox": [ + 0.501, + 0.759, + 0.633, + 0.775 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.784, + 0.895, + 0.877 + ], + "angle": 0, + "content": "Photo-realistic portrait face generation is an iconic application demonstrating the capability of generative models especially GANs [28,30,31]. A recent development has witnessed an advancement from straightforwardly synthesizing 2D images to learning 3D structures without 3D supervision, referred to as 3D-GANs [10,41,55,64]. Such training" + }, + { + "type": "page_footnote", + "bbox": [ + 0.525, + 0.887, + 0.848, + 0.902 + ], + "angle": 0, + "content": "† Part of the work was done during an internship at Snap Inc." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "4552" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.473, + 0.304 + ], + "angle": 0, + "content": "is feasible with the datasets containing objects with highly consistent geometry, enabling a 3D-GAN to learn a distribution of shapes and textures. In contrast, artistically stylized datasets [25, 65] have arbitrary exaggerations of both geometry and texture, for example, the nose, cheeks, and eyes can be arbitrarily drawn, depending on the style of the artist as well as on the features of the subject, see Fig. 1. Training a 3D-GAN on such data becomes problematic due to the challenge of learning such an arbitrary distribution of geometry and texture. In our experiments (Sec. 5.1), 3D-GANs [10] generate flat geometry and become 2D-GANs essentially. A natural question arises, whether a 3D-GAN can synthesize consistent novel views of images belonging to artistically stylized domains, such as the ones in Fig. 1." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.305, + 0.473, + 0.412 + ], + "angle": 0, + "content": "In this work, we propose a domain-adaption framework that allows us to answer the question positively. Specifically, we fine-tune a pre-trained 3D-GAN using a 2D-GAN trained on a target domain. Despite being well explored for 2D-GANs [25, 65], existing domain adaptation techniques are not directly applicable to 3D-GANs, due to the nature of 3D data and characteristics of 3D generators." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.413, + 0.473, + 0.655 + ], + "angle": 0, + "content": "The geometry and texture of stylized 2D datasets can be arbitrarily exaggerated depending on the context, artist, and production requirements. Due to this, no reliable way to estimate camera parameters for each image exists, whether using an off-the-shelf pose detector [72] or a manual labeling effort. To enable the training of 3D-GANs on such challenging datasets, we propose three contributions. ① An optimization-based method to align distributions of camera parameters between domains. ② Texture, depth, and geometry regularizations to avoid degenerate, flat solutions and ensure high visual quality. Furthermore, we redesign the discriminator training to make it compatible with our task. We then propose ③ a Thin Plate Spline (TPS) 3D deformation module operating on a tri-plane representation to allow for certain large and sometimes extreme geometric deformations, which are so typical in artistic domains." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.657, + 0.473, + 0.884 + ], + "angle": 0, + "content": "The proposed adaptation framework enables the training of 3D-GANs on complex and challenging artistic data. The previous success of domain adaptation in 2D-GANs unleashed a number of exciting applications in the content creation area [25, 65]. Given a single image such methods first find a latent code corresponding to it using GAN inversion, followed by latent editing producing the desired effect in the image space. Compared to 2D-GANs, the latent space of 3D-GANs is more entangled, making it more challenging to link the latent spaces between domains, rendering the existing inversion and editing techniques not directly applicable. Hence, we take a step further and explore the use of our approach to 3D artistic avatar generation and editing. Our final contribution to enable such applications is (4) a new inversion method for coupled 3D-GANs." 
+ }, + { + "type": "text", + "bbox": [ + 0.097, + 0.886, + 0.472, + 0.902 + ], + "angle": 0, + "content": "In summary, the proposed domain-adaption framework" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.895, + 0.199 + ], + "angle": 0, + "content": "allows us to train 3D-GANs on challenging artistic datasets with exaggerated geometry and texture. We call our method 3DAvatarGAN as it—for the first time—offers generation, editing, and animation of personalized stylized, artistic avatars obtained from a single image. Our results (See Sec. 5.2) show the high-quality 3D avatars possible by our method compared to the naive fine-tuning." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.211, + 0.642, + 0.227 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.236, + 0.895, + 0.403 + ], + "angle": 0, + "content": "GANs and Semantic Image Editing. Generative adversarial Networks (GANs) [19, 47] are one popular type of generative model, especially for smaller high-quality datasets such as FFHQ [32], AFHQ [14], and LSUN objects [67]. For these datasets, StyleGAN [28, 30, 32] can be considered as the current state-of-the-art GAN [27, 28, 30, 32, 33]. The disentangled latent space learned by StyleGAN has been shown to exhibit semantic properties conducive to semantic image editing [1, 3, 16, 22, 36, 44, 51, 56, 62]. CLIP [46] based image editing [2, 17, 44] and domain transfer [15, 70] are another set of works enabled by StyleGAN." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.403, + 0.895, + 0.508 + ], + "angle": 0, + "content": "GAN Inversion. Algorithms to project existing images into a GAN latent space are a prerequisite for GAN-based image editing. There are mainly two types of methods to enable such a projection: optimization-based methods [1,13,57,71] and encoder-based methods [5,7,48,58,69]. On top of both streams of methods, the generator weights can be further modified after obtaining initial inversion results [49]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.509, + 0.895, + 0.644 + ], + "angle": 0, + "content": "Learning 3D-GANs with 2D Data. Previously, some approaches attempt to extract 3D structure from pre-trained 2D-GANs [42, 52]. Recently, inspired by Neural Radiance Field (NeRF) [9, 37, 43, 68], novel GAN architectures have been proposed to combine implicit or explicit 3D representations with neural rendering techniques [11, 12, 20, 39-41, 50, 53, 55, 63, 64]. In our work, we build on EG3D [11] which has current state-of-the-art results for human faces trained on the FFHQ dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.645, + 0.896, + 0.902 + ], + "angle": 0, + "content": "Avatars and GANs. To generate new results in an artistic domain (e.g. anime or cartoons), a promising technique is to fine-tune an existing GAN pre-trained on photographs, e.g. [45, 54, 60]. Data augmentation and freezing lower layers of the discriminator are useful tools when fine-tuning a 2D-GAN [28, 38]. One branch of methods [18, 44, 70] investigates domain adaptation if only a few examples or only text descriptions are available. While others focus on matching the distribution of artistic datasets with diverse shapes and styles. Our work also falls in this domain. Among previous efforts, StyleCariGAN [25] proposes invertible modules in the generator to train and generate caricatures from real images. DualStyleGAN [65] learns two mapping networks in StyleGAN to control the style and structure of the new domain. 
Some works are trained on 3D data or require heavy labeling/engineering [21, 26, 66] and use 3D morphable models to map 2D images of carica" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "4553" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.089, + 0.275, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.128, + 0.245, + 0.22, + 0.256 + ], + "angle": 0, + "content": "Naive Fine-Tuning" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.089, + 0.47, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.245, + 0.409, + 0.255 + ], + "angle": 0, + "content": "Our Method" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.267, + 0.47, + 0.349 + ], + "angle": 0, + "content": "Figure 2. Comparison with naive fine-tuning. Comparison of generated 3D avatars with a naively fine-tuned generator \\( \\mathrm{G}_{\\mathrm{base}} \\) (left sub-figures) versus our generator \\( \\mathrm{G}_{\\mathrm{t}} \\) (right sub-figures). The corresponding sub-figures show comparisons in terms of texture quality (top two rows) and geometry (bottom two rows). See Sec. 5.1 for details." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.376, + 0.468, + 0.482 + ], + "angle": 0, + "content": "tures to 3D models. However, such models fail to model the hair, teeth, neck, and clothes and suffer in texture quality. In this work, we are the first to tackle the problem of domain adaption of 3D-GANs and to produce fully controllable 3D Avatars. We employ 2D to 3D domain adaptation and distillation and make use of synthetic 2D data from StyleCariGAN [25] and DualStyleGAN [65]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.52, + 0.383, + 0.538 + ], + "angle": 0, + "content": "3. Domain Adaptation for 3D-GANs" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.47, + 0.901 + ], + "angle": 0, + "content": "The goal of domain adaptation for 3D-GANs is to adapt (both texture and geometry) to a particular style defined by a 2D dataset (Caricature, Anime, Pixar toons, Comic, and Cartoons [24, 25, 65] in our case). In contrast to 2D-StyleGAN-based fine-tuning methods that are conceptually simpler [29, 45], fine-tuning a 3D-GAN on 2D data introduces challenges in addition to domain differences, especially on maintaining the texture quality while preserving the geometry. Moreover, for these datasets, there is no explicit shape and camera information. We define the domain adaptation task as follows: Given a prior 3D-GAN i.e. EG3D \\((\\mathrm{G_s})\\) of source domain \\((T_{\\mathrm{s}})\\), we aim to produce a 3D Avatar GAN \\((\\mathrm{G_t})\\) of the target domain \\((T_{\\mathrm{t}})\\) while maintaining the semantic, style, and geometric properties of \\(\\mathrm{G_s}\\), and at the same time preserving the identity of the subject between the domains \\((T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}})\\). Refer to Fig. 4 in supplementary for the pipeline figure. We represent \\(\\mathrm{G}_{2\\mathrm{D}}\\) as a teacher 2D-GAN used for knowledge distillation fine-tuned on the above datasets. Note that as \\(T_{\\mathrm{t}}\\) is not assumed to contain camera parameter annotations, the training scheme must suppress artifacts such as low-quality texture under different views and flat geometry (See Fig. 2). In the following, we discuss the details of our method." 
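To make the setup concrete before the individual design questions are answered below, here is a hedged sketch of the adversarial objectives involved in the fine-tuning: the non-saturating GAN losses with an R1 penalty used with the unconditional discriminator of Sec. 3.3, plus the L1 penalty on the Δs offsets introduced in Sec. 3.2. The stub discriminator, image sizes, and loss weights are placeholders, not the training code used for the paper.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def discriminator_loss(D, fake, real, gamma=1.0):
    # Non-saturating GAN loss for the (unconditional) discriminator,
    # with an R1 gradient penalty on real images (applied lazily in practice).
    loss = F.softplus(D(fake.detach())).mean() + F.softplus(-D(real)).mean()
    real = real.detach().requires_grad_(True)
    grad, = torch.autograd.grad(D(real).sum(), real, create_graph=True)
    return loss + 0.5 * gamma * grad.square().sum(dim=[1, 2, 3]).mean()

def generator_loss(D, fake, delta_s, lam=1.0):
    # Adversarial term for G_t plus the L1 penalty on the Delta-s offsets (Sec. 3.2).
    return F.softplus(-D(fake)).mean() + lam * delta_s.abs().sum()

# Tiny stand-ins only to show the call pattern; the real networks come from EG3D.
D = nn.Sequential(nn.Conv2d(3, 8, 4, 2, 1), nn.Flatten(), nn.Linear(8 * 32 * 32, 1))
fake = torch.randn(2, 3, 64, 64, requires_grad=True)  # images rendered by G_t
real = torch.randn(2, 3, 64, 64)                       # images from the artistic dataset
loss_d = discriminator_loss(D, fake, real)
loss_g = generator_loss(D, fake, torch.zeros(16, requires_grad=True))
```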
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.74, + 0.107 + ], + "angle": 0, + "content": "3.1. How to align the cameras?" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.115, + 0.892, + 0.569 + ], + "angle": 0, + "content": "Selecting appropriate ranges for camera parameters is of paramount importance for high-fidelity geometry and texture detail. Typically, such parameters are empirically estimated, directly computed from the dataset using an off-the-shelf pose detector [10], or learned during training [8]. In domains we aim to bridge, such as caricatures for which a 3D model may not even exist, directly estimating the camera distribution is problematic and, hence, is not assumed by our method. Instead, we find it essential to ensure that the camera parameter distribution is consistent across the source and target domains. For the target domain, we use StyleGAN2 trained on FFHQ, fine-tuned on artistic datasets [25, 65]. Assuming that the intrinsic parameters of all the cameras are the same, we aim to match the distribution of extrinsic camera parameters of \\( G_{\\mathrm{s}} \\) and \\( G_{2\\mathrm{D}} \\) and train our final \\( G_{\\mathrm{t}} \\) using it (see illustration in Fig. 2 of the supplementary materials). To this end, we define an optimization-based method to match the sought distributions. The first step is to identify a canonical pose image in \\( G_{2\\mathrm{D}} \\), where the yaw, pitch, and roll parameters are zero. According to Karras et al., [31], the image corresponding to the mean latent code satisfies this property. Let \\( \\theta \\), \\( \\phi \\) be the camera Euler angles in a spherical coordinate system, \\( r \\), \\( c \\) be the radius of the sphere and camera lookat point, and \\( M \\) be a function that converts these parameters into the camera-to-world matrix. Let \\( I_{\\mathrm{s}}(w,\\theta ,\\phi ,c,r) = G_{\\mathrm{s}}(w,M(\\theta ,\\phi ,c,r)) \\) and \\( I_{2\\mathrm{D}}(w) = G_{2\\mathrm{D}}(w) \\) represent an arbitrary image generated by \\( G_{\\mathrm{s}} \\) and \\( G_{2\\mathrm{D}} \\), respectively, given the \\( w \\) code variable. Let \\( k_{\\mathrm{d}} \\) be the face key-points detected by the detector \\( K_{\\mathrm{d}} \\) [72], then" + }, + { + "type": "equation", + "bbox": [ + 0.512, + 0.581, + 0.892, + 0.622 + ], + "angle": 0, + "content": "\\[\n\\left(c ^ {\\prime}, r ^ {\\prime}\\right) := \\underset {(c, r)} {\\arg \\min } \\mathrm {L} _ {\\mathrm {k d}} \\left(I _ {\\mathrm {s}} \\left(w _ {\\text {a v g}} ^ {\\prime}, 0, 0, c, r\\right), I _ {2 \\mathrm {D}} \\left(w _ {\\text {a v g}}\\right)\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.622, + 0.892, + 0.743 + ], + "angle": 0, + "content": "where \\(\\mathrm{L_{kd}}(I_1,I_2) = \\| k_{\\mathrm{d}}(I_1) - k_{\\mathrm{d}}(I_2)\\| _1\\) and \\(w_{\\mathrm{avg}}\\) and \\(w_{\\mathrm{avg}}^{\\prime}\\) are the mean \\(w\\) latent codes of \\(\\mathrm{G}_{2\\mathrm{D}}\\) and \\(\\mathrm{G_s}\\), respectively. In our results, \\(r^\\prime\\) is determined to be 2.7 and \\(c^{\\prime}\\) is approximately [0.0, 0.05, 0.17]. The next step is to determine a safe range of the \\(\\theta\\) and \\(\\phi\\) parameters. Following prior works, StyleFlow [3] and FreeStyleGAN [35] (see Fig.5 of the paper), we set these parameters as \\(\\theta^{\\prime}\\in [-0.45,0.45]\\) and \\(\\phi^{\\prime}\\in [-0.35,0.35]\\) in radians." 
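As a minimal sketch of the camera-alignment search in Eq. 1, the snippet below optimizes the look-at point c and radius r so that detected face keypoints of the zero-pose EG3D rendering match those of the mean-latent 2D-GAN image. Here render_zero_pose and detect_keypoints are hypothetical callables standing in for G_s rendered at M(0, 0, c, r) with w'_avg and for the keypoint detector [72]; the detector is assumed to be differentiable and the optimizer settings are arbitrary.

```python
import torch

def align_camera(render_zero_pose, detect_keypoints, img_2d_mean, steps=200, lr=1e-2):
    """Search (c, r) of Eq. 1 by matching face keypoints of the G_s and G_2D mean-latent images."""
    c = torch.zeros(3, requires_grad=True)     # camera look-at point, initialized at the origin
    r = torch.tensor(2.5, requires_grad=True)  # sphere radius, arbitrary initial guess
    opt = torch.optim.Adam([c, r], lr=lr)
    kp_target = detect_keypoints(img_2d_mean).detach()
    for _ in range(steps):
        img_s = render_zero_pose(cam_pivot=c, radius=r)             # G_s(w'_avg, M(0, 0, c, r))
        loss = (detect_keypoints(img_s) - kp_target).abs().sum()    # L_kd: L1 keypoint distance
        opt.zero_grad()
        loss.backward()
        opt.step()
    return c.detach(), r.detach()
```

Once (c', r') are found, yaw and pitch are simply restricted to the fixed ranges quoted above during fine-tuning.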
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.756, + 0.874, + 0.772 + ], + "angle": 0, + "content": "3.2. What loss functions and regularizers to use?" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Next, although the camera systems are aligned, the given dataset may not stem from a consistent 3D model, e.g., in the case of caricatures or cartoons. This entics the generator \\( G_{t} \\) to converge to an easier degenerate solution with a flat geometry. Hence, to benefit from the geometric prior of \\( G_{s} \\), another important step is to design the loss functions and regularizers for a selected set of parameters to update in \\( G_{t} \\). Next, we discuss these design choices:" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4554" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.093, + 0.368, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.093, + 0.629, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.094, + 0.891, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.444, + 0.893, + 0.472 + ], + "angle": 0, + "content": "Figure 3. Domain adaptation. Domain adaptation results of images from source domain \\( T_{\\mathrm{s}} \\) (top row in each sub-figure) to target domain \\( T_{\\mathrm{t}} \\). Rows two to five show corresponding 3D avatar results from different viewpoints." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.481, + 0.47, + 0.587 + ], + "angle": 0, + "content": "Loss Functions. To ensure texture quality and diversity, we resort to the adversarial loss used to fine-tune GANs as our main loss function. We use the standard non-saturating loss to train the generator and discriminator networks used in EG3D [11]. We also perform lazy density regularization to ensure consistency of the density values in the final finetuned model \\( \\mathrm{G}_{\\mathrm{t}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.593, + 0.47, + 0.804 + ], + "angle": 0, + "content": "Texture Regularization. Since the texture can be entangled with the geometry information, determining which layers to update is important. To make use of the fine-style information encoded in later layers, it is essential to update the \\( tRGB \\) layer parameters (outputting tri-plane features) before the neural rendering stage. \\( tRGB \\) are convolutional layers that transform feature maps to 3 channels at each resolution (96 channels in triplanes). Moreover, since the network has to adapt to a color distribution of \\( T_{t} \\), it is essential to update the decoder (MLP layers) of the neural rendering pipeline as well. Given the EG3D architecture, we also update the super-resolution layer parameters to ensure the coherency between the low-resolution and high-resolution outputs seen by the discriminator D." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Geometry Regularization. In order to allow the network to learn the structure distribution of \\( T_{\\mathrm{t}} \\) and at the same time ensure properties of \\( \\mathcal{W} \\) and \\( S \\) latent spaces are preserved, we update the earlier layers with regularization. This also encourages the latent spaces of \\( T_{\\mathrm{s}} \\) and \\( T_{\\mathrm{t}} \\) to be easily linked. 
Essentially, we update the deviation parameter \\( \\Delta s \\) from the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.481, + 0.892, + 0.587 + ], + "angle": 0, + "content": "\\(s\\) activations of the \\(S\\) space [62]. The \\(s\\) activations are predicted by \\(\\mathrm{A}(w)\\), where \\(\\mathrm{A}\\) is the learned affine function in EG3D. The \\(s\\) activations scale the kernels of a particular layer. In order to preserve the identity as well as geometry such that the optimization of \\(\\Delta s\\) does not deviate too far away from the original domain \\(T_{\\mathrm{s}}\\), we introduce a regularizer given by" + }, + { + "type": "equation", + "bbox": [ + 0.633, + 0.587, + 0.891, + 0.603 + ], + "angle": 0, + "content": "\\[\nR (\\Delta s) := \\| \\Delta s \\| _ {1}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.685 + ], + "angle": 0, + "content": "Note that we apply \\( \\mathrm{R}(\\Delta s) \\) regularization in a lazy manner, i.e., with density regularization. Interestingly, after training, we can interpolate between \\( s \\) and \\( s + \\Delta s \\) parameters to interpolate between the geometries of samples in \\( T_{\\mathrm{s}} \\) and \\( T_{\\mathrm{t}} \\) (See Fig. 5)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.686, + 0.893, + 0.852 + ], + "angle": 0, + "content": "Depth Regularization. Next, we observe that even though the above design choice produces better geometry for \\( T_{\\mathrm{t}} \\), some samples from \\( G_{\\mathrm{t}} \\) can still lead to flatter geometry, and it is hard to detect these cases. We found that the problem is related to the relative depth of the background to the foreground. To circumvent this problem, we use an additional regularization where we encourage the average background depth of \\( G_{\\mathrm{t}} \\) to be similar to \\( G_{\\mathrm{s}} \\). Let \\( S_{\\mathrm{b}} \\) be a face background segmentation network [34]. We first compute the average background depth of the samples given by \\( G_{\\mathrm{s}} \\). This average depth is given by" + }, + { + "type": "equation", + "bbox": [ + 0.568, + 0.863, + 0.892, + 0.904 + ], + "angle": 0, + "content": "\\[\na _ {\\mathrm {d}} := \\frac {1}{M} \\sum_ {n = 1} ^ {M} \\left(\\frac {1}{N _ {n}} \\| D _ {n} \\odot \\mathrm {S} _ {\\mathrm {b}} (I _ {n}) \\| _ {F} ^ {2}\\right). \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4555" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.092, + 0.449, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.437, + 0.47, + 0.465 + ], + "angle": 0, + "content": "Figure 4. 3D avatars from real images. Projection of real images on the 3D avatar generators." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.471, + 0.47, + 0.532 + ], + "angle": 0, + "content": "Here, \\(D_{n}\\) is the depth map of the image \\(I_{n}\\) sampled from \\(G_{\\mathrm{s}}\\), \\(\\odot\\) represents the Hadamard product, \\(M\\) is the number of the sampled images, and \\(N_{n}\\) is the number of background pixels in \\(I_{n}\\). 
Finally, regularization is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.145, + 0.541, + 0.469, + 0.557 + ], + "angle": 0, + "content": "\\[\n\\mathrm {R} (D) := \\left\\| a _ {\\mathrm {d}} \\cdot J - \\left(D _ {\\mathrm {t}} \\odot \\mathrm {S} _ {\\mathrm {b}} \\left(I _ {\\mathrm {t}}\\right)\\right) \\right\\| _ {F}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.567, + 0.469, + 0.611 + ], + "angle": 0, + "content": "where \\(D_{\\mathrm{t}}\\) is the depth map of the image \\(I_{\\mathrm{t}}\\) sampled from \\(\\mathbf{G}_{\\mathrm{t}}\\) and \\(J\\) is the matrix of ones having the same spatial dimensions as \\(D_{\\mathrm{t}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.62, + 0.325, + 0.635 + ], + "angle": 0, + "content": "3.3. What discriminator to use?" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.643, + 0.47, + 0.825 + ], + "angle": 0, + "content": "Given that the data in \\( T_{\\mathrm{s}} \\) and \\( T_{\\mathrm{t}} \\) is not paired and \\( T_{\\mathrm{t}} \\) is not assumed to contain camera parameter annotations, the choice of the discriminator (D) used for this task is also a critical design choice. Essentially, we use the unconditional version of the dual discriminator proposed in EG3D, and hence, we do not condition the discriminator on the camera information. As a result, during the training, \\( G_{\\mathrm{t}} \\) generates arbitrary images with pose using \\( \\mathrm{M}(\\theta', \\phi', c', r') \\), and the discriminator discriminates these images using arbitrary images from \\( T_{\\mathrm{t}} \\). We train the discriminator from scratch and in order to adapt \\( T_{\\mathrm{s}} \\rightarrow T_{\\mathrm{t}} \\), we use the StyleGAN-ADA [28] training scheme and use R1 regularization." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.833, + 0.469, + 0.863 + ], + "angle": 0, + "content": "3.4. How to incorporate larger geometric deformations between domains?" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "While the regularizers are used to limit the geometric changes when adapting from \\( T_{\\mathrm{s}} \\) to \\( T_{\\mathrm{t}} \\), modeling large ge" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.332 + ], + "angle": 0, + "content": "ometric deformations, e.g., in the caricature dataset is another challenge. One choice to edit the geometry is to use the properties of tri-plane features learned by EG3D. We start out by analyzing these three planes in \\(\\mathrm{G_s}\\). We observe that the frontal plane encodes most of the information required to render the final image. To quantify this, we sample images and depth maps from \\(\\mathrm{G_s}\\) and swap the front and the other planes from two random images. Then we compare the difference in RGB values of the images and the Chamfer distance of the depth maps. While swapping the frontal tri-planes, the final images are completely swapped, and the Chamfer distance changes by \\(80\\sim 90\\%\\) matching the swapped image depth map. In the case of the other two planes, the RGB image is not much affected and the Chamfer distance of the depth maps is reduced by only \\(20\\sim 30\\%\\) in most cases." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.333, + 0.893, + 0.545 + ], + "angle": 0, + "content": "Given the analysis, we focus to manipulate the \\(2D\\) front plane features to learn additional deformation or exaggerations. 
We learn a TPS (Thin Plate Spline) [61] network on top of the front plane. Our TPS network is conditioned both on the front plane features as well as the \\(\\mathcal{W}\\) space to enable multiple transformations. The architecture of the module is similar to the standard StyleGAN2 layer with an MLP appended at the end to predict the control points that transform the features. Hence, as a byproduct, we also enable 3D-geometry editing guided by the learned latent space. We train this module separately after \\(G_{\\mathrm{t}}\\) has been trained. We find that joint training is unstable due to exploding gradients arising from the large domain gap between \\(T_{\\mathrm{s}}\\) and \\(T_{\\mathrm{t}}\\) in the initial stages. Formally, we define this transformation as:" + }, + { + "type": "equation", + "bbox": [ + 0.641, + 0.555, + 0.891, + 0.571 + ], + "angle": 0, + "content": "\\[\n\\mathrm {T} (w, f) := \\Delta c, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.891, + 0.611 + ], + "angle": 0, + "content": "where, \\( w \\) is the latent code, \\( f \\) is the front plane, and \\( c \\) are the control points." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.612, + 0.892, + 0.702 + ], + "angle": 0, + "content": "Let \\( c_{\\mathrm{I}} \\) be the initial control points producing an identity transformation, \\( (c_{1}, c_{2}) \\) be the control points corresponding to front planes \\( (f_{1}, f_{2}) \\) sampled using \\( \\mathcal{W} \\) codes \\( (w_{1}, w_{2}) \\), respectively, and \\( (c_{1}', c_{2}') \\) be points with \\( (w_{1}, w_{2}) \\) swapped in the TPS module. To regularize and encourage the module to learn different deformations, we have" + }, + { + "type": "equation", + "bbox": [ + 0.5, + 0.712, + 0.897, + 0.764 + ], + "angle": 0, + "content": "\\[\n\\mathrm {R} \\left(\\mathrm {T} _ {1}\\right) := \\alpha \\sum_ {n = 1} ^ {2} \\| c _ {I} - c _ {n} \\| _ {1} - \\beta \\| c _ {1} - c _ {2} \\| _ {1} - \\sigma \\| c _ {1} ^ {\\prime} - c _ {2} ^ {\\prime} \\| _ {1}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.892, + 0.884 + ], + "angle": 0, + "content": "We use initial control point regularization to regularize large deviations in the control points which would otherwise explode. Additionally, to learn extreme exaggerations in \\( T_{\\mathrm{t}} \\) and 'in expectation', conform to the target distribution in the dataset, we add an additional loss term. Let \\( S(I) \\) be the soft-argmax output of the face segmentation network [34] given an image \\( I \\) and assuming that \\( S \\) generalizes to caricatures, then" + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.885, + 0.891, + 0.902 + ], + "angle": 0, + "content": "\\[\n\\mathrm {R} \\left(\\mathrm {T} _ {2}\\right) := \\| \\mathrm {S} \\left(\\mathrm {G} _ {\\mathrm {t}} (w)\\right), \\mathrm {S} \\left(I _ {\\mathrm {t}}\\right) \\| _ {1} \\tag {7}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4556" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.09, + 0.468, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.077, + 0.213, + 0.47, + 0.242 + ], + "angle": 0, + "content": "Figure 5. Interpolation of \\(\\Delta s\\). Geometric deformation using the interpolation of learned \\(\\Delta s\\) parameters." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.255, + 0.468, + 0.316 + ], + "angle": 0, + "content": "Eq. 6, Eq. 
7, and adversarial training loss are used to train the \\(TPS\\) module. We adopt gradient clipping to make sure that the training does not diverge. See the illustrations in Fig. 3 and Fig. 4 of the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.331, + 0.468, + 0.348 + ], + "angle": 0, + "content": "4. Personalized Avatar Generation and Editing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.358, + 0.468, + 0.476 + ], + "angle": 0, + "content": "Although 3D domain adaptation adapts \\( T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}} \\), it is still a challenge to effectively link the latent spaces of \\( \\mathrm{G_s} \\) and \\( \\mathrm{G_t} \\) to generate personalized 3D avatars using a single photograph as the reference image. Particularly, the challenge arises due to the discrepancy in the coupled latent spaces when dealing with the projection of real photographs on 3D generators. Moreover, one would like to edit and animate these 3D avatars." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.478, + 0.468, + 0.854 + ], + "angle": 0, + "content": "Projection. The task is to project a real image into the latent space of \\( \\mathrm{G_s} \\), transfer the latent to \\( \\mathrm{G_t} \\), and further optimize it to construct a 3D avatar. First, we use an optimization-based method to find the \\( w \\) code that minimizes the similarity between the generated and the real image in \\( \\mathrm{G_s} \\). To achieve this, the first step is to align the cameras. We follow the steps mentioned in Sec. 3.1 for this step. Next, we use pixel-wise MSE loss and LPIPS loss to project the image into \\( \\mathrm{G_s} \\) [1]. Additionally, to preserve the identity of the subject, we use attribute classifiers e.g. caricature dataset [24] provides the coupled attribute information of real images and caricatures. We use such attribute classifier [24,25] in a post-hoc manner as we notice that such networks can affect the texture in the target domain and could degenerate to narrow style outputs if applied during training. Moreover, such networks may not be available for all target domains. To avoid overfitting into \\( \\mathrm{G_s} \\) and encourage the easier transfer of the optimized latent code to \\( \\mathrm{G_t} \\), we use \\( \\mathcal{W} \\) space optimization for this step. Finally, we initialize this \\( w \\) code for \\( \\mathrm{G_t} \\) and use additional attribute classifier loss [25] for \\( T_{\\mathrm{t}} \\) domain along with Depth regularization \\( \\mathrm{R}(D) \\) (Eq. 4). As an approximation, we assume that attribute classifier [24, 25] generalizes across all domains. We use \\( \\mathcal{W} / \\mathcal{W} + \\) space optimization to control the quality and diversity of the outputs. See Algorithm 1 in supplementary for the description." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Editing and Animation. Since our 3D domain adaptation is designed to preserve the properties of \\(\\mathcal{W}\\) and \\(S\\) spaces, we can perform semantic edits via InterFaceGAN [51]," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.198 + ], + "angle": 0, + "content": "GANSpace [22], StyleSpace [62] etc., and geometric edits using TPS (Sec. 3.4) and \\(\\Delta s\\) interpolation (Sec. 3.2). 
To perform video editing, we design an encoder for EG3D based on \\(e4e\\) [58] to encode videos and transfer the edits from \\(\\mathrm{G_s}\\) to \\(\\mathrm{G_t}\\) based on the \\(w\\) codes [4,6,59]. We leave a more fine-grained approach for video processing as future work." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.214, + 0.587, + 0.23 + ], + "angle": 0, + "content": "5. Results" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.24, + 0.694, + 0.256 + ], + "angle": 0, + "content": "5.1. Quantitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.265, + 0.892, + 0.43 + ], + "angle": 0, + "content": "In this section, we consider three important evaluations to verify the quality of the texture, geometry, and identity preservation in the new domain using the Caricature, Cartoons, and Pixar toons datasets. We evaluate the ablation of our design choices in the supplementary materials. In the evaluation, let \\( \\mathrm{G}_{\\mathrm{base}} \\) be the baseline naive fine-tuning method which is trained with all the parameters using the losses in EG3D fine-tuned from FFHQ trained prior \\( \\mathrm{G}_{\\mathrm{s}} \\). Note here we still align the cameras in \\( \\mathrm{G}_{\\mathrm{base}} \\) using the method defined in Sec. 3.1 and use adaptive discriminator [28] with R1 regularization for a fair comparison." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.432, + 0.892, + 0.628 + ], + "angle": 0, + "content": "Texture Quality. To verify the quality of the texture, diversity of samples as well as to some extent, the geometry in the target domain \\( T_{\\mathrm{t}} \\), we compare the FID [23] scores using \\( \\mathrm{G}_{\\mathrm{base}} \\) and \\( \\mathrm{G}_{\\mathrm{t}} \\) in Table 1. Note that in the case of Caricatures, we report two scores i.e. with and without using the attribute classifier loss in the training as discussed in Sec. 4. Notice that our method outperforms the naive baseline method by a huge margin in some cases, especially in Caricatures and Cartoons. We attribute these differences to the mode collapse prone training of \\( \\mathrm{G}_{\\mathrm{base}} \\) which is correlated with flat geometry degenerate solution. We show visual results of the flat geometries learned by \\( \\mathrm{G}_{\\mathrm{base}} \\) and comparison in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.63, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Geometric Quality. To quantify the flat geometries, in Table 2, we show three scores that help us understand such degenerate solutions. Here we consider coupled depth maps generated from sampling in the domains \\( T_{\\mathrm{s}} \\) (\\( \\mathrm{G_s} \\)) and \\( T_{\\mathrm{t}} \\) (\\( \\mathrm{G_t} \\) and \\( \\mathrm{G}_{\\mathrm{base}} \\)). First, we compute the expectation of the absolute mean differences (\\( M_{\\mathrm{d}} \\)) of the corresponding foreground depth maps sampled from \\( T_{\\mathrm{s}} \\) and \\( T_{\\mathrm{t}} \\). We also compute the expectation of the absolute standard deviation differences (\\( S_{\\mathrm{d}} \\)) for the same setting. Here, we assume that the flatter geometries have a large difference in the depth maps as compared to the prior as indicated by \\( M_{\\mathrm{d}} \\). Moreover, \\( S_{\\mathrm{d}} \\) computes the distance in the distribution of the depth values, where a larger difference indicates a narrow distribution, and hence a flatter geometry. 
We also notice that the flat geometry is correlated with the generator learning diverse poses when images are rendered under standard canonical camera parameters i.e. \\( \\mathrm{M}(0,0,c,r) \\). We hypothesize in the case of the flatter geometries, the model learns to pose in" + }, + { + "type": "page_number", + "bbox": [ + 0.482, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4557" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.081, + 0.088, + 0.894, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.216, + 0.893, + 0.247 + ], + "angle": 0, + "content": "Figure 6. Deformations using TPS. Geometric edits using our proposed TPS (Thin Plate Spline) module learned on the frontal tri-plane features. Each sub-figure shows a 3D avatar and three examples of TPS deformations sampled from the learned 3D deformation space." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.258, + 0.471, + 0.327 + ], + "angle": 0, + "content": "Table 1. FID Computation. FID (Frechet Inception Distance) between the 2D dataset and the samples generated by the fine-tuned 3D GAN using baseline \\((\\mathrm{G_{base}})\\) and Ours \\((\\mathrm{G_t})\\) . \\(^{\\prime \\prime}*\\) represents the score with the inclusion of the attribute classifier loss discussed in Sec. 3.2." + }, + { + "type": "table", + "bbox": [ + 0.136, + 0.34, + 0.413, + 0.387 + ], + "angle": 0, + "content": "
Method | Caricatures | Cartoons | Pixar Toons
Gbase | 67.8 | 79.0 | 15.1
Gt (Ours) | 19.4/20.2* | 12.8 | 12.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.404, + 0.471, + 0.446 + ], + "angle": 0, + "content": "Table 2. Geometry Evaluation. Comparing the geometry using baseline method \\((\\mathrm{G}_{\\mathrm{base}})\\) and Ours \\((\\mathrm{G}_{\\mathrm{t}})\\). For the definition of \\(M_{\\mathrm{d}}\\), \\(S_{\\mathrm{d}}\\) and \\(\\mathrm{R}(\\mathrm{T}_2)\\), refer to Sec. 5.1." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.456, + 0.43, + 0.556 + ], + "angle": 0, + "content": "
Metric | Method | Caricatures | Cartoons | Pixar
Md ↓ | Gbase | 0.47 | 0.21 | 0.29
Md ↓ | Gt (Ours) | 0.21 | 0.13 | 0.13
Sd ↓ | Gbase | 0.22 | 0.14 | 0.15
Sd ↓ | Gt (Ours) | 0.15 | 0.10 | 0.09
R(T2) ↓ | Gbase | 2.99 | 3.39 | 4.01
R(T2) ↓ | Gt (Ours) | 2.27 | 1.62 | 1.56
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.574, + 0.469, + 0.603 + ], + "angle": 0, + "content": "Table 3. Identity Preservation. Identity preservation using baseline \\((\\mathrm{G}_{\\mathrm{base}})\\) and Ours \\((\\mathrm{G}_{\\mathrm{t}})\\)." + }, + { + "type": "table", + "bbox": [ + 0.135, + 0.613, + 0.414, + 0.66 + ], + "angle": 0, + "content": "
Method | Caricatures | Cartoons | Pixar Toons
Gbase | 1.28 | 0.92 | 0.85
Gt (Ours) | 0.87 | 0.81 | 0.73
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.47, + 0.824 + ], + "angle": 0, + "content": "formation in the earlier layers instead of being camera view-dependent. To quantify this, since pose information may not be available for some domains (e.g. cartoons), we compute the \\(\\mathrm{R}(\\mathrm{T}_2)\\) scores between corresponding images in the domain \\(T_{\\mathrm{s}}\\) (\\(\\mathrm{G}_{\\mathrm{s}}\\)) and \\(T_{\\mathrm{t}}\\) (\\(\\mathrm{G}_{\\mathrm{t}}\\) and \\(\\mathrm{G}_{\\mathrm{base}}\\)). Note that these scores are computed without the TPS module. Our scores are lower in all three metrics, hence, validating that our method avoids the degenerate solution and preserves the geometric distribution of the prior. For discussion on the TPS module and ablations refer to the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Identity Preservation. Identity preservation score is another important evaluation to check the quality of latent space linking between \\( \\mathrm{G_s} \\) and \\( \\mathrm{G_t} \\). In Table 3, we compute the attribute loss (BCE loss) between the domains \\( T_{\\mathrm{s}} \\) and \\( T_{\\mathrm{t}} \\) using the attribute classifiers [24, 25]. Note that our method" + }, + { + "type": "image", + "bbox": [ + 0.543, + 0.258, + 0.853, + 0.66 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.672, + 0.892, + 0.701 + ], + "angle": 0, + "content": "Figure 7. Local edits. Local edits performed on the 3D avatars using the \\(S\\) space." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.712, + 0.874, + 0.728 + ], + "angle": 0, + "content": "is able to preserve the identity better across the domains." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.74, + 0.685, + 0.757 + ], + "angle": 0, + "content": "5.2. Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.765, + 0.894, + 0.903 + ], + "angle": 0, + "content": "For qualitative results, we show the results of the domain adaptation, as well as the personalized edits (geometric and semantic), performed on the resultant 3D avatars. First, in order to show the quality of domain adaptation, identity preservation, and geometric consistency, in Fig. 3, we show results from \\( \\mathrm{G}_{\\mathrm{s}} \\) and corresponding results from 3D avatar generator \\( \\mathrm{G}_{\\mathrm{t}} \\) trained on Caricatures, Pixar toons, Cartoons, and Comic domains. Next, in order to show that the method generalizes to real images, we use the method described in" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4558" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.16, + 0.089, + 0.812, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.443, + 0.895, + 0.486 + ], + "angle": 0, + "content": "Figure 8. 3D avatar animation. Animation of 3D avatars generated using a driving video encoded in source domain \\( T_{s} \\) and applied to samples in target domain \\( T_{t} \\). The top row shows the driving video and the subsequent rows show generated animations using a random Caricature or Pixar toon. The head pose is changed in each frame of the generated animation to show 3D consistency." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.5, + 0.471, + 0.592 + ], + "angle": 0, + "content": "Sec. 
4 to project and transfer the latent code from \\( \\mathrm{G_s} \\) to \\( \\mathrm{G_t} \\) to produce the 3D avatars. In Fig. 4, we show our results of real to 3D avatar transfer. Notice the quality both in terms of texture as well as geometry for both these results achieved by our method. Next, we show geometric and semantic edits possible to produce personalized 3D avatars:" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.6, + 0.47, + 0.705 + ], + "angle": 0, + "content": "Geometry Edits. We show two types of geometric edits i.e. \\(\\Delta s\\) interpolation (Sec. 3.2) and deformation using \\(TPS\\) (Sec. 3.4). First, in Fig. 5, we show the geometry interpolation by interpolating between original \\(s\\) activations of \\(\\mathrm{G_s}\\) and learned \\(\\Delta s\\) parameters. In Fig. 6, we show some additional exaggerations in caricatures using the learned 3D deformation latent space of \\(TPS\\) module." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.706, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Semantic Edits and Animation. Since in our method, we encourage the latent regularization to preserve the properties of the latent space learned by the \\(\\mathrm{G}_{\\mathrm{s}}\\) generator, in Fig. 7 we show \\(S\\) space edits performed on the 3D avatars. Notice the quality of edits in terms of locality and adaptability. Additionally, we can edit semantics like hair as opposed to 3D morphable model based methods. In Fig. 8, thanks to the latent space semantics preservation ensured by our method, we can perform some video edits to create a coherent animation based on the difference of \\(w\\) codes of video encoded in \\(\\mathrm{G}_{\\mathrm{s}}\\) (Sec. 4) and applied to layers \\(7 - 10\\) in \\(\\mathrm{G}_{\\mathrm{t}}\\). Notice the quality of expressions, identity preservation, and 3D consistency across each identity in each row." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.499, + 0.62, + 0.515 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.524, + 0.892, + 0.889 + ], + "angle": 0, + "content": "We tackled two open research problems in this paper. In the first part, we proposed the first domain adaptation method for 3D-GANs to the best of our knowledge. This part yields two linked EG3D generators, one in the photorealistic source domain of faces, and another EG3D generator in an artistic target domain. As possible target domains, we show results for cartoons, caricatures, and Comics. In the second part, we built on domain adaptation to create 3D avatars in an artistic domain that can be edited and animated. Our framework consists of multiple technical components introduced in this paper. First, we propose a technique for camera space estimation for artistic domains. Second, we introduce a set of regularizers and loss functions that can regularize the fine-tuning of EG3D in such a way that enough of the 3D structure and geometry of the original model is kept, while the distinguishing attributes of the artistic domain, such as textures and colors and local geometric deformations can still be learned. Third, we introduce a geometric deformation module that can reintroduce larger geometric deformations in a controlled manner. These larger geometric deformations can interact and cooperate with EG3D so that semantic edits are still possible. Finally, we propose an embedding algorithm that is especially suitable for two linked EG3D generator networks." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "4559" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.184 + ], + "angle": 0, + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, Seoul, Korea, 2019. IEEE. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.185, + 0.472, + 0.255 + ], + "angle": 0, + "content": "[2] Rameen Abdal, Peihao Zhu, John Femiani, Niloy Mitra, and Peter Wonka. Clip2stylegan: Unsupervised extraction of stylegan edit directions. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, New York, NY, USA, 2022. Association for Computing Machinery. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.311 + ], + "angle": 0, + "content": "[3] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. ACM Trans. Graph., 40(3), may 2021. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.471, + 0.352 + ], + "angle": 0, + "content": "[4] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Video2stylegan: Disentangling local and global variations in a video, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.354, + 0.47, + 0.409 + ], + "angle": 0, + "content": "[5] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.41, + 0.47, + 0.464 + ], + "angle": 0, + "content": "[6] Yuval Alaluf, Or Patashnik, Zongze Wu, Asif Zamir, Eli Shechtman, Dani Lischinski, and Daniel Cohen-Or. Third time's the charm? image and video editing with stylegan3. CoRR, abs/2201.13433, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.466, + 0.47, + 0.52 + ], + "angle": 0, + "content": "[7] Yuval Alaluf, Omer Tov, Ron Mokady, Rimon Gal, and Amit H. Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. CoRR, abs/2111.15666, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.522, + 0.47, + 0.549 + ], + "angle": 0, + "content": "[8] Anonymous. 3d generation on imagenet. In Open Review, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.551, + 0.47, + 0.633 + ], + "angle": 0, + "content": "[9] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.635, + 0.47, + 0.704 + ], + "angle": 0, + "content": "[10] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 
1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.705, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[11] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks, 2021. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.776, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[12] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[13] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In IEEE Conference on Computer Vision and Pattern Recognition, 2022. 2" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[14] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.149, + 0.892, + 0.175 + ], + "angle": 0, + "content": "[15] Min Jin Chong and David A. Forsyth. Jojogan: One shot face stylization. CoRR, abs/2112.11641, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.177, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[16] Min Jin Chong, Hsin-Ying Lee, and David Forsyth. Stylegan of all trades: Image manipulation with only pretrained stylegan. arXiv preprint arXiv:2111.01619, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.219, + 0.892, + 0.272 + ], + "angle": 0, + "content": "[17] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.274, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[18] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.316, + 0.892, + 0.357 + ], + "angle": 0, + "content": "[19] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.358, + 0.892, + 0.413 + ], + "angle": 0, + "content": "[20] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylererf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.414, + 0.892, + 0.467 + ], + "angle": 0, + "content": "[21] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.469, + 0.892, + 0.51 + ], + "angle": 0, + "content": "[22] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. arXiv preprint arXiv:2004.02546, 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.511, + 0.892, + 0.579 + ], + "angle": 0, + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.581, + 0.892, + 0.622 + ], + "angle": 0, + "content": "[24] Jing Huo, Wenbin Li, Yinghuan Shi, Yang Gao, and Hujun Yin. Webcaricature: a benchmark for caricature recognition. In British Machine Vision Conference, 2018. 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.623, + 0.892, + 0.677 + ], + "angle": 0, + "content": "[25] Wonjong Jang, Gwangjin Ju, Yucheol Jung, Jiaolong Yang, Xin Tong, and Seungyong Lee. Stylecarigan: Caricature generation via stylegan feature map modulation. 40(4), 2021. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.679, + 0.892, + 0.748 + ], + "angle": 0, + "content": "[26] Yucheol Jung, Wonjong Jang, Soongjin Kim, Jiaolong Yang, Xin Tong, and Seungyong Lee. Deep deformable 3d caricatures with learned shape control. In Special Interest Group on Computer Graphics and Interactive Techniques Conference Proceedings. ACM, aug 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.749, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[28] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In Proc. NeurIPS, 2020. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[29] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. arXiv preprint arXiv:2006.06676, 2020.3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4560" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "[30] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.134, + 0.469, + 0.203 + ], + "angle": 0, + "content": "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4401-4410, 2019. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.204, + 0.469, + 0.259 + ], + "angle": 0, + "content": "[32] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based generator architecture for generative adversarial networks. 
IEEE transactions on pattern analysis and machine intelligence, 43(12):4217-4228, Dec. 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.469, + 0.302 + ], + "angle": 0, + "content": "[33] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In Proc. CVPR, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.302, + 0.469, + 0.357 + ], + "angle": 0, + "content": "[34] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.469, + 0.399 + ], + "angle": 0, + "content": "[35] Thomas Leimkuhler and George Drettakis. Freestylegan: Free-view editable portrait rendering with the camera manifold. 40(6), 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.4, + 0.469, + 0.455 + ], + "angle": 0, + "content": "[36] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.456, + 0.469, + 0.524 + ], + "angle": 0, + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.525, + 0.469, + 0.553 + ], + "angle": 0, + "content": "[38] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning gans, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.553, + 0.469, + 0.607 + ], + "angle": 0, + "content": "[39] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.469, + 0.676 + ], + "angle": 0, + "content": "[40] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.678, + 0.469, + 0.747 + ], + "angle": 0, + "content": "[41] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. StyleSDF: High-Resolution 3D-Consistent Image and Geometry Generation. arXiv preprint arXiv:2112.11427, 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.748, + 0.469, + 0.803 + ], + "angle": 0, + "content": "[42] Xingang Pan, Bo Dai, Ziwei Liu, Chen Change Loy, and Ping Luo. Do 2d gans know 3d shape? unsupervised 3d shape reconstruction from 2d image gans. arXiv preprint arXiv:2011.00844, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.469, + 0.859 + ], + "angle": 0, + "content": "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Deformable neural radiance fields. arXiv preprint arXiv:2011.12948, 2020. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.86, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[44] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery, 2021. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[45] Justin N. M. Pinkney and Doron Adler. Resolution dependent gan interpolation for controllable image synthesis between domains, 2020. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.218 + ], + "angle": 0, + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.22, + 0.892, + 0.261 + ], + "angle": 0, + "content": "[47] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.263, + 0.892, + 0.318 + ], + "angle": 0, + "content": "[48] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. arXiv preprint arXiv:2008.00951, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.892, + 0.361 + ], + "angle": 0, + "content": "[49] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.362, + 0.892, + 0.418 + ], + "angle": 0, + "content": "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.42, + 0.892, + 0.476 + ], + "angle": 0, + "content": "[51] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.477, + 0.892, + 0.532 + ], + "angle": 0, + "content": "[52] Yichun Shi, Divyansh Aggarwal, and Anil K Jain. Lifting 2d stylegan for 3d-aware face generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6258-6266, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.534, + 0.892, + 0.589 + ], + "angle": 0, + "content": "[53] Ivan Skorokhodov, Aliaksandr Siarohin, Yinghao Xu, Jian Ren, Hsin-Ying Lee, Peter Wonka, and Sergey Tulyakov. 3d generation on imagenet. In International Conference on Learning Representations (ICLR), 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.591, + 0.892, + 0.646 + ], + "angle": 0, + "content": "[54] Guoxian Song, Linjie Luo, Jing Liu, Wan-Chun Ma, Chunpong Lai, Chuanxia Zheng, and Tat-Jen Cham. Agilegan: Stylizing portraits by inversion-consistent transfer learning. ACM Trans. Graph., 40(4), jul 2021. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.648, + 0.892, + 0.701 + ], + "angle": 0, + "content": "[55] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.704, + 0.892, + 0.787 + ], + "angle": 0, + "content": "[56] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.789, + 0.892, + 0.855 + ], + "angle": 0, + "content": "[57] Ayush Tewari, Mohamed Elgharib, Mallikarjun BR, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zöllhofer, and Christian Theobalt. Pie: Portrait image embedding for semantic control. volume 39, December 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[58] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. arXiv preprint arXiv:2102.02766, 2021. 2, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "4561" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.134 + ], + "angle": 0, + "content": "[59] Rotem Tzaban, Ron Mokady, Rinon Gal, Amit H. Bermano, and Daniel Cohen-Or. *Stitch it in time: Gan-based facial editing of real videos. CoRR*, abs/2201.08361, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.136, + 0.469, + 0.19 + ], + "angle": 0, + "content": "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Cross-domain and disentangled face manipulation with 3d guidance. IEEE Transactions on Visualization and Computer Graphics, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.192, + 0.468, + 0.22 + ], + "angle": 0, + "content": "[61] WarBean. tps-stn-pytorch. https://github.com/WarBean/tps_stn_pytorch.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.222, + 0.469, + 0.262 + ], + "angle": 0, + "content": "[62] Zongze Wu, Dani Lischinski, and Eli Shechtman. Stylespace analysis: Disentangled controls for stylegan image generation. arXiv preprint arXiv:2011.12799, 2020. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.264, + 0.469, + 0.346 + ], + "angle": 0, + "content": "[63] Yinghao Xu, Menglei Chai, Zifan Shi, Sida Peng, Ivan Skorokhodov, Aliaksandr Siarohin, Ceyuan Yang, Yujun Shen, Hsin-Ying Lee, Bolei Zhou, et al. Discoscene: Spatially disentangled generative radiance fields for controllable 3d-aware scene synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.348, + 0.469, + 0.403 + ], + "angle": 0, + "content": "[64] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. arXiv preprint arXiv:2112.10759, 2021. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.405, + 0.469, + 0.445 + ], + "angle": 0, + "content": "[65] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In CVPR, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.448, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[66] Zipeng Ye, Mengfei Xia, Yanan Sun, Ran Yi, Minjing Yu, Juyong Zhang, Yu-Kun Lai, and Yong-Jin Liu. 3d-CariGAN: An end-to-end solution to 3d caricature generation from normal face photos. IEEE Transactions on Visualization and Computer Graphics, pages 1-1, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.518, + 0.469, + 0.572 + ], + "angle": 0, + "content": "[67] Fisher Yu, Yinda Zhang, Shuran Song, Ari Seff, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.575, + 0.469, + 0.615 + ], + "angle": 0, + "content": "[68] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.618, + 0.469, + 0.671 + ], + "angle": 0, + "content": "[69] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision, pages 592-608. Springer, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.674, + 0.469, + 0.729 + ], + "angle": 0, + "content": "[70] Peihao Zhu, Rameen Abdal, John Femiani, and Peter Wonka. Mind the gap: Domain gap control for single shot domain adaptation for generative adversarial networks. In International Conference on Learning Representations, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.731, + 0.469, + 0.771 + ], + "angle": 0, + "content": "[71] Peihao Zhu, Rameen Abdal, Yipeng Qin, John Femiani, and Peter Wonka. Improved stylegan embedding: Where are the good latents?, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.774, + 0.466, + 0.801 + ], + "angle": 0, + "content": "[72] zllrunning. face-parsing.pytorch. 
https://github.com/zllrunning/face-parsing.PyTorch.2,3" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.469, + 0.801 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "4562" + } + ] +] \ No newline at end of file diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_origin.pdf b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..49a60440afb9d1cac6251b86055b38acf1ca4361 --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/ddf7c6ad-f988-4a54-8cf6-7aff7d8dd81c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b1424fd1b0ce9d96032e6edc3775f412cb2a31eb68e93268d8f39468e0c4f65 +size 7265466 diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/full.md b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5d882c0e7829d26663a750a97207423bbb1c8ff6 --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/full.md @@ -0,0 +1,279 @@ +# 3DAvatarGAN: Bridging Domains for Personalized Editable Avatars + +Rameen Abdal $^{\dagger 1}$ Hsin-Ying Lee $^{2}$ Peihao Zhu $^{\dagger 1}$ Minglei Chai $^{2}$ Aliaksandr Siarohin $^{2}$ +Peter Wonka $^{1}$ Sergey Tulyakov $^{2}$ $^{1}$ KAUST $^{2}$ Snap Inc. + +![](images/df16d596e57fb3925b8f10971a3cd7e1f29201be7f32e2232fd0034c6fca15b4.jpg) +Figure 1. Editable 3D avatars. We present 3DAvatarGAN, a 3D GAN able to produce and edit personalized 3D avatars from a single photograph (real or generated). Our method distills information from a 2D-GAN trained on 2D artistic datasets like Caricatures, Pixar toons, Cartoons, Comics etc. and requires no camera annotations. + +# Abstract + +Modern 3D-GANs synthesize geometry and texture by training on large-scale datasets with a consistent structure. Training such models on stylized, artistic data, with often unknown, highly variable geometry, and camera information has not yet been shown possible. Can we train a 3D GAN on such artistic data, while maintaining multi-view consistency and texture quality? To this end, we propose an adaptation framework, where the source domain is a pre-trained 3D-GAN, while the target domain is a 2D-GAN trained on artistic datasets. We, then, distill the knowledge from a 2D generator to the source 3D generator. To do that, we first propose an optimization-based method to align the distributions of camera parameters across domains. Second, we propose regularizations necessary to learn high-quality texture, while avoiding degenerate geometric solutions, such as flat shapes. Third, we show + +a deformation-based technique for modeling exaggerated geometry of artistic domains, enabling—as a byproduct—personalized geometric editing. Finally, we propose a novel inversion method for 3D-GANs linking the latent spaces of the source and the target domains. Our contributions—for the first time—allow for the generation, editing, and animation of personalized artistic 3D avatars on artistic datasets. Project Page: https://rameenabdal.github.io/3DAvatarGAN + +# 1. 
Introduction + +Photo-realistic portrait face generation is an iconic application demonstrating the capability of generative models especially GANs [28,30,31]. A recent development has witnessed an advancement from straightforwardly synthesizing 2D images to learning 3D structures without 3D supervision, referred to as 3D-GANs [10,41,55,64]. Such training + +is feasible with the datasets containing objects with highly consistent geometry, enabling a 3D-GAN to learn a distribution of shapes and textures. In contrast, artistically stylized datasets [25, 65] have arbitrary exaggerations of both geometry and texture, for example, the nose, cheeks, and eyes can be arbitrarily drawn, depending on the style of the artist as well as on the features of the subject, see Fig. 1. Training a 3D-GAN on such data becomes problematic due to the challenge of learning such an arbitrary distribution of geometry and texture. In our experiments (Sec. 5.1), 3D-GANs [10] generate flat geometry and become 2D-GANs essentially. A natural question arises, whether a 3D-GAN can synthesize consistent novel views of images belonging to artistically stylized domains, such as the ones in Fig. 1. + +In this work, we propose a domain-adaption framework that allows us to answer the question positively. Specifically, we fine-tune a pre-trained 3D-GAN using a 2D-GAN trained on a target domain. Despite being well explored for 2D-GANs [25, 65], existing domain adaptation techniques are not directly applicable to 3D-GANs, due to the nature of 3D data and characteristics of 3D generators. + +The geometry and texture of stylized 2D datasets can be arbitrarily exaggerated depending on the context, artist, and production requirements. Due to this, no reliable way to estimate camera parameters for each image exists, whether using an off-the-shelf pose detector [72] or a manual labeling effort. To enable the training of 3D-GANs on such challenging datasets, we propose three contributions. ① An optimization-based method to align distributions of camera parameters between domains. ② Texture, depth, and geometry regularizations to avoid degenerate, flat solutions and ensure high visual quality. Furthermore, we redesign the discriminator training to make it compatible with our task. We then propose ③ a Thin Plate Spline (TPS) 3D deformation module operating on a tri-plane representation to allow for certain large and sometimes extreme geometric deformations, which are so typical in artistic domains. + +The proposed adaptation framework enables the training of 3D-GANs on complex and challenging artistic data. The previous success of domain adaptation in 2D-GANs unleashed a number of exciting applications in the content creation area [25, 65]. Given a single image such methods first find a latent code corresponding to it using GAN inversion, followed by latent editing producing the desired effect in the image space. Compared to 2D-GANs, the latent space of 3D-GANs is more entangled, making it more challenging to link the latent spaces between domains, rendering the existing inversion and editing techniques not directly applicable. Hence, we take a step further and explore the use of our approach to 3D artistic avatar generation and editing. Our final contribution to enable such applications is (4) a new inversion method for coupled 3D-GANs. + +In summary, the proposed domain-adaption framework + +allows us to train 3D-GANs on challenging artistic datasets with exaggerated geometry and texture. 
We call our method 3DAvatarGAN as it—for the first time—offers generation, editing, and animation of personalized stylized, artistic avatars obtained from a single image. Our results (See Sec. 5.2) show the high-quality 3D avatars possible by our method compared to the naive fine-tuning. + +# 2. Related Work + +GANs and Semantic Image Editing. Generative adversarial Networks (GANs) [19, 47] are one popular type of generative model, especially for smaller high-quality datasets such as FFHQ [32], AFHQ [14], and LSUN objects [67]. For these datasets, StyleGAN [28, 30, 32] can be considered as the current state-of-the-art GAN [27, 28, 30, 32, 33]. The disentangled latent space learned by StyleGAN has been shown to exhibit semantic properties conducive to semantic image editing [1, 3, 16, 22, 36, 44, 51, 56, 62]. CLIP [46] based image editing [2, 17, 44] and domain transfer [15, 70] are another set of works enabled by StyleGAN. + +GAN Inversion. Algorithms to project existing images into a GAN latent space are a prerequisite for GAN-based image editing. There are mainly two types of methods to enable such a projection: optimization-based methods [1,13,57,71] and encoder-based methods [5,7,48,58,69]. On top of both streams of methods, the generator weights can be further modified after obtaining initial inversion results [49]. + +Learning 3D-GANs with 2D Data. Previously, some approaches attempt to extract 3D structure from pre-trained 2D-GANs [42, 52]. Recently, inspired by Neural Radiance Field (NeRF) [9, 37, 43, 68], novel GAN architectures have been proposed to combine implicit or explicit 3D representations with neural rendering techniques [11, 12, 20, 39-41, 50, 53, 55, 63, 64]. In our work, we build on EG3D [11] which has current state-of-the-art results for human faces trained on the FFHQ dataset. + +Avatars and GANs. To generate new results in an artistic domain (e.g. anime or cartoons), a promising technique is to fine-tune an existing GAN pre-trained on photographs, e.g. [45, 54, 60]. Data augmentation and freezing lower layers of the discriminator are useful tools when fine-tuning a 2D-GAN [28, 38]. One branch of methods [18, 44, 70] investigates domain adaptation if only a few examples or only text descriptions are available. While others focus on matching the distribution of artistic datasets with diverse shapes and styles. Our work also falls in this domain. Among previous efforts, StyleCariGAN [25] proposes invertible modules in the generator to train and generate caricatures from real images. DualStyleGAN [65] learns two mapping networks in StyleGAN to control the style and structure of the new domain. Some works are trained on 3D data or require heavy labeling/engineering [21, 26, 66] and use 3D morphable models to map 2D images of carica + +![](images/516e7d18529441246367b2a8888fc2a1c37173cbf2bdf5c251b7f98194755e06.jpg) +Naive Fine-Tuning +Figure 2. Comparison with naive fine-tuning. Comparison of generated 3D avatars with a naively fine-tuned generator $\mathrm{G}_{\mathrm{base}}$ (left sub-figures) versus our generator $\mathrm{G}_{\mathrm{t}}$ (right sub-figures). The corresponding sub-figures show comparisons in terms of texture quality (top two rows) and geometry (bottom two rows). See Sec. 5.1 for details. + +![](images/f9574c015b5d4a9f970cef245e38b45f1237a686dcc4b322c4f7ab095c2acd23.jpg) +Our Method + +tures to 3D models. However, such models fail to model the hair, teeth, neck, and clothes and suffer in texture quality. 
In this work, we are the first to tackle the problem of domain adaption of 3D-GANs and to produce fully controllable 3D Avatars. We employ 2D to 3D domain adaptation and distillation and make use of synthetic 2D data from StyleCariGAN [25] and DualStyleGAN [65]. + +# 3. Domain Adaptation for 3D-GANs + +The goal of domain adaptation for 3D-GANs is to adapt (both texture and geometry) to a particular style defined by a 2D dataset (Caricature, Anime, Pixar toons, Comic, and Cartoons [24, 25, 65] in our case). In contrast to 2D-StyleGAN-based fine-tuning methods that are conceptually simpler [29, 45], fine-tuning a 3D-GAN on 2D data introduces challenges in addition to domain differences, especially on maintaining the texture quality while preserving the geometry. Moreover, for these datasets, there is no explicit shape and camera information. We define the domain adaptation task as follows: Given a prior 3D-GAN i.e. EG3D $(\mathrm{G_s})$ of source domain $(T_{\mathrm{s}})$ , we aim to produce a 3D Avatar GAN $(\mathrm{G_t})$ of the target domain $(T_{\mathrm{t}})$ while maintaining the semantic, style, and geometric properties of $\mathrm{G_s}$ , and at the same time preserving the identity of the subject between the domains $(T_{\mathrm{s}} \leftrightarrow T_{\mathrm{t}})$ . Refer to Fig. 4 in supplementary for the pipeline figure. We represent $\mathrm{G}_{2\mathrm{D}}$ as a teacher 2D-GAN used for knowledge distillation fine-tuned on the above datasets. Note that as $T_{\mathrm{t}}$ is not assumed to contain camera parameter annotations, the training scheme must suppress artifacts such as low-quality texture under different views and flat geometry (See Fig. 2). In the following, we discuss the details of our method. + +# 3.1. How to align the cameras? + +Selecting appropriate ranges for camera parameters is of paramount importance for high-fidelity geometry and texture detail. Typically, such parameters are empirically estimated, directly computed from the dataset using an off-the-shelf pose detector [10], or learned during training [8]. In domains we aim to bridge, such as caricatures for which a 3D model may not even exist, directly estimating the camera distribution is problematic and, hence, is not assumed by our method. Instead, we find it essential to ensure that the camera parameter distribution is consistent across the source and target domains. For the target domain, we use StyleGAN2 trained on FFHQ, fine-tuned on artistic datasets [25, 65]. Assuming that the intrinsic parameters of all the cameras are the same, we aim to match the distribution of extrinsic camera parameters of $G_{\mathrm{s}}$ and $G_{2\mathrm{D}}$ and train our final $G_{\mathrm{t}}$ using it (see illustration in Fig. 2 of the supplementary materials). To this end, we define an optimization-based method to match the sought distributions. The first step is to identify a canonical pose image in $G_{2\mathrm{D}}$ , where the yaw, pitch, and roll parameters are zero. According to Karras et al., [31], the image corresponding to the mean latent code satisfies this property. Let $\theta$ , $\phi$ be the camera Euler angles in a spherical coordinate system, $r$ , $c$ be the radius of the sphere and camera lookat point, and $M$ be a function that converts these parameters into the camera-to-world matrix. 
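As a rough illustration only, the snippet below sketches what such a conversion could look like under a simple y-up look-at convention; the exact convention and implementation used by EG3D may differ, and the helper name `cam2world` is our own.

```python
import numpy as np

def cam2world(theta, phi, lookat, radius):
    """Sketch of M(theta, phi, c, r): map spherical angles, a look-at point, and a
    radius to a 4x4 camera-to-world matrix (assumes a simple y-up look-at convention)."""
    # Place the camera on a sphere of the given radius centered at the look-at point.
    cam_pos = lookat + radius * np.array([
        np.sin(theta) * np.cos(phi),  # x: yaw around the vertical axis
        np.sin(phi),                  # y: elevation
        np.cos(theta) * np.cos(phi),  # z: depth
    ])
    # Build an orthonormal camera frame that looks at the look-at point.
    forward = lookat - cam_pos
    forward = forward / np.linalg.norm(forward)
    right = np.cross(forward, np.array([0.0, 1.0, 0.0]))
    right = right / np.linalg.norm(right)
    up = np.cross(right, forward)
    M = np.eye(4)
    M[:3, 0], M[:3, 1], M[:3, 2], M[:3, 3] = right, up, -forward, cam_pos
    return M

# Example: canonical (zero yaw and pitch) pose looking at the origin from radius 2.7.
M_canonical = cam2world(0.0, 0.0, np.array([0.0, 0.0, 0.0]), 2.7)
```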
Let $I_{\mathrm{s}}(w,\theta,\phi,c,r) = G_{\mathrm{s}}(w,M(\theta,\phi,c,r))$ and $I_{2\mathrm{D}}(w) = G_{2\mathrm{D}}(w)$ represent an arbitrary image generated by $G_{\mathrm{s}}$ and $G_{2\mathrm{D}}$, respectively, given the $w$ code variable. Let $k_{\mathrm{d}}$ be the face key-points detected by the detector $K_{\mathrm{d}}$ [72], then + +$$ (c^{\prime}, r^{\prime}) := \underset{(c, r)}{\arg\min}\; \mathrm{L}_{\mathrm{kd}}\big(I_{\mathrm{s}}(w_{\mathrm{avg}}^{\prime}, 0, 0, c, r),\, I_{2\mathrm{D}}(w_{\mathrm{avg}})\big), \tag{1} $$ + +where $\mathrm{L_{kd}}(I_1,I_2) = \|k_{\mathrm{d}}(I_1) - k_{\mathrm{d}}(I_2)\|_1$ and $w_{\mathrm{avg}}$ and $w_{\mathrm{avg}}^{\prime}$ are the mean $w$ latent codes of $\mathrm{G}_{2\mathrm{D}}$ and $\mathrm{G_s}$, respectively. In our results, $r^{\prime}$ is determined to be 2.7 and $c^{\prime}$ is approximately [0.0, 0.05, 0.17]. The next step is to determine a safe range for the $\theta$ and $\phi$ parameters. Following the prior works StyleFlow [3] and FreeStyleGAN [35] (see Fig. 5 therein), we set these parameters to $\theta^{\prime}\in [-0.45,0.45]$ and $\phi^{\prime}\in [-0.35,0.35]$ in radians. + +# 3.2. What loss functions and regularizers to use? + +Next, although the camera systems are aligned, the given dataset may not stem from a consistent 3D model, e.g., in the case of caricatures or cartoons. This entices the generator $G_{t}$ to converge to an easier degenerate solution with a flat geometry. Hence, to benefit from the geometric prior of $G_{s}$, another important step is to design the loss functions and regularizers for a selected set of parameters to update in $G_{t}$. Next, we discuss these design choices: + +![](images/be9123cd168c87d431112c83004391d76550d1a56bad9d921aced8ac8a8ee51f.jpg) +Figure 3. Domain adaptation. Domain adaptation results of images from source domain $T_{\mathrm{s}}$ (top row in each sub-figure) to target domain $T_{\mathrm{t}}$. Rows two to five show corresponding 3D avatar results from different viewpoints. + +![](images/d715cafb2b6af2c7094e88f5dff4bbf7e8fb8e731426e3ca0f77d84460d8c3a4.jpg) + +![](images/6fe6fc94d25497f665769f6248b26f4df3aedbb4db51ea523686a25d8e882e8e.jpg) + +Loss Functions. To ensure texture quality and diversity, we resort to the adversarial loss used to fine-tune GANs as our main loss function. We use the standard non-saturating loss to train the generator and discriminator networks used in EG3D [11]. We also perform lazy density regularization to ensure consistency of the density values in the final fine-tuned model $\mathrm{G}_{\mathrm{t}}$. + +Texture Regularization. Since the texture can be entangled with the geometry information, determining which layers to update is important. To make use of the fine-style information encoded in the later layers, it is essential to update the $tRGB$ layer parameters (outputting tri-plane features) before the neural rendering stage. The $tRGB$ layers are convolutional layers that transform feature maps to 3 channels at each resolution (96 channels in the tri-planes). Moreover, since the network has to adapt to the color distribution of $T_{t}$, it is essential to update the decoder (MLP layers) of the neural rendering pipeline as well. Given the EG3D architecture, we also update the super-resolution layer parameters to ensure coherency between the low-resolution and high-resolution outputs seen by the discriminator D.
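To make this layer selection concrete, the following is a minimal sketch, not the authors' released code, of freezing a generator and re-enabling gradients only for the tRGB, neural-rendering decoder, and super-resolution parameters. The name substrings and the toy generator below are illustrative assumptions; the actual EG3D module names depend on the implementation in use.

```python
import torch
import torch.nn as nn

def select_finetune_params(generator: nn.Module,
                           patterns=("torgb", "decoder", "superresolution")):
    """Freeze all weights, then re-enable gradients for parameters whose names
    match one of the given substrings (tRGB heads, rendering MLP, SR layers)."""
    trainable = []
    for name, param in generator.named_parameters():
        keep = any(p in name.lower() for p in patterns)
        param.requires_grad_(keep)
        if keep:
            trainable.append(param)
    return trainable

# Tiny stand-in "generator" so the sketch runs end to end; EG3D itself is not required.
toy_generator = nn.ModuleDict({
    "backbone":        nn.Linear(8, 8),   # early synthesis layers: stay frozen here
    "torgb":           nn.Linear(8, 96),  # tRGB head producing tri-plane features
    "decoder":         nn.Linear(96, 4),  # neural-rendering MLP (color + density)
    "superresolution": nn.Linear(4, 3),   # super-resolution module
})

params = select_finetune_params(toy_generator)
optimizer = torch.optim.Adam(params, lr=2e-3)
print(f"fine-tuning {sum(p.numel() for p in params)} parameters")
```

The same helper can be reused to ablate which parameter groups are adapted by changing the substring list.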
Geometry Regularization. In order to allow the network to learn the structure distribution of $T_{\mathrm{t}}$ and at the same time ensure that the properties of the $\mathcal{W}$ and $S$ latent spaces are preserved, we update the earlier layers with regularization. This also encourages the latent spaces of $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$ to be easily linked. Essentially, we update the deviation parameter $\Delta s$ from the $s$ activations of the $S$ space [62]. The $s$ activations are predicted by $\mathrm{A}(w)$, where $\mathrm{A}$ is the learned affine function in EG3D. The $s$ activations scale the kernels of a particular layer. In order to preserve the identity as well as the geometry, such that the optimization of $\Delta s$ does not deviate too far away from the original domain $T_{\mathrm{s}}$, we introduce a regularizer given by + +$$ \mathrm{R}(\Delta s) := \|\Delta s\|_{1}. \tag{2} $$ + +Note that we apply the $\mathrm{R}(\Delta s)$ regularization in a lazy manner, i.e., together with the lazy density regularization. Interestingly, after training, we can interpolate between the $s$ and $s + \Delta s$ parameters to interpolate between the geometries of samples in $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$ (see Fig. 5). + +Depth Regularization. Next, we observe that even though the above design choices produce better geometry for $T_{\mathrm{t}}$, some samples from $G_{\mathrm{t}}$ can still lead to flatter geometry, and it is hard to detect these cases. We found that the problem is related to the relative depth of the background with respect to the foreground. To circumvent this problem, we use an additional regularization, where we encourage the average background depth of $G_{\mathrm{t}}$ to be similar to that of $G_{\mathrm{s}}$. Let $S_{\mathrm{b}}$ be a face background segmentation network [34]. We first compute the average background depth of the samples given by $G_{\mathrm{s}}$. This average depth is given by + +$$ a_{\mathrm{d}} := \frac{1}{M}\sum_{n=1}^{M}\left(\frac{1}{N_{n}}\left\|D_{n}\odot \mathrm{S}_{\mathrm{b}}(I_{n})\right\|_{F}^{2}\right). \tag{3} $$ + +Here, $D_{n}$ is the depth map of the image $I_{n}$ sampled from $G_{\mathrm{s}}$, $\odot$ represents the Hadamard product, $M$ is the number of sampled images, and $N_{n}$ is the number of background pixels in $I_{n}$. Finally, the regularization is defined as + +$$ \mathrm{R}(D) := \left\|a_{\mathrm{d}}\cdot J - \left(D_{\mathrm{t}}\odot \mathrm{S}_{\mathrm{b}}(I_{\mathrm{t}})\right)\right\|_{F}, \tag{4} $$ + +where $D_{\mathrm{t}}$ is the depth map of the image $I_{\mathrm{t}}$ sampled from $\mathrm{G}_{\mathrm{t}}$ and $J$ is the matrix of ones having the same spatial dimensions as $D_{\mathrm{t}}$. + +![](images/8dff4311d2c0df9713a53db9cc71a323be40d2dafbf0424e7022191e653e146c.jpg) +Figure 4. 3D avatars from real images. Projection of real images on the 3D avatar generators.
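A minimal sketch of the background-depth statistic in Eq. 3 and the regularizer in Eq. 4 is given below, assuming access to depth maps from the renderer and a binary background mask standing in for the segmentation network $S_{\mathrm{b}}$ [34]; the random tensors only make the example self-contained and are not real data.

```python
import torch

def average_background_depth(depths: torch.Tensor, bg_masks: torch.Tensor) -> torch.Tensor:
    """Eq. 3: mean over M samples of (1/N_n) * ||D_n * S_b(I_n)||_F^2."""
    masked = depths * bg_masks                             # zero out foreground pixels
    sq_frob = masked.flatten(1).pow(2).sum(dim=1)          # squared Frobenius norm per sample
    n_bg = bg_masks.flatten(1).sum(dim=1).clamp(min=1.0)   # N_n: background pixel counts
    return (sq_frob / n_bg).mean()

def depth_regularizer(a_d: torch.Tensor, depth_t: torch.Tensor, bg_mask_t: torch.Tensor) -> torch.Tensor:
    """Eq. 4: || a_d * J - D_t * S_b(I_t) ||_F with J an all-ones matrix."""
    J = torch.ones_like(depth_t)
    return torch.linalg.norm(a_d * J - depth_t * bg_mask_t)

# Self-contained demo with random stand-ins: M = 8 source samples, 64x64 depth maps.
depth_s   = 2.0 + torch.rand(8, 64, 64)
bg_mask_s = (torch.rand(8, 64, 64) > 0.5).float()
a_d = average_background_depth(depth_s, bg_mask_s)        # computed once from G_s samples

depth_t   = 2.0 + torch.rand(1, 64, 64)
bg_mask_t = (torch.rand(1, 64, 64) > 0.5).float()
print(a_d.item(), depth_regularizer(a_d, depth_t, bg_mask_t).item())
```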
# 3.3. What discriminator to use? + +Given that the data in $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$ is not paired and $T_{\mathrm{t}}$ is not assumed to contain camera parameter annotations, the choice of the discriminator (D) used for this task is also a critical design choice. Essentially, we use the unconditional version of the dual discriminator proposed in EG3D, and hence, we do not condition the discriminator on the camera information. As a result, during training, $G_{\mathrm{t}}$ generates images under arbitrary poses sampled via $\mathrm{M}(\theta', \phi', c', r')$, and the discriminator compares these generated images against arbitrary images from $T_{\mathrm{t}}$. We train the discriminator from scratch and, in order to adapt $T_{\mathrm{s}} \rightarrow T_{\mathrm{t}}$, we use the StyleGAN-ADA [28] training scheme together with R1 regularization. + +# 3.4. How to incorporate larger geometric deformations between domains? + +While the regularizers limit the geometric changes when adapting from $T_{\mathrm{s}}$ to $T_{\mathrm{t}}$, modeling large geometric deformations, e.g., in the caricature dataset, is another challenge. One choice for editing the geometry is to use the properties of the tri-plane features learned by EG3D. We start out by analyzing these three planes in $\mathrm{G_s}$. We observe that the frontal plane encodes most of the information required to render the final image. To quantify this, we sample images and depth maps from $\mathrm{G_s}$ and swap the front and the other planes between two random images. Then we compare the difference in RGB values of the images and the Chamfer distance of the depth maps. When swapping the frontal tri-planes, the final images are completely swapped, and the Chamfer distance changes by $80\sim 90\%$, matching the swapped image's depth map. In the case of the other two planes, the RGB image is not much affected and the Chamfer distance of the depth maps is reduced by only $20\sim 30\%$ in most cases. + +Given this analysis, we focus on manipulating the 2D front plane features to learn additional deformations or exaggerations. We learn a TPS (Thin Plate Spline) [61] network on top of the front plane. Our TPS network is conditioned both on the front plane features as well as on the $\mathcal{W}$ space to enable multiple transformations. The architecture of the module is similar to a standard StyleGAN2 layer with an MLP appended at the end to predict the control points that transform the features. Hence, as a byproduct, we also enable 3D-geometry editing guided by the learned latent space. We train this module separately after $G_{\mathrm{t}}$ has been trained. We find that joint training is unstable due to exploding gradients arising from the large domain gap between $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$ in the initial stages. Formally, we define this transformation as + +$$ \mathrm{T}(w, f) := \Delta c, \tag{5} $$ + +where $w$ is the latent code, $f$ is the front plane, and $c$ are the control points. + +Let $c_{\mathrm{I}}$ be the initial control points producing an identity transformation, $(c_{1}, c_{2})$ be the control points corresponding to the front planes $(f_{1}, f_{2})$ sampled using the $\mathcal{W}$ codes $(w_{1}, w_{2})$, respectively, and $(c_{1}', c_{2}')$ be the control points obtained with $(w_{1}, w_{2})$ swapped in the TPS module. To regularize the module and encourage it to learn different deformations, we use + +$$ \mathrm{R}(\mathrm{T}_{1}) := \alpha \sum_{n=1}^{2}\|c_{I} - c_{n}\|_{1} - \beta \|c_{1} - c_{2}\|_{1} - \sigma \|c_{1}^{\prime} - c_{2}^{\prime}\|_{1}. \tag{6} $$ + +The initial control point term penalizes large deviations of the control points, which would otherwise explode.
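Below is a minimal sketch, not the authors' code, of the control-point regularizer in Eq. 6 applied to control-point tensors such as those predicted by the TPS module. The grid size and the weights `alpha`, `beta`, and `sigma` are illustrative assumptions.

```python
import torch

def control_point_regularizer(c_identity, c1, c2, c1_swap, c2_swap,
                              alpha=1.0, beta=0.1, sigma=0.1):
    """Eq. 6: pull predicted control points toward the identity warp while pushing
    warps for different (w, f) pairs, and for swapped w codes, apart."""
    l1 = lambda a, b: (a - b).abs().sum()
    anchor = alpha * (l1(c_identity, c1) + l1(c_identity, c2))
    spread = beta * l1(c1, c2) + sigma * l1(c1_swap, c2_swap)
    return anchor - spread

# Demo on a 6x6 grid of 2D control points (a stand-in for the TPS module's output).
ys, xs = torch.meshgrid(torch.linspace(-1, 1, 6), torch.linspace(-1, 1, 6), indexing="ij")
c_id = torch.stack([xs, ys], dim=-1).reshape(-1, 2)        # identity-warp control points
perturb = lambda: c_id + 0.02 * torch.randn_like(c_id)
c1, c2, c1_swap, c2_swap = perturb(), perturb(), perturb(), perturb()
print(control_point_regularizer(c_id, c1, c2, c1_swap, c2_swap).item())
```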
Additionally, to learn extreme exaggerations in $T_{\mathrm{t}}$ and, in expectation, conform to the target distribution of the dataset, we add an additional loss term. Let $\mathrm{S}(I)$ be the soft-argmax output of the face segmentation network [34] given an image $I$, and assume that $\mathrm{S}$ generalizes to caricatures. Then + +$$ \mathrm{R}(\mathrm{T}_{2}) := \left\|\mathrm{S}\left(\mathrm{G}_{\mathrm{t}}(w)\right) - \mathrm{S}\left(I_{\mathrm{t}}\right)\right\|_{1}. \tag{7} $$ + +Eq. 6, Eq. 7, and the adversarial training loss are used to train the $TPS$ module. We adopt gradient clipping to make sure that the training does not diverge. See the illustrations in Fig. 3 and Fig. 4 of the supplementary materials. + +![](images/b54b53f3e6f87b4fc83c381fce3ac23524a5f98093017d653f4f9af65eb27f18.jpg) +Figure 5. Interpolation of $\Delta s$. Geometric deformation using the interpolation of learned $\Delta s$ parameters. + +# 4. Personalized Avatar Generation and Editing + +Although 3D domain adaptation adapts $T_{\mathrm{s}} \leftrightarrow T_{\mathrm{t}}$, it is still a challenge to effectively link the latent spaces of $\mathrm{G_s}$ and $\mathrm{G_t}$ to generate personalized 3D avatars using a single photograph as the reference image. In particular, the challenge arises due to the discrepancy between the coupled latent spaces when dealing with the projection of real photographs onto 3D generators. Moreover, one would like to edit and animate these 3D avatars. + +Projection. The task is to project a real image into the latent space of $\mathrm{G_s}$, transfer the latent code to $\mathrm{G_t}$, and further optimize it to construct a 3D avatar. First, we use an optimization-based method to find the $w$ code that minimizes the difference between the generated image and the real image in $\mathrm{G_s}$. To achieve this, the first step is to align the cameras; we follow the steps described in Sec. 3.1. Next, we use a pixel-wise MSE loss and an LPIPS loss to project the image into $\mathrm{G_s}$ [1]. Additionally, to preserve the identity of the subject, we use attribute classifiers; e.g., the caricature dataset [24] provides coupled attribute information of real images and caricatures. We use such attribute classifiers [24, 25] in a post-hoc manner, as we notice that such networks can affect the texture in the target domain and could degenerate to narrow style outputs if applied during training. Moreover, such networks may not be available for all target domains. To avoid overfitting to $\mathrm{G_s}$ and to encourage an easier transfer of the optimized latent code to $\mathrm{G_t}$, we use $\mathcal{W}$ space optimization for this step. Finally, we use this $w$ code as the initialization for $\mathrm{G_t}$ and apply an additional attribute classifier loss [25] for the $T_{\mathrm{t}}$ domain along with the depth regularization $\mathrm{R}(D)$ (Eq. 4). As an approximation, we assume that the attribute classifiers [24, 25] generalize across all domains. We use $\mathcal{W}/\mathcal{W}+$ space optimization to control the quality and diversity of the outputs. See Algorithm 1 in the supplementary material for the description. + +Editing and Animation. Since our 3D domain adaptation is designed to preserve the properties of the $\mathcal{W}$ and $S$ spaces, we can perform semantic edits via InterFaceGAN [51], GANSpace [22], StyleSpace [62], etc., and geometric edits using TPS (Sec. 3.4) and $\Delta s$ interpolation (Sec. 3.2). To perform video editing, we design an encoder for EG3D based on $e4e$ [58] to encode videos and transfer the edits from $\mathrm{G_s}$ to $\mathrm{G_t}$ based on the $w$ codes [4, 6, 59]. We leave a more fine-grained approach for video processing as future work.
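As a rough sketch of the projection step described above, rather than the authors' Algorithm 1, the loop below optimizes a $\mathcal{W}$ code against a reference photo with a pixel-wise MSE plus a perceptual term; `g_s` and `lpips_fn` are stand-ins so the snippet runs, and the attribute classifier loss and depth regularizer $\mathrm{R}(D)$ used when refining in $\mathrm{G_t}$ are omitted.

```python
import torch
import torch.nn.functional as F

def invert_into_w(generator, target, w_init, lpips_fn, steps=200, lr=0.01, lpips_weight=1.0):
    """Optimization-based W-space inversion with pixel-wise MSE + a perceptual loss."""
    w = w_init.clone().requires_grad_(True)
    opt = torch.optim.Adam([w], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        image = generator(w)
        loss = F.mse_loss(image, target) + lpips_weight * lpips_fn(image, target)
        loss.backward()
        opt.step()
    return w.detach()

# Stand-ins so the sketch runs: a "generator" mapping a 512-d w code to a 3x64x64 image
# and a placeholder perceptual loss (a real setup would use EG3D's G_s and an LPIPS net).
proj = torch.randn(512, 3 * 64 * 64)
g_s = lambda w: torch.tanh(w @ proj).view(1, 3, 64, 64)
lpips_fn = lambda a, b: (a - b).abs().mean()

real_image = torch.rand(1, 3, 64, 64) * 2 - 1
w_avg = torch.zeros(1, 512)                      # start from the mean latent code
w_s = invert_into_w(g_s, real_image, w_avg, lpips_fn)
# w_s would then initialize the same code in G_t, where the attribute classifier loss
# and the depth regularizer R(D) (not shown here) refine it into the final 3D avatar.
```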
# 5. Results + +# 5.1. Quantitative Results + +In this section, we consider three evaluations to verify the quality of the texture, the geometry, and the identity preservation in the new domain, using the Caricature, Cartoon, and Pixar toon datasets. We evaluate the ablation of our design choices in the supplementary materials. In the evaluation, let $\mathrm{G}_{\mathrm{base}}$ be the naive fine-tuning baseline, in which all parameters are trained with the EG3D losses, fine-tuned from the FFHQ-trained prior $\mathrm{G}_{\mathrm{s}}$. Note that we still align the cameras in $\mathrm{G}_{\mathrm{base}}$ using the method defined in Sec. 3.1 and use the adaptive discriminator augmentation [28] with R1 regularization for a fair comparison. + +Texture Quality. To verify the quality of the texture, the diversity of samples, and, to some extent, the geometry in the target domain $T_{\mathrm{t}}$, we compare the FID [23] scores of $\mathrm{G}_{\mathrm{base}}$ and $\mathrm{G}_{\mathrm{t}}$ in Table 1. Note that in the case of Caricatures, we report two scores, i.e., with and without using the attribute classifier loss in the training, as discussed in Sec. 4. Notice that our method outperforms the naive baseline by a large margin in some cases, especially on Caricatures and Cartoons. We attribute these differences to the mode-collapse-prone training of $\mathrm{G}_{\mathrm{base}}$, which is correlated with the degenerate flat-geometry solution. We show visual results of the flat geometries learned by $\mathrm{G}_{\mathrm{base}}$ and a comparison in Fig. 2. + +Geometric Quality. To quantify the flat geometries, in Table 2, we show three scores that help us understand such degenerate solutions. Here we consider coupled depth maps generated from sampling in the domains $T_{\mathrm{s}}$ ($\mathrm{G_s}$) and $T_{\mathrm{t}}$ ($\mathrm{G_t}$ and $\mathrm{G}_{\mathrm{base}}$). First, we compute the expectation of the absolute mean differences ($M_{\mathrm{d}}$) of the corresponding foreground depth maps sampled from $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$. We also compute the expectation of the absolute standard deviation differences ($S_{\mathrm{d}}$) for the same setting. Here, we assume that flatter geometries have a larger difference in the depth maps compared to the prior, as indicated by $M_{\mathrm{d}}$. Moreover, $S_{\mathrm{d}}$ measures the distance between the distributions of the depth values, where a larger difference indicates a narrower distribution, and hence a flatter geometry. We also notice that flat geometry is correlated with the generator learning diverse poses when images are rendered under standard canonical camera parameters, i.e., $\mathrm{M}(0,0,c,r)$. We hypothesize that, in the case of flatter geometries, the model learns the pose information in the earlier layers instead of being camera-view-dependent. + +![](images/81649d86574bde316f083bf9578b5c3b43f0db2eb221bbdfe805561260412bd4.jpg) +Figure 6. Deformations using TPS. Geometric edits using our proposed TPS (Thin Plate Spline) module learned on the frontal tri-plane features. Each sub-figure shows a 3D avatar and three examples of TPS deformations sampled from the learned 3D deformation space. + +Table 1. FID Computation. FID (Fréchet Inception Distance) between the 2D dataset and the samples generated by the fine-tuned 3D GAN using the baseline $(\mathrm{G_{base}})$ and ours $(\mathrm{G_t})$. '*' denotes the score with the inclusion of the attribute classifier loss discussed in Sec. 4. + +
| Method | Caricatures | Cartoons | Pixar Toons |
| --- | --- | --- | --- |
| $\mathrm{G_{base}}$ | 67.8 | 79.0 | 15.1 |
| $\mathrm{G_t}$ (Ours) | 19.4 / 20.2* | 12.8 | 12.4 |
+ +Table 2. Geometry Evaluation. Comparing the geometry using the baseline method $(\mathrm{G_{base}})$ and ours $(\mathrm{G_t})$. For the definitions of $M_{\mathrm{d}}$, $S_{\mathrm{d}}$, and $\mathrm{R}(\mathrm{T}_2)$, refer to Sec. 5.1. + +
| Metric | Method | Caricatures | Cartoons | Pixar |
| --- | --- | --- | --- | --- |
| $M_{\mathrm{d}}$ ↓ | $\mathrm{G_{base}}$ | 0.47 | 0.21 | 0.29 |
| $M_{\mathrm{d}}$ ↓ | $\mathrm{G_t}$ (Ours) | 0.21 | 0.13 | 0.13 |
| $S_{\mathrm{d}}$ ↓ | $\mathrm{G_{base}}$ | 0.22 | 0.14 | 0.15 |
| $S_{\mathrm{d}}$ ↓ | $\mathrm{G_t}$ (Ours) | 0.15 | 0.10 | 0.09 |
| $\mathrm{R}(\mathrm{T}_2)$ ↓ | $\mathrm{G_{base}}$ | 2.99 | 3.39 | 4.01 |
| $\mathrm{R}(\mathrm{T}_2)$ ↓ | $\mathrm{G_t}$ (Ours) | 2.27 | 1.62 | 1.56 |
+ +Table 3. Identity Preservation. Identity preservation using baseline $(\mathrm{G}_{\mathrm{base}})$ and Ours $(\mathrm{G}_{\mathrm{t}})$ . + +
| Method | Caricatures | Cartoons | Pixar Toons |
| --- | --- | --- | --- |
| $\mathrm{G_{base}}$ | 1.28 | 0.92 | 0.85 |
| $\mathrm{G_t}$ (Ours) | 0.87 | 0.81 | 0.73 |
To quantify this, since pose information may not be available for some domains (e.g. cartoons), we compute the $\mathrm{R}(\mathrm{T}_2)$ scores between corresponding images in the domains $T_{\mathrm{s}}$ ($\mathrm{G}_{\mathrm{s}}$) and $T_{\mathrm{t}}$ ($\mathrm{G}_{\mathrm{t}}$ and $\mathrm{G}_{\mathrm{base}}$). Note that these scores are computed without the TPS module. Our scores are lower on all three metrics, hence validating that our method avoids the degenerate solution and preserves the geometric distribution of the prior. For a discussion of the TPS module and ablations, refer to the supplementary materials. + +Identity Preservation. The identity preservation score is another important evaluation of the quality of the latent space linking between $\mathrm{G_s}$ and $\mathrm{G_t}$. In Table 3, we compute the attribute loss (BCE loss) between the domains $T_{\mathrm{s}}$ and $T_{\mathrm{t}}$ using the attribute classifiers [24, 25]. Note that our method is able to preserve the identity better across the domains. + +![](images/ffcb362871dc7ba10eb393f7d821dc10ff50e964ee7a1b5456194ee10c11b4a4.jpg) +Figure 7. Local edits. Local edits performed on the 3D avatars using the $S$ space. + +# 5.2. Qualitative Results + +For qualitative results, we show the results of the domain adaptation, as well as the personalized edits (geometric and semantic), performed on the resultant 3D avatars. First, in order to show the quality of domain adaptation, identity preservation, and geometric consistency, in Fig. 3, we show results from $\mathrm{G}_{\mathrm{s}}$ and corresponding results from the 3D avatar generator $\mathrm{G}_{\mathrm{t}}$ trained on the Caricature, Pixar toon, Cartoon, and Comic domains. Next, in order to show that the method generalizes to real images, we use the method described in Sec. 4 to project and transfer the latent code from $\mathrm{G_s}$ to $\mathrm{G_t}$ to produce the 3D avatars. In Fig. 4, we show our results of real-to-3D-avatar transfer. Notice the quality, both in terms of texture and geometry, achieved by our method for these results. + +![](images/b1f8079284bc58efc40c07e2aaf6929d07f1e41440083f4cf8015ce8654f1cec.jpg) +Figure 8. 3D avatar animation. Animation of 3D avatars generated using a driving video encoded in source domain $T_{s}$ and applied to samples in target domain $T_{t}$. The top row shows the driving video and the subsequent rows show generated animations using a random Caricature or Pixar toon. The head pose is changed in each frame of the generated animation to show 3D consistency. + +Next, we show the geometric and semantic edits possible to produce personalized 3D avatars: + +Geometry Edits. We show two types of geometric edits, i.e., $\Delta s$ interpolation (Sec. 3.2) and deformation using the $TPS$ module (Sec. 3.4). First, in Fig. 5, we show the geometry interpolation by interpolating between the original $s$ activations of $\mathrm{G_s}$ and the learned $\Delta s$ parameters. In Fig. 6, we show some additional exaggerations in caricatures using the learned 3D deformation latent space of the $TPS$ module. + +Semantic Edits and Animation. Since our latent regularization is designed to preserve the properties of the latent space learned by the $\mathrm{G}_{\mathrm{s}}$ generator, in Fig. 7 we show $S$ space edits performed on the 3D avatars. Notice the quality of the edits in terms of locality and adaptability.
Additionally, we can edit semantics such as hair, which is not possible with 3D morphable model based methods. In Fig. 8, thanks to the latent space semantics preservation ensured by our method, we can perform video edits to create a coherent animation based on the differences of the $w$ codes of the video encoded in $\mathrm{G}_{\mathrm{s}}$ (Sec. 4), applied to layers 7-10 in $\mathrm{G}_{\mathrm{t}}$. Notice the quality of expressions, identity preservation, and 3D consistency across each identity in each row. + +# 6. Conclusion + +We tackled two open research problems in this paper. In the first part, we proposed, to the best of our knowledge, the first domain adaptation method for 3D-GANs. This part yields two linked EG3D generators, one in the photorealistic source domain of faces, and another EG3D generator in an artistic target domain. As possible target domains, we show results for cartoons, caricatures, and comics. In the second part, we built on domain adaptation to create 3D avatars in an artistic domain that can be edited and animated. Our framework consists of multiple technical components introduced in this paper. First, we propose a technique for camera space estimation for artistic domains. Second, we introduce a set of regularizers and loss functions that can regularize the fine-tuning of EG3D in such a way that enough of the 3D structure and geometry of the original model is kept, while the distinguishing attributes of the artistic domain, such as textures, colors, and local geometric deformations, can still be learned. Third, we introduce a geometric deformation module that can reintroduce larger geometric deformations in a controlled manner. These larger geometric deformations can interact and cooperate with EG3D so that semantic edits are still possible. Finally, we propose an embedding algorithm that is especially suitable for two linked EG3D generator networks. + +# References + +[1] Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, Seoul, Korea, 2019. IEEE. 2, 6 +[2] Rameen Abdal, Peihao Zhu, John Femiani, Niloy Mitra, and Peter Wonka. Clip2stylegan: Unsupervised extraction of stylegan edit directions. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, New York, NY, USA, 2022. Association for Computing Machinery. 2 +[3] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. ACM Trans. Graph., 40(3), may 2021. 2, 3 +[4] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Video2stylegan: Disentangling local and global variations in a video, 2022. 6 +[5] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2021. 2 +[6] Yuval Alaluf, Or Patashnik, Zongze Wu, Asif Zamir, Eli Shechtman, Dani Lischinski, and Daniel Cohen-Or. Third time's the charm? image and video editing with stylegan3. CoRR, abs/2201.13433, 2022. 6 +[7] Yuval Alaluf, Omer Tov, Ron Mokady, Rinon Gal, and Amit H. Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. CoRR, abs/2111.15666, 2021. 2 +[8] Anonymous. 3d generation on imagenet. In Open Review, 2023.
3 +[9] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 2 +[10] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 1, 2, 3 +[11] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks, 2021. 2, 4 +[12] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 2 +[13] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In IEEE Conference on Computer Vision and Pattern Recognition, 2022. 2 + +[14] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2 +[15] Min Jin Chong and David A. Forsyth. Jojogan: One shot face stylization. CoRR, abs/2112.11641, 2021. 2 +[16] Min Jin Chong, Hsin-Ying Lee, and David Forsyth. Stylegan of all trades: Image manipulation with only pretrained stylegan. arXiv preprint arXiv:2111.01619, 2021. 2 +[17] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946, 2021. 2 +[18] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators, 2021. 2 +[19] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks, 2014. 2 +[20] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylererf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations, 2022. 2 +[21] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 2 +[22] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. arXiv preprint arXiv:2004.02546, 2020. 2, 6 +[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 6 +[24] Jing Huo, Wenbin Li, Yinghuan Shi, Yang Gao, and Hujun Yin. Webcaricature: a benchmark for caricature recognition. In British Machine Vision Conference, 2018. 3, 6, 7 +[25] Wonjong Jang, Gwangjin Ju, Yucheol Jung, Jiaolong Yang, Xin Tong, and Seungyong Lee. Stylecarigan: Caricature generation via stylegan feature map modulation. 40(4), 2021. 
2, 3, 6, 7 +[26] Yucheol Jung, Wonjong Jang, Soongjin Kim, Jiaolong Yang, Xin Tong, and Seungyong Lee. Deep deformable 3d caricatures with learned shape control. In Special Interest Group on Computer Graphics and Interactive Techniques Conference Proceedings. ACM, aug 2022. 2 +[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation, 2017. 2 +[28] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In Proc. NeurIPS, 2020. 1, 2, 5, 6 +[29] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. arXiv preprint arXiv:2006.06676, 2020.3 + +[30] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks, 2021. 1, 2 +[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4401-4410, 2019. 1, 3 +[32] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based generator architecture for generative adversarial networks. IEEE transactions on pattern analysis and machine intelligence, 43(12):4217-4228, Dec. 2021. 2 +[33] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In Proc. CVPR, 2020. 2 +[34] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 4, 5 +[35] Thomas Leimkuhler and George Drettakis. Freestylegan: Free-view editable portrait rendering with the camera manifold. 40(6), 2021. 3 +[36] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2 +[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2 +[38] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning gans, 2020. 2 +[39] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2 +[40] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2 +[41] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. StyleSDF: High-Resolution 3D-Consistent Image and Geometry Generation. arXiv preprint arXiv:2112.11427, 2021. 1, 2 +[42] Xingang Pan, Bo Dai, Ziwei Liu, Chen Change Loy, and Ping Luo. Do 2d gans know 3d shape? unsupervised 3d shape reconstruction from 2d image gans. arXiv preprint arXiv:2011.00844, 2020. 2 +[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Deformable neural radiance fields. 
arXiv preprint arXiv:2011.12948, 2020. 2 +[44] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery, 2021. 2 + +[45] Justin N. M. Pinkney and Doron Adler. Resolution dependent gan interpolation for controllable image synthesis between domains, 2020. 2, 3 +[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 2 +[47] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks, 2015. 2 +[48] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. arXiv preprint arXiv:2008.00951, 2020. 2 +[49] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 2 +[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2 +[51] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2, 6 +[52] Yichun Shi, Divyansh Aggarwal, and Anil K Jain. Lifting 2d stylegan for 3d-aware face generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6258-6266, 2021. 2 +[53] Ivan Skorokhodov, Aliaksandr Siarohin, Yinghao Xu, Jian Ren, Hsin-Ying Lee, Peter Wonka, and Sergey Tulyakov. 3d generation on imagenet. In International Conference on Learning Representations (ICLR), 2023. 2 +[54] Guoxian Song, Linjie Luo, Jing Liu, Wan-Chun Ma, Chunpong Lai, Chuanxia Zheng, and Tat-Jen Cham. Agilegan: Stylizing portraits by inversion-consistent transfer learning. ACM Trans. Graph., 40(4), jul 2021. 2 +[55] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis, 2022. 1, 2 +[56] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020. 2 +[57] Ayush Tewari, Mohamed Elgharib, Mallikarjun BR, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zöllhofer, and Christian Theobalt. Pie: Portrait image embedding for semantic control. volume 39, December 2020. 2 +[58] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. arXiv preprint arXiv:2102.02766, 2021. 2, 6 + +[59] Rotem Tzaban, Ron Mokady, Rinon Gal, Amit H. Bermano, and Daniel Cohen-Or. *Stitch it in time: Gan-based facial editing of real videos. CoRR*, abs/2201.08361, 2022. 6 +[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Cross-domain and disentangled face manipulation with 3d guidance. IEEE Transactions on Visualization and Computer Graphics, 2022. 2 +[61] WarBean. 
tps-stn-pytorch. https://github.com/WarBean/tps_stn_pytorch.5 +[62] Zongze Wu, Dani Lischinski, and Eli Shechtman. Stylespace analysis: Disentangled controls for stylegan image generation. arXiv preprint arXiv:2011.12799, 2020. 2, 4, 6 +[63] Yinghao Xu, Menglei Chai, Zifan Shi, Sida Peng, Ivan Skorokhodov, Aliaksandr Siarohin, Ceyuan Yang, Yujun Shen, Hsin-Ying Lee, Bolei Zhou, et al. Discoscene: Spatially disentangled generative radiance fields for controllable 3d-aware scene synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, 2023. 2 +[64] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. arXiv preprint arXiv:2112.10759, 2021. 1, 2 +[65] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In CVPR, 2022. 2, 3 +[66] Zipeng Ye, Mengfei Xia, Yanan Sun, Ran Yi, Minjing Yu, Juyong Zhang, Yu-Kun Lai, and Yong-Jin Liu. 3d-CariGAN: An end-to-end solution to 3d caricature generation from normal face photos. IEEE Transactions on Visualization and Computer Graphics, pages 1-1, 2021. 2 +[67] Fisher Yu, Yinda Zhang, Shuran Song, Ari Seff, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015. 2 +[68] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2 +[69] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision, pages 592-608. Springer, 2020. 2 +[70] Peihao Zhu, Rameen Abdal, John Femiani, and Peter Wonka. Mind the gap: Domain gap control for single shot domain adaptation for generative adversarial networks. In International Conference on Learning Representations, 2022. 2 +[71] Peihao Zhu, Rameen Abdal, Yipeng Qin, John Femiani, and Peter Wonka. Improved stylegan embedding: Where are the good latents?, 2020. 2 +[72] zllrunning. face-parsing.pytorch. 
https://github.com/zllrunning/face-parsing.PyTorch.2,3 \ No newline at end of file diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/images.zip b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..6e75fef4d47a717a4f03383af194b7c6ac57e26f --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a471cb3de891d753cdd33489e02261f424d078bb3c8d00a9d2638d7ba76fb556 +size 862848 diff --git a/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/layout.json b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b6f2cd1d5601598ca06b5d195bf7a452089cc689 --- /dev/null +++ b/2023/3DAvatarGAN_ Bridging Domains for Personalized Editable Avatars/layout.json @@ -0,0 +1,10660 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 85, + 103, + 509, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 103, + 509, + 121 + ], + "spans": [ + { + "bbox": [ + 85, + 103, + 509, + 121 + ], + "type": "text", + "content": "3DAvatarGAN: Bridging Domains for Personalized Editable Avatars" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "spans": [ + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": "Rameen Abdal" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{\\dagger 1}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " Hsin-Ying Lee" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " Peihao Zhu" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{\\dagger 1}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " Minglei Chai" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " Aliaksandr Siarohin" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " \nPeter Wonka" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": " Sergey Tulyakov" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": "KAUST " + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 70, + 141, + 523, + 213 + ], + "type": "text", + "content": "Snap Inc." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 50, + 226, + 541, + 441 + ], + "blocks": [ + { + "bbox": [ + 50, + 226, + 541, + 441 + ], + "lines": [ + { + "bbox": [ + 50, + 226, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 50, + 226, + 541, + 441 + ], + "type": "image", + "image_path": "df16d596e57fb3925b8f10971a3cd7e1f29201be7f32e2232fd0034c6fca15b4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 449, + 546, + 483 + ], + "lines": [ + { + "bbox": [ + 45, + 449, + 546, + 483 + ], + "spans": [ + { + "bbox": [ + 45, + 449, + 546, + 483 + ], + "type": "text", + "content": "Figure 1. Editable 3D avatars. We present 3DAvatarGAN, a 3D GAN able to produce and edit personalized 3D avatars from a single photograph (real or generated). Our method distills information from a 2D-GAN trained on 2D artistic datasets like Caricatures, Pixar toons, Cartoons, Comics etc. and requires no camera annotations." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 493, + 192, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 493, + 192, + 506 + ], + "spans": [ + { + "bbox": [ + 143, + 493, + 192, + 506 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 522, + 290, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 290, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 290, + 714 + ], + "type": "text", + "content": "Modern 3D-GANs synthesize geometry and texture by training on large-scale datasets with a consistent structure. Training such models on stylized, artistic data, with often unknown, highly variable geometry, and camera information has not yet been shown possible. Can we train a 3D GAN on such artistic data, while maintaining multi-view consistency and texture quality? To this end, we propose an adaptation framework, where the source domain is a pre-trained 3D-GAN, while the target domain is a 2D-GAN trained on artistic datasets. We, then, distill the knowledge from a 2D generator to the source 3D generator. To do that, we first propose an optimization-based method to align the distributions of camera parameters across domains. Second, we propose regularizations necessary to learn high-quality texture, while avoiding degenerate geometric solutions, such as flat shapes. Third, we show" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 494, + 548, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 548, + 592 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 548, + 592 + ], + "type": "text", + "content": "a deformation-based technique for modeling exaggerated geometry of artistic domains, enabling—as a byproduct—personalized geometric editing. Finally, we propose a novel inversion method for 3D-GANs linking the latent spaces of the source and the target domains. Our contributions—for the first time—allow for the generation, editing, and animation of personalized artistic 3D avatars on artistic datasets. Project Page: https://rameenabdal.github.io/3DAvatarGAN" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 601, + 387, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 601, + 387, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 601, + 387, + 613 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 620, + 547, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 620, + 547, + 694 + ], + "spans": [ + { + "bbox": [ + 304, + 620, + 547, + 694 + ], + "type": "text", + "content": "Photo-realistic portrait face generation is an iconic application demonstrating the capability of generative models especially GANs [28,30,31]. A recent development has witnessed an advancement from straightforwardly synthesizing 2D images to learning 3D structures without 3D supervision, referred to as 3D-GANs [10,41,55,64]. Such training" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 35 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 321, + 702, + 518, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 702, + 518, + 714 + ], + "spans": [ + { + "bbox": [ + 321, + 702, + 518, + 714 + ], + "type": "text", + "content": "† Part of the work was done during an internship at Snap Inc." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "4552" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 289, + 240 + ], + "type": "text", + "content": "is feasible with the datasets containing objects with highly consistent geometry, enabling a 3D-GAN to learn a distribution of shapes and textures. In contrast, artistically stylized datasets [25, 65] have arbitrary exaggerations of both geometry and texture, for example, the nose, cheeks, and eyes can be arbitrarily drawn, depending on the style of the artist as well as on the features of the subject, see Fig. 1. Training a 3D-GAN on such data becomes problematic due to the challenge of learning such an arbitrary distribution of geometry and texture. In our experiments (Sec. 5.1), 3D-GANs [10] generate flat geometry and become 2D-GANs essentially. A natural question arises, whether a 3D-GAN can synthesize consistent novel views of images belonging to artistically stylized domains, such as the ones in Fig. 1." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 241, + 289, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 241, + 289, + 326 + ], + "spans": [ + { + "bbox": [ + 46, + 241, + 289, + 326 + ], + "type": "text", + "content": "In this work, we propose a domain-adaption framework that allows us to answer the question positively. Specifically, we fine-tune a pre-trained 3D-GAN using a 2D-GAN trained on a target domain. Despite being well explored for 2D-GANs [25, 65], existing domain adaptation techniques are not directly applicable to 3D-GANs, due to the nature of 3D data and characteristics of 3D generators." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 327, + 289, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 327, + 289, + 518 + ], + "spans": [ + { + "bbox": [ + 46, + 327, + 289, + 518 + ], + "type": "text", + "content": "The geometry and texture of stylized 2D datasets can be arbitrarily exaggerated depending on the context, artist, and production requirements. Due to this, no reliable way to estimate camera parameters for each image exists, whether using an off-the-shelf pose detector [72] or a manual labeling effort. To enable the training of 3D-GANs on such challenging datasets, we propose three contributions. ① An optimization-based method to align distributions of camera parameters between domains. ② Texture, depth, and geometry regularizations to avoid degenerate, flat solutions and ensure high visual quality. Furthermore, we redesign the discriminator training to make it compatible with our task. We then propose ③ a Thin Plate Spline (TPS) 3D deformation module operating on a tri-plane representation to allow for certain large and sometimes extreme geometric deformations, which are so typical in artistic domains." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 520, + 289, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 520, + 289, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 520, + 289, + 700 + ], + "type": "text", + "content": "The proposed adaptation framework enables the training of 3D-GANs on complex and challenging artistic data. The previous success of domain adaptation in 2D-GANs unleashed a number of exciting applications in the content creation area [25, 65]. Given a single image such methods first find a latent code corresponding to it using GAN inversion, followed by latent editing producing the desired effect in the image space. Compared to 2D-GANs, the latent space of 3D-GANs is more entangled, making it more challenging to link the latent spaces between domains, rendering the existing inversion and editing techniques not directly applicable. Hence, we take a step further and explore the use of our approach to 3D artistic avatar generation and editing. Our final contribution to enable such applications is (4) a new inversion method for coupled 3D-GANs." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 701, + 288, + 714 + ], + "type": "text", + "content": "In summary, the proposed domain-adaption framework" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 304, + 72, + 547, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 547, + 157 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 547, + 157 + ], + "type": "text", + "content": "allows us to train 3D-GANs on challenging artistic datasets with exaggerated geometry and texture. We call our method 3DAvatarGAN as it—for the first time—offers generation, editing, and animation of personalized stylized, artistic avatars obtained from a single image. Our results (See Sec. 5.2) show the high-quality 3D avatars possible by our method compared to the naive fine-tuning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 167, + 392, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 167, + 392, + 179 + ], + "spans": [ + { + "bbox": [ + 306, + 167, + 392, + 179 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 186, + 547, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 186, + 547, + 319 + ], + "spans": [ + { + "bbox": [ + 304, + 186, + 547, + 319 + ], + "type": "text", + "content": "GANs and Semantic Image Editing. Generative adversarial Networks (GANs) [19, 47] are one popular type of generative model, especially for smaller high-quality datasets such as FFHQ [32], AFHQ [14], and LSUN objects [67]. For these datasets, StyleGAN [28, 30, 32] can be considered as the current state-of-the-art GAN [27, 28, 30, 32, 33]. The disentangled latent space learned by StyleGAN has been shown to exhibit semantic properties conducive to semantic image editing [1, 3, 16, 22, 36, 44, 51, 56, 62]. CLIP [46] based image editing [2, 17, 44] and domain transfer [15, 70] are another set of works enabled by StyleGAN." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 319, + 547, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 319, + 547, + 402 + ], + "spans": [ + { + "bbox": [ + 304, + 319, + 547, + 402 + ], + "type": "text", + "content": "GAN Inversion. Algorithms to project existing images into a GAN latent space are a prerequisite for GAN-based image editing. There are mainly two types of methods to enable such a projection: optimization-based methods [1,13,57,71] and encoder-based methods [5,7,48,58,69]. On top of both streams of methods, the generator weights can be further modified after obtaining initial inversion results [49]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 403, + 547, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 403, + 547, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 403, + 547, + 510 + ], + "type": "text", + "content": "Learning 3D-GANs with 2D Data. Previously, some approaches attempt to extract 3D structure from pre-trained 2D-GANs [42, 52]. Recently, inspired by Neural Radiance Field (NeRF) [9, 37, 43, 68], novel GAN architectures have been proposed to combine implicit or explicit 3D representations with neural rendering techniques [11, 12, 20, 39-41, 50, 53, 55, 63, 64]. 
In our work, we build on EG3D [11] which has current state-of-the-art results for human faces trained on the FFHQ dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 548, + 714 + ], + "type": "text", + "content": "Avatars and GANs. To generate new results in an artistic domain (e.g. anime or cartoons), a promising technique is to fine-tune an existing GAN pre-trained on photographs, e.g. [45, 54, 60]. Data augmentation and freezing lower layers of the discriminator are useful tools when fine-tuning a 2D-GAN [28, 38]. One branch of methods [18, 44, 70] investigates domain adaptation if only a few examples or only text descriptions are available. While others focus on matching the distribution of artistic datasets with diverse shapes and styles. Our work also falls in this domain. Among previous efforts, StyleCariGAN [25] proposes invertible modules in the generator to train and generate caricatures from real images. DualStyleGAN [65] learns two mapping networks in StyleGAN to control the style and structure of the new domain. Some works are trained on 3D data or require heavy labeling/engineering [21, 26, 66] and use 3D morphable models to map 2D images of carica" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 758 + ], + "type": "text", + "content": "4553" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 70, + 168, + 193 + ], + "blocks": [ + { + "bbox": [ + 49, + 70, + 168, + 193 + ], + "lines": [ + { + "bbox": [ + 49, + 70, + 168, + 193 + ], + "spans": [ + { + "bbox": [ + 49, + 70, + 168, + 193 + ], + "type": "image", + "image_path": "516e7d18529441246367b2a8888fc2a1c37173cbf2bdf5c251b7f98194755e06.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 78, + 194, + 134, + 202 + ], + "lines": [ + { + "bbox": [ + 78, + 194, + 134, + 202 + ], + "spans": [ + { + "bbox": [ + 78, + 194, + 134, + 202 + ], + "type": "text", + "content": "Naive Fine-Tuning" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "lines": [ + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "type": "text", + "content": "Figure 2. Comparison with naive fine-tuning. Comparison of generated 3D avatars with a naively fine-tuned generator " + }, + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "type": "text", + "content": " (left sub-figures) versus our generator " + }, + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 211, + 287, + 276 + ], + "type": "text", + "content": " (right sub-figures). The corresponding sub-figures show comparisons in terms of texture quality (top two rows) and geometry (bottom two rows). See Sec. 5.1 for details." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 168, + 70, + 287, + 193 + ], + "blocks": [ + { + "bbox": [ + 168, + 70, + 287, + 193 + ], + "lines": [ + { + "bbox": [ + 168, + 70, + 287, + 193 + ], + "spans": [ + { + "bbox": [ + 168, + 70, + 287, + 193 + ], + "type": "image", + "image_path": "f9574c015b5d4a9f970cef245e38b45f1237a686dcc4b322c4f7ab095c2acd23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 194, + 250, + 201 + ], + "lines": [ + { + "bbox": [ + 209, + 194, + 250, + 201 + ], + "spans": [ + { + "bbox": [ + 209, + 194, + 250, + 201 + ], + "type": "text", + "content": "Our Method" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 297, + 286, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 297, + 286, + 381 + ], + "spans": [ + { + "bbox": [ + 46, + 297, + 286, + 381 + ], + "type": "text", + "content": "tures to 3D models. However, such models fail to model the hair, teeth, neck, and clothes and suffer in texture quality. In this work, we are the first to tackle the problem of domain adaption of 3D-GANs and to produce fully controllable 3D Avatars. We employ 2D to 3D domain adaptation and distillation and make use of synthetic 2D data from StyleCariGAN [25] and DualStyleGAN [65]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 411, + 234, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 411, + 234, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 411, + 234, + 426 + ], + "type": "text", + "content": "3. Domain Adaptation for 3D-GANs" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": "The goal of domain adaptation for 3D-GANs is to adapt (both texture and geometry) to a particular style defined by a 2D dataset (Caricature, Anime, Pixar toons, Comic, and Cartoons [24, 25, 65] in our case). In contrast to 2D-StyleGAN-based fine-tuning methods that are conceptually simpler [29, 45], fine-tuning a 3D-GAN on 2D data introduces challenges in addition to domain differences, especially on maintaining the texture quality while preserving the geometry. Moreover, for these datasets, there is no explicit shape and camera information. We define the domain adaptation task as follows: Given a prior 3D-GAN i.e. 
EG3D " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "(\\mathrm{G_s})" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": " of source domain " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "(T_{\\mathrm{s}})" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": ", we aim to produce a 3D Avatar GAN " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "(\\mathrm{G_t})" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": " of the target domain " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "(T_{\\mathrm{t}})" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": " while maintaining the semantic, style, and geometric properties of " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": ", and at the same time preserving the identity of the subject between the domains " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "(T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}})" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": ". Refer to Fig. 4 in supplementary for the pipeline figure. We represent " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{2\\mathrm{D}}" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": " as a teacher 2D-GAN used for knowledge distillation fine-tuned on the above datasets. Note that as " + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 438, + 287, + 713 + ], + "type": "text", + "content": " is not assumed to contain camera parameter annotations, the training scheme must suppress artifacts such as low-quality texture under different views and flat geometry (See Fig. 2). In the following, we discuss the details of our method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 72, + 452, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 452, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 452, + 84 + ], + "type": "text", + "content": "3.1. How to align the cameras?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": "Selecting appropriate ranges for camera parameters is of paramount importance for high-fidelity geometry and texture detail. Typically, such parameters are empirically estimated, directly computed from the dataset using an off-the-shelf pose detector [10], or learned during training [8]. In domains we aim to bridge, such as caricatures for which a 3D model may not even exist, directly estimating the camera distribution is problematic and, hence, is not assumed by our method. Instead, we find it essential to ensure that the camera parameter distribution is consistent across the source and target domains. For the target domain, we use StyleGAN2 trained on FFHQ, fine-tuned on artistic datasets [25, 65]. 
Assuming that the intrinsic parameters of all the cameras are the same, we aim to match the distribution of extrinsic camera parameters of " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{2\\mathrm{D}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " and train our final " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " using it (see illustration in Fig. 2 of the supplementary materials). To this end, we define an optimization-based method to match the sought distributions. The first step is to identify a canonical pose image in " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{2\\mathrm{D}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": ", where the yaw, pitch, and roll parameters are zero. According to Karras et al., [31], the image corresponding to the mean latent code satisfies this property. Let " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " be the camera Euler angles in a spherical coordinate system, " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " be the radius of the sphere and camera lookat point, and " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " be a function that converts these parameters into the camera-to-world matrix. 
Let " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "I_{\\mathrm{s}}(w,\\theta ,\\phi ,c,r) = G_{\\mathrm{s}}(w,M(\\theta ,\\phi ,c,r))" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "I_{2\\mathrm{D}}(w) = G_{2\\mathrm{D}}(w)" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " represent an arbitrary image generated by " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "G_{2\\mathrm{D}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": ", respectively, given the " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " code variable. Let " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "k_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " be the face key-points detected by the detector " + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "inline_equation", + "content": "K_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 91, + 545, + 450 + ], + "type": "text", + "content": " [72], then" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 460, + 545, + 492 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 460, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 313, + 460, + 545, + 492 + ], + "type": "interline_equation", + "content": "\\left(c ^ {\\prime}, r ^ {\\prime}\\right) := \\underset {(c, r)} {\\arg \\min } \\mathrm {L} _ {\\mathrm {k d}} \\left(I _ {\\mathrm {s}} \\left(w _ {\\text {a v g}} ^ {\\prime}, 0, 0, c, r\\right), I _ {2 \\mathrm {D}} \\left(w _ {\\text {a v g}}\\right)\\right), \\tag {1}", + "image_path": "f8439d40c7ae359d75653e4fa94ffa1a94cfc2734b1e2b823055d7c59b6b0a39.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{L_{kd}}(I_1,I_2) = \\| k_{\\mathrm{d}}(I_1) - k_{\\mathrm{d}}(I_2)\\| _1" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{avg}}" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "w_{\\mathrm{avg}}^{\\prime}" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " are the mean " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " latent codes of " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": 
"inline_equation", + "content": "\\mathrm{G}_{2\\mathrm{D}}" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": ", respectively. In our results, " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "r^\\prime" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " is determined to be 2.7 and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "c^{\\prime}" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " is approximately [0.0, 0.05, 0.17]. The next step is to determine a safe range of the " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " parameters. Following prior works, StyleFlow [3] and FreeStyleGAN [35] (see Fig.5 of the paper), we set these parameters as " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\theta^{\\prime}\\in [-0.45,0.45]" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "inline_equation", + "content": "\\phi^{\\prime}\\in [-0.35,0.35]" + }, + { + "bbox": [ + 304, + 492, + 545, + 588 + ], + "type": "text", + "content": " in radians." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 598, + 534, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 598, + 534, + 611 + ], + "spans": [ + { + "bbox": [ + 306, + 598, + 534, + 611 + ], + "type": "text", + "content": "3.2. What loss functions and regularizers to use?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "Next, although the camera systems are aligned, the given dataset may not stem from a consistent 3D model, e.g., in the case of caricatures or cartoons. This entics the generator " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": " to converge to an easier degenerate solution with a flat geometry. Hence, to benefit from the geometric prior of " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "G_{s}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": ", another important step is to design the loss functions and regularizers for a selected set of parameters to update in " + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": ". 
Next, we discuss these design choices:" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4554" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 73, + 225, + 342 + ], + "blocks": [ + { + "bbox": [ + 53, + 73, + 225, + 342 + ], + "lines": [ + { + "bbox": [ + 53, + 73, + 225, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 225, + 342 + ], + "type": "image", + "image_path": "be9123cd168c87d431112c83004391d76550d1a56bad9d921aced8ac8a8ee51f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "lines": [ + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "type": "text", + "content": "Figure 3. Domain adaptation. Domain adaptation results of images from source domain " + }, + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "type": "text", + "content": " (top row in each sub-figure) to target domain " + }, + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 351, + 546, + 373 + ], + "type": "text", + "content": ". Rows two to five show corresponding 3D avatar results from different viewpoints." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 225, + 73, + 384, + 342 + ], + "blocks": [ + { + "bbox": [ + 225, + 73, + 384, + 342 + ], + "lines": [ + { + "bbox": [ + 225, + 73, + 384, + 342 + ], + "spans": [ + { + "bbox": [ + 225, + 73, + 384, + 342 + ], + "type": "image", + "image_path": "d715cafb2b6af2c7094e88f5dff4bbf7e8fb8e731426e3ca0f77d84460d8c3a4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 385, + 74, + 545, + 342 + ], + "blocks": [ + { + "bbox": [ + 385, + 74, + 545, + 342 + ], + "lines": [ + { + "bbox": [ + 385, + 74, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 385, + 74, + 545, + 342 + ], + "type": "image", + "image_path": "6fe6fc94d25497f665769f6248b26f4df3aedbb4db51ea523686a25d8e882e8e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "text", + "content": "Loss Functions. To ensure texture quality and diversity, we resort to the adversarial loss used to fine-tune GANs as our main loss function. We use the standard non-saturating loss to train the generator and discriminator networks used in EG3D [11]. We also perform lazy density regularization to ensure consistency of the density values in the final finetuned model " + }, + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 380, + 287, + 464 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "spans": [ + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "text", + "content": "Texture Regularization. Since the texture can be entangled with the geometry information, determining which layers to update is important. To make use of the fine-style information encoded in later layers, it is essential to update the " + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "inline_equation", + "content": "tRGB" + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "text", + "content": " layer parameters (outputting tri-plane features) before the neural rendering stage. " + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "inline_equation", + "content": "tRGB" + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "text", + "content": " are convolutional layers that transform feature maps to 3 channels at each resolution (96 channels in triplanes). Moreover, since the network has to adapt to a color distribution of " + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 46, + 469, + 287, + 636 + ], + "type": "text", + "content": ", it is essential to update the decoder (MLP layers) of the neural rendering pipeline as well. Given the EG3D architecture, we also update the super-resolution layer parameters to ensure the coherency between the low-resolution and high-resolution outputs seen by the discriminator D." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": "Geometry Regularization. In order to allow the network to learn the structure distribution of " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " and at the same time ensure properties of " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " latent spaces are preserved, we update the earlier layers with regularization. This also encourages the latent spaces of " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " to be easily linked. 
Essentially, we update the deviation parameter " + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": " from the" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " activations of the " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " space [62]. The " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " activations are predicted by " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\mathrm{A}(w)" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\mathrm{A}" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " is the learned affine function in EG3D. The " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " activations scale the kernels of a particular layer. In order to preserve the identity as well as geometry such that the optimization of " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": " does not deviate too far away from the original domain " + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 380, + 545, + 464 + ], + "type": "text", + "content": ", we introduce a regularizer given by" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 387, + 464, + 545, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 387, + 464, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 387, + 464, + 545, + 477 + ], + "type": "interline_equation", + "content": "R (\\Delta s) := \\| \\Delta s \\| _ {1}. \\tag {2}", + "image_path": "cf1f92cf5cf81f7ed81930f32a10556ab099f8e9fec759141836e25a9e552656.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": "Note that we apply " + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "inline_equation", + "content": "\\mathrm{R}(\\Delta s)" + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": " regularization in a lazy manner, i.e., with density regularization. 
Interestingly, after training, we can interpolate between " + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "inline_equation", + "content": "s + \\Delta s" + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": " parameters to interpolate between the geometries of samples in " + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 483, + 545, + 542 + ], + "type": "text", + "content": " (See Fig. 5)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": "Depth Regularization. Next, we observe that even though the above design choice produces better geometry for " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": ", some samples from " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": " can still lead to flatter geometry, and it is hard to detect these cases. We found that the problem is related to the relative depth of the background to the foreground. To circumvent this problem, we use an additional regularization where we encourage the average background depth of " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": " to be similar to " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{b}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": " be a face background segmentation network [34]. We first compute the average background depth of the samples given by " + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 543, + 546, + 674 + ], + "type": "text", + "content": ". This average depth is given by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 347, + 683, + 545, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 683, + 545, + 715 + ], + "spans": [ + { + "bbox": [ + 347, + 683, + 545, + 715 + ], + "type": "interline_equation", + "content": "a _ {\\mathrm {d}} := \\frac {1}{M} \\sum_ {n = 1} ^ {M} \\left(\\frac {1}{N _ {n}} \\| D _ {n} \\odot \\mathrm {S} _ {\\mathrm {b}} (I _ {n}) \\| _ {F} ^ {2}\\right). 
\\tag {3}", + "image_path": "8dd8da720297ad773be45a35bfc73fa8222f5695c1363ebb97412943b42b6b78.jpg" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4555" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 72, + 274, + 342 + ], + "blocks": [ + { + "bbox": [ + 63, + 72, + 274, + 342 + ], + "lines": [ + { + "bbox": [ + 63, + 72, + 274, + 342 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 274, + 342 + ], + "type": "image", + "image_path": "8dff4311d2c0df9713a53db9cc71a323be40d2dafbf0424e7022191e653e146c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "lines": [ + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "spans": [ + { + "bbox": [ + 47, + 346, + 287, + 368 + ], + "type": "text", + "content": "Figure 4. 3D avatars from real images. Projection of real images on the 3D avatar generators." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "spans": [ + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "D_{n}" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": " is the depth map of the image " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "I_{n}" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": " sampled from " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{s}}" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": " represents the Hadamard product, " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": " is the number of the sampled images, and " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "N_{n}" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": " is the number of background pixels in " + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "inline_equation", + "content": "I_{n}" + }, + { + "bbox": [ + 47, + 373, + 287, + 421 + ], + "type": "text", + "content": ". 
Finally, regularization is defined as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 428, + 287, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 428, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 88, + 428, + 287, + 441 + ], + "type": "interline_equation", + "content": "\\mathrm {R} (D) := \\left\\| a _ {\\mathrm {d}} \\cdot J - \\left(D _ {\\mathrm {t}} \\odot \\mathrm {S} _ {\\mathrm {b}} \\left(I _ {\\mathrm {t}}\\right)\\right) \\right\\| _ {F}, \\tag {4}", + "image_path": "6160c9e93b9ffa418aa184b297c435e1350ea53dfc1d925668aedfe6c495321d.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "spans": [ + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": " is the depth map of the image " + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "inline_equation", + "content": "I_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": " sampled from " + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "inline_equation", + "content": "\\mathbf{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": " is the matrix of ones having the same spatial dimensions as " + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "inline_equation", + "content": "D_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 449, + 287, + 483 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 491, + 198, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 198, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 198, + 502 + ], + "type": "text", + "content": "3.3. What discriminator to use?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": "Given that the data in " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": " is not paired and " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": " is not assumed to contain camera parameter annotations, the choice of the discriminator (D) used for this task is also a critical design choice. Essentially, we use the unconditional version of the dual discriminator proposed in EG3D, and hence, we do not condition the discriminator on the camera information. 
As a result, during the training, " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": " generates arbitrary images with pose using " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{M}(\\theta', \\phi', c', r')" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": ", and the discriminator discriminates these images using arbitrary images from " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": ". We train the discriminator from scratch and in order to adapt " + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}} \\rightarrow T_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 509, + 287, + 653 + ], + "type": "text", + "content": ", we use the StyleGAN-ADA [28] training scheme and use R1 regularization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 659, + 287, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 287, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 287, + 683 + ], + "type": "text", + "content": "3.4. How to incorporate larger geometric deformations between domains?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "While the regularizers are used to limit the geometric changes when adapting from " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": ", modeling large ge" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "content": "ometric deformations, e.g., in the caricature dataset is another challenge. One choice to edit the geometry is to use the properties of tri-plane features learned by EG3D. We start out by analyzing these three planes in " + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "content": ". We observe that the frontal plane encodes most of the information required to render the final image. To quantify this, we sample images and depth maps from " + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "content": " and swap the front and the other planes from two random images. Then we compare the difference in RGB values of the images and the Chamfer distance of the depth maps. 
While swapping the frontal tri-planes, the final images are completely swapped, and the Chamfer distance changes by " + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "inline_equation", + "content": "80\\sim 90\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "content": " matching the swapped image depth map. In the case of the other two planes, the RGB image is not much affected and the Chamfer distance of the depth maps is reduced by only " + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "inline_equation", + "content": "20\\sim 30\\%" + }, + { + "bbox": [ + 304, + 72, + 545, + 262 + ], + "type": "text", + "content": " in most cases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": "Given the analysis, we focus to manipulate the " + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "inline_equation", + "content": "2D" + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": " front plane features to learn additional deformation or exaggerations. We learn a TPS (Thin Plate Spline) [61] network on top of the front plane. Our TPS network is conditioned both on the front plane features as well as the " + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": " space to enable multiple transformations. The architecture of the module is similar to the standard StyleGAN2 layer with an MLP appended at the end to predict the control points that transform the features. Hence, as a byproduct, we also enable 3D-geometry editing guided by the learned latent space. We train this module separately after " + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "inline_equation", + "content": "G_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": " has been trained. We find that joint training is unstable due to exploding gradients arising from the large domain gap between " + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 263, + 546, + 431 + ], + "type": "text", + "content": " in the initial stages. 
Formally, we define this transformation as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 392, + 439, + 545, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 439, + 545, + 452 + ], + "spans": [ + { + "bbox": [ + 392, + 439, + 545, + 452 + ], + "type": "interline_equation", + "content": "\\mathrm {T} (w, f) := \\Delta c, \\tag {5}", + "image_path": "e4cd8824f6fe9de91bcc7048517ceba25e181b256d17448f480678758f559a60.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "text", + "content": " is the latent code, " + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "text", + "content": " is the front plane, and " + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 304, + 460, + 545, + 483 + ], + "type": "text", + "content": " are the control points." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "c_{\\mathrm{I}}" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " be the initial control points producing an identity transformation, " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "(c_{1}, c_{2})" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " be the control points corresponding to front planes " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "(f_{1}, f_{2})" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " sampled using " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " codes " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "(w_{1}, w_{2})" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": ", respectively, and " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "(c_{1}', c_{2}')" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " be points with " + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "inline_equation", + "content": "(w_{1}, w_{2})" + }, + { + "bbox": [ + 304, + 484, + 545, + 555 + ], + "type": "text", + "content": " swapped in the TPS module. 
To regularize and encourage the module to learn different deformations, we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 563, + 548, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 548, + 605 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 548, + 605 + ], + "type": "interline_equation", + "content": "\\mathrm {R} \\left(\\mathrm {T} _ {1}\\right) := \\alpha \\sum_ {n = 1} ^ {2} \\| c _ {I} - c _ {n} \\| _ {1} - \\beta \\| c _ {1} - c _ {2} \\| _ {1} - \\sigma \\| c _ {1} ^ {\\prime} - c _ {2} ^ {\\prime} \\| _ {1}. \\tag {6}", + "image_path": "9e378d2532ce2bdcc97bef17751bd3823407639e44e1cd8ecd333a399b833d3f.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": "We use initial control point regularization to regularize large deviations in the control points which would otherwise explode. Additionally, to learn extreme exaggerations in " + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": " and 'in expectation', conform to the target distribution in the dataset, we add an additional loss term. Let " + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "inline_equation", + "content": "S(I)" + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": " be the soft-argmax output of the face segmentation network [34] given an image " + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": " and assuming that " + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": " generalizes to caricatures, then" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 362, + 700, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 700, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 362, + 700, + 545, + 714 + ], + "type": "interline_equation", + "content": "\\mathrm {R} \\left(\\mathrm {T} _ {2}\\right) := \\| \\mathrm {S} \\left(\\mathrm {G} _ {\\mathrm {t}} (w)\\right), \\mathrm {S} \\left(I _ {\\mathrm {t}}\\right) \\| _ {1} \\tag {7}", + "image_path": "c5b8b4cae28e48e229e1d63b5fa6dcfb23418aeb89867a00c32a9ebed00e935b.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4556" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 71, + 286, + 159 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 286, + 159 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 286, + 159 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 286, + 159 + ], + "type": "image", + "image_path": "b54b53f3e6f87b4fc83c381fce3ac23524a5f98093017d653f4f9af65eb27f18.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": 
"image_body" + }, + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "lines": [ + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "spans": [ + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "type": "text", + "content": "Figure 5. Interpolation of " + }, + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "type": "text", + "content": ". Geometric deformation using the interpolation of learned " + }, + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 47, + 168, + 287, + 191 + ], + "type": "text", + "content": " parameters." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 201, + 286, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 201, + 286, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 201, + 286, + 250 + ], + "type": "text", + "content": "Eq. 6, Eq. 7, and adversarial training loss are used to train the " + }, + { + "bbox": [ + 47, + 201, + 286, + 250 + ], + "type": "inline_equation", + "content": "TPS" + }, + { + "bbox": [ + 47, + 201, + 286, + 250 + ], + "type": "text", + "content": " module. We adopt gradient clipping to make sure that the training does not diverge. See the illustrations in Fig. 3 and Fig. 4 of the supplementary materials." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 262, + 286, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 286, + 275 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 286, + 275 + ], + "type": "text", + "content": "4. Personalized Avatar Generation and Editing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "spans": [ + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "text", + "content": "Although 3D domain adaptation adapts " + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}} \\leftrightarrow T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "text", + "content": ", it is still a challenge to effectively link the latent spaces of " + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 283, + 286, + 376 + ], + "type": "text", + "content": " to generate personalized 3D avatars using a single photograph as the reference image. Particularly, the challenge arises due to the discrepancy in the coupled latent spaces when dealing with the projection of real photographs on 3D generators. Moreover, one would like to edit and animate these 3D avatars." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": "Projection. 
The task is to project a real image into the latent space of " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": ", transfer the latent to " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": ", and further optimize it to construct a 3D avatar. First, we use an optimization-based method to find the " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " code that minimizes the similarity between the generated and the real image in " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": ". To achieve this, the first step is to align the cameras. We follow the steps mentioned in Sec. 3.1 for this step. Next, we use pixel-wise MSE loss and LPIPS loss to project the image into " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " [1]. Additionally, to preserve the identity of the subject, we use attribute classifiers e.g. caricature dataset [24] provides the coupled attribute information of real images and caricatures. We use such attribute classifier [24,25] in a post-hoc manner as we notice that such networks can affect the texture in the target domain and could degenerate to narrow style outputs if applied during training. Moreover, such networks may not be available for all target domains. To avoid overfitting into " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " and encourage the easier transfer of the optimized latent code to " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": ", we use " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " space optimization for this step. Finally, we initialize this " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " code for " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " and use additional attribute classifier loss [25] for " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " domain along with Depth regularization " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{R}(D)" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " (Eq. 4). 
As an approximation, we assume that attribute classifier [24, 25] generalizes across all domains. We use " + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "inline_equation", + "content": "\\mathcal{W} / \\mathcal{W} +" + }, + { + "bbox": [ + 46, + 378, + 286, + 676 + ], + "type": "text", + "content": " space optimization to control the quality and diversity of the outputs. See Algorithm 1 in supplementary for the description." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "content": "Editing and Animation. Since our 3D domain adaptation is designed to preserve the properties of " + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 47, + 677, + 286, + 713 + ], + "type": "text", + "content": " spaces, we can perform semantic edits via InterFaceGAN [51]," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": "GANSpace [22], StyleSpace [62] etc., and geometric edits using TPS (Sec. 3.4) and " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " interpolation (Sec. 3.2). To perform video editing, we design an encoder for EG3D based on " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "e4e" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " [58] to encode videos and transfer the edits from " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " based on the " + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 304, + 72, + 545, + 156 + ], + "type": "text", + "content": " codes [4,6,59]. We leave a more fine-grained approach for video processing as future work." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 169, + 359, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 169, + 359, + 182 + ], + "spans": [ + { + "bbox": [ + 306, + 169, + 359, + 182 + ], + "type": "text", + "content": "5. Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 190, + 424, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 190, + 424, + 202 + ], + "spans": [ + { + "bbox": [ + 306, + 190, + 424, + 202 + ], + "type": "text", + "content": "5.1. 
Quantitative Results" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "text", + "content": "In this section, we consider three important evaluations to verify the quality of the texture, geometry, and identity preservation in the new domain using the Caricature, Cartoons, and Pixar toons datasets. We evaluate the ablation of our design choices in the supplementary materials. In the evaluation, let " + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "text", + "content": " be the baseline naive fine-tuning method which is trained with all the parameters using the losses in EG3D fine-tuned from FFHQ trained prior " + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "text", + "content": ". Note here we still align the cameras in " + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 209, + 545, + 340 + ], + "type": "text", + "content": " using the method defined in Sec. 3.1 and use adaptive discriminator [28] with R1 regularization for a fair comparison." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": "Texture Quality. To verify the quality of the texture, diversity of samples as well as to some extent, the geometry in the target domain " + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": ", we compare the FID [23] scores using " + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": " in Table 1. Note that in the case of Caricatures, we report two scores i.e. with and without using the attribute classifier loss in the training as discussed in Sec. 4. Notice that our method outperforms the naive baseline method by a huge margin in some cases, especially in Caricatures and Cartoons. We attribute these differences to the mode collapse prone training of " + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": " which is correlated with flat geometry degenerate solution. 
We show visual results of the flat geometries learned by " + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 342, + 545, + 497 + ], + "type": "text", + "content": " and comparison in Fig. 2." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "Geometric Quality. To quantify the flat geometries, in Table 2, we show three scores that help us understand such degenerate solutions. Here we consider coupled depth maps generated from sampling in the domains " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "). First, we compute the expectation of the absolute mean differences (" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ") of the corresponding foreground depth maps sampled from " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ". We also compute the expectation of the absolute standard deviation differences (" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ") for the same setting. Here, we assume that the flatter geometries have a large difference in the depth maps as compared to the prior as indicated by " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ". Moreover, " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{d}}" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": " computes the distance in the distribution of the depth values, where a larger difference indicates a narrow distribution, and hence a flatter geometry. 
We also notice that the flat geometry is correlated with the generator learning diverse poses when images are rendered under standard canonical camera parameters i.e. " + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{M}(0,0,c,r)" + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": ". We hypothesize in the case of the flatter geometries, the model learns to pose in" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 315, + 757 + ], + "type": "text", + "content": "4557" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 69, + 547, + 165 + ], + "blocks": [ + { + "bbox": [ + 49, + 69, + 547, + 165 + ], + "lines": [ + { + "bbox": [ + 49, + 69, + 547, + 165 + ], + "spans": [ + { + "bbox": [ + 49, + 69, + 547, + 165 + ], + "type": "image", + "image_path": "81649d86574bde316f083bf9578b5c3b43f0db2eb221bbdfe805561260412bd4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 171, + 546, + 195 + ], + "lines": [ + { + "bbox": [ + 46, + 171, + 546, + 195 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 546, + 195 + ], + "type": "text", + "content": "Figure 6. Deformations using TPS. Geometric edits using our proposed TPS (Thin Plate Spline) module learned on the frontal tri-plane features. Each sub-figure shows a 3D avatar and three examples of TPS deformations sampled from the learned 3D deformation space." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 83, + 269, + 252, + 306 + ], + "blocks": [ + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "text", + "content": "Table 1. FID Computation. FID (Frechet Inception Distance) between the 2D dataset and the samples generated by the fine-tuned 3D GAN using baseline " + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "inline_equation", + "content": "(\\mathrm{G_{base}})" + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "text", + "content": " and Ours " + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "inline_equation", + "content": "(\\mathrm{G_t})" + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "text", + "content": " . " + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "inline_equation", + "content": "^{\\prime \\prime}*" + }, + { + "bbox": [ + 46, + 204, + 288, + 258 + ], + "type": "text", + "content": " represents the score with the inclusion of the attribute classifier loss discussed in Sec. 3.2." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 269, + 252, + 306 + ], + "lines": [ + { + "bbox": [ + 83, + 269, + 252, + 306 + ], + "spans": [ + { + "bbox": [ + 83, + 269, + 252, + 306 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>Caricatures</th><th>Cartoons</th><th>Pixar Toons</th></tr>
<tr><td>Gbase</td><td>67.8</td><td>79.0</td><td>15.1</td></tr>
<tr><td>Gt (Ours)</td><td>19.4 / 20.2*</td><td>12.8</td><td>12.4</td></tr></table>
", + "image_path": "b75b22dfc573fbc5daadb15ad48e78d01c25f88687fe0be3a16c2d82bb359035.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 72, + 361, + 263, + 440 + ], + "blocks": [ + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "lines": [ + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": "Table 2. Geometry Evaluation. Comparing the geometry using baseline method " + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "inline_equation", + "content": "(\\mathrm{G}_{\\mathrm{base}})" + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": " and Ours " + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "inline_equation", + "content": "(\\mathrm{G}_{\\mathrm{t}})" + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": ". For the definition of " + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "inline_equation", + "content": "M_{\\mathrm{d}}" + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{d}}" + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "inline_equation", + "content": "\\mathrm{R}(\\mathrm{T}_2)" + }, + { + "bbox": [ + 46, + 319, + 288, + 353 + ], + "type": "text", + "content": ", refer to Sec. 5.1." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 72, + 361, + 263, + 440 + ], + "lines": [ + { + "bbox": [ + 72, + 361, + 263, + 440 + ], + "spans": [ + { + "bbox": [ + 72, + 361, + 263, + 440 + ], + "type": "table", + "html": "
<table><tr><th>Metric</th><th>Method</th><th>Caricatures</th><th>Cartoons</th><th>Pixar</th></tr>
<tr><td rowspan="2">Md ↓</td><td>Gbase</td><td>0.47</td><td>0.21</td><td>0.29</td></tr>
<tr><td>Gt (Ours)</td><td>0.21</td><td>0.13</td><td>0.13</td></tr>
<tr><td rowspan="2">Sd ↓</td><td>Gbase</td><td>0.22</td><td>0.14</td><td>0.15</td></tr>
<tr><td>Gt (Ours)</td><td>0.15</td><td>0.10</td><td>0.09</td></tr>
<tr><td rowspan="2">R(T2) ↓</td><td>Gbase</td><td>2.99</td><td>3.39</td><td>4.01</td></tr>
<tr><td>Gt (Ours)</td><td>2.27</td><td>1.62</td><td>1.56</td></tr></table>
", + "image_path": "443cb2fcc18dcfe0d24843254017e56f49ff46e89b5977e915d18286a9385580.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 82, + 485, + 253, + 522 + ], + "blocks": [ + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "lines": [ + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "spans": [ + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "type": "text", + "content": "Table 3. Identity Preservation. Identity preservation using baseline " + }, + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "type": "inline_equation", + "content": "(\\mathrm{G}_{\\mathrm{base}})" + }, + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "type": "text", + "content": " and Ours " + }, + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "type": "inline_equation", + "content": "(\\mathrm{G}_{\\mathrm{t}})" + }, + { + "bbox": [ + 46, + 454, + 287, + 477 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 82, + 485, + 253, + 522 + ], + "lines": [ + { + "bbox": [ + 82, + 485, + 253, + 522 + ], + "spans": [ + { + "bbox": [ + 82, + 485, + 253, + 522 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>Caricatures</th><th>Cartoons</th><th>Pixar Toons</th></tr>
<tr><td>Gbase</td><td>1.28</td><td>0.92</td><td>0.85</td></tr>
<tr><td>Gt (Ours)</td><td>0.87</td><td>0.81</td><td>0.73</td></tr></table>
", + "image_path": "6fbead03a24f92fee3853dded50ba16618c8bf076ffa0a9ac7367b45c29a9db6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": "formation in the earlier layers instead of being camera view-dependent. To quantify this, since pose information may not be available for some domains (e.g. cartoons), we compute the " + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathrm{R}(\\mathrm{T}_2)" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": " scores between corresponding images in the domain " + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{base}}" + }, + { + "bbox": [ + 46, + 533, + 287, + 652 + ], + "type": "text", + "content": "). Note that these scores are computed without the TPS module. Our scores are lower in all three metrics, hence, validating that our method avoids the degenerate solution and preserves the geometric distribution of the prior. For discussion on the TPS module and ablations refer to the supplementary materials." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Identity Preservation. Identity preservation score is another important evaluation to check the quality of latent space linking between " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": ". In Table 3, we compute the attribute loss (BCE loss) between the domains " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "T_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": " using the attribute classifiers [24, 25]. 
Note that our method" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 332, + 204, + 522, + 522 + ], + "blocks": [ + { + "bbox": [ + 332, + 204, + 522, + 522 + ], + "lines": [ + { + "bbox": [ + 332, + 204, + 522, + 522 + ], + "spans": [ + { + "bbox": [ + 332, + 204, + 522, + 522 + ], + "type": "image", + "image_path": "ffcb362871dc7ba10eb393f7d821dc10ff50e964ee7a1b5456194ee10c11b4a4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 532, + 545, + 555 + ], + "lines": [ + { + "bbox": [ + 305, + 532, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 305, + 532, + 545, + 555 + ], + "type": "text", + "content": "Figure 7. Local edits. Local edits performed on the 3D avatars using the " + }, + { + "bbox": [ + 305, + 532, + 545, + 555 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 305, + 532, + 545, + 555 + ], + "type": "text", + "content": " space." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 563, + 534, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 534, + 576 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 534, + 576 + ], + "type": "text", + "content": "is able to preserve the identity better across the domains." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 586, + 419, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 586, + 419, + 599 + ], + "spans": [ + { + "bbox": [ + 306, + 586, + 419, + 599 + ], + "type": "text", + "content": "5.2. Qualitative Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": "For qualitative results, we show the results of the domain adaptation, as well as the personalized edits (geometric and semantic), performed on the resultant 3D avatars. First, in order to show the quality of domain adaptation, identity preservation, and geometric consistency, in Fig. 3, we show results from " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{s}}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " and corresponding results from 3D avatar generator " + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 304, + 605, + 547, + 715 + ], + "type": "text", + "content": " trained on Caricatures, Pixar toons, Cartoons, and Comic domains. 
Next, in order to show that the method generalizes to real images, we use the method described in" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4558" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 97, + 70, + 496, + 342 + ], + "blocks": [ + { + "bbox": [ + 97, + 70, + 496, + 342 + ], + "lines": [ + { + "bbox": [ + 97, + 70, + 496, + 342 + ], + "spans": [ + { + "bbox": [ + 97, + 70, + 496, + 342 + ], + "type": "image", + "image_path": "b1f8079284bc58efc40c07e2aaf6929d07f1e41440083f4cf8015ce8654f1cec.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "lines": [ + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "spans": [ + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "type": "text", + "content": "Figure 8. 3D avatar animation. Animation of 3D avatars generated using a driving video encoded in source domain " + }, + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "type": "inline_equation", + "content": "T_{s}" + }, + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "type": "text", + "content": " and applied to samples in target domain " + }, + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 46, + 350, + 547, + 384 + ], + "type": "text", + "content": ". The top row shows the driving video and the subsequent rows show generated animations using a random Caricature or Pixar toon. The head pose is changed in each frame of the generated animation to show 3D consistency." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "text", + "content": "Sec. 4 to project and transfer the latent code from " + }, + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "inline_equation", + "content": "\\mathrm{G_t}" + }, + { + "bbox": [ + 46, + 396, + 288, + 468 + ], + "type": "text", + "content": " to produce the 3D avatars. In Fig. 4, we show our results of real to 3D avatar transfer. Notice the quality both in terms of texture as well as geometry for both these results achieved by our method. Next, we show geometric and semantic edits possible to produce personalized 3D avatars:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": "Geometry Edits. We show two types of geometric edits i.e. " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " interpolation (Sec. 
3.2) and deformation using " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "TPS" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " (Sec. 3.4). First, in Fig. 5, we show the geometry interpolation by interpolating between original " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " activations of " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "\\mathrm{G_s}" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " and learned " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "\\Delta s" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " parameters. In Fig. 6, we show some additional exaggerations in caricatures using the learned 3D deformation latent space of " + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "inline_equation", + "content": "TPS" + }, + { + "bbox": [ + 46, + 475, + 287, + 558 + ], + "type": "text", + "content": " module." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": "Semantic Edits and Animation. Since in our method, we encourage the latent regularization to preserve the properties of the latent space learned by the " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": " generator, in Fig. 7 we show " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": " space edits performed on the 3D avatars. Notice the quality of edits in terms of locality and adaptability. Additionally, we can edit semantics like hair as opposed to 3D morphable model based methods. In Fig. 8, thanks to the latent space semantics preservation ensured by our method, we can perform some video edits to create a coherent animation based on the difference of " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": " codes of video encoded in " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{s}}" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": " (Sec. 4) and applied to layers " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "7 - 10" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{G}_{\\mathrm{t}}" + }, + { + "bbox": [ + 46, + 559, + 288, + 715 + ], + "type": "text", + "content": ". Notice the quality of expressions, identity preservation, and 3D consistency across each identity in each row." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 395, + 379, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 395, + 379, + 407 + ], + "spans": [ + { + "bbox": [ + 306, + 395, + 379, + 407 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 415, + 545, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 545, + 704 + ], + "type": "text", + "content": "We tackled two open research problems in this paper. In the first part, we proposed the first domain adaptation method for 3D-GANs to the best of our knowledge. This part yields two linked EG3D generators, one in the photorealistic source domain of faces, and another EG3D generator in an artistic target domain. As possible target domains, we show results for cartoons, caricatures, and Comics. In the second part, we built on domain adaptation to create 3D avatars in an artistic domain that can be edited and animated. Our framework consists of multiple technical components introduced in this paper. First, we propose a technique for camera space estimation for artistic domains. Second, we introduce a set of regularizers and loss functions that can regularize the fine-tuning of EG3D in such a way that enough of the 3D structure and geometry of the original model is kept, while the distinguishing attributes of the artistic domain, such as textures and colors and local geometric deformations can still be learned. Third, we introduce a geometric deformation module that can reintroduce larger geometric deformations in a controlled manner. These larger geometric deformations can interact and cooperate with EG3D so that semantic edits are still possible. Finally, we propose an embedding algorithm that is especially suitable for two linked EG3D generator networks." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4559" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 145 + ], + "type": "text", + "content": "[1] Rameen Abdul, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4432-4441, Seoul, Korea, 2019. IEEE. 2, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 146, + 288, + 201 + ], + "type": "text", + "content": "[2] Rameen Abdal, Peihao Zhu, John Femiani, Niloy Mitra, and Peter Wonka. 
Clip2stylegan: Unsupervised extraction of stylegan edit directions. In ACM SIGGRAPH 2022 Conference Proceedings, SIGGRAPH '22, New York, NY, USA, 2022. Association for Computing Machinery. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 202, + 288, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 246 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 246 + ], + "type": "text", + "content": "[3] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. ACM Trans. Graph., 40(3), may 2021. 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 247, + 288, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 288, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 288, + 278 + ], + "type": "text", + "content": "[4] Rameen Abdal, Peihao Zhu, Niloy J. Mitra, and Peter Wonka. Video2stylegan: Disentangling local and global variations in a video, 2022. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 53, + 280, + 287, + 323 + ], + "type": "text", + "content": "[5] Yuval Alaluf, Or Patashnik, and Daniel Cohen-Or. Restyle: A residual-based stylegan encoder via iterative refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2021. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 324, + 287, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 287, + 367 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 287, + 367 + ], + "type": "text", + "content": "[6] Yuval Alaluf, Or Patashnik, Zongze Wu, Asif Zamir, Eli Shechtman, Dani Lischinski, and Daniel Cohen-Or. Third time's the charm? image and video editing with stylegan3. CoRR, abs/2201.13433, 2022. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 369, + 287, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 369, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 53, + 369, + 287, + 411 + ], + "type": "text", + "content": "[7] Yuval Alaluf, Omer Tov, Ron Mokady, Rimon Gal, and Amit H. Bermano. Hyperstyle: Stylegan inversion with hypernetworks for real image editing. CoRR, abs/2111.15666, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 413, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 413, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 413, + 287, + 434 + ], + "type": "text", + "content": "[8] Anonymous. 3d generation on imagenet. In Open Review, 2023. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 53, + 436, + 287, + 501 + ], + "type": "text", + "content": "[9] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855–5864, 2021. 
2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 502, + 287, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 502, + 287, + 557 + ], + "spans": [ + { + "bbox": [ + 48, + 502, + 287, + 557 + ], + "type": "text", + "content": "[10] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 1, 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 558, + 287, + 612 + ], + "type": "text", + "content": "[11] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3d generative adversarial networks, 2021. 2, 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "text", + "content": "[12] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5799-5809, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "text", + "content": "[13] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. Inout: Diverse image outpainting via gan inversion. In IEEE Conference on Computer Vision and Pattern Recognition, 2022. 2" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[14] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 118, + 545, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 545, + 138 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 545, + 138 + ], + "type": "text", + "content": "[15] Min Jin Chong and David A. Forsyth. Jojogan: One shot face stylization. CoRR, abs/2112.11641, 2021. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 140, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 140, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 308, + 140, + 545, + 172 + ], + "type": "text", + "content": "[16] Min Jin Chong, Hsin-Ying Lee, and David Forsyth. Stylegan of all trades: Image manipulation with only pretrained stylegan. 
arXiv preprint arXiv:2111.01619, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 173, + 545, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 173, + 545, + 215 + ], + "spans": [ + { + "bbox": [ + 308, + 173, + 545, + 215 + ], + "type": "text", + "content": "[17] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 217, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 217, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 217, + 545, + 249 + ], + "type": "text", + "content": "[18] Rinon Gal, Or Patashnik, Haggai Maron, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 250, + 545, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 250, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 308, + 250, + 545, + 282 + ], + "type": "text", + "content": "[19] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks, 2014. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 283, + 545, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 283, + 545, + 327 + ], + "spans": [ + { + "bbox": [ + 308, + 283, + 545, + 327 + ], + "type": "text", + "content": "[20] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylererf: A style-based 3d aware generator for high-resolution image synthesis. In International Conference on Learning Representations, 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 327, + 545, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 327, + 545, + 369 + ], + "spans": [ + { + "bbox": [ + 308, + 327, + 545, + 369 + ], + "type": "text", + "content": "[21] Fangzhou Han, Shuquan Ye, Mingming He, Menglei Chai, and Jing Liao. Exemplar-based 3d portrait stylization. IEEE Transactions on Visualization and Computer Graphics, 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 308, + 371, + 545, + 403 + ], + "type": "text", + "content": "[22] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. arXiv preprint arXiv:2004.02546, 2020. 2, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 404, + 545, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 404, + 545, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 404, + 545, + 458 + ], + "type": "text", + "content": "[23] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 
6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "spans": [ + { + "bbox": [ + 308, + 460, + 545, + 492 + ], + "type": "text", + "content": "[24] Jing Huo, Wenbin Li, Yinghuan Shi, Yang Gao, and Hujun Yin. Webcaricature: a benchmark for caricature recognition. In British Machine Vision Conference, 2018. 3, 6, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 308, + 493, + 545, + 536 + ], + "type": "text", + "content": "[25] Wonjong Jang, Gwangjin Ju, Yucheol Jung, Jiaolong Yang, Xin Tong, and Seungyong Lee. Stylecarigan: Caricature generation via stylegan feature map modulation. 40(4), 2021. 2, 3, 6, 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 537, + 545, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 537, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 308, + 537, + 545, + 592 + ], + "type": "text", + "content": "[26] Yucheol Jung, Wonjong Jang, Soongjin Kim, Jiaolong Yang, Xin Tong, and Seungyong Lee. Deep deformable 3d caricatures with learned shape control. In Special Interest Group on Computer Graphics and Interactive Techniques Conference Proceedings. ACM, aug 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 593, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 593, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 593, + 545, + 624 + ], + "type": "text", + "content": "[27] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation, 2017. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 625, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 545, + 667 + ], + "type": "text", + "content": "[28] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In Proc. NeurIPS, 2020. 1, 2, 5, 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "text", + "content": "[29] Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. 
arXiv preprint arXiv:2006.06676, 2020.3" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "4560" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[30] Tero Karras, Miika Aittala, Samuli Laine, Erik Härkönen, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Alias-free generative adversarial networks, 2021. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 106, + 287, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 106, + 287, + 160 + ], + "spans": [ + { + "bbox": [ + 48, + 106, + 287, + 160 + ], + "type": "text", + "content": "[31] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4401-4410, 2019. 1, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 161, + 287, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 161, + 287, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 161, + 287, + 205 + ], + "type": "text", + "content": "[32] Tero Karras, Samuli Laine, and Timo Aila. A Style-Based generator architecture for generative adversarial networks. IEEE transactions on pattern analysis and machine intelligence, 43(12):4217-4228, Dec. 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 205, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 287, + 239 + ], + "type": "text", + "content": "[33] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of StyleGAN. In Proc. CVPR, 2020. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 239, + 287, + 282 + ], + "type": "text", + "content": "[34] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 4, 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 283, + 287, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 287, + 316 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 287, + 316 + ], + "type": "text", + "content": "[35] Thomas Leimkuhler and George Drettakis. Freestylegan: Free-view editable portrait rendering with the camera manifold. 40(6), 2021. 
3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "spans": [ + { + "bbox": [ + 48, + 316, + 287, + 360 + ], + "type": "text", + "content": "[36] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. Infinitygan: Towards infinite-pixel image synthesis. In International Conference on Learning Representations (ICLR), 2022. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 361, + 287, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 361, + 287, + 415 + ], + "spans": [ + { + "bbox": [ + 48, + 361, + 287, + 415 + ], + "type": "text", + "content": "[37] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European conference on computer vision, pages 405-421. Springer, 2020. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 415, + 287, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 415, + 287, + 437 + ], + "spans": [ + { + "bbox": [ + 48, + 415, + 287, + 437 + ], + "type": "text", + "content": "[38] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze the discriminator: a simple baseline for fine-tuning gans, 2020. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "spans": [ + { + "bbox": [ + 48, + 437, + 287, + 480 + ], + "type": "text", + "content": "[39] Michael Niemeyer and Andreas Geiger. Campari: Camera-aware decomposed generative neural radiance fields. In 2021 International Conference on 3D Vision (3DV), pages 951-961. IEEE, 2021. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 482, + 287, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 482, + 287, + 535 + ], + "spans": [ + { + "bbox": [ + 48, + 482, + 287, + 535 + ], + "type": "text", + "content": "[40] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11453-11464, 2021. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 536, + 287, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 536, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 536, + 287, + 591 + ], + "type": "text", + "content": "[41] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. StyleSDF: High-Resolution 3D-Consistent Image and Geometry Generation. arXiv preprint arXiv:2112.11427, 2021. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 592, + 287, + 635 + ], + "type": "text", + "content": "[42] Xingang Pan, Bo Dai, Ziwei Liu, Chen Change Loy, and Ping Luo. Do 2d gans know 3d shape? unsupervised 3d shape reconstruction from 2d image gans. arXiv preprint arXiv:2011.00844, 2020. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 636, + 287, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 287, + 680 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 287, + 680 + ], + "type": "text", + "content": "[43] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Deformable neural radiance fields. arXiv preprint arXiv:2011.12948, 2020. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 681, + 287, + 713 + ], + "type": "text", + "content": "[44] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery, 2021. 2" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 106 + ], + "type": "text", + "content": "[45] Justin N. M. Pinkney and Doron Adler. Resolution dependent gan interpolation for controllable image synthesis between domains, 2020. 2, 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 107, + 545, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 172 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 172 + ], + "type": "text", + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. CoRR, abs/2103.00020, 2021. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 545, + 206 + ], + "type": "text", + "content": "[47] Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks, 2015. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 208, + 545, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 208, + 545, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 208, + 545, + 251 + ], + "type": "text", + "content": "[48] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. arXiv preprint arXiv:2008.00951, 2020. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 545, + 285 + ], + "type": "text", + "content": "[49] Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. arXiv preprint arXiv:2106.05744, 2021. 
2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 286, + 545, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 545, + 331 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 545, + 331 + ], + "type": "text", + "content": "[50] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 332, + 545, + 376 + ], + "type": "text", + "content": "[51] Yujun Shen, Ceyuan Yang, Xiaou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "spans": [ + { + "bbox": [ + 307, + 377, + 545, + 421 + ], + "type": "text", + "content": "[52] Yichun Shi, Divyansh Aggarwal, and Anil K Jain. Lifting 2d stylegan for 3d-aware face generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6258-6266, 2021. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 422, + 545, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 466 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 466 + ], + "type": "text", + "content": "[53] Ivan Skorokhodov, Aliaksandr Siarohin, Yinghao Xu, Jian Ren, Hsin-Ying Lee, Peter Wonka, and Sergey Tulyakov. 3d generation on imagenet. In International Conference on Learning Representations (ICLR), 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 468, + 545, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 468, + 545, + 511 + ], + "spans": [ + { + "bbox": [ + 307, + 468, + 545, + 511 + ], + "type": "text", + "content": "[54] Guoxian Song, Linjie Luo, Jing Liu, Wan-Chun Ma, Chunpong Lai, Chuanxia Zheng, and Tat-Jen Cham. Agilegan: Stylizing portraits by inversion-consistent transfer learning. ACM Trans. Graph., 40(4), jul 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 513, + 545, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 513, + 545, + 555 + ], + "spans": [ + { + "bbox": [ + 307, + 513, + 545, + 555 + ], + "type": "text", + "content": "[55] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis, 2022. 1, 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 557, + 545, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 557, + 545, + 623 + ], + "spans": [ + { + "bbox": [ + 307, + 557, + 545, + 623 + ], + "type": "text", + "content": "[56] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6142-6151, 2020. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 545, + 677 + ], + "type": "text", + "content": "[57] Ayush Tewari, Mohamed Elgharib, Mallikarjun BR, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zöllhofer, and Christian Theobalt. Pie: Portrait image embedding for semantic control. volume 39, December 2020. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[58] Omer Tov, Yuval Alaluf, Yotam Nitzan, Or Patashnik, and Daniel Cohen-Or. Designing an encoder for stylegan image manipulation. arXiv preprint arXiv:2102.02766, 2021. 2, 6" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "4561" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 634 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[59] Rotem Tzaban, Ron Mokady, Rinon Gal, Amit H. Bermano, and Daniel Cohen-Or. *Stitch it in time: Gan-based facial editing of real videos. CoRR*, abs/2201.08361, 2022. 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 107, + 287, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 107, + 287, + 150 + ], + "spans": [ + { + "bbox": [ + 49, + 107, + 287, + 150 + ], + "type": "text", + "content": "[60] Can Wang, Menglei Chai, Mingming He, Dongdong Chen, and Jing Liao. Cross-domain and disentangled face manipulation with 3d guidance. IEEE Transactions on Visualization and Computer Graphics, 2022. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 152, + 286, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 152, + 286, + 174 + ], + "spans": [ + { + "bbox": [ + 49, + 152, + 286, + 174 + ], + "type": "text", + "content": "[61] WarBean. tps-stn-pytorch. https://github.com/WarBean/tps_stn_pytorch.5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 49, + 175, + 287, + 207 + ], + "type": "text", + "content": "[62] Zongze Wu, Dani Lischinski, and Eli Shechtman. Stylespace analysis: Disentangled controls for stylegan image generation. arXiv preprint arXiv:2011.12799, 2020. 
2, 4, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 209, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 209, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 49, + 209, + 287, + 274 + ], + "type": "text", + "content": "[63] Yinghao Xu, Menglei Chai, Zifan Shi, Sida Peng, Ivan Skorokhodov, Aliaksandr Siarohin, Ceyuan Yang, Yujun Shen, Hsin-Ying Lee, Bolei Zhou, et al. Discoscene: Spatially disentangled generative radiance fields for controllable 3d-aware scene synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, 2023. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 275, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 275, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 275, + 287, + 319 + ], + "type": "text", + "content": "[64] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. arXiv preprint arXiv:2112.10759, 2021. 1, 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 320, + 287, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 320, + 287, + 352 + ], + "spans": [ + { + "bbox": [ + 49, + 320, + 287, + 352 + ], + "type": "text", + "content": "[65] Shuai Yang, Liming Jiang, Ziwei Liu, and Chen Change Loy. Pastiche master: Exemplar-based high-resolution portrait style transfer. In CVPR, 2022. 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 354, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 354, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 49, + 354, + 287, + 408 + ], + "type": "text", + "content": "[66] Zipeng Ye, Mengfei Xia, Yanan Sun, Ran Yi, Minjing Yu, Juyong Zhang, Yu-Kun Lai, and Yong-Jin Liu. 3d-CariGAN: An end-to-end solution to 3d caricature generation from normal face photos. IEEE Transactions on Visualization and Computer Graphics, pages 1-1, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 410, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 410, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 49, + 410, + 287, + 453 + ], + "type": "text", + "content": "[67] Fisher Yu, Yinda Zhang, Shuran Song, Ari Seff, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365, 2015. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 455, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 455, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 49, + 455, + 287, + 487 + ], + "type": "text", + "content": "[68] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 489, + 287, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 489, + 287, + 531 + ], + "spans": [ + { + "bbox": [ + 49, + 489, + 287, + 531 + ], + "type": "text", + "content": "[69] Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. Indomain gan inversion for real image editing. In European Conference on Computer Vision, pages 592-608. Springer, 2020. 
2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 533, + 287, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 533, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 49, + 533, + 287, + 577 + ], + "type": "text", + "content": "[70] Peihao Zhu, Rameen Abdal, John Femiani, and Peter Wonka. Mind the gap: Domain gap control for single shot domain adaptation for generative adversarial networks. In International Conference on Learning Representations, 2022. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 49, + 578, + 287, + 610 + ], + "type": "text", + "content": "[71] Peihao Zhu, Rameen Abdal, Yipeng Qin, John Femiani, and Peter Wonka. Improved stylegan embedding: Where are the good latents?, 2020. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 613, + 285, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 613, + 285, + 634 + ], + "spans": [ + { + "bbox": [ + 49, + 613, + 285, + 634 + ], + "type": "text", + "content": "[72] zllrunning. face-parsing.pytorch. https://github.com/zllrunning/face-parsing.PyTorch.2,3" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "4562" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_content_list.json b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1f13501609386ded7c8793bdb03973d34fc97709 --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_content_list.json @@ -0,0 +1,1685 @@ +[ + { + "type": "text", + "text": "3Mformer: Multi-order Multi-mode Transformer for Skeletal Action Recognition", + "text_level": 1, + "bbox": [ + 230, + 130, + 743, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lei Wang†,§ Piotr Koniusz*,§,†", + "bbox": [ + 344, + 202, + 622, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "†Australian National University, §Data61♥CSIRO", + "bbox": [ + 289, + 220, + 679, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\\$firstname_lastname@data61.csiro.au", + "bbox": [ + 334, + 238, + 630, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 291, + 313, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Many skeletal action recognition models use GCNs to represent the human body by 3D body joints connected body parts. GCNs aggregate one- or few-hop graph neighbourhoods, and ignore the dependency between not linked body joints. 
We propose to form hypergraph to model hyperedges between graph nodes (e.g., third- and fourth-order hyper-edges capture three and four nodes) which help capture higher-order motion patterns of groups of body joints. We split action sequences into temporal blocks, Higher-order Transformer (HoT) produces embeddings of each temporal block based on (i) the body joints, (ii) pairwise links of body joints and (iii) higher-order hyper-edges of skeleton body joints. We combine such HoT embeddings of hyper-edges of orders 1,..., $r$ by a novel Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged to achieve coupled-mode attention on coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. The first module, called Multi-order Pooling (MP), additionally learns weighted aggregation along the hyper-edge mode, whereas the second module, Temporal block Pooling (TP), aggregates along the temporal block' mode. Our end-to-end trainable network yields state-of-the-art results compared to GCN-, transformer- and hypergraph-based counterparts.", + "bbox": [ + 75, + 323, + 472, + 703 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 731, + 209, + 748 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Action Recognition has applications in video surveillance, human-computer interaction, sports analysis, and virtual reality [24, 25, 40, 52-59]. Different from video-based methods which mainly focus on modeling the spatiotemporal representations from RGB frames and/or optical flow [25, 52-55, 58], skeleton sequences, representing a spatio-temporal evolution of 3D body joints, have been", + "bbox": [ + 75, + 757, + 468, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "proven robust against sensor noises and effective in action recognition while being computationally and storage efficient [24, 40, 52, 53, 56, 57, 59]. The skeleton data is usually obtained by either localization of 2D/3D coordinates of human body joints with the depth sensors or pose estimation algorithms applied to videos [2]. Skeleton sequences enjoy (i) simple structural connectivity of skeletal graph and (ii) temporal continuity of 3D body joints evolving in time. While temporal evolution of each body joint is highly informative, embeddings of separate body joints are insensitive to relations between body parts. Moreover, while the links between adjacent 3D body joints (following the structural connectivity) are very informative as they model relations, these links represent highly correlated nodes in the sense of their temporal evolution. Thus, modeling larger groups of 3D body joints as hyper-edges can capture more complex spatio-temporal motion dynamics.", + "bbox": [ + 500, + 292, + 893, + 549 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The existing graph-based models mainly differ by how they handle temporal information. Graph Neural Network (GNN) may encode spatial neighborhood of the node followed by aggregation by LSTM [46, 65]. Alternatively, Graph Convolutional Network (GCN) may perform spatio-temporal convolution in the neighborhood of each node [64]. Spatial GCNs perform convolution within one or two hop distance of each node, e.g., spatio-temporal GCN model called ST-GCN [64] models spatio-temporal vicinity of each 3D body joint. 
As ST-GCN applies convolution along structural connections (links between body joints), structurally distant joints, which may cover key patterns of actions, are largely ignored. ST-GCN captures ever larger neighborhoods as layers are added but suffers from oversmoothing that can be mitigated by linear GCNs [76-78].", + "bbox": [ + 496, + 551, + 893, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human actions are associated with interaction groups of skeletal joints, e.g., wrist alone, head-wrist, head-wrist-ankles, etc. The impact of these groups of joints on each action differs, and the degree of influence of each joint should be learned. Accordingly, designing a better model for skeleton data is vital given the topology of skeleton graph is suboptimal. While GCN can be applied to a fully-connected graph (i.e., 3D body joints as densely connected", + "bbox": [ + 496, + 779, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author.", + "bbox": [ + 94, + 875, + 222, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "For brevity, we write $\\tau$ temporal blocks per sequence but $\\tau$ varies.", + "bbox": [ + 94, + 887, + 447, + 900 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5620", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "graph nodes), Higher-order Transformer (HoT) [21] has been proven more efficient.", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Thus, we propose to use hypergraphs with hyper-edges of order 1 to $r$ to effectively represent skeleton data for action recognition. Compared to GCNs, our encoder contains an MLP followed by three HoT branches that encode first, second- and higher-order hyper-edges, i.e., set of body joints, edges between pairs of nodes, hyper-edges between triplets of nodes, etc. Each branch has its own learnable parameters, and processes temporal blocks2 one-by-one.", + "bbox": [ + 75, + 121, + 467, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We notice that (i) the number of hyper-edges of $J$ joints grows rapidly with order $r$ , i.e., $\\binom{J}{i}$ for $i = 1, \\dots, r$ , embeddings of the highest order dominate lower orders in terms of volume if such embeddings are merely concatenated, and (ii) long-range temporal dependencies of feature maps are insufficiently explored, as sequences are split into $\\tau$ temporal blocks for computational tractability.", + "bbox": [ + 75, + 242, + 467, + 348 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Merely concatenating outputs of HoT branches of orders 1 to $r$ , and across $\\tau$ blocks, is sub-optimal. Thus, our Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged, realizes a variation of coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. 
As HoT operates block-by-block, 'channel-temporal block' tokens and weighted hyper-edge aggregation in Multi-order Pooling (MP) help combine information flow block-wise. Various coupled-mode tokens help improve results further due to different focus of each attention mechanism. As the block-temporal mode needs to be aggregated (number of blocks varies across sequences), Temporal block Pooling (TP) can use rank pooling [13], second-order [14, 26, 33, 41, 60, 68, 80] or higher-order pooling [8, 24, 25, 69, 70].", + "bbox": [ + 75, + 349, + 467, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our main contributions are listed as follows:", + "bbox": [ + 96, + 599, + 470, + 612 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "i. We model the skeleton data as hypergraph of orders 1 to $r$ (set, graph and/or hypergraph), where human body joints serve as nodes. Higher-order Transformer embeddings of such formed hyper-edges represent various groups of 3D body joints and capture various higher-order dynamics important for action recognition.", + "ii. As HoT embeddings represent individual hyper-edge order and block, we introduce a novel Multi-order Multi-mode Transformer (3Mformer) with two modules, Multi-order Pooling and Temporal block Pooling. Their goal is to form coupled-mode tokens such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation." + ], + "bbox": [ + 83, + 627, + 467, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our 3Mformer outperforms other GCN- and hypergraph-based models on NTU-60, NTU-120, Kinetics-Skeleton and Northwestern-UCLA by a large margin.", + "bbox": [ + 498, + 90, + 890, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 152, + 640, + 167 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Below we describe popular action recognition models for skeletal data.", + "bbox": [ + 498, + 178, + 890, + 205 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Graph-based models. Popular GCN-based models include the Attention enhanced Graph Convolutional LSTM network (AGC-LSTM) [46], the Actional-Structural GCN (AS-GCN) [30], Dynamic Directed GCN (DDGCN) [27], Decoupling GCN with DropGraph module [5], ShiftGCN [6], Semantics-Guided Neural Networks (SGN) [67], AdaSGN [45], Context Aware GCN (CA-GCN) [71], Channel-wise Topology Refinement Graph Convolution Network (CTR-GCN) [4] and a family of Efficient GCN (EfficientGCN-Bx) [47]. Although GCN-based models enjoy good performance, they have shortcomings, e.g., convolution and/or pooling are applied over one- or few-hop neighborhoods, e.g., ST-GCN [64], according to the human skeleton graph (body joints linked up according to connectivity of human body parts). Thus, indirect links between various 3D body joints such as hands and legs are ignored. In contrast, our model is not restricted by the structure of typical human body skeletal graph. Instead, 3D body joints are nodes which form hyper-edges of orders 1 to $r$ .", + "bbox": [ + 496, + 212, + 890, + 500 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Hypergraph-based models. Pioneering work on capturing groups of nodes across time uses tensors [24] to represent the 3D human body joints to exploit the kinematic relations among the adjacent and non-adjacent joints. 
Representing the human body as a hypergraph is adopted in [35] via a semi-dynamic hypergraph neural network that captures richer information than GCN. A hypergraph GNN [15] captures both spatio-temporal information and higher-order dependencies for skeleton-based action recognition. Our work is somewhat closely related to these works, but we jointly use hypergraphs of order 1 to $r$ to obtain rich hyper-edge embeddings based on Higher-order Transformers.", + "bbox": [ + 496, + 503, + 890, + 684 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Transformer-based models. Action recognition with transformers includes self-supervised video transformer [42] that matches the features from different views (a popular strategy in self-supervised GCNs [74, 75]), the end-to-end trainable Video-Audio-Text-Transformer (VATT) [1] for learning multi-model representations from unlabeled raw video, audio and text through the multimodal contrastive losses, and the Temporal Transformer Network with Self-supervision (TTSN) [72]. Motion-Transformer [7] captures the temporal dependencies via a self-supervised pre-training on human actions, Masked Feature Prediction (MaskedFeat) [61] pre-trained on unlabeled videos with MViT-L learns abundant visual representations, and video-masked autoencoder (VideoMAE) [48] with vanilla ViT", + "bbox": [ + 496, + 688, + 890, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2Each temporal block enjoys a locally factored out (removed) temporal mode, which makes each block representation compact.", + "bbox": [ + 75, + 875, + 467, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5621", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e54dd32c2d7e0e6322522f3bca6e512a8da722d88d64a7868bafd07b87e447c2.jpg", + "image_caption": [ + "Figure 1. Pipeline overview. Each sequence is split into $\\tau$ temporal blocks $\\mathbf{B}_1, \\dots, \\mathbf{B}_{\\tau}$ . Subsequently, each block is embedded by a simple MLP into $\\mathbf{X}_1, \\dots, \\mathbf{X}_{\\tau}$ , which are passed to Higher-order Transformers (HoT ( $n = 1, \\dots, r$ )) in order to obtain feature tensors $\\Phi_1, \\dots, \\Phi_{\\tau}$ . These tensors are subsequently concatenated by $\\odot$ along the hyper-edge mode into a multi-order feature tensor $\\mathcal{M}$ . The final step is a Multi-order Multi-mode Transformer (3Mformer from Section 4), which contains two complementary branches, MP $\\rightarrow$ TP and TP $\\rightarrow$ MP, whose outputs are concatenated by $\\odot$ and passed to the classifier. MP and TP perform the Coupled-mode Self-Attention (CmSA) with the so-called coupled-mode tokens, based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge' and 'channel-only' pairs. To this end, MP contains also weighted pooling along hyper-edge mode by learnable matrix $\\mathbf{H}$ (and $\\mathbf{H}'$ in another branch). TP contains also block-temporal pooling denoted by $g(\\cdot)$ whose role is to capture block-temporal order with average, maximum, rank pooling, etc. In our experiments we show that such designed MP and TP are able to efficiently process hyper-edge feature representations from HoT branches. Appendix A shows full visualization of our 3Mformer." + ], + "image_footnote": [], + "bbox": [ + 81, + 89, + 890, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "uses the masking strategy. 
In contrast to these works, we use three HoT branches of model [21], and we model hyperedges of orders 1 to $r$ by forming several multi-mode token variations in 3Mformer.", + "bbox": [ + 75, + 386, + 468, + 446 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Attention. In order to improve feature representations, attention captures relationship between tokens. Natural language processing and computer vision have driven recent developments in attention mechanisms based on transformers [11, 49]. Examples include the hierarchical Cross Attention Transformer (CAT) [32], Cross-attention by Temporal Shift with CNNs [16], Cross-Attention Multi-Scale Vision Transformer (CrossViT) for image classification [3] and Multi-Modality Cross Attention (MMCA) Network for image and sentence matching [63]. In GNNs, attention can be defined over edges [50, 66] or over nodes [29]. In this work, we use the attention with hyper-edges of several orders from HoT branches serving as tokens, and coupled-mode attention with coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs formed in 3Mformer.", + "bbox": [ + 75, + 452, + 472, + 708 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Background", + "text_level": 1, + "bbox": [ + 76, + 727, + 205, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Below we describe foundations necessary for our work.", + "bbox": [ + 94, + 753, + 464, + 770 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Notations. $\\mathcal{I}_K$ stands for the index set $\\{1,2,\\dots,K\\}$ . Regular fonts are scalars; vectors are denoted by lowercase boldface letters, e.g., $\\mathbf{x}$ ; matrices by the uppercase boldface, e.g., $\\mathbf{M}$ ; and tensors by calligraphic letters, e.g., $\\mathbf{M}$ . An $r$ th-order tensor is denoted as $\\mathbf{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}$ , and the mode- $m$ matricization of $\\mathbf{M}$ is denoted as $\\mathbf{M}_{(m)} \\in \\mathbb{R}^{I_m \\times (I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r)}$ .", + "bbox": [ + 75, + 773, + 468, + 866 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Transformer layers [11, 49]. A transformer encoder layer $f: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}$ consists of two sub-layers: (i) a self", + "bbox": [ + 76, + 869, + 470, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "attention $a: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}$ and (ii) an element-wise feedforward MLP: $\\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}$ . 
For a set of $J$ nodes with $\\mathbf{X} \\in \\mathbb{R}^{J \\times d}$ , where $\\mathbf{x}_i$ is a feature vector of node $i$ , a transformer layer3 computes:", + "bbox": [ + 496, + 385, + 892, + 448 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\na \\left(\\mathbf {x} _ {i}\\right) = \\mathbf {x} _ {i} + \\sum_ {h = 1} ^ {H} \\sum_ {j = 1} ^ {J} \\alpha_ {i j} ^ {h} \\mathbf {x} _ {j} \\mathbf {W} _ {h} ^ {V} \\mathbf {W} _ {h} ^ {O}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 458, + 890, + 501 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf (\\mathbf {x} _ {i}) = a (\\mathbf {x} _ {i}) + \\operatorname {M L P} (a (\\mathbf {X})) _ {i}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 503, + 890, + 522 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $H$ and $d_H$ denote respectively the number of heads and the head size, $\\alpha^h = \\sigma (\\mathbf{X}\\mathbf{W}_h^Q (\\mathbf{X}\\mathbf{W}_h^K)^\\top)$ is the attention coefficient, $\\mathbf{W}_h^O\\in \\mathbb{R}^{d_H\\times d}$ , and $\\mathbf{W}_h^V,\\mathbf{W}_h^K,\\mathbf{W}_h^Q\\in \\mathbb{R}^{d\\times d_H}$ .", + "bbox": [ + 496, + 532, + 890, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Higher-order transformer layers [21]. Let the HoT layer be $f_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}$ with two sub-layers: (i) a higher-order self-attention $a_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}$ and (ii) a feedforward $\\mathrm{MLP}_{n\\rightarrow n}:\\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d}$ . Moreover, let indexing vectors $\\mathbf{i}\\in \\mathcal{I}_J^m\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J$ ( $m$ modes) and $\\mathbf{j}\\in \\mathcal{I}_J^n\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J$ ( $n$ modes). 
For the input tensor $\\mathbf{X}\\in \\mathbb{R}^{J^m\\times d}$ with hyper-edges of order $m$ , a HoT layer evaluates:", + "bbox": [ + 496, + 584, + 892, + 691 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\na _ {m \\rightarrow n} (\\mathbf {X}) _ {j} = \\sum_ {h = 1} ^ {H} \\sum_ {\\mu} \\sum_ {i} \\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} \\mathbf {X} _ {i} \\mathbf {W} _ {h, \\mu} ^ {V} \\mathbf {W} _ {h, \\mu} ^ {O} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 513, + 702, + 890, + 743 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {M L P} _ {n \\rightarrow n} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right) = \\mathrm {L} _ {n \\rightarrow n} ^ {2} \\left(\\operatorname {R e L U} \\left(\\mathrm {L} _ {n \\rightarrow n} ^ {1} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right)\\right)\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 746, + 890, + 763 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf _ {m \\rightarrow n} (\\mathbf {X}) = a _ {m \\rightarrow n} (\\mathbf {X}) + \\operatorname {M L P} _ {n \\rightarrow n} (a _ {m \\rightarrow n} (\\mathbf {X})), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 767, + 890, + 784 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\pmb{\\alpha}^{h,\\mu}\\in \\mathbb{R}^{J^{m + n}}$ is the so-called attention coefficient tensor with multiple heads, and $\\pmb{\\alpha}_{\\mathbf{i},\\mathbf{j}}^{h,\\mu}\\in \\mathbb{R}^{J}$ is a vector, $\\mathbf{W}_{h,\\mu}^V\\in \\mathbb{R}^{d\\times d_H}$ and $\\mathbf{W}_{h,\\mu}^{O}\\in \\mathbb{R}^{d_{H}\\times d}$ are learnable parameters. Moreover, $\\mu$ indexes over the so-called equivalence classes of order $(m + n)$ in the same partition of nodes, $\\mathrm{L}_{n\\to n}^{1}\\colon \\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d_F}$", + "bbox": [ + 496, + 796, + 890, + 878 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "${}^{3}$ Normalizations after $a\\left( \\cdot \\right)$ & $\\operatorname{MLP}\\left( \\cdot \\right)$ are omitted for simplicity.", + "bbox": [ + 514, + 886, + 849, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5622", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and $\\mathrm{L}_{n\\to n}^2\\colon \\mathbb{R}^{J^n\\times d_F}\\to \\mathbb{R}^{J^n\\times d}$ are equivariant linear layers and $d_{F}$ is the hidden dimension.", + "bbox": [ + 75, + 89, + 468, + 119 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To compute each attention tensor $\\alpha^{h,\\mu} \\in \\mathbb{R}^{J^{m + n}}$ from the input tensor $\\mathbf{X} \\in \\mathbb{R}^{J^m \\times d}$ of hyper-edges of order $m$ , from the higher-order query and key, we obtain:", + "bbox": [ + 75, + 119, + 468, + 167 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} = \\left\\{ \\begin{array}{c c} \\frac {\\sigma \\left(\\mathbf {Q} _ {j} ^ {h , \\mu} , \\mathbf {K} _ {i} ^ {h , \\mu}\\right)}{Z _ {j}} & (i, j) \\in \\mu \\\\ 0 & \\text {o t h e r w i s e}, \\end{array} \\right. 
\\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 176, + 468, + 223 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{Q}^{\\mu} = \\mathrm{L}_{m\\to n}^{\\mu}(\\mathbf{X})$ $\\mathbf{K}^{\\mu} = \\mathrm{L}_{m\\to m}^{\\mu}(\\mathbf{X})$ , and normalization constant $Z_{j} = \\sum_{i:(i,j)\\in \\mu}\\sigma (\\mathbf{Q}_{j}^{\\mu},\\mathbf{K}_{i}^{\\mu})$ . Finally, kernel attention in Eq. (6) can be approximated with RKHS feature maps $\\psi \\in \\mathbb{R}_+^{d_K}$ for efficacy as $d_{K}\\ll d_{H}$ . Specifically, we have $\\sigma (\\mathbf{Q}_j^{h,\\mu},\\mathbf{K}_i^{h,\\mu})\\approx \\psi (\\mathbf{Q}_j^{h,\\mu})^\\top \\psi (\\mathbf{K}_i^{h,\\mu})$ as in [10, 19]. We choose the performer kernel [10] due to its good performance.", + "bbox": [ + 75, + 232, + 468, + 324 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As query and key tensors are computed from the input tensor $\\mathbf{X}$ using the equivariant linear layers, the transformer encoder layer $f_{m\\to n}$ satisfies the permutation equivariance.", + "bbox": [ + 75, + 324, + 468, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Approach", + "text_level": 1, + "bbox": [ + 76, + 382, + 184, + 400 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Skeletal Graph [64] and Skeletal Hypergraph [15,35] are popular for modeling edges and hyper-edges. In this work, we use the Higher-order Transformer (HoT) [21] as a backbone encoder.", + "bbox": [ + 75, + 407, + 468, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Model Overview", + "text_level": 1, + "bbox": [ + 76, + 477, + 238, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 1 shows that our framework contains a simple 3-layer MLP unit (FC, ReLU, FC, ReLU, Dropout, FC), three HoT blocks with each HoT for each type of input (i.e., body joint feature set, graph and hypergraph of body joints), followed by Multi-order Multi-mode Transformer (3Mformer) with two modules (i) Multi-order Pooling (MP) and (ii) Temporal block Pooling (TP). The goal of 3Mformer is to form coupled-mode tokens (explained later) such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation. Their outputs are further concatenated and passed to an FC layer for classification.", + "bbox": [ + 75, + 500, + 468, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MLP unit. The MLP unit takes $T$ neighboring frames, each with $J$ 2D/3D skeleton body joints, forming one temporal block. In total, depending on stride $S$ , we obtain some $\\tau$ temporal blocks (a block captures the short-term temporal evolution). In contrast, the long-term temporal evolution is modeled with HoT and 3Mformer. Each temporal block is encoded by the MLP into a $d \\times J$ dimensional feature map.", + "bbox": [ + 75, + 700, + 468, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "HoT branches. We stack $r$ branches of HoT, each taking embeddings $\\mathbf{X}_t\\in \\mathbb{R}^{d\\times J}$ where $t\\in I_{\\tau}$ denotes a temporal block. 
HoT branches output hyper-edge feature representations of size $m\\in I_r$ as $\\Phi_m^{\\prime}\\in \\mathbb{R}^{J^m\\times d'}$ for order $m\\in I_r$ .", + "bbox": [ + 75, + 809, + 468, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For the first-, second- and higher-order stream outputs $\\Phi_1',\\dots,\\Phi_r'$ , we (i) swap feature channel and hyper-edge", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "modes, (ii) extract the upper triangular of tensors, and we concatenate along the block-temporal mode, so we have $\\Phi_{m}\\in \\mathbb{R}^{d^{\\prime}\\times N_{E_{m}}\\times \\tau}$ , where $N_{E_m} = \\binom{J}{m}$ . Subsequently, we concatenate $\\Phi_1,\\ldots ,\\Phi_r$ along the hyper-edge mode and obtain a multi-order feature tensor $\\pmb {M}\\in \\mathbb{R}^{d^{\\prime}\\times N\\times r}$ where the total number of hyper-edges across all orders is $N = \\sum_{m = 1}^{r}\\binom{J}{m}$ .", + "bbox": [ + 496, + 90, + 890, + 185 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3Mformer. Our Multi-order Multi-mode Transformer (3Mformer) with Coupled-mode Self-Attention (CmSA) is used for the fusion of information flow inside the multi-order feature tensor $\\mathcal{M}$ , and finally, the output from 3Mformer is passed to a classifier for classification.", + "bbox": [ + 496, + 186, + 890, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Coupled-mode Self-Attention", + "text_level": 1, + "bbox": [ + 498, + 271, + 761, + 286 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Coupled-mode tokens. We are inspired by the attentive regions of the one-class token in the standard Vision Transformer (ViT) [49] that can be leveraged to form a class-agnostic localization map. We investigate if the transformer model can also effectively capture the coupled-mode attention for more discriminative classification tasks, e.g., tensorial skeleton-based action recognition by learning the coupled-mode tokens within the transformer. To this end, we propose a Multi-order Multi-mode Transformer (3Mformer), which uses coupled-mode tokens to jointly learn various higher-order motion dynamics among channel-, block-temporal-, body joint- and order-mode. Our 3Mformer can successfully produce coupled-mode relationships from CmSA mechanism corresponding to different tokens. Below we introduce our CmSA.", + "bbox": [ + 496, + 297, + 890, + 523 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the order- $r$ tensor $\\mathcal{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}$ , to form the joint mode token, we perform the mode- $m$ matricization of $\\mathcal{M}$ to obtain $\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top} \\in \\mathbb{R}^{(I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r) \\times I_m}$ , and the coupled-token for $\\mathbf{M}$ is formed. For example, for a given 3rd-order tensor that has feature channel-, hyper-edge- and temporal block-mode, we can form 'channel-temporal block', 'channel-hyper-edge (any order)' and 'channel-only' pairs; and if the given tensor is used as input and outputs a new tensor which produces new mode, e.g., body joint-mode, we can form the 'order-channel-body joint' token. In the following sections, for simplicity, we use reshape for the matricization of tensor to form different types of coupled-mode tokens. 
Our CmSA is given as:", + "bbox": [ + 496, + 523, + 890, + 720 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\na (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} \\mathbf {K} ^ {\\top}}{\\sqrt {d _ {K}}}\\right) \\mathbf {V}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 731, + 890, + 766 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\sqrt{d_K}$ is the scaling factor, $\\mathbf{Q} = \\mathbf{W}^q\\mathbf{M}$ , $\\mathbf{K} = \\mathbf{W}^k\\mathbf{M}$ and $\\mathbf{V} = \\mathbf{W}^{\\nu}\\mathbf{M}$ are the query, key and value, respectively, and $\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top}$ . Moreover, $\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times I_m}$ and $\\mathbf{W}^q, \\mathbf{W}^k, \\mathbf{W}^\\nu \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times (I_1\\dots I_{m-1}I_{m+1}\\dots I_r)}$ are learnable weights. We notice that various coupled-mode tokens have different 'focus' of attention mechanisms, and we apply them in our 3Mformer for the fusion of multi-order feature representations.", + "bbox": [ + 496, + 777, + 890, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5623", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. Multi-order Multi-mode Transformer", + "text_level": 1, + "bbox": [ + 76, + 90, + 403, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Below we introduce Multi-order Multi-mode Transformer (3Mformer) with Multi-order Pooling (MP) block and Temporal block Pooling (TP) block, which are cascaded into two branches (i) $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ and (ii) $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ , to achieve different types of coupled-mode tokens.", + "bbox": [ + 75, + 114, + 468, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3.1 Multi-order Pooling (MP) Module", + "text_level": 1, + "bbox": [ + 76, + 214, + 369, + 229 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\mathbf{CmSA}$ in MP. We reshape the multi-order feature representation $\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}$ into $\\mathbf{M} \\in \\mathbb{R}^{d' \\times N}$ (or reshape the output from TP explained later into $\\mathbf{M}' \\in \\mathbb{R}^{d' \\times N}$ ) to let the model attend to different types of feature representations. Let us simply denote $d'' = d'\\tau$ (or $d'' = d'$ ) depending on the source of input. 
We form a coupled-mode self-attention (if $d'' = d'\\tau$ , we have, i.e., 'channel-temporal block' token; if $d'' = d'$ , we have 'channel-only' token):", + "bbox": [ + 75, + 243, + 470, + 364 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\na _ {\\mathrm {M P}} \\left(\\mathbf {Q} _ {\\mathrm {M P}}, \\mathbf {K} _ {\\mathrm {M P}}, \\mathbf {V} _ {\\mathrm {M P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {M P}} \\mathbf {K} _ {\\mathrm {M P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {M P}}}}}\\right) \\mathbf {V} _ {\\mathrm {M P}}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 377, + 468, + 416 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sqrt{d_{K_{\\mathrm{MP}}}}$ is the scaling factor, $\\mathbf{Q}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{q}\\mathbf{M}$ , $\\mathbf{K}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{k}\\mathbf{M}$ and $\\mathbf{V}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{\\nu}\\mathbf{M}$ (we can use here $\\mathbf{M}$ or $\\mathbf{M}'$ ) are the query, key and value. Moreover, $\\mathbf{Q}_{\\mathrm{MP}}, \\mathbf{K}_{\\mathrm{MP}}, \\mathbf{V}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}$ and $\\mathbf{W}_{\\mathrm{MP}}^{q}, \\mathbf{W}_{\\mathrm{MP}}^{k}, \\mathbf{W}_{\\mathrm{MP}}^{\\nu} \\in \\mathbb{R}^{d'' \\times d''}$ are learnable weights. Eq. (8) is a self-attention layer which reweighs $\\mathbf{V}_{\\mathrm{MP}}$ based on the correlation between $\\mathbf{Q}_{\\mathrm{MP}}$ and $\\mathbf{K}_{\\mathrm{MP}}$ token embeddings of so-called coupled-mode tokens.", + "bbox": [ + 75, + 429, + 468, + 536 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Weighted pooling. Attention layer in Eq. (8) produces feature representation $\\mathbf{O}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}$ to enhance the relationship between example feature channels and body joints. Subsequently, we handle the impact of hyper-edges of multiple orders by weighted pooling along hyper-edges of order $m \\in I_r$ :", + "bbox": [ + 75, + 541, + 468, + 632 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {O} _ {\\mathrm {M P}} ^ {* (m)} = \\mathbf {O} _ {\\mathrm {M P}} ^ {(m)} \\mathbf {H} ^ {(m)} \\in \\mathbb {R} ^ {d ^ {\\prime \\prime} \\times J}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 643, + 468, + 664 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{O}_{\\mathrm{MP}}^{(m)} \\in \\mathbb{R}^{d'' \\times N_{E_m}}$ is simply extracted from $\\mathbf{O}_{\\mathrm{MP}}$ for hyper-edges of order $m$ , and matrices $\\mathbf{H}^{(m)} \\in \\mathbb{R}^{N_{E_m} \\times J}$ are learnable weights to perform weighted pooling along hyperedges of order $m$ . Finally, we obtain $\\mathbf{O}_{\\mathrm{MP}}^* \\in \\mathbb{R}^{rd'' \\times J}$ by simply concatenating $\\mathbf{O}_{\\mathrm{MP}}^{*(1)}, \\ldots, \\mathbf{O}_{\\mathrm{MP}}^{*(r)}$ . If we used the input to MP from TP, then we denote the output of MP as $\\mathbf{O}_{\\mathrm{MP}}^*$ .", + "bbox": [ + 75, + 676, + 468, + 773 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3.2 Temporal block Pooling (TP) Module", + "text_level": 1, + "bbox": [ + 76, + 795, + 390, + 811 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\mathbf{CmSA}$ in TP. 
Firstly, we reshape the multi-order feature representation $\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}$ into $\\mathbf{M} \\in \\mathbb{R}^{d' N \\times \\tau}$ (or reshape the output from MP into $\\mathbf{M}'' \\in \\mathbb{R}^{rd' J \\times \\tau}$ ). For simplicity, we denote $d''' = d'N$ in the first case and $d''' = rd'J$ in the second case. As the first mode of reshaped input serves to form", + "bbox": [ + 75, + 824, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "tokens, they are again coupled-mode tokens, e.g., 'channel-hyper-edge' and 'order-channel-body joint' tokens, respectively. Moreover, TP also performs pooling along block-temporal mode (along $\\tau$ ). We form an coupled-mode self-attention:", + "bbox": [ + 496, + 90, + 890, + 165 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\na _ {\\mathrm {T P}} \\left(\\mathbf {Q} _ {\\mathrm {T P}}, \\mathbf {K} _ {\\mathrm {T P}}, \\mathbf {V} _ {\\mathrm {T P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {T P}} \\mathbf {K} _ {\\mathrm {T P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {T P}}}}}\\right) \\mathbf {V} _ {\\mathrm {T P}}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 176, + 890, + 215 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sqrt{d_{K_{\\mathrm{TP}}}}$ is the scaling factor, $\\mathbf{Q}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{q}\\mathbf{M}$ , $\\mathbf{K}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{k}\\mathbf{M}$ and $\\mathbf{V}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{\\nu}\\mathbf{M}$ (we can use here $\\mathbf{M}$ or $\\mathbf{M}''$ ) are the query, key and value. Moreover, $\\mathbf{Q}_{\\mathrm{TP}}, \\mathbf{K}_{\\mathrm{TP}}, \\mathbf{V}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}$ and $\\mathbf{W}_{\\mathrm{TP}}^{q}, \\mathbf{W}_{\\mathrm{TP}}^{k}, \\mathbf{W}_{\\mathrm{TP}}^{\\nu} \\in \\mathbb{R}^{d''' \\times d'''}$ are learnable weights. Eq. (10) reweights $\\mathbf{V}_{\\mathrm{TP}}$ based on the correlation between $\\mathbf{Q}_{\\mathrm{TP}}$ and $\\mathbf{K}_{\\mathrm{TP}}$ token embeddings of coupled-mode tokens ('channel-hyper-edge' or 'order-channel-body joint'). The output of attention is the temporal representation $\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}$ . If we used $\\mathbf{M}''$ as input, we denote the output as $\\mathbf{O}_{\\mathrm{TP}}''$ .", + "bbox": [ + 496, + 229, + 890, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pooling step. Given the temporal representation $\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d'' \\times r}$ (or $\\mathbf{O}_{\\mathrm{TP}}''$ ), we apply pooling along the block-temporal mode to obtain compact feature representations independent of length (block count $\\tau$ ) of skeleton sequence. There exist many pooling operations including first-order, e.g., average, maximum, sum pooling, second-order [60, 80] such as attentional pooling [14], higher-order (tri-linear) [8, 25] and rank pooling [13]. 
The output after pooling is $\\mathbf{O}_{\\mathrm{TP}}^* \\in \\mathbb{R}^{d''}$ (or $\\mathbf{O}_{\\mathrm{TP}}''$ ).", + "bbox": [ + 496, + 369, + 890, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3.3 Model Variants", + "text_level": 1, + "bbox": [ + 500, + 529, + 661, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We devise four model variants by different stacking of MP with TP, with the goal of exploiting attention with different kinds of coupled-mode tokens:", + "bbox": [ + 496, + 554, + 890, + 599 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "i. Single-branch: MP followed by TP, denoted $\\mathrm{MP}\\rightarrow \\mathrm{TP}$ (Fig. 1 top right branch).", + "ii. Single-branch: TP followed by MP, denoted $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ , (Fig. 1 bottom right branch).", + "iii. Two-branch (our 3Mformer, Fig. 1) which concatenates outputs of $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ and $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ .", + "iv. We also investigate only MP or TP module followed by average pooling or an FC layer." + ], + "bbox": [ + 506, + 612, + 888, + 773 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The outputs from $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ and $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ have exactly the same feature dimension ( $\\mathbb{R}^{rd'J}$ , after reshaping into vector). For two-branch (our 3Mformer), we simply concatenate these outputs ( $\\mathbb{R}^{2rd'J}$ , after concatenation). These vectors are forwarded to the FC layer to learn a classifier.", + "bbox": [ + 496, + 786, + 890, + 861 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4We do not propose pooling operators but we select popular ones with the purpose of comparing their impact on TP.", + "bbox": [ + 500, + 875, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5624", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 76, + 89, + 209, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Datasets and Protocols", + "text_level": 1, + "bbox": [ + 76, + 114, + 287, + 130 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) NTU RGB+D (NTU-60) [43] contains 56,880 video sequences. This dataset has variable sequence lengths and high intra-class variations. Each skeleton sequence has 25 joints and there are no more than two human subjects in each video. Two evaluation protocols are: (i) cross-subject (X-Sub) and (ii) cross-view (X-View).", + "(ii) NTU RGB+D 120 (NTU-120) [34], an extension of NTU-60, contains 120 action classes (daily/health-related), and 114,480 RGB+D video samples captured with 106 distinct human subjects from 155 different camera viewpoints. There are also two evaluation protocols: (i) cross-subject (X-Sub) and (ii) cross-setup (X-Set).", + "(iii) Kinetics-Skeleton, based on Kinetics [20], is large-scale dataset with 300,000 video clips and up to 400 human actions collected from YouTube. This dataset involves human daily activities, sports scenes and complex human-computer interaction scenes. Since Kinetics only provides raw videos without the skeletons, ST-GCN [64] uses the publicly available OpenPose toolbox [2] to estimate and extract the location of 18 human body joints on every frame in the clips. We use their released skeleton data to evaluate our model. 
Following the standard evaluation protocol, we report the Top-1 and Top-5 accuracies on the validation set. (iv) Northwestern-UCLA [51] was captured by 3 Kinect cameras simultaneously from multiple viewpoints. It contains 1494 video clips covering 10 actions. Each action is performed by 10 different subjects. We follow the same evaluation protocol as [51]: training split is formed from the first two cameras, and testing split from the last camera." + ], + "bbox": [ + 75, + 138, + 467, + 579 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Experimental Setup", + "text_level": 1, + "bbox": [ + 76, + 589, + 266, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use PyTorch and $1\\times$ Titan RTX 3090 for experiments. We use the Stochastic Gradient Descent (SGD) with momentum 0.9, cross-entropy as the loss, weight decay of 0.0001 and batch size of 32. The learning rate is set to 0.1 initially. On NTU-60 and NTU-120, the learning rate is divided by 10 at the 40th and 50th epoch, and the training process ends at the 60th epoch. On Kinetics-Skeleton, the learning rate is divided by 10 at the 50th and 60th epoch, and the training finishes at the 80th epoch. We took $20\\%$ of training set for validation to tune hyperparameters. All models have fixed hyperparameters with 2 and 4 layers for NTU-60/NTU-120 and Kinetics-Skeleton, respectively. The hidden dimensions is set to 16 for all 3 datasets. We use 4 attention heads for NTU-60 and NTU-120, and 8 attention heads for Kinetics-Skeleton. To form each video temporal block, we simply choose temporal block size to be 10 and stride to be 5 to allow a $50\\%$ overlap between consecutive temporal blocks. For Northwestern-UCLA, the batch size is 16. We adopted the data pre-processing in [6].", + "bbox": [ + 75, + 613, + 467, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/30b5d18fb5e789cf5aab50af498c687be20410e2f8abb43c67796441546111d7.jpg", + "table_caption": [ + "Table 1. Search for the single best order $n$ of hypergraph (except for $n = 3 \\& 4$ where we check if $n = 3 \\& 4$ are complementary)." + ], + "table_footnote": [], + "table_body": "
Order-n | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc.
n = 1 | 78.5 | 86.3 | 75.3 | 77.9 | 32.0
n = 2 | 83.0 | 89.2 | 86.2 | 88.3 | 37.1
n = 3 | 91.3 | 97.0 | 87.5 | 89.7 | 39.5
n = 4 | 91.5 | 97.1 | 87.8 | 90.0 | 40.1
n = 5 | 91.4 | 97.3 | 87.8 | 90.0 | 40.3
n = 3 & 4 | 91.6 | 97.2 | 87.6 | 90.3 | 40.5
", + "bbox": [ + 517, + 122, + 870, + 229 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/aa13d7f6d9ce50cc957bbdaf9763b3ca72cd0074c3a6ba2cada4e6aa4fc58b4e.jpg", + "table_caption": [ + "Table 2. Evaluations of our model variants with/without MP and/or TP. Baseline in the table denotes the backbone (MLP unit + HoTs) without the use of either MP or TP module." + ], + "table_footnote": [], + "table_body": "
Variants | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc.
Baseline | 89.8 | 91.4 | 86.5 | 87.0 | 38.6
+ TP only | 91.2 | 93.8 | 87.5 | 88.6 | 39.8
+ MP only | 92.0 | 94.3 | 88.7 | 89.7 | 40.3
+ MP→TP | 93.0 | 96.1 | 90.8 | 91.7 | 45.7
+ TP→MP | 92.6 | 95.8 | 90.2 | 91.1 | 44.0
+ 2-branch (3Mformer) | 94.8 | 98.7 | 92.0 | 93.8 | 48.3
", + "bbox": [ + 501, + 306, + 890, + 409 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f2a642a29b0b5a90add2848cc3d6a8e473c9a3478a97cacea725338b04209982.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 503, + 436, + 588, + 505 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e71bc19dcd48ddc53580575a60c07d8346dff3cc584340aa6a1d7bde738afbe3.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 591, + 436, + 676, + 505 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fa582b8c85b427cd431a2c30983a66c679cdbbde815668395b5ea8ff0439d87d.jpg", + "image_caption": [ + "(c)", + "Figure 2. Visualization of attention matrices. (a) single-mode attention matrix of 'channel-only' token, (b)-(d) coupled-mode attention matrices of 'channel-hyper-edge', 'order-channel-body joint' and 'channel-temporal block' tokens, respectively." + ], + "image_footnote": [], + "bbox": [ + 681, + 436, + 767, + 505 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8afabcce92664b7f25393b42d25ba665997d90ef09c75ae925c5daa0a6813233.jpg", + "image_caption": [ + "(d)" + ], + "image_footnote": [], + "bbox": [ + 772, + 436, + 861, + 505 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 614, + 651, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Search for the single best order $n$ . Table 1 shows our analysis regarding the best order $n$ . In general, increasing the order $n$ improves the performance (within $\\sim 0.5\\%$ on average), but causing higher computational cost, e.g., the number of hyper-edges for the skeletal hypergraph of order $n = 4$ is 3060 on Kinetics-Skeleton. We also notice that combining orders 3 and 4 yields very limited improvements. The main reasons are: (i) reasonable order $n$ , e.g., $n = 3$ or 4 improves accuracy as higher-order motion patterns are captured which are useful for classification-related tasks (ii) further increasing order $n$ , e.g., $n = 5$ introduces patterns in feature representations that rarely repeat even for the same action class. Considering the cost and performance, we choose the maximum order $r = 3$ ( $n = 1,2,3$ ) in the following experiments unless specified otherwise.", + "bbox": [ + 496, + 638, + 890, + 866 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Discussion on coupled-mode attention. Fig. 2 shows the visualization of some attention matrices in our 3Mformer,", + "bbox": [ + 500, + 869, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5625", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/90e1416c9626cf72abc9441ae9af223aaee48ee37c15ee8e0b7a5fff22357fe6.jpg", + "table_caption": [ + "Table 3. Experimental results on NTU-60, NTU-120 and Kinetics-Skeleton." + ], + "table_footnote": [], + "table_body": "
<table><tr><td rowspan="2" colspan="2">Method</td><td rowspan="2">Venue</td><td colspan="2">NTU-60</td><td colspan="2">NTU-120</td><td colspan="2">Kinetics-Skeleton</td></tr>
<tr><td>X-Sub</td><td>X-View</td><td>X-Sub</td><td>X-Set</td><td>Top-1</td><td>Top-5</td></tr>
<tr><td rowspan="11">Graph-based</td><td>TCN [22]</td><td>CVPRW'17</td><td>-</td><td>-</td><td>-</td><td>-</td><td>20.3</td><td>40.0</td></tr>
<tr><td>ST-GCN [64]</td><td>AAAI'18</td><td>81.5</td><td>88.3</td><td>70.7</td><td>73.2</td><td>30.7</td><td>52.8</td></tr>
<tr><td>AS-GCN [30]</td><td>CVPR'19</td><td>86.8</td><td>94.2</td><td>78.3</td><td>79.8</td><td>34.8</td><td>56.5</td></tr>
<tr><td>2S-AGCN [44]</td><td>CVPR'19</td><td>88.5</td><td>95.1</td><td>82.5</td><td>84.2</td><td>36.1</td><td>58.7</td></tr>
<tr><td>NAS-GCN [37]</td><td>AAAI'20</td><td>89.4</td><td>95.7</td><td>-</td><td>-</td><td>37.1</td><td>60.1</td></tr>
<tr><td>Sym-GNN [31]</td><td>TPAMI'22</td><td>90.1</td><td>96.4</td><td>-</td><td>-</td><td>37.2</td><td>58.1</td></tr>
<tr><td>Shift-GCN [6]</td><td>CVPR'20</td><td>90.7</td><td>96.5</td><td>85.9</td><td>87.6</td><td>-</td><td>-</td></tr>
<tr><td>MS-G3D [36]</td><td>CVPR'20</td><td>91.5</td><td>96.2</td><td>86.9</td><td>88.4</td><td>38.0</td><td>60.9</td></tr>
<tr><td>CTR-GCN [4]</td><td>ICCV'21</td><td>92.4</td><td>96.8</td><td>88.9</td><td>90.6</td><td>-</td><td>-</td></tr>
<tr><td>InfoGCN [9]</td><td>CVPR'22</td><td>93.0</td><td>97.1</td><td>89.8</td><td>91.2</td><td>-</td><td>-</td></tr>
<tr><td>PoseConv3D [12]</td><td>CVPR'22</td><td>94.1</td><td>97.1</td><td>86.9</td><td>90.3</td><td>47.7</td><td>-</td></tr>
<tr><td rowspan="4">Hypergraph-based</td><td>Hyper-GNN [15]</td><td>TIP'21</td><td>89.5</td><td>95.7</td><td>-</td><td>-</td><td>37.1</td><td>60.0</td></tr>
<tr><td>DHGCN [62]</td><td>CoRR'21</td><td>90.7</td><td>96.0</td><td>86.0</td><td>87.9</td><td>37.7</td><td>60.6</td></tr>
<tr><td>Selective-HCN [79]</td><td>ICMR'22</td><td>90.8</td><td>96.6</td><td>-</td><td>-</td><td>38.0</td><td>61.1</td></tr>
<tr><td>SD-HGCN [17]</td><td>ICONIP'21</td><td>90.9</td><td>96.7</td><td>87.0</td><td>88.2</td><td>37.4</td><td>60.5</td></tr>
<tr><td rowspan="9">Transformer-based</td><td>ST-TR [39]</td><td>CVIU'21</td><td>90.3</td><td>96.3</td><td>85.1</td><td>87.1</td><td>38.0</td><td>60.5</td></tr>
<tr><td>MTT [23]</td><td>LSP'21</td><td>90.8</td><td>96.7</td><td>86.1</td><td>87.6</td><td>37.9</td><td>61.3</td></tr>
<tr><td>4s-GSTN [18]</td><td>Symmetry'22</td><td>91.3</td><td>96.6</td><td>86.4</td><td>88.7</td><td>-</td><td>-</td></tr>
<tr><td>STST [73]</td><td>ACM MM'21</td><td>91.9</td><td>96.8</td><td>-</td><td>-</td><td>38.3</td><td>61.2</td></tr>
<tr><td>3Mformer (with avg-pool, ours)</td><td></td><td>92.0</td><td>97.3</td><td>88.0</td><td>90.1</td><td>43.1</td><td>65.2</td></tr>
<tr><td>3Mformer (with max-pool, ours)</td><td></td><td>92.1</td><td>97.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>3Mformer (with attn-pool, ours)</td><td></td><td>94.2</td><td>98.5</td><td>89.7</td><td>92.4</td><td>45.7</td><td>67.6</td></tr>
<tr><td>3Mformer (with tri-pool, ours)</td><td></td><td>94.0</td><td>98.5</td><td>91.2</td><td>92.7</td><td>47.7</td><td>71.9</td></tr>
<tr><td>3Mformer (with rank-pool, ours)</td><td></td><td>94.8</td><td>98.7</td><td>92.0</td><td>93.8</td><td>48.3</td><td>72.3</td></tr></table>
", + "bbox": [ + 76, + 109, + 893, + 486 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c53534b9138a06abacb121d77afe8a0a88fe2d064fb209ea931912cb3b2f85c4.jpg", + "image_caption": [ + "Figure 3. Evaluations of different single-mode (baseline) and coupled-mode tokens. We use a 3rd-order HoT with a standard Transformer, but we replace the scaled dot-product attention with coupled-mode tokens and coupled-mode attention." + ], + "image_footnote": [], + "bbox": [ + 76, + 521, + 470, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "which show diagonal and/or vertical patterns that are consistent with the patterns of the attention matrices found in standard Transformer trained on sequences, e.g., for natural language processing tasks [28, 49]. We also notice that the coupled-mode attention, e.g., 'channel-temporal block' captures much richer information compared to single mode attention, e.g., 'channel-only'. Our coupled-mode attention can be applied to different orders of tensor representations through simple matricization.", + "bbox": [ + 75, + 726, + 468, + 863 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Discussion on model variants. To show the effectiveness of the proposed MP and TP module, firstly, we compare", + "bbox": [ + 75, + 869, + 470, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TP only and MP only with the baseline (No MP or TP module). We use the TP module followed by an FC layer instead of MP as in $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ , where the FC layer takes the output from TP $(\\mathbb{R}^{d'N})$ and produces a vector in $\\mathbb{R}^{3d'J}$ passed to the classifier. Similarly, for MP only, we use the MP module followed by an average pooling layer instead of TP as in $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ , where the average layer takes output from MP $(\\mathbb{R}^{3d'J \\times \\tau})$ and generates a vector in $\\mathbb{R}^{3d'J}$ (pool along $\\tau$ blocks), passed to the classifier. Table 2 shows the results. With just the TP module, we outperform the baseline by $1.3\\%$ on average. With only the MP module, we outperform the baseline by $2.34\\%$ on average. These comparisons show that (i) CmSA in MP and TP are efficient for better performance (ii) MP performs better than TP which shows that 'channel-temporal block' token contains richer information than 'channel-hyper-edge' token. We also notice that $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ slightly outperforms $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ by $\\sim 1\\%$ , and the main reason is that $\\mathrm{MP} \\rightarrow \\mathrm{TP}$ has coupled-mode tokens 'channel-temporal block' and 'order-channel-joint' which attend 4 modes, whereas $\\mathrm{TP} \\rightarrow \\mathrm{MP}$ has 'channel-hyper-edge' and 'channel-only' tokens which attend only 2 modes. Fig. 3 shows a comparison of different coupled-mode tokens on 3 benchmark datasets. This also suggests that one should firstly perform attenuate", + "bbox": [ + 496, + 523, + 893, + 888 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5626", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion with coupled-mode 'channel-block' tokens, followed by weighted pooling along the hyper-edge mode, followed by attention with coupled-mode 'order-channel-body joint' and finalised by block-temporal pooling. 
Finally, with 2- branch (3Mformer), we further boost the performance by $2 - 4\\%$ , which shows that $\\mathrm{MP}\\rightarrow \\mathrm{TP}$ and $\\mathrm{TP}\\rightarrow \\mathrm{MP}$ are complementary branches. Below we use 2-branch (3Mformer) in the experiments (as in Fig. 1).", + "bbox": [ + 75, + 90, + 470, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison of pooling in TP. As shown in Table 3, average pooling (avg-pool) achieves similar performance (within $\\sim 0.5\\%$ difference) as maximum pooling (max-pool), second-order pooling (attn-pool) outperforms average and maximum pooling by $\\sim 1 - 2\\%$ and third-order pooling (tri-pool) outperforms second-order pooling by $\\sim 1\\%$ . Interestingly, rank pooling (rank-pool) achieves the best performance. We think it is reasonable as rank pooling strives to enforce the temporal order in the feature space to be preserved, e.g., it forces network to always preserve temporal progression of actions over time. With multiple attention modules, orderless statistics such as second- or third-order pooling may be too general.", + "bbox": [ + 75, + 215, + 470, + 412 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4. Comparisons with the State of the Arts", + "text_level": 1, + "bbox": [ + 76, + 422, + 411, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We compare our model with recent state-of-the-art methods. On the NTU-60 (Tab. 3), we obtain the top-1 accuracies of the two evaluation protocols during test stage. The methods in comparisons include popular graph-based [30, 31, 37, 44, 64] and hypergraph-based models [15, 17, 62, 79]. Our 3rd-order model outperforms all graph-based methods, and also outperforms existing hypergraph-based models such as Selective-HCN and SD-HGCN by $0.45\\%$ and $0.35\\%$ on average on X-Sub and X-View respectively. With 3Mformer for the fusion of multi-order features, our model further boosts the performance by $\\sim 3\\%$ and $1.5\\%$ on the two protocols.", + "bbox": [ + 75, + 446, + 468, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "It can be seen from Tab. 3 on NTU-60 that although some learned graph-based methods such as AS-GCN and 2S-AGCN can also capture the dependencies between human body joints, they only consider the pairwise relationship between body joints, which is the second-order interaction, and ignore the higher-order interaction between multiple body joints in form of hyper-edges, which may lose sensitivity to important groups of body joints. Our proposed 3Mformer achieves better performance by constructing a hypergraph from 2D/3D body joints as nodes for action recognition, thus capturing higher-order interactions of body joints to further improve the performance. Note that even with the average pooling, our model still achieves competitive results compared to its counterparts.", + "bbox": [ + 75, + 628, + 468, + 839 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For the NTU-120 dataset (Tab. 3), we obtain the top-1 performance on X-Sub and X-Set protocols. Our 2nd-order HoT alone outperforms graph-based models by $2 - 2.4\\%$ on average. For example, we outperform recent Shift-GCN by", + "bbox": [ + 75, + 839, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3ade81c222e702d89f2d649bb6af9c834bdcd74ab8ce96d393b5fc3e01ac8adb.jpg", + "table_caption": [ + "Table 4. Experimental results on Northwestern-UCLA." + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>Shift-GCN [6] (CVPR'20)</td><td>CTR-GCN [4] (ICCV'21)</td><td>InfoGCN [9] (CVPR'22)</td><td>2nd-order only (ours)</td><td>3rd-order only (ours)</td><td>3Mformer (ours)</td></tr>
<tr><td>acc. (%)</td><td>94.6</td><td>96.5</td><td>97.0</td><td>96.5</td><td>97.2</td><td>97.8</td></tr></table>
", + "bbox": [ + 501, + 108, + 890, + 147 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "0.3% and 0.7% on X-Sub and X-Set respectively. Moreover, our 3rd-order HoT alone outperforms SD-HGCN by 0.5% and 1.5% respectively on X-Sub and X-Set. With the 3Mformer for the fusion of multi-order feature maps, we obtain the new state-of-the-art results. Notice that our 3Mformer yields $92.0\\% / 93.8\\%$ on NTU-120 while [38] yields $80.5\\% / 81.7\\%$ as we explore the fusion of multiple orders of hyperedges and several coupled-token types capturing easy-to-complex dynamics of varying joint groups.", + "bbox": [ + 496, + 191, + 890, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As videos from the Kinetics dataset are processed by the OpenPose, the skeletons in the Kinetics-Skeleton dataset have defects which adversely affect the performance of the model. We show both top-1 and top-5 performance in Table 3 to better reflect the performance of our 3Mformer. STGCN is the first method based on GCN, our 2nd-order HoT alone achieves very competitive results compared to the very recent NAS-GCN and Sym-GNN. The 3rd-order HoT alone outperforms Hyper-GNN, SD-HGCN and SelectiveHCN by $3.4\\%$ , $3.1\\%$ and $2.9\\%$ respectively for top-1 accuracies. Moreover, fusing multi-order feature maps from multiple orders of hyper-edges via 3Mformer gives us the best performance on Kinetics-Skeleton with $48.3\\%$ for top-1, the new state-of-the-art result.", + "bbox": [ + 496, + 333, + 892, + 544 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 4 shows results on the Northwestern-UCLA dataset. Our 3Mformer is also effective on this dataset-it outperforms the current state-of-the-art InfoGCN by $0.8\\%$ .", + "bbox": [ + 496, + 547, + 893, + 594 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 500, + 619, + 627, + 635 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we model the skeleton data as hypergraph to capture higher-order information formed between groups of human body joints of orders 1, ..., $r$ . We use Higher-order Transformer (HoT) to learn higher-order information on hypergraphs of $r$ -order formed over 2D/3D human body joints. We also introduce a novel Multi-order Multi-mode Transformer (3Mformer) for the fusion of multi-order feature representations. Our end-to-end trainable 3Mformer outperforms state-of-the-art graph- and hypergraph-based models by a large margin on several benchmarks.", + "bbox": [ + 496, + 648, + 890, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. LW is supported by the Data61/ CSIRO PhD Scholarship. PK is in part funded by CSIRO's Machine Learning and Artificial Intelligence Future Science Platform (MLAI FSP) Spatiotemporal Activity.", + "bbox": [ + 496, + 839, + 893, + 902 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "5627", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 174, + 106 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: Transformers for multimodal self-supervised learning from raw video, audio and text. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. 
Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2", + "[2] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017. 1, 6", + "[3] Chun-Fu Chen, Quanfu Fan, and Rameswar Panda. Crossvit: Cross-attention multi-scale vision transformer for image classification. CoRR, abs/2103.14899, 2021. 3", + "[4] Yuxin Chen, Ziqi Zhang, Chunfeng Yuan, Bing Li, Ying Deng, and Weiming Hu. Channel-wise topology refinement graph convolution for skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13359-13368, 2021. 2, 7, 8", + "[5] Ke Cheng, Yifan Zhang, Congqi Cao, Lei Shi, Jian Cheng, and Hanqing Lu. Decoupling gcn with dropgraph module for skeleton-based action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020, pages 536-553, Cham, 2020. Springer International Publishing. 2", + "[6] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6, 7, 8", + "[7] Yi-Bin Cheng, Xipeng Chen, Dongyu Zhang, and Liang Lin. Motion-transformer: Self-supervised pre-training for skeleton-based action recognition. In Proceedings of the 2nd ACM International Conference on Multimedia in Asia, MMAsia '20, New York, NY, USA, 2021. Association for Computing Machinery. 2", + "[8] Anoop Cherian, Piotr Koniusz, and Stephen Gould. Higher-order pooling of cnn features via kernel linearization for action recognition. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 130-138, 2017. 2, 5", + "[9] Hyung-gun Chi, Myoung Hoon Ha, Seunggeun Chi, Sang Wan Lee, Qixing Huang, and Karthik Ramani. Infogcn: Representation learning for human skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20186-20196, June 2022. 7, 8", + "[10] Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J Colwell, and Adrian Weller. Rethinking attention with performers. In International Conference on Learning Representations, 2021. 4", + "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner," + ], + "bbox": [ + 78, + 116, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3", + "[12] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2969-2978, June 2022. 7", + "[13] Basura Fernando, Efstratios Gavves, Jose Oramas Oramas M., Amir Ghodrati, and Tinne Tuytelaars. Rank pooling for action recognition. IEEE Trans. Pattern Anal. Mach. Intell., 39(4):773-787, apr 2017. 2, 5", + "[14] Rohit Girdhar and Deva Ramanan. 
Attentional pooling for action recognition. In NIPS, 2017. 2, 5", + "[15] Xiaoke Hao, Jie Li, Yingchun Guo, Tao Jiang, and Ming Yu. Hypergraph neural network for skeleton-based action recognition. IEEE Transactions on Image Processing, 30:2263-2275, 2021. 2, 4, 7, 8, 13", + "[16] Ryota Hashiguchi and Toru Tamaki. Vision transformer with cross-attention by temporal shift for efficient action recognition, 2022. 3", + "[17] Changxiang He, Chen Xiao, Shuting Liu, Xiaofei Qin, Ying Zhao, and Xuedian Zhang. Single-skeleton and dual-skeleton hypergraph convolution neural networks for skeleton-based action recognition. In Teddy Mantoro, Minho Lee, Media Anugerah Ayu, Kok Wai Wong, and Achmad Nizar Hidayanto, editors, Neural Information Processing, pages 15-27, Cham, 2021. Springer International Publishing. 7, 8", + "[18] Yujuan Jiang, Zhaoneng Sun, Saisai Yu, Shuang Wang, and Yang Song. A graph skeleton transformer network for action recognition. Symmetry, 14(8), 2022. 7", + "[19] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are RNNs: Fast autoregressive transformers with linear attention. In Hal Daumé III and Aarti Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 5156-5165. PMLR, 13-18 Jul 2020. 4", + "[20] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset, 2017. 6", + "[21] Jinwoo Kim, Saeyoon Oh, and Seunghoon Hong. Transformers generalize deepsets and can be extended to graphs & hypergraphs. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2, 3, 4", + "[22] Tae Soo Kim and Austin Reiter. Interpretable 3d human action analysis with temporal convolutional networks. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1623-1631, 2017. 7", + "[23] Jun Kong, Yuhang Bian, and Min Jiang. Mtt: Multi-scale temporal transformer for skeleton-based action recognition. IEEE Signal Processing Letters, 29:528-532, 2022. 7" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5628", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Piotr Koniusz, Lei Wang, and Anoop Cherian. Tensor representations for action recognition. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 1, 2", + "[25] Piotr Koniusz, Lei Wang, and Ke Sun. High-order tensor pooling with attention for action recognition. arXiv, 2021. 1, 2, 5", + "[26] Piotr Koniusz and Hongguang Zhang. Power normalizations in fine-grained image, few-shot image and graph classification. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 2", + "[27] Matthew Korban and Xin Li. Ddgcn: A dynamic directed graph convolutional network for action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision – ECCV 2020, pages 761–776, Cham, 2020. Springer International Publishing. 2", + "[28] Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. Revealing the dark secrets of BERT. 
In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. 7", + "[29] John Boaz Lee, Ryan Rossi, and Xiangnan Kong. Graph classification using structural attention. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '18, page 1666-1674, New York, NY, USA, 2018. Association for Computing Machinery. 3", + "[30] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Actional-structural graph convolutional networks for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2, 7, 8", + "[31] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Symbiotic graph neural networks for 3d skeleton-based human action recognition and motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(6):3316-3333, 2022. 7, 8", + "[32] Hezheng Lin, Xing Cheng, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Qing Song, and Wei Yuan. CAT: cross attention in vision transformer. CoRR, abs/2106.05786, 2021. 3", + "[33] Tsung-Yu Lin, Subhransu Maji, and Piotr Koniusz. Second-order democratic aggregation. In ECCV, 2018. 2", + "[34] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C. Kot. Ntu rgb+d 120: A large-scale benchmark for 3d human activity understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2019. 6", + "[35] Shengyuan Liu, Pei Lv, Yuzhen Zhang, Jie Fu, Junjin Cheng, Wanqing Li, Bing Zhou, and Mingliang Xu. Semi-dynamic hypergraph neural network for 3d pose estimation. In Christian Bessiere, editor, Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 782-788. International Joint Conferences on Artificial Intelligence Organization, 7 2020. Main track. 2, 4, 13" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[36] Ziyu Liu, Hongwen Zhang, Zhenghao Chen, Zhiyong Wang, and Wanli Ouyang. Disentangling and unifying graph convolutions for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 7", + "[37] Wei Peng, Xiaopeng Hong, Haoyu Chen, and Guoying Zhao. Learning graph convolutional network for skeleton-based human action recognition by neural searching. Proceedings of the AAAI Conference on Artificial Intelligence, 34(03):2669-2676, Apr. 2020. 7, 8", + "[38] Wei Peng, Jingang Shi, Tuomas Varanka, and Guoying Zhao. Rethinking the st-gcns for 3d skeleton-based human action recognition. Neurocomputing, 454:45-53, 2021. 8", + "[39] Chiara Plizzari, Marco Cannici, and Matteo Matteucci. Skeleton-based action recognition via spatial and temporal transformer networks. Computer Vision and Image Understanding, 208-209:103219, 2021. 7", + "[40] Zhenyue Qin, Yang Liu, Pan Ji, Dongwoo Kim, Lei Wang, Bob McKay, Saeed Anwar, and Tom Gedeon. Fusing higher-order features in graph neural networks for skeleton-based action recognition. IEEE TNNLS, 2022. 1", + "[41] Saimunur Rahman, Piotr Koniusz, Lei Wang, Luping Zhou, Peyman Moghadam, and Changming Sun. Learning partial correlation based deep visual representation for image classification. In CVPR, 2023. 
2", + "[42] Kanchana Ranasinghe, Muzammal Naseer, Salman Khan, Fahad Shahbaz Khan, and Michael Ryoo. Self-supervised video transformer. In IEEE/CVF International Conference on Computer Vision and Pattern Recognition, June 2022. 2", + "[43] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+d: A large scale dataset for 3d human activity analysis. In IEEE Conference on Computer Vision and Pattern Recognition, June 2016. 6", + "[44] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In CVPR, 2019. 7, 8", + "[45] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Adasgn: Adapting joint number and model size for efficient skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 13413-13422, October 2021. 2", + "[46] Chenyang Si, Wentao Chen, Wei Wang, Liang Wang, and Tieniu Tan. An attention enhanced graph convolutional LSTM network for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1, 2", + "[47] Yi-Fan Song, Zhang Zhang, Caifeng Shan, and Liang Wang. Constructing stronger and faster baselines for skeleton-based action recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2022. 2", + "[48] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. CoRR, abs/2203.12602, 2022. 2", + "[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Ilia Polosukhin. Attention is all you need. In I. Guyon," + ], + "bbox": [ + 501, + 92, + 892, + 901 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5629", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 3, 4, 7", + "[50] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 3", + "[51] Jiang Wang, Xiaohan Nie, Yin Xia, Ying Wu, and Song-Chun Zhu. Cross-view action modeling, learning and recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2649-2656, 2014. 6", + "[52] Lei Wang. Analysis and evaluation of Kinect-based action recognition algorithms. Master's thesis, School of the Computer Science and Software Engineering, The University of Western Australia, 11 2017. 1", + "[53] Lei Wang, Du Q. Huynh, and Piotr Koniusz. A comparative review of recent kinet-based action recognition algorithms. TIP, 2019. 1", + "[54] Lei Wang, Du Q. Huynh, and Moussa Reda Mansour. Loss switching fusion with similarity search for video classification. ICIP, 2019. 1", + "[55] Lei Wang and Piotr Koniusz. Self-Supervising Action Recognition by Statistical Moment and Subspace Descriptors, page 4324-4333. Association for Computing Machinery, New York, NY, USA, 2021. 1", + "[56] Lei Wang and Piotr Koniusz. Temporal-viewpoint transportation plan for skeletal few-shot action recognition. In Proceedings of the Asian Conference on Computer Vision, pages 4176-4193, 2022. 1", + "[57] Lei Wang and Piotr Koniusz. 
Uncertainty-dtw for time series and sequences. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI, pages 176-195. Springer, 2022. 1", + "[58] Lei Wang, Piotr Koniusz, and Du Q. Huynh. Hallucinating IDT descriptors and I3D optical flow features for action recognition with cnns. In ICCV, 2019. 1", + "[59] Lei Wang, Jun Liu, and Piotr Koniusz. 3d skeleton-based few-shot action recognition with jeanie is not so naive. arXiv preprint arXiv:2112.12668, 2021. 1", + "[60] Qilong Wang, Zilin Gao, Jiangtao Xie, Wangmeng Zuo, and Peihua Li. Global gated mixture of second-order pooling for improving deep convolutional neural networks. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2, 5", + "[61] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. CoRR, abs/2112.09133, 2021. 2", + "[62] Jinfeng Wei, Yunxin Wang, Mengli Guo, Pei Lv, Xiaoshan Yang, and Mingliang Xu. Dynamic hypergraph convolutional networks for skeleton-based action recognition. CoRR, abs/2112.10570, 2021. 7, 8", + "[63] Xi Wei, Tianzhu Zhang, Yan Li, Yongdong Zhang, and Feng Wu. Multi-modality cross attention network for image and" + ], + "bbox": [ + 78, + 90, + 470, + 902 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "sentence matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3", + "[64] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition. In AAAI, 2018. 1, 2, 4, 6, 7, 8, 13", + "[65] Han Zhang, Yonghong Song, and Yuanlin Zhang. Graph convolutional LSTM model for skeleton-based action recognition. In 2019 IEEE International Conference on Multimedia and Expo (ICME), pages 412-417, 2019. 1", + "[66] Jiani Zhang, Xingjian Shi, Junyuan Xie, Hao Ma, Irwin King, and Dit-Yan Yeung. Gaan: Gated attention networks for learning on large and spatiotemporal graphs. In Amir Globerson and Ricardo Silva, editors, UAI, pages 339-349. AUAI Press, 2018. 3", + "[67] Pengfei Zhang, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jianru Xue, and Nanning Zheng. Semantics-guided neural networks for efficient skeleton-based human action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2", + "[68] Shan Zhang, Dawei Luo, Lei Wang, and Piotr Koniusz. Few-shot object detection by second-order pooling. In ACCV, 2020. 2", + "[69] Shan Zhang, Naila Murray, Lei Wang, and Piotr Koniusz. Time-reversed diffusion tensor transformer: A new tenet of few-shot object detection. In ECCV, 2022. 2", + "[70] Shan Zhang, Lei Wang, Naila Murray, and Piotr Koniusz. Kernelized few-shot object detection with efficient integral aggregation. In CVPR, 2022. 2", + "[71] Xikun Zhang, Chang Xu, and Dacheng Tao. Context aware graph convolution for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2", + "[72] Yongkang Zhang, Jun Li, Guoming Wu, Han Zhang, Zhiping Shi, Zhaoxun Liu, and Zizhang Wu. Temporal transformer networks with self-supervision for action recognition. CoRR, abs/2112.07338, 2021. 2", + "[73] Yuhan Zhang, Bo Wu, Wen Li, Lixin Duan, and Chuang Gan. 
Stst: Spatial-temporal specialized transformer for skeleton-based action recognition. In Proceedings of the 29th ACM International Conference on Multimedia, MM '21, page 3229-3237, New York, NY, USA, 2021. Association for Computing Machinery. 7", + "[74] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. COSTA: Covariance-preserving feature augmentation for graph contrastive learning. In KDD, 2022. 2", + "[75] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. Spectral feature augmentation for graph contrastive learning and beyond. In AAAI, 2023. 2", + "[76] Hao Zhu and Piotr Koniusz. Simple spectral graph convolution. In ICLR, 2021. 1", + "[77] Hao Zhu and Piotr Koniusz. Generalized laplacian eigenmaps. In NeurIPS, 2022. 1", + "[78] Hao Zhu, Ke Sun, and Piotr Koniusz. Contrastive laplacian eigenmaps. In NeurIPS, 2021. 1", + "[79] Yiran Zhu, Guangji Huang, Xing Xu, Yanli Ji, and Fumin Shen. Selective hypergraph convolutional networks for" + ], + "bbox": [ + 503, + 92, + 890, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "5630", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "skeleton-based action recognition. In Proceedings of the 2022 International Conference on Multimedia Retrieval, ICMR '22, page 518-526, New York, NY, USA, 2022. Association for Computing Machinery. 7, 8", + "[80] Gao Zilin, Xie Jiangtao, Wang Qilong, and Li Peihua. Global second-order pooling convolutional networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5" + ], + "bbox": [ + 78, + 90, + 468, + 204 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "5631", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_model.json b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..82b3b1c96b2c7e7cae96033d05ba07be7dbe2608 --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_model.json @@ -0,0 +1,2677 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.232, + 0.131, + 0.744, + 0.177 + ], + "angle": 0, + "content": "3Mformer: Multi-order Multi-mode Transformer for Skeletal Action Recognition" + }, + { + "type": "text", + "bbox": [ + 0.346, + 0.203, + 0.623, + 0.221 + ], + "angle": 0, + "content": "Lei Wang†,§ Piotr Koniusz*,§,†" + }, + { + "type": "text", + "bbox": [ + 0.29, + 0.221, + 0.681, + 0.239 + ], + "angle": 0, + "content": "†Australian National University, §Data61♥CSIRO" + }, + { + "type": "text", + "bbox": [ + 0.336, + 0.239, + 0.631, + 0.256 + ], + "angle": 0, + "content": "\\$firstname_lastname@data61.csiro.au" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.292, + 0.314, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.324, + 0.473, + 0.704 + ], + "angle": 0, + "content": "Many skeletal action recognition models use GCNs to represent the human body by 3D body joints connected body parts. GCNs aggregate one- or few-hop graph neighbourhoods, and ignore the dependency between not linked body joints. We propose to form hypergraph to model hyperedges between graph nodes (e.g., third- and fourth-order hyper-edges capture three and four nodes) which help capture higher-order motion patterns of groups of body joints. We split action sequences into temporal blocks, Higher-order Transformer (HoT) produces embeddings of each temporal block based on (i) the body joints, (ii) pairwise links of body joints and (iii) higher-order hyper-edges of skeleton body joints. We combine such HoT embeddings of hyper-edges of orders 1,..., \\( r \\) by a novel Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged to achieve coupled-mode attention on coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. The first module, called Multi-order Pooling (MP), additionally learns weighted aggregation along the hyper-edge mode, whereas the second module, Temporal block Pooling (TP), aggregates along the temporal block' mode. Our end-to-end trainable network yields state-of-the-art results compared to GCN-, transformer- and hypergraph-based counterparts." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.732, + 0.21, + 0.749 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.758, + 0.47, + 0.865 + ], + "angle": 0, + "content": "Action Recognition has applications in video surveillance, human-computer interaction, sports analysis, and virtual reality [24, 25, 40, 52-59]. Different from video-based methods which mainly focus on modeling the spatiotemporal representations from RGB frames and/or optical flow [25, 52-55, 58], skeleton sequences, representing a spatio-temporal evolution of 3D body joints, have been" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.293, + 0.895, + 0.55 + ], + "angle": 0, + "content": "proven robust against sensor noises and effective in action recognition while being computationally and storage efficient [24, 40, 52, 53, 56, 57, 59]. The skeleton data is usually obtained by either localization of 2D/3D coordinates of human body joints with the depth sensors or pose estimation algorithms applied to videos [2]. Skeleton sequences enjoy (i) simple structural connectivity of skeletal graph and (ii) temporal continuity of 3D body joints evolving in time. 
While temporal evolution of each body joint is highly informative, embeddings of separate body joints are insensitive to relations between body parts. Moreover, while the links between adjacent 3D body joints (following the structural connectivity) are very informative as they model relations, these links represent highly correlated nodes in the sense of their temporal evolution. Thus, modeling larger groups of 3D body joints as hyper-edges can capture more complex spatio-temporal motion dynamics." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.552, + 0.895, + 0.779 + ], + "angle": 0, + "content": "The existing graph-based models mainly differ by how they handle temporal information. Graph Neural Network (GNN) may encode spatial neighborhood of the node followed by aggregation by LSTM [46, 65]. Alternatively, Graph Convolutional Network (GCN) may perform spatio-temporal convolution in the neighborhood of each node [64]. Spatial GCNs perform convolution within one or two hop distance of each node, e.g., spatio-temporal GCN model called ST-GCN [64] models spatio-temporal vicinity of each 3D body joint. As ST-GCN applies convolution along structural connections (links between body joints), structurally distant joints, which may cover key patterns of actions, are largely ignored. ST-GCN captures ever larger neighborhoods as layers are added but suffers from oversmoothing that can be mitigated by linear GCNs [76-78]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Human actions are associated with interaction groups of skeletal joints, e.g., wrist alone, head-wrist, head-wrist-ankles, etc. The impact of these groups of joints on each action differs, and the degree of influence of each joint should be learned. Accordingly, designing a better model for skeleton data is vital given the topology of skeleton graph is suboptimal. While GCN can be applied to a fully-connected graph (i.e., 3D body joints as densely connected" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.875, + 0.223, + 0.888 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.888, + 0.448, + 0.901 + ], + "angle": 0, + "content": "For brevity, we write \\(\\tau\\) temporal blocks per sequence but \\(\\tau\\) varies." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.875, + 0.448, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "5620" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.121 + ], + "angle": 0, + "content": "graph nodes), Higher-order Transformer (HoT) [21] has been proven more efficient." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.122, + 0.468, + 0.242 + ], + "angle": 0, + "content": "Thus, we propose to use hypergraphs with hyper-edges of order 1 to \\( r \\) to effectively represent skeleton data for action recognition. Compared to GCNs, our encoder contains an MLP followed by three HoT branches that encode first, second- and higher-order hyper-edges, i.e., set of body joints, edges between pairs of nodes, hyper-edges between triplets of nodes, etc. Each branch has its own learnable parameters, and processes temporal blocks2 one-by-one." 
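As a rough illustration of the hyper-edge construction just described, the following minimal Python sketch (not from the paper's codebase; the joint count and maximum order are illustrative placeholders) enumerates hyper-edges of orders 1 to r over J body joints and checks that their counts grow as the binomial coefficients C(J, m):

from itertools import combinations
from math import comb

def hyper_edges(num_joints: int, max_order: int):
    """Enumerate hyper-edges of orders 1..max_order over the body joints:
    order 1 = single joints, order 2 = pairwise edges, order 3 = joint triplets, etc."""
    joints = range(num_joints)
    return {m: list(combinations(joints, m)) for m in range(1, max_order + 1)}

# e.g. 25 body joints (as in NTU skeletons) and hyper-edges up to order r = 3
edges = hyper_edges(25, 3)
for m, e_m in edges.items():
    assert len(e_m) == comb(25, m)      # C(25,1)=25, C(25,2)=300, C(25,3)=2300
    print(m, len(e_m))

The rapid growth of these counts with the order is also why the model caps the maximum order in practice.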
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.243, + 0.468, + 0.349 + ], + "angle": 0, + "content": "We notice that (i) the number of hyper-edges of \\(J\\) joints grows rapidly with order \\(r\\), i.e., \\(\\binom{J}{i}\\) for \\(i = 1, \\dots, r\\), embeddings of the highest order dominate lower orders in terms of volume if such embeddings are merely concatenated, and (ii) long-range temporal dependencies of feature maps are insufficiently explored, as sequences are split into \\(\\tau\\) temporal blocks for computational tractability." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.351, + 0.468, + 0.591 + ], + "angle": 0, + "content": "Merely concatenating outputs of HoT branches of orders 1 to \\( r \\), and across \\( \\tau \\) blocks, is sub-optimal. Thus, our Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged, realizes a variation of coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. As HoT operates block-by-block, 'channel-temporal block' tokens and weighted hyper-edge aggregation in Multi-order Pooling (MP) help combine information flow block-wise. Various coupled-mode tokens help improve results further due to different focus of each attention mechanism. As the block-temporal mode needs to be aggregated (number of blocks varies across sequences), Temporal block Pooling (TP) can use rank pooling [13], second-order [14, 26, 33, 41, 60, 68, 80] or higher-order pooling [8, 24, 25, 69, 70]." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.6, + 0.472, + 0.613 + ], + "angle": 0, + "content": "In summary, our main contributions are listed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.628, + 0.468, + 0.718 + ], + "angle": 0, + "content": "i. We model the skeleton data as hypergraph of orders 1 to \\( r \\) (set, graph and/or hypergraph), where human body joints serve as nodes. Higher-order Transformer embeddings of such formed hyper-edges represent various groups of 3D body joints and capture various higher-order dynamics important for action recognition." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.729, + 0.468, + 0.865 + ], + "angle": 0, + "content": "ii. As HoT embeddings represent individual hyper-edge order and block, we introduce a novel Multi-order Multi-mode Transformer (3Mformer) with two modules, Multi-order Pooling and Temporal block Pooling. Their goal is to form coupled-mode tokens such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.628, + 0.468, + 0.865 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.875, + 0.468, + 0.9 + ], + "angle": 0, + "content": "2Each temporal block enjoys a locally factored out (removed) temporal mode, which makes each block representation compact." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.137 + ], + "angle": 0, + "content": "Our 3Mformer outperforms other GCN- and hypergraph-based models on NTU-60, NTU-120, Kinetics-Skeleton and Northwestern-UCLA by a large margin." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.153, + 0.642, + 0.168 + ], + "angle": 0, + "content": "2. 
Related Work" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.179, + 0.891, + 0.207 + ], + "angle": 0, + "content": "Below we describe popular action recognition models for skeletal data." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.213, + 0.892, + 0.5 + ], + "angle": 0, + "content": "Graph-based models. Popular GCN-based models include the Attention enhanced Graph Convolutional LSTM network (AGC-LSTM) [46], the Actional-Structural GCN (AS-GCN) [30], Dynamic Directed GCN (DDGCN) [27], Decoupling GCN with DropGraph module [5], ShiftGCN [6], Semantics-Guided Neural Networks (SGN) [67], AdaSGN [45], Context Aware GCN (CA-GCN) [71], Channel-wise Topology Refinement Graph Convolution Network (CTR-GCN) [4] and a family of Efficient GCN (EfficientGCN-Bx) [47]. Although GCN-based models enjoy good performance, they have shortcomings, e.g., convolution and/or pooling are applied over one- or few-hop neighborhoods, e.g., ST-GCN [64], according to the human skeleton graph (body joints linked up according to connectivity of human body parts). Thus, indirect links between various 3D body joints such as hands and legs are ignored. In contrast, our model is not restricted by the structure of typical human body skeletal graph. Instead, 3D body joints are nodes which form hyper-edges of orders 1 to \\( r \\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.892, + 0.685 + ], + "angle": 0, + "content": "Hypergraph-based models. Pioneering work on capturing groups of nodes across time uses tensors [24] to represent the 3D human body joints to exploit the kinematic relations among the adjacent and non-adjacent joints. Representing the human body as a hypergraph is adopted in [35] via a semi-dynamic hypergraph neural network that captures richer information than GCN. A hypergraph GNN [15] captures both spatio-temporal information and higher-order dependencies for skeleton-based action recognition. Our work is somewhat closely related to these works, but we jointly use hypergraphs of order 1 to \\( r \\) to obtain rich hyper-edge embeddings based on Higher-order Transformers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.689, + 0.892, + 0.9 + ], + "angle": 0, + "content": "Transformer-based models. Action recognition with transformers includes self-supervised video transformer [42] that matches the features from different views (a popular strategy in self-supervised GCNs [74, 75]), the end-to-end trainable Video-Audio-Text-Transformer (VATT) [1] for learning multi-model representations from unlabeled raw video, audio and text through the multimodal contrastive losses, and the Temporal Transformer Network with Self-supervision (TTSN) [72]. Motion-Transformer [7] captures the temporal dependencies via a self-supervised pre-training on human actions, Masked Feature Prediction (MaskedFeat) [61] pre-trained on unlabeled videos with MViT-L learns abundant visual representations, and video-masked autoencoder (VideoMAE) [48] with vanilla ViT" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "5621" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.891, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.222, + 0.895, + 0.364 + ], + "angle": 0, + "content": "Figure 1. Pipeline overview. Each sequence is split into \\(\\tau\\) temporal blocks \\(\\mathbf{B}_1, \\dots, \\mathbf{B}_{\\tau}\\). 
Subsequently, each block is embedded by a simple MLP into \\(\\mathbf{X}_1, \\dots, \\mathbf{X}_{\\tau}\\), which are passed to Higher-order Transformers (HoT (\\(n = 1, \\dots, r\\))) in order to obtain feature tensors \\(\\Phi_1, \\dots, \\Phi_{\\tau}\\). These tensors are subsequently concatenated by \\(\\odot\\) along the hyper-edge mode into a multi-order feature tensor \\(\\mathcal{M}\\). The final step is a Multi-order Multi-mode Transformer (3Mformer from Section 4), which contains two complementary branches, MP \\(\\rightarrow\\) TP and TP \\(\\rightarrow\\) MP, whose outputs are concatenated by \\(\\odot\\) and passed to the classifier. MP and TP perform the Coupled-mode Self-Attention (CmSA) with the so-called coupled-mode tokens, based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge' and 'channel-only' pairs. To this end, MP contains also weighted pooling along hyper-edge mode by learnable matrix \\(\\mathbf{H}\\) (and \\(\\mathbf{H}'\\) in another branch). TP contains also block-temporal pooling denoted by \\(g(\\cdot)\\) whose role is to capture block-temporal order with average, maximum, rank pooling, etc. In our experiments we show that such designed MP and TP are able to efficiently process hyper-edge feature representations from HoT branches. Appendix A shows full visualization of our 3Mformer." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.47, + 0.448 + ], + "angle": 0, + "content": "uses the masking strategy. In contrast to these works, we use three HoT branches of model [21], and we model hyperedges of orders 1 to \\( r \\) by forming several multi-mode token variations in 3Mformer." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.453, + 0.473, + 0.709 + ], + "angle": 0, + "content": "Attention. In order to improve feature representations, attention captures relationship between tokens. Natural language processing and computer vision have driven recent developments in attention mechanisms based on transformers [11, 49]. Examples include the hierarchical Cross Attention Transformer (CAT) [32], Cross-attention by Temporal Shift with CNNs [16], Cross-Attention Multi-Scale Vision Transformer (CrossViT) for image classification [3] and Multi-Modality Cross Attention (MMCA) Network for image and sentence matching [63]. In GNNs, attention can be defined over edges [50, 66] or over nodes [29]. In this work, we use the attention with hyper-edges of several orders from HoT branches serving as tokens, and coupled-mode attention with coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs formed in 3Mformer." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.728, + 0.206, + 0.745 + ], + "angle": 0, + "content": "3. Background" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.755, + 0.465, + 0.771 + ], + "angle": 0, + "content": "Below we describe foundations necessary for our work." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.775, + 0.47, + 0.867 + ], + "angle": 0, + "content": "Notations. \\(\\mathcal{I}_K\\) stands for the index set \\(\\{1,2,\\dots,K\\}\\). Regular fonts are scalars; vectors are denoted by lowercase boldface letters, e.g., \\(\\mathbf{x}\\); matrices by the uppercase boldface, e.g., \\(\\mathbf{M}\\); and tensors by calligraphic letters, e.g., \\(\\mathbf{M}\\). 
An \\(r\\)th-order tensor is denoted as \\(\\mathbf{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}\\), and the mode-\\(m\\) matricization of \\(\\mathbf{M}\\) is denoted as \\(\\mathbf{M}_{(m)} \\in \\mathbb{R}^{I_m \\times (I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Transformer layers [11, 49]. A transformer encoder layer \\(f: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}\\) consists of two sub-layers: (i) a self" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.386, + 0.893, + 0.449 + ], + "angle": 0, + "content": "attention \\(a: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}\\) and (ii) an element-wise feedforward MLP: \\(\\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}\\). For a set of \\(J\\) nodes with \\(\\mathbf{X} \\in \\mathbb{R}^{J \\times d}\\), where \\(\\mathbf{x}_i\\) is a feature vector of node \\(i\\), a transformer layer3 computes:" + }, + { + "type": "equation", + "bbox": [ + 0.59, + 0.459, + 0.892, + 0.502 + ], + "angle": 0, + "content": "\\[\na \\left(\\mathbf {x} _ {i}\\right) = \\mathbf {x} _ {i} + \\sum_ {h = 1} ^ {H} \\sum_ {j = 1} ^ {J} \\alpha_ {i j} ^ {h} \\mathbf {x} _ {j} \\mathbf {W} _ {h} ^ {V} \\mathbf {W} _ {h} ^ {O}, \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.591, + 0.505, + 0.892, + 0.523 + ], + "angle": 0, + "content": "\\[\nf (\\mathbf {x} _ {i}) = a (\\mathbf {x} _ {i}) + \\operatorname {M L P} (a (\\mathbf {X})) _ {i}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.534, + 0.892, + 0.584 + ], + "angle": 0, + "content": "where \\(H\\) and \\(d_H\\) denote respectively the number of heads and the head size, \\(\\alpha^h = \\sigma (\\mathbf{X}\\mathbf{W}_h^Q (\\mathbf{X}\\mathbf{W}_h^K)^\\top)\\) is the attention coefficient, \\(\\mathbf{W}_h^O\\in \\mathbb{R}^{d_H\\times d}\\), and \\(\\mathbf{W}_h^V,\\mathbf{W}_h^K,\\mathbf{W}_h^Q\\in \\mathbb{R}^{d\\times d_H}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.585, + 0.893, + 0.692 + ], + "angle": 0, + "content": "Higher-order transformer layers [21]. Let the HoT layer be \\(f_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}\\) with two sub-layers: (i) a higher-order self-attention \\(a_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}\\) and (ii) a feedforward \\(\\mathrm{MLP}_{n\\rightarrow n}:\\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d}\\). Moreover, let indexing vectors \\(\\mathbf{i}\\in \\mathcal{I}_J^m\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J\\) (\\(m\\) modes) and \\(\\mathbf{j}\\in \\mathcal{I}_J^n\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J\\) (\\(n\\) modes). 
For the input tensor \\(\\mathbf{X}\\in \\mathbb{R}^{J^m\\times d}\\) with hyper-edges of order \\(m\\), a HoT layer evaluates:" + }, + { + "type": "equation", + "bbox": [ + 0.514, + 0.703, + 0.892, + 0.744 + ], + "angle": 0, + "content": "\\[\na _ {m \\rightarrow n} (\\mathbf {X}) _ {j} = \\sum_ {h = 1} ^ {H} \\sum_ {\\mu} \\sum_ {i} \\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} \\mathbf {X} _ {i} \\mathbf {W} _ {h, \\mu} ^ {V} \\mathbf {W} _ {h, \\mu} ^ {O} \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.747, + 0.892, + 0.765 + ], + "angle": 0, + "content": "\\[\n\\mathrm {M L P} _ {n \\rightarrow n} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right) = \\mathrm {L} _ {n \\rightarrow n} ^ {2} \\left(\\operatorname {R e L U} \\left(\\mathrm {L} _ {n \\rightarrow n} ^ {1} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right)\\right)\\right), \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.516, + 0.768, + 0.892, + 0.785 + ], + "angle": 0, + "content": "\\[\nf _ {m \\rightarrow n} (\\mathbf {X}) = a _ {m \\rightarrow n} (\\mathbf {X}) + \\operatorname {M L P} _ {n \\rightarrow n} (a _ {m \\rightarrow n} (\\mathbf {X})), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.797, + 0.892, + 0.879 + ], + "angle": 0, + "content": "where \\(\\pmb{\\alpha}^{h,\\mu}\\in \\mathbb{R}^{J^{m + n}}\\) is the so-called attention coefficient tensor with multiple heads, and \\(\\pmb{\\alpha}_{\\mathbf{i},\\mathbf{j}}^{h,\\mu}\\in \\mathbb{R}^{J}\\) is a vector, \\(\\mathbf{W}_{h,\\mu}^V\\in \\mathbb{R}^{d\\times d_H}\\) and \\(\\mathbf{W}_{h,\\mu}^{O}\\in \\mathbb{R}^{d_{H}\\times d}\\) are learnable parameters. Moreover, \\(\\mu\\) indexes over the so-called equivalence classes of order\\((m + n)\\) in the same partition of nodes, \\(\\mathrm{L}_{n\\to n}^{1}\\colon \\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d_F}\\)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.887, + 0.851, + 0.901 + ], + "angle": 0, + "content": "\\( {}^{3} \\) Normalizations after \\( a\\left( \\cdot \\right) \\) & \\( \\operatorname{MLP}\\left( \\cdot \\right) \\) are omitted for simplicity." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5622" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.09, + 0.47, + 0.12 + ], + "angle": 0, + "content": "and \\(\\mathrm{L}_{n\\to n}^2\\colon \\mathbb{R}^{J^n\\times d_F}\\to \\mathbb{R}^{J^n\\times d}\\) are equivariant linear layers and \\(d_{F}\\) is the hidden dimension." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.121, + 0.47, + 0.168 + ], + "angle": 0, + "content": "To compute each attention tensor \\(\\alpha^{h,\\mu} \\in \\mathbb{R}^{J^{m + n}}\\) from the input tensor \\(\\mathbf{X} \\in \\mathbb{R}^{J^m \\times d}\\) of hyper-edges of order \\(m\\), from the higher-order query and key, we obtain:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.177, + 0.47, + 0.224 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} = \\left\\{ \\begin{array}{c c} \\frac {\\sigma \\left(\\mathbf {Q} _ {j} ^ {h , \\mu} , \\mathbf {K} _ {i} ^ {h , \\mu}\\right)}{Z _ {j}} & (i, j) \\in \\mu \\\\ 0 & \\text {o t h e r w i s e}, \\end{array} \\right. 
\\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.233, + 0.469, + 0.325 + ], + "angle": 0, + "content": "where \\(\\mathbf{Q}^{\\mu} = \\mathrm{L}_{m\\to n}^{\\mu}(\\mathbf{X})\\) \\(\\mathbf{K}^{\\mu} = \\mathrm{L}_{m\\to m}^{\\mu}(\\mathbf{X})\\) , and normalization constant \\(Z_{j} = \\sum_{i:(i,j)\\in \\mu}\\sigma (\\mathbf{Q}_{j}^{\\mu},\\mathbf{K}_{i}^{\\mu})\\) . Finally, kernel attention in Eq. (6) can be approximated with RKHS feature maps \\(\\psi \\in \\mathbb{R}_+^{d_K}\\) for efficacy as \\(d_{K}\\ll d_{H}\\) . Specifically, we have \\(\\sigma (\\mathbf{Q}_j^{h,\\mu},\\mathbf{K}_i^{h,\\mu})\\approx \\psi (\\mathbf{Q}_j^{h,\\mu})^\\top \\psi (\\mathbf{K}_i^{h,\\mu})\\) as in [10, 19]. We choose the performer kernel [10] due to its good performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.325, + 0.469, + 0.37 + ], + "angle": 0, + "content": "As query and key tensors are computed from the input tensor \\(\\mathbf{X}\\) using the equivariant linear layers, the transformer encoder layer \\(f_{m\\to n}\\) satisfies the permutation equivariance." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.383, + 0.185, + 0.401 + ], + "angle": 0, + "content": "4. Approach" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.408, + 0.469, + 0.468 + ], + "angle": 0, + "content": "Skeletal Graph [64] and Skeletal Hypergraph [15,35] are popular for modeling edges and hyper-edges. In this work, we use the Higher-order Transformer (HoT) [21] as a backbone encoder." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.478, + 0.24, + 0.492 + ], + "angle": 0, + "content": "4.1. Model Overview" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.501, + 0.469, + 0.698 + ], + "angle": 0, + "content": "Fig. 1 shows that our framework contains a simple 3-layer MLP unit (FC, ReLU, FC, ReLU, Dropout, FC), three HoT blocks with each HoT for each type of input (i.e., body joint feature set, graph and hypergraph of body joints), followed by Multi-order Multi-mode Transformer (3Mformer) with two modules (i) Multi-order Pooling (MP) and (ii) Temporal block Pooling (TP). The goal of 3Mformer is to form coupled-mode tokens (explained later) such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation. Their outputs are further concatenated and passed to an FC layer for classification." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.701, + 0.469, + 0.807 + ], + "angle": 0, + "content": "MLP unit. The MLP unit takes \\( T \\) neighboring frames, each with \\( J \\) 2D/3D skeleton body joints, forming one temporal block. In total, depending on stride \\( S \\), we obtain some \\( \\tau \\) temporal blocks (a block captures the short-term temporal evolution). In contrast, the long-term temporal evolution is modeled with HoT and 3Mformer. Each temporal block is encoded by the MLP into a \\( d \\times J \\) dimensional feature map." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.469, + 0.871 + ], + "angle": 0, + "content": "HoT branches. We stack \\(r\\) branches of HoT, each taking embeddings \\(\\mathbf{X}_t\\in \\mathbb{R}^{d\\times J}\\) where \\(t\\in I_{\\tau}\\) denotes a temporal block. HoT branches output hyper-edge feature representations of size \\(m\\in I_r\\) as \\(\\Phi_m^{\\prime}\\in \\mathbb{R}^{J^m\\times d'}\\) for order \\(m\\in I_r\\)." 
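The shape bookkeeping for the per-block, per-order HoT outputs, together with the mode swap and 'upper-triangular' extraction described in the next paragraph, can be sketched in numpy as follows; the toy sizes, the random stand-ins for the HoT outputs and the helper names are illustrative only and not the authors' implementation:

import numpy as np
from itertools import combinations
from math import comb

J, d_out, tau, r = 25, 16, 4, 3   # joints, HoT output channels d', temporal blocks, max order

# Stand-ins for HoT branch outputs: phi_prime[m][t] has shape (J,)*m + (d_out,),
# i.e. one d'-dimensional feature per order-m hyper-edge slot of temporal block t.
rng = np.random.default_rng(0)
phi_prime = {m: [rng.standard_normal((J,) * m + (d_out,)) for _ in range(tau)]
             for m in range(1, r + 1)}

def upper_tri_channels_first(x, m):
    """Keep only entries with strictly increasing joint indices (i1 < ... < im)
    and move channels first: (J,)*m + (d',) -> (d', C(J, m))."""
    idx = tuple(np.array(a) for a in zip(*combinations(range(x.shape[0]), m)))
    return x[idx].T

# Phi[m] has shape (d', C(J, m), tau); concatenating along the hyper-edge mode
# gives the multi-order tensor of shape (d', N, tau) with N = sum_m C(J, m).
Phi = {m: np.stack([upper_tri_channels_first(phi_prime[m][t], m) for t in range(tau)], axis=-1)
       for m in range(1, r + 1)}
M = np.concatenate([Phi[m] for m in range(1, r + 1)], axis=1)
assert M.shape == (d_out, sum(comb(J, m) for m in range(1, r + 1)), tau)   # (16, 2625, 4)

In the trained model the per-block features come from the HoT branches rather than a random generator; the sketch only verifies how the multi-order feature tensor is assembled.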
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "For the first-, second- and higher-order stream outputs \\(\\Phi_1',\\dots,\\Phi_r'\\), we (i) swap feature channel and hyper-edge" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.186 + ], + "angle": 0, + "content": "modes, (ii) extract the upper triangular of tensors, and we concatenate along the block-temporal mode, so we have \\(\\Phi_{m}\\in \\mathbb{R}^{d^{\\prime}\\times N_{E_{m}}\\times \\tau}\\), where \\(N_{E_m} = \\binom{J}{m}\\). Subsequently, we concatenate \\(\\Phi_1,\\ldots ,\\Phi_r\\) along the hyper-edge mode and obtain a multi-order feature tensor \\(\\pmb {M}\\in \\mathbb{R}^{d^{\\prime}\\times N\\times r}\\) where the total number of hyper-edges across all orders is \\(N = \\sum_{m = 1}^{r}\\binom{J}{m}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.187, + 0.892, + 0.262 + ], + "angle": 0, + "content": "3Mformer. Our Multi-order Multi-mode Transformer (3Mformer) with Coupled-mode Self-Attention (CmSA) is used for the fusion of information flow inside the multi-order feature tensor \\(\\mathcal{M}\\), and finally, the output from 3Mformer is passed to a classifier for classification." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.272, + 0.762, + 0.287 + ], + "angle": 0, + "content": "4.2. Coupled-mode Self-Attention" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.298, + 0.892, + 0.524 + ], + "angle": 0, + "content": "Coupled-mode tokens. We are inspired by the attentive regions of the one-class token in the standard Vision Transformer (ViT) [49] that can be leveraged to form a class-agnostic localization map. We investigate if the transformer model can also effectively capture the coupled-mode attention for more discriminative classification tasks, e.g., tensorial skeleton-based action recognition by learning the coupled-mode tokens within the transformer. To this end, we propose a Multi-order Multi-mode Transformer (3Mformer), which uses coupled-mode tokens to jointly learn various higher-order motion dynamics among channel-, block-temporal-, body joint- and order-mode. Our 3Mformer can successfully produce coupled-mode relationships from CmSA mechanism corresponding to different tokens. Below we introduce our CmSA." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.525, + 0.892, + 0.722 + ], + "angle": 0, + "content": "Given the order- \\(r\\) tensor \\(\\mathcal{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}\\), to form the joint mode token, we perform the mode-\\(m\\) matricization of \\(\\mathcal{M}\\) to obtain \\(\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top} \\in \\mathbb{R}^{(I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r) \\times I_m}\\), and the coupled-token for \\(\\mathbf{M}\\) is formed. For example, for a given 3rd-order tensor that has feature channel-, hyper-edge- and temporal block-mode, we can form 'channel-temporal block', 'channel-hyper-edge (any order)' and 'channel-only' pairs; and if the given tensor is used as input and outputs a new tensor which produces new mode, e.g., body joint-mode, we can form the 'order-channel-body joint' token. In the following sections, for simplicity, we use reshape for the matricization of tensor to form different types of coupled-mode tokens. 
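Since the coupled-mode tokens above are obtained by mode-\(m\) matricization, i.e., a reshape, a short sketch (ours, with toy sizes) of this operation is given below.

```python
import torch

def matricize(tensor, m):
    """Return M = (mode-m unfolding)^T: mode m becomes the columns, all remaining modes are flattened into rows."""
    return tensor.movedim(m, -1).reshape(-1, tensor.shape[m])

M3 = torch.randn(16, 25, 11)     # toy d' x N x tau (channel, hyper-edge, temporal block)
mp_tokens = matricize(M3, 1)     # (d'*tau) x N: 'channel-temporal block' tokens, columns over hyper-edges
tp_tokens = matricize(M3, 2)     # (d'*N) x tau: 'channel-hyper-edge' tokens, columns over temporal blocks
```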
Our CmSA is given as:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.732, + 0.892, + 0.767 + ], + "angle": 0, + "content": "\\[\na (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} \\mathbf {K} ^ {\\top}}{\\sqrt {d _ {K}}}\\right) \\mathbf {V}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.778, + 0.892, + 0.901 + ], + "angle": 0, + "content": "where \\(\\sqrt{d_K}\\) is the scaling factor, \\(\\mathbf{Q} = \\mathbf{W}^q\\mathbf{M}\\), \\(\\mathbf{K} = \\mathbf{W}^k\\mathbf{M}\\) and \\(\\mathbf{V} = \\mathbf{W}^{\\nu}\\mathbf{M}\\) are the query, key and value, respectively, and \\(\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top}\\). Moreover, \\(\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times I_m}\\) and \\(\\mathbf{W}^q, \\mathbf{W}^k, \\mathbf{W}^\\nu \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times (I_1\\dots I_{m-1}I_{m+1}\\dots I_r)}\\) are learnable weights. We notice that various coupled-mode tokens have different 'focus' of attention mechanisms, and we apply them in our 3Mformer for the fusion of multi-order feature representations." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5623" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.091, + 0.405, + 0.106 + ], + "angle": 0, + "content": "4.3. Multi-order Multi-mode Transformer" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.115, + 0.47, + 0.192 + ], + "angle": 0, + "content": "Below we introduce Multi-order Multi-mode Transformer (3Mformer) with Multi-order Pooling (MP) block and Temporal block Pooling (TP) block, which are cascaded into two branches (i) \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\) and (ii) \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\), to achieve different types of coupled-mode tokens." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.215, + 0.37, + 0.23 + ], + "angle": 0, + "content": "4.3.1 Multi-order Pooling (MP) Module" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.244, + 0.471, + 0.366 + ], + "angle": 0, + "content": "\\(\\mathbf{CmSA}\\) in MP. We reshape the multi-order feature representation \\(\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}\\) into \\(\\mathbf{M} \\in \\mathbb{R}^{d' \\times N}\\) (or reshape the output from TP explained later into \\(\\mathbf{M}' \\in \\mathbb{R}^{d' \\times N}\\)) to let the model attend to different types of feature representations. Let us simply denote \\(d'' = d'\\tau\\) (or \\(d'' = d'\\)) depending on the source of input. 
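Before the MP-specific instantiation below, Eq. (7) itself can be sketched in a few lines of PyTorch; this is our illustration only, and the weight initialisation and the use of the column dimension as \(d_K\) are assumptions.

```python
import math
import torch
import torch.nn as nn

class CmSA(nn.Module):
    """Coupled-mode self-attention in the spirit of Eq. (7): rows of M are coupled-mode tokens."""
    def __init__(self, num_tokens):
        super().__init__()
        self.Wq = nn.Parameter(torch.randn(num_tokens, num_tokens) / num_tokens ** 0.5)
        self.Wk = nn.Parameter(torch.randn(num_tokens, num_tokens) / num_tokens ** 0.5)
        self.Wv = nn.Parameter(torch.randn(num_tokens, num_tokens) / num_tokens ** 0.5)

    def forward(self, M):                                # M: (num_tokens, cols)
        Q, K, V = self.Wq @ M, self.Wk @ M, self.Wv @ M
        A = torch.softmax(Q @ K.T / math.sqrt(M.shape[-1]), dim=-1)   # token-token attention
        return A @ V                                     # reweighted V, same shape as M

M = torch.randn(16 * 11, 25)     # e.g. d'*tau 'channel-temporal block' tokens over N hyper-edge columns
out = CmSA(16 * 11)(M)
```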
We form a coupled-mode self-attention (if \\(d'' = d'\\tau\\), we have, i.e., 'channel-temporal block' token; if \\(d'' = d'\\), we have 'channel-only' token):" + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.378, + 0.47, + 0.417 + ], + "angle": 0, + "content": "\\[\na _ {\\mathrm {M P}} \\left(\\mathbf {Q} _ {\\mathrm {M P}}, \\mathbf {K} _ {\\mathrm {M P}}, \\mathbf {V} _ {\\mathrm {M P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {M P}} \\mathbf {K} _ {\\mathrm {M P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {M P}}}}}\\right) \\mathbf {V} _ {\\mathrm {M P}}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.43, + 0.47, + 0.537 + ], + "angle": 0, + "content": "where \\(\\sqrt{d_{K_{\\mathrm{MP}}}}\\) is the scaling factor, \\(\\mathbf{Q}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{q}\\mathbf{M}\\), \\(\\mathbf{K}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{k}\\mathbf{M}\\) and \\(\\mathbf{V}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{\\nu}\\mathbf{M}\\) (we can use here \\(\\mathbf{M}\\) or \\(\\mathbf{M}'\\)) are the query, key and value. Moreover, \\(\\mathbf{Q}_{\\mathrm{MP}}, \\mathbf{K}_{\\mathrm{MP}}, \\mathbf{V}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}\\) and \\(\\mathbf{W}_{\\mathrm{MP}}^{q}, \\mathbf{W}_{\\mathrm{MP}}^{k}, \\mathbf{W}_{\\mathrm{MP}}^{\\nu} \\in \\mathbb{R}^{d'' \\times d''}\\) are learnable weights. Eq. (8) is a self-attention layer which reweighs \\(\\mathbf{V}_{\\mathrm{MP}}\\) based on the correlation between \\(\\mathbf{Q}_{\\mathrm{MP}}\\) and \\(\\mathbf{K}_{\\mathrm{MP}}\\) token embeddings of so-called coupled-mode tokens." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.542, + 0.47, + 0.633 + ], + "angle": 0, + "content": "Weighted pooling. Attention layer in Eq. (8) produces feature representation \\(\\mathbf{O}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}\\) to enhance the relationship between example feature channels and body joints. Subsequently, we handle the impact of hyper-edges of multiple orders by weighted pooling along hyper-edges of order \\(m \\in I_r\\):" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.644, + 0.469, + 0.665 + ], + "angle": 0, + "content": "\\[\n\\mathbf {O} _ {\\mathrm {M P}} ^ {* (m)} = \\mathbf {O} _ {\\mathrm {M P}} ^ {(m)} \\mathbf {H} ^ {(m)} \\in \\mathbb {R} ^ {d ^ {\\prime \\prime} \\times J}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.678, + 0.47, + 0.775 + ], + "angle": 0, + "content": "where \\(\\mathbf{O}_{\\mathrm{MP}}^{(m)} \\in \\mathbb{R}^{d'' \\times N_{E_m}}\\) is simply extracted from \\(\\mathbf{O}_{\\mathrm{MP}}\\) for hyper-edges of order \\(m\\), and matrices \\(\\mathbf{H}^{(m)} \\in \\mathbb{R}^{N_{E_m} \\times J}\\) are learnable weights to perform weighted pooling along hyperedges of order \\(m\\). Finally, we obtain \\(\\mathbf{O}_{\\mathrm{MP}}^* \\in \\mathbb{R}^{rd'' \\times J}\\) by simply concatenating \\(\\mathbf{O}_{\\mathrm{MP}}^{*(1)}, \\ldots, \\mathbf{O}_{\\mathrm{MP}}^{*(r)}\\). If we used the input to MP from TP, then we denote the output of MP as \\(\\mathbf{O}_{\\mathrm{MP}}^*\\)." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.796, + 0.391, + 0.812 + ], + "angle": 0, + "content": "4.3.2 Temporal block Pooling (TP) Module" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.901 + ], + "angle": 0, + "content": "\\(\\mathbf{CmSA}\\) in TP. 
Firstly, we reshape the multi-order feature representation \\(\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}\\) into \\(\\mathbf{M} \\in \\mathbb{R}^{d' N \\times \\tau}\\) (or reshape the output from MP into \\(\\mathbf{M}'' \\in \\mathbb{R}^{rd' J \\times \\tau}\\)). For simplicity, we denote \\(d''' = d'N\\) in the first case and \\(d''' = rd'J\\) in the second case. As the first mode of reshaped input serves to form" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.166 + ], + "angle": 0, + "content": "tokens, they are again coupled-mode tokens, e.g., 'channel-hyper-edge' and 'order-channel-body joint' tokens, respectively. Moreover, TP also performs pooling along block-temporal mode (along \\(\\tau\\)). We form an coupled-mode self-attention:" + }, + { + "type": "equation", + "bbox": [ + 0.529, + 0.177, + 0.892, + 0.216 + ], + "angle": 0, + "content": "\\[\na _ {\\mathrm {T P}} \\left(\\mathbf {Q} _ {\\mathrm {T P}}, \\mathbf {K} _ {\\mathrm {T P}}, \\mathbf {V} _ {\\mathrm {T P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {T P}} \\mathbf {K} _ {\\mathrm {T P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {T P}}}}}\\right) \\mathbf {V} _ {\\mathrm {T P}}, \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.23, + 0.892, + 0.367 + ], + "angle": 0, + "content": "where \\(\\sqrt{d_{K_{\\mathrm{TP}}}}\\) is the scaling factor, \\(\\mathbf{Q}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{q}\\mathbf{M}\\), \\(\\mathbf{K}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{k}\\mathbf{M}\\) and \\(\\mathbf{V}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{\\nu}\\mathbf{M}\\) (we can use here \\(\\mathbf{M}\\) or \\(\\mathbf{M}''\\)) are the query, key and value. Moreover, \\(\\mathbf{Q}_{\\mathrm{TP}}, \\mathbf{K}_{\\mathrm{TP}}, \\mathbf{V}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}\\) and \\(\\mathbf{W}_{\\mathrm{TP}}^{q}, \\mathbf{W}_{\\mathrm{TP}}^{k}, \\mathbf{W}_{\\mathrm{TP}}^{\\nu} \\in \\mathbb{R}^{d''' \\times d'''}\\) are learnable weights. Eq. (10) reweights \\(\\mathbf{V}_{\\mathrm{TP}}\\) based on the correlation between \\(\\mathbf{Q}_{\\mathrm{TP}}\\) and \\(\\mathbf{K}_{\\mathrm{TP}}\\) token embeddings of coupled-mode tokens ('channel-hyper-edge' or 'order-channel-body joint'). The output of attention is the temporal representation \\(\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}\\). If we used \\(\\mathbf{M}''\\) as input, we denote the output as \\(\\mathbf{O}_{\\mathrm{TP}}''\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.37, + 0.892, + 0.509 + ], + "angle": 0, + "content": "Pooling step. Given the temporal representation \\(\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d'' \\times r}\\) (or \\(\\mathbf{O}_{\\mathrm{TP}}''\\)), we apply pooling along the block-temporal mode to obtain compact feature representations independent of length (block count \\(\\tau\\)) of skeleton sequence. There exist many pooling operations including first-order, e.g., average, maximum, sum pooling, second-order [60, 80] such as attentional pooling [14], higher-order (tri-linear) [8, 25] and rank pooling [13]. The output after pooling is \\(\\mathbf{O}_{\\mathrm{TP}}^* \\in \\mathbb{R}^{d''}\\) (or \\(\\mathbf{O}_{\\mathrm{TP}}''\\))." 
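The pooling step along the block-temporal mode admits several choices, as listed above; a compact sketch of the simplest ones (our illustration, with an autocorrelation-style stand-in for second-order pooling) is shown below.

```python
import torch

O_tp = torch.randn(16 * 25, 11)           # toy d''' x tau attention output

avg_pooled = O_tp.mean(dim=-1)            # first-order: average pooling -> (d''',)
max_pooled = O_tp.max(dim=-1).values      # first-order: maximum pooling -> (d''',)
sum_pooled = O_tp.sum(dim=-1)             # first-order: sum pooling     -> (d''',)

# A second-order (autocorrelation-style) alternative over the tau blocks;
# attentional, tri-linear and rank pooling would replace this line.
second_order = (O_tp @ O_tp.T) / O_tp.shape[-1]   # (d''', d''')
```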
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.53, + 0.663, + 0.543 + ], + "angle": 0, + "content": "4.3.3 Model Variants" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.555, + 0.892, + 0.6 + ], + "angle": 0, + "content": "We devise four model variants by different stacking of MP with TP, with the goal of exploiting attention with different kinds of coupled-mode tokens:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.613, + 0.89, + 0.644 + ], + "angle": 0, + "content": "i. Single-branch: MP followed by TP, denoted \\(\\mathrm{MP}\\rightarrow \\mathrm{TP}\\) (Fig. 1 top right branch)." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.656, + 0.89, + 0.687 + ], + "angle": 0, + "content": "ii. Single-branch: TP followed by MP, denoted \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\), (Fig. 1 bottom right branch)." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.7, + 0.89, + 0.731 + ], + "angle": 0, + "content": "iii. Two-branch (our 3Mformer, Fig. 1) which concatenates outputs of \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\) and \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\)." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.744, + 0.89, + 0.775 + ], + "angle": 0, + "content": "iv. We also investigate only MP or TP module followed by average pooling or an FC layer." + }, + { + "type": "list", + "bbox": [ + 0.507, + 0.613, + 0.89, + 0.775 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.787, + 0.892, + 0.862 + ], + "angle": 0, + "content": "The outputs from \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\) and \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\) have exactly the same feature dimension (\\(\\mathbb{R}^{rd'J}\\), after reshaping into vector). For two-branch (our 3Mformer), we simply concatenate these outputs (\\(\\mathbb{R}^{2rd'J}\\), after concatenation). These vectors are forwarded to the FC layer to learn a classifier." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.875, + 0.892, + 0.901 + ], + "angle": 0, + "content": "4We do not propose pooling operators but we select popular ones with the purpose of comparing their impact on TP." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5624" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.21, + 0.108 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.115, + 0.289, + 0.131 + ], + "angle": 0, + "content": "5.1. Datasets and Protocols" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.139, + 0.468, + 0.229 + ], + "angle": 0, + "content": "(i) NTU RGB+D (NTU-60) [43] contains 56,880 video sequences. This dataset has variable sequence lengths and high intra-class variations. Each skeleton sequence has 25 joints and there are no more than two human subjects in each video. Two evaluation protocols are: (i) cross-subject (X-Sub) and (ii) cross-view (X-View)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.23, + 0.468, + 0.32 + ], + "angle": 0, + "content": "(ii) NTU RGB+D 120 (NTU-120) [34], an extension of NTU-60, contains 120 action classes (daily/health-related), and 114,480 RGB+D video samples captured with 106 distinct human subjects from 155 different camera viewpoints. There are also two evaluation protocols: (i) cross-subject (X-Sub) and (ii) cross-setup (X-Set)." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.321, + 0.468, + 0.58 + ], + "angle": 0, + "content": "(iii) Kinetics-Skeleton, based on Kinetics [20], is large-scale dataset with 300,000 video clips and up to 400 human actions collected from YouTube. This dataset involves human daily activities, sports scenes and complex human-computer interaction scenes. Since Kinetics only provides raw videos without the skeletons, ST-GCN [64] uses the publicly available OpenPose toolbox [2] to estimate and extract the location of 18 human body joints on every frame in the clips. We use their released skeleton data to evaluate our model. Following the standard evaluation protocol, we report the Top-1 and Top-5 accuracies on the validation set. (iv) Northwestern-UCLA [51] was captured by 3 Kinect cameras simultaneously from multiple viewpoints. It contains 1494 video clips covering 10 actions. Each action is performed by 10 different subjects. We follow the same evaluation protocol as [51]: training split is formed from the first two cameras, and testing split from the last camera." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.139, + 0.468, + 0.58 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.59, + 0.267, + 0.607 + ], + "angle": 0, + "content": "5.2. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.614, + 0.468, + 0.901 + ], + "angle": 0, + "content": "We use PyTorch and \\(1\\times\\) Titan RTX 3090 for experiments. We use the Stochastic Gradient Descent (SGD) with momentum 0.9, cross-entropy as the loss, weight decay of 0.0001 and batch size of 32. The learning rate is set to 0.1 initially. On NTU-60 and NTU-120, the learning rate is divided by 10 at the 40th and 50th epoch, and the training process ends at the 60th epoch. On Kinetics-Skeleton, the learning rate is divided by 10 at the 50th and 60th epoch, and the training finishes at the 80th epoch. We took \\(20\\%\\) of training set for validation to tune hyperparameters. All models have fixed hyperparameters with 2 and 4 layers for NTU-60/NTU-120 and Kinetics-Skeleton, respectively. The hidden dimensions is set to 16 for all 3 datasets. We use 4 attention heads for NTU-60 and NTU-120, and 8 attention heads for Kinetics-Skeleton. To form each video temporal block, we simply choose temporal block size to be 10 and stride to be 5 to allow a \\(50\\%\\) overlap between consecutive temporal blocks. For Northwestern-UCLA, the batch size is 16. We adopted the data pre-processing in [6]." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 1. Search for the single best order \\( n \\) of hypergraph (except for \\( n = 3 \\& 4 \\) where we check if \\( n = 3 \\& 4 \\) are complementary)." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.123, + 0.871, + 0.231 + ], + "angle": 0, + "content": "
| Order-n | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc. |
|---|---|---|---|---|---|
| n = 1 | 78.5 | 86.3 | 75.3 | 77.9 | 32.0 |
| n = 2 | 83.0 | 89.2 | 86.2 | 88.3 | 37.1 |
| n = 3 | 91.3 | 97.0 | 87.5 | 89.7 | 39.5 |
| n = 4 | 91.5 | 97.1 | 87.8 | 90.0 | 40.1 |
| n = 5 | 91.4 | 97.3 | 87.8 | 90.0 | 40.3 |
| n = 3 & 4 | 91.6 | 97.2 | 87.6 | 90.3 | 40.5 |
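The hyper-edge counts behind the cost argument for larger orders (Sec. 5.3) follow directly from \(N_{E_m}=\binom{J}{m}\) and \(N=\sum_m \binom{J}{m}\); a quick check (ours):

```python
from math import comb

def hyperedge_counts(J, r):
    per_order = {m: comb(J, m) for m in range(1, r + 1)}
    return per_order, sum(per_order.values())

print(hyperedge_counts(25, 3))   # NTU (25 joints), r = 3: ({1: 25, 2: 300, 3: 2300}, 2625)
print(comb(18, 4))               # Kinetics-Skeleton (18 joints), order 4: 3060 hyper-edges
```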
" + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.257, + 0.892, + 0.3 + ], + "angle": 0, + "content": "Table 2. Evaluations of our model variants with/without MP and/or TP. Baseline in the table denotes the backbone (MLP unit + HoTs) without the use of either MP or TP module." + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.307, + 0.892, + 0.41 + ], + "angle": 0, + "content": "
| Variants | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc. |
|---|---|---|---|---|---|
| Baseline | 89.8 | 91.4 | 86.5 | 87.0 | 38.6 |
| + TP only | 91.2 | 93.8 | 87.5 | 88.6 | 39.8 |
| + MP only | 92.0 | 94.3 | 88.7 | 89.7 | 40.3 |
| + MP→TP | 93.0 | 96.1 | 90.8 | 91.7 | 45.7 |
| + TP→MP | 92.6 | 95.8 | 90.2 | 91.1 | 44.0 |
| + 2-branch (3Mformer) | 94.8 | 98.7 | 92.0 | 93.8 | 48.3 |
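For the 2-branch variant compared in Table 2, the fusion reduces to concatenating the two branch outputs and classifying with an FC layer; a rough sketch (our reading of Sec. 4.3.3, with example sizes) follows.

```python
import torch
import torch.nn as nn

r, d_prime, J, num_classes = 3, 16, 25, 60
mp_tp = torch.randn(r * d_prime * J)      # flattened MP->TP branch output (assumed layout)
tp_mp = torch.randn(r * d_prime * J)      # flattened TP->MP branch output (assumed layout)

classifier = nn.Linear(2 * r * d_prime * J, num_classes)
logits = classifier(torch.cat([mp_tp, tp_mp]))   # (num_classes,)
```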
" + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.437, + 0.589, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.538, + 0.509, + 0.555, + 0.521 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.593, + 0.438, + 0.678, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.509, + 0.645, + 0.521 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.438, + 0.768, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.717, + 0.509, + 0.734, + 0.521 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image", + "bbox": [ + 0.774, + 0.438, + 0.862, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.822, + 0.509, + 0.84, + 0.521 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.533, + 0.892, + 0.59 + ], + "angle": 0, + "content": "Figure 2. Visualization of attention matrices. (a) single-mode attention matrix of 'channel-only' token, (b)-(d) coupled-mode attention matrices of 'channel-hyper-edge', 'order-channel-body joint' and 'channel-temporal block' tokens, respectively." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.616, + 0.653, + 0.633 + ], + "angle": 0, + "content": "5.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.892, + 0.867 + ], + "angle": 0, + "content": "Search for the single best order \\( n \\). Table 1 shows our analysis regarding the best order \\( n \\). In general, increasing the order \\( n \\) improves the performance (within \\( \\sim 0.5\\% \\) on average), but causing higher computational cost, e.g., the number of hyper-edges for the skeletal hypergraph of order \\( n = 4 \\) is 3060 on Kinetics-Skeleton. We also notice that combining orders 3 and 4 yields very limited improvements. The main reasons are: (i) reasonable order \\( n \\), e.g., \\( n = 3 \\) or 4 improves accuracy as higher-order motion patterns are captured which are useful for classification-related tasks (ii) further increasing order \\( n \\), e.g., \\( n = 5 \\) introduces patterns in feature representations that rarely repeat even for the same action class. Considering the cost and performance, we choose the maximum order \\( r = 3 \\) (\\( n = 1,2,3 \\)) in the following experiments unless specified otherwise." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Discussion on coupled-mode attention. Fig. 2 shows the visualization of some attention matrices in our 3Mformer," + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5625" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.26, + 0.09, + 0.71, + 0.104 + ], + "angle": 0, + "content": "Table 3. Experimental results on NTU-60, NTU-120 and Kinetics-Skeleton." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.111, + 0.895, + 0.487 + ], + "angle": 0, + "content": "
| | Method | Venue | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics Top-1 | Kinetics Top-5 |
|---|---|---|---|---|---|---|---|---|
| Graph-based | TCN [22] | CVPRW'17 | - | - | - | - | 20.3 | 40.0 |
| | ST-GCN [64] | AAAI'18 | 81.5 | 88.3 | 70.7 | 73.2 | 30.7 | 52.8 |
| | AS-GCN [30] | CVPR'19 | 86.8 | 94.2 | 78.3 | 79.8 | 34.8 | 56.5 |
| | 2S-AGCN [44] | CVPR'19 | 88.5 | 95.1 | 82.5 | 84.2 | 36.1 | 58.7 |
| | NAS-GCN [37] | AAAI'20 | 89.4 | 95.7 | - | - | 37.1 | 60.1 |
| | Sym-GNN [31] | TPAMI'22 | 90.1 | 96.4 | - | - | 37.2 | 58.1 |
| | Shift-GCN [6] | CVPR'20 | 90.7 | 96.5 | 85.9 | 87.6 | - | - |
| | MS-G3D [36] | CVPR'20 | 91.5 | 96.2 | 86.9 | 88.4 | 38.0 | 60.9 |
| | CTR-GCN [4] | ICCV'21 | 92.4 | 96.8 | 88.9 | 90.6 | - | - |
| | InfoGCN [9] | CVPR'22 | 93.0 | 97.1 | 89.8 | 91.2 | - | - |
| | PoseConv3D [12] | CVPR'22 | 94.1 | 97.1 | 86.9 | 90.3 | 47.7 | - |
| Hypergraph-based | Hyper-GNN [15] | TIP'21 | 89.5 | 95.7 | - | - | 37.1 | 60.0 |
| | DHGCN [62] | CoRR'21 | 90.7 | 96.0 | 86.0 | 87.9 | 37.7 | 60.6 |
| | Selective-HCN [79] | ICMR'22 | 90.8 | 96.6 | - | - | 38.0 | 61.1 |
| | SD-HGCN [17] | ICONIP'21 | 90.9 | 96.7 | 87.0 | 88.2 | 37.4 | 60.5 |
| Transformer-based | ST-TR [39] | CVIU'21 | 90.3 | 96.3 | 85.1 | 87.1 | 38.0 | 60.5 |
| | MTT [23] | LSP'21 | 90.8 | 96.7 | 86.1 | 87.6 | 37.9 | 61.3 |
| | 4s-GSTN [18] | Symmetry'22 | 91.3 | 96.6 | 86.4 | 88.7 | - | - |
| | STST [73] | ACM MM'21 | 91.9 | 96.8 | - | - | 38.3 | 61.2 |
| | 3Mformer (with avg-pool, ours) | | 92.0 | 97.3 | 88.0 | 90.1 | 43.1 | 65.2 |
| | 3Mformer (with max-pool, ours) | | 92.1 | 97.8 | - | - | - | - |
| | 3Mformer (with attn-pool, ours) | | 94.2 | 98.5 | 89.7 | 92.4 | 45.7 | 67.6 |
| | 3Mformer (with tri-pool, ours) | | 94.0 | 98.5 | 91.2 | 92.7 | 47.7 | 71.9 |
| | 3Mformer (with rank-pool, ours) | | 94.8 | 98.7 | 92.0 | 93.8 | 48.3 | 72.3 |
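The training recipe of Sec. 5.2 that produced the NTU results above can be approximated with a standard PyTorch setup; this is a hedged sketch, and the placeholder model and loop body are ours.

```python
import torch

model = torch.nn.Linear(10, 60)            # placeholder for the full network
optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
                            momentum=0.9, weight_decay=1e-4)
# Divide the learning rate by 10 at epochs 40 and 50 (NTU-60/120 schedule), stop at epoch 60.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 50], gamma=0.1)
criterion = torch.nn.CrossEntropyLoss()

for epoch in range(60):
    # ... iterate over mini-batches of size 32, compute criterion, call optimizer.step() ...
    scheduler.step()
```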
" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.522, + 0.472, + 0.626 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.635, + 0.471, + 0.692 + ], + "angle": 0, + "content": "Figure 3. Evaluations of different single-mode (baseline) and coupled-mode tokens. We use a 3rd-order HoT with a standard Transformer, but we replace the scaled dot-product attention with coupled-mode tokens and coupled-mode attention." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.727, + 0.47, + 0.864 + ], + "angle": 0, + "content": "which show diagonal and/or vertical patterns that are consistent with the patterns of the attention matrices found in standard Transformer trained on sequences, e.g., for natural language processing tasks [28, 49]. We also notice that the coupled-mode attention, e.g., 'channel-temporal block' captures much richer information compared to single mode attention, e.g., 'channel-only'. Our coupled-mode attention can be applied to different orders of tensor representations through simple matricization." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.871, + 0.471, + 0.903 + ], + "angle": 0, + "content": "Discussion on model variants. To show the effectiveness of the proposed MP and TP module, firstly, we compare" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.525, + 0.895, + 0.89 + ], + "angle": 0, + "content": "TP only and MP only with the baseline (No MP or TP module). We use the TP module followed by an FC layer instead of MP as in \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\), where the FC layer takes the output from TP \\((\\mathbb{R}^{d'N})\\) and produces a vector in \\(\\mathbb{R}^{3d'J}\\) passed to the classifier. Similarly, for MP only, we use the MP module followed by an average pooling layer instead of TP as in \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\), where the average layer takes output from MP \\((\\mathbb{R}^{3d'J \\times \\tau})\\) and generates a vector in \\(\\mathbb{R}^{3d'J}\\) (pool along \\(\\tau\\) blocks), passed to the classifier. Table 2 shows the results. With just the TP module, we outperform the baseline by \\(1.3\\%\\) on average. With only the MP module, we outperform the baseline by \\(2.34\\%\\) on average. These comparisons show that (i) CmSA in MP and TP are efficient for better performance (ii) MP performs better than TP which shows that 'channel-temporal block' token contains richer information than 'channel-hyper-edge' token. We also notice that \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\) slightly outperforms \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\) by \\(\\sim 1\\%\\), and the main reason is that \\(\\mathrm{MP} \\rightarrow \\mathrm{TP}\\) has coupled-mode tokens 'channel-temporal block' and 'order-channel-joint' which attend 4 modes, whereas \\(\\mathrm{TP} \\rightarrow \\mathrm{MP}\\) has 'channel-hyper-edge' and 'channel-only' tokens which attend only 2 modes. Fig. 3 shows a comparison of different coupled-mode tokens on 3 benchmark datasets. This also suggests that one should firstly perform attenuate" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5626" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.212 + ], + "angle": 0, + "content": "tion with coupled-mode 'channel-block' tokens, followed by weighted pooling along the hyper-edge mode, followed by attention with coupled-mode 'order-channel-body joint' and finalised by block-temporal pooling. 
Finally, with 2- branch (3Mformer), we further boost the performance by \\(2 - 4\\%\\) , which shows that \\(\\mathrm{MP}\\rightarrow \\mathrm{TP}\\) and \\(\\mathrm{TP}\\rightarrow \\mathrm{MP}\\) are complementary branches. Below we use 2-branch (3Mformer) in the experiments (as in Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.216, + 0.471, + 0.414 + ], + "angle": 0, + "content": "Comparison of pooling in TP. As shown in Table 3, average pooling (avg-pool) achieves similar performance (within \\(\\sim 0.5\\%\\) difference) as maximum pooling (max-pool), second-order pooling (attn-pool) outperforms average and maximum pooling by \\(\\sim 1 - 2\\%\\) and third-order pooling (tri-pool) outperforms second-order pooling by \\(\\sim 1\\%\\). Interestingly, rank pooling (rank-pool) achieves the best performance. We think it is reasonable as rank pooling strives to enforce the temporal order in the feature space to be preserved, e.g., it forces network to always preserve temporal progression of actions over time. With multiple attention modules, orderless statistics such as second- or third-order pooling may be too general." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.424, + 0.413, + 0.44 + ], + "angle": 0, + "content": "5.4. Comparisons with the State of the Arts" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.448, + 0.47, + 0.629 + ], + "angle": 0, + "content": "We compare our model with recent state-of-the-art methods. On the NTU-60 (Tab. 3), we obtain the top-1 accuracies of the two evaluation protocols during test stage. The methods in comparisons include popular graph-based [30, 31, 37, 44, 64] and hypergraph-based models [15, 17, 62, 79]. Our 3rd-order model outperforms all graph-based methods, and also outperforms existing hypergraph-based models such as Selective-HCN and SD-HGCN by \\(0.45\\%\\) and \\(0.35\\%\\) on average on X-Sub and X-View respectively. With 3Mformer for the fusion of multi-order features, our model further boosts the performance by \\(\\sim 3\\%\\) and \\(1.5\\%\\) on the two protocols." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.63, + 0.47, + 0.84 + ], + "angle": 0, + "content": "It can be seen from Tab. 3 on NTU-60 that although some learned graph-based methods such as AS-GCN and 2S-AGCN can also capture the dependencies between human body joints, they only consider the pairwise relationship between body joints, which is the second-order interaction, and ignore the higher-order interaction between multiple body joints in form of hyper-edges, which may lose sensitivity to important groups of body joints. Our proposed 3Mformer achieves better performance by constructing a hypergraph from 2D/3D body joints as nodes for action recognition, thus capturing higher-order interactions of body joints to further improve the performance. Note that even with the average pooling, our model still achieves competitive results compared to its counterparts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.902 + ], + "angle": 0, + "content": "For the NTU-120 dataset (Tab. 3), we obtain the top-1 performance on X-Sub and X-Set protocols. Our 2nd-order HoT alone outperforms graph-based models by \\(2 - 2.4\\%\\) on average. For example, we outperform recent Shift-GCN by" + }, + { + "type": "table_caption", + "bbox": [ + 0.532, + 0.09, + 0.859, + 0.104 + ], + "angle": 0, + "content": "Table 4. Experimental results on Northwestern-UCLA." 
| | Shift-GCN [6] (CVPR'20) | CTR-GCN [4] (ICCV'21) | InfoGCN [9] (CVPR'22) | 2nd-order only (ours) | 3rd-order only (ours) | 3Mformer (ours) |
|---|---|---|---|---|---|---|
| acc. (%) | 94.6 | 96.5 | 97.0 | 96.5 | 97.2 | 97.8 |
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.193, + 0.892, + 0.331 + ], + "angle": 0, + "content": "0.3% and 0.7% on X-Sub and X-Set respectively. Moreover, our 3rd-order HoT alone outperforms SD-HGCN by 0.5% and 1.5% respectively on X-Sub and X-Set. With the 3Mformer for the fusion of multi-order feature maps, we obtain the new state-of-the-art results. Notice that our 3Mformer yields \\(92.0\\% / 93.8\\%\\) on NTU-120 while [38] yields \\(80.5\\% / 81.7\\%\\) as we explore the fusion of multiple orders of hyperedges and several coupled-token types capturing easy-to-complex dynamics of varying joint groups." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.334, + 0.893, + 0.545 + ], + "angle": 0, + "content": "As videos from the Kinetics dataset are processed by the OpenPose, the skeletons in the Kinetics-Skeleton dataset have defects which adversely affect the performance of the model. We show both top-1 and top-5 performance in Table 3 to better reflect the performance of our 3Mformer. STGCN is the first method based on GCN, our 2nd-order HoT alone achieves very competitive results compared to the very recent NAS-GCN and Sym-GNN. The 3rd-order HoT alone outperforms Hyper-GNN, SD-HGCN and SelectiveHCN by \\(3.4\\%\\), \\(3.1\\%\\) and \\(2.9\\%\\) respectively for top-1 accuracies. Moreover, fusing multi-order feature maps from multiple orders of hyper-edges via 3Mformer gives us the best performance on Kinetics-Skeleton with \\(48.3\\%\\) for top-1, the new state-of-the-art result." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.549, + 0.894, + 0.595 + ], + "angle": 0, + "content": "Table 4 shows results on the Northwestern-UCLA dataset. Our 3Mformer is also effective on this dataset-it outperforms the current state-of-the-art InfoGCN by \\(0.8\\%\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.62, + 0.628, + 0.636 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.649, + 0.892, + 0.801 + ], + "angle": 0, + "content": "In this paper, we model the skeleton data as hypergraph to capture higher-order information formed between groups of human body joints of orders 1, ..., \\( r \\). We use Higher-order Transformer (HoT) to learn higher-order information on hypergraphs of \\( r \\)-order formed over 2D/3D human body joints. We also introduce a novel Multi-order Multi-mode Transformer (3Mformer) for the fusion of multi-order feature representations. Our end-to-end trainable 3Mformer outperforms state-of-the-art graph- and hypergraph-based models by a large margin on several benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Acknowledgements. LW is supported by the Data61/ CSIRO PhD Scholarship. PK is in part funded by CSIRO's Machine Learning and Artificial Intelligence Future Science Platform (MLAI FSP) Spatiotemporal Activity." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "5627" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.09, + 0.176, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.117, + 0.47, + 0.2 + ], + "angle": 0, + "content": "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: Transformers for multimodal self-supervised learning from raw video, audio and text. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. 
Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.202, + 0.472, + 0.258 + ], + "angle": 0, + "content": "[2] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.26, + 0.47, + 0.3 + ], + "angle": 0, + "content": "[3] Chun-Fu Chen, Quanfu Fan, and Rameswar Panda. Crossvit: Cross-attention multi-scale vision transformer for image classification. CoRR, abs/2103.14899, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.303, + 0.47, + 0.373 + ], + "angle": 0, + "content": "[4] Yuxin Chen, Ziqi Zhang, Chunfeng Yuan, Bing Li, Ying Deng, and Weiming Hu. Channel-wise topology refinement graph convolution for skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13359-13368, 2021. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.375, + 0.468, + 0.458 + ], + "angle": 0, + "content": "[5] Ke Cheng, Yifan Zhang, Congqi Cao, Lei Shi, Jian Cheng, and Hanqing Lu. Decoupling gcn with dropgraph module for skeleton-based action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020, pages 536-553, Cham, 2020. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.46, + 0.468, + 0.529 + ], + "angle": 0, + "content": "[6] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.532, + 0.47, + 0.615 + ], + "angle": 0, + "content": "[7] Yi-Bin Cheng, Xipeng Chen, Dongyu Zhang, and Liang Lin. Motion-transformer: Self-supervised pre-training for skeleton-based action recognition. In Proceedings of the 2nd ACM International Conference on Multimedia in Asia, MMAsia '20, New York, NY, USA, 2021. Association for Computing Machinery. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.617, + 0.468, + 0.685 + ], + "angle": 0, + "content": "[8] Anoop Cherian, Piotr Koniusz, and Stephen Gould. Higher-order pooling of cnn features via kernel linearization for action recognition. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 130-138, 2017. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.688, + 0.468, + 0.771 + ], + "angle": 0, + "content": "[9] Hyung-gun Chi, Myoung Hoon Ha, Seunggeun Chi, Sang Wan Lee, Qixing Huang, and Karthik Ramani. Infogcn: Representation learning for human skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20186-20196, June 2022. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.468, + 0.869 + ], + "angle": 0, + "content": "[10] Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J Colwell, and Adrian Weller. Rethinking attention with performers. In International Conference on Learning Representations, 2021. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.468, + 0.901 + ], + "angle": 0, + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner," + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.117, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.161 + ], + "angle": 0, + "content": "Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.163, + 0.892, + 0.231 + ], + "angle": 0, + "content": "[12] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2969-2978, June 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.233, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[13] Basura Fernando, Efstratios Gavves, Jose Oramas Oramas M., Amir Ghodrati, and Tinne Tuytelaars. Rank pooling for action recognition. IEEE Trans. Pattern Anal. Mach. Intell., 39(4):773-787, apr 2017. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.288, + 0.892, + 0.315 + ], + "angle": 0, + "content": "[14] Rohit Girdhar and Deva Ramanan. Attentional pooling for action recognition. In NIPS, 2017. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.317, + 0.892, + 0.37 + ], + "angle": 0, + "content": "[15] Xiaoke Hao, Jie Li, Yingchun Guo, Tao Jiang, and Ming Yu. Hypergraph neural network for skeleton-based action recognition. IEEE Transactions on Image Processing, 30:2263-2275, 2021. 2, 4, 7, 8, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[16] Ryota Hashiguchi and Toru Tamaki. Vision transformer with cross-attention by temporal shift for efficient action recognition, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.414, + 0.892, + 0.524 + ], + "angle": 0, + "content": "[17] Changxiang He, Chen Xiao, Shuting Liu, Xiaofei Qin, Ying Zhao, and Xuedian Zhang. Single-skeleton and dual-skeleton hypergraph convolution neural networks for skeleton-based action recognition. In Teddy Mantoro, Minho Lee, Media Anugerah Ayu, Kok Wai Wong, and Achmad Nizar Hidayanto, editors, Neural Information Processing, pages 15-27, Cham, 2021. Springer International Publishing. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.525, + 0.892, + 0.566 + ], + "angle": 0, + "content": "[18] Yujuan Jiang, Zhaoneng Sun, Saisai Yu, Shuang Wang, and Yang Song. A graph skeleton transformer network for action recognition. Symmetry, 14(8), 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.568, + 0.892, + 0.662 + ], + "angle": 0, + "content": "[19] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are RNNs: Fast autoregressive transformers with linear attention. In Hal Daumé III and Aarti Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 5156-5165. PMLR, 13-18 Jul 2020. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.665, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[20] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.735, + 0.892, + 0.802 + ], + "angle": 0, + "content": "[21] Jinwoo Kim, Saeyoon Oh, and Seunghoon Hong. Transformers generalize deepsets and can be extended to graphs & hypergraphs. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.804, + 0.892, + 0.859 + ], + "angle": 0, + "content": "[22] Tae Soo Kim and Austin Reiter. Interpretable 3d human action analysis with temporal convolutional networks. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1623-1631, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[23] Jun Kong, Yuhang Bian, and Min Jiang. Mtt: Multi-scale temporal transformer for skeleton-based action recognition. IEEE Signal Processing Letters, 29:528-532, 2022. 7" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "5628" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.135 + ], + "angle": 0, + "content": "[24] Piotr Koniusz, Lei Wang, and Anoop Cherian. Tensor representations for action recognition. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.178 + ], + "angle": 0, + "content": "[25] Piotr Koniusz, Lei Wang, and Ke Sun. High-order tensor pooling with attention for action recognition. arXiv, 2021. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.18, + 0.47, + 0.234 + ], + "angle": 0, + "content": "[26] Piotr Koniusz and Hongguang Zhang. Power normalizations in fine-grained image, few-shot image and graph classification. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.236, + 0.47, + 0.306 + ], + "angle": 0, + "content": "[27] Matthew Korban and Xin Li. Ddgcn: A dynamic directed graph convolutional network for action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision – ECCV 2020, pages 761–776, Cham, 2020. Springer International Publishing. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.308, + 0.47, + 0.405 + ], + "angle": 0, + "content": "[28] Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. Revealing the dark secrets of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.47, + 0.49 + ], + "angle": 0, + "content": "[29] John Boaz Lee, Ryan Rossi, and Xiangnan Kong. Graph classification using structural attention. 
In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '18, page 1666-1674, New York, NY, USA, 2018. Association for Computing Machinery. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.491, + 0.47, + 0.56 + ], + "angle": 0, + "content": "[30] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Actional-structural graph convolutional networks for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.47, + 0.631 + ], + "angle": 0, + "content": "[31] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Symbiotic graph neural networks for 3d skeleton-based human action recognition and motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(6):3316-3333, 2022. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.633, + 0.47, + 0.687 + ], + "angle": 0, + "content": "[32] Hezheng Lin, Xing Cheng, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Qing Song, and Wei Yuan. CAT: cross attention in vision transformer. CoRR, abs/2106.05786, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.689, + 0.47, + 0.718 + ], + "angle": 0, + "content": "[33] Tsung-Yu Lin, Subhransu Maji, and Piotr Koniusz. Second-order democratic aggregation. In ECCV, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.47, + 0.788 + ], + "angle": 0, + "content": "[34] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C. Kot. Ntu rgb+d 120: A large-scale benchmark for 3d human activity understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.79, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[35] Shengyuan Liu, Pei Lv, Yuzhen Zhang, Jie Fu, Junjin Cheng, Wanqing Li, Bing Zhou, and Mingliang Xu. Semi-dynamic hypergraph neural network for 3d pose estimation. In Christian Bessiere, editor, Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 782-788. International Joint Conferences on Artificial Intelligence Organization, 7 2020. Main track. 2, 4, 13" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.162 + ], + "angle": 0, + "content": "[36] Ziyu Liu, Hongwen Zhang, Zhenghao Chen, Zhiyong Wang, and Wanli Ouyang. Disentangling and unifying graph convolutions for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.164, + 0.893, + 0.234 + ], + "angle": 0, + "content": "[37] Wei Peng, Xiaopeng Hong, Haoyu Chen, and Guoying Zhao. Learning graph convolutional network for skeleton-based human action recognition by neural searching. Proceedings of the AAAI Conference on Artificial Intelligence, 34(03):2669-2676, Apr. 2020. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.893, + 0.276 + ], + "angle": 0, + "content": "[38] Wei Peng, Jingang Shi, Tuomas Varanka, and Guoying Zhao. Rethinking the st-gcns for 3d skeleton-based human action recognition. Neurocomputing, 454:45-53, 2021. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.277, + 0.893, + 0.332 + ], + "angle": 0, + "content": "[39] Chiara Plizzari, Marco Cannici, and Matteo Matteucci. Skeleton-based action recognition via spatial and temporal transformer networks. Computer Vision and Image Understanding, 208-209:103219, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.334, + 0.893, + 0.389 + ], + "angle": 0, + "content": "[40] Zhenyue Qin, Yang Liu, Pan Ji, Dongwoo Kim, Lei Wang, Bob McKay, Saeed Anwar, and Tom Gedeon. Fusing higher-order features in graph neural networks for skeleton-based action recognition. IEEE TNNLS, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.893, + 0.446 + ], + "angle": 0, + "content": "[41] Saimunur Rahman, Piotr Koniusz, Lei Wang, Luping Zhou, Peyman Moghadam, and Changming Sun. Learning partial correlation based deep visual representation for image classification. In CVPR, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.448, + 0.893, + 0.504 + ], + "angle": 0, + "content": "[42] Kanchana Ranasinghe, Muzammal Naseer, Salman Khan, Fahad Shahbaz Khan, and Michael Ryoo. Self-supervised video transformer. In IEEE/CVF International Conference on Computer Vision and Pattern Recognition, June 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.505, + 0.893, + 0.56 + ], + "angle": 0, + "content": "[43] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+d: A large scale dataset for 3d human activity analysis. In IEEE Conference on Computer Vision and Pattern Recognition, June 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.893, + 0.603 + ], + "angle": 0, + "content": "[44] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In CVPR, 2019. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.893, + 0.673 + ], + "angle": 0, + "content": "[45] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Adasgn: Adapting joint number and model size for efficient skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 13413-13422, October 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.675, + 0.893, + 0.744 + ], + "angle": 0, + "content": "[46] Chenyang Si, Wentao Chen, Wei Wang, Liang Wang, and Tieniu Tan. An attention enhanced graph convolutional LSTM network for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.746, + 0.893, + 0.802 + ], + "angle": 0, + "content": "[47] Yi-Fan Song, Zhang Zhang, Caifeng Shan, and Liang Wang. Constructing stronger and faster baselines for skeleton-based action recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.803, + 0.893, + 0.858 + ], + "angle": 0, + "content": "[48] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. CoRR, abs/2203.12602, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.893, + 0.902 + ], + "angle": 0, + "content": "[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Ilia Polosukhin. Attention is all you need. In I. 
Guyon," + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "5629" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 3, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[50] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.207, + 0.471, + 0.262 + ], + "angle": 0, + "content": "[51] Jiang Wang, Xiaohan Nie, Yin Xia, Ying Wu, and Song-Chun Zhu. Cross-view action modeling, learning and recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2649-2656, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.471, + 0.317 + ], + "angle": 0, + "content": "[52] Lei Wang. Analysis and evaluation of Kinect-based action recognition algorithms. Master's thesis, School of the Computer Science and Software Engineering, The University of Western Australia, 11 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.32, + 0.47, + 0.36 + ], + "angle": 0, + "content": "[53] Lei Wang, Du Q. Huynh, and Piotr Koniusz. A comparative review of recent kinet-based action recognition algorithms. TIP, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.362, + 0.47, + 0.403 + ], + "angle": 0, + "content": "[54] Lei Wang, Du Q. Huynh, and Moussa Reda Mansour. Loss switching fusion with similarity search for video classification. ICIP, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.47, + 0.46 + ], + "angle": 0, + "content": "[55] Lei Wang and Piotr Koniusz. Self-Supervising Action Recognition by Statistical Moment and Subspace Descriptors, page 4324-4333. Association for Computing Machinery, New York, NY, USA, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.462, + 0.469, + 0.517 + ], + "angle": 0, + "content": "[56] Lei Wang and Piotr Koniusz. Temporal-viewpoint transportation plan for skeletal few-shot action recognition. In Proceedings of the Asian Conference on Computer Vision, pages 4176-4193, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.469, + 0.575 + ], + "angle": 0, + "content": "[57] Lei Wang and Piotr Koniusz. Uncertainty-dtw for time series and sequences. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI, pages 176-195. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.577, + 0.469, + 0.617 + ], + "angle": 0, + "content": "[58] Lei Wang, Piotr Koniusz, and Du Q. Huynh. Hallucinating IDT descriptors and I3D optical flow features for action recognition with cnns. In ICCV, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.619, + 0.469, + 0.66 + ], + "angle": 0, + "content": "[59] Lei Wang, Jun Liu, and Piotr Koniusz. 3d skeleton-based few-shot action recognition with jeanie is not so naive. arXiv preprint arXiv:2112.12668, 2021. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.662, + 0.469, + 0.757 + ], + "angle": 0, + "content": "[60] Qilong Wang, Zilin Gao, Jiangtao Xie, Wangmeng Zuo, and Peihua Li. Global gated mixture of second-order pooling for improving deep convolutional neural networks. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.76, + 0.469, + 0.814 + ], + "angle": 0, + "content": "[61] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. CoRR, abs/2112.09133, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.469, + 0.871 + ], + "angle": 0, + "content": "[62] Jinfeng Wei, Yunxin Wang, Mengli Guo, Pei Lv, Xiaoshan Yang, and Mingliang Xu. Dynamic hypergraph convolutional networks for skeleton-based action recognition. CoRR, abs/2112.10570, 2021. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.469, + 0.903 + ], + "angle": 0, + "content": "[63] Xi Wei, Tianzhu Zhang, Yan Li, Yongdong Zhang, and Feng Wu. Multi-modality cross attention network for image and" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.535, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "sentence matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.136, + 0.892, + 0.177 + ], + "angle": 0, + "content": "[64] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition. In AAAI, 2018. 1, 2, 4, 6, 7, 8, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.179, + 0.892, + 0.233 + ], + "angle": 0, + "content": "[65] Han Zhang, Yonghong Song, and Yuanlin Zhang. Graph convolutional LSTM model for skeleton-based action recognition. In 2019 IEEE International Conference on Multimedia and Expo (ICME), pages 412-417, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.235, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[66] Jiani Zhang, Xingjian Shi, Junyuan Xie, Hao Ma, Irwin King, and Dit-Yan Yeung. Gaan: Gated attention networks for learning on large and spatiotemporal graphs. In Amir Globerson and Ricardo Silva, editors, UAI, pages 339-349. AUAI Press, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.305, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[67] Pengfei Zhang, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jianru Xue, and Nanning Zheng. Semantics-guided neural networks for efficient skeleton-based human action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.376, + 0.892, + 0.416 + ], + "angle": 0, + "content": "[68] Shan Zhang, Dawei Luo, Lei Wang, and Piotr Koniusz. Few-shot object detection by second-order pooling. In ACCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.419, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[69] Shan Zhang, Naila Murray, Lei Wang, and Piotr Koniusz. Time-reversed diffusion tensor transformer: A new tenet of few-shot object detection. In ECCV, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.462, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[70] Shan Zhang, Lei Wang, Naila Murray, and Piotr Koniusz. Kernelized few-shot object detection with efficient integral aggregation. In CVPR, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.504, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[71] Xikun Zhang, Chang Xu, and Dacheng Tao. Context aware graph convolution for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.561, + 0.892, + 0.615 + ], + "angle": 0, + "content": "[72] Yongkang Zhang, Jun Li, Guoming Wu, Han Zhang, Zhiping Shi, Zhaoxun Liu, and Zizhang Wu. Temporal transformer networks with self-supervision for action recognition. CoRR, abs/2112.07338, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.617, + 0.892, + 0.699 + ], + "angle": 0, + "content": "[73] Yuhan Zhang, Bo Wu, Wen Li, Lixin Duan, and Chuang Gan. Stst: Spatial-temporal specialized transformer for skeleton-based action recognition. In Proceedings of the 29th ACM International Conference on Multimedia, MM '21, page 3229-3237, New York, NY, USA, 2021. Association for Computing Machinery. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.702, + 0.892, + 0.743 + ], + "angle": 0, + "content": "[74] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. COSTA: Covariance-preserving feature augmentation for graph contrastive learning. In KDD, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.745, + 0.892, + 0.785 + ], + "angle": 0, + "content": "[75] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. Spectral feature augmentation for graph contrastive learning and beyond. In AAAI, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.787, + 0.892, + 0.813 + ], + "angle": 0, + "content": "[76] Hao Zhu and Piotr Koniusz. Simple spectral graph convolution. In ICLR, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.816, + 0.892, + 0.843 + ], + "angle": 0, + "content": "[77] Hao Zhu and Piotr Koniusz. Generalized laplacian eigenmaps. In NeurIPS, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.871 + ], + "angle": 0, + "content": "[78] Hao Zhu, Ke Sun, and Piotr Koniusz. Contrastive laplacian eigenmaps. In NeurIPS, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.873, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[79] Yiran Zhu, Guangji Huang, Xing Xu, Yanli Ji, and Fumin Shen. Selective hypergraph convolutional networks for" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.093, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.956 + ], + "angle": 0, + "content": "5630" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.147 + ], + "angle": 0, + "content": "skeleton-based action recognition. In Proceedings of the 2022 International Conference on Multimedia Retrieval, ICMR '22, page 518-526, New York, NY, USA, 2022. Association for Computing Machinery. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.15, + 0.47, + 0.205 + ], + "angle": 0, + "content": "[80] Gao Zilin, Xie Jiangtao, Wang Qilong, and Li Peihua. Global second-order pooling convolutional networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.957 + ], + "angle": 0, + "content": "5631" + } + ] +] \ No newline at end of file diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_origin.pdf b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fa2b68feff641307405665232b5cdcc4d69d614f --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/59904744-5656-40cd-af70-98473e4f87a7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47838d097f4d237aa304280f9693c2f0b7c4fa2074d8c9877b676e2f24deebaf +size 882804 diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/full.md b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/full.md new file mode 100644 index 0000000000000000000000000000000000000000..350425c4d831c297d662af9de040bc91b7315431 --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/full.md @@ -0,0 +1,351 @@ +# 3Mformer: Multi-order Multi-mode Transformer for Skeletal Action Recognition + +Lei Wang†,§ Piotr Koniusz*,§,† + +†Australian National University, §Data61♥CSIRO + +\$firstname_lastname@data61.csiro.au + +# Abstract + +Many skeletal action recognition models use GCNs to represent the human body by 3D body joints connected body parts. GCNs aggregate one- or few-hop graph neighbourhoods, and ignore the dependency between not linked body joints. We propose to form hypergraph to model hyperedges between graph nodes (e.g., third- and fourth-order hyper-edges capture three and four nodes) which help capture higher-order motion patterns of groups of body joints. We split action sequences into temporal blocks, Higher-order Transformer (HoT) produces embeddings of each temporal block based on (i) the body joints, (ii) pairwise links of body joints and (iii) higher-order hyper-edges of skeleton body joints. We combine such HoT embeddings of hyper-edges of orders 1,..., $r$ by a novel Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged to achieve coupled-mode attention on coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. The first module, called Multi-order Pooling (MP), additionally learns weighted aggregation along the hyper-edge mode, whereas the second module, Temporal block Pooling (TP), aggregates along the temporal block' mode. Our end-to-end trainable network yields state-of-the-art results compared to GCN-, transformer- and hypergraph-based counterparts. + +# 1. Introduction + +Action Recognition has applications in video surveillance, human-computer interaction, sports analysis, and virtual reality [24, 25, 40, 52-59]. 
Different from video-based methods which mainly focus on modeling the spatiotemporal representations from RGB frames and/or optical flow [25, 52-55, 58], skeleton sequences, representing a spatio-temporal evolution of 3D body joints, have been + +proven robust against sensor noises and effective in action recognition while being computationally and storage efficient [24, 40, 52, 53, 56, 57, 59]. The skeleton data is usually obtained by either localization of 2D/3D coordinates of human body joints with the depth sensors or pose estimation algorithms applied to videos [2]. Skeleton sequences enjoy (i) simple structural connectivity of skeletal graph and (ii) temporal continuity of 3D body joints evolving in time. While temporal evolution of each body joint is highly informative, embeddings of separate body joints are insensitive to relations between body parts. Moreover, while the links between adjacent 3D body joints (following the structural connectivity) are very informative as they model relations, these links represent highly correlated nodes in the sense of their temporal evolution. Thus, modeling larger groups of 3D body joints as hyper-edges can capture more complex spatio-temporal motion dynamics. + +The existing graph-based models mainly differ by how they handle temporal information. Graph Neural Network (GNN) may encode spatial neighborhood of the node followed by aggregation by LSTM [46, 65]. Alternatively, Graph Convolutional Network (GCN) may perform spatio-temporal convolution in the neighborhood of each node [64]. Spatial GCNs perform convolution within one or two hop distance of each node, e.g., spatio-temporal GCN model called ST-GCN [64] models spatio-temporal vicinity of each 3D body joint. As ST-GCN applies convolution along structural connections (links between body joints), structurally distant joints, which may cover key patterns of actions, are largely ignored. ST-GCN captures ever larger neighborhoods as layers are added but suffers from oversmoothing that can be mitigated by linear GCNs [76-78]. + +Human actions are associated with interaction groups of skeletal joints, e.g., wrist alone, head-wrist, head-wrist-ankles, etc. The impact of these groups of joints on each action differs, and the degree of influence of each joint should be learned. Accordingly, designing a better model for skeleton data is vital given the topology of skeleton graph is suboptimal. While GCN can be applied to a fully-connected graph (i.e., 3D body joints as densely connected + +graph nodes), Higher-order Transformer (HoT) [21] has been proven more efficient. + +Thus, we propose to use hypergraphs with hyper-edges of order 1 to $r$ to effectively represent skeleton data for action recognition. Compared to GCNs, our encoder contains an MLP followed by three HoT branches that encode first, second- and higher-order hyper-edges, i.e., set of body joints, edges between pairs of nodes, hyper-edges between triplets of nodes, etc. Each branch has its own learnable parameters, and processes temporal blocks2 one-by-one. + +We notice that (i) the number of hyper-edges of $J$ joints grows rapidly with order $r$ , i.e., $\binom{J}{i}$ for $i = 1, \dots, r$ , embeddings of the highest order dominate lower orders in terms of volume if such embeddings are merely concatenated, and (ii) long-range temporal dependencies of feature maps are insufficiently explored, as sequences are split into $\tau$ temporal blocks for computational tractability. 
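To give a sense of scale for point (i), the following minimal Python check enumerates the hyper-edge counts $\binom{J}{m}$ per order; the joint counts ($J=25$ for NTU, $J=18$ for Kinetics-Skeleton) are those of the datasets used later in the experiments, and the snippet is purely a combinatorial illustration:

```python
from math import comb

def hyperedge_counts(J, r):
    """Number of hyper-edges of each order m = 1..r for J body joints, and their total N."""
    counts = {m: comb(J, m) for m in range(1, r + 1)}
    return counts, sum(counts.values())

for name, J in [("NTU (J=25)", 25), ("Kinetics-Skeleton (J=18)", 18)]:
    counts, N = hyperedge_counts(J, r=4)
    print(name, counts, "total N =", N)

# NTU (J=25): {1: 25, 2: 300, 3: 2300, 4: 12650}, total N = 15275
# Kinetics-Skeleton (J=18): {1: 18, 2: 153, 3: 816, 4: 3060}, total N = 4047
# If embeddings of all orders were merely concatenated, the highest order would dominate.
```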
+ +Merely concatenating outputs of HoT branches of orders 1 to $r$ , and across $\tau$ blocks, is sub-optimal. Thus, our Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged, realizes a variation of coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. As HoT operates block-by-block, 'channel-temporal block' tokens and weighted hyper-edge aggregation in Multi-order Pooling (MP) help combine information flow block-wise. Various coupled-mode tokens help improve results further due to different focus of each attention mechanism. As the block-temporal mode needs to be aggregated (number of blocks varies across sequences), Temporal block Pooling (TP) can use rank pooling [13], second-order [14, 26, 33, 41, 60, 68, 80] or higher-order pooling [8, 24, 25, 69, 70]. + +In summary, our main contributions are listed as follows: + +i. We model the skeleton data as hypergraph of orders 1 to $r$ (set, graph and/or hypergraph), where human body joints serve as nodes. Higher-order Transformer embeddings of such formed hyper-edges represent various groups of 3D body joints and capture various higher-order dynamics important for action recognition. +ii. As HoT embeddings represent individual hyper-edge order and block, we introduce a novel Multi-order Multi-mode Transformer (3Mformer) with two modules, Multi-order Pooling and Temporal block Pooling. Their goal is to form coupled-mode tokens such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation. + +Our 3Mformer outperforms other GCN- and hypergraph-based models on NTU-60, NTU-120, Kinetics-Skeleton and Northwestern-UCLA by a large margin. + +# 2. Related Work + +Below we describe popular action recognition models for skeletal data. + +Graph-based models. Popular GCN-based models include the Attention enhanced Graph Convolutional LSTM network (AGC-LSTM) [46], the Actional-Structural GCN (AS-GCN) [30], Dynamic Directed GCN (DDGCN) [27], Decoupling GCN with DropGraph module [5], ShiftGCN [6], Semantics-Guided Neural Networks (SGN) [67], AdaSGN [45], Context Aware GCN (CA-GCN) [71], Channel-wise Topology Refinement Graph Convolution Network (CTR-GCN) [4] and a family of Efficient GCN (EfficientGCN-Bx) [47]. Although GCN-based models enjoy good performance, they have shortcomings, e.g., convolution and/or pooling are applied over one- or few-hop neighborhoods, e.g., ST-GCN [64], according to the human skeleton graph (body joints linked up according to connectivity of human body parts). Thus, indirect links between various 3D body joints such as hands and legs are ignored. In contrast, our model is not restricted by the structure of typical human body skeletal graph. Instead, 3D body joints are nodes which form hyper-edges of orders 1 to $r$ . + +Hypergraph-based models. Pioneering work on capturing groups of nodes across time uses tensors [24] to represent the 3D human body joints to exploit the kinematic relations among the adjacent and non-adjacent joints. Representing the human body as a hypergraph is adopted in [35] via a semi-dynamic hypergraph neural network that captures richer information than GCN. A hypergraph GNN [15] captures both spatio-temporal information and higher-order dependencies for skeleton-based action recognition. 
Our work is somewhat closely related to these works, but we jointly use hypergraphs of order 1 to $r$ to obtain rich hyper-edge embeddings based on Higher-order Transformers. + +Transformer-based models. Action recognition with transformers includes self-supervised video transformer [42] that matches the features from different views (a popular strategy in self-supervised GCNs [74, 75]), the end-to-end trainable Video-Audio-Text-Transformer (VATT) [1] for learning multi-model representations from unlabeled raw video, audio and text through the multimodal contrastive losses, and the Temporal Transformer Network with Self-supervision (TTSN) [72]. Motion-Transformer [7] captures the temporal dependencies via a self-supervised pre-training on human actions, Masked Feature Prediction (MaskedFeat) [61] pre-trained on unlabeled videos with MViT-L learns abundant visual representations, and video-masked autoencoder (VideoMAE) [48] with vanilla ViT + +![](images/e54dd32c2d7e0e6322522f3bca6e512a8da722d88d64a7868bafd07b87e447c2.jpg) +Figure 1. Pipeline overview. Each sequence is split into $\tau$ temporal blocks $\mathbf{B}_1, \dots, \mathbf{B}_{\tau}$ . Subsequently, each block is embedded by a simple MLP into $\mathbf{X}_1, \dots, \mathbf{X}_{\tau}$ , which are passed to Higher-order Transformers (HoT ( $n = 1, \dots, r$ )) in order to obtain feature tensors $\Phi_1, \dots, \Phi_{\tau}$ . These tensors are subsequently concatenated by $\odot$ along the hyper-edge mode into a multi-order feature tensor $\mathcal{M}$ . The final step is a Multi-order Multi-mode Transformer (3Mformer from Section 4), which contains two complementary branches, MP $\rightarrow$ TP and TP $\rightarrow$ MP, whose outputs are concatenated by $\odot$ and passed to the classifier. MP and TP perform the Coupled-mode Self-Attention (CmSA) with the so-called coupled-mode tokens, based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge' and 'channel-only' pairs. To this end, MP contains also weighted pooling along hyper-edge mode by learnable matrix $\mathbf{H}$ (and $\mathbf{H}'$ in another branch). TP contains also block-temporal pooling denoted by $g(\cdot)$ whose role is to capture block-temporal order with average, maximum, rank pooling, etc. In our experiments we show that such designed MP and TP are able to efficiently process hyper-edge feature representations from HoT branches. Appendix A shows full visualization of our 3Mformer. + +uses the masking strategy. In contrast to these works, we use three HoT branches of model [21], and we model hyperedges of orders 1 to $r$ by forming several multi-mode token variations in 3Mformer. + +Attention. In order to improve feature representations, attention captures relationship between tokens. Natural language processing and computer vision have driven recent developments in attention mechanisms based on transformers [11, 49]. Examples include the hierarchical Cross Attention Transformer (CAT) [32], Cross-attention by Temporal Shift with CNNs [16], Cross-Attention Multi-Scale Vision Transformer (CrossViT) for image classification [3] and Multi-Modality Cross Attention (MMCA) Network for image and sentence matching [63]. In GNNs, attention can be defined over edges [50, 66] or over nodes [29]. 
In this work, we use the attention with hyper-edges of several orders from HoT branches serving as tokens, and coupled-mode attention with coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs formed in 3Mformer. + +# 3. Background + +Below we describe foundations necessary for our work. + +Notations. $\mathcal{I}_K$ stands for the index set $\{1,2,\dots,K\}$ . Regular fonts are scalars; vectors are denoted by lowercase boldface letters, e.g., $\mathbf{x}$ ; matrices by the uppercase boldface, e.g., $\mathbf{M}$ ; and tensors by calligraphic letters, e.g., $\mathbf{M}$ . An $r$ th-order tensor is denoted as $\mathbf{M} \in \mathbb{R}^{I_1 \times I_2 \times \ldots \times I_r}$ , and the mode- $m$ matricization of $\mathbf{M}$ is denoted as $\mathbf{M}_{(m)} \in \mathbb{R}^{I_m \times (I_1 \ldots I_{m-1} I_{m+1} \ldots I_r)}$ . + +Transformer layers [11, 49]. A transformer encoder layer $f: \mathbb{R}^{J \times d} \to \mathbb{R}^{J \times d}$ consists of two sub-layers: (i) a self + +attention $a: \mathbb{R}^{J \times d} \to \mathbb{R}^{J \times d}$ and (ii) an element-wise feedforward MLP: $\mathbb{R}^{J \times d} \to \mathbb{R}^{J \times d}$ . For a set of $J$ nodes with $\mathbf{X} \in \mathbb{R}^{J \times d}$ , where $\mathbf{x}_i$ is a feature vector of node $i$ , a transformer layer3 computes: + +$$ +a \left(\mathbf {x} _ {i}\right) = \mathbf {x} _ {i} + \sum_ {h = 1} ^ {H} \sum_ {j = 1} ^ {J} \alpha_ {i j} ^ {h} \mathbf {x} _ {j} \mathbf {W} _ {h} ^ {V} \mathbf {W} _ {h} ^ {O}, \tag {1} +$$ + +$$ +f (\mathbf {x} _ {i}) = a (\mathbf {x} _ {i}) + \operatorname {M L P} (a (\mathbf {X})) _ {i}, \tag {2} +$$ + +where $H$ and $d_H$ denote respectively the number of heads and the head size, $\alpha^h = \sigma (\mathbf{X}\mathbf{W}_h^Q (\mathbf{X}\mathbf{W}_h^K)^\top)$ is the attention coefficient, $\mathbf{W}_h^O\in \mathbb{R}^{d_H\times d}$ , and $\mathbf{W}_h^V,\mathbf{W}_h^K,\mathbf{W}_h^Q\in \mathbb{R}^{d\times d_H}$ . + +Higher-order transformer layers [21]. Let the HoT layer be $f_{m\rightarrow n}:\mathbb{R}^{J^m\times d}\to \mathbb{R}^{J^n\times d}$ with two sub-layers: (i) a higher-order self-attention $a_{m\rightarrow n}:\mathbb{R}^{J^m\times d}\to \mathbb{R}^{J^n\times d}$ and (ii) a feedforward $\mathrm{MLP}_{n\rightarrow n}:\mathbb{R}^{J^n\times d}\to \mathbb{R}^{J^n\times d}$ . Moreover, let indexing vectors $\mathbf{i}\in \mathcal{I}_J^m\equiv \mathcal{I}_J\times \mathcal{I}_J\times \ldots \times \mathcal{I}_J$ ( $m$ modes) and $\mathbf{j}\in \mathcal{I}_J^n\equiv \mathcal{I}_J\times \mathcal{I}_J\times \ldots \times \mathcal{I}_J$ ( $n$ modes). 
For the input tensor $\mathbf{X}\in \mathbb{R}^{J^m\times d}$ with hyper-edges of order $m$ , a HoT layer evaluates: + +$$ +a _ {m \rightarrow n} (\mathbf {X}) _ {j} = \sum_ {h = 1} ^ {H} \sum_ {\mu} \sum_ {i} \boldsymbol {\alpha} _ {i, j} ^ {h, \mu} \mathbf {X} _ {i} \mathbf {W} _ {h, \mu} ^ {V} \mathbf {W} _ {h, \mu} ^ {O} \tag {3} +$$ + +$$ +\mathrm {M L P} _ {n \rightarrow n} \left(a _ {m \rightarrow n} (\mathbf {X})\right) = \mathrm {L} _ {n \rightarrow n} ^ {2} \left(\operatorname {R e L U} \left(\mathrm {L} _ {n \rightarrow n} ^ {1} \left(a _ {m \rightarrow n} (\mathbf {X})\right)\right)\right), \tag {4} +$$ + +$$ +f _ {m \rightarrow n} (\mathbf {X}) = a _ {m \rightarrow n} (\mathbf {X}) + \operatorname {M L P} _ {n \rightarrow n} (a _ {m \rightarrow n} (\mathbf {X})), \tag {5} +$$ + +where $\pmb{\alpha}^{h,\mu}\in \mathbb{R}^{J^{m + n}}$ is the so-called attention coefficient tensor with multiple heads, and $\pmb{\alpha}_{\mathbf{i},\mathbf{j}}^{h,\mu}\in \mathbb{R}^{J}$ is a vector, $\mathbf{W}_{h,\mu}^V\in \mathbb{R}^{d\times d_H}$ and $\mathbf{W}_{h,\mu}^{O}\in \mathbb{R}^{d_{H}\times d}$ are learnable parameters. Moreover, $\mu$ indexes over the so-called equivalence classes of order $(m + n)$ in the same partition of nodes, $\mathrm{L}_{n\to n}^{1}\colon \mathbb{R}^{J^n\times d}\to \mathbb{R}^{J^n\times d_F}$ + +and $\mathrm{L}_{n\to n}^2\colon \mathbb{R}^{J^n\times d_F}\to \mathbb{R}^{J^n\times d}$ are equivariant linear layers and $d_{F}$ is the hidden dimension. + +To compute each attention tensor $\alpha^{h,\mu} \in \mathbb{R}^{J^{m + n}}$ from the input tensor $\mathbf{X} \in \mathbb{R}^{J^m \times d}$ of hyper-edges of order $m$ , from the higher-order query and key, we obtain: + +$$ +\boldsymbol {\alpha} _ {i, j} ^ {h, \mu} = \left\{ \begin{array}{c c} \frac {\sigma \left(\mathbf {Q} _ {j} ^ {h , \mu} , \mathbf {K} _ {i} ^ {h , \mu}\right)}{Z _ {j}} & (i, j) \in \mu \\ 0 & \text {o t h e r w i s e}, \end{array} \right. \tag {6} +$$ + +where $\mathbf{Q}^{\mu} = \mathrm{L}_{m\to n}^{\mu}(\mathbf{X})$ $\mathbf{K}^{\mu} = \mathrm{L}_{m\to m}^{\mu}(\mathbf{X})$ , and normalization constant $Z_{j} = \sum_{i:(i,j)\in \mu}\sigma (\mathbf{Q}_{j}^{\mu},\mathbf{K}_{i}^{\mu})$ . Finally, kernel attention in Eq. (6) can be approximated with RKHS feature maps $\psi \in \mathbb{R}_+^{d_K}$ for efficacy as $d_{K}\ll d_{H}$ . Specifically, we have $\sigma (\mathbf{Q}_j^{h,\mu},\mathbf{K}_i^{h,\mu})\approx \psi (\mathbf{Q}_j^{h,\mu})^\top \psi (\mathbf{K}_i^{h,\mu})$ as in [10, 19]. We choose the performer kernel [10] due to its good performance. + +As query and key tensors are computed from the input tensor $\mathbf{X}$ using the equivariant linear layers, the transformer encoder layer $f_{m\to n}$ satisfies the permutation equivariance. + +# 4. Approach + +Skeletal Graph [64] and Skeletal Hypergraph [15,35] are popular for modeling edges and hyper-edges. In this work, we use the Higher-order Transformer (HoT) [21] as a backbone encoder. + +# 4.1. Model Overview + +Fig. 1 shows that our framework contains a simple 3-layer MLP unit (FC, ReLU, FC, ReLU, Dropout, FC), three HoT blocks with each HoT for each type of input (i.e., body joint feature set, graph and hypergraph of body joints), followed by Multi-order Multi-mode Transformer (3Mformer) with two modules (i) Multi-order Pooling (MP) and (ii) Temporal block Pooling (TP). 
The goal of 3Mformer is to form coupled-mode tokens (explained later) such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and to perform weighted hyper-edge aggregation and temporal block aggregation. Their outputs are further concatenated and passed to an FC layer for classification.

MLP unit. The MLP unit takes $T$ neighboring frames, each with $J$ 2D/3D skeleton body joints, forming one temporal block. In total, depending on the stride $S$, we obtain $\tau$ temporal blocks (a block captures the short-term temporal evolution). In contrast, the long-term temporal evolution is modeled with HoT and 3Mformer. Each temporal block is encoded by the MLP into a $d \times J$ dimensional feature map.

HoT branches. We stack $r$ branches of HoT, each taking embeddings $\mathbf{X}_t \in \mathbb{R}^{d \times J}$, where $t \in I_{\tau}$ denotes a temporal block. Each HoT branch outputs hyper-edge feature representations $\Phi_m' \in \mathbb{R}^{J^m \times d'}$ for its order $m \in I_r$.

For the first-, second- and higher-order stream outputs $\Phi_1', \dots, \Phi_r'$, we (i) swap the feature channel and hyper-edge modes, (ii) extract the upper triangular part of the tensors, and (iii) concatenate along the block-temporal mode, so we have $\Phi_m \in \mathbb{R}^{d' \times N_{E_m} \times \tau}$, where $N_{E_m} = \binom{J}{m}$. Subsequently, we concatenate $\Phi_1, \ldots, \Phi_r$ along the hyper-edge mode and obtain a multi-order feature tensor $\mathcal{M} \in \mathbb{R}^{d' \times N \times \tau}$, where the total number of hyper-edges across all orders is $N = \sum_{m=1}^{r} \binom{J}{m}$.

3Mformer. Our Multi-order Multi-mode Transformer (3Mformer) with Coupled-mode Self-Attention (CmSA) fuses the information flow inside the multi-order feature tensor $\mathcal{M}$; finally, the output of 3Mformer is passed to a classifier.

# 4.2. Coupled-mode Self-Attention

Coupled-mode tokens. We are inspired by the attentive regions of the one-class token in the standard Vision Transformer (ViT) [49], which can be leveraged to form a class-agnostic localization map. We investigate whether the transformer model can also effectively capture coupled-mode attention for more discriminative classification tasks, e.g., tensorial skeleton-based action recognition, by learning coupled-mode tokens within the transformer. To this end, we propose a Multi-order Multi-mode Transformer (3Mformer), which uses coupled-mode tokens to jointly learn various higher-order motion dynamics among the channel-, block-temporal-, body joint- and order-modes. Our 3Mformer produces coupled-mode relationships via the CmSA mechanism corresponding to different tokens. Below we introduce our CmSA.

Given an order-$r$ tensor $\mathcal{M} \in \mathbb{R}^{I_1 \times I_2 \times \ldots \times I_r}$, to form a coupled-mode token we perform the mode-$m$ matricization of $\mathcal{M}$ to obtain $\mathbf{M} \equiv \mathcal{M}_{(m)}^{\top} \in \mathbb{R}^{(I_1 \ldots I_{m-1} I_{m+1} \ldots I_r) \times I_m}$, and the coupled-mode tokens for $\mathbf{M}$ are formed.
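As a minimal illustration of this matricization, realized as a reshape in the sections below, consider a 3rd-order tensor with channel, hyper-edge and temporal-block modes; the sizes in this PyTorch sketch are hypothetical ($d'=16$, $N=2625$ for $J=25$ joints with $r=3$, and $\tau=12$ blocks):

```python
import torch

d_prime, N, tau = 16, 2625, 12    # hypothetical: channels d', hyper-edges N (J=25, r=3), temporal blocks tau
M = torch.randn(d_prime, N, tau)  # multi-order feature tensor (channel x hyper-edge x block)

# 'channel-temporal block' tokens: couple channel and block modes, keep the hyper-edge mode -> (d'*tau, N)
M_ct = M.permute(0, 2, 1).reshape(d_prime * tau, N)

# 'channel-hyper-edge' tokens: couple channel and hyper-edge modes, keep the block mode -> (d'*N, tau)
M_ce = M.reshape(d_prime * N, tau)

print(M_ct.shape, M_ce.shape)     # torch.Size([192, 2625]) torch.Size([42000, 12])
```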
For example, for a given 3rd-order tensor that has feature channel-, hyper-edge- and temporal block-mode, we can form 'channel-temporal block', 'channel-hyper-edge (any order)' and 'channel-only' pairs; and if the given tensor is used as input and outputs a new tensor which produces new mode, e.g., body joint-mode, we can form the 'order-channel-body joint' token. In the following sections, for simplicity, we use reshape for the matricization of tensor to form different types of coupled-mode tokens. Our CmSA is given as: + +$$ +a (\mathbf {Q}, \mathbf {K}, \mathbf {V}) = \operatorname {S o f t M a x} \left(\frac {\mathbf {Q} \mathbf {K} ^ {\top}}{\sqrt {d _ {K}}}\right) \mathbf {V}, \tag {7} +$$ + +where $\sqrt{d_K}$ is the scaling factor, $\mathbf{Q} = \mathbf{W}^q\mathbf{M}$ , $\mathbf{K} = \mathbf{W}^k\mathbf{M}$ and $\mathbf{V} = \mathbf{W}^{\nu}\mathbf{M}$ are the query, key and value, respectively, and $\mathbf{M} \equiv \mathcal{M}_{(m)}^{\top}$ . Moreover, $\mathbf{Q}, \mathbf{K}, \mathbf{V} \in \mathbb{R}^{(I_1\dots I_{m-1}I_{m+1}\dots I_r)\times I_m}$ and $\mathbf{W}^q, \mathbf{W}^k, \mathbf{W}^\nu \in \mathbb{R}^{(I_1\dots I_{m-1}I_{m+1}\dots I_r)\times (I_1\dots I_{m-1}I_{m+1}\dots I_r)}$ are learnable weights. We notice that various coupled-mode tokens have different 'focus' of attention mechanisms, and we apply them in our 3Mformer for the fusion of multi-order feature representations. + +# 4.3. Multi-order Multi-mode Transformer + +Below we introduce Multi-order Multi-mode Transformer (3Mformer) with Multi-order Pooling (MP) block and Temporal block Pooling (TP) block, which are cascaded into two branches (i) $\mathrm{MP} \rightarrow \mathrm{TP}$ and (ii) $\mathrm{TP} \rightarrow \mathrm{MP}$ , to achieve different types of coupled-mode tokens. + +# 4.3.1 Multi-order Pooling (MP) Module + +$\mathbf{CmSA}$ in MP. We reshape the multi-order feature representation $\mathcal{M} \in \mathbb{R}^{d' \times N \times \tau}$ into $\mathbf{M} \in \mathbb{R}^{d' \times N}$ (or reshape the output from TP explained later into $\mathbf{M}' \in \mathbb{R}^{d' \times N}$ ) to let the model attend to different types of feature representations. Let us simply denote $d'' = d'\tau$ (or $d'' = d'$ ) depending on the source of input. We form a coupled-mode self-attention (if $d'' = d'\tau$ , we have, i.e., 'channel-temporal block' token; if $d'' = d'$ , we have 'channel-only' token): + +$$ +a _ {\mathrm {M P}} \left(\mathbf {Q} _ {\mathrm {M P}}, \mathbf {K} _ {\mathrm {M P}}, \mathbf {V} _ {\mathrm {M P}}\right) = \operatorname {S o f t M a x} \left(\frac {\mathbf {Q} _ {\mathrm {M P}} \mathbf {K} _ {\mathrm {M P}} ^ {\top}}{\sqrt {d _ {K _ {\mathrm {M P}}}}}\right) \mathbf {V} _ {\mathrm {M P}}, \tag {8} +$$ + +where $\sqrt{d_{K_{\mathrm{MP}}}}$ is the scaling factor, $\mathbf{Q}_{\mathrm{MP}} = \mathbf{W}_{\mathrm{MP}}^{q}\mathbf{M}$ , $\mathbf{K}_{\mathrm{MP}} = \mathbf{W}_{\mathrm{MP}}^{k}\mathbf{M}$ and $\mathbf{V}_{\mathrm{MP}} = \mathbf{W}_{\mathrm{MP}}^{\nu}\mathbf{M}$ (we can use here $\mathbf{M}$ or $\mathbf{M}'$ ) are the query, key and value. Moreover, $\mathbf{Q}_{\mathrm{MP}}, \mathbf{K}_{\mathrm{MP}}, \mathbf{V}_{\mathrm{MP}} \in \mathbb{R}^{d'' \times N}$ and $\mathbf{W}_{\mathrm{MP}}^{q}, \mathbf{W}_{\mathrm{MP}}^{k}, \mathbf{W}_{\mathrm{MP}}^{\nu} \in \mathbb{R}^{d'' \times d''}$ are learnable weights. Eq. 
(8) is a self-attention layer which reweighs $\mathbf{V}_{\mathrm{MP}}$ based on the correlation between $\mathbf{Q}_{\mathrm{MP}}$ and $\mathbf{K}_{\mathrm{MP}}$ token embeddings of so-called coupled-mode tokens. + +Weighted pooling. Attention layer in Eq. (8) produces feature representation $\mathbf{O}_{\mathrm{MP}} \in \mathbb{R}^{d'' \times N}$ to enhance the relationship between example feature channels and body joints. Subsequently, we handle the impact of hyper-edges of multiple orders by weighted pooling along hyper-edges of order $m \in I_r$ : + +$$ +\mathbf {O} _ {\mathrm {M P}} ^ {* (m)} = \mathbf {O} _ {\mathrm {M P}} ^ {(m)} \mathbf {H} ^ {(m)} \in \mathbb {R} ^ {d ^ {\prime \prime} \times J}, \tag {9} +$$ + +where $\mathbf{O}_{\mathrm{MP}}^{(m)} \in \mathbb{R}^{d'' \times N_{E_m}}$ is simply extracted from $\mathbf{O}_{\mathrm{MP}}$ for hyper-edges of order $m$ , and matrices $\mathbf{H}^{(m)} \in \mathbb{R}^{N_{E_m} \times J}$ are learnable weights to perform weighted pooling along hyperedges of order $m$ . Finally, we obtain $\mathbf{O}_{\mathrm{MP}}^* \in \mathbb{R}^{rd'' \times J}$ by simply concatenating $\mathbf{O}_{\mathrm{MP}}^{*(1)}, \ldots, \mathbf{O}_{\mathrm{MP}}^{*(r)}$ . If we used the input to MP from TP, then we denote the output of MP as $\mathbf{O}_{\mathrm{MP}}^*$ . + +# 4.3.2 Temporal block Pooling (TP) Module + +$\mathbf{CmSA}$ in TP. Firstly, we reshape the multi-order feature representation $\mathcal{M} \in \mathbb{R}^{d' \times N \times \tau}$ into $\mathbf{M} \in \mathbb{R}^{d' N \times \tau}$ (or reshape the output from MP into $\mathbf{M}'' \in \mathbb{R}^{rd' J \times \tau}$ ). For simplicity, we denote $d''' = d'N$ in the first case and $d''' = rd'J$ in the second case. As the first mode of reshaped input serves to form + +tokens, they are again coupled-mode tokens, e.g., 'channel-hyper-edge' and 'order-channel-body joint' tokens, respectively. Moreover, TP also performs pooling along block-temporal mode (along $\tau$ ). We form an coupled-mode self-attention: + +$$ +a _ {\mathrm {T P}} \left(\mathbf {Q} _ {\mathrm {T P}}, \mathbf {K} _ {\mathrm {T P}}, \mathbf {V} _ {\mathrm {T P}}\right) = \operatorname {S o f t M a x} \left(\frac {\mathbf {Q} _ {\mathrm {T P}} \mathbf {K} _ {\mathrm {T P}} ^ {\top}}{\sqrt {d _ {K _ {\mathrm {T P}}}}}\right) \mathbf {V} _ {\mathrm {T P}}, \tag {10} +$$ + +where $\sqrt{d_{K_{\mathrm{TP}}}}$ is the scaling factor, $\mathbf{Q}_{\mathrm{TP}} = \mathbf{W}_{\mathrm{TP}}^{q}\mathbf{M}$ , $\mathbf{K}_{\mathrm{TP}} = \mathbf{W}_{\mathrm{TP}}^{k}\mathbf{M}$ and $\mathbf{V}_{\mathrm{TP}} = \mathbf{W}_{\mathrm{TP}}^{\nu}\mathbf{M}$ (we can use here $\mathbf{M}$ or $\mathbf{M}''$ ) are the query, key and value. Moreover, $\mathbf{Q}_{\mathrm{TP}}, \mathbf{K}_{\mathrm{TP}}, \mathbf{V}_{\mathrm{TP}} \in \mathbb{R}^{d''' \times \tau}$ and $\mathbf{W}_{\mathrm{TP}}^{q}, \mathbf{W}_{\mathrm{TP}}^{k}, \mathbf{W}_{\mathrm{TP}}^{\nu} \in \mathbb{R}^{d''' \times d'''}$ are learnable weights. Eq. (10) reweights $\mathbf{V}_{\mathrm{TP}}$ based on the correlation between $\mathbf{Q}_{\mathrm{TP}}$ and $\mathbf{K}_{\mathrm{TP}}$ token embeddings of coupled-mode tokens ('channel-hyper-edge' or 'order-channel-body joint'). The output of attention is the temporal representation $\mathbf{O}_{\mathrm{TP}} \in \mathbb{R}^{d''' \times \tau}$ . If we used $\mathbf{M}''$ as input, we denote the output as $\mathbf{O}_{\mathrm{TP}}''$ . + +Pooling step. 
Given the temporal representation $\mathbf{O}_{\mathrm{TP}} \in \mathbb{R}^{d''' \times \tau}$ (or $\mathbf{O}_{\mathrm{TP}}''$), we apply pooling along the block-temporal mode to obtain compact feature representations independent of the length (block count $\tau$) of the skeleton sequence. There exist many pooling operations, including first-order pooling, e.g., average, maximum and sum pooling, second-order pooling [60, 80] such as attentional pooling [14], higher-order (tri-linear) pooling [8, 25], and rank pooling [13]. The output after pooling is $\mathbf{O}_{\mathrm{TP}}^* \in \mathbb{R}^{d'''}$ (or its counterpart obtained from $\mathbf{O}_{\mathrm{TP}}''$).

# 4.3.3 Model Variants

We devise four model variants by stacking MP and TP differently, with the goal of exploiting attention with different kinds of coupled-mode tokens:

i. Single-branch: MP followed by TP, denoted $\mathrm{MP} \rightarrow \mathrm{TP}$ (Fig. 1 top right branch).
ii. Single-branch: TP followed by MP, denoted $\mathrm{TP} \rightarrow \mathrm{MP}$ (Fig. 1 bottom right branch).
iii. Two-branch (our 3Mformer, Fig. 1), which concatenates the outputs of $\mathrm{MP} \rightarrow \mathrm{TP}$ and $\mathrm{TP} \rightarrow \mathrm{MP}$.
iv. We also investigate using only the MP or TP module, followed by average pooling or an FC layer.

The outputs of $\mathrm{MP} \rightarrow \mathrm{TP}$ and $\mathrm{TP} \rightarrow \mathrm{MP}$ have exactly the same feature dimension ($\mathbb{R}^{rd'J}$ after reshaping into a vector). For the two-branch variant (our 3Mformer), we simply concatenate these outputs ($\mathbb{R}^{2rd'J}$ after concatenation). These vectors are forwarded to the FC layer that learns the classifier.

# 5. Experiments

# 5.1. Datasets and Protocols

(i) NTU RGB+D (NTU-60) [43] contains 56,880 video sequences. This dataset has variable sequence lengths and high intra-class variations. Each skeleton sequence has 25 joints and there are no more than two human subjects per video. The two evaluation protocols are: (i) cross-subject (X-Sub) and (ii) cross-view (X-View).
(ii) NTU RGB+D 120 (NTU-120) [34], an extension of NTU-60, contains 120 action classes (daily/health-related) and 114,480 RGB+D video samples captured from 106 distinct human subjects and 155 different camera viewpoints. There are also two evaluation protocols: (i) cross-subject (X-Sub) and (ii) cross-setup (X-Set).
(iii) Kinetics-Skeleton, based on Kinetics [20], is a large-scale dataset with 300,000 video clips and up to 400 human actions collected from YouTube. The dataset covers human daily activities, sports scenes and complex human-computer interaction scenes. Since Kinetics only provides raw videos without skeletons, ST-GCN [64] uses the publicly available OpenPose toolbox [2] to estimate and extract the locations of 18 human body joints on every frame of the clips. We use their released skeleton data to evaluate our model. Following the standard evaluation protocol, we report the Top-1 and Top-5 accuracies on the validation set.
(iv) Northwestern-UCLA [51] was captured by 3 Kinect cameras simultaneously from multiple viewpoints. It contains 1494 video clips covering 10 actions. Each action is performed by 10 different subjects. We follow the same evaluation protocol as [51]: the training split is formed from the first two cameras, and the testing split from the last camera.

# 5.2. Experimental Setup

We use PyTorch and $1\times$ Titan RTX 3090 for experiments. We use Stochastic Gradient Descent (SGD) with momentum 0.9, the cross-entropy loss, weight decay of 0.0001 and batch size of 32.
The learning rate is set to 0.1 initially. On NTU-60 and NTU-120, the learning rate is divided by 10 at the 40th and 50th epochs, and training ends at the 60th epoch. On Kinetics-Skeleton, the learning rate is divided by 10 at the 50th and 60th epochs, and training finishes at the 80th epoch. We take $20\%$ of the training set for validation to tune hyperparameters. All models use fixed hyperparameters, with 2 and 4 layers for NTU-60/NTU-120 and Kinetics-Skeleton, respectively. The hidden dimension is set to 16 for all 3 datasets. We use 4 attention heads for NTU-60 and NTU-120, and 8 attention heads for Kinetics-Skeleton. To form each video temporal block, we set the temporal block size to 10 and the stride to 5, which allows a $50\%$ overlap between consecutive temporal blocks (see the short sketch after Table 1). For Northwestern-UCLA, the batch size is 16. We adopt the data pre-processing in [6].

Table 1. Search for the single best order $n$ of the hypergraph (except for $n = 3 \& 4$, where we check whether orders 3 and 4 are complementary).
| Order-n | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc. |
|---|---|---|---|---|---|
| n = 1 | 78.5 | 86.3 | 75.3 | 77.9 | 32.0 |
| n = 2 | 83.0 | 89.2 | 86.2 | 88.3 | 37.1 |
| n = 3 | 91.3 | 97.0 | 87.5 | 89.7 | 39.5 |
| n = 4 | 91.5 | 97.1 | 87.8 | 90.0 | 40.1 |
| n = 5 | 91.4 | 97.3 | 87.8 | 90.0 | 40.3 |
| n = 3 & 4 | 91.6 | 97.2 | 87.6 | 90.3 | 40.5 |
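To make the temporal-block construction from Section 5.2 concrete (block size 10, stride 5, i.e., a 50% overlap between consecutive blocks), here is a minimal PyTorch sketch; the sequence length and the use of `unfold` are illustrative assumptions rather than the exact implementation:

```python
import torch

def split_into_blocks(seq, block_size=10, stride=5):
    """Split a skeleton sequence (frames x joints x coords) into overlapping temporal blocks."""
    blocks = seq.unfold(0, block_size, stride)  # (tau, J, C, block_size)
    return blocks.permute(0, 3, 1, 2)           # (tau, block_size, J, C)

seq = torch.randn(64, 25, 3)                    # hypothetical: 64 frames, 25 joints, 3D coordinates
print(split_into_blocks(seq).shape)             # torch.Size([11, 10, 25, 3]): tau = 11 blocks
```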
+ +Table 2. Evaluations of our model variants with/without MP and/or TP. Baseline in the table denotes the backbone (MLP unit + HoTs) without the use of either MP or TP module. + +
| Variants | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics-Skel. Top-1 acc. |
|---|---|---|---|---|---|
| Baseline | 89.8 | 91.4 | 86.5 | 87.0 | 38.6 |
| + TP only | 91.2 | 93.8 | 87.5 | 88.6 | 39.8 |
| + MP only | 92.0 | 94.3 | 88.7 | 89.7 | 40.3 |
| + MP→TP | 93.0 | 96.1 | 90.8 | 91.7 | 45.7 |
| + TP→MP | 92.6 | 95.8 | 90.2 | 91.1 | 44.0 |
| + 2-branch (3Mformer) | 94.8 | 98.7 | 92.0 | 93.8 | 48.3 |
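Relating the 2-branch row of Table 2 back to Section 4.3.3, the MP→TP and TP→MP outputs share the dimension $rd'J$ after vectorization and are simply concatenated before the FC classifier. A minimal sketch with sizes taken from the setup above ($r=3$, $d'=16$, $J=25$, 60 classes, batch size 32); tensor contents and variable names are hypothetical:

```python
import torch
import torch.nn as nn

r, d_prime, J, num_classes = 3, 16, 25, 60         # orders, channels d', joints, classes (NTU-60)

out_mp_tp = torch.randn(32, r * d_prime * J)       # hypothetical MP->TP branch output (batch of 32)
out_tp_mp = torch.randn(32, r * d_prime * J)       # hypothetical TP->MP branch output

fused = torch.cat([out_mp_tp, out_tp_mp], dim=-1)  # (32, 2*r*d'*J) = (32, 2400), as in Sec. 4.3.3
classifier = nn.Linear(2 * r * d_prime * J, num_classes)
logits = classifier(fused)                         # (32, 60)
```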
+ +![](images/f2a642a29b0b5a90add2848cc3d6a8e473c9a3478a97cacea725338b04209982.jpg) +(a) + +![](images/e71bc19dcd48ddc53580575a60c07d8346dff3cc584340aa6a1d7bde738afbe3.jpg) +(b) + +![](images/fa582b8c85b427cd431a2c30983a66c679cdbbde815668395b5ea8ff0439d87d.jpg) +(c) +Figure 2. Visualization of attention matrices. (a) single-mode attention matrix of 'channel-only' token, (b)-(d) coupled-mode attention matrices of 'channel-hyper-edge', 'order-channel-body joint' and 'channel-temporal block' tokens, respectively. + +![](images/8afabcce92664b7f25393b42d25ba665997d90ef09c75ae925c5daa0a6813233.jpg) +(d) + +# 5.3. Ablation Study + +Search for the single best order $n$ . Table 1 shows our analysis regarding the best order $n$ . In general, increasing the order $n$ improves the performance (within $\sim 0.5\%$ on average), but causing higher computational cost, e.g., the number of hyper-edges for the skeletal hypergraph of order $n = 4$ is 3060 on Kinetics-Skeleton. We also notice that combining orders 3 and 4 yields very limited improvements. The main reasons are: (i) reasonable order $n$ , e.g., $n = 3$ or 4 improves accuracy as higher-order motion patterns are captured which are useful for classification-related tasks (ii) further increasing order $n$ , e.g., $n = 5$ introduces patterns in feature representations that rarely repeat even for the same action class. Considering the cost and performance, we choose the maximum order $r = 3$ ( $n = 1,2,3$ ) in the following experiments unless specified otherwise. + +Discussion on coupled-mode attention. Fig. 2 shows the visualization of some attention matrices in our 3Mformer, + +Table 3. Experimental results on NTU-60, NTU-120 and Kinetics-Skeleton. + +
| Type | Method | Venue | NTU-60 X-Sub | NTU-60 X-View | NTU-120 X-Sub | NTU-120 X-Set | Kinetics Top-1 | Kinetics Top-5 |
|---|---|---|---|---|---|---|---|---|
| Graph-based | TCN [22] | CVPRW'17 | - | - | - | - | 20.3 | 40.0 |
| | ST-GCN [64] | AAAI'18 | 81.5 | 88.3 | 70.7 | 73.2 | 30.7 | 52.8 |
| | AS-GCN [30] | CVPR'19 | 86.8 | 94.2 | 78.3 | 79.8 | 34.8 | 56.5 |
| | 2S-AGCN [44] | CVPR'19 | 88.5 | 95.1 | 82.5 | 84.2 | 36.1 | 58.7 |
| | NAS-GCN [37] | AAAI'20 | 89.4 | 95.7 | - | - | 37.1 | 60.1 |
| | Sym-GNN [31] | TPAMI'22 | 90.1 | 96.4 | - | - | 37.2 | 58.1 |
| | Shift-GCN [6] | CVPR'20 | 90.7 | 96.5 | 85.9 | 87.6 | - | - |
| | MS-G3D [36] | CVPR'20 | 91.5 | 96.2 | 86.9 | 88.4 | 38.0 | 60.9 |
| | CTR-GCN [4] | ICCV'21 | 92.4 | 96.8 | 88.9 | 90.6 | - | - |
| | InfoGCN [9] | CVPR'22 | 93.0 | 97.1 | 89.8 | 91.2 | - | - |
| | PoseConv3D [12] | CVPR'22 | 94.1 | 97.1 | 86.9 | 90.3 | 47.7 | - |
| Hypergraph-based | Hyper-GNN [15] | TIP'21 | 89.5 | 95.7 | - | - | 37.1 | 60.0 |
| | DHGCN [62] | CoRR'21 | 90.7 | 96.0 | 86.0 | 87.9 | 37.7 | 60.6 |
| | Selective-HCN [79] | ICMR'22 | 90.8 | 96.6 | - | - | 38.0 | 61.1 |
| | SD-HGCN [17] | ICONIP'21 | 90.9 | 96.7 | 87.0 | 88.2 | 37.4 | 60.5 |
| Transformer-based | ST-TR [39] | CVIU'21 | 90.3 | 96.3 | 85.1 | 87.1 | 38.0 | 60.5 |
| | MTT [23] | LSP'21 | 90.8 | 96.7 | 86.1 | 87.6 | 37.9 | 61.3 |
| | 4s-GSTN [18] | Symmetry'22 | 91.3 | 96.6 | 86.4 | 88.7 | - | - |
| | STST [73] | ACM MM'21 | 91.9 | 96.8 | - | - | 38.3 | 61.2 |
| | 3Mformer (with avg-pool, ours) | | 92.0 | 97.3 | 88.0 | 90.1 | 43.1 | 65.2 |
| | 3Mformer (with max-pool, ours) | | 92.1 | 97.8 | - | - | - | - |
| | 3Mformer (with attn-pool, ours) | | 94.2 | 98.5 | 89.7 | 92.4 | 45.7 | 67.6 |
| | 3Mformer (with tri-pool, ours) | | 94.0 | 98.5 | 91.2 | 92.7 | 47.7 | 71.9 |
| | 3Mformer (with rank-pool, ours) | | 94.8 | 98.7 | 92.0 | 93.8 | 48.3 | 72.3 |
+ +![](images/c53534b9138a06abacb121d77afe8a0a88fe2d064fb209ea931912cb3b2f85c4.jpg) +Figure 3. Evaluations of different single-mode (baseline) and coupled-mode tokens. We use a 3rd-order HoT with a standard Transformer, but we replace the scaled dot-product attention with coupled-mode tokens and coupled-mode attention. + +which show diagonal and/or vertical patterns that are consistent with the patterns of the attention matrices found in standard Transformer trained on sequences, e.g., for natural language processing tasks [28, 49]. We also notice that the coupled-mode attention, e.g., 'channel-temporal block' captures much richer information compared to single mode attention, e.g., 'channel-only'. Our coupled-mode attention can be applied to different orders of tensor representations through simple matricization. + +Discussion on model variants. To show the effectiveness of the proposed MP and TP module, firstly, we compare + +TP only and MP only with the baseline (No MP or TP module). We use the TP module followed by an FC layer instead of MP as in $\mathrm{TP} \rightarrow \mathrm{MP}$ , where the FC layer takes the output from TP $(\mathbb{R}^{d'N})$ and produces a vector in $\mathbb{R}^{3d'J}$ passed to the classifier. Similarly, for MP only, we use the MP module followed by an average pooling layer instead of TP as in $\mathrm{MP} \rightarrow \mathrm{TP}$ , where the average layer takes output from MP $(\mathbb{R}^{3d'J \times \tau})$ and generates a vector in $\mathbb{R}^{3d'J}$ (pool along $\tau$ blocks), passed to the classifier. Table 2 shows the results. With just the TP module, we outperform the baseline by $1.3\%$ on average. With only the MP module, we outperform the baseline by $2.34\%$ on average. These comparisons show that (i) CmSA in MP and TP are efficient for better performance (ii) MP performs better than TP which shows that 'channel-temporal block' token contains richer information than 'channel-hyper-edge' token. We also notice that $\mathrm{MP} \rightarrow \mathrm{TP}$ slightly outperforms $\mathrm{TP} \rightarrow \mathrm{MP}$ by $\sim 1\%$ , and the main reason is that $\mathrm{MP} \rightarrow \mathrm{TP}$ has coupled-mode tokens 'channel-temporal block' and 'order-channel-joint' which attend 4 modes, whereas $\mathrm{TP} \rightarrow \mathrm{MP}$ has 'channel-hyper-edge' and 'channel-only' tokens which attend only 2 modes. Fig. 3 shows a comparison of different coupled-mode tokens on 3 benchmark datasets. This also suggests that one should firstly perform attenuate + +tion with coupled-mode 'channel-block' tokens, followed by weighted pooling along the hyper-edge mode, followed by attention with coupled-mode 'order-channel-body joint' and finalised by block-temporal pooling. Finally, with 2- branch (3Mformer), we further boost the performance by $2 - 4\%$ , which shows that $\mathrm{MP}\rightarrow \mathrm{TP}$ and $\mathrm{TP}\rightarrow \mathrm{MP}$ are complementary branches. Below we use 2-branch (3Mformer) in the experiments (as in Fig. 1). + +Comparison of pooling in TP. As shown in Table 3, average pooling (avg-pool) achieves similar performance (within $\sim 0.5\%$ difference) as maximum pooling (max-pool), second-order pooling (attn-pool) outperforms average and maximum pooling by $\sim 1 - 2\%$ and third-order pooling (tri-pool) outperforms second-order pooling by $\sim 1\%$ . Interestingly, rank pooling (rank-pool) achieves the best performance. 
We think it is reasonable as rank pooling strives to enforce the temporal order in the feature space to be preserved, e.g., it forces network to always preserve temporal progression of actions over time. With multiple attention modules, orderless statistics such as second- or third-order pooling may be too general. + +# 5.4. Comparisons with the State of the Arts + +We compare our model with recent state-of-the-art methods. On the NTU-60 (Tab. 3), we obtain the top-1 accuracies of the two evaluation protocols during test stage. The methods in comparisons include popular graph-based [30, 31, 37, 44, 64] and hypergraph-based models [15, 17, 62, 79]. Our 3rd-order model outperforms all graph-based methods, and also outperforms existing hypergraph-based models such as Selective-HCN and SD-HGCN by $0.45\%$ and $0.35\%$ on average on X-Sub and X-View respectively. With 3Mformer for the fusion of multi-order features, our model further boosts the performance by $\sim 3\%$ and $1.5\%$ on the two protocols. + +It can be seen from Tab. 3 on NTU-60 that although some learned graph-based methods such as AS-GCN and 2S-AGCN can also capture the dependencies between human body joints, they only consider the pairwise relationship between body joints, which is the second-order interaction, and ignore the higher-order interaction between multiple body joints in form of hyper-edges, which may lose sensitivity to important groups of body joints. Our proposed 3Mformer achieves better performance by constructing a hypergraph from 2D/3D body joints as nodes for action recognition, thus capturing higher-order interactions of body joints to further improve the performance. Note that even with the average pooling, our model still achieves competitive results compared to its counterparts. + +For the NTU-120 dataset (Tab. 3), we obtain the top-1 performance on X-Sub and X-Set protocols. Our 2nd-order HoT alone outperforms graph-based models by $2 - 2.4\%$ on average. For example, we outperform recent Shift-GCN by + +Table 4. Experimental results on Northwestern-UCLA. + +
| | Shift-GCN [6] (CVPR'20) | CTR-GCN [4] (ICCV'21) | InfoGCN [9] (CVPR'22) | 2nd-order only (ours) | 3rd-order only (ours) | 3Mformer (ours) |
|---|---|---|---|---|---|---|
| acc. (%) | 94.6 | 96.5 | 97.0 | 96.5 | 97.2 | 97.8 |
+ +0.3% and 0.7% on X-Sub and X-Set respectively. Moreover, our 3rd-order HoT alone outperforms SD-HGCN by 0.5% and 1.5% respectively on X-Sub and X-Set. With the 3Mformer for the fusion of multi-order feature maps, we obtain the new state-of-the-art results. Notice that our 3Mformer yields $92.0\% / 93.8\%$ on NTU-120 while [38] yields $80.5\% / 81.7\%$ as we explore the fusion of multiple orders of hyperedges and several coupled-token types capturing easy-to-complex dynamics of varying joint groups. + +As videos from the Kinetics dataset are processed by the OpenPose, the skeletons in the Kinetics-Skeleton dataset have defects which adversely affect the performance of the model. We show both top-1 and top-5 performance in Table 3 to better reflect the performance of our 3Mformer. STGCN is the first method based on GCN, our 2nd-order HoT alone achieves very competitive results compared to the very recent NAS-GCN and Sym-GNN. The 3rd-order HoT alone outperforms Hyper-GNN, SD-HGCN and SelectiveHCN by $3.4\%$ , $3.1\%$ and $2.9\%$ respectively for top-1 accuracies. Moreover, fusing multi-order feature maps from multiple orders of hyper-edges via 3Mformer gives us the best performance on Kinetics-Skeleton with $48.3\%$ for top-1, the new state-of-the-art result. + +Table 4 shows results on the Northwestern-UCLA dataset. Our 3Mformer is also effective on this dataset-it outperforms the current state-of-the-art InfoGCN by $0.8\%$ . + +# 6. Conclusions + +In this paper, we model the skeleton data as hypergraph to capture higher-order information formed between groups of human body joints of orders 1, ..., $r$ . We use Higher-order Transformer (HoT) to learn higher-order information on hypergraphs of $r$ -order formed over 2D/3D human body joints. We also introduce a novel Multi-order Multi-mode Transformer (3Mformer) for the fusion of multi-order feature representations. Our end-to-end trainable 3Mformer outperforms state-of-the-art graph- and hypergraph-based models by a large margin on several benchmarks. + +Acknowledgements. LW is supported by the Data61/ CSIRO PhD Scholarship. PK is in part funded by CSIRO's Machine Learning and Artificial Intelligence Future Science Platform (MLAI FSP) Spatiotemporal Activity. + +# References + +[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: Transformers for multimodal self-supervised learning from raw video, audio and text. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2 +[2] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017. 1, 6 +[3] Chun-Fu Chen, Quanfu Fan, and Rameswar Panda. Crossvit: Cross-attention multi-scale vision transformer for image classification. CoRR, abs/2103.14899, 2021. 3 +[4] Yuxin Chen, Ziqi Zhang, Chunfeng Yuan, Bing Li, Ying Deng, and Weiming Hu. Channel-wise topology refinement graph convolution for skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13359-13368, 2021. 2, 7, 8 +[5] Ke Cheng, Yifan Zhang, Congqi Cao, Lei Shi, Jian Cheng, and Hanqing Lu. Decoupling gcn with dropgraph module for skeleton-based action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020, pages 536-553, Cham, 2020. 
Springer International Publishing. 2 +[6] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6, 7, 8 +[7] Yi-Bin Cheng, Xipeng Chen, Dongyu Zhang, and Liang Lin. Motion-transformer: Self-supervised pre-training for skeleton-based action recognition. In Proceedings of the 2nd ACM International Conference on Multimedia in Asia, MMAsia '20, New York, NY, USA, 2021. Association for Computing Machinery. 2 +[8] Anoop Cherian, Piotr Koniusz, and Stephen Gould. Higher-order pooling of cnn features via kernel linearization for action recognition. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 130-138, 2017. 2, 5 +[9] Hyung-gun Chi, Myoung Hoon Ha, Seunggeun Chi, Sang Wan Lee, Qixing Huang, and Karthik Ramani. Infogcn: Representation learning for human skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20186-20196, June 2022. 7, 8 +[10] Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J Colwell, and Adrian Weller. Rethinking attention with performers. In International Conference on Learning Representations, 2021. 4 +[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, + +Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3 +[12] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2969-2978, June 2022. 7 +[13] Basura Fernando, Efstratios Gavves, Jose Oramas Oramas M., Amir Ghodrati, and Tinne Tuytelaars. Rank pooling for action recognition. IEEE Trans. Pattern Anal. Mach. Intell., 39(4):773-787, apr 2017. 2, 5 +[14] Rohit Girdhar and Deva Ramanan. Attentional pooling for action recognition. In NIPS, 2017. 2, 5 +[15] Xiaoke Hao, Jie Li, Yingchun Guo, Tao Jiang, and Ming Yu. Hypergraph neural network for skeleton-based action recognition. IEEE Transactions on Image Processing, 30:2263-2275, 2021. 2, 4, 7, 8, 13 +[16] Ryota Hashiguchi and Toru Tamaki. Vision transformer with cross-attention by temporal shift for efficient action recognition, 2022. 3 +[17] Changxiang He, Chen Xiao, Shuting Liu, Xiaofei Qin, Ying Zhao, and Xuedian Zhang. Single-skeleton and dual-skeleton hypergraph convolution neural networks for skeleton-based action recognition. In Teddy Mantoro, Minho Lee, Media Anugerah Ayu, Kok Wai Wong, and Achmad Nizar Hidayanto, editors, Neural Information Processing, pages 15-27, Cham, 2021. Springer International Publishing. 7, 8 +[18] Yujuan Jiang, Zhaoneng Sun, Saisai Yu, Shuang Wang, and Yang Song. A graph skeleton transformer network for action recognition. Symmetry, 14(8), 2022. 7 +[19] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are RNNs: Fast autoregressive transformers with linear attention. 
In Hal Daumé III and Aarti Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 5156-5165. PMLR, 13-18 Jul 2020. 4 +[20] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset, 2017. 6 +[21] Jinwoo Kim, Saeyoon Oh, and Seunghoon Hong. Transformers generalize deepsets and can be extended to graphs & hypergraphs. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2, 3, 4 +[22] Tae Soo Kim and Austin Reiter. Interpretable 3d human action analysis with temporal convolutional networks. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1623-1631, 2017. 7 +[23] Jun Kong, Yuhang Bian, and Min Jiang. Mtt: Multi-scale temporal transformer for skeleton-based action recognition. IEEE Signal Processing Letters, 29:528-532, 2022. 7 + +[24] Piotr Koniusz, Lei Wang, and Anoop Cherian. Tensor representations for action recognition. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 1, 2 +[25] Piotr Koniusz, Lei Wang, and Ke Sun. High-order tensor pooling with attention for action recognition. arXiv, 2021. 1, 2, 5 +[26] Piotr Koniusz and Hongguang Zhang. Power normalizations in fine-grained image, few-shot image and graph classification. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 2 +[27] Matthew Korban and Xin Li. Ddgcn: A dynamic directed graph convolutional network for action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision – ECCV 2020, pages 761–776, Cham, 2020. Springer International Publishing. 2 +[28] Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. Revealing the dark secrets of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. 7 +[29] John Boaz Lee, Ryan Rossi, and Xiangnan Kong. Graph classification using structural attention. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '18, page 1666-1674, New York, NY, USA, 2018. Association for Computing Machinery. 3 +[30] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Actional-structural graph convolutional networks for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2, 7, 8 +[31] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Symbiotic graph neural networks for 3d skeleton-based human action recognition and motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(6):3316-3333, 2022. 7, 8 +[32] Hezheng Lin, Xing Cheng, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Qing Song, and Wei Yuan. CAT: cross attention in vision transformer. CoRR, abs/2106.05786, 2021. 3 +[33] Tsung-Yu Lin, Subhransu Maji, and Piotr Koniusz. Second-order democratic aggregation. In ECCV, 2018. 2 +[34] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C. Kot. 
Ntu rgb+d 120: A large-scale benchmark for 3d human activity understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2019. 6 +[35] Shengyuan Liu, Pei Lv, Yuzhen Zhang, Jie Fu, Junjin Cheng, Wanqing Li, Bing Zhou, and Mingliang Xu. Semi-dynamic hypergraph neural network for 3d pose estimation. In Christian Bessiere, editor, Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 782-788. International Joint Conferences on Artificial Intelligence Organization, 7 2020. Main track. 2, 4, 13 + +[36] Ziyu Liu, Hongwen Zhang, Zhenghao Chen, Zhiyong Wang, and Wanli Ouyang. Disentangling and unifying graph convolutions for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 7 +[37] Wei Peng, Xiaopeng Hong, Haoyu Chen, and Guoying Zhao. Learning graph convolutional network for skeleton-based human action recognition by neural searching. Proceedings of the AAAI Conference on Artificial Intelligence, 34(03):2669-2676, Apr. 2020. 7, 8 +[38] Wei Peng, Jingang Shi, Tuomas Varanka, and Guoying Zhao. Rethinking the st-gcns for 3d skeleton-based human action recognition. Neurocomputing, 454:45-53, 2021. 8 +[39] Chiara Plizzari, Marco Cannici, and Matteo Matteucci. Skeleton-based action recognition via spatial and temporal transformer networks. Computer Vision and Image Understanding, 208-209:103219, 2021. 7 +[40] Zhenyue Qin, Yang Liu, Pan Ji, Dongwoo Kim, Lei Wang, Bob McKay, Saeed Anwar, and Tom Gedeon. Fusing higher-order features in graph neural networks for skeleton-based action recognition. IEEE TNNLS, 2022. 1 +[41] Saimunur Rahman, Piotr Koniusz, Lei Wang, Luping Zhou, Peyman Moghadam, and Changming Sun. Learning partial correlation based deep visual representation for image classification. In CVPR, 2023. 2 +[42] Kanchana Ranasinghe, Muzammal Naseer, Salman Khan, Fahad Shahbaz Khan, and Michael Ryoo. Self-supervised video transformer. In IEEE/CVF International Conference on Computer Vision and Pattern Recognition, June 2022. 2 +[43] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+d: A large scale dataset for 3d human activity analysis. In IEEE Conference on Computer Vision and Pattern Recognition, June 2016. 6 +[44] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In CVPR, 2019. 7, 8 +[45] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Adasgn: Adapting joint number and model size for efficient skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 13413-13422, October 2021. 2 +[46] Chenyang Si, Wentao Chen, Wei Wang, Liang Wang, and Tieniu Tan. An attention enhanced graph convolutional LSTM network for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1, 2 +[47] Yi-Fan Song, Zhang Zhang, Caifeng Shan, and Liang Wang. Constructing stronger and faster baselines for skeleton-based action recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2022. 2 +[48] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. CoRR, abs/2203.12602, 2022. 2 +[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I.
Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 3, 4, 7 +[50] Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 3 +[51] Jiang Wang, Xiaohan Nie, Yin Xia, Ying Wu, and Song-Chun Zhu. Cross-view action modeling, learning and recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2649-2656, 2014. 6 +[52] Lei Wang. Analysis and evaluation of Kinect-based action recognition algorithms. Master's thesis, School of Computer Science and Software Engineering, The University of Western Australia, 11 2017. 1 +[53] Lei Wang, Du Q. Huynh, and Piotr Koniusz. A comparative review of recent Kinect-based action recognition algorithms. TIP, 2019. 1 +[54] Lei Wang, Du Q. Huynh, and Moussa Reda Mansour. Loss switching fusion with similarity search for video classification. ICIP, 2019. 1 +[55] Lei Wang and Piotr Koniusz. Self-Supervising Action Recognition by Statistical Moment and Subspace Descriptors, page 4324-4333. Association for Computing Machinery, New York, NY, USA, 2021. 1 +[56] Lei Wang and Piotr Koniusz. Temporal-viewpoint transportation plan for skeletal few-shot action recognition. In Proceedings of the Asian Conference on Computer Vision, pages 4176-4193, 2022. 1 +[57] Lei Wang and Piotr Koniusz. Uncertainty-dtw for time series and sequences. In Computer Vision – ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI, pages 176-195. Springer, 2022. 1 +[58] Lei Wang, Piotr Koniusz, and Du Q. Huynh. Hallucinating IDT descriptors and I3D optical flow features for action recognition with cnns. In ICCV, 2019. 1 +[59] Lei Wang, Jun Liu, and Piotr Koniusz. 3d skeleton-based few-shot action recognition with jeanie is not so naive. arXiv preprint arXiv:2112.12668, 2021. 1 +[60] Qilong Wang, Zilin Gao, Jiangtao Xie, Wangmeng Zuo, and Peihua Li. Global gated mixture of second-order pooling for improving deep convolutional neural networks. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2, 5 +[61] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. CoRR, abs/2112.09133, 2021. 2 +[62] Jinfeng Wei, Yunxin Wang, Mengli Guo, Pei Lv, Xiaoshan Yang, and Mingliang Xu. Dynamic hypergraph convolutional networks for skeleton-based action recognition. CoRR, abs/2112.10570, 2021. 7, 8 +[63] Xi Wei, Tianzhu Zhang, Yan Li, Yongdong Zhang, and Feng Wu. Multi-modality cross attention network for image and sentence matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 3 +[64] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition. In AAAI, 2018. 1, 2, 4, 6, 7, 8, 13 +[65] Han Zhang, Yonghong Song, and Yuanlin Zhang. Graph convolutional LSTM model for skeleton-based action recognition. In 2019 IEEE International Conference on Multimedia and Expo (ICME), pages 412-417, 2019. 1 +[66] Jiani Zhang, Xingjian Shi, Junyuan Xie, Hao Ma, Irwin King, and Dit-Yan Yeung.
Gaan: Gated attention networks for learning on large and spatiotemporal graphs. In Amir Globerson and Ricardo Silva, editors, UAI, pages 339-349. AUAI Press, 2018. 3 +[67] Pengfei Zhang, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jianru Xue, and Nanning Zheng. Semantics-guided neural networks for efficient skeleton-based human action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2 +[68] Shan Zhang, Dawei Luo, Lei Wang, and Piotr Koniusz. Few-shot object detection by second-order pooling. In ACCV, 2020. 2 +[69] Shan Zhang, Naila Murray, Lei Wang, and Piotr Koniusz. Time-reversed diffusion tensor transformer: A new tenet of few-shot object detection. In ECCV, 2022. 2 +[70] Shan Zhang, Lei Wang, Naila Murray, and Piotr Koniusz. Kernelized few-shot object detection with efficient integral aggregation. In CVPR, 2022. 2 +[71] Xikun Zhang, Chang Xu, and Dacheng Tao. Context aware graph convolution for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2 +[72] Yongkang Zhang, Jun Li, Guoming Wu, Han Zhang, Zhiping Shi, Zhaoxun Liu, and Zizhang Wu. Temporal transformer networks with self-supervision for action recognition. CoRR, abs/2112.07338, 2021. 2 +[73] Yuhan Zhang, Bo Wu, Wen Li, Lixin Duan, and Chuang Gan. Stst: Spatial-temporal specialized transformer for skeleton-based action recognition. In Proceedings of the 29th ACM International Conference on Multimedia, MM '21, page 3229-3237, New York, NY, USA, 2021. Association for Computing Machinery. 7 +[74] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. COSTA: Covariance-preserving feature augmentation for graph contrastive learning. In KDD, 2022. 2 +[75] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. Spectral feature augmentation for graph contrastive learning and beyond. In AAAI, 2023. 2 +[76] Hao Zhu and Piotr Koniusz. Simple spectral graph convolution. In ICLR, 2021. 1 +[77] Hao Zhu and Piotr Koniusz. Generalized laplacian eigenmaps. In NeurIPS, 2022. 1 +[78] Hao Zhu, Ke Sun, and Piotr Koniusz. Contrastive laplacian eigenmaps. In NeurIPS, 2021. 1 +[79] Yiran Zhu, Guangji Huang, Xing Xu, Yanli Ji, and Fumin Shen. Selective hypergraph convolutional networks for skeleton-based action recognition. In Proceedings of the 2022 International Conference on Multimedia Retrieval, ICMR '22, page 518-526, New York, NY, USA, 2022. Association for Computing Machinery. 7, 8 +[80] Zilin Gao, Jiangtao Xie, Qilong Wang, and Peihua Li. Global second-order pooling convolutional networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
2, 5 \ No newline at end of file diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/images.zip b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2be17f5eaa5b44c00d849301b55d5c47f79e54e2 --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:284b442da056eabe8061a0ee568dac69b990b10f7b59d452933f25b4c81a8952 +size 366230 diff --git a/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/layout.json b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b1282f5697740af2d1dbc3236951390e8814a484 --- /dev/null +++ b/2023/3Mformer_ Multi-Order Multi-Mode Transformer for Skeletal Action Recognition/layout.json @@ -0,0 +1,12532 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 141, + 103, + 455, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 103, + 455, + 140 + ], + "spans": [ + { + "bbox": [ + 141, + 103, + 455, + 140 + ], + "type": "text", + "content": "3Mformer: Multi-order Multi-mode Transformer for Skeletal Action Recognition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 211, + 160, + 381, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 160, + 381, + 175 + ], + "spans": [ + { + "bbox": [ + 211, + 160, + 381, + 175 + ], + "type": "text", + "content": "Lei Wang†,§ Piotr Koniusz*,§,†" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 177, + 175, + 416, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 175, + 416, + 189 + ], + "spans": [ + { + "bbox": [ + 177, + 175, + 416, + 189 + ], + "type": "text", + "content": "†Australian National University, §Data61♥CSIRO" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 205, + 189, + 386, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 189, + 386, + 202 + ], + "spans": [ + { + "bbox": [ + 205, + 189, + 386, + 202 + ], + "type": "text", + "content": "\\$firstname_lastname@data61.csiro.au" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "spans": [ + { + "bbox": [ + 143, + 231, + 192, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 256, + 289, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 256, + 289, + 557 + ], + "spans": [ + { + "bbox": [ + 46, + 256, + 289, + 557 + ], + "type": "text", + "content": "Many skeletal action recognition models use GCNs to represent the human body by 3D body joints connected body parts. GCNs aggregate one- or few-hop graph neighbourhoods, and ignore the dependency between not linked body joints. We propose to form hypergraph to model hyperedges between graph nodes (e.g., third- and fourth-order hyper-edges capture three and four nodes) which help capture higher-order motion patterns of groups of body joints. 
We split action sequences into temporal blocks, Higher-order Transformer (HoT) produces embeddings of each temporal block based on (i) the body joints, (ii) pairwise links of body joints and (iii) higher-order hyper-edges of skeleton body joints. We combine such HoT embeddings of hyper-edges of orders 1,..., " + }, + { + "bbox": [ + 46, + 256, + 289, + 557 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 256, + 289, + 557 + ], + "type": "text", + "content": " by a novel Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged to achieve coupled-mode attention on coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. The first module, called Multi-order Pooling (MP), additionally learns weighted aggregation along the hyper-edge mode, whereas the second module, Temporal block Pooling (TP), aggregates along the temporal block' mode. Our end-to-end trainable network yields state-of-the-art results compared to GCN-, transformer- and hypergraph-based counterparts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "spans": [ + { + "bbox": [ + 47, + 579, + 128, + 593 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 600, + 287, + 685 + ], + "type": "text", + "content": "Action Recognition has applications in video surveillance, human-computer interaction, sports analysis, and virtual reality [24, 25, 40, 52-59]. Different from video-based methods which mainly focus on modeling the spatiotemporal representations from RGB frames and/or optical flow [25, 52-55, 58], skeleton sequences, representing a spatio-temporal evolution of 3D body joints, have been" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 232, + 547, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 232, + 547, + 435 + ], + "spans": [ + { + "bbox": [ + 306, + 232, + 547, + 435 + ], + "type": "text", + "content": "proven robust against sensor noises and effective in action recognition while being computationally and storage efficient [24, 40, 52, 53, 56, 57, 59]. The skeleton data is usually obtained by either localization of 2D/3D coordinates of human body joints with the depth sensors or pose estimation algorithms applied to videos [2]. Skeleton sequences enjoy (i) simple structural connectivity of skeletal graph and (ii) temporal continuity of 3D body joints evolving in time. While temporal evolution of each body joint is highly informative, embeddings of separate body joints are insensitive to relations between body parts. Moreover, while the links between adjacent 3D body joints (following the structural connectivity) are very informative as they model relations, these links represent highly correlated nodes in the sense of their temporal evolution. Thus, modeling larger groups of 3D body joints as hyper-edges can capture more complex spatio-temporal motion dynamics." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 437, + 547, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 437, + 547, + 616 + ], + "spans": [ + { + "bbox": [ + 304, + 437, + 547, + 616 + ], + "type": "text", + "content": "The existing graph-based models mainly differ by how they handle temporal information. Graph Neural Network (GNN) may encode spatial neighborhood of the node followed by aggregation by LSTM [46, 65]. Alternatively, Graph Convolutional Network (GCN) may perform spatio-temporal convolution in the neighborhood of each node [64]. Spatial GCNs perform convolution within one or two hop distance of each node, e.g., spatio-temporal GCN model called ST-GCN [64] models spatio-temporal vicinity of each 3D body joint. As ST-GCN applies convolution along structural connections (links between body joints), structurally distant joints, which may cover key patterns of actions, are largely ignored. ST-GCN captures ever larger neighborhoods as layers are added but suffers from oversmoothing that can be mitigated by linear GCNs [76-78]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 547, + 715 + ], + "type": "text", + "content": "Human actions are associated with interaction groups of skeletal joints, e.g., wrist alone, head-wrist, head-wrist-ankles, etc. The impact of these groups of joints on each action differs, and the degree of influence of each joint should be learned. Accordingly, designing a better model for skeleton data is vital given the topology of skeleton graph is suboptimal. While GCN can be applied to a fully-connected graph (i.e., 3D body joints as densely connected" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 693, + 136, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 693, + 136, + 703 + ], + "spans": [ + { + "bbox": [ + 58, + 693, + 136, + 703 + ], + "type": "text", + "content": "*Corresponding author." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "spans": [ + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "text", + "content": "For brevity, we write " + }, + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "text", + "content": " temporal blocks per sequence but " + }, + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 58, + 703, + 274, + 713 + ], + "type": "text", + "content": " varies." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "5620" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "graph nodes), Higher-order Transformer (HoT) [21] has been proven more efficient." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "content": "Thus, we propose to use hypergraphs with hyper-edges of order 1 to " + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 96, + 286, + 191 + ], + "type": "text", + "content": " to effectively represent skeleton data for action recognition. Compared to GCNs, our encoder contains an MLP followed by three HoT branches that encode first, second- and higher-order hyper-edges, i.e., set of body joints, edges between pairs of nodes, hyper-edges between triplets of nodes, etc. Each branch has its own learnable parameters, and processes temporal blocks2 one-by-one." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": "We notice that (i) the number of hyper-edges of " + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": " joints grows rapidly with order " + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "inline_equation", + "content": "\\binom{J}{i}" + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "inline_equation", + "content": "i = 1, \\dots, r" + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": ", embeddings of the highest order dominate lower orders in terms of volume if such embeddings are merely concatenated, and (ii) long-range temporal dependencies of feature maps are insufficiently explored, as sequences are split into " + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 192, + 286, + 276 + ], + "type": "text", + "content": " temporal blocks for computational tractability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "text", + "content": "Merely concatenating outputs of HoT branches of orders 1 to " + }, + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "text", + "content": ", and across " + }, + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 277, + 286, + 468 + ], + "type": "text", + "content": " blocks, is sub-optimal. Thus, our Multi-order Multi-mode Transformer (3Mformer) with two modules whose order can be exchanged, realizes a variation of coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs. As HoT operates block-by-block, 'channel-temporal block' tokens and weighted hyper-edge aggregation in Multi-order Pooling (MP) help combine information flow block-wise. Various coupled-mode tokens help improve results further due to different focus of each attention mechanism. As the block-temporal mode needs to be aggregated (number of blocks varies across sequences), Temporal block Pooling (TP) can use rank pooling [13], second-order [14, 26, 33, 41, 60, 68, 80] or higher-order pooling [8, 24, 25, 69, 70]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 475, + 288, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 475, + 288, + 485 + ], + "spans": [ + { + "bbox": [ + 59, + 475, + 288, + 485 + ], + "type": "text", + "content": "In summary, our main contributions are listed as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 497, + 286, + 685 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 53, + 497, + 286, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 497, + 286, + 568 + ], + "spans": [ + { + "bbox": [ + 53, + 497, + 286, + 568 + ], + "type": "text", + "content": "i. We model the skeleton data as hypergraph of orders 1 to " + }, + { + "bbox": [ + 53, + 497, + 286, + 568 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 53, + 497, + 286, + 568 + ], + "type": "text", + "content": " (set, graph and/or hypergraph), where human body joints serve as nodes. Higher-order Transformer embeddings of such formed hyper-edges represent various groups of 3D body joints and capture various higher-order dynamics important for action recognition." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 577, + 286, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 577, + 286, + 685 + ], + "spans": [ + { + "bbox": [ + 51, + 577, + 286, + 685 + ], + "type": "text", + "content": "ii. As HoT embeddings represent individual hyper-edge order and block, we introduce a novel Multi-order Multi-mode Transformer (3Mformer) with two modules, Multi-order Pooling and Temporal block Pooling. Their goal is to form coupled-mode tokens such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 108 + ], + "type": "text", + "content": "Our 3Mformer outperforms other GCN- and hypergraph-based models on NTU-60, NTU-120, Kinetics-Skeleton and Northwestern-UCLA by a large margin." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 121, + 392, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 121, + 392, + 133 + ], + "spans": [ + { + "bbox": [ + 306, + 121, + 392, + 133 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 141, + 545, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 141, + 545, + 163 + ], + "spans": [ + { + "bbox": [ + 305, + 141, + 545, + 163 + ], + "type": "text", + "content": "Below we describe popular action recognition models for skeletal data." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 168, + 545, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 545, + 396 + ], + "type": "text", + "content": "Graph-based models. 
Popular GCN-based models include the Attention enhanced Graph Convolutional LSTM network (AGC-LSTM) [46], the Actional-Structural GCN (AS-GCN) [30], Dynamic Directed GCN (DDGCN) [27], Decoupling GCN with DropGraph module [5], ShiftGCN [6], Semantics-Guided Neural Networks (SGN) [67], AdaSGN [45], Context Aware GCN (CA-GCN) [71], Channel-wise Topology Refinement Graph Convolution Network (CTR-GCN) [4] and a family of Efficient GCN (EfficientGCN-Bx) [47]. Although GCN-based models enjoy good performance, they have shortcomings, e.g., convolution and/or pooling are applied over one- or few-hop neighborhoods, e.g., ST-GCN [64], according to the human skeleton graph (body joints linked up according to connectivity of human body parts). Thus, indirect links between various 3D body joints such as hands and legs are ignored. In contrast, our model is not restricted by the structure of typical human body skeletal graph. Instead, 3D body joints are nodes which form hyper-edges of orders 1 to " + }, + { + "bbox": [ + 304, + 168, + 545, + 396 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 168, + 545, + 396 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 399, + 545, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 545, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 545, + 542 + ], + "type": "text", + "content": "Hypergraph-based models. Pioneering work on capturing groups of nodes across time uses tensors [24] to represent the 3D human body joints to exploit the kinematic relations among the adjacent and non-adjacent joints. Representing the human body as a hypergraph is adopted in [35] via a semi-dynamic hypergraph neural network that captures richer information than GCN. A hypergraph GNN [15] captures both spatio-temporal information and higher-order dependencies for skeleton-based action recognition. Our work is somewhat closely related to these works, but we jointly use hypergraphs of order 1 to " + }, + { + "bbox": [ + 304, + 399, + 545, + 542 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 399, + 545, + 542 + ], + "type": "text", + "content": " to obtain rich hyper-edge embeddings based on Higher-order Transformers." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 545, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 545, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 545, + 545, + 712 + ], + "type": "text", + "content": "Transformer-based models. Action recognition with transformers includes self-supervised video transformer [42] that matches the features from different views (a popular strategy in self-supervised GCNs [74, 75]), the end-to-end trainable Video-Audio-Text-Transformer (VATT) [1] for learning multi-model representations from unlabeled raw video, audio and text through the multimodal contrastive losses, and the Temporal Transformer Network with Self-supervision (TTSN) [72]. 
Motion-Transformer [7] captures the temporal dependencies via a self-supervised pre-training on human actions, Masked Feature Prediction (MaskedFeat) [61] pre-trained on unlabeled videos with MViT-L learns abundant visual representations, and video-masked autoencoder (VideoMAE) [48] with vanilla ViT" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 46, + 693, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 693, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 46, + 693, + 286, + 712 + ], + "type": "text", + "content": "2Each temporal block enjoys a locally factored out (removed) temporal mode, which makes each block representation compact." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "5621" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 545, + 167 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 545, + 167 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 545, + 167 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 545, + 167 + ], + "type": "image", + "image_path": "e54dd32c2d7e0e6322522f3bca6e512a8da722d88d64a7868bafd07b87e447c2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "lines": [ + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "spans": [ + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": "Figure 1. Pipeline overview. Each sequence is split into " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " temporal blocks " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathbf{B}_1, \\dots, \\mathbf{B}_{\\tau}" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": ". Subsequently, each block is embedded by a simple MLP into " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_1, \\dots, \\mathbf{X}_{\\tau}" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": ", which are passed to Higher-order Transformers (HoT (" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "n = 1, \\dots, r" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": ")) in order to obtain feature tensors " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\Phi_1, \\dots, \\Phi_{\\tau}" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": ". These tensors are subsequently concatenated by " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " along the hyper-edge mode into a multi-order feature tensor " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": ". 
The final step is a Multi-order Multi-mode Transformer (3Mformer from Section 4), which contains two complementary branches, MP " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " TP and TP " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " MP, whose outputs are concatenated by " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " and passed to the classifier. MP and TP perform the Coupled-mode Self-Attention (CmSA) with the so-called coupled-mode tokens, based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge' and 'channel-only' pairs. To this end, MP contains also weighted pooling along hyper-edge mode by learnable matrix " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathbf{H}" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " (and " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "\\mathbf{H}'" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " in another branch). TP contains also block-temporal pooling denoted by " + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "inline_equation", + "content": "g(\\cdot)" + }, + { + "bbox": [ + 46, + 175, + 547, + 288 + ], + "type": "text", + "content": " whose role is to capture block-temporal order with average, maximum, rank pooling, etc. In our experiments we show that such designed MP and TP are able to efficiently process hyper-edge feature representations from HoT branches. Appendix A shows full visualization of our 3Mformer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "content": "uses the masking strategy. In contrast to these works, we use three HoT branches of model [21], and we model hyperedges of orders 1 to " + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 306, + 287, + 354 + ], + "type": "text", + "content": " by forming several multi-mode token variations in 3Mformer." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 358, + 289, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 358, + 289, + 561 + ], + "spans": [ + { + "bbox": [ + 46, + 358, + 289, + 561 + ], + "type": "text", + "content": "Attention. In order to improve feature representations, attention captures relationship between tokens. Natural language processing and computer vision have driven recent developments in attention mechanisms based on transformers [11, 49]. Examples include the hierarchical Cross Attention Transformer (CAT) [32], Cross-attention by Temporal Shift with CNNs [16], Cross-Attention Multi-Scale Vision Transformer (CrossViT) for image classification [3] and Multi-Modality Cross Attention (MMCA) Network for image and sentence matching [63]. 
In GNNs, attention can be defined over edges [50, 66] or over nodes [29]. In this work, we use the attention with hyper-edges of several orders from HoT branches serving as tokens, and coupled-mode attention with coupled-mode tokens based on 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only' pairs formed in 3Mformer." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 576, + 126, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 576, + 126, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 576, + 126, + 590 + ], + "type": "text", + "content": "3. Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 58, + 597, + 284, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 597, + 284, + 610 + ], + "spans": [ + { + "bbox": [ + 58, + 597, + 284, + 610 + ], + "type": "text", + "content": "Below we describe foundations necessary for our work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": "Notations. " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_K" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": " stands for the index set " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\{1,2,\\dots,K\\}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": ". Regular fonts are scalars; vectors are denoted by lowercase boldface letters, e.g., " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": "; matrices by the uppercase boldface, e.g., " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": "; and tensors by calligraphic letters, e.g., " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": ". 
An " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": "th-order tensor is denoted as " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": ", and the mode-" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": " matricization of " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{(m)} \\in \\mathbb{R}^{I_m \\times (I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r)}" + }, + { + "bbox": [ + 46, + 613, + 287, + 686 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "content": "Transformer layers [11, 49]. A transformer encoder layer " + }, + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "inline_equation", + "content": "f: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}" + }, + { + "bbox": [ + 47, + 689, + 288, + 714 + ], + "type": "text", + "content": " consists of two sub-layers: (i) a self" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "spans": [ + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": "attention " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "a: \\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": " and (ii) an element-wise feedforward MLP: " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{J \\times d} \\to \\mathbb{R}^{J \\times d}" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": ". 
For a set of " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": " nodes with " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{J \\times d}" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": " is a feature vector of node " + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 305, + 546, + 355 + ], + "type": "text", + "content": ", a transformer layer3 computes:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 361, + 363, + 545, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 363, + 545, + 397 + ], + "spans": [ + { + "bbox": [ + 361, + 363, + 545, + 397 + ], + "type": "interline_equation", + "content": "a \\left(\\mathbf {x} _ {i}\\right) = \\mathbf {x} _ {i} + \\sum_ {h = 1} ^ {H} \\sum_ {j = 1} ^ {J} \\alpha_ {i j} ^ {h} \\mathbf {x} _ {j} \\mathbf {W} _ {h} ^ {V} \\mathbf {W} _ {h} ^ {O}, \\tag {1}", + "image_path": "edc5f56b38ba85d910fcb3d5ba1608787fb669f7e2d1e118da26c3fb5a0b80c0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 361, + 399, + 545, + 414 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 399, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 361, + 399, + 545, + 414 + ], + "type": "interline_equation", + "content": "f (\\mathbf {x} _ {i}) = a (\\mathbf {x} _ {i}) + \\operatorname {M L P} (a (\\mathbf {X})) _ {i}, \\tag {2}", + "image_path": "73785b770beb03288a3a1ec1b676400f792bf06801ef2ff8a9462c6ab3e7043d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "inline_equation", + "content": "d_H" + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": " denote respectively the number of heads and the head size, " + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "inline_equation", + "content": "\\alpha^h = \\sigma (\\mathbf{X}\\mathbf{W}_h^Q (\\mathbf{X}\\mathbf{W}_h^K)^\\top)" + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": " is the attention coefficient, " + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_h^O\\in \\mathbb{R}^{d_H\\times d}" + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_h^V,\\mathbf{W}_h^K,\\mathbf{W}_h^Q\\in \\mathbb{R}^{d\\times d_H}" + }, + { + "bbox": [ + 304, + 422, + 545, + 462 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "spans": [ + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": "Higher-order transformer layers [21]. Let the HoT layer be " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "f_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " with two sub-layers: (i) a higher-order self-attention " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "a_{m\\rightarrow n}:\\mathbb{R}^{J^m\\times d}\\to \\mathbb{R}^{J^n\\times d}" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " and (ii) a feedforward " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "\\mathrm{MLP}_{n\\rightarrow n}:\\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d}" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": ". Moreover, let indexing vectors " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{i}\\in \\mathcal{I}_J^m\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " modes) and " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{j}\\in \\mathcal{I}_J^n\\equiv \\mathcal{I}_J\\times \\mathcal{I}_J\\times \\ldots \\times \\mathcal{I}_J" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " modes). 
For the input tensor " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "\\mathbf{X}\\in \\mathbb{R}^{J^m\\times d}" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": " with hyper-edges of order " + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 463, + 546, + 548 + ], + "type": "text", + "content": ", a HoT layer evaluates:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 556, + 545, + 589 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 556, + 545, + 589 + ], + "spans": [ + { + "bbox": [ + 314, + 556, + 545, + 589 + ], + "type": "interline_equation", + "content": "a _ {m \\rightarrow n} (\\mathbf {X}) _ {j} = \\sum_ {h = 1} ^ {H} \\sum_ {\\mu} \\sum_ {i} \\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} \\mathbf {X} _ {i} \\mathbf {W} _ {h, \\mu} ^ {V} \\mathbf {W} _ {h, \\mu} ^ {O} \\tag {3}", + "image_path": "56cc90cccb754ece27050c7bee085faf28fe86cf501ceaf73ba634f00babeeaa.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 591, + 545, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 591, + 545, + 605 + ], + "spans": [ + { + "bbox": [ + 315, + 591, + 545, + 605 + ], + "type": "interline_equation", + "content": "\\mathrm {M L P} _ {n \\rightarrow n} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right) = \\mathrm {L} _ {n \\rightarrow n} ^ {2} \\left(\\operatorname {R e L U} \\left(\\mathrm {L} _ {n \\rightarrow n} ^ {1} \\left(a _ {m \\rightarrow n} (\\mathbf {X})\\right)\\right)\\right), \\tag {4}", + "image_path": "735b145ae35ff3c9aa58719bd866bfd79309080c1388821296066c2951e6fffb.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 608, + 545, + 621 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 608, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 315, + 608, + 545, + 621 + ], + "type": "interline_equation", + "content": "f _ {m \\rightarrow n} (\\mathbf {X}) = a _ {m \\rightarrow n} (\\mathbf {X}) + \\operatorname {M L P} _ {n \\rightarrow n} (a _ {m \\rightarrow n} (\\mathbf {X})), \\tag {5}", + "image_path": "70edbe6e79529cab9f78e68a49ffa9705782924dbd535d45f6d714b4e683c3f9.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "spans": [ + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "\\pmb{\\alpha}^{h,\\mu}\\in \\mathbb{R}^{J^{m + n}}" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " is the so-called attention coefficient tensor with multiple heads, and " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "\\pmb{\\alpha}_{\\mathbf{i},\\mathbf{j}}^{h,\\mu}\\in \\mathbb{R}^{J}" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " is a vector, " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{h,\\mu}^V\\in \\mathbb{R}^{d\\times d_H}" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": 
"\\mathbf{W}_{h,\\mu}^{O}\\in \\mathbb{R}^{d_{H}\\times d}" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " are learnable parameters. Moreover, " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " indexes over the so-called equivalence classes of order" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "(m + n)" + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "text", + "content": " in the same partition of nodes, " + }, + { + "bbox": [ + 304, + 631, + 545, + 696 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_{n\\to n}^{1}\\colon \\mathbb{R}^{J^n\\times d}\\to \\mathbb{R}^{J^n\\times d_F}" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "inline_equation", + "content": "{}^{3}" + }, + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "text", + "content": " Normalizations after " + }, + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "inline_equation", + "content": "a\\left( \\cdot \\right)" + }, + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "inline_equation", + "content": "\\operatorname{MLP}\\left( \\cdot \\right)" + }, + { + "bbox": [ + 315, + 702, + 520, + 713 + ], + "type": "text", + "content": " are omitted for simplicity." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5622" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_{n\\to n}^2\\colon \\mathbb{R}^{J^n\\times d_F}\\to \\mathbb{R}^{J^n\\times d}" + }, + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "text", + "content": " are equivariant linear layers and " + }, + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "inline_equation", + "content": "d_{F}" + }, + { + "bbox": [ + 46, + 71, + 287, + 95 + ], + "type": "text", + "content": " is the hidden dimension." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "text", + "content": "To compute each attention tensor " + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\alpha^{h,\\mu} \\in \\mathbb{R}^{J^{m + n}}" + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "text", + "content": " from the input tensor " + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{J^m \\times d}" + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "text", + "content": " of hyper-edges of order " + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 95, + 287, + 133 + ], + "type": "text", + "content": ", from the higher-order query and key, we obtain:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 99, + 140, + 287, + 177 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 140, + 287, + 177 + ], + "spans": [ + { + "bbox": [ + 99, + 140, + 287, + 177 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\alpha} _ {i, j} ^ {h, \\mu} = \\left\\{ \\begin{array}{c c} \\frac {\\sigma \\left(\\mathbf {Q} _ {j} ^ {h , \\mu} , \\mathbf {K} _ {i} ^ {h , \\mu}\\right)}{Z _ {j}} & (i, j) \\in \\mu \\\\ 0 & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {6}", + "image_path": "19fc8ddbd9f738fc2a73bdeb9e6d36010811d571779bb8fb29c57a67b13a062d.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}^{\\mu} = \\mathrm{L}_{m\\to n}^{\\mu}(\\mathbf{X})" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\mathbf{K}^{\\mu} = \\mathrm{L}_{m\\to m}^{\\mu}(\\mathbf{X})" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": " , and normalization constant " + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "Z_{j} = \\sum_{i:(i,j)\\in \\mu}\\sigma (\\mathbf{Q}_{j}^{\\mu},\\mathbf{K}_{i}^{\\mu})" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": " . Finally, kernel attention in Eq. (6) can be approximated with RKHS feature maps " + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\psi \\in \\mathbb{R}_+^{d_K}" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": " for efficacy as " + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "d_{K}\\ll d_{H}" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": " . Specifically, we have " + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "inline_equation", + "content": "\\sigma (\\mathbf{Q}_j^{h,\\mu},\\mathbf{K}_i^{h,\\mu})\\approx \\psi (\\mathbf{Q}_j^{h,\\mu})^\\top \\psi (\\mathbf{K}_i^{h,\\mu})" + }, + { + "bbox": [ + 46, + 184, + 287, + 257 + ], + "type": "text", + "content": " as in [10, 19]. 
We choose the performer kernel [10] due to its good performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "text", + "content": "As query and key tensors are computed from the input tensor " + }, + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "inline_equation", + "content": "\\mathbf{X}" + }, + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "text", + "content": " using the equivariant linear layers, the transformer encoder layer " + }, + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "inline_equation", + "content": "f_{m\\to n}" + }, + { + "bbox": [ + 46, + 257, + 287, + 293 + ], + "type": "text", + "content": " satisfies the permutation equivariance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 303, + 113, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 303, + 113, + 317 + ], + "spans": [ + { + "bbox": [ + 47, + 303, + 113, + 317 + ], + "type": "text", + "content": "4. Approach" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 323, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 323, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 323, + 287, + 370 + ], + "type": "text", + "content": "Skeletal Graph [64] and Skeletal Hypergraph [15,35] are popular for modeling edges and hyper-edges. In this work, we use the Higher-order Transformer (HoT) [21] as a backbone encoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 378, + 146, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 378, + 146, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 378, + 146, + 389 + ], + "type": "text", + "content": "4.1. Model Overview" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 396, + 287, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 396, + 287, + 552 + ], + "spans": [ + { + "bbox": [ + 46, + 396, + 287, + 552 + ], + "type": "text", + "content": "Fig. 1 shows that our framework contains a simple 3-layer MLP unit (FC, ReLU, FC, ReLU, Dropout, FC), three HoT blocks with each HoT for each type of input (i.e., body joint feature set, graph and hypergraph of body joints), followed by Multi-order Multi-mode Transformer (3Mformer) with two modules (i) Multi-order Pooling (MP) and (ii) Temporal block Pooling (TP). The goal of 3Mformer is to form coupled-mode tokens (explained later) such as 'channel-temporal block', 'order-channel-body joint', 'channel-hyper-edge (any order)' and 'channel-only', and perform weighted hyper-edge aggregation and temporal block aggregation. Their outputs are further concatenated and passed to an FC layer for classification." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "spans": [ + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": "MLP unit. 
The MLP unit takes " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " neighboring frames, each with " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "J" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " 2D/3D skeleton body joints, forming one temporal block. In total, depending on stride " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": ", we obtain some " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " temporal blocks (a block captures the short-term temporal evolution). In contrast, the long-term temporal evolution is modeled with HoT and 3Mformer. Each temporal block is encoded by the MLP into a " + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "inline_equation", + "content": "d \\times J" + }, + { + "bbox": [ + 46, + 555, + 287, + 639 + ], + "type": "text", + "content": " dimensional feature map." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": "HoT branches. We stack " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": " branches of HoT, each taking embeddings " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_t\\in \\mathbb{R}^{d\\times J}" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "t\\in I_{\\tau}" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": " denotes a temporal block. HoT branches output hyper-edge feature representations of size " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "m\\in I_r" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "\\Phi_m^{\\prime}\\in \\mathbb{R}^{J^m\\times d'}" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": " for order " + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "inline_equation", + "content": "m\\in I_r" + }, + { + "bbox": [ + 46, + 641, + 287, + 689 + ], + "type": "text", + "content": "." 
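The sketch below is illustrative only; the exact wiring of the MLP unit is an assumption. It shows one plausible way to cut a skeleton sequence into overlapping temporal blocks of T frames with stride S and to embed each block into the d x J map X_t that the HoT branches consume, by passing each joint's T stacked coordinates through an FC-ReLU-FC-ReLU-Dropout-FC unit.

```python
# Minimal sketch (assumed wiring): per-joint embedding of each temporal block.
import torch
import torch.nn as nn

class BlockMLP(nn.Module):
    """FC, ReLU, FC, ReLU, Dropout, FC applied per joint to its T stacked coordinates."""
    def __init__(self, T: int, in_dim: int = 3, d: int = 64, hidden: int = 128, p: float = 0.1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(T * in_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(), nn.Dropout(p),
            nn.Linear(hidden, d),
        )

    def forward(self, block: torch.Tensor) -> torch.Tensor:
        # block: (T, J, C) -> per-joint embedding X_t of shape (d, J)
        T, J, C = block.shape
        per_joint = block.permute(1, 0, 2).reshape(J, T * C)   # (J, T*C)
        return self.net(per_joint).transpose(0, 1)             # (d, J)

def split_blocks(seq: torch.Tensor, T: int = 10, S: int = 5):
    """seq: (frames, J, C) -> list of tau overlapping temporal blocks of shape (T, J, C)."""
    return [seq[s:s + T] for s in range(0, seq.shape[0] - T + 1, S)]

if __name__ == "__main__":
    seq = torch.randn(60, 25, 3)                          # 60 frames, 25 joints, 3D
    mlp = BlockMLP(T=10)
    X = torch.stack([mlp(b) for b in split_blocks(seq)])  # (tau, d, J) = (11, 64, 25)
    print(X.shape)
```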
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "For the first-, second- and higher-order stream outputs " + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\Phi_1',\\dots,\\Phi_r'" + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": ", we (i) swap feature channel and hyper-edge" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": "modes, (ii) extract the upper triangular of tensors, and we concatenate along the block-temporal mode, so we have " + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "inline_equation", + "content": "\\Phi_{m}\\in \\mathbb{R}^{d^{\\prime}\\times N_{E_{m}}\\times \\tau}" + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "inline_equation", + "content": "N_{E_m} = \\binom{J}{m}" + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": ". Subsequently, we concatenate " + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "inline_equation", + "content": "\\Phi_1,\\ldots ,\\Phi_r" + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": " along the hyper-edge mode and obtain a multi-order feature tensor " + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "inline_equation", + "content": "\\pmb {M}\\in \\mathbb{R}^{d^{\\prime}\\times N\\times r}" + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": " where the total number of hyper-edges across all orders is " + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "inline_equation", + "content": "N = \\sum_{m = 1}^{r}\\binom{J}{m}" + }, + { + "bbox": [ + 304, + 72, + 545, + 147 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 148, + 545, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 148, + 545, + 207 + ], + "spans": [ + { + "bbox": [ + 304, + 148, + 545, + 207 + ], + "type": "text", + "content": "3Mformer. Our Multi-order Multi-mode Transformer (3Mformer) with Coupled-mode Self-Attention (CmSA) is used for the fusion of information flow inside the multi-order feature tensor " + }, + { + "bbox": [ + 304, + 148, + 545, + 207 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 148, + 545, + 207 + ], + "type": "text", + "content": ", and finally, the output from 3Mformer is passed to a classifier for classification." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 215, + 466, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 215, + 466, + 227 + ], + "spans": [ + { + "bbox": [ + 305, + 215, + 466, + 227 + ], + "type": "text", + "content": "4.2. 
Coupled-mode Self-Attention" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 236, + 545, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 236, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 304, + 236, + 545, + 415 + ], + "type": "text", + "content": "Coupled-mode tokens. We are inspired by the attentive regions of the one-class token in the standard Vision Transformer (ViT) [49] that can be leveraged to form a class-agnostic localization map. We investigate if the transformer model can also effectively capture the coupled-mode attention for more discriminative classification tasks, e.g., tensorial skeleton-based action recognition by learning the coupled-mode tokens within the transformer. To this end, we propose a Multi-order Multi-mode Transformer (3Mformer), which uses coupled-mode tokens to jointly learn various higher-order motion dynamics among channel-, block-temporal-, body joint- and order-mode. Our 3Mformer can successfully produce coupled-mode relationships from CmSA mechanism corresponding to different tokens. Below we introduce our CmSA." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": "Given the order- " + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": " tensor " + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{I_1 \\times I_2 \\times \\ldots \\times I_r}" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": ", to form the joint mode token, we perform the mode-" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": " matricization of " + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": " to obtain " + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top} \\in \\mathbb{R}^{(I_1 \\ldots I_{m-1} I_{m+1} \\ldots I_r) \\times I_m}" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": ", and the coupled-token for " + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 415, + 545, + 571 + ], + "type": "text", + "content": " is formed. For example, for a given 3rd-order tensor that has feature channel-, hyper-edge- and temporal block-mode, we can form 'channel-temporal block', 'channel-hyper-edge (any order)' and 'channel-only' pairs; and if the given tensor is used as input and outputs a new tensor which produces new mode, e.g., body joint-mode, we can form the 'order-channel-body joint' token. In the following sections, for simplicity, we use reshape for the matricization of tensor to form different types of coupled-mode tokens. 
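As a small illustration, this matricization can be realized with a permute followed by a reshape. The sketch below (illustrative shapes only, not the reference implementation) unfolds an order-3 tensor along its hyper-edge mode so that the remaining channel and block-temporal modes index the coupled-mode tokens fed to the attention defined next.

```python
# Minimal sketch of mode-m matricization ("unfolding") via permute + reshape.
import torch

def mode_m_matricization(M: torch.Tensor, m: int) -> torch.Tensor:
    """Return M_(m)^T of shape (product of all dims except m, I_m)."""
    dims = [i for i in range(M.dim()) if i != m] + [m]   # move mode m last
    return M.permute(*dims).reshape(-1, M.shape[m])

if __name__ == "__main__":
    d_prime, N, tau = 16, 325, 11                        # illustrative sizes
    M = torch.randn(d_prime, N, tau)
    # unfold along the hyper-edge mode (m = 1): rows couple channel and temporal block
    tokens = mode_m_matricization(M, m=1)
    print(tokens.shape)                                  # torch.Size([176, 325]) = (d'*tau, N)
```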
Our CmSA is given as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 358, + 579, + 545, + 607 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 579, + 545, + 607 + ], + "spans": [ + { + "bbox": [ + 358, + 579, + 545, + 607 + ], + "type": "interline_equation", + "content": "a (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} \\mathbf {K} ^ {\\top}}{\\sqrt {d _ {K}}}\\right) \\mathbf {V}, \\tag {7}", + "image_path": "46b1355c7d7a3af20b8d8bc1f6cc12e07be63939a978cdd3db46943fa1da3e48.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\sqrt{d_K}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": " is the scaling factor, " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{Q} = \\mathbf{W}^q\\mathbf{M}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{K} = \\mathbf{W}^k\\mathbf{M}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{V} = \\mathbf{W}^{\\nu}\\mathbf{M}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": " are the query, key and value, respectively, and " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\equiv \\mathcal{M}_{(m)}^{\\top}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": ". Moreover, " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}, \\mathbf{K}, \\mathbf{V} \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times I_m}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{W}^q, \\mathbf{W}^k, \\mathbf{W}^\\nu \\in \\mathbb{R}^{(I_1\\dots I_{m-1}I_{m+1}\\dots I_r)\\times (I_1\\dots I_{m-1}I_{m+1}\\dots I_r)}" + }, + { + "bbox": [ + 304, + 616, + 545, + 713 + ], + "type": "text", + "content": " are learnable weights. We notice that various coupled-mode tokens have different 'focus' of attention mechanisms, and we apply them in our 3Mformer for the fusion of multi-order feature representations." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5623" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 247, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 247, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 247, + 83 + ], + "type": "text", + "content": "4.3. 
Multi-order Multi-mode Transformer" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "spans": [ + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "text", + "content": "Below we introduce Multi-order Multi-mode Transformer (3Mformer) with Multi-order Pooling (MP) block and Temporal block Pooling (TP) block, which are cascaded into two branches (i) " + }, + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "text", + "content": " and (ii) " + }, + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 46, + 91, + 287, + 152 + ], + "type": "text", + "content": ", to achieve different types of coupled-mode tokens." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 170, + 226, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 226, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 226, + 182 + ], + "type": "text", + "content": "4.3.1 Multi-order Pooling (MP) Module" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "spans": [ + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{CmSA}" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": " in MP. We reshape the multi-order feature representation " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{d' \\times N}" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": " (or reshape the output from TP explained later into " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "\\mathbf{M}' \\in \\mathbb{R}^{d' \\times N}" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": ") to let the model attend to different types of feature representations. Let us simply denote " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "d'' = d'\\tau" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": " (or " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "d'' = d'" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": ") depending on the source of input. 
We form a coupled-mode self-attention (if " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "d'' = d'\\tau" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": ", we have, i.e., 'channel-temporal block' token; if " + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "inline_equation", + "content": "d'' = d'" + }, + { + "bbox": [ + 46, + 193, + 288, + 289 + ], + "type": "text", + "content": ", we have 'channel-only' token):" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 299, + 287, + 330 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 299, + 287, + 330 + ], + "spans": [ + { + "bbox": [ + 59, + 299, + 287, + 330 + ], + "type": "interline_equation", + "content": "a _ {\\mathrm {M P}} \\left(\\mathbf {Q} _ {\\mathrm {M P}}, \\mathbf {K} _ {\\mathrm {M P}}, \\mathbf {V} _ {\\mathrm {M P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {M P}} \\mathbf {K} _ {\\mathrm {M P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {M P}}}}}\\right) \\mathbf {V} _ {\\mathrm {M P}}, \\tag {8}", + "image_path": "f932da5ebdca76dd3028e69110e78855f5a2852a41b3cbaaf99a545df16c93ad.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "spans": [ + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\sqrt{d_{K_{\\mathrm{MP}}}}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " is the scaling factor, " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{q}\\mathbf{M}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{k}\\mathbf{M}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{MP}} = \\mathbf{W}_{\\mathrm{MP}}^{\\nu}\\mathbf{M}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " (we can use here " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{M}'" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": ") are the query, key and value. 
Moreover, " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{MP}}, \\mathbf{K}_{\\mathrm{MP}}, \\mathbf{V}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{MP}}^{q}, \\mathbf{W}_{\\mathrm{MP}}^{k}, \\mathbf{W}_{\\mathrm{MP}}^{\\nu} \\in \\mathbb{R}^{d'' \\times d''}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " are learnable weights. Eq. (8) is a self-attention layer which reweighs " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{MP}}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " based on the correlation between " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{MP}}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{MP}}" + }, + { + "bbox": [ + 46, + 340, + 287, + 425 + ], + "type": "text", + "content": " token embeddings of so-called coupled-mode tokens." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "spans": [ + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "text", + "content": "Weighted pooling. Attention layer in Eq. (8) produces feature representation " + }, + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}} \\in \\mathbb{R}^{d'' \\times N}" + }, + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "text", + "content": " to enhance the relationship between example feature channels and body joints. 
Subsequently, we handle the impact of hyper-edges of multiple orders by weighted pooling along hyper-edges of order " + }, + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "inline_equation", + "content": "m \\in I_r" + }, + { + "bbox": [ + 46, + 429, + 287, + 501 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 115, + 510, + 287, + 526 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 510, + 287, + 526 + ], + "spans": [ + { + "bbox": [ + 115, + 510, + 287, + 526 + ], + "type": "interline_equation", + "content": "\\mathbf {O} _ {\\mathrm {M P}} ^ {* (m)} = \\mathbf {O} _ {\\mathrm {M P}} ^ {(m)} \\mathbf {H} ^ {(m)} \\in \\mathbb {R} ^ {d ^ {\\prime \\prime} \\times J}, \\tag {9}", + "image_path": "dea88bcba083b75bd261805a0401d324d7d8b9415ceac278ce69f163753b2c97.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}}^{(m)} \\in \\mathbb{R}^{d'' \\times N_{E_m}}" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": " is simply extracted from " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}}" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": " for hyper-edges of order " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": ", and matrices " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{H}^{(m)} \\in \\mathbb{R}^{N_{E_m} \\times J}" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": " are learnable weights to perform weighted pooling along hyperedges of order " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": ". Finally, we obtain " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}}^* \\in \\mathbb{R}^{rd'' \\times J}" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": " by simply concatenating " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}}^{*(1)}, \\ldots, \\mathbf{O}_{\\mathrm{MP}}^{*(r)}" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": ". If we used the input to MP from TP, then we denote the output of MP as " + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{MP}}^*" + }, + { + "bbox": [ + 46, + 536, + 287, + 613 + ], + "type": "text", + "content": "." 
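A minimal sketch of the MP module under the shapes above (single head, simplified scaling; class and variable names are illustrative): coupled-mode self-attention over the (d'', N) matrix in the spirit of Eq. (8), followed by weighted pooling of the hyper-edges of each order m through a learnable H^(m) of shape (N_Em, J) as in Eq. (9), and concatenation over orders.

```python
# Minimal sketch of Multi-order Pooling (MP): attention over coupled-mode tokens,
# then learnable weighted pooling along hyper-edges of each order.
import math
import torch
import torch.nn as nn

class MultiOrderPooling(nn.Module):
    def __init__(self, d2: int, edge_counts, J: int):
        super().__init__()
        self.Wq = nn.Linear(d2, d2, bias=False)   # W^q_MP
        self.Wk = nn.Linear(d2, d2, bias=False)   # W^k_MP
        self.Wv = nn.Linear(d2, d2, bias=False)   # W^v_MP
        # one pooling matrix H^(m) per order, mapping N_Em hyper-edges to J joints
        self.H = nn.ParameterList([nn.Parameter(0.02 * torch.randn(n_em, J))
                                   for n_em in edge_counts])
        self.edge_counts = list(edge_counts)

    def forward(self, M: torch.Tensor) -> torch.Tensor:
        # M: (d'', N); rows are coupled-mode tokens, columns index hyper-edges
        Q, K, V = self.Wq(M.T).T, self.Wk(M.T).T, self.Wv(M.T).T          # (d'', N) each
        attn = torch.softmax(Q @ K.T / math.sqrt(K.shape[-1]), dim=-1)    # (d'', d'')
        O = attn @ V                                                       # (d'', N)
        parts = torch.split(O, self.edge_counts, dim=1)                    # per-order O^(m)
        pooled = [part @ H for part, H in zip(parts, self.H)]              # each (d'', J)
        return torch.cat(pooled, dim=0)                                    # (r*d'', J)

if __name__ == "__main__":
    J, d2 = 25, 64
    edge_counts = [25, 300, 2300]                # C(25,1), C(25,2), C(25,3) for r = 3
    mp = MultiOrderPooling(d2, edge_counts, J)
    print(mp(torch.randn(d2, sum(edge_counts))).shape)   # torch.Size([192, 25])
```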
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 630, + 239, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 630, + 239, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 630, + 239, + 643 + ], + "type": "text", + "content": "4.3.2 Temporal block Pooling (TP) Module" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{CmSA}" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": " in TP. Firstly, we reshape the multi-order feature representation " + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\in \\mathbb{R}^{d' \\times N \\times \\tau}" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{d' N \\times \\tau}" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": " (or reshape the output from MP into " + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{M}'' \\in \\mathbb{R}^{rd' J \\times \\tau}" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": "). For simplicity, we denote " + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "d''' = d'N" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": " in the first case and " + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "inline_equation", + "content": "d''' = rd'J" + }, + { + "bbox": [ + 46, + 653, + 287, + 713 + ], + "type": "text", + "content": " in the second case. As the first mode of reshaped input serves to form" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "tokens, they are again coupled-mode tokens, e.g., 'channel-hyper-edge' and 'order-channel-body joint' tokens, respectively. Moreover, TP also performs pooling along block-temporal mode (along " + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 72, + 545, + 131 + ], + "type": "text", + "content": "). 
We form an coupled-mode self-attention:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 323, + 140, + 545, + 171 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 140, + 545, + 171 + ], + "spans": [ + { + "bbox": [ + 323, + 140, + 545, + 171 + ], + "type": "interline_equation", + "content": "a _ {\\mathrm {T P}} \\left(\\mathbf {Q} _ {\\mathrm {T P}}, \\mathbf {K} _ {\\mathrm {T P}}, \\mathbf {V} _ {\\mathrm {T P}}\\right) = \\operatorname {S o f t M a x} \\left(\\frac {\\mathbf {Q} _ {\\mathrm {T P}} \\mathbf {K} _ {\\mathrm {T P}} ^ {\\top}}{\\sqrt {d _ {K _ {\\mathrm {T P}}}}}\\right) \\mathbf {V} _ {\\mathrm {T P}}, \\tag {10}", + "image_path": "a8da34eb0a09af3f6810f0549151d261a627c937f3fda9990aaf2200f350294b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "spans": [ + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\sqrt{d_{K_{\\mathrm{TP}}}}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " is the scaling factor, " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{q}\\mathbf{M}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{k}\\mathbf{M}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{TP}} = \\mathbf{W}_{\\mathrm{TP}}^{\\nu}\\mathbf{M}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " (we can use here " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{M}''" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": ") are the query, key and value. Moreover, " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{TP}}, \\mathbf{K}_{\\mathrm{TP}}, \\mathbf{V}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{TP}}^{q}, \\mathbf{W}_{\\mathrm{TP}}^{k}, \\mathbf{W}_{\\mathrm{TP}}^{\\nu} \\in \\mathbb{R}^{d''' \\times d'''}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " are learnable weights. Eq. 
(10) reweights " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{TP}}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " based on the correlation between " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{TP}}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{K}_{\\mathrm{TP}}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " token embeddings of coupled-mode tokens ('channel-hyper-edge' or 'order-channel-body joint'). The output of attention is the temporal representation " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d''' \\times \\tau}" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": ". If we used " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{M}''" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": " as input, we denote the output as " + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}}''" + }, + { + "bbox": [ + 304, + 182, + 545, + 290 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": "Pooling step. Given the temporal representation " + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}} \\in \\mathbb{R}^{d'' \\times r}" + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": " (or " + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}}''" + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": "), we apply pooling along the block-temporal mode to obtain compact feature representations independent of length (block count " + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": ") of skeleton sequence. There exist many pooling operations including first-order, e.g., average, maximum, sum pooling, second-order [60, 80] such as attentional pooling [14], higher-order (tri-linear) [8, 25] and rank pooling [13]. The output after pooling is " + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}}^* \\in \\mathbb{R}^{d''}" + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": " (or " + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "inline_equation", + "content": "\\mathbf{O}_{\\mathrm{TP}}''" + }, + { + "bbox": [ + 304, + 293, + 545, + 403 + ], + "type": "text", + "content": ")." 
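A matching sketch of the TP module (again single head and simplified scaling, with plain average pooling standing in for the pooling choices listed above): coupled-mode self-attention over the (d''', tau) matrix in the spirit of Eq. (10), followed by pooling along the block-temporal mode.

```python
# Minimal sketch of Temporal block Pooling (TP): attention over coupled-mode tokens,
# then pooling along the tau (block-temporal) mode.
import math
import torch
import torch.nn as nn

class TemporalBlockPooling(nn.Module):
    def __init__(self, d3: int):
        super().__init__()
        self.Wq = nn.Linear(d3, d3, bias=False)   # W^q_TP
        self.Wk = nn.Linear(d3, d3, bias=False)   # W^k_TP
        self.Wv = nn.Linear(d3, d3, bias=False)   # W^v_TP

    def forward(self, M: torch.Tensor) -> torch.Tensor:
        # M: (d''', tau); rows are coupled-mode tokens, columns are temporal blocks
        Q, K, V = self.Wq(M.T).T, self.Wk(M.T).T, self.Wv(M.T).T          # (d''', tau) each
        attn = torch.softmax(Q @ K.T / math.sqrt(K.shape[-1]), dim=-1)    # (d''', d''')
        O = attn @ V                                                       # (d''', tau)
        return O.mean(dim=-1)                                              # (d''',) pooled over tau

if __name__ == "__main__":
    d3, tau = 512, 11
    tp = TemporalBlockPooling(d3)
    print(tp(torch.randn(d3, tau)).shape)   # torch.Size([512])
```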
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 419, + 405, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 419, + 405, + 430 + ], + "spans": [ + { + "bbox": [ + 306, + 419, + 405, + 430 + ], + "type": "text", + "content": "4.3.3 Model Variants" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 439, + 545, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 439, + 545, + 475 + ], + "spans": [ + { + "bbox": [ + 304, + 439, + 545, + 475 + ], + "type": "text", + "content": "We devise four model variants by different stacking of MP with TP, with the goal of exploiting attention with different kinds of coupled-mode tokens:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 485, + 544, + 613 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 315, + 485, + 544, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 485, + 544, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 485, + 544, + 510 + ], + "type": "text", + "content": "i. Single-branch: MP followed by TP, denoted " + }, + { + "bbox": [ + 315, + 485, + 544, + 510 + ], + "type": "inline_equation", + "content": "\\mathrm{MP}\\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 315, + 485, + 544, + 510 + ], + "type": "text", + "content": " (Fig. 1 top right branch)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 519, + 544, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 519, + 544, + 544 + ], + "spans": [ + { + "bbox": [ + 312, + 519, + 544, + 544 + ], + "type": "text", + "content": "ii. Single-branch: TP followed by MP, denoted " + }, + { + "bbox": [ + 312, + 519, + 544, + 544 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 312, + 519, + 544, + 544 + ], + "type": "text", + "content": ", (Fig. 1 bottom right branch)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "spans": [ + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "text", + "content": "iii. Two-branch (our 3Mformer, Fig. 1) which concatenates outputs of " + }, + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 310, + 554, + 544, + 578 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 589, + 544, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 589, + 544, + 613 + ], + "spans": [ + { + "bbox": [ + 311, + 589, + 544, + 613 + ], + "type": "text", + "content": "iv. We also investigate only MP or TP module followed by average pooling or an FC layer." 
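For variant (iii), the two branch outputs are flattened, concatenated and classified with a single FC layer; a minimal sketch of that fusion head is given below (the branch networks themselves are stand-ins, see the MP and TP sketches above).

```python
# Minimal sketch of the two-branch fusion head used in variant (iii).
import torch
import torch.nn as nn

class TwoBranchHead(nn.Module):
    def __init__(self, branch_dim: int, num_classes: int):
        super().__init__()
        self.fc = nn.Linear(2 * branch_dim, num_classes)

    def forward(self, out_mp_tp: torch.Tensor, out_tp_mp: torch.Tensor) -> torch.Tensor:
        z = torch.cat([out_mp_tp.flatten(), out_tp_mp.flatten()])  # concatenated branch features
        return self.fc(z)

if __name__ == "__main__":
    r, d_prime, J, num_classes = 3, 16, 25, 60
    head = TwoBranchHead(r * d_prime * J, num_classes)
    a = torch.randn(r * d_prime * J)     # MP -> TP branch output (flattened)
    b = torch.randn(r * d_prime * J)     # TP -> MP branch output (flattened)
    print(head(a, b).shape)              # torch.Size([60])
```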
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "spans": [ + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "content": "The outputs from " + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "content": " have exactly the same feature dimension (" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{rd'J}" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "content": ", after reshaping into vector). For two-branch (our 3Mformer), we simply concatenate these outputs (" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{2rd'J}" + }, + { + "bbox": [ + 304, + 623, + 545, + 682 + ], + "type": "text", + "content": ", after concatenation). These vectors are forwarded to the FC layer to learn a classifier." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 693, + 545, + 713 + ], + "type": "text", + "content": "4We do not propose pooling operators but we select popular ones with the purpose of comparing their impact on TP." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5624" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 128, + 85 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 176, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 176, + 103 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 176, + 103 + ], + "type": "text", + "content": "5.1. Datasets and Protocols" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 110, + 286, + 459 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 46, + 110, + 286, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 286, + 181 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 286, + 181 + ], + "type": "text", + "content": "(i) NTU RGB+D (NTU-60) [43] contains 56,880 video sequences. This dataset has variable sequence lengths and high intra-class variations. Each skeleton sequence has 25 joints and there are no more than two human subjects in each video. Two evaluation protocols are: (i) cross-subject (X-Sub) and (ii) cross-view (X-View)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 182, + 286, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 182, + 286, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 182, + 286, + 253 + ], + "type": "text", + "content": "(ii) NTU RGB+D 120 (NTU-120) [34], an extension of NTU-60, contains 120 action classes (daily/health-related), and 114,480 RGB+D video samples captured with 106 distinct human subjects from 155 different camera viewpoints. There are also two evaluation protocols: (i) cross-subject (X-Sub) and (ii) cross-setup (X-Set)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 254, + 286, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 254, + 286, + 459 + ], + "spans": [ + { + "bbox": [ + 46, + 254, + 286, + 459 + ], + "type": "text", + "content": "(iii) Kinetics-Skeleton, based on Kinetics [20], is large-scale dataset with 300,000 video clips and up to 400 human actions collected from YouTube. This dataset involves human daily activities, sports scenes and complex human-computer interaction scenes. Since Kinetics only provides raw videos without the skeletons, ST-GCN [64] uses the publicly available OpenPose toolbox [2] to estimate and extract the location of 18 human body joints on every frame in the clips. We use their released skeleton data to evaluate our model. Following the standard evaluation protocol, we report the Top-1 and Top-5 accuracies on the validation set. (iv) Northwestern-UCLA [51] was captured by 3 Kinect cameras simultaneously from multiple viewpoints. It contains 1494 video clips covering 10 actions. Each action is performed by 10 different subjects. We follow the same evaluation protocol as [51]: training split is formed from the first two cameras, and testing split from the last camera." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 467, + 163, + 480 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 467, + 163, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 467, + 163, + 480 + ], + "type": "text", + "content": "5.2. Experimental Setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "content": "We use PyTorch and " + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "inline_equation", + "content": "1\\times" + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "content": " Titan RTX 3090 for experiments. We use the Stochastic Gradient Descent (SGD) with momentum 0.9, cross-entropy as the loss, weight decay of 0.0001 and batch size of 32. The learning rate is set to 0.1 initially. On NTU-60 and NTU-120, the learning rate is divided by 10 at the 40th and 50th epoch, and the training process ends at the 60th epoch. On Kinetics-Skeleton, the learning rate is divided by 10 at the 50th and 60th epoch, and the training finishes at the 80th epoch. We took " + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "content": " of training set for validation to tune hyperparameters. All models have fixed hyperparameters with 2 and 4 layers for NTU-60/NTU-120 and Kinetics-Skeleton, respectively. The hidden dimensions is set to 16 for all 3 datasets. 
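A minimal sketch of this optimization setup (SGD with momentum 0.9, weight decay 1e-4, initial learning rate 0.1 divided by 10 at the listed epochs); `model` is a placeholder module and the data-loading loop is omitted.

```python
# Minimal sketch of the stated training setup; the NTU schedule is shown,
# Kinetics-Skeleton would use milestones [50, 60] and 80 epochs.
import torch
import torch.nn as nn

model = nn.Linear(10, 60)                        # placeholder for the full network
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 50], gamma=0.1)
criterion = nn.CrossEntropyLoss()

for epoch in range(60):
    # ... one pass over the training loader (batch size 32) would go here ...
    scheduler.step()
```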
We use 4 attention heads for NTU-60 and NTU-120, and 8 attention heads for Kinetics-Skeleton. To form each video temporal block, we simply choose temporal block size to be 10 and stride to be 5 to allow a " + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 46, + 486, + 286, + 713 + ], + "type": "text", + "content": " overlap between consecutive temporal blocks. For Northwestern-UCLA, the batch size is 16. We adopted the data pre-processing in [6]." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 317, + 97, + 533, + 182 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "text", + "content": "Table 1. Search for the single best order " + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "text", + "content": " of hypergraph (except for " + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "inline_equation", + "content": "n = 3 \\& 4" + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "text", + "content": " where we check if " + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "inline_equation", + "content": "n = 3 \\& 4" + }, + { + "bbox": [ + 306, + 71, + 545, + 93 + ], + "type": "text", + "content": " are complementary)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 97, + 533, + 182 + ], + "lines": [ + { + "bbox": [ + 317, + 97, + 533, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 97, + 533, + 182 + ], + "type": "table", + "html": "
<table><tr><td rowspan='2'>Order-n</td><td colspan='2'>NTU-60</td><td colspan='2'>NTU-120</td><td>Kinetics-Skel.</td></tr>
<tr><td>X-Sub</td><td>X-View</td><td>X-Sub</td><td>X-Set</td><td>Top-1 acc.</td></tr>
<tr><td>n = 1</td><td>78.5</td><td>86.3</td><td>75.3</td><td>77.9</td><td>32.0</td></tr>
<tr><td>n = 2</td><td>83.0</td><td>89.2</td><td>86.2</td><td>88.3</td><td>37.1</td></tr>
<tr><td>n = 3</td><td>91.3</td><td>97.0</td><td>87.5</td><td>89.7</td><td>39.5</td></tr>
<tr><td>n = 4</td><td>91.5</td><td>97.1</td><td>87.8</td><td>90.0</td><td>40.1</td></tr>
<tr><td>n = 5</td><td>91.4</td><td>97.3</td><td>87.8</td><td>90.0</td><td>40.3</td></tr>
<tr><td>n = 3 &amp; 4</td><td>91.6</td><td>97.2</td><td>87.6</td><td>90.3</td><td>40.5</td></tr></table>
", + "image_path": "30b5d18fb5e789cf5aab50af498c687be20410e2f8abb43c67796441546111d7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 307, + 243, + 545, + 324 + ], + "blocks": [ + { + "bbox": [ + 305, + 203, + 545, + 237 + ], + "lines": [ + { + "bbox": [ + 305, + 203, + 545, + 237 + ], + "spans": [ + { + "bbox": [ + 305, + 203, + 545, + 237 + ], + "type": "text", + "content": "Table 2. Evaluations of our model variants with/without MP and/or TP. Baseline in the table denotes the backbone (MLP unit + HoTs) without the use of either MP or TP module." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 243, + 545, + 324 + ], + "lines": [ + { + "bbox": [ + 307, + 243, + 545, + 324 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 545, + 324 + ], + "type": "table", + "html": "
<table><tr><td rowspan='2'>Variants</td><td colspan='2'>NTU-60</td><td colspan='2'>NTU-120</td><td>Kinetics-Skel.</td></tr>
<tr><td>X-Sub</td><td>X-View</td><td>X-Sub</td><td>X-Set</td><td>Top-1 acc.</td></tr>
<tr><td>Baseline</td><td>89.8</td><td>91.4</td><td>86.5</td><td>87.0</td><td>38.6</td></tr>
<tr><td>+ TP only</td><td>91.2</td><td>93.8</td><td>87.5</td><td>88.6</td><td>39.8</td></tr>
<tr><td>+ MP only</td><td>92.0</td><td>94.3</td><td>88.7</td><td>89.7</td><td>40.3</td></tr>
<tr><td>+ MP→TP</td><td>93.0</td><td>96.1</td><td>90.8</td><td>91.7</td><td>45.7</td></tr>
<tr><td>+ TP→MP</td><td>92.6</td><td>95.8</td><td>90.2</td><td>91.1</td><td>44.0</td></tr>
<tr><td>+ 2-branch (3Mformer)</td><td>94.8</td><td>98.7</td><td>92.0</td><td>93.8</td><td>48.3</td></tr></table>
", + "image_path": "aa13d7f6d9ce50cc957bbdaf9763b3ca72cd0074c3a6ba2cada4e6aa4fc58b4e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 308, + 346, + 360, + 400 + ], + "blocks": [ + { + "bbox": [ + 308, + 346, + 360, + 400 + ], + "lines": [ + { + "bbox": [ + 308, + 346, + 360, + 400 + ], + "spans": [ + { + "bbox": [ + 308, + 346, + 360, + 400 + ], + "type": "image", + "image_path": "f2a642a29b0b5a90add2848cc3d6a8e473c9a3478a97cacea725338b04209982.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 403, + 339, + 412 + ], + "lines": [ + { + "bbox": [ + 329, + 403, + 339, + 412 + ], + "spans": [ + { + "bbox": [ + 329, + 403, + 339, + 412 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 362, + 346, + 414, + 400 + ], + "blocks": [ + { + "bbox": [ + 362, + 346, + 414, + 400 + ], + "lines": [ + { + "bbox": [ + 362, + 346, + 414, + 400 + ], + "spans": [ + { + "bbox": [ + 362, + 346, + 414, + 400 + ], + "type": "image", + "image_path": "e71bc19dcd48ddc53580575a60c07d8346dff3cc584340aa6a1d7bde738afbe3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 403, + 394, + 412 + ], + "lines": [ + { + "bbox": [ + 383, + 403, + 394, + 412 + ], + "spans": [ + { + "bbox": [ + 383, + 403, + 394, + 412 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 417, + 346, + 470, + 400 + ], + "blocks": [ + { + "bbox": [ + 417, + 346, + 470, + 400 + ], + "lines": [ + { + "bbox": [ + 417, + 346, + 470, + 400 + ], + "spans": [ + { + "bbox": [ + 417, + 346, + 470, + 400 + ], + "type": "image", + "image_path": "fa582b8c85b427cd431a2c30983a66c679cdbbde815668395b5ea8ff0439d87d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 403, + 449, + 412 + ], + "lines": [ + { + "bbox": [ + 438, + 403, + 449, + 412 + ], + "spans": [ + { + "bbox": [ + 438, + 403, + 449, + 412 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 305, + 422, + 545, + 467 + ], + "lines": [ + { + "bbox": [ + 305, + 422, + 545, + 467 + ], + "spans": [ + { + "bbox": [ + 305, + 422, + 545, + 467 + ], + "type": "text", + "content": "Figure 2. Visualization of attention matrices. (a) single-mode attention matrix of 'channel-only' token, (b)-(d) coupled-mode attention matrices of 'channel-hyper-edge', 'order-channel-body joint' and 'channel-temporal block' tokens, respectively." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 473, + 346, + 527, + 400 + ], + "blocks": [ + { + "bbox": [ + 473, + 346, + 527, + 400 + ], + "lines": [ + { + "bbox": [ + 473, + 346, + 527, + 400 + ], + "spans": [ + { + "bbox": [ + 473, + 346, + 527, + 400 + ], + "type": "image", + "image_path": "8afabcce92664b7f25393b42d25ba665997d90ef09c75ae925c5daa0a6813233.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 503, + 403, + 514, + 412 + ], + "lines": [ + { + "bbox": [ + 503, + 403, + 514, + 412 + ], + "spans": [ + { + "bbox": [ + 503, + 403, + 514, + 412 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 487, + 399, + 501 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 487, + 399, + 501 + ], + "spans": [ + { + "bbox": [ + 306, + 487, + 399, + 501 + ], + "type": "text", + "content": "5.3. Ablation Study" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": "Search for the single best order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": ". Table 1 shows our analysis regarding the best order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": ". In general, increasing the order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " improves the performance (within " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "\\sim 0.5\\%" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " on average), but causing higher computational cost, e.g., the number of hyper-edges for the skeletal hypergraph of order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n = 4" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " is 3060 on Kinetics-Skeleton. We also notice that combining orders 3 and 4 yields very limited improvements. 
The main reasons are: (i) reasonable order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": ", e.g., " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n = 3" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " or 4 improves accuracy as higher-order motion patterns are captured which are useful for classification-related tasks (ii) further increasing order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": ", e.g., " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n = 5" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " introduces patterns in feature representations that rarely repeat even for the same action class. Considering the cost and performance, we choose the maximum order " + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "r = 3" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "inline_equation", + "content": "n = 1,2,3" + }, + { + "bbox": [ + 304, + 506, + 545, + 686 + ], + "type": "text", + "content": ") in the following experiments unless specified otherwise." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 689, + 545, + 713 + ], + "type": "text", + "content": "Discussion on coupled-mode attention. Fig. 2 shows the visualization of some attention matrices in our 3Mformer," + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5625" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 87, + 547, + 385 + ], + "blocks": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "lines": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "spans": [ + { + "bbox": [ + 159, + 71, + 434, + 82 + ], + "type": "text", + "content": "Table 3. Experimental results on NTU-60, NTU-120 and Kinetics-Skeleton." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 87, + 547, + 385 + ], + "lines": [ + { + "bbox": [ + 47, + 87, + 547, + 385 + ], + "spans": [ + { + "bbox": [ + 47, + 87, + 547, + 385 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2" colspan="2">Method</td><td rowspan="2">Venue</td><td colspan="2">NTU-60</td><td colspan="2">NTU-120</td><td colspan="2">Kinetics-Skeleton</td></tr>
<tr><td>X-Sub</td><td>X-View</td><td>X-Sub</td><td>X-Set</td><td>Top-1</td><td>Top-5</td></tr>
<tr><td rowspan="11">Graph-based</td><td>TCN [22]</td><td>CVPRW'17</td><td>-</td><td>-</td><td>-</td><td>-</td><td>20.3</td><td>40.0</td></tr>
<tr><td>ST-GCN [64]</td><td>AAAI'18</td><td>81.5</td><td>88.3</td><td>70.7</td><td>73.2</td><td>30.7</td><td>52.8</td></tr>
<tr><td>AS-GCN [30]</td><td>CVPR'19</td><td>86.8</td><td>94.2</td><td>78.3</td><td>79.8</td><td>34.8</td><td>56.5</td></tr>
<tr><td>2S-AGCN [44]</td><td>CVPR'19</td><td>88.5</td><td>95.1</td><td>82.5</td><td>84.2</td><td>36.1</td><td>58.7</td></tr>
<tr><td>NAS-GCN [37]</td><td>AAAI'20</td><td>89.4</td><td>95.7</td><td>-</td><td>-</td><td>37.1</td><td>60.1</td></tr>
<tr><td>Sym-GNN [31]</td><td>TPAMI'22</td><td>90.1</td><td>96.4</td><td>-</td><td>-</td><td>37.2</td><td>58.1</td></tr>
<tr><td>Shift-GCN [6]</td><td>CVPR'20</td><td>90.7</td><td>96.5</td><td>85.9</td><td>87.6</td><td>-</td><td>-</td></tr>
<tr><td>MS-G3D [36]</td><td>CVPR'20</td><td>91.5</td><td>96.2</td><td>86.9</td><td>88.4</td><td>38.0</td><td>60.9</td></tr>
<tr><td>CTR-GCN [4]</td><td>ICCV'21</td><td>92.4</td><td>96.8</td><td>88.9</td><td>90.6</td><td>-</td><td>-</td></tr>
<tr><td>InfoGCN [9]</td><td>CVPR'22</td><td>93.0</td><td>97.1</td><td>89.8</td><td>91.2</td><td>-</td><td>-</td></tr>
<tr><td>PoseConv3D [12]</td><td>CVPR'22</td><td>94.1</td><td>97.1</td><td>86.9</td><td>90.3</td><td>47.7</td><td>-</td></tr>
<tr><td rowspan="4">Hypergraph-based</td><td>Hyper-GNN [15]</td><td>TIP'21</td><td>89.5</td><td>95.7</td><td>-</td><td>-</td><td>37.1</td><td>60.0</td></tr>
<tr><td>DHGCN [62]</td><td>CoRR'21</td><td>90.7</td><td>96.0</td><td>86.0</td><td>87.9</td><td>37.7</td><td>60.6</td></tr>
<tr><td>Selective-HCN [79]</td><td>ICMR'22</td><td>90.8</td><td>96.6</td><td>-</td><td>-</td><td>38.0</td><td>61.1</td></tr>
<tr><td>SD-HGCN [17]</td><td>ICONIP'21</td><td>90.9</td><td>96.7</td><td>87.0</td><td>88.2</td><td>37.4</td><td>60.5</td></tr>
<tr><td rowspan="4">Transformer-based</td><td>ST-TR [39]</td><td>CVIU'21</td><td>90.3</td><td>96.3</td><td>85.1</td><td>87.1</td><td>38.0</td><td>60.5</td></tr>
<tr><td>MTT [23]</td><td>LSP'21</td><td>90.8</td><td>96.7</td><td>86.1</td><td>87.6</td><td>37.9</td><td>61.3</td></tr>
<tr><td>4s-GSTN [18]</td><td>Symmetry'22</td><td>91.3</td><td>96.6</td><td>86.4</td><td>88.7</td><td>-</td><td>-</td></tr>
<tr><td>STST [73]</td><td>ACM MM'21</td><td>91.9</td><td>96.8</td><td>-</td><td>-</td><td>38.3</td><td>61.2</td></tr>
<tr><td colspan="3">3Mformer (with avg-pool, ours)</td><td>92.0</td><td>97.3</td><td>88.0</td><td>90.1</td><td>43.1</td><td>65.2</td></tr>
<tr><td colspan="3">3Mformer (with max-pool, ours)</td><td>92.1</td><td>97.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td colspan="3">3Mformer (with attn-pool, ours)</td><td>94.2</td><td>98.5</td><td>89.7</td><td>92.4</td><td>45.7</td><td>67.6</td></tr>
<tr><td colspan="3">3Mformer (with tri-pool, ours)</td><td>94.0</td><td>98.5</td><td>91.2</td><td>92.7</td><td>47.7</td><td>71.9</td></tr>
<tr><td colspan="3">3Mformer (with rank-pool, ours)</td><td>94.8</td><td>98.7</td><td>92.0</td><td>93.8</td><td>48.3</td><td>72.3</td></tr>
</table>
", + "image_path": "90e1416c9626cf72abc9441ae9af223aaee48ee37c15ee8e0b7a5fff22357fe6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 413, + 288, + 495 + ], + "blocks": [ + { + "bbox": [ + 47, + 413, + 288, + 495 + ], + "lines": [ + { + "bbox": [ + 47, + 413, + 288, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 288, + 495 + ], + "type": "image", + "image_path": "c53534b9138a06abacb121d77afe8a0a88fe2d064fb209ea931912cb3b2f85c4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 502, + 288, + 548 + ], + "lines": [ + { + "bbox": [ + 46, + 502, + 288, + 548 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 288, + 548 + ], + "type": "text", + "content": "Figure 3. Evaluations of different single-mode (baseline) and coupled-mode tokens. We use a 3rd-order HoT with a standard Transformer, but we replace the scaled dot-product attention with coupled-mode tokens and coupled-mode attention." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 575, + 287, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 575, + 287, + 684 + ], + "spans": [ + { + "bbox": [ + 46, + 575, + 287, + 684 + ], + "type": "text", + "content": "which show diagonal and/or vertical patterns that are consistent with the patterns of the attention matrices found in standard Transformer trained on sequences, e.g., for natural language processing tasks [28, 49]. We also notice that the coupled-mode attention, e.g., 'channel-temporal block' captures much richer information compared to single mode attention, e.g., 'channel-only'. Our coupled-mode attention can be applied to different orders of tensor representations through simple matricization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 689, + 288, + 715 + ], + "type": "text", + "content": "Discussion on model variants. To show the effectiveness of the proposed MP and TP module, firstly, we compare" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "spans": [ + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": "TP only and MP only with the baseline (No MP or TP module). We use the TP module followed by an FC layer instead of MP as in " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": ", where the FC layer takes the output from TP " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}^{d'N})" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " and produces a vector in " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{3d'J}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " passed to the classifier. 
Similarly, for MP only, we use the MP module followed by an average pooling layer instead of TP as in " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": ", where the average layer takes output from MP " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "(\\mathbb{R}^{3d'J \\times \\tau})" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " and generates a vector in " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{3d'J}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " (pool along " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " blocks), passed to the classifier. Table 2 shows the results. With just the TP module, we outperform the baseline by " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " on average. With only the MP module, we outperform the baseline by " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "2.34\\%" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " on average. These comparisons show that (i) CmSA in MP and TP are efficient for better performance (ii) MP performs better than TP which shows that 'channel-temporal block' token contains richer information than 'channel-hyper-edge' token. We also notice that " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " slightly outperforms " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\sim 1\\%" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": ", and the main reason is that " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{MP} \\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " has coupled-mode tokens 'channel-temporal block' and 'order-channel-joint' which attend 4 modes, whereas " + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{TP} \\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 304, + 415, + 547, + 704 + ], + "type": "text", + "content": " has 'channel-hyper-edge' and 'channel-only' tokens which attend only 2 modes. Fig. 3 shows a comparison of different coupled-mode tokens on 3 benchmark datasets. 
This also suggests that one should firstly perform attenuate" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5626" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": "tion with coupled-mode 'channel-block' tokens, followed by weighted pooling along the hyper-edge mode, followed by attention with coupled-mode 'order-channel-body joint' and finalised by block-temporal pooling. Finally, with 2- branch (3Mformer), we further boost the performance by " + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "inline_equation", + "content": "2 - 4\\%" + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": " , which shows that " + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "inline_equation", + "content": "\\mathrm{MP}\\rightarrow \\mathrm{TP}" + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "inline_equation", + "content": "\\mathrm{TP}\\rightarrow \\mathrm{MP}" + }, + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": " are complementary branches. Below we use 2-branch (3Mformer) in the experiments (as in Fig. 1)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "text", + "content": "Comparison of pooling in TP. As shown in Table 3, average pooling (avg-pool) achieves similar performance (within " + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "inline_equation", + "content": "\\sim 0.5\\%" + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "text", + "content": " difference) as maximum pooling (max-pool), second-order pooling (attn-pool) outperforms average and maximum pooling by " + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "inline_equation", + "content": "\\sim 1 - 2\\%" + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "text", + "content": " and third-order pooling (tri-pool) outperforms second-order pooling by " + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "inline_equation", + "content": "\\sim 1\\%" + }, + { + "bbox": [ + 46, + 171, + 288, + 327 + ], + "type": "text", + "content": ". Interestingly, rank pooling (rank-pool) achieves the best performance. We think it is reasonable as rank pooling strives to enforce the temporal order in the feature space to be preserved, e.g., it forces network to always preserve temporal progression of actions over time. With multiple attention modules, orderless statistics such as second- or third-order pooling may be too general." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 335, + 252, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 335, + 252, + 348 + ], + "spans": [ + { + "bbox": [ + 47, + 335, + 252, + 348 + ], + "type": "text", + "content": "5.4. Comparisons with the State of the Arts" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "content": "We compare our model with recent state-of-the-art methods. On the NTU-60 (Tab. 3), we obtain the top-1 accuracies of the two evaluation protocols during test stage. The methods in comparisons include popular graph-based [30, 31, 37, 44, 64] and hypergraph-based models [15, 17, 62, 79]. Our 3rd-order model outperforms all graph-based methods, and also outperforms existing hypergraph-based models such as Selective-HCN and SD-HGCN by " + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "inline_equation", + "content": "0.45\\%" + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "inline_equation", + "content": "0.35\\%" + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "content": " on average on X-Sub and X-View respectively. With 3Mformer for the fusion of multi-order features, our model further boosts the performance by " + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "inline_equation", + "content": "\\sim 3\\%" + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 46, + 354, + 287, + 498 + ], + "type": "text", + "content": " on the two protocols." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 498, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 498, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 498, + 287, + 665 + ], + "type": "text", + "content": "It can be seen from Tab. 3 on NTU-60 that although some learned graph-based methods such as AS-GCN and 2S-AGCN can also capture the dependencies between human body joints, they only consider the pairwise relationship between body joints, which is the second-order interaction, and ignore the higher-order interaction between multiple body joints in form of hyper-edges, which may lose sensitivity to important groups of body joints. Our proposed 3Mformer achieves better performance by constructing a hypergraph from 2D/3D body joints as nodes for action recognition, thus capturing higher-order interactions of body joints to further improve the performance. Note that even with the average pooling, our model still achieves competitive results compared to its counterparts." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "content": "For the NTU-120 dataset (Tab. 3), we obtain the top-1 performance on X-Sub and X-Set protocols. 
Our 2nd-order HoT alone outperforms graph-based models by " + }, + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "inline_equation", + "content": "2 - 2.4\\%" + }, + { + "bbox": [ + 46, + 665, + 287, + 714 + ], + "type": "text", + "content": " on average. For example, we outperform recent Shift-GCN by" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 307, + 86, + 545, + 117 + ], + "blocks": [ + { + "bbox": [ + 325, + 71, + 525, + 82 + ], + "lines": [ + { + "bbox": [ + 325, + 71, + 525, + 82 + ], + "spans": [ + { + "bbox": [ + 325, + 71, + 525, + 82 + ], + "type": "text", + "content": "Table 4. Experimental results on Northwestern-UCLA." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 307, + 86, + 545, + 117 + ], + "lines": [ + { + "bbox": [ + 307, + 86, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 86, + 545, + 117 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>Shift-GCN [6] (CVPR'20)</td><td>CTR-GCN [4] (ICCV'21)</td><td>InfoGCN [9] (CVPR'22)</td><td>2nd-order only (ours)</td><td>3rd-order only (ours)</td><td>3Mformer (ours)</td></tr>
<tr><td>acc. (%)</td><td>94.6</td><td>96.5</td><td>97.0</td><td>96.5</td><td>97.2</td><td>97.8</td></tr>
</table>
", + "image_path": "3ade81c222e702d89f2d649bb6af9c834bdcd74ab8ce96d393b5fc3e01ac8adb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "spans": [ + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "text", + "content": "0.3% and 0.7% on X-Sub and X-Set respectively. Moreover, our 3rd-order HoT alone outperforms SD-HGCN by 0.5% and 1.5% respectively on X-Sub and X-Set. With the 3Mformer for the fusion of multi-order feature maps, we obtain the new state-of-the-art results. Notice that our 3Mformer yields " + }, + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "inline_equation", + "content": "92.0\\% / 93.8\\%" + }, + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "text", + "content": " on NTU-120 while [38] yields " + }, + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "inline_equation", + "content": "80.5\\% / 81.7\\%" + }, + { + "bbox": [ + 304, + 152, + 545, + 262 + ], + "type": "text", + "content": " as we explore the fusion of multiple orders of hyperedges and several coupled-token types capturing easy-to-complex dynamics of varying joint groups." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "spans": [ + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "content": "As videos from the Kinetics dataset are processed by the OpenPose, the skeletons in the Kinetics-Skeleton dataset have defects which adversely affect the performance of the model. We show both top-1 and top-5 performance in Table 3 to better reflect the performance of our 3Mformer. STGCN is the first method based on GCN, our 2nd-order HoT alone achieves very competitive results compared to the very recent NAS-GCN and Sym-GNN. The 3rd-order HoT alone outperforms Hyper-GNN, SD-HGCN and SelectiveHCN by " + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "inline_equation", + "content": "3.4\\%" + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "inline_equation", + "content": "3.1\\%" + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "inline_equation", + "content": "2.9\\%" + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "content": " respectively for top-1 accuracies. Moreover, fusing multi-order feature maps from multiple orders of hyper-edges via 3Mformer gives us the best performance on Kinetics-Skeleton with " + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "inline_equation", + "content": "48.3\\%" + }, + { + "bbox": [ + 304, + 264, + 546, + 431 + ], + "type": "text", + "content": " for top-1, the new state-of-the-art result." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 434, + 547, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 434, + 547, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 434, + 547, + 471 + ], + "type": "text", + "content": "Table 4 shows results on the Northwestern-UCLA dataset. 
Our 3Mformer is also effective on this dataset-it outperforms the current state-of-the-art InfoGCN by " + }, + { + "bbox": [ + 304, + 434, + 547, + 471 + ], + "type": "inline_equation", + "content": "0.8\\%" + }, + { + "bbox": [ + 304, + 434, + 547, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 491, + 384, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 491, + 384, + 503 + ], + "spans": [ + { + "bbox": [ + 306, + 491, + 384, + 503 + ], + "type": "text", + "content": "6. Conclusions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "spans": [ + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "text", + "content": "In this paper, we model the skeleton data as hypergraph to capture higher-order information formed between groups of human body joints of orders 1, ..., " + }, + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "text", + "content": ". We use Higher-order Transformer (HoT) to learn higher-order information on hypergraphs of " + }, + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 514, + 545, + 634 + ], + "type": "text", + "content": "-order formed over 2D/3D human body joints. We also introduce a novel Multi-order Multi-mode Transformer (3Mformer) for the fusion of multi-order feature representations. Our end-to-end trainable 3Mformer outperforms state-of-the-art graph- and hypergraph-based models by a large margin on several benchmarks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 715 + ], + "type": "text", + "content": "Acknowledgements. LW is supported by the Data61/ CSIRO PhD Scholarship. PK is in part funded by CSIRO's Machine Learning and Artificial Intelligence Future Science Platform (MLAI FSP) Spatiotemporal Activity." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5627" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 107, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 92, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 92, + 287, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 92, + 287, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 92, + 287, + 158 + ], + "type": "text", + "content": "[1] Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. VATT: Transformers for multimodal self-supervised learning from raw video, audio and text. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. 
Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 204 + ], + "type": "text", + "content": "[2] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017. 1, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 287, + 237 + ], + "type": "text", + "content": "[3] Chun-Fu Chen, Quanfu Fan, and Rameswar Panda. Crossvit: Cross-attention multi-scale vision transformer for image classification. CoRR, abs/2103.14899, 2021. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 239, + 287, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 287, + 295 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 287, + 295 + ], + "type": "text", + "content": "[4] Yuxin Chen, Ziqi Zhang, Chunfeng Yuan, Bing Li, Ying Deng, and Weiming Hu. Channel-wise topology refinement graph convolution for skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13359-13368, 2021. 2, 7, 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 297, + 286, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 297, + 286, + 362 + ], + "spans": [ + { + "bbox": [ + 53, + 297, + 286, + 362 + ], + "type": "text", + "content": "[5] Ke Cheng, Yifan Zhang, Congqi Cao, Lei Shi, Jian Cheng, and Hanqing Lu. Decoupling gcn with dropgraph module for skeleton-based action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020, pages 536-553, Cham, 2020. Springer International Publishing. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 364, + 286, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 364, + 286, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 364, + 286, + 418 + ], + "type": "text", + "content": "[6] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 2, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 421, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 421, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 53, + 421, + 287, + 487 + ], + "type": "text", + "content": "[7] Yi-Bin Cheng, Xipeng Chen, Dongyu Zhang, and Liang Lin. Motion-transformer: Self-supervised pre-training for skeleton-based action recognition. In Proceedings of the 2nd ACM International Conference on Multimedia in Asia, MMAsia '20, New York, NY, USA, 2021. Association for Computing Machinery. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 488, + 286, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 488, + 286, + 542 + ], + "spans": [ + { + "bbox": [ + 53, + 488, + 286, + 542 + ], + "type": "text", + "content": "[8] Anoop Cherian, Piotr Koniusz, and Stephen Gould. Higher-order pooling of cnn features via kernel linearization for action recognition. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 130-138, 2017. 2, 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 544, + 286, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 544, + 286, + 610 + ], + "spans": [ + { + "bbox": [ + 53, + 544, + 286, + 610 + ], + "type": "text", + "content": "[9] Hyung-gun Chi, Myoung Hoon Ha, Seunggeun Chi, Sang Wan Lee, Qixing Huang, and Karthik Ramani. Infogcn: Representation learning for human skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20186-20196, June 2022. 7, 8" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 613, + 286, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 286, + 688 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 286, + 688 + ], + "type": "text", + "content": "[10] Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J Colwell, and Adrian Weller. Rethinking attention with performers. In International Conference on Learning Representations, 2021. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 286, + 713 + ], + "type": "text", + "content": "[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 127 + ], + "type": "text", + "content": "Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 129, + 545, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 129, + 545, + 182 + ], + "spans": [ + { + "bbox": [ + 308, + 129, + 545, + 182 + ], + "type": "text", + "content": "[12] Haodong Duan, Yue Zhao, Kai Chen, Dahua Lin, and Bo Dai. Revisiting skeleton-based action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2969-2978, June 2022. 
7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 184, + 545, + 227 + ], + "type": "text", + "content": "[13] Basura Fernando, Efstratios Gavves, Jose Oramas Oramas M., Amir Ghodrati, and Tinne Tuytelaars. Rank pooling for action recognition. IEEE Trans. Pattern Anal. Mach. Intell., 39(4):773-787, apr 2017. 2, 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 228, + 545, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 228, + 545, + 249 + ], + "spans": [ + { + "bbox": [ + 308, + 228, + 545, + 249 + ], + "type": "text", + "content": "[14] Rohit Girdhar and Deva Ramanan. Attentional pooling for action recognition. In NIPS, 2017. 2, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 251, + 545, + 293 + ], + "type": "text", + "content": "[15] Xiaoke Hao, Jie Li, Yingchun Guo, Tao Jiang, and Ming Yu. Hypergraph neural network for skeleton-based action recognition. IEEE Transactions on Image Processing, 30:2263-2275, 2021. 2, 4, 7, 8, 13" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 294, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 326 + ], + "type": "text", + "content": "[16] Ryota Hashiguchi and Toru Tamaki. Vision transformer with cross-attention by temporal shift for efficient action recognition, 2022. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 327, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 327, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 327, + 545, + 415 + ], + "type": "text", + "content": "[17] Changxiang He, Chen Xiao, Shuting Liu, Xiaofei Qin, Ying Zhao, and Xuedian Zhang. Single-skeleton and dual-skeleton hypergraph convolution neural networks for skeleton-based action recognition. In Teddy Mantoro, Minho Lee, Media Anugerah Ayu, Kok Wai Wong, and Achmad Nizar Hidayanto, editors, Neural Information Processing, pages 15-27, Cham, 2021. Springer International Publishing. 7, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 545, + 448 + ], + "type": "text", + "content": "[18] Yujuan Jiang, Zhaoneng Sun, Saisai Yu, Shuang Wang, and Yang Song. A graph skeleton transformer network for action recognition. Symmetry, 14(8), 2022. 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 449, + 545, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 449, + 545, + 524 + ], + "spans": [ + { + "bbox": [ + 308, + 449, + 545, + 524 + ], + "type": "text", + "content": "[19] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are RNNs: Fast autoregressive transformers with linear attention. In Hal Daumé III and Aarti Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 5156-5165. PMLR, 13-18 Jul 2020. 
4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 526, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 526, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 308, + 526, + 545, + 580 + ], + "type": "text", + "content": "[20] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. The kinetics human action video dataset, 2017. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 308, + 582, + 545, + 635 + ], + "type": "text", + "content": "[21] Jinwoo Kim, Saeyoon Oh, and Seunghoon Hong. Transformers generalize deepsets and can be extended to graphs & hypergraphs. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021. 2, 3, 4" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "spans": [ + { + "bbox": [ + 308, + 636, + 545, + 680 + ], + "type": "text", + "content": "[22] Tae Soo Kim and Austin Reiter. Interpretable 3d human action analysis with temporal convolutional networks. In 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1623-1631, 2017. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 681, + 545, + 713 + ], + "type": "text", + "content": "[23] Jun Kong, Yuhang Bian, and Min Jiang. Mtt: Multi-scale temporal transformer for skeleton-based action recognition. IEEE Signal Processing Letters, 29:528-532, 2022. 7" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "5628" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 106 + ], + "type": "text", + "content": "[24] Piotr Koniusz, Lei Wang, and Anoop Cherian. Tensor representations for action recognition. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 140 + ], + "type": "text", + "content": "[25] Piotr Koniusz, Lei Wang, and Ke Sun. High-order tensor pooling with attention for action recognition. arXiv, 2021. 
1, 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 142, + 287, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 142, + 287, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 142, + 287, + 185 + ], + "type": "text", + "content": "[26] Piotr Koniusz and Hongguang Zhang. Power normalizations in fine-grained image, few-shot image and graph classification. In IEEE Transactions on Pattern Analysis and Machine Intelligence. IEEE, 2020. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 242 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 242 + ], + "type": "text", + "content": "[27] Matthew Korban and Xin Li. Ddgcn: A dynamic directed graph convolutional network for action recognition. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision – ECCV 2020, pages 761–776, Cham, 2020. Springer International Publishing. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 243, + 287, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 243, + 287, + 320 + ], + "spans": [ + { + "bbox": [ + 48, + 243, + 287, + 320 + ], + "type": "text", + "content": "[28] Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. Revealing the dark secrets of BERT. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4365-4374, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 321, + 287, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 388 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 388 + ], + "type": "text", + "content": "[29] John Boaz Lee, Ryan Rossi, and Xiangnan Kong. Graph classification using structural attention. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '18, page 1666-1674, New York, NY, USA, 2018. Association for Computing Machinery. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 388, + 287, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 287, + 443 + ], + "type": "text", + "content": "[30] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Actional-structural graph convolutional networks for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 499 + ], + "type": "text", + "content": "[31] Maosen Li, Siheng Chen, Xu Chen, Ya Zhang, Yanfeng Wang, and Qi Tian. Symbiotic graph neural networks for 3d skeleton-based human action recognition and motion prediction. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(6):3316-3333, 2022. 
7, 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "spans": [ + { + "bbox": [ + 48, + 501, + 287, + 544 + ], + "type": "text", + "content": "[32] Hezheng Lin, Xing Cheng, Xiangyu Wu, Fan Yang, Dong Shen, Zhongyuan Wang, Qing Song, and Wei Yuan. CAT: cross attention in vision transformer. CoRR, abs/2106.05786, 2021. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 545, + 287, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 545, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 545, + 287, + 568 + ], + "type": "text", + "content": "[33] Tsung-Yu Lin, Subhransu Maji, and Piotr Koniusz. Second-order democratic aggregation. In ECCV, 2018. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 287, + 624 + ], + "type": "text", + "content": "[34] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C. Kot. Ntu rgb+d 120: A large-scale benchmark for 3d human activity understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2019. 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 625, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 625, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 625, + 287, + 712 + ], + "type": "text", + "content": "[35] Shengyuan Liu, Pei Lv, Yuzhen Zhang, Jie Fu, Junjin Cheng, Wanqing Li, Bing Zhou, and Mingliang Xu. Semi-dynamic hypergraph neural network for 3d pose estimation. In Christian Bessiere, editor, Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 782-788. International Joint Conferences on Artificial Intelligence Organization, 7 2020. Main track. 2, 4, 13" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 546, + 714 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 546, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 546, + 128 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 546, + 128 + ], + "type": "text", + "content": "[36] Ziyu Liu, Hongwen Zhang, Zhenghao Chen, Zhiyong Wang, and Wanli Ouyang. Disentangling and unifying graph convolutions for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 129, + 546, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 129, + 546, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 129, + 546, + 185 + ], + "type": "text", + "content": "[37] Wei Peng, Xiaopeng Hong, Haoyu Chen, and Guoying Zhao. Learning graph convolutional network for skeleton-based human action recognition by neural searching. Proceedings of the AAAI Conference on Artificial Intelligence, 34(03):2669-2676, Apr. 2020. 
7, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 186, + 546, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 546, + 218 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 546, + 218 + ], + "type": "text", + "content": "[38] Wei Peng, Jingang Shi, Tuomas Varanka, and Guoying Zhao. Rethinking the st-gcns for 3d skeleton-based human action recognition. Neurocomputing, 454:45-53, 2021. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 219, + 546, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 219, + 546, + 262 + ], + "spans": [ + { + "bbox": [ + 307, + 219, + 546, + 262 + ], + "type": "text", + "content": "[39] Chiara Plizzari, Marco Cannici, and Matteo Matteucci. Skeleton-based action recognition via spatial and temporal transformer networks. Computer Vision and Image Understanding, 208-209:103219, 2021. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 264, + 546, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 264, + 546, + 308 + ], + "spans": [ + { + "bbox": [ + 307, + 264, + 546, + 308 + ], + "type": "text", + "content": "[40] Zhenyue Qin, Yang Liu, Pan Ji, Dongwoo Kim, Lei Wang, Bob McKay, Saeed Anwar, and Tom Gedeon. Fusing higher-order features in graph neural networks for skeleton-based action recognition. IEEE TNNLS, 2022. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 309, + 546, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 546, + 353 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 546, + 353 + ], + "type": "text", + "content": "[41] Saimunur Rahman, Piotr Koniusz, Lei Wang, Luping Zhou, Peyman Moghadam, and Changming Sun. Learning partial correlation based deep visual representation for image classification. In CVPR, 2023. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 354, + 546, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 546, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 546, + 399 + ], + "type": "text", + "content": "[42] Kanchana Ranasinghe, Muzammal Naseer, Salman Khan, Fahad Shahbaz Khan, and Michael Ryoo. Self-supervised video transformer. In IEEE/CVF International Conference on Computer Vision and Pattern Recognition, June 2022. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 399, + 546, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 546, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 546, + 443 + ], + "type": "text", + "content": "[43] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. Ntu rgb+d: A large scale dataset for 3d human activity analysis. In IEEE Conference on Computer Vision and Pattern Recognition, June 2016. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 444, + 546, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 546, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 546, + 477 + ], + "type": "text", + "content": "[44] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In CVPR, 2019. 
7, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 478, + 546, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 546, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 546, + 533 + ], + "type": "text", + "content": "[45] Lei Shi, Yifan Zhang, Jian Cheng, and Hanqing Lu. Adasgn: Adapting joint number and model size for efficient skeleton-based action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 13413-13422, October 2021. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 534, + 546, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 534, + 546, + 589 + ], + "spans": [ + { + "bbox": [ + 307, + 534, + 546, + 589 + ], + "type": "text", + "content": "[46] Chenyang Si, Wentao Chen, Wei Wang, Liang Wang, and Tieniu Tan. An attention enhanced graph convolutional LSTM network for skeleton-based action recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 590, + 546, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 590, + 546, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 590, + 546, + 635 + ], + "type": "text", + "content": "[47] Yi-Fan Song, Zhang Zhang, Caifeng Shan, and Liang Wang. Constructing stronger and faster baselines for skeleton-based action recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2022. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 635, + 546, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 546, + 679 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 546, + 679 + ], + "type": "text", + "content": "[48] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. CoRR, abs/2203.12602, 2022. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 680, + 546, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 546, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 546, + 714 + ], + "type": "text", + "content": "[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Ilia Polosukhin. Attention is all you need. In I. Guyon," + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "5629" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 715 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 
3, 4, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "text", + "content": "[50] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 288, + 207 + ], + "type": "text", + "content": "[51] Jiang Wang, Xiaohan Nie, Yin Xia, Ying Wu, and Song-Chun Zhu. Cross-view action modeling, learning and recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2649-2656, 2014. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 209, + 288, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 288, + 251 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 288, + 251 + ], + "type": "text", + "content": "[52] Lei Wang. Analysis and evaluation of Kinect-based action recognition algorithms. Master's thesis, School of the Computer Science and Software Engineering, The University of Western Australia, 11 2017. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 253, + 287, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 253, + 287, + 285 + ], + "spans": [ + { + "bbox": [ + 48, + 253, + 287, + 285 + ], + "type": "text", + "content": "[53] Lei Wang, Du Q. Huynh, and Piotr Koniusz. A comparative review of recent kinet-based action recognition algorithms. TIP, 2019. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 286, + 287, + 319 + ], + "type": "text", + "content": "[54] Lei Wang, Du Q. Huynh, and Moussa Reda Mansour. Loss switching fusion with similarity search for video classification. ICIP, 2019. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 364 + ], + "type": "text", + "content": "[55] Lei Wang and Piotr Koniusz. Self-Supervising Action Recognition by Statistical Moment and Subspace Descriptors, page 4324-4333. Association for Computing Machinery, New York, NY, USA, 2021. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "spans": [ + { + "bbox": [ + 48, + 365, + 287, + 409 + ], + "type": "text", + "content": "[56] Lei Wang and Piotr Koniusz. Temporal-viewpoint transportation plan for skeletal few-shot action recognition. In Proceedings of the Asian Conference on Computer Vision, pages 4176-4193, 2022. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 455 + ], + "type": "text", + "content": "[57] Lei Wang and Piotr Koniusz. 
Uncertainty-dtw for time series and sequences. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI, pages 176-195. Springer, 2022. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 456, + 287, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 488 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 488 + ], + "type": "text", + "content": "[58] Lei Wang, Piotr Koniusz, and Du Q. Huynh. Hallucinating IDT descriptors and I3D optical flow features for action recognition with cnns. In ICCV, 2019. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 490, + 287, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 490, + 287, + 522 + ], + "spans": [ + { + "bbox": [ + 48, + 490, + 287, + 522 + ], + "type": "text", + "content": "[59] Lei Wang, Jun Liu, and Piotr Koniusz. 3d skeleton-based few-shot action recognition with jeanie is not so naive. arXiv preprint arXiv:2112.12668, 2021. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 524, + 287, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 524, + 287, + 599 + ], + "spans": [ + { + "bbox": [ + 48, + 524, + 287, + 599 + ], + "type": "text", + "content": "[60] Qilong Wang, Zilin Gao, Jiangtao Xie, Wangmeng Zuo, and Peihua Li. Global gated mixture of second-order pooling for improving deep convolutional neural networks. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. 2, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "spans": [ + { + "bbox": [ + 48, + 601, + 287, + 644 + ], + "type": "text", + "content": "[61] Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan L. Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. CoRR, abs/2112.09133, 2021. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 689 + ], + "type": "text", + "content": "[62] Jinfeng Wei, Yunxin Wang, Mengli Guo, Pei Lv, Xiaoshan Yang, and Mingliang Xu. Dynamic hypergraph convolutional networks for skeleton-based action recognition. CoRR, abs/2112.10570, 2021. 7, 8" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 691, + 287, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 715 + ], + "type": "text", + "content": "[63] Xi Wei, Tianzhu Zhang, Yan Li, Yongdong Zhang, and Feng Wu. Multi-modality cross attention network for image and" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 73, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 327, + 73, + 545, + 105 + ], + "type": "text", + "content": "sentence matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 
3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "spans": [ + { + "bbox": [ + 308, + 107, + 545, + 140 + ], + "type": "text", + "content": "[64] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition. In AAAI, 2018. 1, 2, 4, 6, 7, 8, 13" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "spans": [ + { + "bbox": [ + 308, + 141, + 545, + 184 + ], + "type": "text", + "content": "[65] Han Zhang, Yonghong Song, and Yuanlin Zhang. Graph convolutional LSTM model for skeleton-based action recognition. In 2019 IEEE International Conference on Multimedia and Expo (ICME), pages 412-417, 2019. 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 186, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 186, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 308, + 186, + 545, + 239 + ], + "type": "text", + "content": "[66] Jiani Zhang, Xingjian Shi, Junyuan Xie, Hao Ma, Irwin King, and Dit-Yan Yeung. Gaan: Gated attention networks for learning on large and spatiotemporal graphs. In Amir Globerson and Ricardo Silva, editors, UAI, pages 339-349. AUAI Press, 2018. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 241, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 241, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 308, + 241, + 545, + 296 + ], + "type": "text", + "content": "[67] Pengfei Zhang, Cuiling Lan, Wenjun Zeng, Junliang Xing, Jianru Xue, and Nanning Zheng. Semantics-guided neural networks for efficient skeleton-based human action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 297, + 545, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 297, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 308, + 297, + 545, + 329 + ], + "type": "text", + "content": "[68] Shan Zhang, Dawei Luo, Lei Wang, and Piotr Koniusz. Few-shot object detection by second-order pooling. In ACCV, 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 308, + 331, + 545, + 364 + ], + "type": "text", + "content": "[69] Shan Zhang, Naila Murray, Lei Wang, and Piotr Koniusz. Time-reversed diffusion tensor transformer: A new tenet of few-shot object detection. In ECCV, 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 545, + 398 + ], + "type": "text", + "content": "[70] Shan Zhang, Lei Wang, Naila Murray, and Piotr Koniusz. Kernelized few-shot object detection with efficient integral aggregation. In CVPR, 2022. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 442 + ], + "type": "text", + "content": "[71] Xikun Zhang, Chang Xu, and Dacheng Tao. Context aware graph convolution for skeleton-based action recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 308, + 444, + 545, + 487 + ], + "type": "text", + "content": "[72] Yongkang Zhang, Jun Li, Guoming Wu, Han Zhang, Zhiping Shi, Zhaoxun Liu, and Zizhang Wu. Temporal transformer networks with self-supervision for action recognition. CoRR, abs/2112.07338, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 488, + 545, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 488, + 545, + 553 + ], + "spans": [ + { + "bbox": [ + 308, + 488, + 545, + 553 + ], + "type": "text", + "content": "[73] Yuhan Zhang, Bo Wu, Wen Li, Lixin Duan, and Chuang Gan. Stst: Spatial-temporal specialized transformer for skeleton-based action recognition. In Proceedings of the 29th ACM International Conference on Multimedia, MM '21, page 3229-3237, New York, NY, USA, 2021. Association for Computing Machinery. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 555, + 545, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 555, + 545, + 588 + ], + "spans": [ + { + "bbox": [ + 308, + 555, + 545, + 588 + ], + "type": "text", + "content": "[74] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. COSTA: Covariance-preserving feature augmentation for graph contrastive learning. In KDD, 2022. 2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 590, + 545, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 590, + 545, + 621 + ], + "spans": [ + { + "bbox": [ + 308, + 590, + 545, + 621 + ], + "type": "text", + "content": "[75] Yifei Zhang, Hao Zhu, Zixing Song, Piotr Koniusz, and Irwin King. Spectral feature augmentation for graph contrastive learning and beyond. In AAAI, 2023. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 623, + 545, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 623, + 545, + 643 + ], + "spans": [ + { + "bbox": [ + 308, + 623, + 545, + 643 + ], + "type": "text", + "content": "[76] Hao Zhu and Piotr Koniusz. Simple spectral graph convolution. In ICLR, 2021. 1" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 646, + 545, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 646, + 545, + 667 + ], + "spans": [ + { + "bbox": [ + 308, + 646, + 545, + 667 + ], + "type": "text", + "content": "[77] Hao Zhu and Piotr Koniusz. Generalized laplacian eigenmaps. In NeurIPS, 2022. 1" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 308, + 669, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 689 + ], + "type": "text", + "content": "[78] Hao Zhu, Ke Sun, and Piotr Koniusz. Contrastive laplacian eigenmaps. In NeurIPS, 2021. 
1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 308, + 691, + 545, + 713 + ], + "type": "text", + "content": "[79] Yiran Zhu, Guangji Huang, Xing Xu, Yanli Ji, and Fumin Shen. Selective hypergraph convolutional networks for" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "5630" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 162 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 116 + ], + "type": "text", + "content": "skeleton-based action recognition. In Proceedings of the 2022 International Conference on Multimedia Retrieval, ICMR '22, page 518-526, New York, NY, USA, 2022. Association for Computing Machinery. 7, 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 162 + ], + "type": "text", + "content": "[80] Gao Zilin, Xie Jiangtao, Wang Qilong, and Li Peihua. Global second-order pooling convolutional networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 
2, 5" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "5631" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_content_list.json b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..497330b1f9a43422e6e7e5c6c77aa0bedf9192cf --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_content_list.json @@ -0,0 +1,1510 @@ +[ + { + "type": "text", + "text": "A Bag-of-Prototypes Representation for Dataset-Level Applications", + "text_level": 1, + "bbox": [ + 145, + 130, + 823, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weijie $\\mathrm{Tu}^{1}$ Weijian Deng $^{1}$ Tom Gedeon $^{2}$ Liang Zheng $^{1}$", + "bbox": [ + 236, + 179, + 730, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Australian National University $^{2}$ Curtin University", + "bbox": [ + 271, + 198, + 697, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 251, + 313, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work investigates dataset vectorization for two dataset-level tasks: assessing training set suitability and test set difficulty. The former measures how suitable a training set is for a target domain, while the latter studies how challenging a test set is for a learned model. Central to the two tasks is measuring the underlying relationship between datasets. This needs a desirable dataset vectorization scheme, which should preserve as much discriminative dataset information as possible so that the distance between the resulting dataset vectors can reflect dataset-to-dataset similarity. To this end, we propose a bag-of-prototypes (BoP) dataset representation that extends the image-level bag consisting of patch descriptors to dataset-level bag consisting of semantic prototypes. Specifically, we develop a codebook consisting of $K$ prototypes clustered from a reference dataset. Given a dataset to be encoded, we quantize each of its image features to a certain prototype in the codebook and obtain a $K$ -dimensional histogram. Without assuming access to dataset labels, the BoP representation provides rich characterization of the dataset semantic distribution. Furthermore, BoP representations cooperate well with Jensen-Shannon divergence for measuring dataset-to-dataset similarity. Although very simple, BoP consistently shows its advantage over existing representations on a series of benchmarks for two dataset-level tasks.", + "bbox": [ + 76, + 282, + 472, + 660 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 691, + 209, + 709 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Datasets are fundamental in machine learning research, forming the basis of model training and testing [18, 51, 52, 61]. While large-scale datasets bring opportunities in algorithm design, there lack proper tools to analyze and make the best use of them [6, 51, 56]. Therefore, as opposed to traditional algorithm-centric research where improving models is of primary interest, the community has seen a growing interest in understanding and analyzing the data used for developing models [51, 56]. Recent examples of such goal include data synthesis [29], data sculpting [25, 51], and data valuation [6, 32, 56]. These tasks typically focus on individual sample of a dataset. In this work, we aim to understand", + "bbox": [ + 76, + 719, + 470, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "nature of datasets from a dataset-level perspective.", + "bbox": [ + 500, + 253, + 833, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work considers two dataset-level tasks: suitability in training and difficulty in testing. First, training set suitability denotes whether a training set is suitable for training models for a target dataset. In real-world applications, we are often provided with multiple training sets from various data distributions (e.g., universities and hospitals). Due to distribution shift, their trained models have different performance on the target dataset. Then, it is of high practical value to select the most suitable training set for the target dataset. Second, test set difficulty means how challenging a test set is for a learned model. In practice, test sets are usually unlabeled and often come from different distributions than that of the training set. Measuring the test set difficulty for a learned model helps us understand the model reliability, thereby ensuring safe model deployment.", + "bbox": [ + 496, + 271, + 890, + 500 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The core of the two dataset-level tasks is to measure the relationship between datasets. For example, a training set is more suitable for learning a model if it is more similar to the target dataset. To this end, we propose a vectorization scheme to represent a dataset. Then, the relationship between a pair of datasets can be simply reflected by the distance between their representations. Yet, it is challenging to encode a dataset as a representative vector, because (i) a dataset has a different cardinality (number of images) and (ii) each image has its own semantic content (e.g., category). It is thus critical to find an effective way to aggregate all image features to uncover dataset semantic distributions.", + "bbox": [ + 496, + 503, + 892, + 684 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the literature, some researchers use the first few moments of distributions such as feature mean and co-variance to represent datasets [20, 62, 74, 75, 82]. While being computational friendly, these methods do not offer sufficiently strong descriptive ability of a dataset, such as class distributions, and thus have limited effectiveness in assessing attributes related to semantics. There are also some methods learn task-specific dataset representations [1, 63]. 
For example, given a dataset with labels and a task loss function, Task2Vec [1] computes an embedding based on estimates of the Fisher information matrix associated with a probe network's parameters. While these task-specific representations are able to predict task similarities, they are not suitable for characterizing dataset properties of interest. They", + "bbox": [ + 496, + 689, + 893, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "2881", + "bbox": [ + 482, + 944, + 513, + 957 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "require training a network on the specific task [1] or on multiple datasets [63], so they are not effective in assessing the training set suitability. Additionally, they require image labels for the specific task, so they cannot be used to measure the difficulty of unlabeled test sets.", + "bbox": [ + 75, + 90, + 468, + 165 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose a simple and effective bag-of-prototypes (BoP) dataset representation. Its computation starts with partitioning the image feature space into semantic regions through clustering, where the region centers, or prototypes, form a codebook. Given a new dataset, we quantize its features to their corresponding prototypes and compute an assignment histogram, which, after normalization, gives the BoP representation. The dimensionality of BoP equals the codebook size, which is usually a few hundred and is considered memory-efficient. Meanwhile, the histogram computed on the prototypes is descriptive of the dataset semantic distribution.", + "bbox": [ + 75, + 167, + 468, + 347 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Apart from being low dimensional and semantically rich, BoP has a few other advantages. First, while recent works in task-specific dataset representation usually require full image annotations and additional learning procedure [1,63], the computation of BoP does not rely on any. It is relatively efficient and allows for unsupervised assessment of dataset attributes. Second, BoP supports dataset-to-dataset similarity measurement through Jensen-Shannon divergence. We show in our experiment that this similarity is superior to commonly used metrics such as Fréchet distance [27] and maximum mean discrepancy [33] in two dataset-level tasks.", + "bbox": [ + 75, + 348, + 468, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 527, + 218, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Dataset representations. A common practice is to use simple and generic statistics as dataset representations [20, 62, 74, 75, 82]. For example, Peng et al. [62] use the first moment to represent a dataset. Deng et al. [20] use global feature mean and co-variance as dataset representations. Vanschoeren et al. [82] find dataset cardinality (the number of images/classes) useful to encode a dataset. These methods have limited descriptive ability, whereas BoP is more semantically descriptive. 
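Below is a minimal sketch of the clustering-then-histogram pipeline behind BoP, as summarized in the introduction, assuming image features have already been extracted (e.g., penultimate-layer activations) into NumPy arrays of shape (num_images, feature_dim) and using scikit-learn's k-means. All function and variable names are illustrative rather than taken from any released implementation.

```python
import numpy as np
from sklearn.cluster import KMeans


def build_codebook(reference_features: np.ndarray, num_prototypes: int,
                   seed: int = 0) -> KMeans:
    """Step I: k-means on reference features; the K cluster centers act as prototypes."""
    codebook = KMeans(n_clusters=num_prototypes, n_init=10, random_state=seed)
    codebook.fit(reference_features)            # (N_r, d) features -> K prototypes
    return codebook


def bop_representation(features: np.ndarray, codebook: KMeans) -> np.ndarray:
    """Step II: quantize each feature to its nearest prototype, then normalize."""
    assignments = codebook.predict(features)    # (N_e,) prototype indices
    counts = np.bincount(assignments, minlength=codebook.n_clusters)
    return counts / counts.sum()                # K-dimensional BoP histogram
```

Here `num_prototypes` plays the role of the codebook size $K$; the experiments reported later use sizes from around 80 (CIFAR-10 setup) up to 1000 (DomainNet and ImageNet setups).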
Moreover, it is feasible to learn a task-specific dataset representation [1, 63, 84, 87]. For example, Ying et al. [84] learn transfer skills from previous transfer learning experiences for future target tasks. Achille et al. [1] propose to learn a task embedding based on the estimate of Fisher information matrix associated with a task loss. Compared with these task-specific representations, BoP is hand-crafted, avoiding computation overheads incurred by end-to-end learning. It is thus efficient in measuring training set suitability without training any models. Moreover, BoP require no image labels, making it more suitable for assessing the difficulty of unlabeled test sets.", + "bbox": [ + 75, + 553, + 468, + 854 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Dataset-to-dataset similarity. We briefly review three strategies. First, some dataset similarity measures are developed in the context of domain adaptation [2, 9, 10, 85].", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "They typically depend on a loss function and hypothesis class, and use a supremum of that function class to quantify the similarity of datasets. (e.g., $\\mathcal{H}\\Delta \\mathcal{H}$ -divergence [9], $f$ -divergence [2], and $\\mathcal{A}$ -distance [10]). Second, dataset distance can be computed based on optimal transport [5, 17, 79]. For example, the squared Wasserstein metric Fréchet distance [27] is widely used in comparing the distribution discrepancy of generated images with the distribution of real images [39]. To better leverage the geometric relationship between datasets, Alvarez et al. [5] use labels to guide optimal transport towards class-coherent matches. Third, existing dataset representations can be used to compute dataset distance [33, 62, 75, 81]. For example, maximum mean discrepancy (MMD) [33] computes the distance between mean elements of distributions on the probability space. Peng et al. [62] eliminate dataset discrepancy by matching datasets moments. CORAL [75] uses the second-order statistics of datasets to measure distance. This work is in the third category and uses JS divergence between BoP representations to calculate dataset-to-dataset similarity.", + "bbox": [ + 496, + 90, + 890, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Assessment of training dataset suitability. Recent works have focused on understanding the importance of individual training instances in training of neural networks [6,32,45,56]. For example, Data Shapley [32] and Consistency Score [45] are proposed to evaluate the value of each data instance. Some methods identify \"difficult\" instances based on the information of training dynamics [7,76,80].", + "bbox": [ + 496, + 401, + 890, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Different from the above approaches, this work studies the suitability of an entire training set. Given multiple training datasets from different data distributions, the focus is to choose the most appropriate training dataset for the target domain. Dataset-to-dataset similarity can be used for this goal. Intuitively, if a training dataset has high similarity with a target dataset, the model trained on it is expected to be more performant and vice versa. 
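To make this concrete, the sketch below scores each candidate training set by the Jensen-Shannon divergence between its BoP histogram and that of the target set, and selects the closest one. It uses the standard definition $\mathrm{JS}(P\|Q)=\tfrac{1}{2}\mathrm{KL}(P\|M)+\tfrac{1}{2}\mathrm{KL}(Q\|M)$ with $M=\tfrac{1}{2}(P+Q)$; the small smoothing constant is an assumption added here to guard against empty prototype bins and is not part of the method description.

```python
import numpy as np


def js_divergence(p, q, eps: float = 1e-12) -> float:
    """Jensen-Shannon divergence between two BoP histograms."""
    p = np.asarray(p, dtype=float) + eps
    q = np.asarray(q, dtype=float) + eps
    p, q = p / p.sum(), q / q.sum()
    m = 0.5 * (p + q)
    return float(0.5 * np.sum(p * np.log(p / m)) + 0.5 * np.sum(q * np.log(q / m)))


def most_suitable_training_set(target_bop, candidate_bops):
    """Rank candidate training sets: smaller BoP + JS means higher suitability."""
    distances = [js_divergence(h, target_bop) for h in candidate_bops]
    return int(np.argmin(distances)), distances
```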
In this work, we use BoP representation coupled with simple JS divergence to calculate dataset-to-dataset similarity and demonstrate its effectiveness in accessing training set suitability.", + "bbox": [ + 496, + 515, + 890, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Assessing test set difficulty without ground truths. The goal of this task (also known as unsupervised accuracy estimation) is to predict the accuracy of a given model on various unlabeled test sets. Existing methods usually use a representation of the test set for accuracy prediction [13, 19, 20, 30, 34]. Normally this representation is derived from classifier outputs, such as image features [20], prediction logits [30], average softmax scores [34]. Then, regression is used to establish the relationship between this representation and model test accuracy under various testing environments. Compared with existing dataset features, the BoP representation better characterizes the semantic distribution of training and test sets and thus can be effectively used for model accuracy prediction.", + "bbox": [ + 496, + 689, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2882", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 89, + 210, + 107 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Bag-of-Words Model Across Communities", + "text_level": 1, + "bbox": [ + 76, + 114, + 437, + 131 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In natural language processing (NLP) and information retrieval, the Bag-of-Words (BoW) model [46, 47, 50, 57] vectorizes textual data as a word histogram. Specifically, for each word in the dictionary, its occurrences in a document are counted, which fills in the corresponding entry of the BoW feature. This word frequency vector is thus used to represent a document. Numerous improvements of the BoW feature were made in NLP, such as n-grams [47, 50] and term frequency-inverse document frequency [68].", + "bbox": [ + 75, + 138, + 468, + 273 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the early 2000s, the BoW representation was introduced to the computer vision (CV) community to encode hundreds or thousands of local image descriptors [8, 53] into a compact vector [73]. As there is no semantic codebook like in NLP, a visual codebook is constructed by performing clustering (e.g., k-means) on a collection of local image features, where the resulting clustering centers are called \"visual words\". Local image descriptors are quantized to their nearest cluster center so that a visual word histogram can be computed. This BoW histogram also have undergone extensive improvements in later years, such as Fisher vector [64, 65], vector of locally aggregated descriptors (VLAD) [43], and the use of principal component analysis and whitening [42].", + "bbox": [ + 75, + 275, + 470, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contribution statement. This paper contributes a baseline method in adopting the BoW idea study the two basic properties of a dataset. To this end, we propose to represent a dataset using its histogram over a series of prototypes. A comparison between the usage of BoW model in NLP, CV and our dataset-level research is shown in Table 1. 
Specifically, the BoP representation relies on clustering for codebook formation, has a relatively small codebook (depending on the richness of dataset semantics), and has semantically sensible codewords.", + "bbox": [ + 75, + 487, + 468, + 638 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Bag-of-Prototypes Dataset Representation", + "text_level": 1, + "bbox": [ + 76, + 648, + 434, + 665 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a dataset $\\mathcal{D} = \\{\\mathbf{x}_i\\}_{i=1}^N$ where $N$ is the number of images and a feature extractor $\\mathbf{F}(\\cdot)$ that maps an input image into a $d$ -dimensional feature $f \\in \\mathbb{R}^d$ , we extract a set of image features $\\mathcal{F} := \\{\\mathbf{F}(\\mathbf{x}_i)\\}_{i=1}^N$ . While it is possible to directly use the dataset images (or features) as model input under small $N$ , it becomes prohibitively expensive when $N$ is large. We therefore focus on extracting useful semantic features of $\\mathcal{F}$ by encoding its image features into a compact representation. Below we detail the necessary steps for computing the proposed BoP representation (refer Fig. 1).", + "bbox": [ + 75, + 671, + 468, + 823 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step I: Codebook generation. Given a reference dataset $\\mathcal{D}_r = \\{\\mathbf{x}_i^r\\}_{i=1}^{N_r}$ , we extract all of its image features $\\mathcal{F}_r := \\{\\mathbf{F}(\\mathbf{x}_i^r)\\}_{i=1}^{N_r}$ using a pretrained network, from which a codebook is constructed. Specifically, we adopt standard k-means clustering [54] to partition the feature space $\\mathbb{R}^d$", + "bbox": [ + 75, + 823, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/8cbef4372750199df233303ee4da52934471e12b37b8aef3deae0e253489455e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BoW in NLPBoW in CVBoP
Encoded objectsDocumentsImagesDatasets (a set of images)
Codewords in codebookWordsCluster centers of local descriptorsPrototypes of image features
Clustering?NoYesYes
Codewords semanticsClearLittleSensible
Codebook size>103103-106~102(dataset dependent)
", + "bbox": [ + 501, + 89, + 893, + 237 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. Comparing BoP with BoW model in natural language processing (NLP) and computer vision (CV). The objective of BoW in NLP and CV is encoding texts and images respectively, while BoP is proposed to represent datasets.", + "bbox": [ + 496, + 250, + 893, + 306 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "into $K$ clusters. Each of the $K$ cluster centers is called a \"prototype\", because oftentimes each center mainly represents a certain semantic content. See Fig. 1 right for exemplar image of each prototype. The prototypes, or centers, constitute the codebook, denoted as $\\mathcal{C} = \\{\\mathbf{c}_i\\}_{i=1}^K$ , where $\\mathbf{c}_i$ is the $i$ -th prototype. Note that, the order of the prototypes is fixed in the codebook.", + "bbox": [ + 496, + 323, + 890, + 428 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step II: Histogram computation. For a dataset to be encoded $\\mathcal{D}_e = \\{\\mathbf{x}_i^e\\}_{i=1}^{N_e}$ where $N_e$ is the number of images, we project it onto codebook $\\mathcal{C}$ of size $K$ to compute its BoP representation. Specifically, after extracting image features $\\mathcal{F}_e := \\{\\mathbf{F}(\\mathbf{x}_i^e)\\}_{i=1}^{N_e}$ from $\\mathcal{D}_e$ , for each image feature, we compute its distance with all the $K$ prototypes in the codebook, yielding $K$ distances $d_1, \\ldots, d_k$ , where $d_i$ is the distance between an image feature and the $i$ -th prototype. An image feature is quantized to prototype $c_i$ if $d_i$ is the lowest among $d_1, \\ldots, d_k$ . Following the quantization, we generate a $K$ -dimensional one-hot encoding where the $i$ -th entry is 1 and all the others are 0. Having computed the one-hot vectors for all the image features, we sum them which is then normalized by $N_e$ , the number of images in $D_e$ . This gives the histogram representation $\\mathbf{h}_e$ , or BoP representation, for $D_e$ where the $i$ -th entry indicates the density of features in $D_e$ belonging to prototype $\\mathbf{c}_i$ .", + "bbox": [ + 496, + 429, + 892, + 686 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Measuring Dataset-to-Dataset Similarity", + "text_level": 1, + "bbox": [ + 500, + 696, + 846, + 712 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Similar to image / document retrieval where BoW vectors of instances are used for similarity comparison [14, 26, 59, 66, 73], this work uses the BoP representation to calculate dataset-to-dataset similarity. Specifically, given BoP representations $\\mathbf{h}_x$ and $\\mathbf{h}_y$ of two datasets $\\mathcal{D}_x$ and $\\mathcal{D}_y$ , we simply define their similarity $S_{x,y}$ using Jensen-Shannon divergence (JS divergence), which is designed for histogram-based similarity measurement [16, 55].", + "bbox": [ + 496, + 719, + 890, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Task-oriented similarity measure. We can build a universal codebook on a large-scale dataset following BoW model [14, 86]. By doing so, the resulting BoP representations are generic. We can also build a task-oriented code", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "2883", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/15458d234ddfea33e58d1cb767cc770f7d3f294ee12622c7525847f7ef78d0c4.jpg", + "image_caption": [ + "Figure 1. 
Workflow of BoP representation computation using CIFAR-10 [49] and one CIFAR-10 out-of-distribution (OOD) test set as an example. Top: We group image features of the reference dataset CIFAR-10 into 10 clusters, and the centers are called prototypes. The prototypes constitute the codebook of size 10. Bottom left: To encode the OOD test set, we project it onto the codebook by quantizing each image feature to its corresponding prototype. Lastly, we compute the histogram, i.e., BoP representation, of CIFAR-10 OOD test set. Bottom right: We regard dataset-to-dataset similarity as the Jensen-Shannon divergence between BoP histograms of CIFAR-10 OOD test set and reference dataset. With such similarity, we can measure the test set difficulty for the model trained on reference dataset." + ], + "image_footnote": [], + "bbox": [ + 166, + 99, + 803, + 354 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "book on a reference dataset from a specific task to consider more task-oriented information. The latter is more suitable for the two dataset-level tasks considered in this work. For the task of training set suitability assessment, we use the target dataset as the reference for codebook generation to fully consider its semantic information. As a result, the JS divergence between BoP representations of the training set and the target dataset can well capture how a training set is similar to the target set. Similarly, for the task of test set difficulty assessment, we build codebook on the training set. This practice can effectively measure how an unlabeled test is similar to a given training set.", + "bbox": [ + 75, + 453, + 472, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4. Discussion", + "text_level": 1, + "bbox": [ + 76, + 648, + 194, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Working mechanism of BoP. Codebook generation of BoP can be viewed as Centroidal Voronoi Tessellations [24]. Specifically, the prototypes (cluster centers) of codebook tessellate the feature space into Voronoi cells. Then, histogram computation approximates a probability distribution function in the same way as the nonparametric histogram [12, 28, 67]. That is, the BoP representation reflects the distribution of a dataset in the feature space.", + "bbox": [ + 75, + 672, + 468, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Fig. 1, the prototypes of reference dataset tessellate feature space into Voronoi cells. Based on this, we quantify the histogram of the reference dataset to represent its distribution. Given a new dataset, we conduct the same histogram calculation procedure and correspondingly capture its dataset distribution with the histogram. Then, we measure discrepancy of the two datasets by calculating JS", + "bbox": [ + 75, + 795, + 470, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "divergence between their histograms. Compared with common measures of dataset distance (e.g., FD [27], KID [11] and MMD [33]) that only reflect global structure (e.g., first few moments) of dataset distributions, BoP, collaborated with JS divergence, considers more local structures.", + "bbox": [ + 496, + 453, + 890, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training set suitability vs. transferability estimation. Two tasks relate but differ significantly: 1) Given an unlabeled target dataset and a pool of training datasets, the former aims to select the most suitable training set for the target. 
The latter assumes a labeled target dataset and a pool of models pretrained on a source dataset, with the goal of selecting the most suitable source model for the target without fine-tuning them all [3,4,60]; 2) Datasets in training set suitability are used for the same classification problem. In contrast, in transferability estimation, the problem in the target dataset (e.g., CIFAR-10 classification) is different from that of the source dataset (e.g. ImageNet classification).", + "bbox": [ + 496, + 534, + 892, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Analysis of the number of prototypes in a codebook. The codebook size is a critical factor influencing the usefulness of the BoP. A small codebook means a coarser partition of feature space, where similar features will likely be in the same cluster, but dissimilar features may also be in the same cluster. Moreover, a large codebook provides a finer description of the space, where dissimilar features are quantized to different prototypes and more semantics are explored. According to our experiment results in Fig. 2 and Fig. 5, we find, reassuringly, BoP is robust against the codebook size: prototype number can deviate within a wide range around the true classes number (e.g., 345 for Domain-", + "bbox": [ + 496, + 719, + 893, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "2884", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Net [62]) without significantly affecting performance.", + "bbox": [ + 76, + 90, + 434, + 106 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Application scope and future directions. BoP is proposed to study the two dataset-level tasks, and the datasets considered in each task share the same label space. We may encounter some situations where we need to compare datasets with different label spaces (e.g., pre-training datasets selection [1]). In this case, one potential way is to build a universal codebook on a large-scale and representative dataset similar to BoW models [14, 86]. By doing so, the resulting BoP representations can encode diverse and sufficient semantics for comparing datasets across various label spaces. We view our BoP as a starting point to encode datasets. It would be interesting to study other dataset vectorization schemes and dataset-level tasks.", + "bbox": [ + 75, + 106, + 470, + 303 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Comparing Training Suitability of Datasets", + "text_level": 1, + "bbox": [ + 76, + 315, + 465, + 333 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This task studies dataset valuation where multiple training sets are provided by different data contributors. The goal is to select the most suitable training set (ideally without training) whose trained model performs the best on a target test set. In this section, we first validate that BoP, collaborated with JS divergence $(\\mathrm{BoP} + \\mathrm{JS})$ , is predictive of dataset suitability for the target test set. Then, we show that BoP is robust when using a wide range of codebook sizes and different networks.", + "bbox": [ + 75, + 340, + 470, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 76, + 484, + 284, + 501 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Correlation study under DomainNet setup. 
We use domain generalization benchmark DomainNet [62], which consists of 6 domains: Painting, Real, Infograph, Quickdraw, Sketch and ClipArt, where the tasks are 345-way object classification. Each domain has its training and test splits. We conduct the correlation study in an leave-one-out manner, leading to 6 groups of correlation studies, with each group using the test split of one domain as the target test set. Additionally, we apply image transformations to the training split of six original domains. Specifically, we employ 'Cartoon' [48], 'Zoom Blur' and 'JPEG Compression' [36] to convert domains' style to be one specific type. We also use 'AugMix' [38] and 'AutoAugment' [15], which transform images with various operations to generate domains with mixed styles. This process synthesizes 30 new datasets, so we have 36 training sets in total.", + "bbox": [ + 75, + 508, + 468, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We follow the training scheme provided by TLlib [44] to train ResNet-101 model [35], whose weights are pretrained on ImageNet [18], yielding 36 models. Moreover, penultimate outputs of pretrained ResNet-101 is used as image feature. On the test set, we generate a codebook of size 1000. Then, for each training set, we compute its BoP histogram, $\\mathrm{BoP} + \\mathrm{JS}$ from the test set, and the accuracy of its trained model on the test set. After this, we calculate correlation strength between $\\mathrm{BoP} + \\mathrm{JS}$ and model accuracy to evaluate whether BoP is indicative of datasets training suitability.", + "bbox": [ + 75, + 750, + 470, + 902 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2249d97e7310739c4877deae7739deac5cdfdcf80fcc2096a3569889cf85ebdd.jpg", + "table_caption": [], + "table_footnote": [ + "Table 2. Compare averaged Pearson's correlation $(r)$ , Spearman's correlation $(\\rho)$ and weighted Kendall's correlation $(\\tau_w)$ of Fréchet distance (FD), maximum mean discrepancy (MMD), kernel inception distance (KID) and BoP + JS (codebook size 1000) on six test sets in DomainNet. We report two groups of results using ResNet-34 (Left) and ResNet-101 (Right). We show BoP + JS is more effective in assessing training set suitability than others." + ], + "table_body": "
MethodResNet-34ResNet-101
rρτwrρτw
FD [27]-0.860-0.926-0.828-0.903-0.902-0.802
MMD [33]-0.817-0.801-0.691-0.821-0.817-0.704
KID [11]-0.773-0.904-0.804-0.876-0.896-0.800
BoP + JS-0.960-0.927-0.840-0.961-0.929-0.840
", + "bbox": [ + 501, + 90, + 897, + 209 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metric. We use Pearson's correlation $r$ and Spearman's rank correlation $\\rho$ to show linearity and monotonicity between BoP-based dataset distance and model accuracy, respectively. Both metrics range in $[-1, 1]$ . If $|r|$ or $|\\rho|$ is close to 1, the linearity or monotonicity is strong, and vice versa. In addition to these two metrics, we also use weighted variant of Kendall's correlation $(\\tau_w)$ [83]. It is shown to be useful when selecting the best ranked item is of interest [71], while a major application of BoP + JS is to select the training dataset leading to the best performance on a test set. This metric has the same range where a number closer to -1 or 1 indicates stronger negative or positive correlation, respectively, and 0 means no correlation.", + "bbox": [ + 496, + 335, + 893, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Evaluation", + "text_level": 1, + "bbox": [ + 500, + 542, + 620, + 558 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Strong correlation: A training set is more suitable for a given test set if it has small $\\mathrm{BoP} + \\mathrm{JS}$ . Fig. 2 shows correlation study on ClipArt, Painting, Real and Sketch. We notice that there are strong Pearson's correlations $(|r| > 0.95)$ , Spearman's rank correlations $(|\\rho| > 0.93)$ and relatively high weighted Kendall's correlations $(|\\tau_w| > 0.84)$ on four test sets. This suggests that $\\mathrm{BoP} + \\mathrm{JS}$ is stable and useful across test sets. Table 2 compares average correlation strength of $\\mathrm{BoP} + \\mathrm{JS}$ with Fréchet distance (FD) [27], maximum mean discrepancy (MMD) [33] and kernel inception distance (KID) [11]. They use that same image features as BoP. According to their formulae, mean and covariance of these features are used for distance computation. We see that $\\mathrm{BoP} + \\mathrm{JS}$ has the highest average correlation scores on six test sets $(|r| = 0.961$ , $|\\rho| = 0.929$ and $|\\tau_w| = 0.840)$ . On average, $\\mathrm{BoP} + \\mathrm{JS}$ is superior in depicting training sets suitability for a test set without any training procedure.", + "bbox": [ + 496, + 566, + 893, + 824 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Impact of codebook size is shown in the Fig. 3. We construct codebooks with different size within approximately one order of magnitude around 345. We find that the three correlation scores increase and then become stable when codebook size becomes larger. This indicates that the per", + "bbox": [ + 496, + 825, + 893, + 902 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "2885", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/95aed49b03c7f756627bc5a2bc9af9e0427f44b529d54773a3deefe245f6a65b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 259, + 89, + 725, + 119 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/5643a69bbc88e3d4d9a753f5785172c67698b9964d7b4f8d56690ca73bce5432.jpg", + "image_caption": [ + "Figure 2. Correlation study for training suitability of datasets. We report the correlation strength between $\\mathrm{BoP} + \\mathrm{JS}$ and model classification accuracy on four test domains of DomainNet: ClipArt, Painting, Real and Sketch. The model architecture is ResNet-101. Each dot denotes a model trained on a training set of DomainNet. 
We mark training domains (e.g., ClipArt) by different shapes and transformation operations (e.g., AugMix) by different colors. The straight lines are fit with robust linear regression [41]. We consistently observe high correlation results: Pearson's correlation $(|r| > 0.95)$ , Spearman's correlation $(|\\rho| > 0.93)$ and weighted Kendall's correlation $(|\\tau_w| > 0.84)$ . This suggests that $\\mathrm{BoP} + \\mathrm{JS}$ is predictive of the suitability of a training set." + ], + "image_footnote": [], + "bbox": [ + 83, + 121, + 292, + 239 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0386ccc80c9f44dfd12666803e30c349d2ba213c099709ceee4c3972e2c9dc26.jpg", + "image_caption": [ + "BoP + JS divergence" + ], + "image_footnote": [], + "bbox": [ + 292, + 119, + 488, + 241 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/19d3fe321a43c59bd2607bc43646f863c24e6eb1dbe637e93448cef3667208f7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 119, + 687, + 241 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8220335a4926c5656209fb26dbfb4c0cbb1fdfd8bc794248aa9badf98a5ac04e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 121, + 883, + 241 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0d07af07218366662e123a19c2b4bd0df58a255cf0d1b79e074f809502fe6e86.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 357, + 267, + 474 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/372efe534f400d85614618c9636645bf3dbfc4ce8b181ae6c02f6007fffd42c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 269, + 357, + 444, + 474 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/024137bd89d4a2148c03aca8c6f37dd30fa586d2abb7999a397e6531ef3b5631.jpg", + "image_caption": [ + "Figure 3. Impact of codebook size on correlation strength for ResNet-101 on four test domains: ClipArt, Painting, Real and Sketch. For example, on Real domain, correlation scores $|\\rho|$ , $|r|$ and $|\\tau_w|$ are relatively low under a small size and remain stably high when the size is greater than 400." + ], + "image_footnote": [], + "bbox": [ + 81, + 474, + 267, + 594 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4a5dcf19bff65ebb62bb994a02a31bdeea36c09dbb093f5201e1a943fa64cda5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 269, + 474, + 446, + 594 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "formance $\\mathrm{BoP} + \\mathrm{JS}$ is overall consistent.", + "bbox": [ + 76, + 679, + 346, + 693 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Correlation study with a different model architecture. We additionally validate the robustness of BoP for ResNet-34 with codebook size 1000. As shown in Table 2, we compare the average correlation scores of $\\mathrm{BoP} + \\mathrm{JS}$ , FD, MMD and KID. We see that $\\mathrm{BoP} + \\mathrm{JS}$ has consistent performance on two models and remains preferable to characterize training suitability.", + "bbox": [ + 75, + 694, + 468, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5. Assessing Test Set Difficulty without Labels", + "text_level": 1, + "bbox": [ + 76, + 814, + 465, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the task of test set difficulty assessment, we are provided with a labeled training set and a set of unlabeled datasets for testing. 
Given a classifier trained on the training set, the goal is to estimate the model accuracy on these", + "bbox": [ + 75, + 840, + 470, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "test sets without any data annotations. In this section, we first show dataset distance measured by $\\mathrm{BoP} + \\mathrm{JS}$ exhibits strong negative correlation with classifier accuracy. We then demonstrate an accuracy predictor based on the BoP representation gives accurate performance estimates compared to state-of-the-art methods.", + "bbox": [ + 496, + 356, + 893, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1. Experimental Settings", + "text_level": 1, + "bbox": [ + 500, + 454, + 707, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Correlation study under CIFAR-10 setup. We conduct a correlation study by comparing $\\mathrm{BoP} + \\mathrm{JS}$ with classifier accuracy. Following the same setup in [21], we use a series of datasets sharing the same label space (but usually with distribution shift) with CIFAR-10 [49]. Specifically, we train ResNet-44 classifier [35] on the training set of CIFAR-10, which consists of 50,000 images from 10 classes. Here, we use the CIFAR-10-C benchmark [37] for correlation study, which contains different types of corruptions with 5 levels of severity including per-pixel noise, blurring, synthetic weather effects, and digital transforms. Then, for each dataset, we compute its BoP vector, its $\\mathrm{BoP} + \\mathrm{JS}$ from CIFAR-10 training set and the classifier accuracy. In addition to ResNet-44, we also study the RepVGG-A1 [22], VGG-16-BN [72] and MobileNet-V2 [70].", + "bbox": [ + 496, + 477, + 890, + 703 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Predicting classification accuracy under CIFAR-10 setup. We train a regressor that takes as input the BoP representation and outputs classification accuracy. The regressor is a neural network with 3 fully connected layers and trained on CIFAR-10-C (regression training set). We evaluate accuracy prediction on CIFAR-10.1 [69], CIFAR-10.2 [69] and CIFAR-10.2- $\\tilde{C}$ [58]. The former two are real-world datasets with natural shift, while the latter one is manually corrupted by the same synthetic shift as [58]. Specifically, we add 10 types of unseen and unique corruptions such as warps, blurs, color distortions and noise additions, with 5 severity levels to CIFAR-10.2. 
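The accuracy predictor described above is specified only as a neural network with 3 fully connected layers that maps a BoP representation to classification accuracy; the PyTorch sketch below fills in the unspecified details (hidden width, activation, training objective) with illustrative assumptions rather than the authors' exact configuration.

```python
import torch
import torch.nn as nn


class BoPAccuracyRegressor(nn.Module):
    """Three fully connected layers mapping a K-dim BoP histogram to accuracy."""

    def __init__(self, num_prototypes: int, hidden: int = 128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(num_prototypes, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, bop_hist: torch.Tensor) -> torch.Tensor:
        return self.net(bop_hist).squeeze(-1)


# Illustrative regression objective on (BoP histogram, accuracy) pairs
# gathered from the CIFAR-10-C regression training set:
# loss = nn.MSELoss()(model(bop_batch), accuracy_batch)
```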
Note that, these corruptions have no overlap with those in CIFAR-10-C [58].", + "bbox": [ + 496, + 704, + 892, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "2886", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/114a0f7deadcda905a7b3d8b52c58f98e9d3ce35f4f318968c9d0d4f2b4fcac5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 81, + 89, + 300, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e3da439484b5169a327ac2fdf0d47882f557f84145d690c13fd972e0756eb678.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 90, + 500, + 214 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2b69af6e1794263a51140d7ffbb1d30e5b4ae43439e9cba6ffbe19ed4ef07647.jpg", + "image_caption": [ + "BoP + JS divergence" + ], + "image_footnote": [], + "bbox": [ + 501, + 90, + 687, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/04cfb99844c04a664155c54583e873e2c7606978a5c1800a3b719884868423a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 90, + 883, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f04c9c9a95bb02dacba75c08fa5d445492eda752c47f895aec31a1b307c913fc.jpg", + "image_caption": [ + "Figure 4. Correlation between train-test distance measured by $\\mathrm{BoP} + \\mathrm{JS}$ and model accuracy. Top: Correlation study under CIFAR-10 setup using ResNet-44, RepVGG-A1, VGG-16-BN and MobileNet-V2. Each data point denotes a dataset from CIFAR-10-C. Bottom: Correlation study under ImageNet setup using EfficientNet-B1, DenseNet-121, Inception-V4 and ViT-Base-16. ImageNet-C datasets are used as test sets. The straight lines are fit with robust linear regression [41]. Under both setups, we observe the strong Spearman's rank correlation $(|\\rho| > 0.98)$ between $\\mathrm{BoP} + \\mathrm{JS}$ and model accuracy." + ], + "image_footnote": [], + "bbox": [ + 83, + 227, + 302, + 349 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7ec349e4283c573447053b3cd207667876f6ca26f0753912b835be7cad463ca4.jpg", + "image_caption": [ + "BoP + JS divergence" + ], + "image_footnote": [], + "bbox": [ + 305, + 228, + 495, + 349 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/25f24f8e80a721fc770a535be332d7efa631d636219482958ec1a09be9a8c349.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 228, + 687, + 349 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/da8fda1eb4dcc90dc0c5c19bbd31ea26b60a823346a9746205f4496c89ac2c79.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 689, + 228, + 879, + 349 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For the above, we extract image features (output of penultimate layer of ResNet-44) from CIFAR-10 training set. We construct a codebook by dividing the features into 80 clusters with k-means.", + "bbox": [ + 75, + 446, + 470, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Correlation study under ImageNet setup. We use DenseNet-121 [40] classifier trained on ImageNet training set. We employ a series of datasets from the ImageNet-C benchmark [36] where the classifier is tested. ImageNet-C uses the same types of corruptions as CIFAR-10-C. We construct a codebook of size 1000 on the ImageNet training set from which images features are extracted by the penultimate layer of DenseNet-121. 
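As one way to obtain these penultimate-layer features, the following sketch uses torchvision's DenseNet-121 and reproduces the network's own global pooling before the classifier head. The `weights` argument follows recent torchvision releases (older versions use `pretrained=True` instead), and standard ImageNet preprocessing of `images` is assumed.

```python
import torch
import torch.nn.functional as F
from torchvision import models


@torch.no_grad()
def densenet121_penultimate(images: torch.Tensor, model=None) -> torch.Tensor:
    """Return global-pooled features before the classifier, shape (B, 1024)."""
    if model is None:
        model = models.densenet121(weights="IMAGENET1K_V1").eval()
    feats = model.features(images)                 # (B, 1024, H', W') feature maps
    feats = F.relu(feats, inplace=True)
    return F.adaptive_avg_pool2d(feats, (1, 1)).flatten(1)
```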
We project each dataset in ImageNet-C onto the codebook and obtain their BoP representations. When exhibiting linear correlations, we calculate $\\mathrm{BoP} + \\mathrm{JS}$ between each ImageNet-C dataset and the training set, and compute classification accuracy. We also use EfficientNet-B1 [78], Inception-V4 [77] and ViT-Base-16 [23] to repeat above procedure for correlation study.", + "bbox": [ + 75, + 507, + 468, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation metric. Same as Section 4.1, we use Pearson's correlation $r$ and Spearman's rank correlation $\\rho$ to show linearity and monotonicity between BoP based dataset distance and model accuracy, respectively. To evaluate the effectiveness of accuracy estimation, we use root mean squared error (RMSE) by calculating the difference between estimated accuracy and ground truth before taking the mean across all the test sets. A larger RMSE means a less accurate prediction, and vice versa.", + "bbox": [ + 75, + 719, + 468, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Compared methods. We compare our system with four existing ones. 1) Prediction score: it estimates model accuracy using the maximum of Softmax output (i.e., confidence", + "bbox": [ + 75, + 854, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "score). An image with a confidence score greater than a predefined threshold $\\tau \\in [0,1]$ is considered correctly predicted. We select two thresholds ( $\\tau = 0.8$ and $0.9$ ). 2) Difference of confidence (DoC) [34] trains a linear regressor mapping average confidence to classifier accuracy; 3) Average thresholded confidence with maximum confidence score function (ATC-MC) [31] calculates a threshold on CIFAR-10 validation set and regards an image with a confidence score higher than the threshold as correctly classified; 4) Network regression $(\\mu + \\sigma + FD)$ [21] trains a neural network that takes as input the feature mean, covariance and Fréchet distance between a set of interest and training set and outputs model accuracy. All methods, if applicable, are compared under the same conditions as our system, e.g., classification training set and regression training set.", + "bbox": [ + 496, + 446, + 890, + 675 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Evaluation", + "text_level": 1, + "bbox": [ + 500, + 681, + 620, + 695 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Strong correlation: A test set is difficult (low accuracy) if it is dissimilar to the training set using $\\mathrm{BoP} + \\mathrm{JS}$ . Fig. 4 presents the correlation study of two setups and various classifiers. We observe a very high Spearman's rank correlation $(|\\rho| > 0.99)$ in all the scenarios. It indicates that classification accuracy is highly correlated with JS divergence between BoPs of training and test sets. That is, test accuracy drops proportionally to the distance between the given training set and a test set. The results demonstrate $\\mathrm{BoP} + \\mathrm{JS}$ between training and test sets is an effective indicator of classification accuracy. 
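For reference, the reported correlation and error metrics can be computed with SciPy and NumPy as sketched below, given paired per-test-set values such as BoP + JS distances (or predicted accuracies) and ground-truth accuracies; the array names are illustrative.

```python
import numpy as np
from scipy import stats


def correlation_metrics(scores, accuracies) -> dict:
    """Pearson's r, Spearman's rho and weighted Kendall's tau_w."""
    return {
        "pearson_r": stats.pearsonr(scores, accuracies)[0],
        "spearman_rho": stats.spearmanr(scores, accuracies)[0],
        "weighted_kendall_tau": stats.weightedtau(scores, accuracies)[0],
    }


def rmse(predicted_acc, true_acc) -> float:
    """Root mean squared error (in %) between estimated and true accuracy."""
    diff = np.asarray(predicted_acc) - np.asarray(true_acc)
    return float(np.sqrt(np.mean(diff ** 2)))
```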
More studies are presented in the supplementary materials.", + "bbox": [ + 496, + 704, + 890, + 885 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of the BoP representation in predict", + "bbox": [ + 517, + 885, + 890, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "2887", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4e91acc407cb219dc26baca5c22e8b4cd7a23d2de4dc3a6d714564222cf8b60b.jpg", + "table_caption": [ + "Table 3. Method comparison in predicting classifier accuracy under CIFAR-10 setup. We compare four methods: predicted score-based method with hard threshold $\\tau$ , neural network regression based on feature statistics $(\\mu + \\sigma + \\mathrm{FD})$ [20], average thresholded confidence with maximum confidence score function (ATC-MC) [30] and difference of confidences (DoC) [34]. We use CIFAR-10.1 and CIFAR-10.2 (both real-world) and CIFAR-10.2- $\\bar{C}$ (manually corrupted) as unseen test sets for accuracy prediction. We use RMSE (\\%) to indicate precision of estimates. In each column, we compare our method with the best of the competing ones. We report results by average of five runs." + ], + "table_footnote": [], + "table_body": "
MethodCIFAR-10.1CIFAR-10.2CIFAR-10.2-C (50)
Severity 1Severity 2Severity 3Severity 4Severity 5Overall
Prediction score (τ = 0.8)4.8994.80010.12712.86916.80921.42724.37117.910
Prediction score (τ = 0.9)0.2970.5503.6385.0788.04811.80414.1089.404
ATC-MC [30]2.6502.6723.0804.3067.10811.01513.0408.601
DoC [34]0.4900.2632.2472.9165.1179.0126.6375.744
μ + σ + FD [21]0.4550.5615.8755.8234.7244.9086.4865.602
BoP (K = 80)0.2180.1222.4582.8183.7305.8366.4514.551
BoP (K = 100)0.1860.1242.8492.8083.5484.0254.7773.663
", + "bbox": [ + 93, + 167, + 877, + 335 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/862be20553eceff7e6b1353d88c7c4c9743ff9cdb50fb1b180001579e19e7436.jpg", + "image_caption": [ + "Figure 5. Impact of codebook size on correlation strength on CIFAR-10-C. Correlation scores $|\\rho|$ and $|r|$ are relatively low under a small size and become stably high when the size is greater than 20 for all four model architectures." + ], + "image_footnote": [], + "bbox": [ + 84, + 344, + 883, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ing classification accuracy on variou unseen test sets. After performing correlation study, we train a neural network regressor on CIFAR-10-C and test it on a series of test sets. Results are summarized in Table 3. We have the following observations. First and foremost, BoP representation achieves the best accuracy prediction performance, evidenced by the lowest RMSE across all the four test scenarios. For example, on the test sets of CIFAR-10.2- $\\bar{C}$ , the RMSE of our method is 4.551, which is 1.051 lower than the second best method [21]. This clearly validates the effectiveness of the BoP representation. Second, we observe that the \"Prediction score\" method is unstable. While it has good results under $\\tau = 0.9$ on CIFAR-10.1 and CIFAR-10.2 datasets, it is generally inferior to the competing methods in other test scenarios. Our observation is similar to [21], suggesting that a more robust threshold selection method is needed for this method. Third, although BoP has slightly higher RMSE than DoC on Severity 1 of CIFAR-10.2- $\\bar{C}$ (2.458 v.s., 2.247), we stress that BoP is overall more stable and effective on real world datasets and other severity levels of synthetic datasets.", + "bbox": [ + 75, + 526, + 472, + 843 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of codebook size is summarized in Fig. 5 under CIFAR-10 setup. We conduct the study using different sizes on four classifiers. We observe correlation scores first in", + "bbox": [ + 75, + 854, + 470, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "crease and then become stable when codebook size is larger than 20. These results are considered validation and help us use competitive and stable codebook sizes in Table 3.", + "bbox": [ + 498, + 526, + 892, + 571 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 585, + 619, + 599 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work introduces a bag-of-prototypes (BoP) dataset representation to vectorize visual datasets. It first computes a codebook composed of clustering prototypes and then a prototype histogram for a dataset. The BoP vector considers the underlying local semantic distribution of a dataset and is thus more discriminative than global dataset statistics. Specifically, when used in conjunction with JS divergence, the proposed descriptor better captures the underlying relationship across datasets. This advantage is validated by its promising results in two dataset-level tasks: assessing training set suitability and test set difficulty. 
This work establishes the baseline usage of the BoP scheme, and more investigations and applications will be made in future work.", + "bbox": [ + 496, + 609, + 893, + 806 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 500, + 820, + 668, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thank all anonymous reviewers for their constructive comments in improving this paper. This work was supported by the ARC Discovery Project (DP210102801).", + "bbox": [ + 496, + 845, + 892, + 891 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "2888", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Alessandro Achille, Michael Lam, Rahul Tewari, Avinash Ravichandran, Subhransu Maji, Charless C Fowlkes, Stefano Soatto, and Pietro Perona. Task2vec: Task embedding for meta-learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6430-6439, 2019. 1, 2, 5", + "[2] David Acuna, Guojun Zhang, Marc T Law, and Sanja Fidler. f-domain adversarial learning: Theory and algorithms. In International Conference on Machine Learning, pages 66-75, 2021. 2", + "[3] Andrea Agostinelli, Michal Pandy, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. How stable are transferability metrics evaluations? In European Conference on Computer Vision, pages 303-321. Springer, 2022. 4", + "[4] Andrea Agostinelli, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. Transferability metrics for selecting source model ensembles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7936-7946, 2022. 4", + "[5] David Alvarez-Melis and Nicolo Fusi. Geometric dataset distances via optimal transport. In Advances in Neural Information Processing Systems, pages 21428-21439, 2020. 2", + "[6] Lora Aroyo, Matthew Lease, Praveen Paritosh, and Mike Schaekermann. Data excellence for ai: why should you care? Interactions, 29(2):66-69, 2022. 1, 2", + "[7] Robert Baldock, Hartmut Maennel, and Behnam Neyshabur. Deep learning through the lens of example difficulty. Advances in Neural Information Processing Systems, 34:10876-10889, 2021. 2", + "[8] Herbert Bay, Tinne Tuytelaars, and Luc Van Gool. Surf: Speeded up robust features. In European Conference on Computer Vision, pages 404-417. Springer, 2006. 3", + "[9] Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Vaughan. A theory of learning from different domains. Machine Learning, 79:151-175, 2010. 2", + "[10] Shai Ben-David, John Blitzer, Koby Crammer, and Fernando Pereira. Analysis of representations for domain adaptation. In Advances in Neural Information Processing Systems, pages 137-144, 2006. 2", + "[11] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 4, 5", + "[12] Barry Boots, Kokichi Sugihara, Sung Nok Chiu, and Atsuyuki Okabe. Spatial tessellations: concepts and applications of voronoi diagrams. 2009. 4", + "[13] Jiefeng Chen, Frederick Liu, Besim Avci, Xi Wu, Yingyu Liang, and Somesh Jha. Detecting errors and estimating accuracy on unlabeled data with self-training ensembles. In Advances in Neural Information Processing Systems, 2021. 
2", + "[14] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2, 2004. 3, 5" + ], + "bbox": [ + 78, + 114, + 470, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019. 5", + "[16] Ido Dagan, Lillian Lee, and Fernando Pereira. Similarity-based methods for word sense disambiguation. In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics and Eighth Conference of the European Chapter of the Association for Computational Linguistics, pages 56-63, 1997. 3", + "[17] Julie Delon and Agnes Desolneux. A wasserstein-type distance in the space of gaussian mixture models. SIAM Journal on Imaging Sciences, 13(2):936-970, 2020. 2", + "[18] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 1, 5", + "[19] Weijian Deng, Stephen Gould, and Liang Zheng. What does rotation prediction tell us about classifier accuracy under varying testing environments? In International Conference on Machine Learning, 2021. 2", + "[20] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15069-15078, 2021. 1, 2, 8", + "[21] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 6, 7, 8", + "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13733–13742, 2021. 6", + "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In Proceedings of the International Conference on Learning Representations, 2021. 7", + "[24] Qiang Du, Vance Faber, and Max Gunzburger. Centroidal voronoi tessellations: Applications and algorithms. SIAM review, 41(4):637-676, 1999. 4", + "[25] Sabri Eyuboglu, Bojan Karlaš, Christopher Ré, Ce Zhang, and James Zou. dcbench: A benchmark for data-centric ai systems. In Proceedings of the Sixth Workshop on Data Management for End-To-End Machine Learning, 2022. 1", + "[26] L. Fei-Fei and P. Perona. A bayesian hierarchical model for learning natural scene categories. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 524–531, 2005. 3", + "[27] Maurice Fréchet. Sur la distance de deux lois de probabilité. Comptes Rendus Hebdomadaires des Seances de L'Académie des Sciences, 244(6):689-692, 1957. 
2, 4, 5" + ], + "bbox": [ + 501, + 92, + 892, + 900 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "2889", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] David Freedman and Persi Diaconis. On the histogram as a density estimator: L 2 theory. Zeitschrift für Wahrscheinlichkeitstheorie und verwandte Gebiete, 57(4):453-476, 1981. 4", + "[29] Jianglin Fu, Shikai Li, Yuming Jiang, Kwan-Yee Lin, Chen Qian, Chen Change Loy, Wayne Wu, and Ziwei Liu. Stylegan-human: A data-centric odyssey of human generation. In European Conference on Computer Vision, pages 1-19. Springer, 2022. 1", + "[30] Saurabh Garg, Sivaraman Balakrishnan, Zachary C Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In International Conference on Learning Representations, 2022, 2, 8", + "[31] Saurabh Garg, Sivaraman Balakrishnan, Zachary Chase Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In Proceedings of the International Conference on Learning Representations, 2022. 7", + "[32] Amirata Ghorbani and James Zou. Data shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning, pages 2242-2251. PMLR, 2019. 1, 2", + "[33] Arthur Gretton, Karsten Borgwardt, Malte Rasch, Bernhard Schölkopf, and Alex Smola. A kernel method for the two-sample-problem. In Advances in Neural Information Processing Systems, 2006. 2, 4, 5", + "[34] Devin Guillery, Vaishaal Shankar, Sayna Ebrahimi, Trevor Darrell, and Ludwig Schmidt. Predicting with confidence on unseen distributions. In Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, pages 1134-1144, 2021. 2, 7, 8", + "[35] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. 5, 6", + "[36] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 5, 7", + "[37] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 6", + "[38] Dan Hendrycks, Norman Mu, Ekin D Cubuk, Barret Zoph, Justin Gilmer, and Balaji Lakshminarayanan. Augmix: A simple data processing method to improve robustness and uncertainty. In Proceedings of the International Conference on Learning Representations, 2020. 5", + "[39] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems, 2017. 2", + "[40] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4700-4708, 2017. 7" + ], + "bbox": [ + 78, + 90, + 468, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[41] Peter J Huber. Robust statistics. In International Encyclopedia of Statistical Science, pages 1248-1251. Springer, 2011. 6, 7", + "[42] Hervé Jégou and Ondrej Chum. 
Negative evidences and co-occurrences in image retrieval: The benefit of pca and whiten-ing. In European Conference on Computer Vision, pages 774–787, 2012. 3", + "[43] Hervé Jégou, Matthijs Douze, Cordelia Schmid, and Patrick Pérez. Aggregating local descriptors into a compact image representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3304-3311, 2010. 3", + "[44] Junguang Jiang, Baixu Chen, Bo Fu, and Mingsheng Long. Transfer-learning-library. https://github.com/thuml/Transfer-Learning-Library, 2020.5", + "[45] Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, and Michael C Mozer. Characterizing structural regularities of labeled data in overparameterized models. arXiv preprint arXiv:2002.03206, 2020. 2", + "[46] Thorsten Joachims. Text categorization with support vector machines: Learning with many relevant features. In European Conference on Machine Learning, pages 137-142, 1998. 3", + "[47] Thorsten Joachims, Dayne Freitag, Tom Mitchell, et al. Webwatcher: A tour guide for the world wide web. In International Joint Conference on Artificial Intelligence, pages 770-777. CiteSeer, 1997. 3", + "[48] Alexander B. Jung. imgaug. https://github.com/ aleju/imgaug, 2018. [Online; accessed 30-Oct-2018]. 5", + "[49] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009. 4, 6", + "[50] David D Lewis and William A Gale. A sequential algorithm for training text classifiers. In Proceedings of the Seventeenth Annual International ACM-SIGIR Conference on Research and Development in Information Retrieval, pages 3-12, 1994. 3", + "[51] Weixin Liang, Girmaw Abebe Tadesse, Daniel Ho, L Fei-Fei, Matei Zaharia, Ce Zhang, and James Zou. Advances, challenges and opportunities in creating data for trustworthy ai. Nature Machine Intelligence, 4(8):669–677, 2022. 1", + "[52] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European Conference on Computer Vision, pages 740-755, 2014. 1", + "[53] D.G. Lowe. Object recognition from local scale-invariant features. In Proceedings of the IEEE International Conference on Computer Vision, pages 1150-1157, 1999. 3", + "[54] James MacQueen et al. Some methods for classification and analysis of multivariate observations. In Proceedings of the Fifth Berkeley Symposium on Mathematical statistics and probability, volume 1, pages 281-297. Oakland, CA, USA, 1967. 3", + "[55] Christopher Manning and Hinrich Schutze. Foundations of statistical natural language processing. MIT press, 1999. 3", + "[56] Mark Mazumder, Colby Banbury, Xiaozhe Yao, Bojan Karlas, William Gaviria Rojas, Sudnya Diamos, Greg Diamos, Lynn He, Douwe Kiela, David Jurado, et al. Dataperf:" + ], + "bbox": [ + 501, + 90, + 890, + 900 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "2890", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Benchmarks for data-centric ai development. arXiv preprint arXiv:2207.10062, 2022. 1, 2", + "[57] Andrew McCallum, Kamal Nigam, et al. A comparison of event models for naive bayes text classification. In AAAI Workshop on Learning for Text Categorization, pages 41-48, 1998. 3", + "[58] Eric Mintun, Alexander Kirillov, and Saining Xie. On interaction between augmentations and corruptions in natural corruption robustness. In Advances in Neural Information Processing Systems, 2021. 
6", + "[59] David Nister and Henrik Stewenius. Scalable recognition with a vocabulary tree. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2161-2168, 2006. 3", + "[60] Michal Páncy, Andrea Agostinelli, Jasper Uijlings, Vittorio Ferrari, and Thomas Mensink. Transferability estimation using bhattacharyya class separability. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9172-9182, 2022. 4", + "[61] Amandalynne Paullada, Inioluwa Deborah Raji, Emily Bender, Emily Denton, and Alex Hanna. Data and its (dis)contents: A survey of dataset development and use in machine learning research. *Patterns*, 2021. 1", + "[62] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1406-1415, 2019. 1, 2, 5", + "[63] Xingchao Peng, Yichen Li, and Kate Saenko. Domain2vec: Domain embedding for unsupervised domain adaptation. In European Conference on Computer Vision, pages 756-774, 2020. 1, 2", + "[64] Florent Perronnin and Christopher Dance. Fisher kernels on visual vocabularies for image categorization. In Proceedings of the IEEE International Conference on Computer Vision, pages 1-8, 2007. 3", + "[65] Florent Perronnin, Jorge Sánchez, and Thomas Mensink. Improving the fisher kernel for large-scale image classification. In European Conference on Computer Vision, pages 143-156, 2010. 3", + "[66] James Philbin, Ondrej Chum, Michael Isard, Josef Sivic, and Andrew Zisserman. Object retrieval with large vocabularies and fast spatial matching. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3", + "[67] Vladislav Polianskii, Giovanni Luca Marchetti, Alexander Kravberg, Anastasiia Varava, Florian T Pokorny, and Danica Kragic. Voronoi density estimator for high-dimensional data: Computation, compactification and convergence. In Uncertainty in Artificial Intelligence, pages 1644-1653. PMLR, 2022. 4", + "[68] Anand Rajaraman and Jeffrey David Ullman. Mining of massive datasets. Cambridge University Press, 2011. 3", + "[69] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do cifar-10 classifiers generalize to cifar-10? arXiv preprint arXiv:1806.00451, 2018. 6", + "[70] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted" + ], + "bbox": [ + 78, + 92, + 470, + 900 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "residuals and linear bottlenecks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4510-4520, 2018. 6", + "[71] Grace S Shieh. A weighted kendall's tau statistic. Statistics & Probability Letters, 39(1):17-24, 1998. 5", + "[72] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6", + "[73] Sivic and Zisserman. Video google: a text retrieval approach to object matching in videos. In Proceedings of the IEEE International Conference on Computer Vision, pages 1470-1477, 2003. 3", + "[74] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of frustratingly easy domain adaptation. In Proceedings of the AAAI Conference on Artificial Intelligence, 2016. 1, 2", + "[75] Baochen Sun and Kate Saenko. Deep coral: Correlation alignment for deep domain adaptation. 
In European Conference on Computer Vision, pages 443-450, 2016. 1, 2", + "[76] Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A Smith, and Yejin Choi. Dataset cartography: Mapping and diagnosing datasets with training dynamics. arXiv preprint arXiv:2009.10795, 2020. 2", + "[77] Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander A Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Thirty-first AAAI Conference on Artificial Intelligence, 2017. 7", + "[78] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114, 2019. 7", + "[79] Yang Tan, Yang Li, and Shao-Lun Huang. Otce: A transferability metric for cross-domain cross-task representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15779-15788, 2021. 2", + "[80] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forgetting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 2", + "[81] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 2", + "[82] Joaquin Vanschoren. Meta-learning: A survey. arXiv preprint arXiv:1810.03548, 2018. 1, 2", + "[83] Sebastiano Vigna. A weighted correlation index for rankings with ties. In Proceedings of International Conference on World Wide Web, pages 1166-1176, 2015. 5", + "[84] Wei Ying, Yu Zhang, Junzhou Huang, and Qiang Yang. Transfer learning via learning to transfer. In International Conference on Machine Learning, pages 5085-5094. PMLR, 2018. 2", + "[85] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael Jordan. Bridging theory and algorithm for domain adaptation. In International Conference on Machine Learning, pages 7404-7413, 2019. 2" + ], + "bbox": [ + 501, + 92, + 892, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "2891", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[86] Liang Zheng, Yi Yang, and Qi Tian. Sift meets cnn: A decade survey of instance retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(5):1224-1244, 2017. 3, 5", + "[87] Yujie Zhong, Relja Arandjelovic, and Andrew Zisserman. Ghostvlad for set-based face recognition. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part II 14, pages 35–50. Springer, 2019. 
2" + ], + "bbox": [ + 78, + 90, + 468, + 218 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "2892", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_model.json b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0002fde0d09d83dd84b8aac822aed2521095a9a8 --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_model.json @@ -0,0 +1,2435 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.147, + 0.131, + 0.825, + 0.154 + ], + "angle": 0, + "content": "A Bag-of-Prototypes Representation for Dataset-Level Applications" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.18, + 0.731, + 0.199 + ], + "angle": 0, + "content": "Weijie \\(\\mathrm{Tu}^{1}\\) Weijian Deng\\(^{1}\\) Tom Gedeon\\(^{2}\\) Liang Zheng\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.199, + 0.699, + 0.217 + ], + "angle": 0, + "content": "\\(^{1}\\)Australian National University \\(^{2}\\)Curtin University" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.252, + 0.314, + 0.268 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.284, + 0.473, + 0.661 + ], + "angle": 0, + "content": "This work investigates dataset vectorization for two dataset-level tasks: assessing training set suitability and test set difficulty. The former measures how suitable a training set is for a target domain, while the latter studies how challenging a test set is for a learned model. Central to the two tasks is measuring the underlying relationship between datasets. This needs a desirable dataset vectorization scheme, which should preserve as much discriminative dataset information as possible so that the distance between the resulting dataset vectors can reflect dataset-to-dataset similarity. To this end, we propose a bag-of-prototypes (BoP) dataset representation that extends the image-level bag consisting of patch descriptors to dataset-level bag consisting of semantic prototypes. Specifically, we develop a codebook consisting of \\( K \\) prototypes clustered from a reference dataset. Given a dataset to be encoded, we quantize each of its image features to a certain prototype in the codebook and obtain a \\( K \\)-dimensional histogram. Without assuming access to dataset labels, the BoP representation provides rich characterization of the dataset semantic distribution. Furthermore, BoP representations cooperate well with Jensen-Shannon divergence for measuring dataset-to-dataset similarity. Although very simple, BoP consistently shows its advantage over existing representations on a series of benchmarks for two dataset-level tasks." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.693, + 0.21, + 0.71 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.72, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Datasets are fundamental in machine learning research, forming the basis of model training and testing [18, 51, 52, 61]. While large-scale datasets bring opportunities in algorithm design, there lack proper tools to analyze and make the best use of them [6, 51, 56]. Therefore, as opposed to traditional algorithm-centric research where improving models is of primary interest, the community has seen a growing interest in understanding and analyzing the data used for developing models [51, 56]. Recent examples of such goal include data synthesis [29], data sculpting [25, 51], and data valuation [6, 32, 56]. These tasks typically focus on individual sample of a dataset. In this work, we aim to understand" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.254, + 0.834, + 0.269 + ], + "angle": 0, + "content": "nature of datasets from a dataset-level perspective." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.272, + 0.892, + 0.501 + ], + "angle": 0, + "content": "This work considers two dataset-level tasks: suitability in training and difficulty in testing. First, training set suitability denotes whether a training set is suitable for training models for a target dataset. In real-world applications, we are often provided with multiple training sets from various data distributions (e.g., universities and hospitals). Due to distribution shift, their trained models have different performance on the target dataset. Then, it is of high practical value to select the most suitable training set for the target dataset. Second, test set difficulty means how challenging a test set is for a learned model. In practice, test sets are usually unlabeled and often come from different distributions than that of the training set. Measuring the test set difficulty for a learned model helps us understand the model reliability, thereby ensuring safe model deployment." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.504, + 0.893, + 0.685 + ], + "angle": 0, + "content": "The core of the two dataset-level tasks is to measure the relationship between datasets. For example, a training set is more suitable for learning a model if it is more similar to the target dataset. To this end, we propose a vectorization scheme to represent a dataset. Then, the relationship between a pair of datasets can be simply reflected by the distance between their representations. Yet, it is challenging to encode a dataset as a representative vector, because (i) a dataset has a different cardinality (number of images) and (ii) each image has its own semantic content (e.g., category). It is thus critical to find an effective way to aggregate all image features to uncover dataset semantic distributions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.895, + 0.902 + ], + "angle": 0, + "content": "In the literature, some researchers use the first few moments of distributions such as feature mean and co-variance to represent datasets [20, 62, 74, 75, 82]. While being computational friendly, these methods do not offer sufficiently strong descriptive ability of a dataset, such as class distributions, and thus have limited effectiveness in assessing attributes related to semantics. There are also some methods learn task-specific dataset representations [1, 63]. 
For example, given a dataset with labels and a task loss function, Task2Vec [1] computes an embedding based on estimates of the Fisher information matrix associated with a probe network's parameters. While these task-specific representations are able to predict task similarities, they are not suitable for characterizing dataset properties of interest. They" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.958 + ], + "angle": 0, + "content": "2881" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.166 + ], + "angle": 0, + "content": "require training a network on the specific task [1] or on multiple datasets [63], so they are not effective in assessing the training set suitability. Additionally, they require image labels for the specific task, so they cannot be used to measure the difficulty of unlabeled test sets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.168, + 0.47, + 0.348 + ], + "angle": 0, + "content": "In this work, we propose a simple and effective bag-of-prototypes (BoP) dataset representation. Its computation starts with partitioning the image feature space into semantic regions through clustering, where the region centers, or prototypes, form a codebook. Given a new dataset, we quantize its features to their corresponding prototypes and compute an assignment histogram, which, after normalization, gives the BoP representation. The dimensionality of BoP equals the codebook size, which is usually a few hundred and is considered memory-efficient. Meanwhile, the histogram computed on the prototypes is descriptive of the dataset semantic distribution." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.349, + 0.47, + 0.515 + ], + "angle": 0, + "content": "Apart from being low dimensional and semantically rich, BoP has a few other advantages. First, while recent works in task-specific dataset representation usually require full image annotations and additional learning procedure [1,63], the computation of BoP does not rely on any. It is relatively efficient and allows for unsupervised assessment of dataset attributes. Second, BoP supports dataset-to-dataset similarity measurement through Jensen-Shannon divergence. We show in our experiment that this similarity is superior to commonly used metrics such as Fréchet distance [27] and maximum mean discrepancy [33] in two dataset-level tasks." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.528, + 0.22, + 0.544 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.554, + 0.47, + 0.855 + ], + "angle": 0, + "content": "Dataset representations. A common practice is to use simple and generic statistics as dataset representations [20, 62, 74, 75, 82]. For example, Peng et al. [62] use the first moment to represent a dataset. Deng et al. [20] use global feature mean and co-variance as dataset representations. Vanschoeren et al. [82] find dataset cardinality (the number of images/classes) useful to encode a dataset. These methods have limited descriptive ability, whereas BoP is more semantically descriptive. Moreover, it is feasible to learn a task-specific dataset representation [1, 63, 84, 87]. For example, Ying et al. [84] learn transfer skills from previous transfer learning experiences for future target tasks. Achille et al. [1] propose to learn a task embedding based on the estimate of Fisher information matrix associated with a task loss. 
Compared with these task-specific representations, BoP is hand-crafted, avoiding computation overheads incurred by end-to-end learning. It is thus efficient in measuring training set suitability without training any models. Moreover, BoP require no image labels, making it more suitable for assessing the difficulty of unlabeled test sets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Dataset-to-dataset similarity. We briefly review three strategies. First, some dataset similarity measures are developed in the context of domain adaptation [2, 9, 10, 85]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.394 + ], + "angle": 0, + "content": "They typically depend on a loss function and hypothesis class, and use a supremum of that function class to quantify the similarity of datasets. (e.g., \\(\\mathcal{H}\\Delta \\mathcal{H}\\)-divergence [9], \\(f\\)-divergence [2], and \\(\\mathcal{A}\\)-distance [10]). Second, dataset distance can be computed based on optimal transport [5, 17, 79]. For example, the squared Wasserstein metric Fréchet distance [27] is widely used in comparing the distribution discrepancy of generated images with the distribution of real images [39]. To better leverage the geometric relationship between datasets, Alvarez et al. [5] use labels to guide optimal transport towards class-coherent matches. Third, existing dataset representations can be used to compute dataset distance [33, 62, 75, 81]. For example, maximum mean discrepancy (MMD) [33] computes the distance between mean elements of distributions on the probability space. Peng et al. [62] eliminate dataset discrepancy by matching datasets moments. CORAL [75] uses the second-order statistics of datasets to measure distance. This work is in the third category and uses JS divergence between BoP representations to calculate dataset-to-dataset similarity." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.402, + 0.892, + 0.508 + ], + "angle": 0, + "content": "Assessment of training dataset suitability. Recent works have focused on understanding the importance of individual training instances in training of neural networks [6,32,45,56]. For example, Data Shapley [32] and Consistency Score [45] are proposed to evaluate the value of each data instance. Some methods identify \"difficult\" instances based on the information of training dynamics [7,76,80]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.516, + 0.892, + 0.682 + ], + "angle": 0, + "content": "Different from the above approaches, this work studies the suitability of an entire training set. Given multiple training datasets from different data distributions, the focus is to choose the most appropriate training dataset for the target domain. Dataset-to-dataset similarity can be used for this goal. Intuitively, if a training dataset has high similarity with a target dataset, the model trained on it is expected to be more performant and vice versa. In this work, we use BoP representation coupled with simple JS divergence to calculate dataset-to-dataset similarity and demonstrate its effectiveness in accessing training set suitability." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Assessing test set difficulty without ground truths. The goal of this task (also known as unsupervised accuracy estimation) is to predict the accuracy of a given model on various unlabeled test sets. 
Existing methods usually use a representation of the test set for accuracy prediction [13, 19, 20, 30, 34]. Normally this representation is derived from classifier outputs, such as image features [20], prediction logits [30], average softmax scores [34]. Then, regression is used to establish the relationship between this representation and model test accuracy under various testing environments. Compared with existing dataset features, the BoP representation better characterizes the semantic distribution of training and test sets and thus can be effectively used for model accuracy prediction." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "2882" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.078, + 0.09, + 0.212, + 0.108 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.115, + 0.438, + 0.132 + ], + "angle": 0, + "content": "3.1. Bag-of-Words Model Across Communities" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.139, + 0.47, + 0.275 + ], + "angle": 0, + "content": "In natural language processing (NLP) and information retrieval, the Bag-of-Words (BoW) model [46, 47, 50, 57] vectorizes textual data as a word histogram. Specifically, for each word in the dictionary, its occurrences in a document are counted, which fills in the corresponding entry of the BoW feature. This word frequency vector is thus used to represent a document. Numerous improvements of the BoW feature were made in NLP, such as n-grams [47, 50] and term frequency-inverse document frequency [68]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.276, + 0.471, + 0.488 + ], + "angle": 0, + "content": "In the early 2000s, the BoW representation was introduced to the computer vision (CV) community to encode hundreds or thousands of local image descriptors [8, 53] into a compact vector [73]. As there is no semantic codebook like in NLP, a visual codebook is constructed by performing clustering (e.g., k-means) on a collection of local image features, where the resulting clustering centers are called \"visual words\". Local image descriptors are quantized to their nearest cluster center so that a visual word histogram can be computed. This BoW histogram also have undergone extensive improvements in later years, such as Fisher vector [64, 65], vector of locally aggregated descriptors (VLAD) [43], and the use of principal component analysis and whitening [42]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.488, + 0.47, + 0.639 + ], + "angle": 0, + "content": "Contribution statement. This paper contributes a baseline method in adopting the BoW idea study the two basic properties of a dataset. To this end, we propose to represent a dataset using its histogram over a series of prototypes. A comparison between the usage of BoW model in NLP, CV and our dataset-level research is shown in Table 1. Specifically, the BoP representation relies on clustering for codebook formation, has a relatively small codebook (depending on the richness of dataset semantics), and has semantically sensible codewords." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.649, + 0.436, + 0.666 + ], + "angle": 0, + "content": "3.2. 
Bag-of-Prototypes Dataset Representation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.672, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Given a dataset \\(\\mathcal{D} = \\{\\mathbf{x}_i\\}_{i=1}^N\\) where \\(N\\) is the number of images and a feature extractor \\(\\mathbf{F}(\\cdot)\\) that maps an input image into a \\(d\\)-dimensional feature \\(f \\in \\mathbb{R}^d\\), we extract a set of image features \\(\\mathcal{F} := \\{\\mathbf{F}(\\mathbf{x}_i)\\}_{i=1}^N\\). While it is possible to directly use the dataset images (or features) as model input under small \\(N\\), it becomes prohibitively expensive when \\(N\\) is large. We therefore focus on extracting useful semantic features of \\(\\mathcal{F}\\) by encoding its image features into a compact representation. Below we detail the necessary steps for computing the proposed BoP representation (refer Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.824, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Step I: Codebook generation. Given a reference dataset \\(\\mathcal{D}_r = \\{\\mathbf{x}_i^r\\}_{i=1}^{N_r}\\), we extract all of its image features \\(\\mathcal{F}_r := \\{\\mathbf{F}(\\mathbf{x}_i^r)\\}_{i=1}^{N_r}\\) using a pretrained network, from which a codebook is constructed. Specifically, we adopt standard k-means clustering [54] to partition the feature space \\(\\mathbb{R}^d\\)" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.09, + 0.895, + 0.238 + ], + "angle": 0, + "content": "
 | BoW in NLP | BoW in CV | BoP
Encoded objects | Documents | Images | Datasets (a set of images)
Codewords in codebook | Words | Cluster centers of local descriptors | Prototypes of image features
Clustering? | No | Yes | Yes
Codewords semantics | Clear | Little | Sensible
Codebook size | >10^3 | 10^3–10^6 | ~10^2 (dataset dependent)
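To make the contrast in the table above concrete, here is a minimal sketch of how a BoP representation can be computed (Steps I and II described below in the text). Feature extraction is assumed to have happened already; the random arrays, dimensionalities, and function names are placeholders rather than the authors' code.

    import numpy as np
    from sklearn.cluster import KMeans

    def build_codebook(reference_features, K):
        # Step I: cluster reference-dataset features; the K centers act as prototypes.
        return KMeans(n_clusters=K, n_init=10, random_state=0).fit(reference_features)

    def bop_histogram(codebook, features):
        # Step II: quantize each feature to its nearest prototype and normalize counts.
        assignments = codebook.predict(features)
        hist = np.bincount(assignments, minlength=codebook.n_clusters).astype(float)
        return hist / hist.sum()

    # Toy usage: random vectors stand in for penultimate-layer image features.
    rng = np.random.default_rng(0)
    ref_feats = rng.normal(size=(2000, 64))   # reference dataset features (d = 64)
    new_feats = rng.normal(size=(1500, 64))   # dataset to be encoded
    codebook = build_codebook(ref_feats, K=10)
    bop = bop_histogram(codebook, new_feats)  # 10-dimensional BoP vector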
" + }, + { + "type": "table_caption", + "bbox": [ + 0.498, + 0.251, + 0.894, + 0.307 + ], + "angle": 0, + "content": "Table 1. Comparing BoP with BoW model in natural language processing (NLP) and computer vision (CV). The objective of BoW in NLP and CV is encoding texts and images respectively, while BoP is proposed to represent datasets." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.324, + 0.892, + 0.429 + ], + "angle": 0, + "content": "into \\(K\\) clusters. Each of the \\(K\\) cluster centers is called a \"prototype\", because oftentimes each center mainly represents a certain semantic content. See Fig. 1 right for exemplar image of each prototype. The prototypes, or centers, constitute the codebook, denoted as \\(\\mathcal{C} = \\{\\mathbf{c}_i\\}_{i=1}^K\\), where \\(\\mathbf{c}_i\\) is the \\(i\\)-th prototype. Note that, the order of the prototypes is fixed in the codebook." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.43, + 0.893, + 0.688 + ], + "angle": 0, + "content": "Step II: Histogram computation. For a dataset to be encoded \\(\\mathcal{D}_e = \\{\\mathbf{x}_i^e\\}_{i=1}^{N_e}\\) where \\(N_e\\) is the number of images, we project it onto codebook \\(\\mathcal{C}\\) of size \\(K\\) to compute its BoP representation. Specifically, after extracting image features \\(\\mathcal{F}_e := \\{\\mathbf{F}(\\mathbf{x}_i^e)\\}_{i=1}^{N_e}\\) from \\(\\mathcal{D}_e\\), for each image feature, we compute its distance with all the \\(K\\) prototypes in the codebook, yielding \\(K\\) distances \\(d_1, \\ldots, d_k\\), where \\(d_i\\) is the distance between an image feature and the \\(i\\)-th prototype. An image feature is quantized to prototype \\(c_i\\) if \\(d_i\\) is the lowest among \\(d_1, \\ldots, d_k\\). Following the quantization, we generate a \\(K\\)-dimensional one-hot encoding where the \\(i\\)-th entry is 1 and all the others are 0. Having computed the one-hot vectors for all the image features, we sum them which is then normalized by \\(N_e\\), the number of images in \\(D_e\\). This gives the histogram representation \\(\\mathbf{h}_e\\), or BoP representation, for \\(D_e\\) where the \\(i\\)-th entry indicates the density of features in \\(D_e\\) belonging to prototype \\(\\mathbf{c}_i\\)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.697, + 0.848, + 0.713 + ], + "angle": 0, + "content": "3.3. Measuring Dataset-to-Dataset Similarity" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.892, + 0.84 + ], + "angle": 0, + "content": "Similar to image / document retrieval where BoW vectors of instances are used for similarity comparison [14, 26, 59, 66, 73], this work uses the BoP representation to calculate dataset-to-dataset similarity. Specifically, given BoP representations \\(\\mathbf{h}_x\\) and \\(\\mathbf{h}_y\\) of two datasets \\(\\mathcal{D}_x\\) and \\(\\mathcal{D}_y\\), we simply define their similarity \\(S_{x,y}\\) using Jensen-Shannon divergence (JS divergence), which is designed for histogram-based similarity measurement [16, 55]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Task-oriented similarity measure. We can build a universal codebook on a large-scale dataset following BoW model [14, 86]. By doing so, the resulting BoP representations are generic. 
We can also build a task-oriented code" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2883" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.167, + 0.1, + 0.805, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.362, + 0.895, + 0.446 + ], + "angle": 0, + "content": "Figure 1. Workflow of BoP representation computation using CIFAR-10 [49] and one CIFAR-10 out-of-distribution (OOD) test set as an example. Top: We group image features of the reference dataset CIFAR-10 into 10 clusters, and the centers are called prototypes. The prototypes constitute the codebook of size 10. Bottom left: To encode the OOD test set, we project it onto the codebook by quantizing each image feature to its corresponding prototype. Lastly, we compute the histogram, i.e., BoP representation, of CIFAR-10 OOD test set. Bottom right: We regard dataset-to-dataset similarity as the Jensen-Shannon divergence between BoP histograms of CIFAR-10 OOD test set and reference dataset. With such similarity, we can measure the test set difficulty for the model trained on reference dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.454, + 0.473, + 0.637 + ], + "angle": 0, + "content": "book on a reference dataset from a specific task to consider more task-oriented information. The latter is more suitable for the two dataset-level tasks considered in this work. For the task of training set suitability assessment, we use the target dataset as the reference for codebook generation to fully consider its semantic information. As a result, the JS divergence between BoP representations of the training set and the target dataset can well capture how a training set is similar to the target set. Similarly, for the task of test set difficulty assessment, we build codebook on the training set. This practice can effectively measure how an unlabeled test is similar to a given training set." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.649, + 0.196, + 0.663 + ], + "angle": 0, + "content": "3.4. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.673, + 0.47, + 0.795 + ], + "angle": 0, + "content": "Working mechanism of BoP. Codebook generation of BoP can be viewed as Centroidal Voronoi Tessellations [24]. Specifically, the prototypes (cluster centers) of codebook tessellate the feature space into Voronoi cells. Then, histogram computation approximates a probability distribution function in the same way as the nonparametric histogram [12, 28, 67]. That is, the BoP representation reflects the distribution of a dataset in the feature space." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.471, + 0.903 + ], + "angle": 0, + "content": "As shown in Fig. 1, the prototypes of reference dataset tessellate feature space into Voronoi cells. Based on this, we quantify the histogram of the reference dataset to represent its distribution. Given a new dataset, we conduct the same histogram calculation procedure and correspondingly capture its dataset distribution with the histogram. Then, we measure discrepancy of the two datasets by calculating JS" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.454, + 0.892, + 0.53 + ], + "angle": 0, + "content": "divergence between their histograms. 
Compared with common measures of dataset distance (e.g., FD [27], KID [11] and MMD [33]) that only reflect global structure (e.g., first few moments) of dataset distributions, BoP, collaborated with JS divergence, considers more local structures." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.535, + 0.893, + 0.716 + ], + "angle": 0, + "content": "Training set suitability vs. transferability estimation. Two tasks relate but differ significantly: 1) Given an unlabeled target dataset and a pool of training datasets, the former aims to select the most suitable training set for the target. The latter assumes a labeled target dataset and a pool of models pretrained on a source dataset, with the goal of selecting the most suitable source model for the target without fine-tuning them all [3,4,60]; 2) Datasets in training set suitability are used for the same classification problem. In contrast, in transferability estimation, the problem in the target dataset (e.g., CIFAR-10 classification) is different from that of the source dataset (e.g. ImageNet classification)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.72, + 0.895, + 0.902 + ], + "angle": 0, + "content": "Analysis of the number of prototypes in a codebook. The codebook size is a critical factor influencing the usefulness of the BoP. A small codebook means a coarser partition of feature space, where similar features will likely be in the same cluster, but dissimilar features may also be in the same cluster. Moreover, a large codebook provides a finer description of the space, where dissimilar features are quantized to different prototypes and more semantics are explored. According to our experiment results in Fig. 2 and Fig. 5, we find, reassuringly, BoP is robust against the codebook size: prototype number can deviate within a wide range around the true classes number (e.g., 345 for Domain-" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2884" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.435, + 0.107 + ], + "angle": 0, + "content": "Net [62]) without significantly affecting performance." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.107, + 0.471, + 0.304 + ], + "angle": 0, + "content": "Application scope and future directions. BoP is proposed to study the two dataset-level tasks, and the datasets considered in each task share the same label space. We may encounter some situations where we need to compare datasets with different label spaces (e.g., pre-training datasets selection [1]). In this case, one potential way is to build a universal codebook on a large-scale and representative dataset similar to BoW models [14, 86]. By doing so, the resulting BoP representations can encode diverse and sufficient semantics for comparing datasets across various label spaces. We view our BoP as a starting point to encode datasets. It would be interesting to study other dataset vectorization schemes and dataset-level tasks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.316, + 0.466, + 0.334 + ], + "angle": 0, + "content": "4. Comparing Training Suitability of Datasets" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.341, + 0.471, + 0.477 + ], + "angle": 0, + "content": "This task studies dataset valuation where multiple training sets are provided by different data contributors. The goal is to select the most suitable training set (ideally without training) whose trained model performs the best on a target test set. 
In this section, we first validate that BoP, collaborated with JS divergence \\((\\mathrm{BoP} + \\mathrm{JS})\\), is predictive of dataset suitability for the target test set. Then, we show that BoP is robust when using a wide range of codebook sizes and different networks." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.485, + 0.285, + 0.502 + ], + "angle": 0, + "content": "4.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.509, + 0.47, + 0.75 + ], + "angle": 0, + "content": "Correlation study under DomainNet setup. We use domain generalization benchmark DomainNet [62], which consists of 6 domains: Painting, Real, Infograph, Quickdraw, Sketch and ClipArt, where the tasks are 345-way object classification. Each domain has its training and test splits. We conduct the correlation study in an leave-one-out manner, leading to 6 groups of correlation studies, with each group using the test split of one domain as the target test set. Additionally, we apply image transformations to the training split of six original domains. Specifically, we employ 'Cartoon' [48], 'Zoom Blur' and 'JPEG Compression' [36] to convert domains' style to be one specific type. We also use 'AugMix' [38] and 'AutoAugment' [15], which transform images with various operations to generate domains with mixed styles. This process synthesizes 30 new datasets, so we have 36 training sets in total." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.471, + 0.903 + ], + "angle": 0, + "content": "We follow the training scheme provided by TLlib [44] to train ResNet-101 model [35], whose weights are pretrained on ImageNet [18], yielding 36 models. Moreover, penultimate outputs of pretrained ResNet-101 is used as image feature. On the test set, we generate a codebook of size 1000. Then, for each training set, we compute its BoP histogram, \\(\\mathrm{BoP} + \\mathrm{JS}\\) from the test set, and the accuracy of its trained model on the test set. After this, we calculate correlation strength between \\(\\mathrm{BoP} + \\mathrm{JS}\\) and model accuracy to evaluate whether BoP is indicative of datasets training suitability." + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.091, + 0.898, + 0.21 + ], + "angle": 0, + "content": "
Method | ResNet-34 | ResNet-101
 | r | ρ | τ_w | r | ρ | τ_w
FD [27] | -0.860 | -0.926 | -0.828 | -0.903 | -0.902 | -0.802
MMD [33] | -0.817 | -0.801 | -0.691 | -0.821 | -0.817 | -0.704
KID [11] | -0.773 | -0.904 | -0.804 | -0.876 | -0.896 | -0.800
BoP + JS | -0.960 | -0.927 | -0.840 | -0.961 | -0.929 | -0.840
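The correlations reported above (r, ρ, τ_w) can be reproduced from per-training-set distances and accuracies with standard SciPy routines. A small sketch under assumed inputs (the synthetic numbers below only mimic the negative trend and are not the paper's data):

    import numpy as np
    from scipy import stats

    def correlation_report(distances, accuracies):
        # Pearson's r, Spearman's rho, and weighted Kendall's tau_w between
        # BoP + JS train-test distances and model accuracies.
        r, _ = stats.pearsonr(distances, accuracies)
        rho, _ = stats.spearmanr(distances, accuracies)
        tau_w, _ = stats.weightedtau(distances, accuracies)
        return {"r": r, "rho": rho, "tau_w": tau_w}

    # Synthetic example: accuracy decreases as the BoP + JS distance grows.
    rng = np.random.default_rng(0)
    d = rng.uniform(0.0, 0.7, 36)                      # 36 candidate training sets
    acc = 80.0 - 60.0 * d + rng.normal(0.0, 2.0, 36)   # accuracy drops with distance
    print(correlation_report(d, acc))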
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.498, + 0.219, + 0.895, + 0.318 + ], + "angle": 0, + "content": "Table 2. Compare averaged Pearson's correlation \\((r)\\), Spearman's correlation \\((\\rho)\\) and weighted Kendall's correlation \\((\\tau_w)\\) of Fréchet distance (FD), maximum mean discrepancy (MMD), kernel inception distance (KID) and BoP + JS (codebook size 1000) on six test sets in DomainNet. We report two groups of results using ResNet-34 (Left) and ResNet-101 (Right). We show BoP + JS is more effective in assessing training set suitability than others." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.336, + 0.895, + 0.533 + ], + "angle": 0, + "content": "Evaluation metric. We use Pearson's correlation \\( r \\) and Spearman's rank correlation \\( \\rho \\) to show linearity and monotonicity between BoP-based dataset distance and model accuracy, respectively. Both metrics range in \\([-1, 1]\\). If \\( |r| \\) or \\( |\\rho| \\) is close to 1, the linearity or monotonicity is strong, and vice versa. In addition to these two metrics, we also use weighted variant of Kendall's correlation \\( (\\tau_w) \\) [83]. It is shown to be useful when selecting the best ranked item is of interest [71], while a major application of BoP + JS is to select the training dataset leading to the best performance on a test set. This metric has the same range where a number closer to -1 or 1 indicates stronger negative or positive correlation, respectively, and 0 means no correlation." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.544, + 0.622, + 0.559 + ], + "angle": 0, + "content": "4.2. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.894, + 0.825 + ], + "angle": 0, + "content": "Strong correlation: A training set is more suitable for a given test set if it has small \\(\\mathrm{BoP} + \\mathrm{JS}\\). Fig. 2 shows correlation study on ClipArt, Painting, Real and Sketch. We notice that there are strong Pearson's correlations \\((|r| > 0.95)\\), Spearman's rank correlations \\((|\\rho| > 0.93)\\) and relatively high weighted Kendall's correlations \\((|\\tau_w| > 0.84)\\) on four test sets. This suggests that \\(\\mathrm{BoP} + \\mathrm{JS}\\) is stable and useful across test sets. Table 2 compares average correlation strength of \\(\\mathrm{BoP} + \\mathrm{JS}\\) with Fréchet distance (FD) [27], maximum mean discrepancy (MMD) [33] and kernel inception distance (KID) [11]. They use that same image features as BoP. According to their formulae, mean and covariance of these features are used for distance computation. We see that \\(\\mathrm{BoP} + \\mathrm{JS}\\) has the highest average correlation scores on six test sets \\((|r| = 0.961\\), \\(|\\rho| = 0.929\\) and \\(|\\tau_w| = 0.840)\\). On average, \\(\\mathrm{BoP} + \\mathrm{JS}\\) is superior in depicting training sets suitability for a test set without any training procedure." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.826, + 0.894, + 0.903 + ], + "angle": 0, + "content": "Impact of codebook size is shown in the Fig. 3. We construct codebooks with different size within approximately one order of magnitude around 345. We find that the three correlation scores increase and then become stable when codebook size becomes larger. 
This indicates that the per" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2885" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.26, + 0.09, + 0.726, + 0.12 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.122, + 0.293, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.121, + 0.49, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.121, + 0.688, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.69, + 0.122, + 0.885, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.453, + 0.246, + 0.555, + 0.258 + ], + "angle": 0, + "content": "BoP + JS divergence" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.265, + 0.895, + 0.349 + ], + "angle": 0, + "content": "Figure 2. Correlation study for training suitability of datasets. We report the correlation strength between \\(\\mathrm{BoP} + \\mathrm{JS}\\) and model classification accuracy on four test domains of DomainNet: ClipArt, Painting, Real and Sketch. The model architecture is ResNet-101. Each dot denotes a model trained on a training set of DomainNet. We mark training domains (e.g., ClipArt) by different shapes and transformation operations (e.g., AugMix) by different colors. The straight lines are fit with robust linear regression [41]. We consistently observe high correlation results: Pearson's correlation \\((|r| > 0.95)\\), Spearman's correlation \\((|\\rho| > 0.93)\\) and weighted Kendall's correlation \\((|\\tau_w| > 0.84)\\). This suggests that \\(\\mathrm{BoP} + \\mathrm{JS}\\) is predictive of the suitability of a training set." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.358, + 0.269, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.358, + 0.446, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.476, + 0.269, + 0.595 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.476, + 0.447, + 0.595 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.601, + 0.47, + 0.673 + ], + "angle": 0, + "content": "Figure 3. Impact of codebook size on correlation strength for ResNet-101 on four test domains: ClipArt, Painting, Real and Sketch. For example, on Real domain, correlation scores \\( |\\rho| \\), \\( |r| \\) and \\( |\\tau_w| \\) are relatively low under a small size and remain stably high when the size is greater than 400." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.68, + 0.347, + 0.694 + ], + "angle": 0, + "content": "formance \\(\\mathrm{BoP} + \\mathrm{JS}\\) is overall consistent." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.695, + 0.47, + 0.803 + ], + "angle": 0, + "content": "Correlation study with a different model architecture. We additionally validate the robustness of BoP for ResNet-34 with codebook size 1000. As shown in Table 2, we compare the average correlation scores of \\(\\mathrm{BoP} + \\mathrm{JS}\\), FD, MMD and KID. We see that \\(\\mathrm{BoP} + \\mathrm{JS}\\) has consistent performance on two models and remains preferable to characterize training suitability." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.815, + 0.466, + 0.833 + ], + "angle": 0, + "content": "5. 
Assessing Test Set Difficulty without Labels" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.471, + 0.903 + ], + "angle": 0, + "content": "In the task of test set difficulty assessment, we are provided with a labeled training set and a set of unlabeled datasets for testing. Given a classifier trained on the training set, the goal is to estimate the model accuracy on these" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.895, + 0.448 + ], + "angle": 0, + "content": "test sets without any data annotations. In this section, we first show dataset distance measured by \\(\\mathrm{BoP} + \\mathrm{JS}\\) exhibits strong negative correlation with classifier accuracy. We then demonstrate an accuracy predictor based on the BoP representation gives accurate performance estimates compared to state-of-the-art methods." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.455, + 0.708, + 0.472 + ], + "angle": 0, + "content": "5.1. Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.478, + 0.892, + 0.704 + ], + "angle": 0, + "content": "Correlation study under CIFAR-10 setup. We conduct a correlation study by comparing \\(\\mathrm{BoP} + \\mathrm{JS}\\) with classifier accuracy. Following the same setup in [21], we use a series of datasets sharing the same label space (but usually with distribution shift) with CIFAR-10 [49]. Specifically, we train ResNet-44 classifier [35] on the training set of CIFAR-10, which consists of 50,000 images from 10 classes. Here, we use the CIFAR-10-C benchmark [37] for correlation study, which contains different types of corruptions with 5 levels of severity including per-pixel noise, blurring, synthetic weather effects, and digital transforms. Then, for each dataset, we compute its BoP vector, its \\(\\mathrm{BoP} + \\mathrm{JS}\\) from CIFAR-10 training set and the classifier accuracy. In addition to ResNet-44, we also study the RepVGG-A1 [22], VGG-16-BN [72] and MobileNet-V2 [70]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.705, + 0.893, + 0.901 + ], + "angle": 0, + "content": "Predicting classification accuracy under CIFAR-10 setup. We train a regressor that takes as input the BoP representation and outputs classification accuracy. The regressor is a neural network with 3 fully connected layers and trained on CIFAR-10-C (regression training set). We evaluate accuracy prediction on CIFAR-10.1 [69], CIFAR-10.2 [69] and CIFAR-10.2-\\(\\tilde{C}\\) [58]. The former two are real-world datasets with natural shift, while the latter one is manually corrupted by the same synthetic shift as [58]. Specifically, we add 10 types of unseen and unique corruptions such as warps, blurs, color distortions and noise additions, with 5 severity levels to CIFAR-10.2. Note that, these corruptions have no overlap with those in CIFAR-10-C [58]." 
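The following is a minimal sketch of this kind of BoP-to-accuracy regressor, assuming scikit-learn; the synthetic arrays, layer widths and training settings are placeholders rather than the exact configuration used in the paper.

```python
import numpy as np
from sklearn.neural_network import MLPRegressor

K = 80  # codebook size, i.e., dimensionality of the BoP vector

# Placeholder regression training data: in practice each row would be the BoP
# vector of one CIFAR-10-C split and its measured classification accuracy (%).
rng = np.random.default_rng(0)
bop_train = rng.dirichlet(np.ones(K), size=75)
acc_train = rng.uniform(40.0, 95.0, size=75)

# Two hidden layers plus the output layer give three fully connected layers.
regressor = MLPRegressor(hidden_layer_sizes=(128, 64), max_iter=2000, random_state=0)
regressor.fit(bop_train, acc_train)

# Estimate accuracy on unseen, unlabeled test sets from their BoP vectors and,
# when ground truth is available, report RMSE over all test sets.
bop_test = rng.dirichlet(np.ones(K), size=10)
acc_true = rng.uniform(40.0, 95.0, size=10)
acc_pred = regressor.predict(bop_test)
rmse = np.sqrt(np.mean((acc_pred - acc_true) ** 2))
print(f"RMSE = {rmse:.3f}")
```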
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2886" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.082, + 0.09, + 0.302, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.091, + 0.5, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.092, + 0.688, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.092, + 0.885, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.217, + 0.551, + 0.225 + ], + "angle": 0, + "content": "BoP + JS divergence" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.228, + 0.303, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.229, + 0.496, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.229, + 0.689, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.691, + 0.229, + 0.88, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.461, + 0.353, + 0.551, + 0.363 + ], + "angle": 0, + "content": "BoP + JS divergence" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.372, + 0.892, + 0.442 + ], + "angle": 0, + "content": "Figure 4. Correlation between train-test distance measured by \\(\\mathrm{BoP} + \\mathrm{JS}\\) and model accuracy. Top: Correlation study under CIFAR-10 setup using ResNet-44, RepVGG-A1, VGG-16-BN and MobileNet-V2. Each data point denotes a dataset from CIFAR-10-C. Bottom: Correlation study under ImageNet setup using EfficientNet-B1, DenseNet-121, Inception-V4 and ViT-Base-16. ImageNet-C datasets are used as test sets. The straight lines are fit with robust linear regression [41]. Under both setups, we observe the strong Spearman's rank correlation \\((|\\rho| > 0.98)\\) between \\(\\mathrm{BoP} + \\mathrm{JS}\\) and model accuracy." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.448, + 0.471, + 0.507 + ], + "angle": 0, + "content": "For the above, we extract image features (output of penultimate layer of ResNet-44) from CIFAR-10 training set. We construct a codebook by dividing the features into 80 clusters with k-means." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.508, + 0.47, + 0.719 + ], + "angle": 0, + "content": "Correlation study under ImageNet setup. We use DenseNet-121 [40] classifier trained on ImageNet training set. We employ a series of datasets from the ImageNet-C benchmark [36] where the classifier is tested. ImageNet-C uses the same types of corruptions as CIFAR-10-C. We construct a codebook of size 1000 on the ImageNet training set from which images features are extracted by the penultimate layer of DenseNet-121. We project each dataset in ImageNet-C onto the codebook and obtain their BoP representations. When exhibiting linear correlations, we calculate \\(\\mathrm{BoP} + \\mathrm{JS}\\) between each ImageNet-C dataset and the training set, and compute classification accuracy. We also use EfficientNet-B1 [78], Inception-V4 [77] and ViT-Base-16 [23] to repeat above procedure for correlation study." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.72, + 0.47, + 0.856 + ], + "angle": 0, + "content": "Evaluation metric. 
Same as Section 4.1, we use Pearson's correlation \\( r \\) and Spearman's rank correlation \\( \\rho \\) to show linearity and monotonicity between BoP based dataset distance and model accuracy, respectively. To evaluate the effectiveness of accuracy estimation, we use root mean squared error (RMSE) by calculating the difference between estimated accuracy and ground truth before taking the mean across all the test sets. A larger RMSE means a less accurate prediction, and vice versa." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Compared methods. We compare our system with four existing ones. 1) Prediction score: it estimates model accuracy using the maximum of Softmax output (i.e., confidence" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.448, + 0.892, + 0.676 + ], + "angle": 0, + "content": "score). An image with a confidence score greater than a predefined threshold \\(\\tau \\in [0,1]\\) is considered correctly predicted. We select two thresholds (\\(\\tau = 0.8\\) and \\(0.9\\)). 2) Difference of confidence (DoC) [34] trains a linear regressor mapping average confidence to classifier accuracy; 3) Average thresholded confidence with maximum confidence score function (ATC-MC) [31] calculates a threshold on CIFAR-10 validation set and regards an image with a confidence score higher than the threshold as correctly classified; 4) Network regression \\((\\mu + \\sigma + FD)\\) [21] trains a neural network that takes as input the feature mean, covariance and Fréchet distance between a set of interest and training set and outputs model accuracy. All methods, if applicable, are compared under the same conditions as our system, e.g., classification training set and regression training set." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.682, + 0.622, + 0.696 + ], + "angle": 0, + "content": "5.2. Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.497, + 0.705, + 0.892, + 0.886 + ], + "angle": 0, + "content": "Strong correlation: A test set is difficult (low accuracy) if it is dissimilar to the training set using \\(\\mathrm{BoP} + \\mathrm{JS}\\). Fig. 4 presents the correlation study of two setups and various classifiers. We observe a very high Spearman's rank correlation \\((|\\rho| > 0.99)\\) in all the scenarios. It indicates that classification accuracy is highly correlated with JS divergence between BoPs of training and test sets. That is, test accuracy drops proportionally to the distance between the given training set and a test set. The results demonstrate \\(\\mathrm{BoP} + \\mathrm{JS}\\) between training and test sets is an effective indicator of classification accuracy. More studies are presented in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Effectiveness of the BoP representation in predict" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2887" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.161 + ], + "angle": 0, + "content": "Table 3. Method comparison in predicting classifier accuracy under CIFAR-10 setup. 
We compare four methods: predicted score-based method with hard threshold \\(\\tau\\), neural network regression based on feature statistics \\((\\mu + \\sigma + \\mathrm{FD})\\) [20], average thresholded confidence with maximum confidence score function (ATC-MC) [30] and difference of confidences (DoC) [34]. We use CIFAR-10.1 and CIFAR-10.2 (both real-world) and CIFAR-10.2-\\(\\bar{C}\\) (manually corrupted) as unseen test sets for accuracy prediction. We use RMSE (\\%) to indicate precision of estimates. In each column, we compare our method with the best of the competing ones. We report results by average of five runs." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.169, + 0.879, + 0.336 + ], + "angle": 0, + "content": "
<table><tr><td rowspan='2'>Method</td><td rowspan='2'>CIFAR-10.1</td><td rowspan='2'>CIFAR-10.2</td><td colspan='6'>CIFAR-10.2-C (50)</td></tr><tr><td>Severity 1</td><td>Severity 2</td><td>Severity 3</td><td>Severity 4</td><td>Severity 5</td><td>Overall</td></tr><tr><td>Prediction score (τ = 0.8)</td><td>4.899</td><td>4.800</td><td>10.127</td><td>12.869</td><td>16.809</td><td>21.427</td><td>24.371</td><td>17.910</td></tr><tr><td>Prediction score (τ = 0.9)</td><td>0.297</td><td>0.550</td><td>3.638</td><td>5.078</td><td>8.048</td><td>11.804</td><td>14.108</td><td>9.404</td></tr><tr><td>ATC-MC [30]</td><td>2.650</td><td>2.672</td><td>3.080</td><td>4.306</td><td>7.108</td><td>11.015</td><td>13.040</td><td>8.601</td></tr><tr><td>DoC [34]</td><td>0.490</td><td>0.263</td><td>2.247</td><td>2.916</td><td>5.117</td><td>9.012</td><td>6.637</td><td>5.744</td></tr><tr><td>μ + σ + FD [21]</td><td>0.455</td><td>0.561</td><td>5.875</td><td>5.823</td><td>4.724</td><td>4.908</td><td>6.486</td><td>5.602</td></tr><tr><td>BoP (K = 80)</td><td>0.218</td><td>0.122</td><td>2.458</td><td>2.818</td><td>3.730</td><td>5.836</td><td>6.451</td><td>4.551</td></tr><tr><td>BoP (K = 100)</td><td>0.186</td><td>0.124</td><td>2.849</td><td>2.808</td><td>3.548</td><td>4.025</td><td>4.777</td><td>3.663</td></tr></table>
" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.345, + 0.885, + 0.48 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.489, + 0.893, + 0.518 + ], + "angle": 0, + "content": "Figure 5. Impact of codebook size on correlation strength on CIFAR-10-C. Correlation scores \\( |\\rho| \\) and \\( |r| \\) are relatively low under a small size and become stably high when the size is greater than 20 for all four model architectures." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.527, + 0.473, + 0.844 + ], + "angle": 0, + "content": "ing classification accuracy on variou unseen test sets. After performing correlation study, we train a neural network regressor on CIFAR-10-C and test it on a series of test sets. Results are summarized in Table 3. We have the following observations. First and foremost, BoP representation achieves the best accuracy prediction performance, evidenced by the lowest RMSE across all the four test scenarios. For example, on the test sets of CIFAR-10.2-\\(\\bar{C}\\), the RMSE of our method is 4.551, which is 1.051 lower than the second best method [21]. This clearly validates the effectiveness of the BoP representation. Second, we observe that the \"Prediction score\" method is unstable. While it has good results under \\(\\tau = 0.9\\) on CIFAR-10.1 and CIFAR-10.2 datasets, it is generally inferior to the competing methods in other test scenarios. Our observation is similar to [21], suggesting that a more robust threshold selection method is needed for this method. Third, although BoP has slightly higher RMSE than DoC on Severity 1 of CIFAR-10.2-\\(\\bar{C}\\) (2.458 v.s., 2.247), we stress that BoP is overall more stable and effective on real world datasets and other severity levels of synthetic datasets." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.471, + 0.901 + ], + "angle": 0, + "content": "Impact of codebook size is summarized in Fig. 5 under CIFAR-10 setup. We conduct the study using different sizes on four classifiers. We observe correlation scores first in" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.527, + 0.893, + 0.573 + ], + "angle": 0, + "content": "crease and then become stable when codebook size is larger than 20. These results are considered validation and help us use competitive and stable codebook sizes in Table 3." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.586, + 0.62, + 0.601 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.895, + 0.807 + ], + "angle": 0, + "content": "This work introduces a bag-of-prototypes (BoP) dataset representation to vectorize visual datasets. It first computes a codebook composed of clustering prototypes and then a prototype histogram for a dataset. The BoP vector considers the underlying local semantic distribution of a dataset and is thus more discriminative than global dataset statistics. Specifically, when used in conjunction with JS divergence, the proposed descriptor better captures the underlying relationship across datasets. This advantage is validated by its promising results in two dataset-level tasks: assessing training set suitability and test set difficulty. This work establishes the baseline usage of the BoP scheme, and more investigations and applications will be made in future work." 
+ }, + { + "type": "title", + "bbox": [ + 0.5, + 0.821, + 0.669, + 0.837 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.846, + 0.893, + 0.892 + ], + "angle": 0, + "content": "We thank all anonymous reviewers for their constructive comments in improving this paper. This work was supported by the ARC Discovery Project (DP210102801)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2888" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.198 + ], + "angle": 0, + "content": "[1] Alessandro Achille, Michael Lam, Rahul Tewari, Avinash Ravichandran, Subhransu Maji, Charless C Fowlkes, Stefano Soatto, and Pietro Perona. Task2vec: Task embedding for meta-learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6430-6439, 2019. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.201, + 0.472, + 0.256 + ], + "angle": 0, + "content": "[2] David Acuna, Guojun Zhang, Marc T Law, and Sanja Fidler. f-domain adversarial learning: Theory and algorithms. In International Conference on Machine Learning, pages 66-75, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.258, + 0.471, + 0.314 + ], + "angle": 0, + "content": "[3] Andrea Agostinelli, Michal Pandy, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. How stable are transferability metrics evaluations? In European Conference on Computer Vision, pages 303-321. Springer, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.316, + 0.47, + 0.383 + ], + "angle": 0, + "content": "[4] Andrea Agostinelli, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. Transferability metrics for selecting source model ensembles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7936-7946, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.385, + 0.469, + 0.428 + ], + "angle": 0, + "content": "[5] David Alvarez-Melis and Nicolo Fusi. Geometric dataset distances via optimal transport. In Advances in Neural Information Processing Systems, pages 21428-21439, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.43, + 0.469, + 0.47 + ], + "angle": 0, + "content": "[6] Lora Aroyo, Matthew Lease, Praveen Paritosh, and Mike Schaekermann. Data excellence for ai: why should you care? Interactions, 29(2):66-69, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.473, + 0.469, + 0.527 + ], + "angle": 0, + "content": "[7] Robert Baldock, Hartmut Maennel, and Behnam Neyshabur. Deep learning through the lens of example difficulty. Advances in Neural Information Processing Systems, 34:10876-10889, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.469, + 0.572 + ], + "angle": 0, + "content": "[8] Herbert Bay, Tinne Tuytelaars, and Luc Van Gool. Surf: Speeded up robust features. In European Conference on Computer Vision, pages 404-417. Springer, 2006. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.574, + 0.469, + 0.627 + ], + "angle": 0, + "content": "[9] Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Vaughan. A theory of learning from different domains. Machine Learning, 79:151-175, 2010. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.631, + 0.469, + 0.686 + ], + "angle": 0, + "content": "[10] Shai Ben-David, John Blitzer, Koby Crammer, and Fernando Pereira. Analysis of representations for domain adaptation. In Advances in Neural Information Processing Systems, pages 137-144, 2006. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.688, + 0.469, + 0.729 + ], + "angle": 0, + "content": "[11] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.731, + 0.469, + 0.772 + ], + "angle": 0, + "content": "[12] Barry Boots, Kokichi Sugihara, Sung Nok Chiu, and Atsuyuki Okabe. Spatial tessellations: concepts and applications of voronoi diagrams. 2009. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.469, + 0.841 + ], + "angle": 0, + "content": "[13] Jiefeng Chen, Frederick Liu, Besim Avci, Xi Wu, Yingyu Liang, and Somesh Jha. Detecting errors and estimating accuracy on unlabeled data with self-training ensembles. In Advances in Neural Information Processing Systems, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.469, + 0.901 + ], + "angle": 0, + "content": "[14] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2, 2004. 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.116, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.148 + ], + "angle": 0, + "content": "[15] Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.151, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[16] Ido Dagan, Lillian Lee, and Fernando Pereira. Similarity-based methods for word sense disambiguation. In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics and Eighth Conference of the European Chapter of the Association for Computational Linguistics, pages 56-63, 1997. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.236, + 0.892, + 0.278 + ], + "angle": 0, + "content": "[17] Julie Delon and Agnes Desolneux. A wasserstein-type distance in the space of gaussian mixture models. SIAM Journal on Imaging Sciences, 13(2):936-970, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.281, + 0.892, + 0.349 + ], + "angle": 0, + "content": "[18] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 1, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.353, + 0.892, + 0.408 + ], + "angle": 0, + "content": "[19] Weijian Deng, Stephen Gould, and Liang Zheng. What does rotation prediction tell us about classifier accuracy under varying testing environments? In International Conference on Machine Learning, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.411, + 0.893, + 0.466 + ], + "angle": 0, + "content": "[20] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation? 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15069-15078, 2021. 1, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.523 + ], + "angle": 0, + "content": "[21] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.527, + 0.892, + 0.595 + ], + "angle": 0, + "content": "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13733–13742, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.599, + 0.892, + 0.696 + ], + "angle": 0, + "content": "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In Proceedings of the International Conference on Learning Representations, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.699, + 0.892, + 0.74 + ], + "angle": 0, + "content": "[24] Qiang Du, Vance Faber, and Max Gunzburger. Centroidal voronoi tessellations: Applications and algorithms. SIAM review, 41(4):637-676, 1999. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.743, + 0.892, + 0.799 + ], + "angle": 0, + "content": "[25] Sabri Eyuboglu, Bojan Karlaš, Christopher Ré, Ce Zhang, and James Zou. dcbench: A benchmark for data-centric ai systems. In Proceedings of the Sixth Workshop on Data Management for End-To-End Machine Learning, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.801, + 0.892, + 0.857 + ], + "angle": 0, + "content": "[26] L. Fei-Fei and P. Perona. A bayesian hierarchical model for learning natural scene categories. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 524–531, 2005. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.859, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[27] Maurice Fréchet. Sur la distance de deux lois de probabilité. Comptes Rendus Hebdomadaires des Seances de L'Académie des Sciences, 244(6):689-692, 1957. 2, 4, 5" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "2889" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[28] David Freedman and Persi Diaconis. On the histogram as a density estimator: L 2 theory. Zeitschrift für Wahrscheinlichkeitstheorie und verwandte Gebiete, 57(4):453-476, 1981. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.148, + 0.47, + 0.218 + ], + "angle": 0, + "content": "[29] Jianglin Fu, Shikai Li, Yuming Jiang, Kwan-Yee Lin, Chen Qian, Chen Change Loy, Wayne Wu, and Ziwei Liu. Stylegan-human: A data-centric odyssey of human generation. In European Conference on Computer Vision, pages 1-19. Springer, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.218, + 0.47, + 0.286 + ], + "angle": 0, + "content": "[30] Saurabh Garg, Sivaraman Balakrishnan, Zachary C Lipton, Behnam Neyshabur, and Hanie Sedghi. 
Leveraging unlabeled data to predict out-of-distribution performance. In International Conference on Learning Representations, 2022, 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.47, + 0.357 + ], + "angle": 0, + "content": "[31] Saurabh Garg, Sivaraman Balakrishnan, Zachary Chase Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In Proceedings of the International Conference on Learning Representations, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.47, + 0.412 + ], + "angle": 0, + "content": "[32] Amirata Ghorbani and James Zou. Data shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning, pages 2242-2251. PMLR, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.413, + 0.47, + 0.468 + ], + "angle": 0, + "content": "[33] Arthur Gretton, Karsten Borgwardt, Malte Rasch, Bernhard Schölkopf, and Alex Smola. A kernel method for the two-sample-problem. In Advances in Neural Information Processing Systems, 2006. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.469, + 0.47, + 0.538 + ], + "angle": 0, + "content": "[34] Devin Guillery, Vaishaal Shankar, Sayna Ebrahimi, Trevor Darrell, and Ludwig Schmidt. Predicting with confidence on unseen distributions. In Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, pages 1134-1144, 2021. 2, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.47, + 0.594 + ], + "angle": 0, + "content": "[35] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.595, + 0.47, + 0.65 + ], + "angle": 0, + "content": "[36] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.47, + 0.705 + ], + "angle": 0, + "content": "[37] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.706, + 0.47, + 0.775 + ], + "angle": 0, + "content": "[38] Dan Hendrycks, Norman Mu, Ekin D Cubuk, Barret Zoph, Justin Gilmer, and Balaji Lakshminarayanan. Augmix: A simple data processing method to improve robustness and uncertainty. In Proceedings of the International Conference on Learning Representations, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.776, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[39] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.845, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[40] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4700-4708, 2017. 
7" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.134 + ], + "angle": 0, + "content": "[41] Peter J Huber. Robust statistics. In International Encyclopedia of Statistical Science, pages 1248-1251. Springer, 2011. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.135, + 0.892, + 0.189 + ], + "angle": 0, + "content": "[42] Hervé Jégou and Ondrej Chum. Negative evidences and co-occurrences in image retrieval: The benefit of pca and whiten-ing. In European Conference on Computer Vision, pages 774–787, 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.19, + 0.892, + 0.259 + ], + "angle": 0, + "content": "[43] Hervé Jégou, Matthijs Douze, Cordelia Schmid, and Patrick Pérez. Aggregating local descriptors into a compact image representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3304-3311, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.26, + 0.892, + 0.302 + ], + "angle": 0, + "content": "[44] Junguang Jiang, Baixu Chen, Bo Fu, and Mingsheng Long. Transfer-learning-library. https://github.com/thuml/Transfer-Learning-Library, 2020.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.302, + 0.892, + 0.357 + ], + "angle": 0, + "content": "[45] Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, and Michael C Mozer. Characterizing structural regularities of labeled data in overparameterized models. arXiv preprint arXiv:2002.03206, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.358, + 0.892, + 0.412 + ], + "angle": 0, + "content": "[46] Thorsten Joachims. Text categorization with support vector machines: Learning with many relevant features. In European Conference on Machine Learning, pages 137-142, 1998. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.413, + 0.892, + 0.468 + ], + "angle": 0, + "content": "[47] Thorsten Joachims, Dayne Freitag, Tom Mitchell, et al. Webwatcher: A tour guide for the world wide web. In International Joint Conference on Artificial Intelligence, pages 770-777. CiteSeer, 1997. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.469, + 0.892, + 0.497 + ], + "angle": 0, + "content": "[48] Alexander B. Jung. imgaug. https://github.com/ aleju/imgaug, 2018. [Online; accessed 30-Oct-2018]. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.497, + 0.892, + 0.525 + ], + "angle": 0, + "content": "[49] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009. 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.525, + 0.892, + 0.593 + ], + "angle": 0, + "content": "[50] David D Lewis and William A Gale. A sequential algorithm for training text classifiers. In Proceedings of the Seventeenth Annual International ACM-SIGIR Conference on Research and Development in Information Retrieval, pages 3-12, 1994. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.595, + 0.892, + 0.65 + ], + "angle": 0, + "content": "[51] Weixin Liang, Girmaw Abebe Tadesse, Daniel Ho, L Fei-Fei, Matei Zaharia, Ce Zhang, and James Zou. Advances, challenges and opportunities in creating data for trustworthy ai. Nature Machine Intelligence, 4(8):669–677, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.65, + 0.892, + 0.718 + ], + "angle": 0, + "content": "[52] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European Conference on Computer Vision, pages 740-755, 2014. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.719, + 0.892, + 0.762 + ], + "angle": 0, + "content": "[53] D.G. Lowe. Object recognition from local scale-invariant features. In Proceedings of the IEEE International Conference on Computer Vision, pages 1150-1157, 1999. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.762, + 0.892, + 0.829 + ], + "angle": 0, + "content": "[54] James MacQueen et al. Some methods for classification and analysis of multivariate observations. In Proceedings of the Fifth Berkeley Symposium on Mathematical statistics and probability, volume 1, pages 281-297. Oakland, CA, USA, 1967. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.831, + 0.892, + 0.859 + ], + "angle": 0, + "content": "[55] Christopher Manning and Hinrich Schutze. Foundations of statistical natural language processing. MIT press, 1999. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.86, + 0.892, + 0.901 + ], + "angle": 0, + "content": "[56] Mark Mazumder, Colby Banbury, Xiaozhe Yao, Bojan Karlas, William Gaviria Rojas, Sudnya Diamos, Greg Diamos, Lynn He, Douwe Kiela, David Jurado, et al. Dataperf:" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.892, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "2890" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.47, + 0.12 + ], + "angle": 0, + "content": "Benchmarks for data-centric ai development. arXiv preprint arXiv:2207.10062, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.121, + 0.472, + 0.175 + ], + "angle": 0, + "content": "[57] Andrew McCallum, Kamal Nigam, et al. A comparison of event models for naive bayes text classification. In AAAI Workshop on Learning for Text Categorization, pages 41-48, 1998. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.471, + 0.232 + ], + "angle": 0, + "content": "[58] Eric Mintun, Alexander Kirillov, and Saining Xie. On interaction between augmentations and corruptions in natural corruption robustness. In Advances in Neural Information Processing Systems, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.233, + 0.471, + 0.287 + ], + "angle": 0, + "content": "[59] David Nister and Henrik Stewenius. Scalable recognition with a vocabulary tree. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2161-2168, 2006. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.288, + 0.471, + 0.357 + ], + "angle": 0, + "content": "[60] Michal Páncy, Andrea Agostinelli, Jasper Uijlings, Vittorio Ferrari, and Thomas Mensink. Transferability estimation using bhattacharyya class separability. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9172-9182, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.358, + 0.471, + 0.413 + ], + "angle": 0, + "content": "[61] Amandalynne Paullada, Inioluwa Deborah Raji, Emily Bender, Emily Denton, and Alex Hanna. Data and its (dis)contents: A survey of dataset development and use in machine learning research. *Patterns*, 2021. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.414, + 0.471, + 0.482 + ], + "angle": 0, + "content": "[62] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1406-1415, 2019. 1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.483, + 0.471, + 0.537 + ], + "angle": 0, + "content": "[63] Xingchao Peng, Yichen Li, and Kate Saenko. Domain2vec: Domain embedding for unsupervised domain adaptation. In European Conference on Computer Vision, pages 756-774, 2020. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.539, + 0.471, + 0.594 + ], + "angle": 0, + "content": "[64] Florent Perronnin and Christopher Dance. Fisher kernels on visual vocabularies for image categorization. In Proceedings of the IEEE International Conference on Computer Vision, pages 1-8, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.595, + 0.471, + 0.649 + ], + "angle": 0, + "content": "[65] Florent Perronnin, Jorge Sánchez, and Thomas Mensink. Improving the fisher kernel for large-scale image classification. In European Conference on Computer Vision, pages 143-156, 2010. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.65, + 0.471, + 0.718 + ], + "angle": 0, + "content": "[66] James Philbin, Ondrej Chum, Michael Isard, Josef Sivic, and Andrew Zisserman. Object retrieval with large vocabularies and fast spatial matching. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.72, + 0.471, + 0.801 + ], + "angle": 0, + "content": "[67] Vladislav Polianskii, Giovanni Luca Marchetti, Alexander Kravberg, Anastasiia Varava, Florian T Pokorny, and Danica Kragic. Voronoi density estimator for high-dimensional data: Computation, compactification and convergence. In Uncertainty in Artificial Intelligence, pages 1644-1653. PMLR, 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.803, + 0.471, + 0.831 + ], + "angle": 0, + "content": "[68] Anand Rajaraman and Jeffrey David Ullman. Mining of massive datasets. Cambridge University Press, 2011. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.832, + 0.471, + 0.873 + ], + "angle": 0, + "content": "[69] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do cifar-10 classifiers generalize to cifar-10? arXiv preprint arXiv:1806.00451, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.874, + 0.471, + 0.901 + ], + "angle": 0, + "content": "[70] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.472, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.893, + 0.135 + ], + "angle": 0, + "content": "residuals and linear bottlenecks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4510-4520, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.137, + 0.892, + 0.164 + ], + "angle": 0, + "content": "[71] Grace S Shieh. A weighted kendall's tau statistic. Statistics & Probability Letters, 39(1):17-24, 1998. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.166, + 0.892, + 0.209 + ], + "angle": 0, + "content": "[72] Karen Simonyan and Andrew Zisserman. 
Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.21, + 0.893, + 0.265 + ], + "angle": 0, + "content": "[73] Sivic and Zisserman. Video google: a text retrieval approach to object matching in videos. In Proceedings of the IEEE International Conference on Computer Vision, pages 1470-1477, 2003. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.267, + 0.893, + 0.31 + ], + "angle": 0, + "content": "[74] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of frustratingly easy domain adaptation. In Proceedings of the AAAI Conference on Artificial Intelligence, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.311, + 0.893, + 0.354 + ], + "angle": 0, + "content": "[75] Baochen Sun and Kate Saenko. Deep coral: Correlation alignment for deep domain adaptation. In European Conference on Computer Vision, pages 443-450, 2016. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.355, + 0.893, + 0.424 + ], + "angle": 0, + "content": "[76] Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A Smith, and Yejin Choi. Dataset cartography: Mapping and diagnosing datasets with training dynamics. arXiv preprint arXiv:2009.10795, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.426, + 0.893, + 0.482 + ], + "angle": 0, + "content": "[77] Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander A Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Thirty-first AAAI Conference on Artificial Intelligence, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.484, + 0.893, + 0.538 + ], + "angle": 0, + "content": "[78] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.541, + 0.893, + 0.598 + ], + "angle": 0, + "content": "[79] Yang Tan, Yang Li, and Shao-Lun Huang. Otce: A transferability metric for cross-domain cross-task representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15779-15788, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.599, + 0.893, + 0.668 + ], + "angle": 0, + "content": "[80] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forgetting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.67, + 0.893, + 0.713 + ], + "angle": 0, + "content": "[81] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.714, + 0.893, + 0.743 + ], + "angle": 0, + "content": "[82] Joaquin Vanschoren. Meta-learning: A survey. arXiv preprint arXiv:1810.03548, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.744, + 0.893, + 0.786 + ], + "angle": 0, + "content": "[83] Sebastiano Vigna. A weighted correlation index for rankings with ties. In Proceedings of International Conference on World Wide Web, pages 1166-1176, 2015. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.788, + 0.893, + 0.842 + ], + "angle": 0, + "content": "[84] Wei Ying, Yu Zhang, Junzhou Huang, and Qiang Yang. Transfer learning via learning to transfer. In International Conference on Machine Learning, pages 5085-5094. PMLR, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.845, + 0.893, + 0.901 + ], + "angle": 0, + "content": "[85] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael Jordan. Bridging theory and algorithm for domain adaptation. In International Conference on Machine Learning, pages 7404-7413, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.893, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "2891" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.146 + ], + "angle": 0, + "content": "[86] Liang Zheng, Yi Yang, and Qi Tian. Sift meets cnn: A decade survey of instance retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(5):1224-1244, 2017. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.149, + 0.47, + 0.219 + ], + "angle": 0, + "content": "[87] Yujie Zhong, Relja Arandjelovic, and Andrew Zisserman. Ghostvlad for set-based face recognition. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part II 14, pages 35–50. Springer, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.515, + 0.956 + ], + "angle": 0, + "content": "2892" + } + ] +] \ No newline at end of file diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_origin.pdf b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0c47298246b7ba23cab0a67493373c9713c29bf6 --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/f45f628e-fe49-4cb9-b5bd-808953724624_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c427d012f6207f6509265c7c911cccfa57d103edb3ef0161aa5776ac639a27c +size 864688 diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/full.md b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8ef1dd35f5126f7fb9ad27b8cc032ac4e570881c --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/full.md @@ -0,0 +1,305 @@ +# A Bag-of-Prototypes Representation for Dataset-Level Applications + +Weijie $\mathrm{Tu}^{1}$ Weijian Deng $^{1}$ Tom Gedeon $^{2}$ Liang Zheng $^{1}$ + +$^{1}$ Australian National University $^{2}$ Curtin University + +# Abstract + +This work investigates dataset vectorization for two dataset-level tasks: assessing training set suitability and test set difficulty. The former measures how suitable a training set is for a target domain, while the latter studies how challenging a test set is for a learned model. Central to the two tasks is measuring the underlying relationship between datasets. 
This needs a desirable dataset vectorization scheme, which should preserve as much discriminative dataset information as possible so that the distance between the resulting dataset vectors can reflect dataset-to-dataset similarity. To this end, we propose a bag-of-prototypes (BoP) dataset representation that extends the image-level bag consisting of patch descriptors to dataset-level bag consisting of semantic prototypes. Specifically, we develop a codebook consisting of $K$ prototypes clustered from a reference dataset. Given a dataset to be encoded, we quantize each of its image features to a certain prototype in the codebook and obtain a $K$ -dimensional histogram. Without assuming access to dataset labels, the BoP representation provides rich characterization of the dataset semantic distribution. Furthermore, BoP representations cooperate well with Jensen-Shannon divergence for measuring dataset-to-dataset similarity. Although very simple, BoP consistently shows its advantage over existing representations on a series of benchmarks for two dataset-level tasks. + +# 1. Introduction + +Datasets are fundamental in machine learning research, forming the basis of model training and testing [18, 51, 52, 61]. While large-scale datasets bring opportunities in algorithm design, there lack proper tools to analyze and make the best use of them [6, 51, 56]. Therefore, as opposed to traditional algorithm-centric research where improving models is of primary interest, the community has seen a growing interest in understanding and analyzing the data used for developing models [51, 56]. Recent examples of such goal include data synthesis [29], data sculpting [25, 51], and data valuation [6, 32, 56]. These tasks typically focus on individual sample of a dataset. In this work, we aim to understand + +nature of datasets from a dataset-level perspective. + +This work considers two dataset-level tasks: suitability in training and difficulty in testing. First, training set suitability denotes whether a training set is suitable for training models for a target dataset. In real-world applications, we are often provided with multiple training sets from various data distributions (e.g., universities and hospitals). Due to distribution shift, their trained models have different performance on the target dataset. Then, it is of high practical value to select the most suitable training set for the target dataset. Second, test set difficulty means how challenging a test set is for a learned model. In practice, test sets are usually unlabeled and often come from different distributions than that of the training set. Measuring the test set difficulty for a learned model helps us understand the model reliability, thereby ensuring safe model deployment. + +The core of the two dataset-level tasks is to measure the relationship between datasets. For example, a training set is more suitable for learning a model if it is more similar to the target dataset. To this end, we propose a vectorization scheme to represent a dataset. Then, the relationship between a pair of datasets can be simply reflected by the distance between their representations. Yet, it is challenging to encode a dataset as a representative vector, because (i) a dataset has a different cardinality (number of images) and (ii) each image has its own semantic content (e.g., category). It is thus critical to find an effective way to aggregate all image features to uncover dataset semantic distributions. 
+ +In the literature, some researchers use the first few moments of distributions such as feature mean and co-variance to represent datasets [20, 62, 74, 75, 82]. While being computational friendly, these methods do not offer sufficiently strong descriptive ability of a dataset, such as class distributions, and thus have limited effectiveness in assessing attributes related to semantics. There are also some methods learn task-specific dataset representations [1, 63]. For example, given a dataset with labels and a task loss function, Task2Vec [1] computes an embedding based on estimates of the Fisher information matrix associated with a probe network's parameters. While these task-specific representations are able to predict task similarities, they are not suitable for characterizing dataset properties of interest. They + +require training a network on the specific task [1] or on multiple datasets [63], so they are not effective in assessing the training set suitability. Additionally, they require image labels for the specific task, so they cannot be used to measure the difficulty of unlabeled test sets. + +In this work, we propose a simple and effective bag-of-prototypes (BoP) dataset representation. Its computation starts with partitioning the image feature space into semantic regions through clustering, where the region centers, or prototypes, form a codebook. Given a new dataset, we quantize its features to their corresponding prototypes and compute an assignment histogram, which, after normalization, gives the BoP representation. The dimensionality of BoP equals the codebook size, which is usually a few hundred and is considered memory-efficient. Meanwhile, the histogram computed on the prototypes is descriptive of the dataset semantic distribution. + +Apart from being low dimensional and semantically rich, BoP has a few other advantages. First, while recent works in task-specific dataset representation usually require full image annotations and additional learning procedure [1,63], the computation of BoP does not rely on any. It is relatively efficient and allows for unsupervised assessment of dataset attributes. Second, BoP supports dataset-to-dataset similarity measurement through Jensen-Shannon divergence. We show in our experiment that this similarity is superior to commonly used metrics such as Fréchet distance [27] and maximum mean discrepancy [33] in two dataset-level tasks. + +# 2. Related Work + +Dataset representations. A common practice is to use simple and generic statistics as dataset representations [20, 62, 74, 75, 82]. For example, Peng et al. [62] use the first moment to represent a dataset. Deng et al. [20] use global feature mean and co-variance as dataset representations. Vanschoeren et al. [82] find dataset cardinality (the number of images/classes) useful to encode a dataset. These methods have limited descriptive ability, whereas BoP is more semantically descriptive. Moreover, it is feasible to learn a task-specific dataset representation [1, 63, 84, 87]. For example, Ying et al. [84] learn transfer skills from previous transfer learning experiences for future target tasks. Achille et al. [1] propose to learn a task embedding based on the estimate of Fisher information matrix associated with a task loss. Compared with these task-specific representations, BoP is hand-crafted, avoiding computation overheads incurred by end-to-end learning. It is thus efficient in measuring training set suitability without training any models. 
Moreover, BoP require no image labels, making it more suitable for assessing the difficulty of unlabeled test sets. + +Dataset-to-dataset similarity. We briefly review three strategies. First, some dataset similarity measures are developed in the context of domain adaptation [2, 9, 10, 85]. + +They typically depend on a loss function and hypothesis class, and use a supremum of that function class to quantify the similarity of datasets. (e.g., $\mathcal{H}\Delta \mathcal{H}$ -divergence [9], $f$ -divergence [2], and $\mathcal{A}$ -distance [10]). Second, dataset distance can be computed based on optimal transport [5, 17, 79]. For example, the squared Wasserstein metric Fréchet distance [27] is widely used in comparing the distribution discrepancy of generated images with the distribution of real images [39]. To better leverage the geometric relationship between datasets, Alvarez et al. [5] use labels to guide optimal transport towards class-coherent matches. Third, existing dataset representations can be used to compute dataset distance [33, 62, 75, 81]. For example, maximum mean discrepancy (MMD) [33] computes the distance between mean elements of distributions on the probability space. Peng et al. [62] eliminate dataset discrepancy by matching datasets moments. CORAL [75] uses the second-order statistics of datasets to measure distance. This work is in the third category and uses JS divergence between BoP representations to calculate dataset-to-dataset similarity. + +Assessment of training dataset suitability. Recent works have focused on understanding the importance of individual training instances in training of neural networks [6,32,45,56]. For example, Data Shapley [32] and Consistency Score [45] are proposed to evaluate the value of each data instance. Some methods identify "difficult" instances based on the information of training dynamics [7,76,80]. + +Different from the above approaches, this work studies the suitability of an entire training set. Given multiple training datasets from different data distributions, the focus is to choose the most appropriate training dataset for the target domain. Dataset-to-dataset similarity can be used for this goal. Intuitively, if a training dataset has high similarity with a target dataset, the model trained on it is expected to be more performant and vice versa. In this work, we use BoP representation coupled with simple JS divergence to calculate dataset-to-dataset similarity and demonstrate its effectiveness in accessing training set suitability. + +Assessing test set difficulty without ground truths. The goal of this task (also known as unsupervised accuracy estimation) is to predict the accuracy of a given model on various unlabeled test sets. Existing methods usually use a representation of the test set for accuracy prediction [13, 19, 20, 30, 34]. Normally this representation is derived from classifier outputs, such as image features [20], prediction logits [30], average softmax scores [34]. Then, regression is used to establish the relationship between this representation and model test accuracy under various testing environments. Compared with existing dataset features, the BoP representation better characterizes the semantic distribution of training and test sets and thus can be effectively used for model accuracy prediction. + +# 3. Methodology + +# 3.1. 
Bag-of-Words Model Across Communities + +In natural language processing (NLP) and information retrieval, the Bag-of-Words (BoW) model [46, 47, 50, 57] vectorizes textual data as a word histogram. Specifically, for each word in the dictionary, its occurrences in a document are counted, which fills in the corresponding entry of the BoW feature. This word frequency vector is thus used to represent a document. Numerous improvements of the BoW feature were made in NLP, such as n-grams [47, 50] and term frequency-inverse document frequency [68]. + +In the early 2000s, the BoW representation was introduced to the computer vision (CV) community to encode hundreds or thousands of local image descriptors [8, 53] into a compact vector [73]. As there is no semantic codebook like in NLP, a visual codebook is constructed by performing clustering (e.g., k-means) on a collection of local image features, where the resulting clustering centers are called "visual words". Local image descriptors are quantized to their nearest cluster center so that a visual word histogram can be computed. This BoW histogram also have undergone extensive improvements in later years, such as Fisher vector [64, 65], vector of locally aggregated descriptors (VLAD) [43], and the use of principal component analysis and whitening [42]. + +Contribution statement. This paper contributes a baseline method in adopting the BoW idea study the two basic properties of a dataset. To this end, we propose to represent a dataset using its histogram over a series of prototypes. A comparison between the usage of BoW model in NLP, CV and our dataset-level research is shown in Table 1. Specifically, the BoP representation relies on clustering for codebook formation, has a relatively small codebook (depending on the richness of dataset semantics), and has semantically sensible codewords. + +# 3.2. Bag-of-Prototypes Dataset Representation + +Given a dataset $\mathcal{D} = \{\mathbf{x}_i\}_{i=1}^N$ where $N$ is the number of images and a feature extractor $\mathbf{F}(\cdot)$ that maps an input image into a $d$ -dimensional feature $f \in \mathbb{R}^d$ , we extract a set of image features $\mathcal{F} := \{\mathbf{F}(\mathbf{x}_i)\}_{i=1}^N$ . While it is possible to directly use the dataset images (or features) as model input under small $N$ , it becomes prohibitively expensive when $N$ is large. We therefore focus on extracting useful semantic features of $\mathcal{F}$ by encoding its image features into a compact representation. Below we detail the necessary steps for computing the proposed BoP representation (refer Fig. 1). + +Step I: Codebook generation. Given a reference dataset $\mathcal{D}_r = \{\mathbf{x}_i^r\}_{i=1}^{N_r}$ , we extract all of its image features $\mathcal{F}_r := \{\mathbf{F}(\mathbf{x}_i^r)\}_{i=1}^{N_r}$ using a pretrained network, from which a codebook is constructed. Specifically, we adopt standard k-means clustering [54] to partition the feature space $\mathbb{R}^d$ + +
| | BoW in NLP | BoW in CV | BoP |
| --- | --- | --- | --- |
| Encoded objects | Documents | Images | Datasets (a set of images) |
| Codewords in codebook | Words | Cluster centers of local descriptors | Prototypes of image features |
| Clustering? | No | Yes | Yes |
| Codewords semantics | Clear | Little | Sensible |
| Codebook size | $>10^3$ | $10^3$-$10^6$ | $\sim 10^2$ (dataset dependent) |
Table 1. Comparing BoP with the BoW model in natural language processing (NLP) and computer vision (CV). The objective of BoW in NLP and CV is to encode texts and images, respectively, while BoP is proposed to represent datasets.

into $K$ clusters. Each of the $K$ cluster centers is called a "prototype", because oftentimes each center mainly represents a certain semantic content. See Fig. 1 (right) for an exemplar image of each prototype. The prototypes, or centers, constitute the codebook, denoted as $\mathcal{C} = \{\mathbf{c}_i\}_{i=1}^K$, where $\mathbf{c}_i$ is the $i$-th prototype. Note that the order of the prototypes is fixed in the codebook.

Step II: Histogram computation. For a dataset to be encoded $\mathcal{D}_e = \{\mathbf{x}_i^e\}_{i=1}^{N_e}$, where $N_e$ is the number of images, we project it onto the codebook $\mathcal{C}$ of size $K$ to compute its BoP representation. Specifically, after extracting image features $\mathcal{F}_e := \{\mathbf{F}(\mathbf{x}_i^e)\}_{i=1}^{N_e}$ from $\mathcal{D}_e$, we compute, for each image feature, its distance to all $K$ prototypes in the codebook, yielding $K$ distances $d_1, \ldots, d_K$, where $d_i$ is the distance between the image feature and the $i$-th prototype. An image feature is quantized to prototype $\mathbf{c}_i$ if $d_i$ is the smallest among $d_1, \ldots, d_K$. Following the quantization, we generate a $K$-dimensional one-hot encoding where the $i$-th entry is 1 and all the others are 0. Having computed the one-hot vectors for all the image features, we sum them and normalize the sum by $N_e$, the number of images in $\mathcal{D}_e$. This gives the histogram representation $\mathbf{h}_e$, or BoP representation, of $\mathcal{D}_e$, where the $i$-th entry indicates the fraction of features in $\mathcal{D}_e$ assigned to prototype $\mathbf{c}_i$.

# 3.3. Measuring Dataset-to-Dataset Similarity

Similar to image / document retrieval, where BoW vectors of instances are used for similarity comparison [14, 26, 59, 66, 73], this work uses the BoP representation to calculate dataset-to-dataset similarity. Specifically, given BoP representations $\mathbf{h}_x$ and $\mathbf{h}_y$ of two datasets $\mathcal{D}_x$ and $\mathcal{D}_y$, we simply define their similarity $S_{x,y}$ using the Jensen-Shannon (JS) divergence, which is designed for histogram-based similarity measurement [16, 55].

![](images/15458d234ddfea33e58d1cb767cc770f7d3f294ee12622c7525847f7ef78d0c4.jpg)
Figure 1. Workflow of BoP representation computation, using CIFAR-10 [49] and one CIFAR-10 out-of-distribution (OOD) test set as an example. Top: We group image features of the reference dataset CIFAR-10 into 10 clusters, whose centers are called prototypes. The prototypes constitute the codebook of size 10. Bottom left: To encode the OOD test set, we project it onto the codebook by quantizing each image feature to its corresponding prototype. Lastly, we compute the histogram, i.e., the BoP representation, of the CIFAR-10 OOD test set. Bottom right: We regard dataset-to-dataset similarity as the Jensen-Shannon divergence between the BoP histograms of the CIFAR-10 OOD test set and the reference dataset. With such similarity, we can measure the test set difficulty for a model trained on the reference dataset.
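To make Steps I and II concrete, below is a minimal sketch of the BoP computation, assuming image features have already been extracted by a pretrained network and stored as NumPy arrays; the function and variable names are ours for illustration, not taken from any released code.

```python
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans


def build_codebook(ref_features: np.ndarray, k: int, seed: int = 0) -> np.ndarray:
    """Step I: cluster reference-set features (N_r, d) into K prototypes (the codebook)."""
    kmeans = KMeans(n_clusters=k, random_state=seed, n_init=10).fit(ref_features)
    return kmeans.cluster_centers_  # shape (K, d); the prototype order is fixed


def bop_histogram(features: np.ndarray, codebook: np.ndarray) -> np.ndarray:
    """Step II: quantize each feature to its nearest prototype and normalize the counts."""
    dists = cdist(features, codebook)                  # (N_e, K) feature-to-prototype distances
    assignments = dists.argmin(axis=1)                 # nearest prototype index per image
    counts = np.bincount(assignments, minlength=codebook.shape[0]).astype(np.float64)
    return counts / counts.sum()                       # K-dimensional BoP histogram, sums to 1
```

For a very large dataset, the distance computation can be chunked over batches of features; the resulting histogram is unchanged.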
Task-oriented similarity measure. We can build a universal codebook on a large-scale dataset following the BoW model [14, 86]. By doing so, the resulting BoP representations are generic. We can also build a task-oriented codebook on a reference dataset from a specific task to capture more task-oriented information. The latter is more suitable for the two dataset-level tasks considered in this work. For the task of training set suitability assessment, we use the target dataset as the reference for codebook generation to fully exploit its semantic information. As a result, the JS divergence between the BoP representations of a training set and the target dataset can well capture how similar the training set is to the target set. Similarly, for the task of test set difficulty assessment, we build the codebook on the training set. This practice effectively measures how similar an unlabeled test set is to a given training set.

# 3.4. Discussion

Working mechanism of BoP. Codebook generation in BoP can be viewed as a Centroidal Voronoi Tessellation [24]: the prototypes (cluster centers) of the codebook tessellate the feature space into Voronoi cells. Histogram computation then approximates a probability distribution function in the same way as a nonparametric histogram [12, 28, 67]. That is, the BoP representation reflects the distribution of a dataset in the feature space.

As shown in Fig. 1, the prototypes of the reference dataset tessellate the feature space into Voronoi cells. Based on this, we compute the histogram of the reference dataset to represent its distribution. Given a new dataset, we conduct the same histogram calculation procedure and correspondingly capture its distribution with a histogram. We then measure the discrepancy between the two datasets by calculating the JS divergence between their histograms. Compared with common measures of dataset distance (e.g., FD [27], KID [11] and MMD [33]) that only reflect the global structure (e.g., the first few moments) of dataset distributions, BoP, combined with the JS divergence, captures more local structure.

Training set suitability vs. transferability estimation. The two tasks are related but differ significantly: 1) given an unlabeled target dataset and a pool of training datasets, the former aims to select the most suitable training set for the target, whereas the latter assumes a labeled target dataset and a pool of models pretrained on a source dataset, with the goal of selecting the most suitable source model for the target without fine-tuning them all [3, 4, 60]; 2) in training set suitability, all datasets are used for the same classification problem, whereas in transferability estimation, the problem in the target dataset (e.g., CIFAR-10 classification) differs from that of the source dataset (e.g., ImageNet classification).

Analysis of the number of prototypes in a codebook. The codebook size is a critical factor influencing the usefulness of BoP. A small codebook means a coarser partition of the feature space, where similar features will likely fall in the same cluster, but dissimilar features may also fall in the same cluster. In contrast, a large codebook provides a finer description of the space, where dissimilar features are quantized to different prototypes and more semantics are captured. According to our experimental results in Fig. 2 and Fig. 5, we find, reassuringly, that BoP is robust to the codebook size: the number of prototypes can deviate within a wide range around the true number of classes (e.g., 345 for DomainNet [62]) without significantly affecting performance.

Application scope and future directions. BoP is proposed to study the two dataset-level tasks, and the datasets considered in each task share the same label space.
We may encounter situations where we need to compare datasets with different label spaces (e.g., pre-training dataset selection [1]). In this case, one potential way is to build a universal codebook on a large-scale and representative dataset, similar to BoW models [14, 86]. By doing so, the resulting BoP representations can encode diverse and sufficient semantics for comparing datasets across various label spaces. We view BoP as a starting point for encoding datasets. It would be interesting to study other dataset vectorization schemes and dataset-level tasks.

# 4. Comparing Training Suitability of Datasets

This task studies dataset valuation, where multiple training sets are provided by different data contributors. The goal is to select the most suitable training set (ideally without training), i.e., the one whose trained model performs best on a target test set. In this section, we first validate that BoP, combined with the JS divergence (BoP + JS), is predictive of dataset suitability for the target test set. Then, we show that BoP is robust across a wide range of codebook sizes and different networks.

# 4.1. Experimental Settings

Correlation study under the DomainNet setup. We use the domain generalization benchmark DomainNet [62], which consists of 6 domains: Painting, Real, Infograph, Quickdraw, Sketch and ClipArt, where the task is 345-way object classification. Each domain has its own training and test splits. We conduct the correlation study in a leave-one-out manner, leading to 6 groups of correlation studies, with each group using the test split of one domain as the target test set. Additionally, we apply image transformations to the training splits of the six original domains. Specifically, we employ 'Cartoon' [48], 'Zoom Blur' and 'JPEG Compression' [36] to convert each domain's style to one specific type. We also use 'AugMix' [38] and 'AutoAugment' [15], which transform images with various operations to generate domains with mixed styles. This process synthesizes 30 new datasets, so we have 36 training sets in total.

We follow the training scheme provided by TLlib [44] to train a ResNet-101 model [35], whose weights are pretrained on ImageNet [18], on each training set, yielding 36 models. Moreover, the penultimate-layer outputs of the pretrained ResNet-101 are used as image features. On the test set, we generate a codebook of size 1000. Then, for each training set, we compute its BoP histogram, its BoP + JS to the test set, and the accuracy of its trained model on the test set. After this, we calculate the correlation strength between BoP + JS and model accuracy to evaluate whether BoP is indicative of training set suitability.
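As a hedged illustration of this protocol, the snippet below computes BoP + JS between each candidate training set and the target test set and then the three correlation scores. It uses SciPy's jensenshannon (which returns the JS distance, so we square it) and weightedtau as a stand-in for the weighted Kendall's correlation of [83]; the toy arrays only mark where the 36 real (distance, accuracy) pairs would go.

```python
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import pearsonr, spearmanr, weightedtau


def bop_js(h_train: np.ndarray, h_target: np.ndarray) -> float:
    """JS divergence between two BoP histograms (jensenshannon returns the distance)."""
    return jensenshannon(h_train, h_target, base=2) ** 2


# Toy stand-ins for the 36 training sets: in the real study, each entry would be the
# BoP + JS to the target test set and the test accuracy of the corresponding model.
rng = np.random.default_rng(0)
js_values = rng.uniform(0.05, 0.6, size=36)
accuracies = 0.75 - 0.8 * js_values + 0.02 * rng.standard_normal(36)

r, _ = pearsonr(js_values, accuracies)          # linearity
rho, _ = spearmanr(js_values, accuracies)       # monotonicity
tau_w, _ = weightedtau(js_values, accuracies)   # weighted Kendall's correlation
print(f"r = {r:.3f}, rho = {rho:.3f}, tau_w = {tau_w:.3f}")
```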
| Method | $r$ (ResNet-34) | $\rho$ (ResNet-34) | $\tau_w$ (ResNet-34) | $r$ (ResNet-101) | $\rho$ (ResNet-101) | $\tau_w$ (ResNet-101) |
| --- | --- | --- | --- | --- | --- | --- |
| FD [27] | -0.860 | -0.926 | -0.828 | -0.903 | -0.902 | -0.802 |
| MMD [33] | -0.817 | -0.801 | -0.691 | -0.821 | -0.817 | -0.704 |
| KID [11] | -0.773 | -0.904 | -0.804 | -0.876 | -0.896 | -0.800 |
| BoP + JS | -0.960 | -0.927 | -0.840 | -0.961 | -0.929 | -0.840 |
Table 2. Comparison of the averaged Pearson's correlation $(r)$, Spearman's correlation $(\rho)$ and weighted Kendall's correlation $(\tau_w)$ of Fréchet distance (FD), maximum mean discrepancy (MMD), kernel inception distance (KID) and BoP + JS (codebook size 1000) on the six test sets of DomainNet. We report two groups of results using ResNet-34 (left) and ResNet-101 (right). BoP + JS is more effective in assessing training set suitability than the others.

Evaluation metric. We use Pearson's correlation $r$ and Spearman's rank correlation $\rho$ to measure the linearity and monotonicity, respectively, between the BoP-based dataset distance and model accuracy. Both metrics range in $[-1, 1]$; if $|r|$ or $|\rho|$ is close to 1, the linearity or monotonicity is strong, and vice versa. In addition to these two metrics, we also use a weighted variant of Kendall's correlation $(\tau_w)$ [83]. It is shown to be useful when selecting the best-ranked item is of interest [71], and a major application of BoP + JS is to select the training dataset leading to the best performance on a test set. This metric has the same range, where a value closer to -1 or 1 indicates a stronger negative or positive correlation, respectively, and 0 means no correlation.

# 4.2. Evaluation

Strong correlation: A training set is more suitable for a given test set if it has a small BoP + JS. Fig. 2 shows the correlation study on ClipArt, Painting, Real and Sketch. We observe strong Pearson's correlations $(|r| > 0.95)$, Spearman's rank correlations $(|\rho| > 0.93)$ and relatively high weighted Kendall's correlations $(|\tau_w| > 0.84)$ on the four test sets. This suggests that BoP + JS is stable and useful across test sets. Table 2 compares the average correlation strength of BoP + JS with Fréchet distance (FD) [27], maximum mean discrepancy (MMD) [33] and kernel inception distance (KID) [11]. They use the same image features as BoP; according to their formulae, the mean and covariance of these features are used for distance computation. We see that BoP + JS has the highest average correlation scores on the six test sets $(|r| = 0.961$, $|\rho| = 0.929$ and $|\tau_w| = 0.840)$. On average, BoP + JS is superior in assessing training set suitability for a test set without any training procedure.

Impact of codebook size is shown in Fig. 3. We construct codebooks of different sizes within approximately one order of magnitude around 345. We find that the three correlation scores first increase and then become stable as the codebook size grows. This indicates that the performance of BoP + JS is overall consistent across codebook sizes.

![](images/95aed49b03c7f756627bc5a2bc9af9e0427f44b529d54773a3deefe245f6a65b.jpg)

![](images/5643a69bbc88e3d4d9a753f5785172c67698b9964d7b4f8d56690ca73bce5432.jpg)
Figure 2. Correlation study for training suitability of datasets. We report the correlation strength between BoP + JS and model classification accuracy on four test domains of DomainNet: ClipArt, Painting, Real and Sketch. The model architecture is ResNet-101. Each dot denotes a model trained on a training set of DomainNet. We mark training domains (e.g., ClipArt) by different shapes and transformation operations (e.g., AugMix) by different colors. The straight lines are fit with robust linear regression [41].
We consistently observe high correlation results: Pearson's correlation $(|r| > 0.95)$, Spearman's correlation $(|\rho| > 0.93)$ and weighted Kendall's correlation $(|\tau_w| > 0.84)$. This suggests that BoP + JS is predictive of the suitability of a training set.

![](images/0386ccc80c9f44dfd12666803e30c349d2ba213c099709ceee4c3972e2c9dc26.jpg)

![](images/19d3fe321a43c59bd2607bc43646f863c24e6eb1dbe637e93448cef3667208f7.jpg)

![](images/8220335a4926c5656209fb26dbfb4c0cbb1fdfd8bc794248aa9badf98a5ac04e.jpg)

![](images/0d07af07218366662e123a19c2b4bd0df58a255cf0d1b79e074f809502fe6e86.jpg)

![](images/372efe534f400d85614618c9636645bf3dbfc4ce8b181ae6c02f6007fffd42c5.jpg)

![](images/024137bd89d4a2148c03aca8c6f37dd30fa586d2abb7999a397e6531ef3b5631.jpg)
Figure 3. Impact of codebook size on correlation strength for ResNet-101 on four test domains: ClipArt, Painting, Real and Sketch. For example, on the Real domain, correlation scores $|\rho|$, $|r|$ and $|\tau_w|$ are relatively low under a small size and remain stably high when the size is greater than 400.

![](images/4a5dcf19bff65ebb62bb994a02a31bdeea36c09dbb093f5201e1a943fa64cda5.jpg)

Correlation study with a different model architecture. We additionally validate the robustness of BoP with ResNet-34 and a codebook of size 1000. As shown in Table 2, we compare the average correlation scores of BoP + JS, FD, MMD and KID. We see that BoP + JS performs consistently across the two models and remains preferable for characterizing training suitability.

# 5. Assessing Test Set Difficulty without Labels

In the task of test set difficulty assessment, we are provided with a labeled training set and a set of unlabeled datasets for testing. Given a classifier trained on the training set, the goal is to estimate the model accuracy on these test sets without any data annotations. In this section, we first show that the dataset distance measured by BoP + JS exhibits a strong negative correlation with classifier accuracy. We then demonstrate that an accuracy predictor based on the BoP representation gives accurate performance estimates compared to state-of-the-art methods.

# 5.1. Experimental Settings

Correlation study under the CIFAR-10 setup. We conduct a correlation study by comparing BoP + JS with classifier accuracy. Following the same setup as [21], we use a series of datasets sharing the same label space (but usually with distribution shift) with CIFAR-10 [49]. Specifically, we train a ResNet-44 classifier [35] on the training set of CIFAR-10, which consists of 50,000 images from 10 classes. Here, we use the CIFAR-10-C benchmark [37] for the correlation study, which contains different types of corruptions with 5 levels of severity, including per-pixel noise, blurring, synthetic weather effects, and digital transforms. Then, for each dataset, we compute its BoP vector, its BoP + JS to the CIFAR-10 training set, and the classifier accuracy. In addition to ResNet-44, we also study RepVGG-A1 [22], VGG-16-BN [72] and MobileNet-V2 [70].

Predicting classification accuracy under the CIFAR-10 setup. We train a regressor that takes as input the BoP representation and outputs classification accuracy. The regressor is a neural network with 3 fully connected layers and is trained on CIFAR-10-C (the regression training set); a minimal sketch of such a regressor is given after Table 3.
We evaluate accuracy prediction on CIFAR-10.1 [69], CIFAR-10.2 [69] and CIFAR-10.2-$\bar{C}$ [58]. The former two are real-world datasets with natural shift, while the latter is manually corrupted with the synthetic shifts of [58]. Specifically, we add 10 types of unseen and unique corruptions, such as warps, blurs, color distortions and noise additions, with 5 severity levels, to CIFAR-10.2. Note that these corruptions have no overlap with those in CIFAR-10-C [58].

![](images/114a0f7deadcda905a7b3d8b52c58f98e9d3ce35f4f318968c9d0d4f2b4fcac5.jpg)

![](images/e3da439484b5169a327ac2fdf0d47882f557f84145d690c13fd972e0756eb678.jpg)

![](images/2b69af6e1794263a51140d7ffbb1d30e5b4ae43439e9cba6ffbe19ed4ef07647.jpg)

![](images/04cfb99844c04a664155c54583e873e2c7606978a5c1800a3b719884868423a4.jpg)

![](images/f04c9c9a95bb02dacba75c08fa5d445492eda752c47f895aec31a1b307c913fc.jpg)
Figure 4. Correlation between the train-test distance measured by BoP + JS and model accuracy. Top: Correlation study under the CIFAR-10 setup using ResNet-44, RepVGG-A1, VGG-16-BN and MobileNet-V2. Each data point denotes a dataset from CIFAR-10-C. Bottom: Correlation study under the ImageNet setup using EfficientNet-B1, DenseNet-121, Inception-V4 and ViT-Base-16. ImageNet-C datasets are used as test sets. The straight lines are fit with robust linear regression [41]. Under both setups, we observe a strong Spearman's rank correlation $(|\rho| > 0.98)$ between BoP + JS and model accuracy.

![](images/7ec349e4283c573447053b3cd207667876f6ca26f0753912b835be7cad463ca4.jpg)

![](images/25f24f8e80a721fc770a535be332d7efa631d636219482958ec1a09be9a8c349.jpg)

![](images/da8fda1eb4dcc90dc0c5c19bbd31ea26b60a823346a9746205f4496c89ac2c79.jpg)

For the above, we extract image features (the output of the penultimate layer of ResNet-44) from the CIFAR-10 training set. We construct a codebook by dividing the features into 80 clusters with k-means.

Correlation study under the ImageNet setup. We use a DenseNet-121 [40] classifier trained on the ImageNet training set. We employ a series of datasets from the ImageNet-C benchmark [36] on which the classifier is tested. ImageNet-C uses the same types of corruptions as CIFAR-10-C. We construct a codebook of size 1000 on the ImageNet training set, from which image features are extracted by the penultimate layer of DenseNet-121. We project each dataset in ImageNet-C onto the codebook and obtain their BoP representations. To examine the linear correlation, we calculate the BoP + JS between each ImageNet-C dataset and the training set, and compute the classification accuracy. We also use EfficientNet-B1 [78], Inception-V4 [77] and ViT-Base-16 [23] to repeat the above procedure for the correlation study.

Evaluation metric. As in Section 4.1, we use Pearson's correlation $r$ and Spearman's rank correlation $\rho$ to measure the linearity and monotonicity, respectively, between the BoP-based dataset distance and model accuracy. To evaluate the effectiveness of accuracy estimation, we use the root mean squared error (RMSE) between the estimated accuracy and the ground-truth accuracy across all test sets. A larger RMSE means a less accurate prediction, and vice versa.

Compared methods. We compare our system with four existing ones. 1) Prediction score: it estimates model accuracy using the maximum of the softmax output (i.e., the confidence score).
An image with a confidence score greater than a predefined threshold $\tau \in [0,1]$ is considered correctly predicted. We select two thresholds ($\tau = 0.8$ and $0.9$). 2) Difference of confidence (DoC) [34] trains a linear regressor mapping average confidence to classifier accuracy; 3) Average thresholded confidence with maximum confidence score function (ATC-MC) [31] calculates a threshold on the CIFAR-10 validation set and regards an image with a confidence score higher than the threshold as correctly classified; 4) Network regression $(\mu + \sigma + \mathrm{FD})$ [21] trains a neural network that takes as input the feature mean, covariance and Fréchet distance between the set of interest and the training set, and outputs model accuracy. All methods, if applicable, are compared under the same conditions as our system, e.g., the same classification training set and regression training set.

# 5.2. Evaluation

Strong correlation: A test set is difficult (low accuracy) if it is dissimilar to the training set as measured by BoP + JS. Fig. 4 presents the correlation study for the two setups and various classifiers. We observe a very high Spearman's rank correlation $(|\rho| > 0.99)$ in all scenarios. This indicates that classification accuracy is highly correlated with the JS divergence between the BoP representations of the training and test sets. That is, test accuracy drops in proportion to the distance between the given training set and a test set. The results demonstrate that BoP + JS between training and test sets is an effective indicator of classification accuracy. More studies are presented in the supplementary materials.

Table 3. Method comparison in predicting classifier accuracy under the CIFAR-10 setup. We compare four methods: the prediction score-based method with a hard threshold $\tau$, neural network regression based on feature statistics $(\mu + \sigma + \mathrm{FD})$ [20], average thresholded confidence with maximum confidence score function (ATC-MC) [30] and difference of confidences (DoC) [34]. We use CIFAR-10.1 and CIFAR-10.2 (both real-world) and CIFAR-10.2-$\bar{C}$ (manually corrupted) as unseen test sets for accuracy prediction. We use RMSE (%) to indicate the precision of the estimates. In each column, we compare our method with the best of the competing ones. We report the average results of five runs.
| Method | CIFAR-10.1 | CIFAR-10.2 | CIFAR-10.2-$\bar{C}$ (50): Severity 1 | Severity 2 | Severity 3 | Severity 4 | Severity 5 | Overall |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Prediction score ($\tau = 0.8$) | 4.899 | 4.800 | 10.127 | 12.869 | 16.809 | 21.427 | 24.371 | 17.910 |
| Prediction score ($\tau = 0.9$) | 0.297 | 0.550 | 3.638 | 5.078 | 8.048 | 11.804 | 14.108 | 9.404 |
| ATC-MC [30] | 2.650 | 2.672 | 3.080 | 4.306 | 7.108 | 11.015 | 13.040 | 8.601 |
| DoC [34] | 0.490 | 0.263 | 2.247 | 2.916 | 5.117 | 9.012 | 6.637 | 5.744 |
| $\mu + \sigma + \mathrm{FD}$ [21] | 0.455 | 0.561 | 5.875 | 5.823 | 4.724 | 4.908 | 6.486 | 5.602 |
| BoP ($K = 80$) | 0.218 | 0.122 | 2.458 | 2.818 | 3.730 | 5.836 | 6.451 | 4.551 |
| BoP ($K = 100$) | 0.186 | 0.124 | 2.849 | 2.808 | 3.548 | 4.025 | 4.777 | 3.663 |
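For context, below is a hedged PyTorch sketch of the accuracy regressor described in Sec. 5.1: a 3-layer fully connected network mapping a $K$-dimensional BoP histogram to an accuracy estimate. The hidden width, optimizer, learning rate and epoch count are our assumptions, not values reported in the paper.

```python
import torch
import torch.nn as nn


class BoPAccuracyRegressor(nn.Module):
    """3 fully connected layers: K-dim BoP histogram -> scalar accuracy estimate."""
    def __init__(self, k: int, hidden: int = 256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(k, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, bop: torch.Tensor) -> torch.Tensor:
        return self.net(bop).squeeze(-1)


def fit(model: nn.Module, bops: torch.Tensor, accs: torch.Tensor,
        epochs: int = 200, lr: float = 1e-3) -> nn.Module:
    """Fit on (BoP histogram, accuracy) pairs from the regression training set."""
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()
    for _ in range(epochs):
        opt.zero_grad()
        loss = loss_fn(model(bops), accs)
        loss.backward()
        opt.step()
    return model
```

In use, the training pairs would be the BoP vectors and ground-truth accuracies of the CIFAR-10-C regression training set; at test time the regressor is applied to the BoP vector of an unseen test set.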
![](images/862be20553eceff7e6b1353d88c7c4c9743ff9cdb50fb1b180001579e19e7436.jpg)
Figure 5. Impact of codebook size on correlation strength on CIFAR-10-C. Correlation scores $|\rho|$ and $|r|$ are relatively low under a small size and become stably high when the size is greater than 20 for all four model architectures.

Effectiveness of the BoP representation in predicting classification accuracy on various unseen test sets. After performing the correlation study, we train a neural network regressor on CIFAR-10-C and test it on a series of test sets. Results are summarized in Table 3. We have the following observations. First and foremost, the BoP representation achieves the best accuracy prediction performance, evidenced by the lowest RMSE across all four test scenarios. For example, on the test sets of CIFAR-10.2-$\bar{C}$, the RMSE of our method is 4.551, which is 1.051 lower than that of the second best method [21]. This clearly validates the effectiveness of the BoP representation. Second, we observe that the "Prediction score" method is unstable. While it has good results under $\tau = 0.9$ on the CIFAR-10.1 and CIFAR-10.2 datasets, it is generally inferior to the competing methods in other test scenarios. Our observation is similar to [21], suggesting that a more robust threshold selection method is needed for this approach. Third, although BoP has a slightly higher RMSE than DoC on Severity 1 of CIFAR-10.2-$\bar{C}$ (2.458 vs. 2.247), we stress that BoP is overall more stable and effective on the real-world datasets and the other severity levels of the synthetic datasets.

Impact of codebook size is summarized in Fig. 5 under the CIFAR-10 setup. We conduct the study using different sizes on four classifiers. We observe that the correlation scores first increase and then become stable when the codebook size is larger than 20. These results serve as validation and help us use competitive and stable codebook sizes in Table 3.

# 6. Conclusion

This work introduces a bag-of-prototypes (BoP) dataset representation to vectorize visual datasets. It first computes a codebook composed of clustered prototypes and then a prototype histogram for a dataset. The BoP vector considers the underlying local semantic distribution of a dataset and is thus more discriminative than global dataset statistics. Specifically, when used in conjunction with the JS divergence, the proposed descriptor better captures the underlying relationship across datasets. This advantage is validated by its promising results on two dataset-level tasks: assessing training set suitability and test set difficulty. This work establishes the baseline usage of the BoP scheme, and more investigations and applications will be made in future work.

# Acknowledgements

We thank all anonymous reviewers for their constructive comments in improving this paper. This work was supported by the ARC Discovery Project (DP210102801).

# References

[1] Alessandro Achille, Michael Lam, Rahul Tewari, Avinash Ravichandran, Subhransu Maji, Charless C Fowlkes, Stefano Soatto, and Pietro Perona. Task2vec: Task embedding for meta-learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6430-6439, 2019. 1, 2, 5
[2] David Acuna, Guojun Zhang, Marc T Law, and Sanja Fidler. f-domain adversarial learning: Theory and algorithms. In International Conference on Machine Learning, pages 66-75, 2021. 2
[3] Andrea Agostinelli, Michal Pandy, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. How stable are transferability metrics evaluations?
In European Conference on Computer Vision, pages 303-321. Springer, 2022. 4 +[4] Andrea Agostinelli, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. Transferability metrics for selecting source model ensembles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7936-7946, 2022. 4 +[5] David Alvarez-Melis and Nicolo Fusi. Geometric dataset distances via optimal transport. In Advances in Neural Information Processing Systems, pages 21428-21439, 2020. 2 +[6] Lora Aroyo, Matthew Lease, Praveen Paritosh, and Mike Schaekermann. Data excellence for ai: why should you care? Interactions, 29(2):66-69, 2022. 1, 2 +[7] Robert Baldock, Hartmut Maennel, and Behnam Neyshabur. Deep learning through the lens of example difficulty. Advances in Neural Information Processing Systems, 34:10876-10889, 2021. 2 +[8] Herbert Bay, Tinne Tuytelaars, and Luc Van Gool. Surf: Speeded up robust features. In European Conference on Computer Vision, pages 404-417. Springer, 2006. 3 +[9] Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Vaughan. A theory of learning from different domains. Machine Learning, 79:151-175, 2010. 2 +[10] Shai Ben-David, John Blitzer, Koby Crammer, and Fernando Pereira. Analysis of representations for domain adaptation. In Advances in Neural Information Processing Systems, pages 137-144, 2006. 2 +[11] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 4, 5 +[12] Barry Boots, Kokichi Sugihara, Sung Nok Chiu, and Atsuyuki Okabe. Spatial tessellations: concepts and applications of voronoi diagrams. 2009. 4 +[13] Jiefeng Chen, Frederick Liu, Besim Avci, Xi Wu, Yingyu Liang, and Somesh Jha. Detecting errors and estimating accuracy on unlabeled data with self-training ensembles. In Advances in Neural Information Processing Systems, 2021. 2 +[14] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2, 2004. 3, 5 + +[15] Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019. 5 +[16] Ido Dagan, Lillian Lee, and Fernando Pereira. Similarity-based methods for word sense disambiguation. In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics and Eighth Conference of the European Chapter of the Association for Computational Linguistics, pages 56-63, 1997. 3 +[17] Julie Delon and Agnes Desolneux. A wasserstein-type distance in the space of gaussian mixture models. SIAM Journal on Imaging Sciences, 13(2):936-970, 2020. 2 +[18] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 1, 5 +[19] Weijian Deng, Stephen Gould, and Liang Zheng. What does rotation prediction tell us about classifier accuracy under varying testing environments? In International Conference on Machine Learning, 2021. 2 +[20] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15069-15078, 2021. 
1, 2, 8 +[21] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 6, 7, 8 +[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13733–13742, 2021. 6 +[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In Proceedings of the International Conference on Learning Representations, 2021. 7 +[24] Qiang Du, Vance Faber, and Max Gunzburger. Centroidal voronoi tessellations: Applications and algorithms. SIAM review, 41(4):637-676, 1999. 4 +[25] Sabri Eyuboglu, Bojan Karlaš, Christopher Ré, Ce Zhang, and James Zou. dcbench: A benchmark for data-centric ai systems. In Proceedings of the Sixth Workshop on Data Management for End-To-End Machine Learning, 2022. 1 +[26] L. Fei-Fei and P. Perona. A bayesian hierarchical model for learning natural scene categories. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 524–531, 2005. 3 +[27] Maurice Fréchet. Sur la distance de deux lois de probabilité. Comptes Rendus Hebdomadaires des Seances de L'Académie des Sciences, 244(6):689-692, 1957. 2, 4, 5 + +[28] David Freedman and Persi Diaconis. On the histogram as a density estimator: L 2 theory. Zeitschrift für Wahrscheinlichkeitstheorie und verwandte Gebiete, 57(4):453-476, 1981. 4 +[29] Jianglin Fu, Shikai Li, Yuming Jiang, Kwan-Yee Lin, Chen Qian, Chen Change Loy, Wayne Wu, and Ziwei Liu. Stylegan-human: A data-centric odyssey of human generation. In European Conference on Computer Vision, pages 1-19. Springer, 2022. 1 +[30] Saurabh Garg, Sivaraman Balakrishnan, Zachary C Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In International Conference on Learning Representations, 2022, 2, 8 +[31] Saurabh Garg, Sivaraman Balakrishnan, Zachary Chase Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In Proceedings of the International Conference on Learning Representations, 2022. 7 +[32] Amirata Ghorbani and James Zou. Data shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning, pages 2242-2251. PMLR, 2019. 1, 2 +[33] Arthur Gretton, Karsten Borgwardt, Malte Rasch, Bernhard Schölkopf, and Alex Smola. A kernel method for the two-sample-problem. In Advances in Neural Information Processing Systems, 2006. 2, 4, 5 +[34] Devin Guillery, Vaishaal Shankar, Sayna Ebrahimi, Trevor Darrell, and Ludwig Schmidt. Predicting with confidence on unseen distributions. In Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, pages 1134-1144, 2021. 2, 7, 8 +[35] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. 5, 6 +[36] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 
5, 7
[37] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 6
[38] Dan Hendrycks, Norman Mu, Ekin D Cubuk, Barret Zoph, Justin Gilmer, and Balaji Lakshminarayanan. Augmix: A simple data processing method to improve robustness and uncertainty. In Proceedings of the International Conference on Learning Representations, 2020. 5
[39] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems, 2017. 2
[40] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4700-4708, 2017. 7
[41] Peter J Huber. Robust statistics. In International Encyclopedia of Statistical Science, pages 1248-1251. Springer, 2011. 6, 7
[42] Hervé Jégou and Ondrej Chum. Negative evidences and co-occurrences in image retrieval: The benefit of pca and whitening. In European Conference on Computer Vision, pages 774-787, 2012. 3
[43] Hervé Jégou, Matthijs Douze, Cordelia Schmid, and Patrick Pérez. Aggregating local descriptors into a compact image representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3304-3311, 2010. 3
[44] Junguang Jiang, Baixu Chen, Bo Fu, and Mingsheng Long. Transfer-learning-library. https://github.com/thuml/Transfer-Learning-Library, 2020. 5
[45] Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, and Michael C Mozer. Characterizing structural regularities of labeled data in overparameterized models. arXiv preprint arXiv:2002.03206, 2020. 2
[46] Thorsten Joachims. Text categorization with support vector machines: Learning with many relevant features. In European Conference on Machine Learning, pages 137-142, 1998. 3
[47] Thorsten Joachims, Dayne Freitag, Tom Mitchell, et al. Webwatcher: A tour guide for the world wide web. In International Joint Conference on Artificial Intelligence, pages 770-777. CiteSeer, 1997. 3
[48] Alexander B. Jung. imgaug. https://github.com/aleju/imgaug, 2018. [Online; accessed 30-Oct-2018]. 5
[49] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009. 4, 6
[50] David D Lewis and William A Gale. A sequential algorithm for training text classifiers. In Proceedings of the Seventeenth Annual International ACM-SIGIR Conference on Research and Development in Information Retrieval, pages 3-12, 1994. 3
[51] Weixin Liang, Girmaw Abebe Tadesse, Daniel Ho, L Fei-Fei, Matei Zaharia, Ce Zhang, and James Zou. Advances, challenges and opportunities in creating data for trustworthy ai. Nature Machine Intelligence, 4(8):669-677, 2022. 1
[52] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European Conference on Computer Vision, pages 740-755, 2014. 1
[53] D.G. Lowe. Object recognition from local scale-invariant features. In Proceedings of the IEEE International Conference on Computer Vision, pages 1150-1157, 1999. 3
[54] James MacQueen et al. Some methods for classification and analysis of multivariate observations.
In Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability, volume 1, pages 281-297. Oakland, CA, USA, 1967. 3
[55] Christopher Manning and Hinrich Schutze. Foundations of statistical natural language processing. MIT press, 1999. 3
[56] Mark Mazumder, Colby Banbury, Xiaozhe Yao, Bojan Karlas, William Gaviria Rojas, Sudnya Diamos, Greg Diamos, Lynn He, Douwe Kiela, David Jurado, et al. Dataperf: Benchmarks for data-centric ai development. arXiv preprint arXiv:2207.10062, 2022. 1, 2
[57] Andrew McCallum, Kamal Nigam, et al. A comparison of event models for naive bayes text classification. In AAAI Workshop on Learning for Text Categorization, pages 41-48, 1998. 3
[58] Eric Mintun, Alexander Kirillov, and Saining Xie. On interaction between augmentations and corruptions in natural corruption robustness. In Advances in Neural Information Processing Systems, 2021. 6
[59] David Nister and Henrik Stewenius. Scalable recognition with a vocabulary tree. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2161-2168, 2006. 3
[60] Michal Pándy, Andrea Agostinelli, Jasper Uijlings, Vittorio Ferrari, and Thomas Mensink. Transferability estimation using bhattacharyya class separability. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9172-9182, 2022. 4
[61] Amandalynne Paullada, Inioluwa Deborah Raji, Emily Bender, Emily Denton, and Alex Hanna. Data and its (dis)contents: A survey of dataset development and use in machine learning research. Patterns, 2021. 1
[62] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1406-1415, 2019. 1, 2, 5
[63] Xingchao Peng, Yichen Li, and Kate Saenko. Domain2vec: Domain embedding for unsupervised domain adaptation. In European Conference on Computer Vision, pages 756-774, 2020. 1, 2
[64] Florent Perronnin and Christopher Dance. Fisher kernels on visual vocabularies for image categorization. In Proceedings of the IEEE International Conference on Computer Vision, pages 1-8, 2007. 3
[65] Florent Perronnin, Jorge Sánchez, and Thomas Mensink. Improving the fisher kernel for large-scale image classification. In European Conference on Computer Vision, pages 143-156, 2010. 3
[66] James Philbin, Ondrej Chum, Michael Isard, Josef Sivic, and Andrew Zisserman. Object retrieval with large vocabularies and fast spatial matching. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3
[67] Vladislav Polianskii, Giovanni Luca Marchetti, Alexander Kravberg, Anastasiia Varava, Florian T Pokorny, and Danica Kragic. Voronoi density estimator for high-dimensional data: Computation, compactification and convergence. In Uncertainty in Artificial Intelligence, pages 1644-1653. PMLR, 2022. 4
[68] Anand Rajaraman and Jeffrey David Ullman. Mining of massive datasets. Cambridge University Press, 2011. 3
[69] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do cifar-10 classifiers generalize to cifar-10? arXiv preprint arXiv:1806.00451, 2018. 6
[70] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4510-4520, 2018. 6
[71] Grace S Shieh.
A weighted kendall's tau statistic. Statistics & Probability Letters, 39(1):17-24, 1998. 5 +[72] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6 +[73] Sivic and Zisserman. Video google: a text retrieval approach to object matching in videos. In Proceedings of the IEEE International Conference on Computer Vision, pages 1470-1477, 2003. 3 +[74] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of frustratingly easy domain adaptation. In Proceedings of the AAAI Conference on Artificial Intelligence, 2016. 1, 2 +[75] Baochen Sun and Kate Saenko. Deep coral: Correlation alignment for deep domain adaptation. In European Conference on Computer Vision, pages 443-450, 2016. 1, 2 +[76] Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A Smith, and Yejin Choi. Dataset cartography: Mapping and diagnosing datasets with training dynamics. arXiv preprint arXiv:2009.10795, 2020. 2 +[77] Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander A Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Thirty-first AAAI Conference on Artificial Intelligence, 2017. 7 +[78] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114, 2019. 7 +[79] Yang Tan, Yang Li, and Shao-Lun Huang. Otce: A transferability metric for cross-domain cross-task representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15779-15788, 2021. 2 +[80] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forgetting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 2 +[81] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 2 +[82] Joaquin Vanschoren. Meta-learning: A survey. arXiv preprint arXiv:1810.03548, 2018. 1, 2 +[83] Sebastiano Vigna. A weighted correlation index for rankings with ties. In Proceedings of International Conference on World Wide Web, pages 1166-1176, 2015. 5 +[84] Wei Ying, Yu Zhang, Junzhou Huang, and Qiang Yang. Transfer learning via learning to transfer. In International Conference on Machine Learning, pages 5085-5094. PMLR, 2018. 2 +[85] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael Jordan. Bridging theory and algorithm for domain adaptation. In International Conference on Machine Learning, pages 7404-7413, 2019. 2 + +[86] Liang Zheng, Yi Yang, and Qi Tian. Sift meets cnn: A decade survey of instance retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(5):1224-1244, 2017. 3, 5 +[87] Yujie Zhong, Relja Arandjelovic, and Andrew Zisserman. Ghostvlad for set-based face recognition. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part II 14, pages 35–50. Springer, 2019. 
2 \ No newline at end of file diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/images.zip b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ad81b922b420fde6321f4989ed3db04c25f3a509 --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:107332703081fc88ff9069f48b8110958349031bf9878dab4e49ae2ea091b589 +size 427634 diff --git a/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/layout.json b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d08a3dd273a3441bb187188514cec5e1d1659646 --- /dev/null +++ b/2023/A Bag-of-Prototypes Representation for Dataset-Level Applications/layout.json @@ -0,0 +1,9769 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 89, + 103, + 504, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 103, + 504, + 121 + ], + "spans": [ + { + "bbox": [ + 89, + 103, + 504, + 121 + ], + "type": "text", + "content": "A Bag-of-Prototypes Representation for Dataset-Level Applications" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "spans": [ + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "text", + "content": "Weijie " + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "inline_equation", + "content": "\\mathrm{Tu}^{1}" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "text", + "content": " Weijian Deng" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "text", + "content": " Tom Gedeon" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "text", + "content": " Liang Zheng" + }, + { + "bbox": [ + 145, + 142, + 447, + 157 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "spans": [ + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "type": "text", + "content": "Australian National University " + }, + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 166, + 157, + 427, + 171 + ], + "type": "text", + "content": "Curtin University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 199, + 192, + 212 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 199, + 192, + 212 + ], + "spans": [ + { + "bbox": [ + 143, + 199, + 192, + 212 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "text", + "content": "This work investigates dataset vectorization for two dataset-level tasks: 
assessing training set suitability and test set difficulty. The former measures how suitable a training set is for a target domain, while the latter studies how challenging a test set is for a learned model. Central to the two tasks is measuring the underlying relationship between datasets. This needs a desirable dataset vectorization scheme, which should preserve as much discriminative dataset information as possible so that the distance between the resulting dataset vectors can reflect dataset-to-dataset similarity. To this end, we propose a bag-of-prototypes (BoP) dataset representation that extends the image-level bag consisting of patch descriptors to dataset-level bag consisting of semantic prototypes. Specifically, we develop a codebook consisting of " + }, + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "text", + "content": " prototypes clustered from a reference dataset. Given a dataset to be encoded, we quantize each of its image features to a certain prototype in the codebook and obtain a " + }, + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 224, + 289, + 523 + ], + "type": "text", + "content": "-dimensional histogram. Without assuming access to dataset labels, the BoP representation provides rich characterization of the dataset semantic distribution. Furthermore, BoP representations cooperate well with Jensen-Shannon divergence for measuring dataset-to-dataset similarity. Although very simple, BoP consistently shows its advantage over existing representations on a series of benchmarks for two dataset-level tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 548, + 128, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 128, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 128, + 562 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 570, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 570, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 570, + 288, + 714 + ], + "type": "text", + "content": "Datasets are fundamental in machine learning research, forming the basis of model training and testing [18, 51, 52, 61]. While large-scale datasets bring opportunities in algorithm design, there lack proper tools to analyze and make the best use of them [6, 51, 56]. Therefore, as opposed to traditional algorithm-centric research where improving models is of primary interest, the community has seen a growing interest in understanding and analyzing the data used for developing models [51, 56]. Recent examples of such goal include data synthesis [29], data sculpting [25, 51], and data valuation [6, 32, 56]. These tasks typically focus on individual sample of a dataset. In this work, we aim to understand" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 201, + 510, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 201, + 510, + 213 + ], + "spans": [ + { + "bbox": [ + 306, + 201, + 510, + 213 + ], + "type": "text", + "content": "nature of datasets from a dataset-level perspective." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 215, + 545, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 215, + 545, + 396 + ], + "spans": [ + { + "bbox": [ + 304, + 215, + 545, + 396 + ], + "type": "text", + "content": "This work considers two dataset-level tasks: suitability in training and difficulty in testing. First, training set suitability denotes whether a training set is suitable for training models for a target dataset. In real-world applications, we are often provided with multiple training sets from various data distributions (e.g., universities and hospitals). Due to distribution shift, their trained models have different performance on the target dataset. Then, it is of high practical value to select the most suitable training set for the target dataset. Second, test set difficulty means how challenging a test set is for a learned model. In practice, test sets are usually unlabeled and often come from different distributions than that of the training set. Measuring the test set difficulty for a learned model helps us understand the model reliability, thereby ensuring safe model deployment." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 399, + 546, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 399, + 546, + 542 + ], + "spans": [ + { + "bbox": [ + 304, + 399, + 546, + 542 + ], + "type": "text", + "content": "The core of the two dataset-level tasks is to measure the relationship between datasets. For example, a training set is more suitable for learning a model if it is more similar to the target dataset. To this end, we propose a vectorization scheme to represent a dataset. Then, the relationship between a pair of datasets can be simply reflected by the distance between their representations. Yet, it is challenging to encode a dataset as a representative vector, because (i) a dataset has a different cardinality (number of images) and (ii) each image has its own semantic content (e.g., category). It is thus critical to find an effective way to aggregate all image features to uncover dataset semantic distributions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 546, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 547, + 714 + ], + "type": "text", + "content": "In the literature, some researchers use the first few moments of distributions such as feature mean and co-variance to represent datasets [20, 62, 74, 75, 82]. While being computational friendly, these methods do not offer sufficiently strong descriptive ability of a dataset, such as class distributions, and thus have limited effectiveness in assessing attributes related to semantics. There are also some methods learn task-specific dataset representations [1, 63]. For example, given a dataset with labels and a task loss function, Task2Vec [1] computes an embedding based on estimates of the Fisher information matrix associated with a probe network's parameters. While these task-specific representations are able to predict task similarities, they are not suitable for characterizing dataset properties of interest. 
They" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 758 + ], + "type": "text", + "content": "2881" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 131 + ], + "type": "text", + "content": "require training a network on the specific task [1] or on multiple datasets [63], so they are not effective in assessing the training set suitability. Additionally, they require image labels for the specific task, so they cannot be used to measure the difficulty of unlabeled test sets." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 133, + 287, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 133, + 287, + 275 + ], + "spans": [ + { + "bbox": [ + 46, + 133, + 287, + 275 + ], + "type": "text", + "content": "In this work, we propose a simple and effective bag-of-prototypes (BoP) dataset representation. Its computation starts with partitioning the image feature space into semantic regions through clustering, where the region centers, or prototypes, form a codebook. Given a new dataset, we quantize its features to their corresponding prototypes and compute an assignment histogram, which, after normalization, gives the BoP representation. The dimensionality of BoP equals the codebook size, which is usually a few hundred and is considered memory-efficient. Meanwhile, the histogram computed on the prototypes is descriptive of the dataset semantic distribution." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 276, + 287, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 276, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 287, + 407 + ], + "type": "text", + "content": "Apart from being low dimensional and semantically rich, BoP has a few other advantages. First, while recent works in task-specific dataset representation usually require full image annotations and additional learning procedure [1,63], the computation of BoP does not rely on any. It is relatively efficient and allows for unsupervised assessment of dataset attributes. Second, BoP supports dataset-to-dataset similarity measurement through Jensen-Shannon divergence. We show in our experiment that this similarity is superior to commonly used metrics such as Fréchet distance [27] and maximum mean discrepancy [33] in two dataset-level tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 418, + 134, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 418, + 134, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 418, + 134, + 430 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 438, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 438, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 438, + 287, + 677 + ], + "type": "text", + "content": "Dataset representations. A common practice is to use simple and generic statistics as dataset representations [20, 62, 74, 75, 82]. For example, Peng et al. [62] use the first moment to represent a dataset. Deng et al. [20] use global feature mean and co-variance as dataset representations. Vanschoeren et al. [82] find dataset cardinality (the number of images/classes) useful to encode a dataset. These methods have limited descriptive ability, whereas BoP is more semantically descriptive. Moreover, it is feasible to learn a task-specific dataset representation [1, 63, 84, 87]. For example, Ying et al. [84] learn transfer skills from previous transfer learning experiences for future target tasks. Achille et al. [1] propose to learn a task embedding based on the estimate of Fisher information matrix associated with a task loss. Compared with these task-specific representations, BoP is hand-crafted, avoiding computation overheads incurred by end-to-end learning. It is thus efficient in measuring training set suitability without training any models. Moreover, BoP require no image labels, making it more suitable for assessing the difficulty of unlabeled test sets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Dataset-to-dataset similarity. We briefly review three strategies. First, some dataset similarity measures are developed in the context of domain adaptation [2, 9, 10, 85]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": "They typically depend on a loss function and hypothesis class, and use a supremum of that function class to quantify the similarity of datasets. (e.g., " + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{H}\\Delta \\mathcal{H}" + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": "-divergence [9], " + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": "-divergence [2], and " + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 304, + 72, + 545, + 312 + ], + "type": "text", + "content": "-distance [10]). Second, dataset distance can be computed based on optimal transport [5, 17, 79]. For example, the squared Wasserstein metric Fréchet distance [27] is widely used in comparing the distribution discrepancy of generated images with the distribution of real images [39]. 
To better leverage the geometric relationship between datasets, Alvarez et al. [5] use labels to guide optimal transport towards class-coherent matches. Third, existing dataset representations can be used to compute dataset distance [33, 62, 75, 81]. For example, maximum mean discrepancy (MMD) [33] computes the distance between mean elements of distributions on the probability space. Peng et al. [62] eliminate dataset discrepancy by matching datasets moments. CORAL [75] uses the second-order statistics of datasets to measure distance. This work is in the third category and uses JS divergence between BoP representations to calculate dataset-to-dataset similarity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 318, + 545, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 318, + 545, + 402 + ], + "spans": [ + { + "bbox": [ + 304, + 318, + 545, + 402 + ], + "type": "text", + "content": "Assessment of training dataset suitability. Recent works have focused on understanding the importance of individual training instances in training of neural networks [6,32,45,56]. For example, Data Shapley [32] and Consistency Score [45] are proposed to evaluate the value of each data instance. Some methods identify \"difficult\" instances based on the information of training dynamics [7,76,80]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 408, + 545, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 408, + 545, + 540 + ], + "spans": [ + { + "bbox": [ + 304, + 408, + 545, + 540 + ], + "type": "text", + "content": "Different from the above approaches, this work studies the suitability of an entire training set. Given multiple training datasets from different data distributions, the focus is to choose the most appropriate training dataset for the target domain. Dataset-to-dataset similarity can be used for this goal. Intuitively, if a training dataset has high similarity with a target dataset, the model trained on it is expected to be more performant and vice versa. In this work, we use BoP representation coupled with simple JS divergence to calculate dataset-to-dataset similarity and demonstrate its effectiveness in accessing training set suitability." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 545, + 713 + ], + "type": "text", + "content": "Assessing test set difficulty without ground truths. The goal of this task (also known as unsupervised accuracy estimation) is to predict the accuracy of a given model on various unlabeled test sets. Existing methods usually use a representation of the test set for accuracy prediction [13, 19, 20, 30, 34]. Normally this representation is derived from classifier outputs, such as image features [20], prediction logits [30], average softmax scores [34]. Then, regression is used to establish the relationship between this representation and model test accuracy under various testing environments. Compared with existing dataset features, the BoP representation better characterizes the semantic distribution of training and test sets and thus can be effectively used for model accuracy prediction." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2882" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 129, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 129, + 85 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 129, + 85 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 91, + 268, + 104 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 268, + 104 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 268, + 104 + ], + "type": "text", + "content": "3.1. Bag-of-Words Model Across Communities" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 110, + 287, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 110, + 287, + 217 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 287, + 217 + ], + "type": "text", + "content": "In natural language processing (NLP) and information retrieval, the Bag-of-Words (BoW) model [46, 47, 50, 57] vectorizes textual data as a word histogram. Specifically, for each word in the dictionary, its occurrences in a document are counted, which fills in the corresponding entry of the BoW feature. This word frequency vector is thus used to represent a document. Numerous improvements of the BoW feature were made in NLP, such as n-grams [47, 50] and term frequency-inverse document frequency [68]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 218, + 288, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 218, + 288, + 386 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 288, + 386 + ], + "type": "text", + "content": "In the early 2000s, the BoW representation was introduced to the computer vision (CV) community to encode hundreds or thousands of local image descriptors [8, 53] into a compact vector [73]. As there is no semantic codebook like in NLP, a visual codebook is constructed by performing clustering (e.g., k-means) on a collection of local image features, where the resulting clustering centers are called \"visual words\". Local image descriptors are quantized to their nearest cluster center so that a visual word histogram can be computed. This BoW histogram also have undergone extensive improvements in later years, such as Fisher vector [64, 65], vector of locally aggregated descriptors (VLAD) [43], and the use of principal component analysis and whitening [42]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 386, + 287, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 386, + 287, + 506 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 287, + 506 + ], + "type": "text", + "content": "Contribution statement. This paper contributes a baseline method in adopting the BoW idea study the two basic properties of a dataset. To this end, we propose to represent a dataset using its histogram over a series of prototypes. A comparison between the usage of BoW model in NLP, CV and our dataset-level research is shown in Table 1. 
Specifically, the BoP representation relies on clustering for codebook formation, has a relatively small codebook (depending on the richness of dataset semantics), and has semantically sensible codewords." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 514, + 266, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 266, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 266, + 527 + ], + "type": "text", + "content": "3.2. Bag-of-Prototypes Dataset Representation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": "Given a dataset " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{\\mathbf{x}_i\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": " is the number of images and a feature extractor " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathbf{F}(\\cdot)" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": " that maps an input image into a " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": "-dimensional feature " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "f \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": ", we extract a set of image features " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{F} := \\{\\mathbf{F}(\\mathbf{x}_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": ". While it is possible to directly use the dataset images (or features) as model input under small " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": ", it becomes prohibitively expensive when " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": " is large. We therefore focus on extracting useful semantic features of " + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 46, + 532, + 287, + 652 + ], + "type": "text", + "content": " by encoding its image features into a compact representation. Below we detail the necessary steps for computing the proposed BoP representation (refer Fig. 1)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "text", + "content": "Step I: Codebook generation. 
Given a reference dataset " + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_r = \\{\\mathbf{x}_i^r\\}_{i=1}^{N_r}" + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "text", + "content": ", we extract all of its image features " + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_r := \\{\\mathbf{F}(\\mathbf{x}_i^r)\\}_{i=1}^{N_r}" + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "text", + "content": " using a pretrained network, from which a codebook is constructed. Specifically, we adopt standard k-means clustering [54] to partition the feature space " + }, + { + "bbox": [ + 46, + 652, + 287, + 714 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 307, + 71, + 547, + 188 + ], + "blocks": [ + { + "bbox": [ + 307, + 71, + 547, + 188 + ], + "lines": [ + { + "bbox": [ + 307, + 71, + 547, + 188 + ], + "spans": [ + { + "bbox": [ + 307, + 71, + 547, + 188 + ], + "type": "table", + "html": "
<table>
<tr><th></th><th>BoW in NLP</th><th>BoW in CV</th><th>BoP</th></tr>
<tr><td>Encoded objects</td><td>Documents</td><td>Images</td><td>Datasets (a set of images)</td></tr>
<tr><td>Codewords in codebook</td><td>Words</td><td>Cluster centers of local descriptors</td><td>Prototypes of image features</td></tr>
<tr><td>Clustering?</td><td>No</td><td>Yes</td><td>Yes</td></tr>
<tr><td>Codewords semantics</td><td>Clear</td><td>Little</td><td>Sensible</td></tr>
<tr><td>Codebook size</td><td>&gt;10<sup>3</sup></td><td>10<sup>3</sup>-10<sup>6</sup></td><td>~10<sup>2</sup> (dataset dependent)</td></tr>
</table>
", + "image_path": "8cbef4372750199df233303ee4da52934471e12b37b8aef3deae0e253489455e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 198, + 547, + 243 + ], + "lines": [ + { + "bbox": [ + 304, + 198, + 547, + 243 + ], + "spans": [ + { + "bbox": [ + 304, + 198, + 547, + 243 + ], + "type": "text", + "content": "Table 1. Comparing BoP with BoW model in natural language processing (NLP) and computer vision (CV). The objective of BoW in NLP and CV is encoding texts and images respectively, while BoP is proposed to represent datasets." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "spans": [ + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": "into " + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": " clusters. Each of the " + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": " cluster centers is called a \"prototype\", because oftentimes each center mainly represents a certain semantic content. See Fig. 1 right for exemplar image of each prototype. The prototypes, or centers, constitute the codebook, denoted as " + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "inline_equation", + "content": "\\mathcal{C} = \\{\\mathbf{c}_i\\}_{i=1}^K" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 256, + 545, + 339 + ], + "type": "text", + "content": "-th prototype. Note that, the order of the prototypes is fixed in the codebook." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "spans": [ + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "Step II: Histogram computation. For a dataset to be encoded " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_e = \\{\\mathbf{x}_i^e\\}_{i=1}^{N_e}" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "N_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " is the number of images, we project it onto codebook " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " to compute its BoP representation. 
Specifically, after extracting image features " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_e := \\{\\mathbf{F}(\\mathbf{x}_i^e)\\}_{i=1}^{N_e}" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ", for each image feature, we compute its distance with all the " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " prototypes in the codebook, yielding " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " distances " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "d_1, \\ldots, d_k" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "d_i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " is the distance between an image feature and the " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "-th prototype. An image feature is quantized to prototype " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "d_i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " is the lowest among " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "d_1, \\ldots, d_k" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ". Following the quantization, we generate a " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "-dimensional one-hot encoding where the " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "-th entry is 1 and all the others are 0. Having computed the one-hot vectors for all the image features, we sum them which is then normalized by " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "N_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ", the number of images in " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "D_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ". 
This gives the histogram representation " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": ", or BoP representation, for " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "D_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " where the " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "-th entry indicates the density of features in " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "D_e" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": " belonging to prototype " + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 304, + 340, + 546, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 552, + 518, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 518, + 564 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 518, + 564 + ], + "type": "text", + "content": "3.3. Measuring Dataset-to-Dataset Similarity" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": "Similar to image / document retrieval where BoW vectors of instances are used for similarity comparison [14, 26, 59, 66, 73], this work uses the BoP representation to calculate dataset-to-dataset similarity. Specifically, given BoP representations " + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_x" + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_y" + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": " of two datasets " + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_x" + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_y" + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": ", we simply define their similarity " + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "inline_equation", + "content": "S_{x,y}" + }, + { + "bbox": [ + 304, + 570, + 545, + 665 + ], + "type": "text", + "content": " using Jensen-Shannon divergence (JS divergence), which is designed for histogram-based similarity measurement [16, 55]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Task-oriented similarity measure. We can build a universal codebook on a large-scale dataset following BoW model [14, 86]. 
By doing so, the resulting BoP representations are generic. We can also build a task-oriented code" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2883" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 102, + 79, + 492, + 281 + ], + "blocks": [ + { + "bbox": [ + 102, + 79, + 492, + 281 + ], + "lines": [ + { + "bbox": [ + 102, + 79, + 492, + 281 + ], + "spans": [ + { + "bbox": [ + 102, + 79, + 492, + 281 + ], + "type": "image", + "image_path": "15458d234ddfea33e58d1cb767cc770f7d3f294ee12622c7525847f7ef78d0c4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 286, + 547, + 353 + ], + "lines": [ + { + "bbox": [ + 46, + 286, + 547, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 286, + 547, + 353 + ], + "type": "text", + "content": "Figure 1. Workflow of BoP representation computation using CIFAR-10 [49] and one CIFAR-10 out-of-distribution (OOD) test set as an example. Top: We group image features of the reference dataset CIFAR-10 into 10 clusters, and the centers are called prototypes. The prototypes constitute the codebook of size 10. Bottom left: To encode the OOD test set, we project it onto the codebook by quantizing each image feature to its corresponding prototype. Lastly, we compute the histogram, i.e., BoP representation, of CIFAR-10 OOD test set. Bottom right: We regard dataset-to-dataset similarity as the Jensen-Shannon divergence between BoP histograms of CIFAR-10 OOD test set and reference dataset. With such similarity, we can measure the test set difficulty for the model trained on reference dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 359, + 289, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 359, + 289, + 504 + ], + "spans": [ + { + "bbox": [ + 46, + 359, + 289, + 504 + ], + "type": "text", + "content": "book on a reference dataset from a specific task to consider more task-oriented information. The latter is more suitable for the two dataset-level tasks considered in this work. For the task of training set suitability assessment, we use the target dataset as the reference for codebook generation to fully consider its semantic information. As a result, the JS divergence between BoP representations of the training set and the target dataset can well capture how a training set is similar to the target set. Similarly, for the task of test set difficulty assessment, we build codebook on the training set. This practice can effectively measure how an unlabeled test is similar to a given training set." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 514, + 119, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 119, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 119, + 525 + ], + "type": "text", + "content": "3.4. 
Discussion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "spans": [ + { + "bbox": [ + 46, + 533, + 287, + 629 + ], + "type": "text", + "content": "Working mechanism of BoP. Codebook generation of BoP can be viewed as Centroidal Voronoi Tessellations [24]. Specifically, the prototypes (cluster centers) of codebook tessellate the feature space into Voronoi cells. Then, histogram computation approximates a probability distribution function in the same way as the nonparametric histogram [12, 28, 67]. That is, the BoP representation reflects the distribution of a dataset in the feature space." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 288, + 715 + ], + "type": "text", + "content": "As shown in Fig. 1, the prototypes of reference dataset tessellate feature space into Voronoi cells. Based on this, we quantify the histogram of the reference dataset to represent its distribution. Given a new dataset, we conduct the same histogram calculation procedure and correspondingly capture its dataset distribution with the histogram. Then, we measure discrepancy of the two datasets by calculating JS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 545, + 419 + ], + "type": "text", + "content": "divergence between their histograms. Compared with common measures of dataset distance (e.g., FD [27], KID [11] and MMD [33]) that only reflect global structure (e.g., first few moments) of dataset distributions, BoP, collaborated with JS divergence, considers more local structures." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 423, + 546, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 423, + 546, + 567 + ], + "spans": [ + { + "bbox": [ + 304, + 423, + 546, + 567 + ], + "type": "text", + "content": "Training set suitability vs. transferability estimation. Two tasks relate but differ significantly: 1) Given an unlabeled target dataset and a pool of training datasets, the former aims to select the most suitable training set for the target. The latter assumes a labeled target dataset and a pool of models pretrained on a source dataset, with the goal of selecting the most suitable source model for the target without fine-tuning them all [3,4,60]; 2) Datasets in training set suitability are used for the same classification problem. In contrast, in transferability estimation, the problem in the target dataset (e.g., CIFAR-10 classification) is different from that of the source dataset (e.g. ImageNet classification)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 570, + 547, + 714 + ], + "type": "text", + "content": "Analysis of the number of prototypes in a codebook. The codebook size is a critical factor influencing the usefulness of the BoP. A small codebook means a coarser partition of feature space, where similar features will likely be in the same cluster, but dissimilar features may also be in the same cluster. 
Moreover, a large codebook provides a finer description of the space, where dissimilar features are quantized to different prototypes and more semantics are explored. According to our experiment results in Fig. 2 and Fig. 5, we find, reassuringly, BoP is robust against the codebook size: prototype number can deviate within a wide range around the true classes number (e.g., 345 for Domain-" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2884" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 266, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 266, + 84 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 266, + 84 + ], + "type": "text", + "content": "Net [62]) without significantly affecting performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 84, + 288, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 84, + 288, + 240 + ], + "spans": [ + { + "bbox": [ + 46, + 84, + 288, + 240 + ], + "type": "text", + "content": "Application scope and future directions. BoP is proposed to study the two dataset-level tasks, and the datasets considered in each task share the same label space. We may encounter some situations where we need to compare datasets with different label spaces (e.g., pre-training datasets selection [1]). In this case, one potential way is to build a universal codebook on a large-scale and representative dataset similar to BoW models [14, 86]. By doing so, the resulting BoP representations can encode diverse and sufficient semantics for comparing datasets across various label spaces. We view our BoP as a starting point to encode datasets. It would be interesting to study other dataset vectorization schemes and dataset-level tasks." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 250, + 285, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 250, + 285, + 264 + ], + "spans": [ + { + "bbox": [ + 47, + 250, + 285, + 264 + ], + "type": "text", + "content": "4. Comparing Training Suitability of Datasets" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 270, + 288, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 270, + 288, + 377 + ], + "spans": [ + { + "bbox": [ + 46, + 270, + 288, + 377 + ], + "type": "text", + "content": "This task studies dataset valuation where multiple training sets are provided by different data contributors. The goal is to select the most suitable training set (ideally without training) whose trained model performs the best on a target test set. In this section, we first validate that BoP, collaborated with JS divergence " + }, + { + "bbox": [ + 46, + 270, + 288, + 377 + ], + "type": "inline_equation", + "content": "(\\mathrm{BoP} + \\mathrm{JS})" + }, + { + "bbox": [ + 46, + 270, + 288, + 377 + ], + "type": "text", + "content": ", is predictive of dataset suitability for the target test set. Then, we show that BoP is robust when using a wide range of codebook sizes and different networks." 
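The BoP pipeline described above (Step I: codebook generation via k-means on reference features; Step II: quantization and normalized histogram computation; similarity via JS divergence between histograms) can be summarized in a minimal sketch. The snippet below is an illustration using scikit-learn and SciPy, assuming image features have already been extracted with a pretrained backbone; function names such as `build_codebook` and `bop_histogram` are ours for illustration and are not taken from the paper's released code.

```python
# Minimal BoP sketch (Sec. 3.2-3.3), assuming precomputed image features.
import numpy as np
from sklearn.cluster import KMeans
from scipy.spatial.distance import jensenshannon


def build_codebook(reference_feats: np.ndarray, k: int) -> KMeans:
    """Step I: cluster reference features into K prototypes (the codebook)."""
    return KMeans(n_clusters=k, n_init=10, random_state=0).fit(reference_feats)


def bop_histogram(feats: np.ndarray, codebook: KMeans) -> np.ndarray:
    """Step II: quantize each feature to its nearest prototype, then normalize."""
    assignments = codebook.predict(feats)  # nearest prototype per image
    hist = np.bincount(assignments, minlength=codebook.n_clusters).astype(float)
    return hist / len(feats)               # normalize by the number of images


def bop_js_divergence(h_x: np.ndarray, h_y: np.ndarray) -> float:
    """Dataset-to-dataset distance: JS divergence between two BoP histograms."""
    return jensenshannon(h_x, h_y, base=2) ** 2  # scipy returns the square root


# Illustrative usage: build the codebook on the reference (e.g., target) set,
# then compare a candidate training set against it; a smaller distance
# suggests a more suitable training set.
# codebook = build_codebook(target_feats, k=1000)
# dist = bop_js_divergence(bop_histogram(train_feats, codebook),
#                          bop_histogram(target_feats, codebook))
```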
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 384, + 174, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 384, + 174, + 397 + ], + "spans": [ + { + "bbox": [ + 47, + 384, + 174, + 397 + ], + "type": "text", + "content": "4.1. Experimental Settings" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 403, + 287, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 403, + 287, + 594 + ], + "spans": [ + { + "bbox": [ + 46, + 403, + 287, + 594 + ], + "type": "text", + "content": "Correlation study under DomainNet setup. We use domain generalization benchmark DomainNet [62], which consists of 6 domains: Painting, Real, Infograph, Quickdraw, Sketch and ClipArt, where the tasks are 345-way object classification. Each domain has its training and test splits. We conduct the correlation study in an leave-one-out manner, leading to 6 groups of correlation studies, with each group using the test split of one domain as the target test set. Additionally, we apply image transformations to the training split of six original domains. Specifically, we employ 'Cartoon' [48], 'Zoom Blur' and 'JPEG Compression' [36] to convert domains' style to be one specific type. We also use 'AugMix' [38] and 'AutoAugment' [15], which transform images with various operations to generate domains with mixed styles. This process synthesizes 30 new datasets, so we have 36 training sets in total." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "text", + "content": "We follow the training scheme provided by TLlib [44] to train ResNet-101 model [35], whose weights are pretrained on ImageNet [18], yielding 36 models. Moreover, penultimate outputs of pretrained ResNet-101 is used as image feature. On the test set, we generate a codebook of size 1000. Then, for each training set, we compute its BoP histogram, " + }, + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "text", + "content": " from the test set, and the accuracy of its trained model on the test set. After this, we calculate correlation strength between " + }, + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 594, + 288, + 715 + ], + "type": "text", + "content": " and model accuracy to evaluate whether BoP is indicative of datasets training suitability." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 307, + 72, + 549, + 166 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 549, + 166 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 549, + 166 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 549, + 166 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Method</th><th colspan="3">ResNet-34</th><th colspan="3">ResNet-101</th></tr>
<tr><th>r</th><th>ρ</th><th>τ<sub>w</sub></th><th>r</th><th>ρ</th><th>τ<sub>w</sub></th></tr>
<tr><td>FD [27]</td><td>-0.860</td><td>-0.926</td><td>-0.828</td><td>-0.903</td><td>-0.902</td><td>-0.802</td></tr>
<tr><td>MMD [33]</td><td>-0.817</td><td>-0.801</td><td>-0.691</td><td>-0.821</td><td>-0.817</td><td>-0.704</td></tr>
<tr><td>KID [11]</td><td>-0.773</td><td>-0.904</td><td>-0.804</td><td>-0.876</td><td>-0.896</td><td>-0.800</td></tr>
<tr><td>BoP + JS</td><td>-0.960</td><td>-0.927</td><td>-0.840</td><td>-0.961</td><td>-0.929</td><td>-0.840</td></tr>
</table>
", + "image_path": "2249d97e7310739c4877deae7739deac5cdfdcf80fcc2096a3569889cf85ebdd.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "lines": [ + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "text", + "content": "Table 2. Compare averaged Pearson's correlation " + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "inline_equation", + "content": "(r)" + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "text", + "content": ", Spearman's correlation " + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "inline_equation", + "content": "(\\rho)" + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "text", + "content": " and weighted Kendall's correlation " + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "inline_equation", + "content": "(\\tau_w)" + }, + { + "bbox": [ + 304, + 173, + 547, + 251 + ], + "type": "text", + "content": " of Fréchet distance (FD), maximum mean discrepancy (MMD), kernel inception distance (KID) and BoP + JS (codebook size 1000) on six test sets in DomainNet. We report two groups of results using ResNet-34 (Left) and ResNet-101 (Right). We show BoP + JS is more effective in assessing training set suitability than others." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "spans": [ + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": "Evaluation metric. We use Pearson's correlation " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": " and Spearman's rank correlation " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": " to show linearity and monotonicity between BoP-based dataset distance and model accuracy, respectively. Both metrics range in " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "[-1, 1]" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "|r|" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "|\\rho|" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": " is close to 1, the linearity or monotonicity is strong, and vice versa. In addition to these two metrics, we also use weighted variant of Kendall's correlation " + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "inline_equation", + "content": "(\\tau_w)" + }, + { + "bbox": [ + 304, + 266, + 547, + 422 + ], + "type": "text", + "content": " [83]. It is shown to be useful when selecting the best ranked item is of interest [71], while a major application of BoP + JS is to select the training dataset leading to the best performance on a test set. 
This metric has the same range where a number closer to -1 or 1 indicates stronger negative or positive correlation, respectively, and 0 means no correlation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 430, + 380, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 430, + 380, + 442 + ], + "spans": [ + { + "bbox": [ + 306, + 430, + 380, + 442 + ], + "type": "text", + "content": "4.2. Evaluation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "spans": [ + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": "Strong correlation: A training set is more suitable for a given test set if it has small " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": ". Fig. 2 shows correlation study on ClipArt, Painting, Real and Sketch. We notice that there are strong Pearson's correlations " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "(|r| > 0.95)" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": ", Spearman's rank correlations " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "(|\\rho| > 0.93)" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " and relatively high weighted Kendall's correlations " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "(|\\tau_w| > 0.84)" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " on four test sets. This suggests that " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " is stable and useful across test sets. Table 2 compares average correlation strength of " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " with Fréchet distance (FD) [27], maximum mean discrepancy (MMD) [33] and kernel inception distance (KID) [11]. They use that same image features as BoP. According to their formulae, mean and covariance of these features are used for distance computation. We see that " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " has the highest average correlation scores on six test sets " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "(|r| = 0.961" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "|\\rho| = 0.929" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "|\\tau_w| = 0.840)" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": ". 
On average, " + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 449, + 547, + 653 + ], + "type": "text", + "content": " is superior in depicting training sets suitability for a test set without any training procedure." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 654, + 547, + 715 + ], + "type": "text", + "content": "Impact of codebook size is shown in the Fig. 3. We construct codebooks with different size within approximately one order of magnitude around 345. We find that the three correlation scores increase and then become stable when codebook size becomes larger. This indicates that the per" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2885" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 159, + 71, + 444, + 95 + ], + "blocks": [ + { + "bbox": [ + 159, + 71, + 444, + 95 + ], + "lines": [ + { + "bbox": [ + 159, + 71, + 444, + 95 + ], + "spans": [ + { + "bbox": [ + 159, + 71, + 444, + 95 + ], + "type": "image", + "image_path": "95aed49b03c7f756627bc5a2bc9af9e0427f44b529d54773a3deefe245f6a65b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 51, + 96, + 179, + 190 + ], + "blocks": [ + { + "bbox": [ + 51, + 96, + 179, + 190 + ], + "lines": [ + { + "bbox": [ + 51, + 96, + 179, + 190 + ], + "spans": [ + { + "bbox": [ + 51, + 96, + 179, + 190 + ], + "type": "image", + "image_path": "5643a69bbc88e3d4d9a753f5785172c67698b9964d7b4f8d56690ca73bce5432.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "lines": [ + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": "Figure 2. Correlation study for training suitability of datasets. We report the correlation strength between " + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": " and model classification accuracy on four test domains of DomainNet: ClipArt, Painting, Real and Sketch. The model architecture is ResNet-101. Each dot denotes a model trained on a training set of DomainNet. We mark training domains (e.g., ClipArt) by different shapes and transformation operations (e.g., AugMix) by different colors. The straight lines are fit with robust linear regression [41]. 
We consistently observe high correlation results: Pearson's correlation " + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "inline_equation", + "content": "(|r| > 0.95)" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": ", Spearman's correlation " + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "inline_equation", + "content": "(|\\rho| > 0.93)" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": " and weighted Kendall's correlation " + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "inline_equation", + "content": "(|\\tau_w| > 0.84)" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": ". This suggests that " + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 209, + 547, + 276 + ], + "type": "text", + "content": " is predictive of the suitability of a training set." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 179, + 95, + 299, + 191 + ], + "blocks": [ + { + "bbox": [ + 179, + 95, + 299, + 191 + ], + "lines": [ + { + "bbox": [ + 179, + 95, + 299, + 191 + ], + "spans": [ + { + "bbox": [ + 179, + 95, + 299, + 191 + ], + "type": "image", + "image_path": "0386ccc80c9f44dfd12666803e30c349d2ba213c099709ceee4c3972e2c9dc26.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 194, + 339, + 204 + ], + "lines": [ + { + "bbox": [ + 277, + 194, + 339, + 204 + ], + "spans": [ + { + "bbox": [ + 277, + 194, + 339, + 204 + ], + "type": "text", + "content": "BoP + JS divergence" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 301, + 95, + 421, + 191 + ], + "blocks": [ + { + "bbox": [ + 301, + 95, + 421, + 191 + ], + "lines": [ + { + "bbox": [ + 301, + 95, + 421, + 191 + ], + "spans": [ + { + "bbox": [ + 301, + 95, + 421, + 191 + ], + "type": "image", + "image_path": "19d3fe321a43c59bd2607bc43646f863c24e6eb1dbe637e93448cef3667208f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 422, + 96, + 541, + 191 + ], + "blocks": [ + { + "bbox": [ + 422, + 96, + 541, + 191 + ], + "lines": [ + { + "bbox": [ + 422, + 96, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 422, + 96, + 541, + 191 + ], + "type": "image", + "image_path": "8220335a4926c5656209fb26dbfb4c0cbb1fdfd8bc794248aa9badf98a5ac04e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 50, + 283, + 164, + 376 + ], + "blocks": [ + { + "bbox": [ + 50, + 283, + 164, + 376 + ], + "lines": [ + { + "bbox": [ + 50, + 283, + 164, + 376 + ], + "spans": [ + { + "bbox": [ + 50, + 283, + 164, + 376 + ], + "type": "image", + "image_path": "0d07af07218366662e123a19c2b4bd0df58a255cf0d1b79e074f809502fe6e86.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 165, + 283, + 272, + 376 + ], + "blocks": [ + { + "bbox": [ + 165, + 283, + 272, + 376 + ], + "lines": [ + { + "bbox": [ + 165, + 283, + 272, + 376 + ], + "spans": [ + { + "bbox": [ + 165, + 283, + 272, + 376 + ], + "type": "image", + "image_path": 
"372efe534f400d85614618c9636645bf3dbfc4ce8b181ae6c02f6007fffd42c5.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 50, + 376, + 164, + 471 + ], + "blocks": [ + { + "bbox": [ + 50, + 376, + 164, + 471 + ], + "lines": [ + { + "bbox": [ + 50, + 376, + 164, + 471 + ], + "spans": [ + { + "bbox": [ + 50, + 376, + 164, + 471 + ], + "type": "image", + "image_path": "024137bd89d4a2148c03aca8c6f37dd30fa586d2abb7999a397e6531ef3b5631.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "lines": [ + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "spans": [ + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "text", + "content": "Figure 3. Impact of codebook size on correlation strength for ResNet-101 on four test domains: ClipArt, Painting, Real and Sketch. For example, on Real domain, correlation scores " + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "inline_equation", + "content": "|\\rho|" + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "inline_equation", + "content": "|r|" + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "inline_equation", + "content": "|\\tau_w|" + }, + { + "bbox": [ + 46, + 475, + 287, + 533 + ], + "type": "text", + "content": " are relatively low under a small size and remain stably high when the size is greater than 400." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 165, + 376, + 273, + 471 + ], + "blocks": [ + { + "bbox": [ + 165, + 376, + 273, + 471 + ], + "lines": [ + { + "bbox": [ + 165, + 376, + 273, + 471 + ], + "spans": [ + { + "bbox": [ + 165, + 376, + 273, + 471 + ], + "type": "image", + "image_path": "4a5dcf19bff65ebb62bb994a02a31bdeea36c09dbb093f5201e1a943fa64cda5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 538, + 212, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 538, + 212, + 549 + ], + "spans": [ + { + "bbox": [ + 47, + 538, + 212, + 549 + ], + "type": "text", + "content": "formance " + }, + { + "bbox": [ + 47, + 538, + 212, + 549 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 47, + 538, + 212, + 549 + ], + "type": "text", + "content": " is overall consistent." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "text", + "content": "Correlation study with a different model architecture. We additionally validate the robustness of BoP for ResNet-34 with codebook size 1000. As shown in Table 2, we compare the average correlation scores of " + }, + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "text", + "content": ", FD, MMD and KID. 
We see that " + }, + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 550, + 287, + 635 + ], + "type": "text", + "content": " has consistent performance on two models and remains preferable to characterize training suitability." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 645, + 285, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 645, + 285, + 659 + ], + "spans": [ + { + "bbox": [ + 47, + 645, + 285, + 659 + ], + "type": "text", + "content": "5. Assessing Test Set Difficulty without Labels" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 288, + 715 + ], + "type": "text", + "content": "In the task of test set difficulty assessment, we are provided with a labeled training set and a set of unlabeled datasets for testing. Given a classifier trained on the training set, the goal is to estimate the model accuracy on these" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 282, + 547, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 547, + 354 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 547, + 354 + ], + "type": "text", + "content": "test sets without any data annotations. In this section, we first show dataset distance measured by " + }, + { + "bbox": [ + 304, + 282, + 547, + 354 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 282, + 547, + 354 + ], + "type": "text", + "content": " exhibits strong negative correlation with classifier accuracy. We then demonstrate an accuracy predictor based on the BoP representation gives accurate performance estimates compared to state-of-the-art methods." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 360, + 433, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 360, + 433, + 373 + ], + "spans": [ + { + "bbox": [ + 306, + 360, + 433, + 373 + ], + "type": "text", + "content": "5.1. Experimental Settings" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "spans": [ + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "text", + "content": "Correlation study under CIFAR-10 setup. We conduct a correlation study by comparing " + }, + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "text", + "content": " with classifier accuracy. Following the same setup in [21], we use a series of datasets sharing the same label space (but usually with distribution shift) with CIFAR-10 [49]. Specifically, we train ResNet-44 classifier [35] on the training set of CIFAR-10, which consists of 50,000 images from 10 classes. Here, we use the CIFAR-10-C benchmark [37] for correlation study, which contains different types of corruptions with 5 levels of severity including per-pixel noise, blurring, synthetic weather effects, and digital transforms. 
Then, for each dataset, we compute its BoP vector, its " + }, + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 378, + 545, + 557 + ], + "type": "text", + "content": " from CIFAR-10 training set and the classifier accuracy. In addition to ResNet-44, we also study the RepVGG-A1 [22], VGG-16-BN [72] and MobileNet-V2 [70]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": "Predicting classification accuracy under CIFAR-10 setup. We train a regressor that takes as input the BoP representation and outputs classification accuracy. The regressor is a neural network with 3 fully connected layers and trained on CIFAR-10-C (regression training set). We evaluate accuracy prediction on CIFAR-10.1 [69], CIFAR-10.2 [69] and CIFAR-10.2-" + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "inline_equation", + "content": "\\tilde{C}" + }, + { + "bbox": [ + 304, + 558, + 546, + 713 + ], + "type": "text", + "content": " [58]. The former two are real-world datasets with natural shift, while the latter one is manually corrupted by the same synthetic shift as [58]. Specifically, we add 10 types of unseen and unique corruptions such as warps, blurs, color distortions and noise additions, with 5 severity levels to CIFAR-10.2. Note that, these corruptions have no overlap with those in CIFAR-10-C [58]." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2886" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 71, + 184, + 171 + ], + "blocks": [ + { + "bbox": [ + 50, + 71, + 184, + 171 + ], + "lines": [ + { + "bbox": [ + 50, + 71, + 184, + 171 + ], + "spans": [ + { + "bbox": [ + 50, + 71, + 184, + 171 + ], + "type": "image", + "image_path": "114a0f7deadcda905a7b3d8b52c58f98e9d3ce35f4f318968c9d0d4f2b4fcac5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 187, + 72, + 306, + 170 + ], + "blocks": [ + { + "bbox": [ + 187, + 72, + 306, + 170 + ], + "lines": [ + { + "bbox": [ + 187, + 72, + 306, + 170 + ], + "spans": [ + { + "bbox": [ + 187, + 72, + 306, + 170 + ], + "type": "image", + "image_path": "e3da439484b5169a327ac2fdf0d47882f557f84145d690c13fd972e0756eb678.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 72, + 421, + 171 + ], + "blocks": [ + { + "bbox": [ + 307, + 72, + 421, + 171 + ], + "lines": [ + { + "bbox": [ + 307, + 72, + 421, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 421, + 171 + ], + "type": "image", + "image_path": "2b69af6e1794263a51140d7ffbb1d30e5b4ae43439e9cba6ffbe19ed4ef07647.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 171, + 337, + 178 + ], + "lines": [ + { + "bbox": [ + 282, + 171, + 337, + 178 + ], + "spans": [ + { + "bbox": [ + 282, + 171, + 337, + 178 + ], + 
"type": "text", + "content": "BoP + JS divergence" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 422, + 72, + 541, + 171 + ], + "blocks": [ + { + "bbox": [ + 422, + 72, + 541, + 171 + ], + "lines": [ + { + "bbox": [ + 422, + 72, + 541, + 171 + ], + "spans": [ + { + "bbox": [ + 422, + 72, + 541, + 171 + ], + "type": "image", + "image_path": "04cfb99844c04a664155c54583e873e2c7606978a5c1800a3b719884868423a4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 51, + 180, + 185, + 277 + ], + "blocks": [ + { + "bbox": [ + 51, + 180, + 185, + 277 + ], + "lines": [ + { + "bbox": [ + 51, + 180, + 185, + 277 + ], + "spans": [ + { + "bbox": [ + 51, + 180, + 185, + 277 + ], + "type": "image", + "image_path": "f04c9c9a95bb02dacba75c08fa5d445492eda752c47f895aec31a1b307c913fc.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "lines": [ + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "spans": [ + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "text", + "content": "Figure 4. Correlation between train-test distance measured by " + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "text", + "content": " and model accuracy. Top: Correlation study under CIFAR-10 setup using ResNet-44, RepVGG-A1, VGG-16-BN and MobileNet-V2. Each data point denotes a dataset from CIFAR-10-C. Bottom: Correlation study under ImageNet setup using EfficientNet-B1, DenseNet-121, Inception-V4 and ViT-Base-16. ImageNet-C datasets are used as test sets. The straight lines are fit with robust linear regression [41]. Under both setups, we observe the strong Spearman's rank correlation " + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "inline_equation", + "content": "(|\\rho| > 0.98)" + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "text", + "content": " between " + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 294, + 545, + 350 + ], + "type": "text", + "content": " and model accuracy." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 187, + 181, + 303, + 277 + ], + "blocks": [ + { + "bbox": [ + 187, + 181, + 303, + 277 + ], + "lines": [ + { + "bbox": [ + 187, + 181, + 303, + 277 + ], + "spans": [ + { + "bbox": [ + 187, + 181, + 303, + 277 + ], + "type": "image", + "image_path": "7ec349e4283c573447053b3cd207667876f6ca26f0753912b835be7cad463ca4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 279, + 337, + 287 + ], + "lines": [ + { + "bbox": [ + 282, + 279, + 337, + 287 + ], + "spans": [ + { + "bbox": [ + 282, + 279, + 337, + 287 + ], + "type": "text", + "content": "BoP + JS divergence" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 304, + 181, + 421, + 277 + ], + "blocks": [ + { + "bbox": [ + 304, + 181, + 421, + 277 + ], + "lines": [ + { + "bbox": [ + 304, + 181, + 421, + 277 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 421, + 277 + ], + "type": "image", + "image_path": "25f24f8e80a721fc770a535be332d7efa631d636219482958ec1a09be9a8c349.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 422, + 181, + 538, + 277 + ], + "blocks": [ + { + "bbox": [ + 422, + 181, + 538, + 277 + ], + "lines": [ + { + "bbox": [ + 422, + 181, + 538, + 277 + ], + "spans": [ + { + "bbox": [ + 422, + 181, + 538, + 277 + ], + "type": "image", + "image_path": "da8fda1eb4dcc90dc0c5c19bbd31ea26b60a823346a9746205f4496c89ac2c79.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 354, + 288, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 288, + 401 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 288, + 401 + ], + "type": "text", + "content": "For the above, we extract image features (output of penultimate layer of ResNet-44) from CIFAR-10 training set. We construct a codebook by dividing the features into 80 clusters with k-means." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 402, + 287, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 402, + 287, + 569 + ], + "spans": [ + { + "bbox": [ + 46, + 402, + 287, + 569 + ], + "type": "text", + "content": "Correlation study under ImageNet setup. We use DenseNet-121 [40] classifier trained on ImageNet training set. We employ a series of datasets from the ImageNet-C benchmark [36] where the classifier is tested. ImageNet-C uses the same types of corruptions as CIFAR-10-C. We construct a codebook of size 1000 on the ImageNet training set from which images features are extracted by the penultimate layer of DenseNet-121. We project each dataset in ImageNet-C onto the codebook and obtain their BoP representations. When exhibiting linear correlations, we calculate " + }, + { + "bbox": [ + 46, + 402, + 287, + 569 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 46, + 402, + 287, + 569 + ], + "type": "text", + "content": " between each ImageNet-C dataset and the training set, and compute classification accuracy. We also use EfficientNet-B1 [78], Inception-V4 [77] and ViT-Base-16 [23] to repeat above procedure for correlation study." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "text", + "content": "Evaluation metric. Same as Section 4.1, we use Pearson's correlation " + }, + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "text", + "content": " and Spearman's rank correlation " + }, + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 46, + 570, + 287, + 677 + ], + "type": "text", + "content": " to show linearity and monotonicity between BoP based dataset distance and model accuracy, respectively. To evaluate the effectiveness of accuracy estimation, we use root mean squared error (RMSE) by calculating the difference between estimated accuracy and ground truth before taking the mean across all the test sets. A larger RMSE means a less accurate prediction, and vice versa." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 714 + ], + "type": "text", + "content": "Compared methods. We compare our system with four existing ones. 1) Prediction score: it estimates model accuracy using the maximum of Softmax output (i.e., confidence" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "spans": [ + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "content": "score). An image with a confidence score greater than a predefined threshold " + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "inline_equation", + "content": "\\tau \\in [0,1]" + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "content": " is considered correctly predicted. We select two thresholds (" + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "inline_equation", + "content": "\\tau = 0.8" + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "inline_equation", + "content": "0.9" + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "content": "). 2) Difference of confidence (DoC) [34] trains a linear regressor mapping average confidence to classifier accuracy; 3) Average thresholded confidence with maximum confidence score function (ATC-MC) [31] calculates a threshold on CIFAR-10 validation set and regards an image with a confidence score higher than the threshold as correctly classified; 4) Network regression " + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "inline_equation", + "content": "(\\mu + \\sigma + FD)" + }, + { + "bbox": [ + 304, + 354, + 545, + 535 + ], + "type": "text", + "content": " [21] trains a neural network that takes as input the feature mean, covariance and Fréchet distance between a set of interest and training set and outputs model accuracy. All methods, if applicable, are compared under the same conditions as our system, e.g., classification training set and regression training set." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 540, + 380, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 540, + 380, + 551 + ], + "spans": [ + { + "bbox": [ + 306, + 540, + 380, + 551 + ], + "type": "text", + "content": "5.2. Evaluation" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "spans": [ + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "text", + "content": "Strong correlation: A test set is difficult (low accuracy) if it is dissimilar to the training set using " + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "text", + "content": ". Fig. 4 presents the correlation study of two setups and various classifiers. We observe a very high Spearman's rank correlation " + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "inline_equation", + "content": "(|\\rho| > 0.99)" + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "text", + "content": " in all the scenarios. It indicates that classification accuracy is highly correlated with JS divergence between BoPs of training and test sets. That is, test accuracy drops proportionally to the distance between the given training set and a test set. The results demonstrate " + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "inline_equation", + "content": "\\mathrm{BoP} + \\mathrm{JS}" + }, + { + "bbox": [ + 304, + 558, + 545, + 701 + ], + "type": "text", + "content": " between training and test sets is an effective indicator of classification accuracy. More studies are presented in the supplementary materials." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "content": "Effectiveness of the BoP representation in predict" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2887" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 133, + 537, + 266 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "text", + "content": "Table 3. Method comparison in predicting classifier accuracy under CIFAR-10 setup. 
We compare four methods: predicted score-based method with hard threshold " + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "text", + "content": ", neural network regression based on feature statistics " + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "inline_equation", + "content": "(\\mu + \\sigma + \\mathrm{FD})" + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "text", + "content": " [20], average thresholded confidence with maximum confidence score function (ATC-MC) [30] and difference of confidences (DoC) [34]. We use CIFAR-10.1 and CIFAR-10.2 (both real-world) and CIFAR-10.2-" + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "inline_equation", + "content": "\\bar{C}" + }, + { + "bbox": [ + 46, + 70, + 547, + 127 + ], + "type": "text", + "content": " (manually corrupted) as unseen test sets for accuracy prediction. We use RMSE (\\%) to indicate precision of estimates. In each column, we compare our method with the best of the competing ones. We report results by average of five runs." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 133, + 537, + 266 + ], + "lines": [ + { + "bbox": [ + 57, + 133, + 537, + 266 + ], + "spans": [ + { + "bbox": [ + 57, + 133, + 537, + 266 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Method</td><td rowspan="2">CIFAR-10.1</td><td rowspan="2">CIFAR-10.2</td><td colspan="6">CIFAR-10.2-C (50)</td></tr>
<tr><td>Severity 1</td><td>Severity 2</td><td>Severity 3</td><td>Severity 4</td><td>Severity 5</td><td>Overall</td></tr>
<tr><td>Prediction score (τ = 0.8)</td><td>4.899</td><td>4.800</td><td>10.127</td><td>12.869</td><td>16.809</td><td>21.427</td><td>24.371</td><td>17.910</td></tr>
<tr><td>Prediction score (τ = 0.9)</td><td>0.297</td><td>0.550</td><td>3.638</td><td>5.078</td><td>8.048</td><td>11.804</td><td>14.108</td><td>9.404</td></tr>
<tr><td>ATC-MC [30]</td><td>2.650</td><td>2.672</td><td>3.080</td><td>4.306</td><td>7.108</td><td>11.015</td><td>13.040</td><td>8.601</td></tr>
<tr><td>DoC [34]</td><td>0.490</td><td>0.263</td><td>2.247</td><td>2.916</td><td>5.117</td><td>9.012</td><td>6.637</td><td>5.744</td></tr>
<tr><td>μ + σ + FD [21]</td><td>0.455</td><td>0.561</td><td>5.875</td><td>5.823</td><td>4.724</td><td>4.908</td><td>6.486</td><td>5.602</td></tr>
<tr><td>BoP (K = 80)</td><td>0.218</td><td>0.122</td><td>2.458</td><td>2.818</td><td>3.730</td><td>5.836</td><td>6.451</td><td>4.551</td></tr>
<tr><td>BoP (K = 100)</td><td>0.186</td><td>0.124</td><td>2.849</td><td>2.808</td><td>3.548</td><td>4.025</td><td>4.777</td><td>3.663</td></tr></table>
", + "image_path": "4e91acc407cb219dc26baca5c22e8b4cd7a23d2de4dc3a6d714564222cf8b60b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 52, + 273, + 541, + 380 + ], + "blocks": [ + { + "bbox": [ + 52, + 273, + 541, + 380 + ], + "lines": [ + { + "bbox": [ + 52, + 273, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 52, + 273, + 541, + 380 + ], + "type": "image", + "image_path": "862be20553eceff7e6b1353d88c7c4c9743ff9cdb50fb1b180001579e19e7436.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "lines": [ + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "spans": [ + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "type": "text", + "content": "Figure 5. Impact of codebook size on correlation strength on CIFAR-10-C. Correlation scores " + }, + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "type": "inline_equation", + "content": "|\\rho|" + }, + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "type": "inline_equation", + "content": "|r|" + }, + { + "bbox": [ + 46, + 387, + 546, + 410 + ], + "type": "text", + "content": " are relatively low under a small size and become stably high when the size is greater than 20 for all four model architectures." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "spans": [ + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "text", + "content": "ing classification accuracy on variou unseen test sets. After performing correlation study, we train a neural network regressor on CIFAR-10-C and test it on a series of test sets. Results are summarized in Table 3. We have the following observations. First and foremost, BoP representation achieves the best accuracy prediction performance, evidenced by the lowest RMSE across all the four test scenarios. For example, on the test sets of CIFAR-10.2-" + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "inline_equation", + "content": "\\bar{C}" + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "text", + "content": ", the RMSE of our method is 4.551, which is 1.051 lower than the second best method [21]. This clearly validates the effectiveness of the BoP representation. Second, we observe that the \"Prediction score\" method is unstable. While it has good results under " + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "inline_equation", + "content": "\\tau = 0.9" + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "text", + "content": " on CIFAR-10.1 and CIFAR-10.2 datasets, it is generally inferior to the competing methods in other test scenarios. Our observation is similar to [21], suggesting that a more robust threshold selection method is needed for this method. Third, although BoP has slightly higher RMSE than DoC on Severity 1 of CIFAR-10.2-" + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "inline_equation", + "content": "\\bar{C}" + }, + { + "bbox": [ + 46, + 417, + 289, + 668 + ], + "type": "text", + "content": " (2.458 v.s., 2.247), we stress that BoP is overall more stable and effective on real world datasets and other severity levels of synthetic datasets." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 288, + 713 + ], + "type": "text", + "content": "Impact of codebook size is summarized in Fig. 5 under CIFAR-10 setup. We conduct the study using different sizes on four classifiers. We observe correlation scores first in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 417, + 546, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 417, + 546, + 453 + ], + "spans": [ + { + "bbox": [ + 305, + 417, + 546, + 453 + ], + "type": "text", + "content": "crease and then become stable when codebook size is larger than 20. These results are considered validation and help us use competitive and stable codebook sizes in Table 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 306, + 464, + 379, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 464, + 379, + 475 + ], + "spans": [ + { + "bbox": [ + 306, + 464, + 379, + 475 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 483, + 547, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 547, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 547, + 639 + ], + "type": "text", + "content": "This work introduces a bag-of-prototypes (BoP) dataset representation to vectorize visual datasets. It first computes a codebook composed of clustering prototypes and then a prototype histogram for a dataset. The BoP vector considers the underlying local semantic distribution of a dataset and is thus more discriminative than global dataset statistics. Specifically, when used in conjunction with JS divergence, the proposed descriptor better captures the underlying relationship across datasets. This advantage is validated by its promising results in two dataset-level tasks: assessing training set suitability and test set difficulty. This work establishes the baseline usage of the BoP scheme, and more investigations and applications will be made in future work." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 650, + 409, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 650, + 409, + 662 + ], + "spans": [ + { + "bbox": [ + 306, + 650, + 409, + 662 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 670, + 546, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 670, + 546, + 706 + ], + "spans": [ + { + "bbox": [ + 304, + 670, + 546, + 706 + ], + "type": "text", + "content": "We thank all anonymous reviewers for their constructive comments in improving this paper. This work was supported by the ARC Discovery Project (DP210102801)." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2888" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 156 + ], + "type": "text", + "content": "[1] Alessandro Achille, Michael Lam, Rahul Tewari, Avinash Ravichandran, Subhransu Maji, Charless C Fowlkes, Stefano Soatto, and Pietro Perona. Task2vec: Task embedding for meta-learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6430-6439, 2019. 1, 2, 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 159, + 288, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 159, + 288, + 202 + ], + "spans": [ + { + "bbox": [ + 53, + 159, + 288, + 202 + ], + "type": "text", + "content": "[2] David Acuna, Guojun Zhang, Marc T Law, and Sanja Fidler. f-domain adversarial learning: Theory and algorithms. In International Conference on Machine Learning, pages 66-75, 2021. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 204, + 288, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 204, + 288, + 248 + ], + "spans": [ + { + "bbox": [ + 53, + 204, + 288, + 248 + ], + "type": "text", + "content": "[3] Andrea Agostinelli, Michal Pandy, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. How stable are transferability metrics evaluations? In European Conference on Computer Vision, pages 303-321. Springer, 2022. 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 250, + 287, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 250, + 287, + 303 + ], + "spans": [ + { + "bbox": [ + 53, + 250, + 287, + 303 + ], + "type": "text", + "content": "[4] Andrea Agostinelli, Jasper Uijlings, Thomas Mensink, and Vittorio Ferrari. Transferability metrics for selecting source model ensembles. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7936-7946, 2022. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 304, + 287, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 304, + 287, + 338 + ], + "spans": [ + { + "bbox": [ + 53, + 304, + 287, + 338 + ], + "type": "text", + "content": "[5] David Alvarez-Melis and Nicolo Fusi. Geometric dataset distances via optimal transport. In Advances in Neural Information Processing Systems, pages 21428-21439, 2020. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 340, + 287, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 340, + 287, + 372 + ], + "spans": [ + { + "bbox": [ + 53, + 340, + 287, + 372 + ], + "type": "text", + "content": "[6] Lora Aroyo, Matthew Lease, Praveen Paritosh, and Mike Schaekermann. Data excellence for ai: why should you care? Interactions, 29(2):66-69, 2022. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 374, + 287, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 374, + 287, + 417 + ], + "spans": [ + { + "bbox": [ + 53, + 374, + 287, + 417 + ], + "type": "text", + "content": "[7] Robert Baldock, Hartmut Maennel, and Behnam Neyshabur. Deep learning through the lens of example difficulty. Advances in Neural Information Processing Systems, 34:10876-10889, 2021. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 419, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 287, + 453 + ], + "type": "text", + "content": "[8] Herbert Bay, Tinne Tuytelaars, and Luc Van Gool. Surf: Speeded up robust features. In European Conference on Computer Vision, pages 404-417. Springer, 2006. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 454, + 287, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 454, + 287, + 496 + ], + "spans": [ + { + "bbox": [ + 53, + 454, + 287, + 496 + ], + "type": "text", + "content": "[9] Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Vaughan. A theory of learning from different domains. Machine Learning, 79:151-175, 2010. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "spans": [ + { + "bbox": [ + 48, + 499, + 287, + 543 + ], + "type": "text", + "content": "[10] Shai Ben-David, John Blitzer, Koby Crammer, and Fernando Pereira. Analysis of representations for domain adaptation. In Advances in Neural Information Processing Systems, pages 137-144, 2006. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 544, + 287, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 544, + 287, + 577 + ], + "spans": [ + { + "bbox": [ + 48, + 544, + 287, + 577 + ], + "type": "text", + "content": "[11] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 4, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 578, + 287, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 578, + 287, + 611 + ], + "spans": [ + { + "bbox": [ + 48, + 578, + 287, + 611 + ], + "type": "text", + "content": "[12] Barry Boots, Kokichi Sugihara, Sung Nok Chiu, and Atsuyuki Okabe. Spatial tessellations: concepts and applications of voronoi diagrams. 2009. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 613, + 287, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 666 + ], + "type": "text", + "content": "[13] Jiefeng Chen, Frederick Liu, Besim Avci, Xi Wu, Yingyu Liang, and Somesh Jha. Detecting errors and estimating accuracy on unlabeled data with self-training ensembles. 
In Advances in Neural Information Processing Systems, 2021. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "text", + "content": "[14] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2, 2004. 3, 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 546, + 713 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 117 + ], + "type": "text", + "content": "[15] Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. Autoaugment: Learning augmentation policies from data. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 119, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 119, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 545, + 185 + ], + "type": "text", + "content": "[16] Ido Dagan, Lillian Lee, and Fernando Pereira. Similarity-based methods for word sense disambiguation. In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics and Eighth Conference of the European Chapter of the Association for Computational Linguistics, pages 56-63, 1997. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 186, + 545, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 220 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 220 + ], + "type": "text", + "content": "[17] Julie Delon and Agnes Desolneux. A wasserstein-type distance in the space of gaussian mixture models. SIAM Journal on Imaging Sciences, 13(2):936-970, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "spans": [ + { + "bbox": [ + 308, + 222, + 545, + 276 + ], + "type": "text", + "content": "[18] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 248-255, 2009. 1, 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 279, + 545, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 279, + 545, + 323 + ], + "spans": [ + { + "bbox": [ + 308, + 279, + 545, + 323 + ], + "type": "text", + "content": "[19] Weijian Deng, Stephen Gould, and Liang Zheng. What does rotation prediction tell us about classifier accuracy under varying testing environments? In International Conference on Machine Learning, 2021. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 325, + 546, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 325, + 546, + 369 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 546, + 369 + ], + "type": "text", + "content": "[20] Weijian Deng and Liang Zheng. 
Are labels always necessary for classifier accuracy evaluation? In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15069-15078, 2021. 1, 2, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 371, + 545, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 414 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 414 + ], + "type": "text", + "content": "[21] Weijian Deng and Liang Zheng. Are labels always necessary for classifier accuracy evaluation. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-1, 2021. 6, 7, 8" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 417, + 545, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 417, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 307, + 417, + 545, + 471 + ], + "type": "text", + "content": "[22] Xiaohan Ding, Xiangyu Zhang, Ningning Ma, Jungong Han, Guiguang Ding, and Jian Sun. Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13733–13742, 2021. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 474, + 545, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 474, + 545, + 551 + ], + "spans": [ + { + "bbox": [ + 307, + 474, + 545, + 551 + ], + "type": "text", + "content": "[23] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In Proceedings of the International Conference on Learning Representations, 2021. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 553, + 545, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 553, + 545, + 586 + ], + "spans": [ + { + "bbox": [ + 307, + 553, + 545, + 586 + ], + "type": "text", + "content": "[24] Qiang Du, Vance Faber, and Max Gunzburger. Centroidal voronoi tessellations: Applications and algorithms. SIAM review, 41(4):637-676, 1999. 4" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 588, + 545, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 588, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 307, + 588, + 545, + 632 + ], + "type": "text", + "content": "[25] Sabri Eyuboglu, Bojan Karlaš, Christopher Ré, Ce Zhang, and James Zou. dcbench: A benchmark for data-centric ai systems. In Proceedings of the Sixth Workshop on Data Management for End-To-End Machine Learning, 2022. 1" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 634, + 545, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 634, + 545, + 678 + ], + "spans": [ + { + "bbox": [ + 307, + 634, + 545, + 678 + ], + "type": "text", + "content": "[26] L. Fei-Fei and P. Perona. A bayesian hierarchical model for learning natural scene categories. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 524–531, 2005. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 680, + 545, + 713 + ], + "type": "text", + "content": "[27] Maurice Fréchet. 
Sur la distance de deux lois de probabilité. Comptes Rendus Hebdomadaires des Seances de L'Académie des Sciences, 244(6):689-692, 1957. 2, 4, 5" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2889" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[28] David Freedman and Persi Diaconis. On the histogram as a density estimator: L 2 theory. Zeitschrift für Wahrscheinlichkeitstheorie und verwandte Gebiete, 57(4):453-476, 1981. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 117, + 287, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 117, + 287, + 172 + ], + "spans": [ + { + "bbox": [ + 48, + 117, + 287, + 172 + ], + "type": "text", + "content": "[29] Jianglin Fu, Shikai Li, Yuming Jiang, Kwan-Yee Lin, Chen Qian, Chen Change Loy, Wayne Wu, and Ziwei Liu. Stylegan-human: A data-centric odyssey of human generation. In European Conference on Computer Vision, pages 1-19. Springer, 2022. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 172, + 287, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 172, + 287, + 226 + ], + "spans": [ + { + "bbox": [ + 48, + 172, + 287, + 226 + ], + "type": "text", + "content": "[30] Saurabh Garg, Sivaraman Balakrishnan, Zachary C Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In International Conference on Learning Representations, 2022, 2, 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 228, + 287, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 287, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 287, + 282 + ], + "type": "text", + "content": "[31] Saurabh Garg, Sivaraman Balakrishnan, Zachary Chase Lipton, Behnam Neyshabur, and Hanie Sedghi. Leveraging unlabeled data to predict out-of-distribution performance. In Proceedings of the International Conference on Learning Representations, 2022. 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 287, + 326 + ], + "type": "text", + "content": "[32] Amirata Ghorbani and James Zou. Data shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning, pages 2242-2251. PMLR, 2019. 1, 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 287, + 370 + ], + "type": "text", + "content": "[33] Arthur Gretton, Karsten Borgwardt, Malte Rasch, Bernhard Schölkopf, and Alex Smola. A kernel method for the two-sample-problem. 
In Advances in Neural Information Processing Systems, 2006. 2, 4, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 371, + 287, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 371, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 48, + 371, + 287, + 426 + ], + "type": "text", + "content": "[34] Devin Guillery, Vaishaal Shankar, Sayna Ebrahimi, Trevor Darrell, and Ludwig Schmidt. Predicting with confidence on unseen distributions. In Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, pages 1134-1144, 2021. 2, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 426, + 287, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 287, + 470 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 287, + 470 + ], + "type": "text", + "content": "[35] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. 5, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 471, + 287, + 514 + ], + "type": "text", + "content": "[36] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 5, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 514, + 287, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 558 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 558 + ], + "type": "text", + "content": "[37] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. In Proceedings of the International Conference on Learning Representations, 2019. 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 287, + 613 + ], + "type": "text", + "content": "[38] Dan Hendrycks, Norman Mu, Ekin D Cubuk, Barret Zoph, Justin Gilmer, and Balaji Lakshminarayanan. Augmix: A simple data processing method to improve robustness and uncertainty. In Proceedings of the International Conference on Learning Representations, 2020. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 614, + 287, + 668 + ], + "type": "text", + "content": "[39] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems, 2017. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 713 + ], + "type": "text", + "content": "[40] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4700-4708, 2017. 7" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 72, + 545, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "spans": [ + { + "bbox": [ + 307, + 72, + 545, + 106 + ], + "type": "text", + "content": "[41] Peter J Huber. Robust statistics. In International Encyclopedia of Statistical Science, pages 1248-1251. Springer, 2011. 6, 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 106, + 545, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 106, + 545, + 149 + ], + "spans": [ + { + "bbox": [ + 307, + 106, + 545, + 149 + ], + "type": "text", + "content": "[42] Hervé Jégou and Ondrej Chum. Negative evidences and co-occurrences in image retrieval: The benefit of pca and whiten-ing. In European Conference on Computer Vision, pages 774–787, 2012. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 150, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 150, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 307, + 150, + 545, + 205 + ], + "type": "text", + "content": "[43] Hervé Jégou, Matthijs Douze, Cordelia Schmid, and Patrick Pérez. Aggregating local descriptors into a compact image representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3304-3311, 2010. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 205, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 205, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 205, + 545, + 239 + ], + "type": "text", + "content": "[44] Junguang Jiang, Baixu Chen, Bo Fu, and Mingsheng Long. Transfer-learning-library. https://github.com/thuml/Transfer-Learning-Library, 2020.5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "spans": [ + { + "bbox": [ + 307, + 239, + 545, + 282 + ], + "type": "text", + "content": "[45] Ziheng Jiang, Chiyuan Zhang, Kunal Talwar, and Michael C Mozer. Characterizing structural regularities of labeled data in overparameterized models. arXiv preprint arXiv:2002.03206, 2020. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 283, + 545, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 283, + 545, + 326 + ], + "spans": [ + { + "bbox": [ + 307, + 283, + 545, + 326 + ], + "type": "text", + "content": "[46] Thorsten Joachims. Text categorization with support vector machines: Learning with many relevant features. In European Conference on Machine Learning, pages 137-142, 1998. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 327, + 545, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 327, + 545, + 370 + ], + "spans": [ + { + "bbox": [ + 307, + 327, + 545, + 370 + ], + "type": "text", + "content": "[47] Thorsten Joachims, Dayne Freitag, Tom Mitchell, et al. Webwatcher: A tour guide for the world wide web. In International Joint Conference on Artificial Intelligence, pages 770-777. CiteSeer, 1997. 
3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 371, + 545, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 371, + 545, + 393 + ], + "spans": [ + { + "bbox": [ + 307, + 371, + 545, + 393 + ], + "type": "text", + "content": "[48] Alexander B. Jung. imgaug. https://github.com/ aleju/imgaug, 2018. [Online; accessed 30-Oct-2018]. 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 393, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 393, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 393, + 545, + 415 + ], + "type": "text", + "content": "[49] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. 2009. 4, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "spans": [ + { + "bbox": [ + 307, + 415, + 545, + 469 + ], + "type": "text", + "content": "[50] David D Lewis and William A Gale. A sequential algorithm for training text classifiers. In Proceedings of the Seventeenth Annual International ACM-SIGIR Conference on Research and Development in Information Retrieval, pages 3-12, 1994. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 471, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 471, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 307, + 471, + 545, + 514 + ], + "type": "text", + "content": "[51] Weixin Liang, Girmaw Abebe Tadesse, Daniel Ho, L Fei-Fei, Matei Zaharia, Ce Zhang, and James Zou. Advances, challenges and opportunities in creating data for trustworthy ai. Nature Machine Intelligence, 4(8):669–677, 2022. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 514, + 545, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 514, + 545, + 568 + ], + "spans": [ + { + "bbox": [ + 307, + 514, + 545, + 568 + ], + "type": "text", + "content": "[52] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European Conference on Computer Vision, pages 740-755, 2014. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 569, + 545, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 569, + 545, + 603 + ], + "spans": [ + { + "bbox": [ + 307, + 569, + 545, + 603 + ], + "type": "text", + "content": "[53] D.G. Lowe. Object recognition from local scale-invariant features. In Proceedings of the IEEE International Conference on Computer Vision, pages 1150-1157, 1999. 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 603, + 545, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 603, + 545, + 656 + ], + "spans": [ + { + "bbox": [ + 307, + 603, + 545, + 656 + ], + "type": "text", + "content": "[54] James MacQueen et al. Some methods for classification and analysis of multivariate observations. In Proceedings of the Fifth Berkeley Symposium on Mathematical statistics and probability, volume 1, pages 281-297. Oakland, CA, USA, 1967. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "spans": [ + { + "bbox": [ + 307, + 658, + 545, + 680 + ], + "type": "text", + "content": "[55] Christopher Manning and Hinrich Schutze. Foundations of statistical natural language processing. MIT press, 1999. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 681, + 545, + 713 + ], + "type": "text", + "content": "[56] Mark Mazumder, Colby Banbury, Xiaozhe Yao, Bojan Karlas, William Gaviria Rojas, Sudnya Diamos, Greg Diamos, Lynn He, Douwe Kiela, David Jurado, et al. Dataperf:" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "2890" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 287, + 95 + ], + "type": "text", + "content": "Benchmarks for data-centric ai development. arXiv preprint arXiv:2207.10062, 2022. 1, 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "spans": [ + { + "bbox": [ + 48, + 95, + 288, + 138 + ], + "type": "text", + "content": "[57] Andrew McCallum, Kamal Nigam, et al. A comparison of event models for naive bayes text classification. In AAAI Workshop on Learning for Text Categorization, pages 41-48, 1998. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 183 + ], + "type": "text", + "content": "[58] Eric Mintun, Alexander Kirillov, and Saining Xie. On interaction between augmentations and corruptions in natural corruption robustness. In Advances in Neural Information Processing Systems, 2021. 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 184, + 288, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 184, + 288, + 227 + ], + "spans": [ + { + "bbox": [ + 48, + 184, + 288, + 227 + ], + "type": "text", + "content": "[59] David Nister and Henrik Stewenius. Scalable recognition with a vocabulary tree. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2161-2168, 2006. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 228, + 288, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 228, + 288, + 282 + ], + "spans": [ + { + "bbox": [ + 48, + 228, + 288, + 282 + ], + "type": "text", + "content": "[60] Michal Páncy, Andrea Agostinelli, Jasper Uijlings, Vittorio Ferrari, and Thomas Mensink. Transferability estimation using bhattacharyya class separability. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9172-9182, 2022. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 288, + 327 + ], + "type": "text", + "content": "[61] Amandalynne Paullada, Inioluwa Deborah Raji, Emily Bender, Emily Denton, and Alex Hanna. Data and its (dis)contents: A survey of dataset development and use in machine learning research. *Patterns*, 2021. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 327, + 288, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 327, + 288, + 381 + ], + "spans": [ + { + "bbox": [ + 48, + 327, + 288, + 381 + ], + "type": "text", + "content": "[62] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1406-1415, 2019. 1, 2, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 382, + 288, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 382, + 288, + 425 + ], + "spans": [ + { + "bbox": [ + 48, + 382, + 288, + 425 + ], + "type": "text", + "content": "[63] Xingchao Peng, Yichen Li, and Kate Saenko. Domain2vec: Domain embedding for unsupervised domain adaptation. In European Conference on Computer Vision, pages 756-774, 2020. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 426, + 288, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 426, + 288, + 470 + ], + "spans": [ + { + "bbox": [ + 48, + 426, + 288, + 470 + ], + "type": "text", + "content": "[64] Florent Perronnin and Christopher Dance. Fisher kernels on visual vocabularies for image categorization. In Proceedings of the IEEE International Conference on Computer Vision, pages 1-8, 2007. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 471, + 288, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 471, + 288, + 514 + ], + "spans": [ + { + "bbox": [ + 48, + 471, + 288, + 514 + ], + "type": "text", + "content": "[65] Florent Perronnin, Jorge Sánchez, and Thomas Mensink. Improving the fisher kernel for large-scale image classification. In European Conference on Computer Vision, pages 143-156, 2010. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 514, + 288, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 288, + 568 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 288, + 568 + ], + "type": "text", + "content": "[66] James Philbin, Ondrej Chum, Michael Isard, Josef Sivic, and Andrew Zisserman. Object retrieval with large vocabularies and fast spatial matching. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8, 2007. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 570, + 288, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 570, + 288, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 570, + 288, + 634 + ], + "type": "text", + "content": "[67] Vladislav Polianskii, Giovanni Luca Marchetti, Alexander Kravberg, Anastasiia Varava, Florian T Pokorny, and Danica Kragic. Voronoi density estimator for high-dimensional data: Computation, compactification and convergence. 
In Uncertainty in Artificial Intelligence, pages 1644-1653. PMLR, 2022. 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 635, + 288, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 635, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 48, + 635, + 288, + 658 + ], + "type": "text", + "content": "[68] Anand Rajaraman and Jeffrey David Ullman. Mining of massive datasets. Cambridge University Press, 2011. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 658, + 288, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 658, + 288, + 691 + ], + "spans": [ + { + "bbox": [ + 48, + 658, + 288, + 691 + ], + "type": "text", + "content": "[69] Benjamin Recht, Rebecca Roelofs, Ludwig Schmidt, and Vaishaal Shankar. Do cifar-10 classifiers generalize to cifar-10? arXiv preprint arXiv:1806.00451, 2018. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 692, + 288, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 692, + 288, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 692, + 288, + 713 + ], + "type": "text", + "content": "[70] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zh-moginov, and Liang-Chieh Chen. Mobilenetv2: Inverted" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 546, + 713 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 326, + 73, + 546, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 546, + 106 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 546, + 106 + ], + "type": "text", + "content": "residuals and linear bottlenecks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4510-4520, 2018. 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 108, + 545, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 108, + 545, + 129 + ], + "spans": [ + { + "bbox": [ + 307, + 108, + 545, + 129 + ], + "type": "text", + "content": "[71] Grace S Shieh. A weighted kendall's tau statistic. Statistics & Probability Letters, 39(1):17-24, 1998. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 131, + 545, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 131, + 545, + 165 + ], + "spans": [ + { + "bbox": [ + 307, + 131, + 545, + 165 + ], + "type": "text", + "content": "[72] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 166, + 546, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 166, + 546, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 166, + 546, + 209 + ], + "type": "text", + "content": "[73] Sivic and Zisserman. Video google: a text retrieval approach to object matching in videos. In Proceedings of the IEEE International Conference on Computer Vision, pages 1470-1477, 2003. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 211, + 546, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 546, + 245 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 546, + 245 + ], + "type": "text", + "content": "[74] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of frustratingly easy domain adaptation. In Proceedings of the AAAI Conference on Artificial Intelligence, 2016. 
1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 246, + 546, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 246, + 546, + 280 + ], + "spans": [ + { + "bbox": [ + 307, + 246, + 546, + 280 + ], + "type": "text", + "content": "[75] Baochen Sun and Kate Saenko. Deep coral: Correlation alignment for deep domain adaptation. In European Conference on Computer Vision, pages 443-450, 2016. 1, 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 281, + 546, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 281, + 546, + 335 + ], + "spans": [ + { + "bbox": [ + 307, + 281, + 546, + 335 + ], + "type": "text", + "content": "[76] Swabha Swayamdipta, Roy Schwartz, Nicholas Lourie, Yizhong Wang, Hannaneh Hajishirzi, Noah A Smith, and Yejin Choi. Dataset cartography: Mapping and diagnosing datasets with training dynamics. arXiv preprint arXiv:2009.10795, 2020. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 337, + 546, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 337, + 546, + 381 + ], + "spans": [ + { + "bbox": [ + 307, + 337, + 546, + 381 + ], + "type": "text", + "content": "[77] Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander A Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Thirty-first AAAI Conference on Artificial Intelligence, 2017. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 383, + 546, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 383, + 546, + 426 + ], + "spans": [ + { + "bbox": [ + 307, + 383, + 546, + 426 + ], + "type": "text", + "content": "[78] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114, 2019. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 428, + 546, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 428, + 546, + 473 + ], + "spans": [ + { + "bbox": [ + 307, + 428, + 546, + 473 + ], + "type": "text", + "content": "[79] Yang Tan, Yang Li, and Shao-Lun Huang. Otce: A transferability metric for cross-domain cross-task representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15779-15788, 2021. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 474, + 546, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 474, + 546, + 529 + ], + "spans": [ + { + "bbox": [ + 307, + 474, + 546, + 529 + ], + "type": "text", + "content": "[80] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forgetting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 530, + 546, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 530, + 546, + 564 + ], + "spans": [ + { + "bbox": [ + 307, + 530, + 546, + 564 + ], + "type": "text", + "content": "[81] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014. 
2" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 565, + 546, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 565, + 546, + 588 + ], + "spans": [ + { + "bbox": [ + 307, + 565, + 546, + 588 + ], + "type": "text", + "content": "[82] Joaquin Vanschoren. Meta-learning: A survey. arXiv preprint arXiv:1810.03548, 2018. 1, 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 589, + 546, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 589, + 546, + 622 + ], + "spans": [ + { + "bbox": [ + 307, + 589, + 546, + 622 + ], + "type": "text", + "content": "[83] Sebastiano Vigna. A weighted correlation index for rankings with ties. In Proceedings of International Conference on World Wide Web, pages 1166-1176, 2015. 5" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 624, + 546, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 624, + 546, + 666 + ], + "spans": [ + { + "bbox": [ + 307, + 624, + 546, + 666 + ], + "type": "text", + "content": "[84] Wei Ying, Yu Zhang, Junzhou Huang, and Qiang Yang. Transfer learning via learning to transfer. In International Conference on Machine Learning, pages 5085-5094. PMLR, 2018. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 669, + 546, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 669, + 546, + 713 + ], + "spans": [ + { + "bbox": [ + 307, + 669, + 546, + 713 + ], + "type": "text", + "content": "[85] Yuchen Zhang, Tianle Liu, Mingsheng Long, and Michael Jordan. Bridging theory and algorithm for domain adaptation. In International Conference on Machine Learning, pages 7404-7413, 2019. 2" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2891" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 173 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 115 + ], + "type": "text", + "content": "[86] Liang Zheng, Yi Yang, and Qi Tian. Sift meets cnn: A decade survey of instance retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(5):1224-1244, 2017. 3, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "spans": [ + { + "bbox": [ + 48, + 118, + 287, + 173 + ], + "type": "text", + "content": "[87] Yujie Zhong, Relja Arandjelovic, and Andrew Zisserman. Ghostvlad for set-based face recognition. In Computer Vision-ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2–6, 2018, Revised Selected Papers, Part II 14, pages 35–50. Springer, 2019. 
2" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "2892" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_content_list.json b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..40e2c26d3970da054ea333e65528679584869898 --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_content_list.json @@ -0,0 +1,1578 @@ +[ + { + "type": "text", + "text": "A Characteristic Function-based Method for Bottom-up Human Pose Estimation", + "text_level": 1, + "bbox": [ + 78, + 130, + 890, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haoxuan $\\mathrm{Qu}^{1}$ , Yujun $\\mathrm{Cai}^{2}$ , Lin Geng $\\mathrm{Foo}^{1}$ , Ajay Kumar $^{3}$ , Jun Liu $^{1,*}$", + "bbox": [ + 215, + 179, + 756, + 199 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Singapore University of Technology and Design, Singapore", + "bbox": [ + 246, + 199, + 723, + 217 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Nanyang Technological University, Singapore", + "bbox": [ + 300, + 217, + 669, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3The Hong Kong Polytechnic University, Hong Kong", + "bbox": [ + 276, + 234, + 696, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "haoxuan-qu@mymail.sutd.edu.sg, yujun001@e.ntu.edu.sg, lingeng.foo@mymail.sutd.edu.sg", + "bbox": [ + 114, + 253, + 851, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ajay.kumar@polyu.edu.hk, jun.liu@sutd.edu.sg", + "bbox": [ + 287, + 272, + 676, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 321, + 313, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Most recent methods formulate the task of human pose estimation as a heatmap estimation problem, and use the overall L2 loss computed from the entire heatmap to optimize the heatmap prediction. In this paper, we show that in bottom-up human pose estimation where each heatmap often contains multiple body joints, using the overall L2 loss to optimize the heatmap prediction may not be the optimal choice. This is because, minimizing the overall L2 loss cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. To cope with this problem, from a novel perspective, we propose a new bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted heatmap and the groundtruth heatmap. Our analysis presented in this paper indicates that the distance between these two characteristic functions is essentially the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. 
Therefore, via minimizing the distance between the two characteristic functions, we can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap. We show the effectiveness of our proposed method through extensive experiments on the COCO dataset and the CrowdPose dataset.", + "bbox": [ + 75, + 353, + 473, + 731 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 775, + 209, + 791 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Human pose estimation aims to locate the body joints of each person in a given RGB image. It is relevant to various applications, such as action recognition [7, 43], person Re-ID [28], and human object interaction [35]. For tackling human pose estimation, most of the recent methods fall", + "bbox": [ + 75, + 801, + 468, + 878 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "into two major categories: top-down methods and bottom-up methods. Top-down methods [24,32,33,39,44] generally use a human detector to detect all the people in the image, and then perform single-person pose estimation for each detected subject separately. In contrast, bottom-up methods [5,6,16,17,22,23,25,26] usually locate the body joints of all people in the image at the same time. Hence, bottom-up methods, the main focus of this paper, are often a more efficient choice compared to top-down methods, especially when there are many people in the input image [5].", + "bbox": [ + 500, + 323, + 890, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In existing works, it is common to regard human pose estimation as a heatmap prediction problem, since this can preserve the spatial structure of the input image throughout the encoding and decoding process [12]. During the general optimization process, the groundtruth (GT) heatmaps $\\mathbf{H}_g$ are first constructed via putting 2D Gaussian blobs centered at the GT coordinates of the body joints. After that, these constructed GT heatmaps are used to supervise the predicted heatmaps $\\mathbf{H}_p$ via the overall L2 loss $L_2^{overall}$ calculated (averaged) over the whole heatmap. Specifically, denoting the area of the heatmap as $A$ , we have $L_2^{overall} = \\frac{\\|\\mathbf{H}_p - \\mathbf{H}_g\\|_2^2}{A}$ .", + "bbox": [ + 498, + 474, + 892, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We argue that using the overall L2 loss to supervise the predicted heatmap may not be the optimal choice in bottom-up methods where each heatmap often contains multiple body joints from the multiple people in various sub-regions, as shown in Fig. 1(b). This is because, a smaller overall L2 loss calculated over the whole heatmap cannot always lead the model to locate all the body joints across different sub-regions in the heatmap more accurately. As illustrated in Fig. 1(a), the predicted heatmap #2 has a smaller overall L2 loss compared to the predicted heatmap #1. However, the predicted heatmap #2 locates the body joint in the top-right sub-region wrongly, whereas the predicted heatmap #1 locates body joints in both the top-right and bottom-left sub-regions correctly. This is because, while the decrease of the overall L2 loss can be achieved when the L2 loss w.r.t. 
each sub-region either decreases or remains the same, this is not necessarily true for all regions.", + "bbox": [ + 498, + 659, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author", + "bbox": [ + 94, + 886, + 223, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "13009", + "bbox": [ + 480, + 944, + 519, + 957 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/02e7d252ce814c9f2ba6b4c3168443053f5d3fa053626fd76dfe151096be69a8.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 163, + 90, + 400, + 337 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/18735f821139ff761ecdb7c55bc60c0937b2ab52d3e8fc8c84dd5d5010284298.jpg", + "image_caption": [ + "(b)", + "Figure 1. (a) Illustration of heatmaps. The predicted heatmap #2 with a smaller overall L2 loss locates the body joint in the top-right sub-region wrongly, while the predicted heatmap #1 with a larger overall L2 loss locates body joints in both the top-right and bottom-left sub-regions correctly. (b) Output of a commonly used bottom-up method, HrHRNet-W32 [6]. As shown, it misses left ankle in the dashed sub-region of image (i) completely, and misidentifies right knee in the dashed sub-region of image (ii). This indicates that accurately localizing the body joints of multiple people in a single heatmap is a challenging problem. (Best viewed in color.)" + ], + "image_footnote": [], + "bbox": [ + 442, + 90, + 826, + 335 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "same (e.g., from predicted heatmap #0 to predicted heatmap #1), it can also be achieved when there is a decrease of L2 loss w.r.t. certain sub-regions and an increase of L2 loss for some other sub-regions (e.g., from predicted heatmap #1 to predicted heatmap #2). This indicates that, in bottom-up methods, the decrease of the overall L2 loss does not always lead to a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time. Besides, we also show some results of a commonly used bottom-up method, HrHRNet-W32 [6], in Fig. 1(b). As shown, it may even miss or misidentify certain body joints when there are a number of people in the input image. This indicates that it is quite difficult to accurately locate all body joints of all people in the predicted heatmap.", + "bbox": [ + 75, + 438, + 468, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To tackle the above-mentioned problem in bottom-up methods, in this paper, rather than using the overall L2 loss to supervise the whole heatmap, we instead aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. 
To this end, from a new perspective, we express the predicted and GT heatmaps as characteristic functions, and minimize the difference between these functions, allowing different sub-regions of the predicted heatmap to be optimized at the same time.", + "bbox": [ + 75, + 654, + 468, + 789 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More specifically, we first construct two distributions respectively from the predicted heatmap and the GT heatmap. After that, we obtain two characteristic functions of these two distributions and optimize the heatmap prediction via minimizing the distance between these two characteristic functions. We analyze in Sec. 3.3 that the distance between the two characteristic functions is the upper bound of the", + "bbox": [ + 75, + 795, + 468, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "L2 losses w.r.t sub-regions in the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, our method can locate body joints in different sub-regions more accurately at the same time, and thus achieve superior performance.", + "bbox": [ + 496, + 438, + 890, + 513 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The contributions of our work are summarized as follows. 1) From a new perspective, we supervise the predicted heatmap using the distance between the characteristic functions of the predicted and GT heatmaps. 2) We analyze (in Sec. 3.3) that the L2 losses w.r.t. sub-regions of the predicted heatmap are upper-bounded by the distance between the characteristic functions. 3) Our proposed method achieves state-of-the-art performance on the evaluation benchmarks [19, 21].", + "bbox": [ + 496, + 513, + 890, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 500, + 664, + 640, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Human Pose Estimation. Due to the wide range of applications, human pose estimation has received lots of attention [5, 6, 16, 17, 22-26, 29, 32, 33, 39, 44], and most of the recent methods fall into two categories: top-down methods and bottom-up methods. In top-down methods, a human detector is generally used to detect all the people in the image first, and then single-person pose estimation is conducted for each detected subject separately. The single-person pose estimation methods that are commonly used in top-down methods include Hourglass [24], Simple Baseline [39], HRNet [32], and HRFormaler [44], etc. Besides top-down methods, bottom-up methods [5, 6, 16, 17, 22, 23, 25, 26] have also attracted a lot of attention recently due to its efficiency [5].", + "bbox": [ + 496, + 689, + 890, + 883 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In bottom-up methods, most methods first detect all", + "bbox": [ + 517, + 885, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "13010", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "identity-free body joints over the whole input image, and then group them into different people. Among these methods, DeepCut and Person-Lab [14,26,27] incorporate offset fields into their methods, while Openpose and PifPaf [5,17] make use of part affinity fields in their methods. 
From another perspective, associate embedding [23] teaches the model to output the group assignments and the localization results of the body joints at the same time, and HGG [16] further combines graph neural networks on top of the associate embedding. Besides the above methods, there also exist some bottom-up methods [11,25,45] that directly regress the coordinates of body joints belonging to the same person.", + "bbox": [ + 75, + 90, + 468, + 271 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing heatmap-based bottom-up methods often use an overall L2 loss calculated over the whole heatmap to optimize heatmap prediction. Differently, in this paper, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the difference between the characteristic functions of the predicted and GT heatmaps.", + "bbox": [ + 75, + 272, + 468, + 362 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Characteristic Function. The characteristic function, a concept originally proposed in probability theory and statistics, has been studied in various areas [2,8,9,13,20,31,41] over the years, such as two-sample testing [8,9,13], generative adversarial nets [2,20], and few-shot classification [41]. Inspired by these works, in this paper, from a novel perspective, we propose to optimize the heatmap prediction for bottom-up human pose estimation via minimizing the distance between two characteristic functions. We theoretically analyze that the distance between the two characteristic functions respectively constructed from the predicted heatmap and the GT heatmap is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap.", + "bbox": [ + 75, + 363, + 468, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 76, + 571, + 166, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In bottom-up human pose estimation, as shown in Fig. 1(a), minimizing the overall L2 loss between the predicted heatmap and the GT heatmap cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. In this work, we aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To achieve this, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions constructed from the predicted and GT heatmaps.", + "bbox": [ + 75, + 597, + 468, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Below, we first briefly introduce the characteristic function, and then discuss how we formulate the heatmap optimization process. After that, we show the theoretical analysis of our proposed method.", + "bbox": [ + 75, + 763, + 468, + 823 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Revisiting Characteristic Function", + "text_level": 1, + "bbox": [ + 76, + 832, + 377, + 848 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The characteristic function is generally used in probability theory and statistics. 
Given an $N$ -dimensional distribution $D$ , its corresponding characteristic function $\\varphi_{D}$ can be", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "written as:", + "bbox": [ + 500, + 92, + 573, + 104 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi_ {D} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] = \\int_ {\\mathbb {R} ^ {N}} e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d D \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 558, + 106, + 890, + 137 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $E$ represents expectation, $i^2 = -1$ , $\\langle \\cdot, \\cdot \\rangle$ represents dot product, $\\mathbf{t}$ is a random $N$ -dimensional vector, and $\\mathbf{x}$ is an $N$ -dimensional vector sampled from $D$ . Note that the characteristic function always exists and has a one-to-one correspondence with the distribution. Besides, the characteristic function is the Fourier transform of the probability density function if the latter exists as well. Moreover, the characteristic function is always finite and bounded ( $|\\varphi_D(\\mathbf{t})| \\leq 1$ ). This makes calculation of the distance between two characteristic functions always meaningful.", + "bbox": [ + 496, + 138, + 890, + 290 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Proposed Heatmap Optimization Process", + "text_level": 1, + "bbox": [ + 498, + 304, + 849, + 320 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Below, we discuss how we formulate the heatmap optimization process for bottom-up human pose estimation via (1) constructing two distributions from the predicted heatmap and the GT heatmap respectively; (2) calculating characteristic functions from these two distributions; and (3) formulating the loss function as the distance between the two characteristic functions.", + "bbox": [ + 496, + 328, + 890, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Distribution Construction. Given an input image, for each type of body joints, we denote the corresponding predicted heatmap as $H_{p}$ and the corresponding GT heatmap as $H_{g}$ . We propose to formulate the two distributions $D(H_{p})$ and $D(H_{g})$ from the two heatmaps $H_{p}$ and $H_{g}$ with the following two steps. (1) As distributions cannot hold negative probabilities, we first pass $H_{p}$ through a relu activation function to make it non-negative. Note that $H_{g}$ is already non-negative. (2) After that, as the sum of probabilities of each constructed distribution needs to be 1, we further normalize both the output of step (1) and $H_{g}$ . Hence, with the above two steps, we formulate $D(H_{p})$ and $D(H_{g})$ as:", + "bbox": [ + 496, + 435, + 890, + 617 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nD \\left(H _ {p}\\right) = \\frac {\\operatorname {r e l u} \\left(H _ {p}\\right)}{\\left\\| \\operatorname {r e l u} \\left(H _ {p}\\right) \\right\\| _ {1}}, \\quad D \\left(H _ {g}\\right) = \\frac {H _ {g}}{\\left\\| H _ {g} \\right\\| _ {1}} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 619, + 890, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Characteristic Function Calculation. For every type of body joints, after formulating the two distributions $D(H_{p})$ and $D(H_{g})$ , we follow Eq. 
1 to calculate the two characteristic functions $\\varphi_{D(H_p)}(\\mathbf{t})$ and $\\varphi_{D(H_g)}(\\mathbf{t})$ as:", + "bbox": [ + 496, + 655, + 890, + 718 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\varphi_ {D \\left(H _ {p}\\right)} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D \\left(H _ {p}\\right)} \\left[ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\right], \\tag {3} \\\\ \\varphi_ {D (H _ {g})} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D (H _ {g})} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 720, + 890, + 761 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Loss Function Formulation. Above we discuss how we obtain the two characteristic functions w.r.t. the predicted heatmap and the GT heatmap for a single type of body joints. Note that in bottom-up human pose estimation, multiple types of body joints are required to be located at the same time. Here, we first discuss how we formulate the loss function for a single type of body joints, and then introduce the overall loss function for all types of body joints.", + "bbox": [ + 496, + 763, + 890, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To formulate the loss function for the $k$ -th type of body", + "bbox": [ + 517, + 885, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "13011", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "joints, given the two characteristic functions $\\varphi_{D(H_p)}^k (\\mathbf{t})$ and $\\varphi_{D(H_g)}^k (\\mathbf{t})$ , we first write the loss function $L_{k}$ as the distance between these two characteristic functions [2]:", + "bbox": [ + 75, + 90, + 468, + 140 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {k} = \\int_ {\\mathbb {R} ^ {2}} \\| \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\| _ {2} ^ {2} \\omega (\\mathbf {t}, \\eta) d \\mathbf {t} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 143, + 468, + 172 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\omega (\\mathbf{t},\\eta)$ is a weighting function. Here we set $\\omega (\\mathbf{t},\\eta)$ to be the probability density function of a uniform distribution in $B_{U}$ , where $B_{U} = [-U,U]\\times [-U,U]$ is a finite predefined range and $U$ is a hyperparameter. This means that, $\\omega (\\mathbf{t},\\eta) = \\frac{1}{4U^2}$ when $\\mathbf{t}\\in B_U$ and $\\omega (\\mathbf{t},\\eta) = 0$ otherwise. We thus further rewrite Eq. 4 as:", + "bbox": [ + 75, + 176, + 468, + 267 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {k} = \\int_ {B _ {U}} \\| \\frac {1}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 281, + 468, + 314 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, from Eq. 
5, we formulate the loss function $L_{k}$ as:", + "bbox": [ + 75, + 316, + 467, + 344 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {k} = \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 347, + 468, + 378 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\gamma = \\frac{U^2\\sqrt{A}}{\\pi^2}$ is a constant coefficient and $A$ is the area of the heatmap. Note that Eq. 6 is equivalent to Eq. 5 during the optimization process, as the efficacy of the added constant $\\gamma$ can be achieved by adjusting the learning rate.", + "bbox": [ + 75, + 382, + 468, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After getting the loss function for each type of body joints, we formulate the total loss for all types of joints as:", + "bbox": [ + 73, + 446, + 467, + 476 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nL _ {t o t a l} = \\sum_ {k = 1} ^ {K} L _ {k} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 479, + 468, + 520 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $K$ denotes the total number of body joint types.", + "bbox": [ + 76, + 522, + 434, + 537 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3. Theoretical Analysis", + "text_level": 1, + "bbox": [ + 76, + 547, + 269, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Below, we perform theoretical analysis to show the effectiveness of our method for bottom-up human pose estimation. Before going into the theorem, we first introduce a lemma that can facilitate the proof of the theorem.", + "bbox": [ + 75, + 571, + 468, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Lemma 1. Let $\\varphi_{D}$ be the characteristic function of a 2-dimensional distribution $D$ . Let $R^{r} = [x_{1}^{lower}, x_{1}^{upper}] \\times [x_{2}^{lower}, x_{2}^{upper}]$ a rectangular region, $R^{e} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times [x_{2}^{lower}, x_{2}^{upper}]\\cup [x_{1}^{lower}, x_{1}^{upper}] \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}$ the edges of this region, and $R^{v} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}$ the vertices of this region. Let $B_{T} = [-T,T] \\times [-T,T]$ . Denote $[D]_{R}$ the portion of the distribution $D$ in $R$ . $[D]_{R^r}$ can then be written as:", + "bbox": [ + 75, + 645, + 468, + 779 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {\\text {l o w e r}}} - e ^ {- i t _ {n} x _ {n} ^ {\\text {u p p e r}}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. \\varphi_ {D} (\\boldsymbol {t})\\right) d t _ {1} d t _ {2}) + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 781, + 488, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\epsilon ([D]_{R^r}) = \\frac{[D]_{R^e}}{2} +\\frac{[D]_{R^v}}{4}$ and $dt_{1}dt_{2}$ are calculated based on the Lebesgue measure.", + "bbox": [ + 75, + 867, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proof of Lemma 1 is provided in the supplementary. 
After introducing this lemma, we analyze our proposed method below.", + "bbox": [ + 498, + 90, + 890, + 135 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Theorem 1. Let $R_{sub}^{r}$ be a random rectangular sub-region in the heatmap of the $k$ -th type of body joints where $\\left\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\right\\|_2^2$ is relatively small compared to $\\left\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\right\\|_2^2$ . The relation between the L2 loss w.r.t. this sub-region and $L_k$ can be written as:", + "bbox": [ + 498, + 151, + 890, + 241 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2}}{\\lambda \\left(R _ {s u b} ^ {r}\\right)} \\leq L _ {k} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 242, + 890, + 279 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that $\\lambda(R_{sub}^{r})$ as the Lebesgue measure represents the area of $R_{sub}^{r}$ .", + "bbox": [ + 498, + 280, + 890, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Proof. To prove Theorem 1, we first reformulate Lemma 1 as:", + "bbox": [ + 498, + 325, + 888, + 354 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {l o w e r}} - e ^ {- i t _ {n} x _ {n} ^ {u p p e r}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. \\varphi_ {D} (\\mathbf {t})\\right) d t _ {1} d t _ {2}) + \\epsilon \\left(\\left[ D \\right] _ {R ^ {r}}\\right) \\\\ = \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D} (\\mathbf {t}) \\int_ {R ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {10} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 356, + 919, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $d\\mathbf{t} = dt_1dt_2$ , and both $d\\mathbf{x}$ and $d\\mathbf{t}$ are calculated based on the Lebesgue measure.", + "bbox": [ + 498, + 479, + 890, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After that, we rewrite $\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sab}^r}\\| _2^2$ as:", + "bbox": [ + 500, + 511, + 890, + 529 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2} (11) \\\\ \\approx \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right. 
(12) \\\\ - \\left(\\epsilon ([ D (H _ {p}) ] _ {R _ {s u b} ^ {r}}) - \\epsilon ([ D (H _ {g}) ] _ {R _ {s u b} ^ {r}})\\right) \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} (13) \\\\ - \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\int_ {B _ {T}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} (14) \\\\ \\approx \\left\\| \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\right\\| _ {2} ^ {2} (15) \\\\ \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (16) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 556, + 913, + 878 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "13012", + "bbox": [ + 478, + 944, + 517, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/0f16cddc193ecab6c0f786f612699157d12ab70cc871936cab702075c9d01afe.jpg", + "table_caption": [ + "Table 1. Comparisons with bottom-up methods on the COCO val2017 set (single-scale testing)." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \)
OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.0 | 84.9 | 67.5 | 56.3 | 69.3
HGG [16] | ECCV 2020 | Hourglass | 512 | 60.4 | 83.0 | 66.2 | - | -
PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 86.2 | 71.9 | 62.3 | 73.2
PifPaf [17] | CVPR 2019 | ResNet-152 | - | 67.4 | - | - | - | -
PETR [30] | CVPR 2022 | - | 1333 | 67.4 | 87.0 | 74.9 | 61.7 | 75.9
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 71.0 | 88.3 | 77.4 | 66.7 | 78.5
PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 67.4 | - | - | - | -
CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 72.4 | 89.1 | - | 67.3 | 80.4
CID [36] | CVPR 2022 | HRNet-W32 | 512 | 66.0 | 86.7 | 72.3 | 59.8 | 76.0
LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 72.2 | 88.9 | 78.9 | 68.1 | 78.9
SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 68.9 | 87.8 | 74.9 | 63.0 | 77.4
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.8 | 88.5 | 76.8 | 66.3 | 77.4
CenterAttention [4] | ICCV 2021 | HrHRNet-W32 | 512 | 68.6 | 87.6 | 74.1 | 62.0 | 78.0
PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 68.4 | 87.1 | 74.8 | 62.7 | 77.1
HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 67.1 | 86.2 | 73.0 | 61.5 | 76.1
+ Ours | - | HrHRNet-W32 | 512 | 69.9 (↑2.8) | 88.1 | 76.0 | 64.2 | 78.1
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 69.9 | 87.2 | 76.1 | 65.4 | 76.4
+ Ours | - | HrHRNet-W48 | 640 | 72.5 (↑2.6) | 89.3 | 79.1 | 68.3 | 79.0
", + "bbox": [ + 81, + 107, + 893, + 356 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/656ee330773926249188773bb24cfbd2f9fa8ce540e949484aeeee5249f30471.jpg", + "table_caption": [ + "Table 2. Comparisons with bottom-up methods on the COCO val2017 set (multi-scale testing)." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Backbone | Input size | AP | \(AP^{50}\) | \(AP^{75}\) | \(AP^M\) | \(AP^L\)
HGG [16] | ECCV 2020 | Hourglass | 512 | 68.3 | 86.7 | 75.8 | - | -
Point-Set Anchors [38] | ECCV 2020 | HRNet-W48 | 640 | 69.8 | 88.8 | 76.3 | - | -
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 72.3 | 88.3 | 78.6 | 68.6 | 78.6
SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 71.4 | 88.9 | 77.8 | 66.3 | 78.9
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 73.2 | 89.8 | 79.1 | 69.1 | 79.3
PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 71.2 | 88.2 | 77.2 | 66.5 | 78.0
HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 69.9 | 87.1 | 76.0 | 65.3 | 77.0
+ Ours | - | HrHRNet-W32 | 512 | 71.8 (↑1.9) | 88.9 | 78.1 | 67.3 | 78.4
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 72.1 | 88.4 | 78.2 | 67.8 | 78.3
+ Ours | - | HrHRNet-W48 | 640 | 73.7 (↑1.6) | 89.9 | 79.6 | 69.6 | 79.5
", + "bbox": [ + 81, + 377, + 893, + 522 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (17) \\\\ = 4 U ^ {2} A \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (18) \\\\ = \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (19) \\\\ = L _ {k} \\lambda \\left(R _ {\\text {s u b}} ^ {r}\\right) (20) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 84, + 531, + 468, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where Eq. 12 holds since $\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\| _2^2$ is relatively small compared to $\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\| _2^2$ , Eq. 13 holds because of Eq. 10, Eq. 15 holds based on the analysis in the supplementary, Eq. 16 holds due to the continuity of L2 distance and the Cauchy-Schwarz inequality, Eq. 17 holds due to the fact that $\\| e^{-i\\langle \\mathbf{t},\\mathbf{x}\\rangle}\\| _2^2 = 1$ and the Cauchy-Schwarz inequality, Eq. 18 holds due to Fubini's theorem.", + "bbox": [ + 75, + 729, + 468, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We can then move $\\lambda(R_{sub}^r)$ on the right hand side of Eq. 20 to the left hand side to get Theorem 1.", + "bbox": [ + 75, + 852, + 468, + 883 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Theorem 1, for the sub-region $R_{sub}^{r}$ , when the sum of the pixelwise L2 distances between the predicted and GT heatmaps over this entire sub-region is relatively large compared to only over its edges, $L_{k}$ will be the upper bound of the L2 loss w.r.t. this sub-region. Because of this, via minimizing $L_{k}$ , we can enable the L2 losses w.r.t. all such sub-regions to be smaller. Note that such sub-regions can be easily found, since the edge of a sub-region typically contains many less pixels compared to the entire sub-region in the first place. Furthermore, for sub-regions containing missed or inaccurate body joints in its center, which are precisely the erroneous predictions that need to be corrected, the sum of the pixelwise L2 distances over the entire sub-region will then be much larger compared to only over its edge. Therefore, our method can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time, whereas the existing bottom-up methods, usually relying on the overall L2 loss, do not hold this property. 
Thus, our method can achieve superior performance for bottom-up human pose estimation.", + "bbox": [ + 496, + 532, + 890, + 851 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that during implementation, since $L_{k}$ itself as an integral is not tractable, inspired by [2], we define $\\hat{L}_k$ as a", + "bbox": [ + 498, + 869, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "13013", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/15920ca6c29c7ea94c0cca90d001e98d3fccc10a1ec7c5f7f38167869b29c577.jpg", + "table_caption": [ + "Table 3. Comparisons with bottom-up methods on the COCO test-dev2017 set (single-scale testing)." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \)
OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.8 | 84.9 | 67.5 | 57.1 | 68.2
Hourglass [24] | ECCV 2016 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0
Associative Embedding [23] | NIPS 2017 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0
SPM [25] | ICCV 2019 | Hourglass | - | 66.9 | 88.5 | 72.9 | 62.6 | 73.1
MDN [34] | CVPR 2020 | Hourglass | - | 62.9 | 85.1 | 69.4 | 58.8 | 71.4
PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 88.0 | 72.6 | 62.4 | 72.3
PifPaf [17] | CVPR 2019 | ResNet-152 | - | 66.7 | - | - | 62.4 | 72.9
PETR [30] | CVPR 2022 | SWin-L | 1333 | 70.5 | 91.5 | 78.7 | 65.2 | 78.0
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 70.0 | 89.4 | 77.3 | 65.7 | 76.9
PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 66.7 | - | - | - | -
CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 71.0 | 90.2 | 78.2 | 66.2 | 77.8
CID [36] | CVPR 2022 | HRNet-W48 | 640 | 70.7 | 90.3 | 77.9 | 66.3 | 77.8
LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 70.8 | 89.7 | 77.8 | 66.7 | 77.0
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.2 | 89.9 | 76.9 | 65.2 | 77.0
CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 69.6 | 89.7 | 76.0 | 64.9 | 76.3
PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 67.4 | 88.3 | 73.9 | 62.1 | 75.1
HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 66.4 | 87.5 | 72.8 | 61.2 | 74.2
+ Ours | - | HrHRNet-W32 | 512 | 68.9 (↑2.5) | 89.2 | 75.7 | 63.7 | 76.1
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 68.4 | 88.2 | 75.1 | 64.4 | 74.2
+ Ours | - | HrHRNet-W48 | 640 | 71.1 (↑2.7) | 90.4 | 78.2 | 66.9 | 77.2
", + "bbox": [ + 80, + 107, + 893, + 367 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/772ce1316efd04ebc68d37c4a520a26d12e7371edab3bd47e3fa4b49ed9d0f2a.jpg", + "table_caption": [ + "Table 4. Comparisons with bottom-up methods on the COCO test-dev2017 set (multi-scale testing)." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \)
Hourglass [24] | ECCV 2016 | Hourglass | 512 | 63.0 | 85.7 | 68.9 | 58.0 | 70.4
Associative Embedding [23] | NIPS 2017 | Hourglass | 512 | 63.0 | 85.7 | 68.9 | 58.0 | 70.4
HGG [16] | ECCV 2020 | Hourglass | 512 | 67.6 | 85.1 | 73.7 | 62.7 | 74.6
SimplePose [18] | AAAI 2020 | IMHN | 512 | 68.1 | - | - | 66.8 | 70.5
PersonLab [26] | ECCV 2018 | - | 1401 | 68.7 | 89.0 | 75.4 | 64.1 | 75.5
PETR [30] | CVPR 2022 | SWin-L | 1333 | 71.2 | 91.4 | 79.6 | 66.9 | 78.0
Point-Set Anchors [38] | ECCV 2020 | HRNet-W48 | 640 | 68.7 | 89.9 | 76.3 | 64.8 | 75.3
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 71.0 | 89.2 | 78.0 | 67.1 | 76.9
CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 71.7 | 90.4 | 78.7 | 67.3 | 78.5
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 72.0 | 90.7 | 78.8 | 67.8 | 77.7
CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 71.1 | 90.5 | 77.5 | 66.9 | 76.7
PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 69.9 | 89.3 | 77.0 | 65.2 | 76.2
HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 69.0 | 89.0 | 75.8 | 64.4 | 75.2
+ Ours | - | HrHRNet-W32 | 512 | 70.8 (↑1.8) | 90.1 | 77.8 | 66.0 | 77.3
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 70.5 | 89.3 | 77.2 | 66.6 | 75.8
+ Ours | - | HrHRNet-W48 | 640 | 72.3 (↑1.8) | 91.5 | 79.8 | 67.9 | 78.2
", + "bbox": [ + 80, + 388, + 893, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tractable alternative of $L_{k}$ as:", + "bbox": [ + 76, + 611, + 274, + 626 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {L} _ {k} = \\sum_ {m = 1} ^ {M} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t} _ {m}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t} _ {m})\\right) \\| _ {2} ^ {2} \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 98, + 628, + 468, + 669 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\{\\mathbf{t}_1,\\dots ,\\mathbf{t}_M\\}$ denotes a set of $M$ vectors randomly sampled from $B_{U}$ .", + "bbox": [ + 76, + 671, + 468, + 700 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The total loss $\\hat{L}_{total}$ for all body joint types can then be written as:", + "bbox": [ + 76, + 700, + 468, + 729 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {L} _ {\\text {t o t a l}} = \\sum_ {k = 1} ^ {K} \\hat {L} _ {k} \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 729, + 468, + 770 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4. Overall Training and Testing", + "text_level": 1, + "bbox": [ + 76, + 786, + 334, + 803 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here we discuss the overall training and testing scheme of our method. Specifically, during training, we supervise the predicted heatmaps via the total loss in Eq. 22 instead of using the commonly used overall L2 loss, and following [6, 22, 23], we conduct grouping via associate embedding. During testing, we follow the evaluation procedure of", + "bbox": [ + 75, + 809, + 468, + 901 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "previous works [6, 22] that conduct bottom-up human pose estimation. Note that in experiments, it is easy to implement $\\hat{L}_k$ in Eq. 21, and we provide more details on how we implement $\\hat{L}_k$ in experiments in the supplementary.", + "bbox": [ + 498, + 611, + 890, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 686, + 633, + 704 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of our method for bottom-up human pose estimation, we conduct experiments on the COCO dataset [21] and the CrowdPose dataset [19]. Besides, we also test the effectiveness of our method on top-down methods in the supplementary. We conduct our experiments on RTX 3090 GPUs.", + "bbox": [ + 496, + 713, + 890, + 805 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. COCO Keypoint Detection", + "text_level": 1, + "bbox": [ + 498, + 816, + 743, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dataset & evaluation metric. The COCO dataset [21] contains over 200k images, and in this dataset, each person instance is annotated with 17 body joints. This dataset consists of three subsets including COCO training set (57k", + "bbox": [ + 496, + 839, + 893, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "13014", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a21c48680ca23caffa35f261f55784f258c39e6c55871b96d5a68619500087bc.jpg", + "table_caption": [ + "Table 5. Comparisons with bottom-up methods on the CrowdPose testing set." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^E \) | \( AP^M \) | \( AP^H \)
w/ single-scale testing
OpenPose [5] | CVPR 2017 | VGG-19 | - | - | - | - | 62.7 | 48.7 | 32.3
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 65.9 | 86.4 | 70.6 | 73.3 | 66.5 | 57.9
PETR [30] | CVPR 2022 | - | - | 72.0 | 90.9 | 78.8 | 78.0 | 72.5 | 65.4
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 67.3 | 86.4 | 72.2 | 74.6 | 68.1 | 58.7
PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 68.9 | 88.7 | 74.7 | 75.4 | 69.6 | 61.5
CID [36] | CVPR 2022 | HRNet-W48 | 640 | 72.3 | 90.8 | 77.9 | 78.7 | 73.0 | 64.8
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 71.6 | 88.5 | 77.6 | 78.9 | 72.4 | 63.0
CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 67.6 | 87.7 | 72.7 | 73.9 | 68.2 | 60.3
Ours | - | HrHRNet-W48 | 640 | 72.6 | 88.8 | 78.9 | 79.2 | 73.1 | 65.6
w/ multi-scale testing
HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 67.6 | 87.4 | 72.6 | 75.8 | 68.1 | 58.9
DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 68.0 | 85.5 | 73.4 | 76.6 | 68.8 | 58.4
PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 69.8 | 89.1 | 75.6 | 76.4 | 70.5 | 62.2
SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 73.8 | 90.5 | 79.9 | 81.2 | 74.7 | 64.7
CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 69.4 | 88.6 | 74.6 | 76.6 | 70.0 | 61.5
Ours | - | HrHRNet-W48 | 640 | 74.1 | 90.7 | 80.2 | 81.3 | 74.9 | 65.1
", + "bbox": [ + 80, + 107, + 893, + 339 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "images), COCO validation set (5k images), and COCO test-dev set (20k images). Following the train-test split of [22], we report results on the val2017 set and test-dev2017 set. Also following [22], we evaluate model performance using standard average precision (AP) calculated based on Object Keypoint Similarity (OKS) on this dataset, and report the following metrics: AP, $\\mathrm{AP}^{50}$ , $\\mathrm{AP}^{75}$ , $\\mathrm{AP}^{\\mathrm{M}}$ , and $\\mathrm{AP}^{\\mathrm{L}}$ .", + "bbox": [ + 75, + 351, + 468, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Implementation details. Following [4, 22], we use the HrHRNet [6] as the baseline, and apply our proposed method to the respective two backbones including HrHRNet-W32 and HrHRNet-W48. For these backbones, we follow their original training and testing configurations specified in [6]. Also following [6], we adopt three scales 0.5, 1, and 2 in multi-scale testing. To calculate $\\hat{L}_k$ following Eq. 21, we set the number of samples $M$ to 256 and the hyperparameter $U$ w.r.t. the finite range $B_U$ to 64 in our experiments.", + "bbox": [ + 75, + 455, + 468, + 607 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. In Tab. 1 and Tab. 2, we report single-scale testing and multi-scale testing results on the COCO val2017 set. In Tab. 3 and Tab. 4, we report single-scale testing and multi-scale testing results on the COCO test-dev2017 set. We observe that after applying our method on both HrHRNet-W32 and HrHRNet-W48, a significant performance improvement is achieved, demonstrating the effectiveness of our method. Moreover, we also compare our method with other state-of-the-art bottom-up human pose estimation methods. Compared to these methods, our method consistently achieves the highest AP score, further demonstrating the effectiveness of our method.", + "bbox": [ + 75, + 608, + 470, + 790 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. CrowdPose", + "text_level": 1, + "bbox": [ + 76, + 801, + 202, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dataset & evaluation metric. The CrowdPose dataset [19] contains about 20k images and 80k person instances, which are annotated with 14 body joints. This dataset consists of three subsets including CrowdPose training set (10k images), CrowdPose validation set (2k images), and Crowd-", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Pose testing set (8k images). Following the train-test split of [6, 22], we report results on the testing set. Also following [6, 22], we evaluate model performance using standard AP calculated based on OKS on the CrowdPose dataset, and report the following metrics: AP, $\\mathrm{AP}^{50}$ , $\\mathrm{AP}^{75}$ , $\\mathrm{AP}^{\\mathrm{E}}$ , $\\mathrm{AP}^{\\mathrm{M}}$ , and $\\mathrm{AP}^{\\mathrm{H}}$ .", + "bbox": [ + 496, + 351, + 890, + 439 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Implementation details. On the CrowdPose dataset, we also use the HrHRNet [6] as the baseline, and we use HrHRNet-W48 as the backbone following [4,6,22]. We follow the original training and testing configurations specified in [6], and also follow [6] to adopt three scales 0.5, 1, and 2 in multi-scale testing. Besides, same as the experiments on the COCO dataset, we also set the number of samples $M$ to 256 and the hyperparameter $U$ w.r.t. 
the finite range $B_U$ to 64 on the CrowdPose dataset.", + "bbox": [ + 496, + 441, + 892, + 575 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. In Tab. 5, we report the single-scale testing and multi-scale testing results on the CrowdPose testing set. As shown, our method consistently achieves the highest AP score, demonstrating the effectiveness of our method.", + "bbox": [ + 496, + 578, + 890, + 638 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Studies", + "text_level": 1, + "bbox": [ + 500, + 648, + 663, + 664 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct ablation studies on the COCO validation set via applying our proposed method on HrHRNet-W32 [6] with single-scale testing.", + "bbox": [ + 496, + 672, + 890, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of the number of samples $M$ . To calculate $\\hat{L}_k$ following Eq. 21, we need to set the number of samples $M$ , which we set to 256 in our experiments. We evaluate other choices of the number of samples $M$ in Tab. 6. As shown, all variants outperform the baseline method, and after the number of samples $M$ becomes larger than 256 the model performance becomes stabilized. Therefore, we set the number of samples $M$ to be 256 in our experiments.", + "bbox": [ + 496, + 718, + 890, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of the finite range $B_U$ with different $U$ . We evaluate different choices of $U$ in Tab. 7. As shown, all variants outperform the baseline method, and after the hyperparameter $U$ becomes larger than 64, the model performance does", + "bbox": [ + 496, + 839, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "13015", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f458311b391225e64cfcb67431098212a095bd3e912c0e465b102c6e1346bb88.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
|  | (a) | (b) | (c) | (d) |
| Body joint name: | Right shoulders | Right eyes | Right elbows | Left shoulders |
| Baseline (HrHRNet-W32) result: |  |  |  |  |
| Ours result: |  |  |  |  |
", + "bbox": [ + 80, + 89, + 883, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 2. Qualitative results of our method and the baseline HrHRNet-W32 model [6]. As shown, the baseline method misses body joints (in (a) and (b)) or misidentifies body joints (in (c) and (d)) in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). Meanwhile, our method provides a more accurate localization result for the body joints of different people in different sub-regions of the predicted heatmap at the same time. More qualitative results are in the supplementary. (Best viewed in color.)", + "bbox": [ + 75, + 313, + 892, + 369 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/76c4f1689cbc34dfc011608c9c3d47b53e30757c89b142d30ea1f7184cf370c2.jpg", + "table_caption": [ + "Table 6. Evaluation on the number of samples $M$" + ], + "table_footnote": [], + "table_body": "
| Method | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \) |
| --- | --- | --- | --- | --- | --- |
| Baseline (HrHRNet-W32) | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| 4 samples | 67.9 | 86.9 | 73.8 | 62.4 | 76.9 |
| 16 samples | 68.9 | 87.5 | 74.8 | 63.5 | 77.4 |
| 64 samples | 69.6 | 87.9 | 75.6 | 63.9 | 77.8 |
| 256 samples | 69.9 | 88.1 | 76.0 | 64.2 | 78.1 |
| 1024 samples | 69.8 | 88.2 | 76.0 | 64.3 | 78.0 |
", + "bbox": [ + 86, + 393, + 468, + 510 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "not enhance anymore. Thus, we set the hyperparameter $U$ to be 64 in our experiments.", + "bbox": [ + 76, + 520, + 468, + 550 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/016223ba8db63ee2e4482179ead189db24e2ce9b7546e2e77d3f63ac46e9e2ca.jpg", + "table_caption": [ + "Table 7. Evaluation on the hyperparameter $U$ w.r.t. the finite range ${B}_{U}$ ." + ], + "table_footnote": [], + "table_body": "
| Method | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \) |
| --- | --- | --- | --- | --- | --- |
| Baseline (HrHRNet-W32) | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| U = 8 | 67.7 | 86.7 | 73.5 | 62.2 | 76.5 |
| U = 16 | 68.6 | 87.3 | 74.2 | 62.9 | 76.9 |
| U = 32 | 69.4 | 87.8 | 75.4 | 63.7 | 77.6 |
| U = 64 | 69.9 | 88.1 | 76.0 | 64.2 | 78.1 |
| U = 128 | 69.8 | 88.0 | 75.8 | 64.0 | 78.0 |
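The two ablations above vary the number of sampled frequency vectors $M$ and the range hyperparameter $U$ of $B_U$ that enter the sampled loss $\hat{L}_k$ of Eq. 21, with $M = 256$ and $U = 64$ adopted as the defaults. As a concrete reference, below is a minimal, hypothetical PyTorch sketch of this sampled characteristic-function loss for a single joint type; it only restates Eq. 2, Eq. 3, and Eq. 21 under these defaults, is not the authors' released implementation, and the helper names (`heatmap_to_dist`, `ecf`, `ecf_loss`) are illustrative.

```python
import math
import torch

def heatmap_to_dist(h):
    """Eq. 2: ReLU then L1-normalise a heatmap into a discrete 2-D distribution."""
    p = torch.relu(h)
    return p / (p.sum() + 1e-8)

def ecf(dist, coords, t):
    """Empirical characteristic function phi(t) = E_{x~D}[e^{i<t,x>}] (Eq. 1 / Eq. 3).

    dist: (H*W,) probabilities; coords: (H*W, 2) pixel coordinates;
    t: (M, 2) sampled frequency vectors. Returns real and imaginary parts, each (M,).
    """
    phase = coords @ t.T                                   # (H*W, M) inner products <t, x>
    real = (dist[:, None] * torch.cos(phase)).sum(dim=0)
    imag = (dist[:, None] * torch.sin(phase)).sum(dim=0)
    return real, imag

def ecf_loss(pred, gt, M=256, U=64.0):
    """Sampled characteristic-function distance for one joint type, in the spirit of Eq. 21."""
    H, W = gt.shape
    ys = torch.arange(H, dtype=torch.float32)
    xs = torch.arange(W, dtype=torch.float32)
    coords = torch.stack(torch.meshgrid(ys, xs, indexing="ij"), dim=-1).reshape(-1, 2)
    t = (torch.rand(M, 2) * 2.0 - 1.0) * U                 # M vectors drawn uniformly from B_U
    pr, pi = ecf(heatmap_to_dist(pred).reshape(-1), coords, t)
    gr, gi = ecf(heatmap_to_dist(gt).reshape(-1), coords, t)
    gamma = U * U * math.sqrt(H * W) / math.pi ** 2        # constant gamma from Eq. 6
    return ((gamma / (2.0 * U)) ** 2 * ((pr - gr) ** 2 + (pi - gi) ** 2)).sum()
```

In a full training setup, the per-joint-type values produced this way would simply be summed over all $K$ heatmap channels, as in Eq. 22.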
", + "bbox": [ + 86, + 584, + 468, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training time. On the COCO dataset, we test the training time of our method that trains the backbone model (HrHRNet-W32 [6]) with the loss function in Eq. 22, and compare it with the training time of the baseline that trains the same network with the overall L2 loss. As shown in Tab. 8, though our method achieves much better performance, it brings only very little increase of the training time. Note that as we follow the same evaluation procedure of previous works [6, 22], the testing time with and without our proposed method are the same.", + "bbox": [ + 75, + 704, + 468, + 854 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative results. Some qualitative results are shown in Fig. 2. As shown, the baseline method which uses the overall L2 loss to optimize the heatmap prediction can miss or", + "bbox": [ + 75, + 854, + 468, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9729b02e2db8fdef0dbb2e980c6141e0f54b4e4daf4ac011243cc5291db407b7.jpg", + "table_caption": [ + "Table 8. Comparison of the training time." + ], + "table_footnote": [], + "table_body": "
| Method | Training time per epoch | Performance (AP) |
| --- | --- | --- |
| Baseline (HrHRNet-W32) | 1.11h | 67.1 |
| Baseline + Ours | 1.19h | 69.9 |
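Read against the baseline row, Tab. 8 corresponds to roughly a 7% longer epoch (1.19h vs. 1.11h) in exchange for a 2.8 AP gain; the extra cost comes mainly from evaluating the two characteristic functions at the $M = 256$ sampled frequency vectors for each heatmap channel, which Tab. 8 suggests is small relative to the cost of the backbone itself.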
", + "bbox": [ + 506, + 393, + 890, + 440 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "get inaccurate body joints in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). In contrast, our method locates body joints of different people in different sub-regions of the predicted heatmap more accurately at the same time, demonstrating the effectiveness of our method.", + "bbox": [ + 496, + 452, + 892, + 541 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 556, + 617, + 571 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we have proposed a novel bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted and GT heatmaps. We theoretically analyze that the distance between the two characteristic functions is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Thus, via minimizing the distance between the two characteristic functions, our method locates body joints in different sub-regions of the predicted heatmap more accurately at the same time. Our method achieves superior performance on the COCO dataset and the CrowdPose dataset. Besides, our method could potentially also be applied in other tasks such as multi-object 6D pose estimation [1], facial landmark extraction [3], and fingerprint minutiae detection [10]. We leave this as our future work.", + "bbox": [ + 496, + 580, + 892, + 821 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgement. This work is supported by MOE AcRF Tier 2 (Proposal ID: T2EP20222-0035), National Research Foundation Singapore under its AI Singapore Programme (AISG-100E-2020-065), and SUTD SKI Project (SKI 2021_02_06).", + "bbox": [ + 496, + 821, + 890, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "13016", + "bbox": [ + 480, + 944, + 519, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Arash Amini, Arul Selvam Periyasamy, and Sven Behnke. Yolopose: Transformer-based multi-object 6d pose estimation using keypoint regression. In Intelligent Autonomous Systems 17: Proceedings of the 17th International Conference IAS-17, pages 392–406. Springer, 2023. 8", + "[2] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7478-7487, 2020. 3, 4, 5", + "[3] Matteo Bodini. A review of facial landmark extraction in 2d images and videos using deep learning. *Big Data and Cognitive Computing*, 3(1):14, 2019. 8", + "[4] Guillem Brasó, Nikita Kister, and Laura Leal-Taixe. The center of attention: Center-keypoint grouping via attention for multi-person pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11853-11863, 2021. 5, 6, 7", + "[5] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7291–7299, 2017. 
1, 2, 3, 5, 6, 7", + "[6] Bowen Cheng, Bin Xiao, Jingdong Wang, Honghui Shi, Thomas S Huang, and Lei Zhang. Higherhrnet: Scale-aware representation learning for bottom-up human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5386-5395, 2020. 1, 2, 5, 6, 7, 8", + "[7] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 183-192, 2020. 1", + "[8] Kacper P Chwialkowski, Aaditya Ramdas, Dino Sejdinovic, and Arthur Gretton. Fast two-sample testing with analytic representations of probability measures. Advances in Neural Information Processing Systems, 28, 2015. 3", + "[9] TW Epps and Kenneth J Singleton. An omnibus test for the two-sample problem using the empirical characteristic function. Journal of Statistical Computation and Simulation, 26(3-4):177-203, 1986. 3", + "[10] Yulin Feng and Ajay Kumar. Detecting locally, patching globally: An end-to-end framework for high speed and accurate detection of fingerprint minutiae. IEEE Transactions on Information Forensics and Security, 2023. 8", + "[11] Zigang Geng, Ke Sun, Bin Xiao, Zhaoxiang Zhang, and Jingdong Wang. Bottom-up human pose estimation via disentangled keypoint regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14676-14686, 2021. 3, 5, 6, 7", + "[12] Kerui Gu, Linlin Yang, and Angela Yao. Removing the bias of integral pose regression. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11067-11076, 2021. 1" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] CE Heathcote. A test of goodness of fit for symmetric random variables1. Australian Journal of Statistics, 14(2):172-181, 1972. 3", + "[14] Eldar Insafutdinov, Leonid Pishchulin, Bjoern Andres, Mykhaylo Andriluka, and Bernt Schiele. Deepercut: A deeper, stronger, and faster multi-person pose estimation model. In European conference on computer vision, pages 34-50. Springer, 2016. 3", + "[15] Wentao Jiang, Sheng Jin, Wentao Liu, Chen Qian, Ping Luo, and Si Liu. Posetrans: A simple yet effective pose transformation augmentation for human pose estimation. arXiv preprint arXiv:2208.07755, 2022. 5, 6", + "[16] Sheng Jin, Wentao Liu, Enze Xie, Wenhai Wang, Chen Qian, Wanli Ouyang, and Ping Luo. Differentiable hierarchical graph grouping for multi-person pose estimation. In European Conference on Computer Vision, pages 718-734. Springer, 2020. 1, 2, 3, 5, 6", + "[17] Sven Kreiss, Lorenzo Bertoni, and Alexandre Alahi. Pifpaf: Composite fields for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11977-11986, 2019. 1, 2, 3, 5, 6", + "[18] Jia Li, Wen Su, and Zengfu Wang. Simple pose: Rethinking and improving a bottom-up approach for multi-person pose estimation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 11354-11361, 2020. 6", + "[19] Jiefeng Li, Can Wang, Hao Zhu, Yihuan Mao, Hao-Shu Fang, and Cewu Lu. Crowdpose: Efficient crowded scenes pose estimation and a new benchmark. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10863-10872, 2019. 2, 6, 7", + "[20] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. 
Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3", + "[21] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 2, 6", + "[22] Zhengxiong Luo, Zhicheng Wang, Yan Huang, Liang Wang, Tieniu Tan, and Erjin Zhou. Rethinking the heatmap regression for bottom-up human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13264-13273, 2021. 1, 2, 5, 6, 7, 8", + "[23] Alejandro Newell, Zhiao Huang, and Jia Deng. Associative embedding: End-to-end learning for joint detection and grouping. Advances in neural information processing systems, 30, 2017. 1, 2, 3, 6", + "[24] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In European conference on computer vision, pages 483-499. Springer, 2016. 1, 2, 6", + "[25] Xuecheng Nie, Jiashi Feng, Jianfeng Zhang, and Shuicheng Yan. Single-stage multi-person pose machines. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6951-6960, 2019. 1, 2, 3, 6" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "13017", + "bbox": [ + 480, + 944, + 517, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[26] George Papandreou, Tyler Zhu, Liang-Chieh Chen, Spyros Gidaris, Jonathan Tompson, and Kevin Murphy. Personlab: Person pose estimation and instance segmentation with a bottom-up, part-based, geometric embedding model. In Proceedings of the European conference on computer vision (ECCV), pages 269-286, 2018. 1, 2, 3, 5, 6", + "[27] Leonid Pishchulin, Eldar Insafutdinov, Siyu Tang, Bjoern Andres, Mykhaylo Andriluka, Peter V Gehler, and Bernt Schiele. Deepcut: Joint subset partition and labeling for multi person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4929-4937, 2016. 3", + "[28] Xuelin Qian, Yanwei Fu, Tao Xiang, Wenxuan Wang, Jie Qiu, Yang Wu, Yu-Gang Jiang, and Xiangyang Xue. Pose-normalized image generation for person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 650–667, 2018. 1", + "[29] Haoxuan Qu, Li Xu, Yujun Cai, Lin Geng Foo, and Jun Liu. Heatmap distribution matching for human pose estimation. In Advances in Neural Information Processing Systems. 2", + "[30] Dahu Shi, Xing Wei, Liangqi Li, Ye Ren, and Wenming Tan. End-to-end multi-person pose estimation with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11069-11078, 2022. 5, 6, 7", + "[31] Weibo Shu, Jia Wan, Kay Chen Tan, Sam Kwong, and Antoni B Chan. Crowd counting in the frequency domain. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19618-19627, 2022. 3", + "[32] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5693-5703, 2019. 1, 2", + "[33] Jonathan J. Thompson, Arjun Jain, Yann LeCun, and Christoph Bregler. Joint training of a convolutional network and a graphical model for human pose estimation. 
Advances in neural information processing systems, 27, 2014. 1, 2", + "[34] Ali Varamesh and Tinne Tuytelaars. Mixture dense regression for object detection and human pose estimation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13083-13092. IEEE, 2020. 6", + "[35] Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9469-9478, 2019. 1", + "[36] Dongkai Wang and Shiliang Zhang. Contextual instance decoupling for robust multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11060-11068, 2022. 5, 6, 7", + "[37] Dongkai Wang, Shiliang Zhang, and Gang Hua. Robust pose estimation in crowded scenes with direct pose-level inference. Advances in Neural Information Processing Systems, 34:6278-6289, 2021. 5, 6, 7", + "[38] Fangyun Wei, Xiao Sun, Hongyang Li, Jingdong Wang, and Stephen Lin. Point-set anchors for object detection, instance" + ], + "bbox": [ + 78, + 90, + 470, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "segmentation and pose estimation. In European Conference on Computer Vision, pages 527-544. Springer, 2020. 5, 6", + "[39] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. In Proceedings of the European conference on computer vision (ECCV), pages 466-481, 2018. 1, 2", + "[40] Yabo Xiao, Dongdong Yu, Xiao Juan Wang, Lei Jin, Guoli Wang, and Qian Zhang. Learning quality-aware representation for multi-person pose regression. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2822-2830, 2022. 5, 6", + "[41] Jiangtao Xie, Fei Long, Jiaming Lv, Qilong Wang, and Peihua Li. Joint distribution matters: Deep brownian distance covariance for few-shot classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7972-7981, 2022. 3", + "[42] Nan Xue, Tianfu Wu, Gui-Song Xia, and Liangpei Zhang. Learning local-global contextual adaptation for multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13065-13074, 2022. 5, 6", + "[43] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial temporal graph convolutional networks for skeleton-based action recognition. In Thirty-second AAAI conference on artificial intelligence, 2018. 1", + "[44] Yuhui Yuan, Rao Fu, Lang Huang, Weihong Lin, Chao Zhang, Xilin Chen, and Jingdong Wang. Hrformer: High-resolution transformer for dense prediction. 2021. 1, 2", + "[45] Xingyi Zhou, Dequan Wang, and Philipp Krahenbuhl. Objects as points. arXiv preprint arXiv:1904.07850, 2019. 
3" + ], + "bbox": [ + 501, + 92, + 893, + 516 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "13018", + "bbox": [ + 480, + 945, + 519, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_model.json b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a0164af6143fe4ed59976eba10adb7d05c84a427 --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_model.json @@ -0,0 +1,2090 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.131, + 0.892, + 0.152 + ], + "angle": 0, + "content": "A Characteristic Function-based Method for Bottom-up Human Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.18, + 0.758, + 0.2 + ], + "angle": 0, + "content": "Haoxuan \\(\\mathrm{Qu}^{1}\\), Yujun \\(\\mathrm{Cai}^{2}\\), Lin Geng \\(\\mathrm{Foo}^{1}\\), Ajay Kumar\\(^{3}\\), Jun Liu\\(^{1,*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.2, + 0.725, + 0.218 + ], + "angle": 0, + "content": "\\(^{1}\\)Singapore University of Technology and Design, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.301, + 0.218, + 0.671, + 0.235 + ], + "angle": 0, + "content": "\\(^{2}\\)Nanyang Technological University, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.277, + 0.235, + 0.697, + 0.253 + ], + "angle": 0, + "content": "3The Hong Kong Polytechnic University, Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.255, + 0.852, + 0.27 + ], + "angle": 0, + "content": "haoxuan-qu@mymail.sutd.edu.sg, yujun001@e.ntu.edu.sg, lingeng.foo@mymail.sutd.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.288, + 0.273, + 0.678, + 0.287 + ], + "angle": 0, + "content": "ajay.kumar@polyu.edu.hk, jun.liu@sutd.edu.sg" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.322, + 0.314, + 0.339 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.354, + 0.474, + 0.732 + ], + "angle": 0, + "content": "Most recent methods formulate the task of human pose estimation as a heatmap estimation problem, and use the overall L2 loss computed from the entire heatmap to optimize the heatmap prediction. In this paper, we show that in bottom-up human pose estimation where each heatmap often contains multiple body joints, using the overall L2 loss to optimize the heatmap prediction may not be the optimal choice. This is because, minimizing the overall L2 loss cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. 
To cope with this problem, from a novel perspective, we propose a new bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted heatmap and the groundtruth heatmap. Our analysis presented in this paper indicates that the distance between these two characteristic functions is essentially the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, we can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap. We show the effectiveness of our proposed method through extensive experiments on the COCO dataset and the CrowdPose dataset." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.776, + 0.21, + 0.792 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.802, + 0.47, + 0.879 + ], + "angle": 0, + "content": "Human pose estimation aims to locate the body joints of each person in a given RGB image. It is relevant to various applications, such as action recognition [7, 43], person Re-ID [28], and human object interaction [35]. For tackling human pose estimation, most of the recent methods fall" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.324, + 0.892, + 0.475 + ], + "angle": 0, + "content": "into two major categories: top-down methods and bottom-up methods. Top-down methods [24,32,33,39,44] generally use a human detector to detect all the people in the image, and then perform single-person pose estimation for each detected subject separately. In contrast, bottom-up methods [5,6,16,17,22,23,25,26] usually locate the body joints of all people in the image at the same time. Hence, bottom-up methods, the main focus of this paper, are often a more efficient choice compared to top-down methods, especially when there are many people in the input image [5]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.475, + 0.893, + 0.659 + ], + "angle": 0, + "content": "In existing works, it is common to regard human pose estimation as a heatmap prediction problem, since this can preserve the spatial structure of the input image throughout the encoding and decoding process [12]. During the general optimization process, the groundtruth (GT) heatmaps \\(\\mathbf{H}_g\\) are first constructed via putting 2D Gaussian blobs centered at the GT coordinates of the body joints. After that, these constructed GT heatmaps are used to supervise the predicted heatmaps \\(\\mathbf{H}_p\\) via the overall L2 loss \\(L_2^{overall}\\) calculated (averaged) over the whole heatmap. Specifically, denoting the area of the heatmap as \\(A\\), we have \\(L_2^{overall} = \\frac{\\|\\mathbf{H}_p - \\mathbf{H}_g\\|_2^2}{A}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.66, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We argue that using the overall L2 loss to supervise the predicted heatmap may not be the optimal choice in bottom-up methods where each heatmap often contains multiple body joints from the multiple people in various sub-regions, as shown in Fig. 1(b). This is because, a smaller overall L2 loss calculated over the whole heatmap cannot always lead the model to locate all the body joints across different sub-regions in the heatmap more accurately. As illustrated in Fig. 
1(a), the predicted heatmap #2 has a smaller overall L2 loss compared to the predicted heatmap #1. However, the predicted heatmap #2 locates the body joint in the top-right sub-region wrongly, whereas the predicted heatmap #1 locates body joints in both the top-right and bottom-left sub-regions correctly. This is because, while the decrease of the overall L2 loss can be achieved when the L2 loss w.r.t. each sub-region either decreases or remains the same, this is not necessarily true for all regions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.225, + 0.9 + ], + "angle": 0, + "content": "*Corresponding Author" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.958 + ], + "angle": 0, + "content": "13009" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.164, + 0.091, + 0.401, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.345, + 0.295, + 0.358 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.444, + 0.091, + 0.828, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.344, + 0.647, + 0.358 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.361, + 0.892, + 0.43 + ], + "angle": 0, + "content": "Figure 1. (a) Illustration of heatmaps. The predicted heatmap #2 with a smaller overall L2 loss locates the body joint in the top-right sub-region wrongly, while the predicted heatmap #1 with a larger overall L2 loss locates body joints in both the top-right and bottom-left sub-regions correctly. (b) Output of a commonly used bottom-up method, HrHRNet-W32 [6]. As shown, it misses left ankle in the dashed sub-region of image (i) completely, and misidentifies right knee in the dashed sub-region of image (ii). This indicates that accurately localizing the body joints of multiple people in a single heatmap is a challenging problem. (Best viewed in color.)" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.439, + 0.47, + 0.651 + ], + "angle": 0, + "content": "same (e.g., from predicted heatmap #0 to predicted heatmap #1), it can also be achieved when there is a decrease of L2 loss w.r.t. certain sub-regions and an increase of L2 loss for some other sub-regions (e.g., from predicted heatmap #1 to predicted heatmap #2). This indicates that, in bottom-up methods, the decrease of the overall L2 loss does not always lead to a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time. Besides, we also show some results of a commonly used bottom-up method, HrHRNet-W32 [6], in Fig. 1(b). As shown, it may even miss or misidentify certain body joints when there are a number of people in the input image. This indicates that it is quite difficult to accurately locate all body joints of all people in the predicted heatmap." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.655, + 0.47, + 0.79 + ], + "angle": 0, + "content": "To tackle the above-mentioned problem in bottom-up methods, in this paper, rather than using the overall L2 loss to supervise the whole heatmap, we instead aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To this end, from a new perspective, we express the predicted and GT heatmaps as characteristic functions, and minimize the difference between these functions, allowing different sub-regions of the predicted heatmap to be optimized at the same time." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.796, + 0.47, + 0.901 + ], + "angle": 0, + "content": "More specifically, we first construct two distributions respectively from the predicted heatmap and the GT heatmap. After that, we obtain two characteristic functions of these two distributions and optimize the heatmap prediction via minimizing the distance between these two characteristic functions. We analyze in Sec. 3.3 that the distance between the two characteristic functions is the upper bound of the" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.439, + 0.892, + 0.514 + ], + "angle": 0, + "content": "L2 losses w.r.t sub-regions in the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, our method can locate body joints in different sub-regions more accurately at the same time, and thus achieve superior performance." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.515, + 0.892, + 0.651 + ], + "angle": 0, + "content": "The contributions of our work are summarized as follows. 1) From a new perspective, we supervise the predicted heatmap using the distance between the characteristic functions of the predicted and GT heatmaps. 2) We analyze (in Sec. 3.3) that the L2 losses w.r.t. sub-regions of the predicted heatmap are upper-bounded by the distance between the characteristic functions. 3) Our proposed method achieves state-of-the-art performance on the evaluation benchmarks [19, 21]." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.665, + 0.642, + 0.68 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.69, + 0.892, + 0.885 + ], + "angle": 0, + "content": "Human Pose Estimation. Due to the wide range of applications, human pose estimation has received lots of attention [5, 6, 16, 17, 22-26, 29, 32, 33, 39, 44], and most of the recent methods fall into two categories: top-down methods and bottom-up methods. In top-down methods, a human detector is generally used to detect all the people in the image first, and then single-person pose estimation is conducted for each detected subject separately. The single-person pose estimation methods that are commonly used in top-down methods include Hourglass [24], Simple Baseline [39], HRNet [32], and HRFormaler [44], etc. Besides top-down methods, bottom-up methods [5, 6, 16, 17, 22, 23, 25, 26] have also attracted a lot of attention recently due to its efficiency [5]." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In bottom-up methods, most methods first detect all" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13010" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.272 + ], + "angle": 0, + "content": "identity-free body joints over the whole input image, and then group them into different people. Among these methods, DeepCut and Person-Lab [14,26,27] incorporate offset fields into their methods, while Openpose and PifPaf [5,17] make use of part affinity fields in their methods. From another perspective, associate embedding [23] teaches the model to output the group assignments and the localization results of the body joints at the same time, and HGG [16] further combines graph neural networks on top of the associate embedding. 
Besides the above methods, there also exist some bottom-up methods [11,25,45] that directly regress the coordinates of body joints belonging to the same person." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.273, + 0.469, + 0.363 + ], + "angle": 0, + "content": "Existing heatmap-based bottom-up methods often use an overall L2 loss calculated over the whole heatmap to optimize heatmap prediction. Differently, in this paper, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the difference between the characteristic functions of the predicted and GT heatmaps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.364, + 0.47, + 0.56 + ], + "angle": 0, + "content": "Characteristic Function. The characteristic function, a concept originally proposed in probability theory and statistics, has been studied in various areas [2,8,9,13,20,31,41] over the years, such as two-sample testing [8,9,13], generative adversarial nets [2,20], and few-shot classification [41]. Inspired by these works, in this paper, from a novel perspective, we propose to optimize the heatmap prediction for bottom-up human pose estimation via minimizing the distance between two characteristic functions. We theoretically analyze that the distance between the two characteristic functions respectively constructed from the predicted heatmap and the GT heatmap is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.572, + 0.168, + 0.587 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.598, + 0.469, + 0.763 + ], + "angle": 0, + "content": "In bottom-up human pose estimation, as shown in Fig. 1(a), minimizing the overall L2 loss between the predicted heatmap and the GT heatmap cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. In this work, we aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To achieve this, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions constructed from the predicted and GT heatmaps." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.764, + 0.469, + 0.824 + ], + "angle": 0, + "content": "Below, we first briefly introduce the characteristic function, and then discuss how we formulate the heatmap optimization process. After that, we show the theoretical analysis of our proposed method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.833, + 0.379, + 0.849 + ], + "angle": 0, + "content": "3.1. Revisiting Characteristic Function" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.469, + 0.901 + ], + "angle": 0, + "content": "The characteristic function is generally used in probability theory and statistics. 
Given an \\(N\\)-dimensional distribution \\(D\\), its corresponding characteristic function \\(\\varphi_{D}\\) can be" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.093, + 0.574, + 0.105 + ], + "angle": 0, + "content": "written as:" + }, + { + "type": "equation", + "bbox": [ + 0.56, + 0.107, + 0.892, + 0.138 + ], + "angle": 0, + "content": "\\[\n\\varphi_ {D} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] = \\int_ {\\mathbb {R} ^ {N}} e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d D \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.14, + 0.892, + 0.291 + ], + "angle": 0, + "content": "where \\(E\\) represents expectation, \\(i^2 = -1\\), \\(\\langle \\cdot, \\cdot \\rangle\\) represents dot product, \\(\\mathbf{t}\\) is a random \\(N\\)-dimensional vector, and \\(\\mathbf{x}\\) is an \\(N\\)-dimensional vector sampled from \\(D\\). Note that the characteristic function always exists and has a one-to-one correspondence with the distribution. Besides, the characteristic function is the Fourier transform of the probability density function if the latter exists as well. Moreover, the characteristic function is always finite and bounded (\\(|\\varphi_D(\\mathbf{t})| \\leq 1\\)). This makes calculation of the distance between two characteristic functions always meaningful." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.305, + 0.851, + 0.321 + ], + "angle": 0, + "content": "3.2. Proposed Heatmap Optimization Process" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.329, + 0.892, + 0.433 + ], + "angle": 0, + "content": "Below, we discuss how we formulate the heatmap optimization process for bottom-up human pose estimation via (1) constructing two distributions from the predicted heatmap and the GT heatmap respectively; (2) calculating characteristic functions from these two distributions; and (3) formulating the loss function as the distance between the two characteristic functions." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.618 + ], + "angle": 0, + "content": "Distribution Construction. Given an input image, for each type of body joints, we denote the corresponding predicted heatmap as \\( H_{p} \\) and the corresponding GT heatmap as \\( H_{g} \\). We propose to formulate the two distributions \\( D(H_{p}) \\) and \\( D(H_{g}) \\) from the two heatmaps \\( H_{p} \\) and \\( H_{g} \\) with the following two steps. (1) As distributions cannot hold negative probabilities, we first pass \\( H_{p} \\) through a relu activation function to make it non-negative. Note that \\( H_{g} \\) is already non-negative. (2) After that, as the sum of probabilities of each constructed distribution needs to be 1, we further normalize both the output of step (1) and \\( H_{g} \\). Hence, with the above two steps, we formulate \\( D(H_{p}) \\) and \\( D(H_{g}) \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.62, + 0.892, + 0.654 + ], + "angle": 0, + "content": "\\[\nD \\left(H _ {p}\\right) = \\frac {\\operatorname {r e l u} \\left(H _ {p}\\right)}{\\left\\| \\operatorname {r e l u} \\left(H _ {p}\\right) \\right\\| _ {1}}, \\quad D \\left(H _ {g}\\right) = \\frac {H _ {g}}{\\left\\| H _ {g} \\right\\| _ {1}} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.656, + 0.892, + 0.719 + ], + "angle": 0, + "content": "Characteristic Function Calculation. 
For every type of body joints, after formulating the two distributions \\( D(H_{p}) \\) and \\( D(H_{g}) \\), we follow Eq. 1 to calculate the two characteristic functions \\( \\varphi_{D(H_p)}(\\mathbf{t}) \\) and \\( \\varphi_{D(H_g)}(\\mathbf{t}) \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.588, + 0.721, + 0.892, + 0.762 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\varphi_ {D \\left(H _ {p}\\right)} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D \\left(H _ {p}\\right)} \\left[ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\right], \\tag {3} \\\\ \\varphi_ {D (H _ {g})} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D (H _ {g})} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.764, + 0.892, + 0.884 + ], + "angle": 0, + "content": "Loss Function Formulation. Above we discuss how we obtain the two characteristic functions w.r.t. the predicted heatmap and the GT heatmap for a single type of body joints. Note that in bottom-up human pose estimation, multiple types of body joints are required to be located at the same time. Here, we first discuss how we formulate the loss function for a single type of body joints, and then introduce the overall loss function for all types of body joints." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.886, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To formulate the loss function for the \\(k\\)-th type of body" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "13011" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.091, + 0.47, + 0.141 + ], + "angle": 0, + "content": "joints, given the two characteristic functions \\(\\varphi_{D(H_p)}^k (\\mathbf{t})\\) and \\(\\varphi_{D(H_g)}^k (\\mathbf{t})\\), we first write the loss function \\(L_{k}\\) as the distance between these two characteristic functions [2]:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.144, + 0.47, + 0.174 + ], + "angle": 0, + "content": "\\[\nL _ {k} = \\int_ {\\mathbb {R} ^ {2}} \\| \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\| _ {2} ^ {2} \\omega (\\mathbf {t}, \\eta) d \\mathbf {t} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.177, + 0.469, + 0.268 + ], + "angle": 0, + "content": "where \\(\\omega (\\mathbf{t},\\eta)\\) is a weighting function. Here we set \\(\\omega (\\mathbf{t},\\eta)\\) to be the probability density function of a uniform distribution in \\(B_{U}\\), where \\(B_{U} = [-U,U]\\times [-U,U]\\) is a finite predefined range and \\(U\\) is a hyperparameter. This means that, \\(\\omega (\\mathbf{t},\\eta) = \\frac{1}{4U^2}\\) when \\(\\mathbf{t}\\in B_U\\) and \\(\\omega (\\mathbf{t},\\eta) = 0\\) otherwise. We thus further rewrite Eq. 4 as:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.282, + 0.469, + 0.315 + ], + "angle": 0, + "content": "\\[\nL _ {k} = \\int_ {B _ {U}} \\| \\frac {1}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.317, + 0.468, + 0.345 + ], + "angle": 0, + "content": "Finally, from Eq. 
5, we formulate the loss function \\( L_{k} \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.348, + 0.469, + 0.38 + ], + "angle": 0, + "content": "\\[\nL _ {k} = \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.383, + 0.469, + 0.446 + ], + "angle": 0, + "content": "where \\(\\gamma = \\frac{U^2\\sqrt{A}}{\\pi^2}\\) is a constant coefficient and \\(A\\) is the area of the heatmap. Note that Eq. 6 is equivalent to Eq. 5 during the optimization process, as the efficacy of the added constant \\(\\gamma\\) can be achieved by adjusting the learning rate." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.447, + 0.468, + 0.477 + ], + "angle": 0, + "content": "After getting the loss function for each type of body joints, we formulate the total loss for all types of joints as:" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.48, + 0.469, + 0.521 + ], + "angle": 0, + "content": "\\[\nL _ {t o t a l} = \\sum_ {k = 1} ^ {K} L _ {k} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.523, + 0.436, + 0.538 + ], + "angle": 0, + "content": "where \\(K\\) denotes the total number of body joint types." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.549, + 0.271, + 0.565 + ], + "angle": 0, + "content": "3.3. Theoretical Analysis" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.573, + 0.469, + 0.634 + ], + "angle": 0, + "content": "Below, we perform theoretical analysis to show the effectiveness of our method for bottom-up human pose estimation. Before going into the theorem, we first introduce a lemma that can facilitate the proof of the theorem." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.646, + 0.469, + 0.78 + ], + "angle": 0, + "content": "Lemma 1. Let \\(\\varphi_{D}\\) be the characteristic function of a 2-dimensional distribution \\(D\\). Let \\(R^{r} = [x_{1}^{lower}, x_{1}^{upper}] \\times [x_{2}^{lower}, x_{2}^{upper}]\\) a rectangular region, \\(R^{e} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times [x_{2}^{lower}, x_{2}^{upper}]\\cup [x_{1}^{lower}, x_{1}^{upper}] \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}\\) the edges of this region, and \\(R^{v} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}\\) the vertices of this region. Let \\(B_{T} = [-T,T] \\times [-T,T]\\). Denote \\([D]_{R}\\) the portion of the distribution \\(D\\) in \\(R\\). \\([D]_{R^r}\\) can then be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.782, + 0.49, + 0.87 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {\\text {l o w e r}}} - e ^ {- i t _ {n} x _ {n} ^ {\\text {u p p e r}}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. \\varphi_ {D} (\\boldsymbol {t})\\right) d t _ {1} d t _ {2}) + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.868, + 0.469, + 0.901 + ], + "angle": 0, + "content": "where \\(\\epsilon ([D]_{R^r}) = \\frac{[D]_{R^e}}{2} +\\frac{[D]_{R^v}}{4}\\) and \\(dt_{1}dt_{2}\\) are calculated based on the Lebesgue measure." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.092, + 0.892, + 0.136 + ], + "angle": 0, + "content": "The proof of Lemma 1 is provided in the supplementary. After introducing this lemma, we analyze our proposed method below." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.152, + 0.892, + 0.242 + ], + "angle": 0, + "content": "Theorem 1. Let \\( R_{sub}^{r} \\) be a random rectangular sub-region in the heatmap of the \\( k \\)-th type of body joints where \\( \\left\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\right\\|_2^2 \\) is relatively small compared to \\( \\left\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\right\\|_2^2 \\). The relation between the L2 loss w.r.t. this sub-region and \\( L_k \\) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.565, + 0.243, + 0.892, + 0.28 + ], + "angle": 0, + "content": "\\[\n\\frac {\\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2}}{\\lambda \\left(R _ {s u b} ^ {r}\\right)} \\leq L _ {k} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.281, + 0.892, + 0.313 + ], + "angle": 0, + "content": "Note that \\(\\lambda(R_{sub}^{r})\\) as the Lebesgue measure represents the area of \\(R_{sub}^{r}\\)." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.327, + 0.89, + 0.355 + ], + "angle": 0, + "content": "Proof. To prove Theorem 1, we first reformulate Lemma 1 as:" + }, + { + "type": "equation", + "bbox": [ + 0.501, + 0.357, + 0.921, + 0.479 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {l o w e r}} - e ^ {- i t _ {n} x _ {n} ^ {u p p e r}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. \\varphi_ {D} (\\mathbf {t})\\right) d t _ {1} d t _ {2}) + \\epsilon \\left(\\left[ D \\right] _ {R ^ {r}}\\right) \\\\ = \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D} (\\mathbf {t}) \\int_ {R ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {10} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.481, + 0.892, + 0.51 + ], + "angle": 0, + "content": "where \\(d\\mathbf{t} = dt_1dt_2\\), and both \\(d\\mathbf{x}\\) and \\(d\\mathbf{t}\\) are calculated based on the Lebesgue measure." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.512, + 0.891, + 0.53 + ], + "angle": 0, + "content": "After that, we rewrite \\(\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sab}^r}\\| _2^2\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.501, + 0.557, + 0.915, + 0.879 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2} (11) \\\\ \\approx \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right. 
(12) \\\\ - \\left(\\epsilon ([ D (H _ {p}) ] _ {R _ {s u b} ^ {r}}) - \\epsilon ([ D (H _ {g}) ] _ {R _ {s u b} ^ {r}})\\right) \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} (13) \\\\ - \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\int_ {B _ {T}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} (14) \\\\ \\approx \\left\\| \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\right\\| _ {2} ^ {2} (15) \\\\ \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (16) \\\\ \\end{array}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.48, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13012" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.2, + 0.089, + 0.768, + 0.104 + ], + "angle": 0, + "content": "Table 1. Comparisons with bottom-up methods on the COCO val2017 set (single-scale testing)." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.108, + 0.895, + 0.357 + ], + "angle": 0, + "content": "
| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.0 | 84.9 | 67.5 | 56.3 | 69.3 |
| HGG [16] | ECCV 2020 | Hourglass | 512 | 60.4 | 83.0 | 66.2 | - | - |
| PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 86.2 | 71.9 | 62.3 | 73.2 |
| PifPaf [17] | CVPR 2019 | ResNet-152 | - | 67.4 | - | - | - | - |
| PETR [30] | CVPR 2022 | - | 1333 | 67.4 | 87.0 | 74.9 | 61.7 | 75.9 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 71.0 | 88.3 | 77.4 | 66.7 | 78.5 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 67.4 | - | - | - | - |
| CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 72.4 | 89.1 | - | 67.3 | 80.4 |
| CID [36] | CVPR 2022 | HRNet-W32 | 512 | 66.0 | 86.7 | 72.3 | 59.8 | 76.0 |
| LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 72.2 | 88.9 | 78.9 | 68.1 | 78.9 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 68.9 | 87.8 | 74.9 | 63.0 | 77.4 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.8 | 88.5 | 76.8 | 66.3 | 77.4 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W32 | 512 | 68.6 | 87.6 | 74.1 | 62.0 | 78.0 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 68.4 | 87.1 | 74.8 | 62.7 | 77.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| + Ours |  | HrHRNet-W32 | 512 | 69.9 (↑2.8) | 88.1 | 76.0 | 64.2 | 78.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 69.9 | 87.2 | 76.1 | 65.4 | 76.4 |
| + Ours |  | HrHRNet-W48 | 640 | 72.5 (↑2.6) | 89.3 | 79.1 | 68.3 | 79.0 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.201, + 0.36, + 0.768, + 0.374 + ], + "angle": 0, + "content": "Table 2. Comparisons with bottom-up methods on the COCO val2017 set (multi-scale testing)." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.378, + 0.895, + 0.523 + ], + "angle": 0, + "content": "
| Method | Venue | Backbone | Input size | AP | \(AP^{50}\) | \(AP^{75}\) | \(AP^M\) | \(AP^L\) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| HGG [16] | ECCV 2020 | Hourglass | 512 | 68.3 | 86.7 | 75.8 | - | - |
| Point-Set Anchors [38] | ECCV 2020 | HRNet-W48 | 640 | 69.8 | 88.8 | 76.3 | - | - |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 72.3 | 88.3 | 78.6 | 68.6 | 78.6 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 71.4 | 88.9 | 77.8 | 66.3 | 78.9 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 73.2 | 89.8 | 79.1 | 69.1 | 79.3 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 71.2 | 88.2 | 77.2 | 66.5 | 78.0 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 69.9 | 87.1 | 76.0 | 65.3 | 77.0 |
| + Ours |  | HrHRNet-W32 | 512 | 71.8 (↑1.9) | 88.9 | 78.1 | 67.3 | 78.4 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 72.1 | 88.4 | 78.2 | 67.8 | 78.3 |
| + Ours |  | HrHRNet-W48 | 640 | 73.7 (↑1.6) | 89.9 | 79.6 | 69.6 | 79.5 |
" + }, + { + "type": "equation", + "bbox": [ + 0.085, + 0.532, + 0.469, + 0.718 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (17) \\\\ = 4 U ^ {2} A \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (18) \\\\ = \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (19) \\\\ = L _ {k} \\lambda \\left(R _ {\\text {s u b}} ^ {r}\\right) (20) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.73, + 0.47, + 0.852 + ], + "angle": 0, + "content": "where Eq. 12 holds since \\(\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\| _2^2\\) is relatively small compared to \\(\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\| _2^2\\), Eq. 13 holds because of Eq. 10, Eq. 15 holds based on the analysis in the supplementary, Eq. 16 holds due to the continuity of L2 distance and the Cauchy-Schwarz inequality, Eq. 17 holds due to the fact that \\(\\| e^{-i\\langle \\mathbf{t},\\mathbf{x}\\rangle}\\| _2^2 = 1\\) and the Cauchy-Schwarz inequality, Eq. 18 holds due to Fubini's theorem." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.853, + 0.47, + 0.884 + ], + "angle": 0, + "content": "We can then move \\(\\lambda(R_{sub}^r)\\) on the right hand side of Eq. 20 to the left hand side to get Theorem 1." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.534, + 0.892, + 0.852 + ], + "angle": 0, + "content": "As shown in Theorem 1, for the sub-region \\( R_{sub}^{r} \\), when the sum of the pixelwise L2 distances between the predicted and GT heatmaps over this entire sub-region is relatively large compared to only over its edges, \\( L_{k} \\) will be the upper bound of the L2 loss w.r.t. this sub-region. Because of this, via minimizing \\( L_{k} \\), we can enable the L2 losses w.r.t. all such sub-regions to be smaller. Note that such sub-regions can be easily found, since the edge of a sub-region typically contains many less pixels compared to the entire sub-region in the first place. Furthermore, for sub-regions containing missed or inaccurate body joints in its center, which are precisely the erroneous predictions that need to be corrected, the sum of the pixelwise L2 distances over the entire sub-region will then be much larger compared to only over its edge. Therefore, our method can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time, whereas the existing bottom-up methods, usually relying on the overall L2 loss, do not hold this property. Thus, our method can achieve superior performance for bottom-up human pose estimation." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Note that during implementation, since \\(L_{k}\\) itself as an integral is not tractable, inspired by [2], we define \\(\\hat{L}_k\\) as a" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.518, + 0.957 + ], + "angle": 0, + "content": "13013" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.184, + 0.09, + 0.784, + 0.104 + ], + "angle": 0, + "content": "Table 3. Comparisons with bottom-up methods on the COCO test-dev2017 set (single-scale testing)." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.108, + 0.895, + 0.368 + ], + "angle": 0, + "content": "
| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^M \) | \( AP^L \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.8 | 84.9 | 67.5 | 57.1 | 68.2 |
| Hourglass [24] | ECCV 2016 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0 |
| Associative Embedding [23] | NIPS 2017 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0 |
| SPM [25] | ICCV 2019 | Hourglass | - | 66.9 | 88.5 | 72.9 | 62.6 | 73.1 |
| MDN [34] | CVPR 2020 | Hourglass | - | 62.9 | 85.1 | 69.4 | 58.8 | 71.4 |
| PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 88.0 | 72.6 | 62.4 | 72.3 |
| PifPaf [17] | CVPR 2019 | ResNet-152 | - | 66.7 | - | - | 62.4 | 72.9 |
| PETR [30] | CVPR 2022 | SWin-L | 1333 | 70.5 | 91.5 | 78.7 | 65.2 | 78.0 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 70.0 | 89.4 | 77.3 | 65.7 | 76.9 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 66.7 | - | - | - | - |
| CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 71.0 | 90.2 | 78.2 | 66.2 | 77.8 |
| CID [36] | CVPR 2022 | HRNet-W48 | 640 | 70.7 | 90.3 | 77.9 | 66.3 | 77.8 |
| LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 70.8 | 89.7 | 77.8 | 66.7 | 77.0 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.2 | 89.9 | 76.9 | 65.2 | 77.0 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 69.6 | 89.7 | 76.0 | 64.9 | 76.3 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 67.4 | 88.3 | 73.9 | 62.1 | 75.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 66.4 | 87.5 | 72.8 | 61.2 | 74.2 |
| + Ours |  | HrHRNet-W32 | 512 | 68.9 (↑2.5) | 89.2 | 75.7 | 63.7 | 76.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 68.4 | 88.2 | 75.1 | 64.4 | 74.2 |
| + Ours |  | HrHRNet-W48 | 640 | 71.1 (↑2.7) | 90.4 | 78.2 | 66.9 | 77.2 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.186, + 0.371, + 0.782, + 0.385 + ], + "angle": 0, + "content": "Table 4. Comparisons with bottom-up methods on the COCO test-dev2017 set (multi-scale testing)." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.389, + 0.895, + 0.601 + ], + "angle": 0, + "content": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Hourglass [24]ECCV 2016Hourglass51263.085.768.958.070.4
Associative Embedding [23]NIPS 2017Hourglass51263.085.768.958.070.4
HGG [16]ECCV 2020Hourglass51267.685.173.762.774.6
SimplePose [18]AAAI 2020IMHN51268.1--66.870.5
PersonLab [26]ECCV 2018-140168.789.075.464.175.5
PETR [30]CVPR 2022SWin-L133371.291.479.666.978.0
Point-Set Anchors [38]ECCV 2020HRNet-W4864068.789.976.364.875.3
DEKR [11]CVPR 2021HRNet-W4864071.089.278.067.176.9
CIR&QEM [40]AAAI 2022HRNet-W4864071.790.478.767.378.5
SWAHR [22]CVPR 2021HrHRNet-W4864072.090.778.867.877.7
CenterAttention [4]ICCV 2021HrHRNet-W4864071.190.577.566.976.7
PoseTrans [15]ECCV 2022HrHRNet-W3251269.989.377.065.276.2
HrHRNet [6]CVPR 2020HrHRNet-W3251269.089.075.864.475.2
+ OursHrHRNet-W3251270.8(↑1.8)90.177.866.077.3
HrHRNet [6]CVPR 2020HrHRNet-W4864070.589.377.266.675.8
+ OursHrHRNet-W4864072.3(↑1.8)91.579.867.978.2
" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.612, + 0.275, + 0.627 + ], + "angle": 0, + "content": "tractable alternative of \\(L_{k}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.099, + 0.63, + 0.469, + 0.67 + ], + "angle": 0, + "content": "\\[\n\\hat {L} _ {k} = \\sum_ {m = 1} ^ {M} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t} _ {m}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t} _ {m})\\right) \\| _ {2} ^ {2} \\tag {21}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.672, + 0.469, + 0.702 + ], + "angle": 0, + "content": "where \\(\\{\\mathbf{t}_1,\\dots ,\\mathbf{t}_M\\}\\) denotes a set of \\(M\\) vectors randomly sampled from \\(B_{U}\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.702, + 0.469, + 0.731 + ], + "angle": 0, + "content": "The total loss \\(\\hat{L}_{total}\\) for all body joint types can then be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.731, + 0.469, + 0.771 + ], + "angle": 0, + "content": "\\[\n\\hat {L} _ {\\text {t o t a l}} = \\sum_ {k = 1} ^ {K} \\hat {L} _ {k} \\tag {22}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.787, + 0.336, + 0.804 + ], + "angle": 0, + "content": "3.4. Overall Training and Testing" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.81, + 0.47, + 0.902 + ], + "angle": 0, + "content": "Here we discuss the overall training and testing scheme of our method. Specifically, during training, we supervise the predicted heatmaps via the total loss in Eq. 22 instead of using the commonly used overall L2 loss, and following [6, 22, 23], we conduct grouping via associate embedding. During testing, we follow the evaluation procedure of" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.612, + 0.892, + 0.673 + ], + "angle": 0, + "content": "previous works [6, 22] that conduct bottom-up human pose estimation. Note that in experiments, it is easy to implement \\(\\hat{L}_k\\) in Eq. 21, and we provide more details on how we implement \\(\\hat{L}_k\\) in experiments in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.688, + 0.634, + 0.705 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.714, + 0.892, + 0.806 + ], + "angle": 0, + "content": "To evaluate the effectiveness of our method for bottom-up human pose estimation, we conduct experiments on the COCO dataset [21] and the CrowdPose dataset [19]. Besides, we also test the effectiveness of our method on top-down methods in the supplementary. We conduct our experiments on RTX 3090 GPUs." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.817, + 0.744, + 0.833 + ], + "angle": 0, + "content": "4.1. COCO Keypoint Detection" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.901 + ], + "angle": 0, + "content": "Dataset & evaluation metric. The COCO dataset [21] contains over 200k images, and in this dataset, each person instance is annotated with 17 body joints. This dataset consists of three subsets including COCO training set (57k" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13014" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.251, + 0.09, + 0.717, + 0.104 + ], + "angle": 0, + "content": "Table 5. Comparisons with bottom-up methods on the CrowdPose testing set." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.108, + 0.894, + 0.34 + ], + "angle": 0, + "content": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( APE \\)\\( APM \\)\\( AP^H \\)
w/ single-scale testing
OpenPose [5]CVPR 2017VGG-19----62.748.732.3
HrHRNet [6]CVPR 2020HrHRNet-W4864065.986.470.673.366.557.9
PETR [30]CVPR 2022--72.090.978.878.072.565.4
DEKR [11]CVPR 2021HRNet-W4864067.386.472.274.668.158.7
PINet [37]NIPS 2021HRNet-W3251268.988.774.775.469.661.5
CID [36]CVPR 2022HRNet-W4864072.390.877.978.773.064.8
SWAHR [22]CVPR 2021HrHRNet-W4864071.688.577.678.972.463.0
CenterAttention [4]ICCV 2021HrHRNet-W4864067.687.772.773.968.260.3
OursHrHRNet-W4864072.688.878.979.273.165.6
w/ multi-scale testing
HrHRNet [6]CVPR 2020HrHRNet-W4864067.687.472.675.868.158.9
DEKR [11]CVPR 2021HRNet-W4864068.085.573.476.668.858.4
PINet [37]NIPS 2021HRNet-W3251269.889.175.676.470.562.2
SWAHR [22]CVPR 2021HrHRNet-W4864073.890.579.981.274.764.7
CenterAttention [4]ICCV 2021HrHRNet-W4864069.488.674.676.670.061.5
OursHrHRNet-W4864074.190.780.281.374.965.1
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.352, + 0.47, + 0.457 + ], + "angle": 0, + "content": "images), COCO validation set (5k images), and COCO test-dev set (20k images). Following the train-test split of [22], we report results on the val2017 set and test-dev2017 set. Also following [22], we evaluate model performance using standard average precision (AP) calculated based on Object Keypoint Similarity (OKS) on this dataset, and report the following metrics: AP, \\(\\mathrm{AP}^{50}\\), \\(\\mathrm{AP}^{75}\\), \\(\\mathrm{AP}^{\\mathrm{M}}\\), and \\(\\mathrm{AP}^{\\mathrm{L}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.457, + 0.47, + 0.608 + ], + "angle": 0, + "content": "Implementation details. Following [4, 22], we use the HrHRNet [6] as the baseline, and apply our proposed method to the respective two backbones including HrHRNet-W32 and HrHRNet-W48. For these backbones, we follow their original training and testing configurations specified in [6]. Also following [6], we adopt three scales 0.5, 1, and 2 in multi-scale testing. To calculate \\(\\hat{L}_k\\) following Eq. 21, we set the number of samples \\(M\\) to 256 and the hyperparameter \\(U\\) w.r.t. the finite range \\(B_U\\) to 64 in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.609, + 0.471, + 0.791 + ], + "angle": 0, + "content": "Results. In Tab. 1 and Tab. 2, we report single-scale testing and multi-scale testing results on the COCO val2017 set. In Tab. 3 and Tab. 4, we report single-scale testing and multi-scale testing results on the COCO test-dev2017 set. We observe that after applying our method on both HrHRNet-W32 and HrHRNet-W48, a significant performance improvement is achieved, demonstrating the effectiveness of our method. Moreover, we also compare our method with other state-of-the-art bottom-up human pose estimation methods. Compared to these methods, our method consistently achieves the highest AP score, further demonstrating the effectiveness of our method." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.802, + 0.203, + 0.817 + ], + "angle": 0, + "content": "4.2. CrowdPose" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Dataset & evaluation metric. The CrowdPose dataset [19] contains about 20k images and 80k person instances, which are annotated with 14 body joints. This dataset consists of three subsets including CrowdPose training set (10k images), CrowdPose validation set (2k images), and Crowd-" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.352, + 0.892, + 0.44 + ], + "angle": 0, + "content": "Pose testing set (8k images). Following the train-test split of [6, 22], we report results on the testing set. Also following [6, 22], we evaluate model performance using standard AP calculated based on OKS on the CrowdPose dataset, and report the following metrics: AP, \\(\\mathrm{AP}^{50}\\), \\(\\mathrm{AP}^{75}\\), \\(\\mathrm{AP}^{\\mathrm{E}}\\), \\(\\mathrm{AP}^{\\mathrm{M}}\\), and \\(\\mathrm{AP}^{\\mathrm{H}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.893, + 0.577 + ], + "angle": 0, + "content": "Implementation details. On the CrowdPose dataset, we also use the HrHRNet [6] as the baseline, and we use HrHRNet-W48 as the backbone following [4,6,22]. We follow the original training and testing configurations specified in [6], and also follow [6] to adopt three scales 0.5, 1, and 2 in multi-scale testing. 
Besides, same as the experiments on the COCO dataset, we also set the number of samples \\(M\\) to 256 and the hyperparameter \\(U\\) w.r.t. the finite range \\(B_U\\) to 64 on the CrowdPose dataset." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.579, + 0.892, + 0.64 + ], + "angle": 0, + "content": "Results. In Tab. 5, we report the single-scale testing and multi-scale testing results on the CrowdPose testing set. As shown, our method consistently achieves the highest AP score, demonstrating the effectiveness of our method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.65, + 0.665, + 0.665 + ], + "angle": 0, + "content": "4.3. Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.674, + 0.892, + 0.719 + ], + "angle": 0, + "content": "We conduct ablation studies on the COCO validation set via applying our proposed method on HrHRNet-W32 [6] with single-scale testing." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.719, + 0.892, + 0.84 + ], + "angle": 0, + "content": "Impact of the number of samples \\(M\\). To calculate \\(\\hat{L}_k\\) following Eq. 21, we need to set the number of samples \\(M\\), which we set to 256 in our experiments. We evaluate other choices of the number of samples \\(M\\) in Tab. 6. As shown, all variants outperform the baseline method, and after the number of samples \\(M\\) becomes larger than 256 the model performance becomes stabilized. Therefore, we set the number of samples \\(M\\) to be 256 in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Impact of the finite range \\(B_U\\) with different \\(U\\). We evaluate different choices of \\(U\\) in Tab. 7. As shown, all variants outperform the baseline method, and after the hyperparameter \\(U\\) becomes larger than 64, the model performance does" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13015" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.081, + 0.09, + 0.885, + 0.312 + ], + "angle": 0, + "content": "
(a)(b)(c)(d)
Body joint name:Right shouldersRight eyesRight elbowsLeft shoulders
Baseline (HrHRNet-W32) result:
Ours result:
" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.314, + 0.893, + 0.371 + ], + "angle": 0, + "content": "Figure 2. Qualitative results of our method and the baseline HrHRNet-W32 model [6]. As shown, the baseline method misses body joints (in (a) and (b)) or misidentifies body joints (in (c) and (d)) in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). Meanwhile, our method provides a more accurate localization result for the body joints of different people in different sub-regions of the predicted heatmap at the same time. More qualitative results are in the supplementary. (Best viewed in color.)" + }, + { + "type": "table_caption", + "bbox": [ + 0.124, + 0.377, + 0.422, + 0.391 + ], + "angle": 0, + "content": "Table 6. Evaluation on the number of samples \\(M\\)" + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.395, + 0.47, + 0.511 + ], + "angle": 0, + "content": "
MethodAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Baseline(HrHRNet-W32)67.186.273.061.576.1
4 samples67.986.973.862.476.9
16 samples68.987.574.863.577.4
64 samples69.687.975.663.977.8
256 samples69.988.176.064.278.1
1024 samples69.888.276.064.378.0
" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.521, + 0.469, + 0.551 + ], + "angle": 0, + "content": "not enhance anymore. Thus, we set the hyperparameter \\( U \\) to be 64 in our experiments." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.553, + 0.469, + 0.58 + ], + "angle": 0, + "content": "Table 7. Evaluation on the hyperparameter \\( U \\) w.r.t. the finite range \\( {B}_{U} \\) ." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.585, + 0.47, + 0.7 + ], + "angle": 0, + "content": "
MethodAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Baseline(HrHRNet-W32)67.186.273.061.576.1
U = 867.786.773.562.276.5
U = 1668.687.374.262.976.9
U = 3269.487.875.463.777.6
U = 6469.988.176.064.278.1
U = 12869.888.075.864.078.0
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.705, + 0.47, + 0.856 + ], + "angle": 0, + "content": "Training time. On the COCO dataset, we test the training time of our method that trains the backbone model (HrHRNet-W32 [6]) with the loss function in Eq. 22, and compare it with the training time of the baseline that trains the same network with the overall L2 loss. As shown in Tab. 8, though our method achieves much better performance, it brings only very little increase of the training time. Note that as we follow the same evaluation procedure of previous works [6, 22], the testing time with and without our proposed method are the same." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.856, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Qualitative results. Some qualitative results are shown in Fig. 2. As shown, the baseline method which uses the overall L2 loss to optimize the heatmap prediction can miss or" + }, + { + "type": "table_caption", + "bbox": [ + 0.572, + 0.377, + 0.819, + 0.392 + ], + "angle": 0, + "content": "Table 8. Comparison of the training time." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.395, + 0.892, + 0.441 + ], + "angle": 0, + "content": "
MethodTraining time per epochPerformance(AP)
Baseline(HrHRNet-W32)1.11h67.1
Baseline + Ours1.19h69.9
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.453, + 0.893, + 0.542 + ], + "angle": 0, + "content": "get inaccurate body joints in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). In contrast, our method locates body joints of different people in different sub-regions of the predicted heatmap more accurately at the same time, demonstrating the effectiveness of our method." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.557, + 0.619, + 0.572 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.582, + 0.893, + 0.822 + ], + "angle": 0, + "content": "In this paper, we have proposed a novel bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted and GT heatmaps. We theoretically analyze that the distance between the two characteristic functions is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Thus, via minimizing the distance between the two characteristic functions, our method locates body joints in different sub-regions of the predicted heatmap more accurately at the same time. Our method achieves superior performance on the COCO dataset and the CrowdPose dataset. Besides, our method could potentially also be applied in other tasks such as multi-object 6D pose estimation [1], facial landmark extraction [3], and fingerprint minutiae detection [10]. We leave this as our future work." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.823, + 0.892, + 0.899 + ], + "angle": 0, + "content": "Acknowledgement. This work is supported by MOE AcRF Tier 2 (Proposal ID: T2EP20222-0035), National Research Foundation Singapore under its AI Singapore Programme (AISG-100E-2020-065), and SUTD SKI Project (SKI 2021_02_06)." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13016" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.116, + 0.47, + 0.186 + ], + "angle": 0, + "content": "[1] Arash Amini, Arul Selvam Periyasamy, and Sven Behnke. Yolopose: Transformer-based multi-object 6d pose estimation using keypoint regression. In Intelligent Autonomous Systems 17: Proceedings of the 17th International Conference IAS-17, pages 392–406. Springer, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.187, + 0.472, + 0.257 + ], + "angle": 0, + "content": "[2] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7478-7487, 2020. 3, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.259, + 0.472, + 0.3 + ], + "angle": 0, + "content": "[3] Matteo Bodini. A review of facial landmark extraction in 2d images and videos using deep learning. *Big Data and Cognitive Computing*, 3(1):14, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.302, + 0.47, + 0.372 + ], + "angle": 0, + "content": "[4] Guillem Brasó, Nikita Kister, and Laura Leal-Taixe. The center of attention: Center-keypoint grouping via attention for multi-person pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11853-11863, 2021. 
5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.373, + 0.47, + 0.442 + ], + "angle": 0, + "content": "[5] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7291–7299, 2017. 1, 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.445, + 0.47, + 0.528 + ], + "angle": 0, + "content": "[6] Bowen Cheng, Bin Xiao, Jingdong Wang, Honghui Shi, Thomas S Huang, and Lei Zhang. Higherhrnet: Scale-aware representation learning for bottom-up human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5386-5395, 2020. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.53, + 0.472, + 0.6 + ], + "angle": 0, + "content": "[7] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 183-192, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.601, + 0.47, + 0.657 + ], + "angle": 0, + "content": "[8] Kacper P Chwialkowski, Aaditya Ramdas, Dino Sejdinovic, and Arthur Gretton. Fast two-sample testing with analytic representations of probability measures. Advances in Neural Information Processing Systems, 28, 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.659, + 0.47, + 0.714 + ], + "angle": 0, + "content": "[9] TW Epps and Kenneth J Singleton. An omnibus test for the two-sample problem using the empirical characteristic function. Journal of Statistical Computation and Simulation, 26(3-4):177-203, 1986. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.716, + 0.47, + 0.773 + ], + "angle": 0, + "content": "[10] Yulin Feng and Ajay Kumar. Detecting locally, patching globally: An end-to-end framework for high speed and accurate detection of fingerprint minutiae. IEEE Transactions on Information Forensics and Security, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.774, + 0.47, + 0.844 + ], + "angle": 0, + "content": "[11] Zigang Geng, Ke Sun, Bin Xiao, Zhaoxiang Zhang, and Jingdong Wang. Bottom-up human pose estimation via disentangled keypoint regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14676-14686, 2021. 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.845, + 0.47, + 0.9 + ], + "angle": 0, + "content": "[12] Kerui Gu, Linlin Yang, and Angela Yao. Removing the bias of integral pose regression. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11067-11076, 2021. 1" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.116, + 0.472, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[13] CE Heathcote. A test of goodness of fit for symmetric random variables1. Australian Journal of Statistics, 14(2):172-181, 1972. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.134, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[14] Eldar Insafutdinov, Leonid Pishchulin, Bjoern Andres, Mykhaylo Andriluka, and Bernt Schiele. Deepercut: A deeper, stronger, and faster multi-person pose estimation model. In European conference on computer vision, pages 34-50. Springer, 2016. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.205, + 0.892, + 0.26 + ], + "angle": 0, + "content": "[15] Wentao Jiang, Sheng Jin, Wentao Liu, Chen Qian, Ping Luo, and Si Liu. Posetrans: A simple yet effective pose transformation augmentation for human pose estimation. arXiv preprint arXiv:2208.07755, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.26, + 0.892, + 0.329 + ], + "angle": 0, + "content": "[16] Sheng Jin, Wentao Liu, Enze Xie, Wenhai Wang, Chen Qian, Wanli Ouyang, and Ping Luo. Differentiable hierarchical graph grouping for multi-person pose estimation. In European Conference on Computer Vision, pages 718-734. Springer, 2020. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.33, + 0.892, + 0.385 + ], + "angle": 0, + "content": "[17] Sven Kreiss, Lorenzo Bertoni, and Alexandre Alahi. Pifpaf: Composite fields for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11977-11986, 2019. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.386, + 0.892, + 0.453 + ], + "angle": 0, + "content": "[18] Jia Li, Wen Su, and Zengfu Wang. Simple pose: Rethinking and improving a bottom-up approach for multi-person pose estimation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 11354-11361, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.455, + 0.892, + 0.525 + ], + "angle": 0, + "content": "[19] Jiefeng Li, Can Wang, Hao Zhu, Yihuan Mao, Hao-Shu Fang, and Cewu Lu. Crowdpose: Efficient crowded scenes pose estimation and a new benchmark. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10863-10872, 2019. 2, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.525, + 0.892, + 0.58 + ], + "angle": 0, + "content": "[20] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.58, + 0.892, + 0.65 + ], + "angle": 0, + "content": "[21] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.65, + 0.892, + 0.733 + ], + "angle": 0, + "content": "[22] Zhengxiong Luo, Zhicheng Wang, Yan Huang, Liang Wang, Tieniu Tan, and Erjin Zhou. Rethinking the heatmap regression for bottom-up human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13264-13273, 2021. 1, 2, 5, 6, 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.734, + 0.892, + 0.789 + ], + "angle": 0, + "content": "[23] Alejandro Newell, Zhiao Huang, and Jia Deng. Associative embedding: End-to-end learning for joint detection and grouping. Advances in neural information processing systems, 30, 2017. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.79, + 0.892, + 0.844 + ], + "angle": 0, + "content": "[24] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In European conference on computer vision, pages 483-499. Springer, 2016. 
1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.845, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[25] Xuecheng Nie, Jiashi Feng, Jianfeng Zhang, and Shuicheng Yan. Single-stage multi-person pose machines. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6951-6960, 2019. 1, 2, 3, 6" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.945, + 0.519, + 0.957 + ], + "angle": 0, + "content": "13017" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[26] George Papandreou, Tyler Zhu, Liang-Chieh Chen, Spyros Gidaris, Jonathan Tompson, and Kevin Murphy. Personlab: Person pose estimation and instance segmentation with a bottom-up, part-based, geometric embedding model. In Proceedings of the European conference on computer vision (ECCV), pages 269-286, 2018. 1, 2, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.177, + 0.471, + 0.259 + ], + "angle": 0, + "content": "[27] Leonid Pishchulin, Eldar Insafutdinov, Siyu Tang, Bjoern Andres, Mykhaylo Andriluka, Peter V Gehler, and Bernt Schiele. Deepcut: Joint subset partition and labeling for multi person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4929-4937, 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.26, + 0.471, + 0.328 + ], + "angle": 0, + "content": "[28] Xuelin Qian, Yanwei Fu, Tao Xiang, Wenxuan Wang, Jie Qiu, Yang Wu, Yu-Gang Jiang, and Xiangyang Xue. Pose-normalized image generation for person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 650–667, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.33, + 0.47, + 0.371 + ], + "angle": 0, + "content": "[29] Haoxuan Qu, Li Xu, Yujun Cai, Lin Geng Foo, and Jun Liu. Heatmap distribution matching for human pose estimation. In Advances in Neural Information Processing Systems. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.372, + 0.47, + 0.439 + ], + "angle": 0, + "content": "[30] Dahu Shi, Xing Wei, Liangqi Li, Ye Ren, and Wenming Tan. End-to-end multi-person pose estimation with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11069-11078, 2022. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.441, + 0.47, + 0.496 + ], + "angle": 0, + "content": "[31] Weibo Shu, Jia Wan, Kay Chen Tan, Sam Kwong, and Antoni B Chan. Crowd counting in the frequency domain. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19618-19627, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.497, + 0.47, + 0.564 + ], + "angle": 0, + "content": "[32] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5693-5703, 2019. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.567, + 0.47, + 0.621 + ], + "angle": 0, + "content": "[33] Jonathan J. Thompson, Arjun Jain, Yann LeCun, and Christoph Bregler. Joint training of a convolutional network and a graphical model for human pose estimation. Advances in neural information processing systems, 27, 2014. 
1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.622, + 0.47, + 0.69 + ], + "angle": 0, + "content": "[34] Ali Varamesh and Tinne Tuytelaars. Mixture dense regression for object detection and human pose estimation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13083-13092. IEEE, 2020. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.692, + 0.47, + 0.76 + ], + "angle": 0, + "content": "[35] Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9469-9478, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.762, + 0.47, + 0.816 + ], + "angle": 0, + "content": "[36] Dongkai Wang and Shiliang Zhang. Contextual instance decoupling for robust multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11060-11068, 2022. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.818, + 0.47, + 0.872 + ], + "angle": 0, + "content": "[37] Dongkai Wang, Shiliang Zhang, and Gang Hua. Robust pose estimation in crowded scenes with direct pose-level inference. Advances in Neural Information Processing Systems, 34:6278-6289, 2021. 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.873, + 0.47, + 0.901 + ], + "angle": 0, + "content": "[38] Fangyun Wei, Xiao Sun, Hongyang Li, Jingdong Wang, and Stephen Lin. Point-set anchors for object detection, instance" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.471, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.093, + 0.892, + 0.121 + ], + "angle": 0, + "content": "segmentation and pose estimation. In European Conference on Computer Vision, pages 527-544. Springer, 2020. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.894, + 0.176 + ], + "angle": 0, + "content": "[39] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. In Proceedings of the European conference on computer vision (ECCV), pages 466-481, 2018. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.894, + 0.247 + ], + "angle": 0, + "content": "[40] Yabo Xiao, Dongdong Yu, Xiao Juan Wang, Lei Jin, Guoli Wang, and Qian Zhang. Learning quality-aware representation for multi-person pose regression. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2822-2830, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.894, + 0.318 + ], + "angle": 0, + "content": "[41] Jiangtao Xie, Fei Long, Jiaming Lv, Qilong Wang, and Peihua Li. Joint distribution matters: Deep brownian distance covariance for few-shot classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7972-7981, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.32, + 0.892, + 0.387 + ], + "angle": 0, + "content": "[42] Nan Xue, Tianfu Wu, Gui-Song Xia, and Liangpei Zhang. Learning local-global contextual adaptation for multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13065-13074, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.39, + 0.892, + 0.445 + ], + "angle": 0, + "content": "[43] Sijie Yan, Yuanjun Xiong, and Dahua Lin. 
Spatial temporal graph convolutional networks for skeleton-based action recognition. In Thirty-second AAAI conference on artificial intelligence, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.446, + 0.892, + 0.488 + ], + "angle": 0, + "content": "[44] Yuhui Yuan, Rao Fu, Lang Huang, Weihong Lin, Chao Zhang, Xilin Chen, and Jingdong Wang. Hrformer: High-resolution transformer for dense prediction. 2021. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.489, + 0.892, + 0.517 + ], + "angle": 0, + "content": "[45] Xingyi Zhou, Dequan Wang, and Philipp Krahenbuhl. Objects as points. arXiv preprint arXiv:1904.07850, 2019. 3" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.894, + 0.517 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.946, + 0.52, + 0.957 + ], + "angle": 0, + "content": "13018" + } + ] +] \ No newline at end of file diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_origin.pdf b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4a5d4a882412655bd8afb8d54b67a38060c38a14 --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/1484ca20-37b6-4284-8188-8a19d046c61f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67752206afb45ec0d47295384b28d05ed7cb957004751230b521afa466cb7bd4 +size 8602077 diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/full.md b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..539a4b1ab24cc4d29cd035df23272ede1c1032e6 --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/full.md @@ -0,0 +1,324 @@ +# A Characteristic Function-based Method for Bottom-up Human Pose Estimation + +Haoxuan $\mathrm{Qu}^{1}$ , Yujun $\mathrm{Cai}^{2}$ , Lin Geng $\mathrm{Foo}^{1}$ , Ajay Kumar $^{3}$ , Jun Liu $^{1,*}$ + +$^{1}$ Singapore University of Technology and Design, Singapore + +$^{2}$ Nanyang Technological University, Singapore + +3The Hong Kong Polytechnic University, Hong Kong + +haoxuan-qu@mymail.sutd.edu.sg, yujun001@e.ntu.edu.sg, lingeng.foo@mymail.sutd.edu.sg + +ajay.kumar@polyu.edu.hk, jun.liu@sutd.edu.sg + +# Abstract + +Most recent methods formulate the task of human pose estimation as a heatmap estimation problem, and use the overall L2 loss computed from the entire heatmap to optimize the heatmap prediction. In this paper, we show that in bottom-up human pose estimation where each heatmap often contains multiple body joints, using the overall L2 loss to optimize the heatmap prediction may not be the optimal choice. This is because, minimizing the overall L2 loss cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. To cope with this problem, from a novel perspective, we propose a new bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted heatmap and the groundtruth heatmap. 
Our analysis presented in this paper indicates that the distance between these two characteristic functions is essentially the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, we can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap. We show the effectiveness of our proposed method through extensive experiments on the COCO dataset and the CrowdPose dataset. + +# 1. Introduction + +Human pose estimation aims to locate the body joints of each person in a given RGB image. It is relevant to various applications, such as action recognition [7, 43], person Re-ID [28], and human object interaction [35]. For tackling human pose estimation, most of the recent methods fall + +into two major categories: top-down methods and bottom-up methods. Top-down methods [24,32,33,39,44] generally use a human detector to detect all the people in the image, and then perform single-person pose estimation for each detected subject separately. In contrast, bottom-up methods [5,6,16,17,22,23,25,26] usually locate the body joints of all people in the image at the same time. Hence, bottom-up methods, the main focus of this paper, are often a more efficient choice compared to top-down methods, especially when there are many people in the input image [5]. + +In existing works, it is common to regard human pose estimation as a heatmap prediction problem, since this can preserve the spatial structure of the input image throughout the encoding and decoding process [12]. During the general optimization process, the groundtruth (GT) heatmaps $\mathbf{H}_g$ are first constructed via putting 2D Gaussian blobs centered at the GT coordinates of the body joints. After that, these constructed GT heatmaps are used to supervise the predicted heatmaps $\mathbf{H}_p$ via the overall L2 loss $L_2^{overall}$ calculated (averaged) over the whole heatmap. Specifically, denoting the area of the heatmap as $A$ , we have $L_2^{overall} = \frac{\|\mathbf{H}_p - \mathbf{H}_g\|_2^2}{A}$ . + +We argue that using the overall L2 loss to supervise the predicted heatmap may not be the optimal choice in bottom-up methods where each heatmap often contains multiple body joints from the multiple people in various sub-regions, as shown in Fig. 1(b). This is because, a smaller overall L2 loss calculated over the whole heatmap cannot always lead the model to locate all the body joints across different sub-regions in the heatmap more accurately. As illustrated in Fig. 1(a), the predicted heatmap #2 has a smaller overall L2 loss compared to the predicted heatmap #1. However, the predicted heatmap #2 locates the body joint in the top-right sub-region wrongly, whereas the predicted heatmap #1 locates body joints in both the top-right and bottom-left sub-regions correctly. This is because, while the decrease of the overall L2 loss can be achieved when the L2 loss w.r.t. each sub-region either decreases or remains the same, this is not necessarily true for all regions. + +![](images/02e7d252ce814c9f2ba6b4c3168443053f5d3fa053626fd76dfe151096be69a8.jpg) +(a) + +![](images/18735f821139ff761ecdb7c55bc60c0937b2ab52d3e8fc8c84dd5d5010284298.jpg) +(b) +Figure 1. (a) Illustration of heatmaps. 
The predicted heatmap #2 with a smaller overall L2 loss locates the body joint in the top-right sub-region wrongly, while the predicted heatmap #1 with a larger overall L2 loss locates body joints in both the top-right and bottom-left sub-regions correctly. (b) Output of a commonly used bottom-up method, HrHRNet-W32 [6]. As shown, it misses the left ankle in the dashed sub-region of image (i) completely, and misidentifies the right knee in the dashed sub-region of image (ii). This indicates that accurately localizing the body joints of multiple people in a single heatmap is a challenging problem. (Best viewed in color.)

In particular, while the overall L2 loss decreases when the L2 loss w.r.t. each sub-region either decreases or remains the same (e.g., from predicted heatmap #0 to predicted heatmap #1), it can also decrease when the L2 loss w.r.t. certain sub-regions decreases and the L2 loss w.r.t. some other sub-regions increases (e.g., from predicted heatmap #1 to predicted heatmap #2). This indicates that, in bottom-up methods, the decrease of the overall L2 loss does not always lead to a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time. Besides, we also show some results of a commonly used bottom-up method, HrHRNet-W32 [6], in Fig. 1(b). As shown, it may even miss or misidentify certain body joints when there are a number of people in the input image. This suggests that it is quite difficult to accurately locate all body joints of all people in the predicted heatmap.

To tackle the above-mentioned problem in bottom-up methods, in this paper, rather than using the overall L2 loss to supervise the whole heatmap, we instead aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To this end, from a new perspective, we express the predicted and GT heatmaps as characteristic functions, and minimize the difference between these functions, allowing different sub-regions of the predicted heatmap to be optimized at the same time.

More specifically, we first construct two distributions respectively from the predicted heatmap and the GT heatmap. After that, we obtain two characteristic functions of these two distributions and optimize the heatmap prediction via minimizing the distance between these two characteristic functions. We analyze in Sec. 3.3 that the distance between the two characteristic functions is the upper bound of the L2 losses w.r.t. sub-regions in the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, our method can locate body joints in different sub-regions more accurately at the same time, and thus achieve superior performance.

The contributions of our work are summarized as follows. 1) From a new perspective, we supervise the predicted heatmap using the distance between the characteristic functions of the predicted and GT heatmaps. 2) We analyze (in Sec. 3.3) that the L2 losses w.r.t. sub-regions of the predicted heatmap are upper-bounded by the distance between the characteristic functions. 3) Our proposed method achieves state-of-the-art performance on the evaluation benchmarks [19, 21].

# 2. Related Work

Human Pose Estimation. Due to the wide range of applications, human pose estimation has received considerable attention [5, 6, 16, 17, 22-26, 29, 32, 33, 39, 44], and most of the recent methods fall into two categories: top-down methods and bottom-up methods.
In top-down methods, a human detector is generally used to detect all the people in the image first, and then single-person pose estimation is conducted for each detected subject separately. The single-person pose estimation methods that are commonly used in top-down methods include Hourglass [24], Simple Baseline [39], HRNet [32], and HRFormer [44], among others. Besides top-down methods, bottom-up methods [5, 6, 16, 17, 22, 23, 25, 26] have also attracted a lot of attention recently due to their efficiency [5].

In bottom-up methods, most methods first detect all identity-free body joints over the whole input image, and then group them into different people. Among these methods, DeepCut and PersonLab [14, 26, 27] incorporate offset fields into their methods, while OpenPose and PifPaf [5, 17] make use of part affinity fields. From another perspective, associative embedding [23] teaches the model to output the group assignments and the localization results of the body joints at the same time, and HGG [16] further combines graph neural networks on top of the associative embedding. Besides the above methods, there also exist some bottom-up methods [11, 25, 45] that directly regress the coordinates of body joints belonging to the same person.

Existing heatmap-based bottom-up methods often use an overall L2 loss calculated over the whole heatmap to optimize heatmap prediction. Differently, in this paper, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the difference between the characteristic functions of the predicted and GT heatmaps.

Characteristic Function. The characteristic function, a concept originally proposed in probability theory and statistics, has been studied in various areas [2, 8, 9, 13, 20, 31, 41] over the years, such as two-sample testing [8, 9, 13], generative adversarial nets [2, 20], and few-shot classification [41]. Inspired by these works, in this paper, from a novel perspective, we propose to optimize the heatmap prediction for bottom-up human pose estimation via minimizing the distance between two characteristic functions. We theoretically analyze that the distance between the two characteristic functions respectively constructed from the predicted heatmap and the GT heatmap is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap.

# 3. Method

In bottom-up human pose estimation, as shown in Fig. 1(a), minimizing the overall L2 loss between the predicted heatmap and the GT heatmap cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. In this work, we aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To achieve this, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions constructed from the predicted and GT heatmaps.

Below, we first briefly introduce the characteristic function, and then discuss how we formulate the heatmap optimization process. After that, we show the theoretical analysis of our proposed method.

# 3.1. Revisiting Characteristic Function

The characteristic function is generally used in probability theory and statistics.
Given an $N$ -dimensional distribution $D$ , its corresponding characteristic function $\varphi_{D}$ can be + +written as: + +$$ +\varphi_ {D} (\mathbf {t}) = E _ {\mathbf {x} \sim D} [ e ^ {i \langle \mathbf {t}, \mathbf {x} \rangle} ] = \int_ {\mathbb {R} ^ {N}} e ^ {i \langle \mathbf {t}, \mathbf {x} \rangle} d D \tag {1} +$$ + +where $E$ represents expectation, $i^2 = -1$ , $\langle \cdot, \cdot \rangle$ represents dot product, $\mathbf{t}$ is a random $N$ -dimensional vector, and $\mathbf{x}$ is an $N$ -dimensional vector sampled from $D$ . Note that the characteristic function always exists and has a one-to-one correspondence with the distribution. Besides, the characteristic function is the Fourier transform of the probability density function if the latter exists as well. Moreover, the characteristic function is always finite and bounded ( $|\varphi_D(\mathbf{t})| \leq 1$ ). This makes calculation of the distance between two characteristic functions always meaningful. + +# 3.2. Proposed Heatmap Optimization Process + +Below, we discuss how we formulate the heatmap optimization process for bottom-up human pose estimation via (1) constructing two distributions from the predicted heatmap and the GT heatmap respectively; (2) calculating characteristic functions from these two distributions; and (3) formulating the loss function as the distance between the two characteristic functions. + +Distribution Construction. Given an input image, for each type of body joints, we denote the corresponding predicted heatmap as $H_{p}$ and the corresponding GT heatmap as $H_{g}$ . We propose to formulate the two distributions $D(H_{p})$ and $D(H_{g})$ from the two heatmaps $H_{p}$ and $H_{g}$ with the following two steps. (1) As distributions cannot hold negative probabilities, we first pass $H_{p}$ through a relu activation function to make it non-negative. Note that $H_{g}$ is already non-negative. (2) After that, as the sum of probabilities of each constructed distribution needs to be 1, we further normalize both the output of step (1) and $H_{g}$ . Hence, with the above two steps, we formulate $D(H_{p})$ and $D(H_{g})$ as: + +$$ +D \left(H _ {p}\right) = \frac {\operatorname {r e l u} \left(H _ {p}\right)}{\left\| \operatorname {r e l u} \left(H _ {p}\right) \right\| _ {1}}, \quad D \left(H _ {g}\right) = \frac {H _ {g}}{\left\| H _ {g} \right\| _ {1}} \tag {2} +$$ + +Characteristic Function Calculation. For every type of body joints, after formulating the two distributions $D(H_{p})$ and $D(H_{g})$ , we follow Eq. 1 to calculate the two characteristic functions $\varphi_{D(H_p)}(\mathbf{t})$ and $\varphi_{D(H_g)}(\mathbf{t})$ as: + +$$ +\begin{array}{l} \varphi_ {D \left(H _ {p}\right)} (\mathbf {t}) = E _ {\mathbf {x} \sim D \left(H _ {p}\right)} \left[ e ^ {i \langle \mathbf {t}, \mathbf {x} \rangle} \right], \tag {3} \\ \varphi_ {D (H _ {g})} (\mathbf {t}) = E _ {\mathbf {x} \sim D (H _ {g})} [ e ^ {i \langle \mathbf {t}, \mathbf {x} \rangle} ] \\ \end{array} +$$ + +Loss Function Formulation. Above we discuss how we obtain the two characteristic functions w.r.t. the predicted heatmap and the GT heatmap for a single type of body joints. Note that in bottom-up human pose estimation, multiple types of body joints are required to be located at the same time. Here, we first discuss how we formulate the loss function for a single type of body joints, and then introduce the overall loss function for all types of body joints. 
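As a concrete illustration of Eqs. 2 and 3, the short NumPy sketch below builds the two distributions from toy heatmaps for one joint type and evaluates their characteristic functions at randomly sampled frequency vectors. This is an editor's sketch rather than the authors' released code: the helper names `heatmap_to_distribution` and `characteristic_function`, the toy heatmaps, and the final averaged squared distance are illustrative assumptions, while the values U = 64 and M = 256 follow the settings reported in the experiments (Sec. 4.1).

```python
import numpy as np

def heatmap_to_distribution(heatmap, eps=1e-8):
    # Eq. 2: clamp negative values (relu) and L1-normalize so the mass sums to 1.
    d = np.maximum(heatmap, 0.0)
    return d / (d.sum() + eps)

def characteristic_function(dist, ts):
    # Eq. 3 evaluated at sampled frequency vectors ts (shape [M, 2]):
    # the distribution is over 2D pixel coordinates x with weight dist[x],
    # so phi(t) = sum_x dist[x] * exp(i * <t, x>).
    h, w = dist.shape
    ys, xs = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
    coords = np.stack([xs.ravel(), ys.ravel()], axis=1).astype(np.float64)  # [H*W, 2]
    phases = coords @ ts.T                                                  # [H*W, M]
    return (dist.ravel()[:, None] * np.exp(1j * phases)).sum(axis=0)        # [M]

# Frequencies t drawn uniformly from the finite square [-U, U] x [-U, U],
# mirroring the weighting over B_U introduced below (Eq. 5) and the
# Monte Carlo sampling used at implementation time (Eq. 21).
U, M = 64, 256
rng = np.random.default_rng(0)
ts = rng.uniform(-U, U, size=(M, 2))

H_g = np.zeros((128, 128)); H_g[40, 60] = 1.0  # toy GT heatmap: joint at (row 40, col 60)
H_p = np.zeros((128, 128)); H_p[42, 58] = 1.0  # toy prediction: slightly misplaced joint

phi_p = characteristic_function(heatmap_to_distribution(H_p), ts)
phi_g = characteristic_function(heatmap_to_distribution(H_g), ts)
distance = np.mean(np.abs(phi_p - phi_g) ** 2)  # squared CF distance (up to the constant weighting)
print(distance)
```

Minimizing a distance of this form, instead of the overall L2 loss, is what the loss functions below formalize; the only further ingredients are the uniform weighting over the finite range $B_U$ and the constant coefficient $\gamma$.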
+ +To formulate the loss function for the $k$ -th type of body + +joints, given the two characteristic functions $\varphi_{D(H_p)}^k (\mathbf{t})$ and $\varphi_{D(H_g)}^k (\mathbf{t})$ , we first write the loss function $L_{k}$ as the distance between these two characteristic functions [2]: + +$$ +L _ {k} = \int_ {\mathbb {R} ^ {2}} \| \varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t}) \| _ {2} ^ {2} \omega (\mathbf {t}, \eta) d \mathbf {t} \tag {4} +$$ + +where $\omega (\mathbf{t},\eta)$ is a weighting function. Here we set $\omega (\mathbf{t},\eta)$ to be the probability density function of a uniform distribution in $B_{U}$ , where $B_{U} = [-U,U]\times [-U,U]$ is a finite predefined range and $U$ is a hyperparameter. This means that, $\omega (\mathbf{t},\eta) = \frac{1}{4U^2}$ when $\mathbf{t}\in B_U$ and $\omega (\mathbf{t},\eta) = 0$ otherwise. We thus further rewrite Eq. 4 as: + +$$ +L _ {k} = \int_ {B _ {U}} \| \frac {1}{2 U} \left(\varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t})\right) \| _ {2} ^ {2} d \mathbf {t} \tag {5} +$$ + +Finally, from Eq. 5, we formulate the loss function $L_{k}$ as: + +$$ +L _ {k} = \int_ {B _ {U}} \| \frac {\gamma}{2 U} \left(\varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t})\right) \| _ {2} ^ {2} d \mathbf {t} \tag {6} +$$ + +where $\gamma = \frac{U^2\sqrt{A}}{\pi^2}$ is a constant coefficient and $A$ is the area of the heatmap. Note that Eq. 6 is equivalent to Eq. 5 during the optimization process, as the efficacy of the added constant $\gamma$ can be achieved by adjusting the learning rate. + +After getting the loss function for each type of body joints, we formulate the total loss for all types of joints as: + +$$ +L _ {t o t a l} = \sum_ {k = 1} ^ {K} L _ {k} \tag {7} +$$ + +where $K$ denotes the total number of body joint types. + +# 3.3. Theoretical Analysis + +Below, we perform theoretical analysis to show the effectiveness of our method for bottom-up human pose estimation. Before going into the theorem, we first introduce a lemma that can facilitate the proof of the theorem. + +Lemma 1. Let $\varphi_{D}$ be the characteristic function of a 2-dimensional distribution $D$ . Let $R^{r} = [x_{1}^{lower}, x_{1}^{upper}] \times [x_{2}^{lower}, x_{2}^{upper}]$ a rectangular region, $R^{e} = \{x_{1}^{lower}, x_{1}^{upper}\} \times [x_{2}^{lower}, x_{2}^{upper}]\cup [x_{1}^{lower}, x_{1}^{upper}] \times \{x_{2}^{lower}, x_{2}^{upper}\}$ the edges of this region, and $R^{v} = \{x_{1}^{lower}, x_{1}^{upper}\} \times \{x_{2}^{lower}, x_{2}^{upper}\}$ the vertices of this region. Let $B_{T} = [-T,T] \times [-T,T]$ . Denote $[D]_{R}$ the portion of the distribution $D$ in $R$ . $[D]_{R^r}$ can then be written as: + +$$ +\begin{array}{l} [ D ] _ {R ^ {r}} = \left(\lim _ {T \rightarrow \infty} \frac {1}{(2 \pi) ^ {2}} \int_ {B _ {T}} \left(\prod_ {n = 1} ^ {2} \left(\frac {e ^ {- i t _ {n} x _ {n} ^ {\text {l o w e r}}} - e ^ {- i t _ {n} x _ {n} ^ {\text {u p p e r}}}}{i t _ {n}}\right)\right.\right. \\ \left. \varphi_ {D} (\boldsymbol {t})\right) d t _ {1} d t _ {2}) + \epsilon ([ D ] _ {R ^ {r}}) \tag {8} \\ \end{array} +$$ + +where $\epsilon ([D]_{R^r}) = \frac{[D]_{R^e}}{2} +\frac{[D]_{R^v}}{4}$ and $dt_{1}dt_{2}$ are calculated based on the Lebesgue measure. + +The proof of Lemma 1 is provided in the supplementary. After introducing this lemma, we analyze our proposed method below. + +Theorem 1. 
Let $R_{sub}^{r}$ be a random rectangular sub-region in the heatmap of the $k$ -th type of body joints where $\left\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\right\|_2^2$ is relatively small compared to $\left\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\right\|_2^2$ . The relation between the L2 loss w.r.t. this sub-region and $L_k$ can be written as: + +$$ +\frac {\left\| \left[ D \left(H _ {p}\right) \right] _ {R _ {s u b} ^ {r}} - \left[ D \left(H _ {g}\right) \right] _ {R _ {s u b} ^ {r}} \right\| _ {2} ^ {2}}{\lambda \left(R _ {s u b} ^ {r}\right)} \leq L _ {k} \tag {9} +$$ + +Note that $\lambda(R_{sub}^{r})$ as the Lebesgue measure represents the area of $R_{sub}^{r}$ . + +Proof. To prove Theorem 1, we first reformulate Lemma 1 as: + +$$ +\begin{array}{l} [ D ] _ {R ^ {r}} = \left(\lim _ {T \rightarrow \infty} \frac {1}{(2 \pi) ^ {2}} \int_ {B _ {T}} \left(\prod_ {n = 1} ^ {2} \left(\frac {e ^ {- i t _ {n} x _ {n} ^ {l o w e r}} - e ^ {- i t _ {n} x _ {n} ^ {u p p e r}}}{i t _ {n}}\right)\right.\right. \\ \left. \varphi_ {D} (\mathbf {t})\right) d t _ {1} d t _ {2}) + \epsilon \left(\left[ D \right] _ {R ^ {r}}\right) \\ = \lim _ {T \rightarrow \infty} \frac {1}{(2 \pi) ^ {2}} \int_ {B _ {T}} \varphi_ {D} (\mathbf {t}) \int_ {R ^ {r}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} d \mathbf {x} d \mathbf {t} + \epsilon ([ D ] _ {R ^ {r}}) \tag {10} \\ \end{array} +$$ + +where $d\mathbf{t} = dt_1dt_2$ , and both $d\mathbf{x}$ and $d\mathbf{t}$ are calculated based on the Lebesgue measure. + +After that, we rewrite $\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sab}^r}\| _2^2$ as: + +$$ +\begin{array}{l} \left\| \left[ D \left(H _ {p}\right) \right] _ {R _ {s u b} ^ {r}} - \left[ D \left(H _ {g}\right) \right] _ {R _ {s u b} ^ {r}} \right\| _ {2} ^ {2} (11) \\ \approx \left\| \left[ D \left(H _ {p}\right) \right] _ {R _ {s u b} ^ {r}} - \left[ D \left(H _ {g}\right) \right] _ {R _ {s u b} ^ {r}} \right. (12) \\ - \left(\epsilon ([ D (H _ {p}) ] _ {R _ {s u b} ^ {r}}) - \epsilon ([ D (H _ {g}) ] _ {R _ {s u b} ^ {r}})\right) \| _ {2} ^ {2} \\ = \| \lim _ {T \rightarrow \infty} \frac {1}{(2 \pi) ^ {2}} \int_ {B _ {T}} \varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) \int_ {R _ {s u b} ^ {r}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} d \mathbf {x} d \mathbf {t} (13) \\ - \lim _ {T \rightarrow \infty} \frac {1}{(2 \pi) ^ {2}} \int_ {B _ {T}} \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t}) \int_ {R _ {s u b} ^ {r}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} d \mathbf {x} d \mathbf {t} \| _ {2} ^ {2} \\ = \| \lim _ {T \rightarrow \infty} \int_ {B _ {T}} \int_ {R _ {s u b} ^ {r}} \frac {\varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t})}{(2 \pi) ^ {2}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} d \mathbf {x} d \mathbf {t} \| _ {2} ^ {2} (14) \\ \approx \left\| \int_ {B _ {U}} \int_ {R _ {s u b} ^ {r}} \frac {\varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t})}{(2 \pi) ^ {2}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} d \mathbf {x} d \mathbf {t} \right\| _ {2} ^ {2} (15) \\ \leq 4 U ^ {2} A \int_ {B _ {U}} \int_ {R _ {s u b} ^ {r}} \| \frac {\varphi_ {D (H _ {p})} ^ {k} (\mathbf {t}) - \varphi_ {D (H _ {g})} ^ {k} (\mathbf {t})}{(2 \pi) ^ {2}} e ^ {- i \langle \mathbf {t}, \mathbf {x} \rangle} \| _ {2} ^ {2} d \mathbf {x} d \mathbf {t} (16) \\ \end{array} +$$ + +Table 1. Comparisons with bottom-up methods on the COCO val2017 set (single-scale testing). + +
| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.0 | 84.9 | 67.5 | 56.3 | 69.3 |
| HGG [16] | ECCV 2020 | Hourglass | 512 | 60.4 | 83.0 | 66.2 | - | - |
| PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 86.2 | 71.9 | 62.3 | 73.2 |
| PifPaf [17] | CVPR 2019 | ResNet-152 | - | 67.4 | - | - | - | - |
| PETR [30] | CVPR 2022 | - | 1333 | 67.4 | 87.0 | 74.9 | 61.7 | 75.9 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 71.0 | 88.3 | 77.4 | 66.7 | 78.5 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 67.4 | - | - | - | - |
| CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 72.4 | 89.1 | - | 67.3 | 80.4 |
| CID [36] | CVPR 2022 | HRNet-W32 | 512 | 66.0 | 86.7 | 72.3 | 59.8 | 76.0 |
| LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 72.2 | 88.9 | 78.9 | 68.1 | 78.9 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 68.9 | 87.8 | 74.9 | 63.0 | 77.4 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.8 | 88.5 | 76.8 | 66.3 | 77.4 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W32 | 512 | 68.6 | 87.6 | 74.1 | 62.0 | 78.0 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 68.4 | 87.1 | 74.8 | 62.7 | 77.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| + Ours | | HrHRNet-W32 | 512 | 69.9 (↑2.8) | 88.1 | 76.0 | 64.2 | 78.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 69.9 | 87.2 | 76.1 | 65.4 | 76.4 |
| + Ours | | HrHRNet-W48 | 640 | 72.5 (↑2.6) | 89.3 | 79.1 | 68.3 | 79.0 |
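For readers who want to make the objects in the proof concrete, the heatmap-induced distributions $D(H_p)$ and $D(H_g)$ of Eq. 2 and their characteristic functions of Eq. 3 can be evaluated directly on a discrete heatmap. The snippet below is a minimal PyTorch-style sketch of that evaluation; it is our illustration rather than the authors' released code, and the function names, the pixel-coordinate convention, and the small epsilon added for numerical stability are our own assumptions.

```python
import torch

def heatmap_to_distribution(heatmap: torch.Tensor) -> torch.Tensor:
    """Turn one joint heatmap (H, W) into a discrete distribution over pixels (Eq. 2)."""
    h = torch.relu(heatmap)                      # distributions cannot hold negative mass
    return h / h.sum().clamp_min(1e-8)           # normalize so the probabilities sum to 1

def characteristic_function(dist: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """Evaluate phi_D(t) = E_{x~D}[exp(i<t, x>)] at frequency vectors t (Eq. 1 / Eq. 3).

    dist: (H, W) probabilities over pixel coordinates x = (column, row).
    t:    (M, 2) frequency vectors.
    Returns a complex tensor of shape (M,).
    """
    H, W = dist.shape
    ys, xs = torch.meshgrid(
        torch.arange(H, dtype=dist.dtype, device=dist.device),
        torch.arange(W, dtype=dist.dtype, device=dist.device),
        indexing="ij",
    )
    coords = torch.stack([xs.flatten(), ys.flatten()], dim=-1)   # (H*W, 2)
    phase = t @ coords.T                                         # (M, H*W): <t, x>
    return (dist.flatten() * torch.exp(1j * phase)).sum(dim=-1)  # weighted sum = expectation
```

Since $|\varphi_D(\mathbf{t})| \leq 1$ always holds, the values returned here are bounded, which is what keeps distances between two such functions well behaved.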
Table 2. Comparisons with bottom-up methods on the COCO val2017 set (multi-scale testing).

| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| HGG [16] | ECCV 2020 | Hourglass | 512 | 68.3 | 86.7 | 75.8 | - | - |
| Point-Set Anchors [38] | ECCV 2020 | HRNet-W48 | 640 | 69.8 | 88.8 | 76.3 | - | - |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 72.3 | 88.3 | 78.6 | 68.6 | 78.6 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W32 | 512 | 71.4 | 88.9 | 77.8 | 66.3 | 78.9 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 73.2 | 89.8 | 79.1 | 69.1 | 79.3 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 71.2 | 88.2 | 77.2 | 66.5 | 78.0 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 69.9 | 87.1 | 76.0 | 65.3 | 77.0 |
| + Ours | | HrHRNet-W32 | 512 | 71.8 (↑1.9) | 88.9 | 78.1 | 67.3 | 78.4 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 72.1 | 88.4 | 78.2 | 67.8 | 78.3 |
| + Ours | | HrHRNet-W48 | 640 | 73.7 (↑1.6) | 89.9 | 79.6 | 69.6 | 79.5 |
$$
\begin{array}{rl}
\leq & 4U^2 A \int_{B_U} \int_{R_{sub}^{r}} \Big\| \frac{\varphi_{D(H_p)}^{k}(\mathbf{t}) - \varphi_{D(H_g)}^{k}(\mathbf{t})}{(2\pi)^2} \Big\|_2^2 \, d\mathbf{x} \, d\mathbf{t} \quad (17) \\
= & 4U^2 A \int_{R_{sub}^{r}} \int_{B_U} \Big\| \frac{\varphi_{D(H_p)}^{k}(\mathbf{t}) - \varphi_{D(H_g)}^{k}(\mathbf{t})}{(2\pi)^2} \Big\|_2^2 \, d\mathbf{t} \, d\mathbf{x} \quad (18) \\
= & \int_{R_{sub}^{r}} \int_{B_U} \Big\| \frac{\gamma}{2U} \big( \varphi_{D(H_p)}^{k}(\mathbf{t}) - \varphi_{D(H_g)}^{k}(\mathbf{t}) \big) \Big\|_2^2 \, d\mathbf{t} \, d\mathbf{x} \quad (19) \\
= & L_k \, \lambda(R_{sub}^{r}) \quad (20)
\end{array}
$$

where Eq. 12 holds since $\| [D(H_p)]_{R_{sub}^{e}} - [D(H_g)]_{R_{sub}^{e}}\|_2^2$ is relatively small compared to $\| [D(H_p)]_{R_{sub}^{r}} - [D(H_g)]_{R_{sub}^{r}}\|_2^2$, Eq. 13 holds because of Eq. 10, Eq. 15 holds based on the analysis in the supplementary, Eq. 16 holds due to the continuity of the L2 distance and the Cauchy-Schwarz inequality, Eq. 17 holds due to the fact that $\| e^{-i\langle \mathbf{t},\mathbf{x}\rangle}\|_2^2 = 1$ and the Cauchy-Schwarz inequality, and Eq. 18 holds due to Fubini's theorem.

Dividing both sides of Eq. 20 by $\lambda(R_{sub}^{r})$ then yields Theorem 1.

As shown in Theorem 1, for the sub-region $R_{sub}^{r}$, when the sum of the pixelwise L2 distances between the predicted and GT heatmaps over the entire sub-region is relatively large compared to the sum over its edge only, $L_{k}$ is an upper bound of the L2 loss w.r.t. this sub-region. Consequently, by minimizing $L_{k}$, we drive down the L2 losses w.r.t. all such sub-regions simultaneously. Note that such sub-regions are easy to find, since the edge of a sub-region typically contains far fewer pixels than the entire sub-region in the first place. Furthermore, for sub-regions containing missed or inaccurately predicted body joints in their centers, which are precisely the erroneous predictions that need to be corrected, the sum of the pixelwise L2 distances over the entire sub-region will be much larger than the sum over its edge. Therefore, our method can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time, whereas existing bottom-up methods, which usually rely on the overall L2 loss, do not have this property. Thus, our method can achieve superior performance for bottom-up human pose estimation.

Note that during implementation, since $L_{k}$ itself is an integral and thus not tractable, inspired by [2], we define $\hat{L}_k$ as a

Table 3. Comparisons with bottom-up methods on the COCO test-dev2017 set (single-scale testing).
| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| OpenPose [5] | CVPR 2017 | VGG-19 | - | 61.8 | 84.9 | 67.5 | 57.1 | 68.2 |
| Hourglass [24] | ECCV 2016 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0 |
| Associative Embedding [23] | NIPS 2017 | Hourglass | 512 | 56.6 | 81.8 | 61.8 | 49.8 | 67.0 |
| SPM [25] | ICCV 2019 | Hourglass | - | 66.9 | 88.5 | 72.9 | 62.6 | 73.1 |
| MDN [34] | CVPR 2020 | Hourglass | - | 62.9 | 85.1 | 69.4 | 58.8 | 71.4 |
| PersonLab [26] | ECCV 2018 | ResNet-152 | 1401 | 66.5 | 88.0 | 72.6 | 62.4 | 72.3 |
| PifPaf [17] | CVPR 2019 | ResNet-152 | - | 66.7 | - | - | 62.4 | 72.9 |
| PETR [30] | CVPR 2022 | SWin-L | 1333 | 70.5 | 91.5 | 78.7 | 65.2 | 78.0 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 70.0 | 89.4 | 77.3 | 65.7 | 76.9 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 66.7 | - | - | - | - |
| CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 71.0 | 90.2 | 78.2 | 66.2 | 77.8 |
| CID [36] | CVPR 2022 | HRNet-W48 | 640 | 70.7 | 90.3 | 77.9 | 66.3 | 77.8 |
| LOGP-CAP [42] | CVPR 2022 | HRNet-W48 | 640 | 70.8 | 89.7 | 77.8 | 66.7 | 77.0 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 70.2 | 89.9 | 76.9 | 65.2 | 77.0 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 69.6 | 89.7 | 76.0 | 64.9 | 76.3 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 67.4 | 88.3 | 73.9 | 62.1 | 75.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 66.4 | 87.5 | 72.8 | 61.2 | 74.2 |
| + Ours | | HrHRNet-W32 | 512 | 68.9 (↑2.5) | 89.2 | 75.7 | 63.7 | 76.1 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 68.4 | 88.2 | 75.1 | 64.4 | 74.2 |
| + Ours | | HrHRNet-W48 | 640 | 71.1 (↑2.7) | 90.4 | 78.2 | 66.9 | 77.2 |
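In practice the integral $L_k$ above is replaced by the sampled approximation $\hat{L}_k$ given in Eq. 21 below, summed over all joint types to give $\hat{L}_{total}$ (Eq. 22). The following is a minimal PyTorch-style sketch of such a sampled characteristic-function loss, not the authors' implementation: we assume $B_U$ is the square $[-U, U]^2$ sampled uniformly, and `gamma` is only a placeholder for the scale factor $\gamma$ of Eq. 19, whose exact value follows from the paper's definitions.

```python
import torch

def cf_total_loss(pred: torch.Tensor, gt: torch.Tensor,
                  num_samples: int = 256, U: float = 64.0,
                  gamma: float = 1.0) -> torch.Tensor:
    """Sampled characteristic-function loss over all K joint types (Eqs. 21-22).

    pred, gt    : (K, H, W) predicted and ground-truth heatmaps for one image.
    num_samples : M in Eq. 21 (256 in the paper's experiments).
    U           : half-width of the sampling range B_U (64 in the experiments);
                  B_U is assumed here to be the square [-U, U]^2.
    gamma       : stand-in for the scale factor gamma of Eq. 19 (placeholder value).
    """
    K, H, W = pred.shape
    dev, dt = pred.device, pred.dtype
    # Pixel coordinates x at which the heatmap distributions place probability mass.
    ys, xs = torch.meshgrid(torch.arange(H, dtype=dt, device=dev),
                            torch.arange(W, dtype=dt, device=dev), indexing="ij")
    coords = torch.stack([xs.flatten(), ys.flatten()], dim=-1)              # (H*W, 2)
    # M frequency vectors t_m drawn uniformly from B_U.
    t = (torch.rand(num_samples, 2, device=dev, dtype=dt) * 2.0 - 1.0) * U
    basis = torch.exp(1j * (t @ coords.T))                                  # (M, H*W)

    total = pred.new_zeros(())
    for k in range(K):
        d_p = torch.relu(pred[k]).flatten()
        d_p = d_p / d_p.sum().clamp_min(1e-8)                               # D(H_p), Eq. 2
        d_g = gt[k].flatten()
        d_g = d_g / d_g.sum().clamp_min(1e-8)                               # D(H_g), Eq. 2
        phi_p = (basis * d_p).sum(dim=-1)                                   # phi_{D(H_p)}^k(t_m)
        phi_g = (basis * d_g).sum(dim=-1)                                   # phi_{D(H_g)}^k(t_m)
        diff = (gamma / (2.0 * U)) * (phi_p - phi_g)
        total = total + diff.abs().pow(2).sum()                             # hat{L}_k, Eq. 21
    return total                                                            # hat{L}_total, Eq. 22
```

The defaults mirror the hyperparameters used in the experiments ($M = 256$, $U = 64$); since only the heatmap supervision changes, such a loss can be dropped into an existing bottom-up training pipeline.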
Table 4. Comparisons with bottom-up methods on the COCO test-dev2017 set (multi-scale testing).

| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Hourglass [24] | ECCV 2016 | Hourglass | 512 | 63.0 | 85.7 | 68.9 | 58.0 | 70.4 |
| Associative Embedding [23] | NIPS 2017 | Hourglass | 512 | 63.0 | 85.7 | 68.9 | 58.0 | 70.4 |
| HGG [16] | ECCV 2020 | Hourglass | 512 | 67.6 | 85.1 | 73.7 | 62.7 | 74.6 |
| SimplePose [18] | AAAI 2020 | IMHN | 512 | 68.1 | - | - | 66.8 | 70.5 |
| PersonLab [26] | ECCV 2018 | - | 1401 | 68.7 | 89.0 | 75.4 | 64.1 | 75.5 |
| PETR [30] | CVPR 2022 | SWin-L | 1333 | 71.2 | 91.4 | 79.6 | 66.9 | 78.0 |
| Point-Set Anchors [38] | ECCV 2020 | HRNet-W48 | 640 | 68.7 | 89.9 | 76.3 | 64.8 | 75.3 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 71.0 | 89.2 | 78.0 | 67.1 | 76.9 |
| CIR&QEM [40] | AAAI 2022 | HRNet-W48 | 640 | 71.7 | 90.4 | 78.7 | 67.3 | 78.5 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 72.0 | 90.7 | 78.8 | 67.8 | 77.7 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 71.1 | 90.5 | 77.5 | 66.9 | 76.7 |
| PoseTrans [15] | ECCV 2022 | HrHRNet-W32 | 512 | 69.9 | 89.3 | 77.0 | 65.2 | 76.2 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W32 | 512 | 69.0 | 89.0 | 75.8 | 64.4 | 75.2 |
| + Ours | | HrHRNet-W32 | 512 | 70.8 (↑1.8) | 90.1 | 77.8 | 66.0 | 77.3 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 70.5 | 89.3 | 77.2 | 66.6 | 75.8 |
| + Ours | | HrHRNet-W48 | 640 | 72.3 (↑1.8) | 91.5 | 79.8 | 67.9 | 78.2 |
tractable alternative of $L_{k}$ as:

$$
\hat{L}_k = \sum_{m=1}^{M} \Big\| \frac{\gamma}{2U} \big( \varphi_{D(H_p)}^{k}(\mathbf{t}_m) - \varphi_{D(H_g)}^{k}(\mathbf{t}_m) \big) \Big\|_2^2 \tag{21}
$$

where $\{\mathbf{t}_1,\dots,\mathbf{t}_M\}$ denotes a set of $M$ vectors randomly sampled from $B_{U}$.

The total loss $\hat{L}_{total}$ for all body joint types can then be written as:

$$
\hat{L}_{total} = \sum_{k=1}^{K} \hat{L}_k \tag{22}
$$

# 3.4. Overall Training and Testing

Here we discuss the overall training and testing scheme of our method. Specifically, during training, we supervise the predicted heatmaps via the total loss in Eq. 22 instead of the commonly used overall L2 loss, and following [6, 22, 23], we conduct grouping via associative embedding. During testing, we follow the evaluation procedure of previous works [6, 22] that conduct bottom-up human pose estimation. Note that $\hat{L}_k$ in Eq. 21 is easy to implement in practice, and we provide more implementation details in the supplementary.

# 4. Experiments

To evaluate the effectiveness of our method for bottom-up human pose estimation, we conduct experiments on the COCO dataset [21] and the CrowdPose dataset [19]. Besides, we also test the effectiveness of our method on top-down methods in the supplementary. We conduct our experiments on RTX 3090 GPUs.

# 4.1. COCO Keypoint Detection

Dataset & evaluation metric. The COCO dataset [21] contains over 200k images, in which each person instance is annotated with 17 body joints. This dataset consists of three subsets: the COCO training set (57k

Table 5. Comparisons with bottom-up methods on the CrowdPose testing set.
| Method | Venue | Backbone | Input size | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{E} \) | \( AP^{M} \) | \( AP^{H} \) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| *w/ single-scale testing* | | | | | | | | | |
| OpenPose [5] | CVPR 2017 | VGG-19 | - | - | - | - | 62.7 | 48.7 | 32.3 |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 65.9 | 86.4 | 70.6 | 73.3 | 66.5 | 57.9 |
| PETR [30] | CVPR 2022 | - | - | 72.0 | 90.9 | 78.8 | 78.0 | 72.5 | 65.4 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 67.3 | 86.4 | 72.2 | 74.6 | 68.1 | 58.7 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 68.9 | 88.7 | 74.7 | 75.4 | 69.6 | 61.5 |
| CID [36] | CVPR 2022 | HRNet-W48 | 640 | 72.3 | 90.8 | 77.9 | 78.7 | 73.0 | 64.8 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 71.6 | 88.5 | 77.6 | 78.9 | 72.4 | 63.0 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 67.6 | 87.7 | 72.7 | 73.9 | 68.2 | 60.3 |
| Ours | | HrHRNet-W48 | 640 | 72.6 | 88.8 | 78.9 | 79.2 | 73.1 | 65.6 |
| *w/ multi-scale testing* | | | | | | | | | |
| HrHRNet [6] | CVPR 2020 | HrHRNet-W48 | 640 | 67.6 | 87.4 | 72.6 | 75.8 | 68.1 | 58.9 |
| DEKR [11] | CVPR 2021 | HRNet-W48 | 640 | 68.0 | 85.5 | 73.4 | 76.6 | 68.8 | 58.4 |
| PINet [37] | NIPS 2021 | HRNet-W32 | 512 | 69.8 | 89.1 | 75.6 | 76.4 | 70.5 | 62.2 |
| SWAHR [22] | CVPR 2021 | HrHRNet-W48 | 640 | 73.8 | 90.5 | 79.9 | 81.2 | 74.7 | 64.7 |
| CenterAttention [4] | ICCV 2021 | HrHRNet-W48 | 640 | 69.4 | 88.6 | 74.6 | 76.6 | 70.0 | 61.5 |
| Ours | | HrHRNet-W48 | 640 | 74.1 | 90.7 | 80.2 | 81.3 | 74.9 | 65.1 |
images), the COCO validation set (5k images), and the COCO test-dev set (20k images). Following the train-test split of [22], we report results on the val2017 set and the test-dev2017 set. Also following [22], we evaluate model performance using the standard average precision (AP) calculated based on Object Keypoint Similarity (OKS) on this dataset, and report the following metrics: AP, $\mathrm{AP}^{50}$, $\mathrm{AP}^{75}$, $\mathrm{AP}^{\mathrm{M}}$, and $\mathrm{AP}^{\mathrm{L}}$.

Implementation details. Following [4, 22], we use HrHRNet [6] as the baseline, and apply our proposed method to its two backbones, HrHRNet-W32 and HrHRNet-W48. For these backbones, we follow their original training and testing configurations specified in [6]. Also following [6], we adopt the three scales 0.5, 1, and 2 in multi-scale testing. To calculate $\hat{L}_k$ following Eq. 21, we set the number of samples $M$ to 256 and the hyperparameter $U$ w.r.t. the finite range $B_U$ to 64 in our experiments.

Results. In Tab. 1 and Tab. 2, we report single-scale testing and multi-scale testing results on the COCO val2017 set. In Tab. 3 and Tab. 4, we report single-scale testing and multi-scale testing results on the COCO test-dev2017 set. We observe that applying our method to both HrHRNet-W32 and HrHRNet-W48 yields a significant performance improvement, demonstrating the effectiveness of our method. Moreover, we also compare our method with other state-of-the-art bottom-up human pose estimation methods. Compared to these methods, our method consistently achieves the highest AP score, further demonstrating its effectiveness.

# 4.2. CrowdPose

Dataset & evaluation metric. The CrowdPose dataset [19] contains about 20k images and 80k person instances, which are annotated with 14 body joints. This dataset consists of three subsets: the CrowdPose training set (10k images), the CrowdPose validation set (2k images), and the CrowdPose testing set (8k images). Following the train-test split of [6, 22], we report results on the testing set. Also following [6, 22], we evaluate model performance using the standard AP calculated based on OKS on the CrowdPose dataset, and report the following metrics: AP, $\mathrm{AP}^{50}$, $\mathrm{AP}^{75}$, $\mathrm{AP}^{\mathrm{E}}$, $\mathrm{AP}^{\mathrm{M}}$, and $\mathrm{AP}^{\mathrm{H}}$.

Implementation details. On the CrowdPose dataset, we also use HrHRNet [6] as the baseline and use HrHRNet-W48 as the backbone, following [4, 6, 22]. We follow the original training and testing configurations specified in [6], and also follow [6] to adopt the three scales 0.5, 1, and 2 in multi-scale testing. Besides, as in the experiments on the COCO dataset, we set the number of samples $M$ to 256 and the hyperparameter $U$ w.r.t. the finite range $B_U$ to 64 on the CrowdPose dataset.

Results. In Tab. 5, we report the single-scale testing and multi-scale testing results on the CrowdPose testing set. As shown, our method consistently achieves the highest AP score, demonstrating the effectiveness of our method.

# 4.3. Ablation Studies

We conduct ablation studies on the COCO validation set by applying our proposed method to HrHRNet-W32 [6] with single-scale testing.

Impact of the number of samples $M$. To calculate $\hat{L}_k$ following Eq. 21, we need to set the number of samples $M$, which we set to 256 in our experiments. We evaluate other choices of the number of samples $M$ in Tab. 6.
As shown, all variants outperform the baseline method, and once the number of samples $M$ exceeds 256, the model performance stabilizes. Therefore, we set the number of samples $M$ to 256 in our experiments.

Impact of the finite range $B_U$ with different $U$. We evaluate different choices of $U$ in Tab. 7. As shown, all variants outperform the baseline method, and once the hyperparameter $U$ exceeds 64, the model performance does
(Figure 2 panels (a)–(d): right shoulders, right eyes, right elbows, and left shoulders; each panel compares the baseline (HrHRNet-W32) result with our result.)
+ +Figure 2. Qualitative results of our method and the baseline HrHRNet-W32 model [6]. As shown, the baseline method misses body joints (in (a) and (b)) or misidentifies body joints (in (c) and (d)) in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). Meanwhile, our method provides a more accurate localization result for the body joints of different people in different sub-regions of the predicted heatmap at the same time. More qualitative results are in the supplementary. (Best viewed in color.) + +Table 6. Evaluation on the number of samples $M$ + +
| Method | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- |
| Baseline (HrHRNet-W32) | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| 4 samples | 67.9 | 86.9 | 73.8 | 62.4 | 76.9 |
| 16 samples | 68.9 | 87.5 | 74.8 | 63.5 | 77.4 |
| 64 samples | 69.6 | 87.9 | 75.6 | 63.9 | 77.8 |
| 256 samples | 69.9 | 88.1 | 76.0 | 64.2 | 78.1 |
| 1024 samples | 69.8 | 88.2 | 76.0 | 64.3 | 78.0 |
not improve further. Thus, we set the hyperparameter $U$ to 64 in our experiments.

Table 7. Evaluation on the hyperparameter $U$ w.r.t. the finite range $B_U$.
| Method | AP | \( AP^{50} \) | \( AP^{75} \) | \( AP^{M} \) | \( AP^{L} \) |
| --- | --- | --- | --- | --- | --- |
| Baseline (HrHRNet-W32) | 67.1 | 86.2 | 73.0 | 61.5 | 76.1 |
| U = 8 | 67.7 | 86.7 | 73.5 | 62.2 | 76.5 |
| U = 16 | 68.6 | 87.3 | 74.2 | 62.9 | 76.9 |
| U = 32 | 69.4 | 87.8 | 75.4 | 63.7 | 77.6 |
| U = 64 | 69.9 | 88.1 | 76.0 | 64.2 | 78.1 |
| U = 128 | 69.8 | 88.0 | 75.8 | 64.0 | 78.0 |
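As Sec. 3.4 states, the only change our method makes to training is the heatmap supervision: the predicted heatmaps are optimized with $\hat{L}_{total}$ (Eq. 22, with $M = 256$ and $U = 64$ selected above) instead of the overall L2 loss, while grouping still uses associative embedding as in [6, 22, 23]. The sketch below shows roughly where that substitution happens in a training step; the stand-in network, the `overall_l2` baseline, and the pluggable `heatmap_loss` argument are our own illustrative choices (a sampled characteristic-function loss as in the earlier sketch would be passed instead), and the associative-embedding grouping loss is omitted for brevity.

```python
import torch
import torch.nn as nn

def overall_l2(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    """Baseline supervision: L2 loss averaged over the entire heatmap."""
    return ((pred - gt) ** 2).mean()

def train_step(model: nn.Module, images: torch.Tensor, gt_heatmaps: torch.Tensor,
               heatmap_loss, optimizer: torch.optim.Optimizer) -> float:
    """One optimization step; `heatmap_loss` maps a (K, H, W) prediction/GT pair to a
    scalar, so a characteristic-function loss can be swapped in for `overall_l2`."""
    pred = model(images)                                                  # (B, K, H, W)
    loss = torch.stack([heatmap_loss(p, g)
                        for p, g in zip(pred, gt_heatmaps)]).mean()       # average over batch
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return float(loss)

if __name__ == "__main__":
    # Tiny stand-in network and random data purely to make the sketch executable;
    # the actual backbone in the paper is HrHRNet-W32/W48 predicting 17 joint heatmaps.
    net = nn.Conv2d(3, 17, kernel_size=3, padding=1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    images = torch.rand(2, 3, 128, 128)
    gt_heatmaps = torch.rand(2, 17, 128, 128)
    print(train_step(net, images, gt_heatmaps, overall_l2, opt))
```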
Training time. On the COCO dataset, we measure the training time of our method, which trains the backbone model (HrHRNet-W32 [6]) with the loss function in Eq. 22, and compare it with the training time of the baseline that trains the same network with the overall L2 loss. As shown in Tab. 8, although our method achieves much better performance, it adds only a very small amount of training time. Note that since we follow the same evaluation procedure as previous works [6, 22], the testing time with and without our proposed method is the same.

Qualitative results. Some qualitative results are shown in Fig. 2. As shown, the baseline method, which uses the overall L2 loss to optimize the heatmap prediction, can miss or

Table 8. Comparison of the training time.
| Method | Training time per epoch | Performance (AP) |
| --- | --- | --- |
| Baseline (HrHRNet-W32) | 1.11h | 67.1 |
| Baseline + Ours | 1.19h | 69.9 |
+ +get inaccurate body joints in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). In contrast, our method locates body joints of different people in different sub-regions of the predicted heatmap more accurately at the same time, demonstrating the effectiveness of our method. + +# 5. Conclusion + +In this paper, we have proposed a novel bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted and GT heatmaps. We theoretically analyze that the distance between the two characteristic functions is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Thus, via minimizing the distance between the two characteristic functions, our method locates body joints in different sub-regions of the predicted heatmap more accurately at the same time. Our method achieves superior performance on the COCO dataset and the CrowdPose dataset. Besides, our method could potentially also be applied in other tasks such as multi-object 6D pose estimation [1], facial landmark extraction [3], and fingerprint minutiae detection [10]. We leave this as our future work. + +Acknowledgement. This work is supported by MOE AcRF Tier 2 (Proposal ID: T2EP20222-0035), National Research Foundation Singapore under its AI Singapore Programme (AISG-100E-2020-065), and SUTD SKI Project (SKI 2021_02_06). + +# References + +[1] Arash Amini, Arul Selvam Periyasamy, and Sven Behnke. Yolopose: Transformer-based multi-object 6d pose estimation using keypoint regression. In Intelligent Autonomous Systems 17: Proceedings of the 17th International Conference IAS-17, pages 392–406. Springer, 2023. 8 +[2] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7478-7487, 2020. 3, 4, 5 +[3] Matteo Bodini. A review of facial landmark extraction in 2d images and videos using deep learning. *Big Data and Cognitive Computing*, 3(1):14, 2019. 8 +[4] Guillem Brasó, Nikita Kister, and Laura Leal-Taixe. The center of attention: Center-keypoint grouping via attention for multi-person pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11853-11863, 2021. 5, 6, 7 +[5] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7291–7299, 2017. 1, 2, 3, 5, 6, 7 +[6] Bowen Cheng, Bin Xiao, Jingdong Wang, Honghui Shi, Thomas S Huang, and Lei Zhang. Higherhrnet: Scale-aware representation learning for bottom-up human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5386-5395, 2020. 1, 2, 5, 6, 7, 8 +[7] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 183-192, 2020. 1 +[8] Kacper P Chwialkowski, Aaditya Ramdas, Dino Sejdinovic, and Arthur Gretton. Fast two-sample testing with analytic representations of probability measures. Advances in Neural Information Processing Systems, 28, 2015. 3 +[9] TW Epps and Kenneth J Singleton. 
An omnibus test for the two-sample problem using the empirical characteristic function. Journal of Statistical Computation and Simulation, 26(3-4):177-203, 1986. 3 +[10] Yulin Feng and Ajay Kumar. Detecting locally, patching globally: An end-to-end framework for high speed and accurate detection of fingerprint minutiae. IEEE Transactions on Information Forensics and Security, 2023. 8 +[11] Zigang Geng, Ke Sun, Bin Xiao, Zhaoxiang Zhang, and Jingdong Wang. Bottom-up human pose estimation via disentangled keypoint regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14676-14686, 2021. 3, 5, 6, 7 +[12] Kerui Gu, Linlin Yang, and Angela Yao. Removing the bias of integral pose regression. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11067-11076, 2021. 1 + +[13] CE Heathcote. A test of goodness of fit for symmetric random variables1. Australian Journal of Statistics, 14(2):172-181, 1972. 3 +[14] Eldar Insafutdinov, Leonid Pishchulin, Bjoern Andres, Mykhaylo Andriluka, and Bernt Schiele. Deepercut: A deeper, stronger, and faster multi-person pose estimation model. In European conference on computer vision, pages 34-50. Springer, 2016. 3 +[15] Wentao Jiang, Sheng Jin, Wentao Liu, Chen Qian, Ping Luo, and Si Liu. Posetrans: A simple yet effective pose transformation augmentation for human pose estimation. arXiv preprint arXiv:2208.07755, 2022. 5, 6 +[16] Sheng Jin, Wentao Liu, Enze Xie, Wenhai Wang, Chen Qian, Wanli Ouyang, and Ping Luo. Differentiable hierarchical graph grouping for multi-person pose estimation. In European Conference on Computer Vision, pages 718-734. Springer, 2020. 1, 2, 3, 5, 6 +[17] Sven Kreiss, Lorenzo Bertoni, and Alexandre Alahi. Pifpaf: Composite fields for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11977-11986, 2019. 1, 2, 3, 5, 6 +[18] Jia Li, Wen Su, and Zengfu Wang. Simple pose: Rethinking and improving a bottom-up approach for multi-person pose estimation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 11354-11361, 2020. 6 +[19] Jiefeng Li, Can Wang, Hao Zhu, Yihuan Mao, Hao-Shu Fang, and Cewu Lu. Crowdpose: Efficient crowded scenes pose estimation and a new benchmark. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10863-10872, 2019. 2, 6, 7 +[20] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3 +[21] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 2, 6 +[22] Zhengxiong Luo, Zhicheng Wang, Yan Huang, Liang Wang, Tieniu Tan, and Erjin Zhou. Rethinking the heatmap regression for bottom-up human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13264-13273, 2021. 1, 2, 5, 6, 7, 8 +[23] Alejandro Newell, Zhiao Huang, and Jia Deng. Associative embedding: End-to-end learning for joint detection and grouping. Advances in neural information processing systems, 30, 2017. 1, 2, 3, 6 +[24] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In European conference on computer vision, pages 483-499. Springer, 2016. 
1, 2, 6 +[25] Xuecheng Nie, Jiashi Feng, Jianfeng Zhang, and Shuicheng Yan. Single-stage multi-person pose machines. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6951-6960, 2019. 1, 2, 3, 6 + +[26] George Papandreou, Tyler Zhu, Liang-Chieh Chen, Spyros Gidaris, Jonathan Tompson, and Kevin Murphy. Personlab: Person pose estimation and instance segmentation with a bottom-up, part-based, geometric embedding model. In Proceedings of the European conference on computer vision (ECCV), pages 269-286, 2018. 1, 2, 3, 5, 6 +[27] Leonid Pishchulin, Eldar Insafutdinov, Siyu Tang, Bjoern Andres, Mykhaylo Andriluka, Peter V Gehler, and Bernt Schiele. Deepcut: Joint subset partition and labeling for multi person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4929-4937, 2016. 3 +[28] Xuelin Qian, Yanwei Fu, Tao Xiang, Wenxuan Wang, Jie Qiu, Yang Wu, Yu-Gang Jiang, and Xiangyang Xue. Pose-normalized image generation for person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 650–667, 2018. 1 +[29] Haoxuan Qu, Li Xu, Yujun Cai, Lin Geng Foo, and Jun Liu. Heatmap distribution matching for human pose estimation. In Advances in Neural Information Processing Systems. 2 +[30] Dahu Shi, Xing Wei, Liangqi Li, Ye Ren, and Wenming Tan. End-to-end multi-person pose estimation with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11069-11078, 2022. 5, 6, 7 +[31] Weibo Shu, Jia Wan, Kay Chen Tan, Sam Kwong, and Antoni B Chan. Crowd counting in the frequency domain. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19618-19627, 2022. 3 +[32] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5693-5703, 2019. 1, 2 +[33] Jonathan J. Thompson, Arjun Jain, Yann LeCun, and Christoph Bregler. Joint training of a convolutional network and a graphical model for human pose estimation. Advances in neural information processing systems, 27, 2014. 1, 2 +[34] Ali Varamesh and Tinne Tuytelaars. Mixture dense regression for object detection and human pose estimation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13083-13092. IEEE, 2020. 6 +[35] Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9469-9478, 2019. 1 +[36] Dongkai Wang and Shiliang Zhang. Contextual instance decoupling for robust multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11060-11068, 2022. 5, 6, 7 +[37] Dongkai Wang, Shiliang Zhang, and Gang Hua. Robust pose estimation in crowded scenes with direct pose-level inference. Advances in Neural Information Processing Systems, 34:6278-6289, 2021. 5, 6, 7 +[38] Fangyun Wei, Xiao Sun, Hongyang Li, Jingdong Wang, and Stephen Lin. Point-set anchors for object detection, instance + +segmentation and pose estimation. In European Conference on Computer Vision, pages 527-544. Springer, 2020. 5, 6 +[39] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. 
In Proceedings of the European conference on computer vision (ECCV), pages 466-481, 2018. 1, 2 +[40] Yabo Xiao, Dongdong Yu, Xiao Juan Wang, Lei Jin, Guoli Wang, and Qian Zhang. Learning quality-aware representation for multi-person pose regression. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2822-2830, 2022. 5, 6 +[41] Jiangtao Xie, Fei Long, Jiaming Lv, Qilong Wang, and Peihua Li. Joint distribution matters: Deep brownian distance covariance for few-shot classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7972-7981, 2022. 3 +[42] Nan Xue, Tianfu Wu, Gui-Song Xia, and Liangpei Zhang. Learning local-global contextual adaptation for multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13065-13074, 2022. 5, 6 +[43] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial temporal graph convolutional networks for skeleton-based action recognition. In Thirty-second AAAI conference on artificial intelligence, 2018. 1 +[44] Yuhui Yuan, Rao Fu, Lang Huang, Weihong Lin, Chao Zhang, Xilin Chen, and Jingdong Wang. Hrformer: High-resolution transformer for dense prediction. 2021. 1, 2 +[45] Xingyi Zhou, Dequan Wang, and Philipp Krahenbuhl. Objects as points. arXiv preprint arXiv:1904.07850, 2019. 3 \ No newline at end of file diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/images.zip b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..6f17e83e3a4d2ab2f0ea797ce42fffa6b589eadc --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b692d7e6757633eb707187bb045be2684734e5751cb0a38bd06f003d55460911 +size 945097 diff --git a/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/layout.json b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5e9455dc7212a36310ea73c4902b72589d819d0e --- /dev/null +++ b/2023/A Characteristic Function-Based Method for Bottom-Up Human Pose Estimation/layout.json @@ -0,0 +1,8859 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 48, + 103, + 545, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 103, + 545, + 120 + ], + "spans": [ + { + "bbox": [ + 48, + 103, + 545, + 120 + ], + "type": "text", + "content": "A Characteristic Function-based Method for Bottom-up Human Pose Estimation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "spans": [ + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "content": "Haoxuan " + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "inline_equation", + "content": "\\mathrm{Qu}^{1}" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "content": ", Yujun " + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "inline_equation", + "content": "\\mathrm{Cai}^{2}" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "content": ", Lin Geng " + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "inline_equation", + "content": 
"\\mathrm{Foo}^{1}" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "content": ", Ajay Kumar" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "text", + "content": ", Jun Liu" + }, + { + "bbox": [ + 132, + 142, + 463, + 158 + ], + "type": "inline_equation", + "content": "^{1,*}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 158, + 443, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 158, + 443, + 172 + ], + "spans": [ + { + "bbox": [ + 151, + 158, + 443, + 172 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 151, + 158, + 443, + 172 + ], + "type": "text", + "content": "Singapore University of Technology and Design, Singapore" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 184, + 172, + 410, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 172, + 410, + 186 + ], + "spans": [ + { + "bbox": [ + 184, + 172, + 410, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 184, + 172, + 410, + 186 + ], + "type": "text", + "content": "Nanyang Technological University, Singapore" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 169, + 186, + 426, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 186, + 426, + 200 + ], + "spans": [ + { + "bbox": [ + 169, + 186, + 426, + 200 + ], + "type": "text", + "content": "3The Hong Kong Polytechnic University, Hong Kong" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 201, + 521, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 201, + 521, + 213 + ], + "spans": [ + { + "bbox": [ + 70, + 201, + 521, + 213 + ], + "type": "text", + "content": "haoxuan-qu@mymail.sutd.edu.sg, yujun001@e.ntu.edu.sg, lingeng.foo@mymail.sutd.edu.sg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 176, + 216, + 414, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 216, + 414, + 227 + ], + "spans": [ + { + "bbox": [ + 176, + 216, + 414, + 227 + ], + "type": "text", + "content": "ajay.kumar@polyu.edu.hk, jun.liu@sutd.edu.sg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "spans": [ + { + "bbox": [ + 143, + 255, + 192, + 268 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 280, + 290, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 280, + 290, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 280, + 290, + 579 + ], + "type": "text", + "content": "Most recent methods formulate the task of human pose estimation as a heatmap estimation problem, and use the overall L2 loss computed from the entire heatmap to optimize the heatmap prediction. In this paper, we show that in bottom-up human pose estimation where each heatmap often contains multiple body joints, using the overall L2 loss to optimize the heatmap prediction may not be the optimal choice. This is because, minimizing the overall L2 loss cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. 
To cope with this problem, from a novel perspective, we propose a new bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted heatmap and the groundtruth heatmap. Our analysis presented in this paper indicates that the distance between these two characteristic functions is essentially the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, we can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap. We show the effectiveness of our proposed method through extensive experiments on the COCO dataset and the CrowdPose dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 614, + 128, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 614, + 128, + 627 + ], + "spans": [ + { + "bbox": [ + 47, + 614, + 128, + 627 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 635, + 287, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 635, + 287, + 696 + ], + "spans": [ + { + "bbox": [ + 46, + 635, + 287, + 696 + ], + "type": "text", + "content": "Human pose estimation aims to locate the body joints of each person in a given RGB image. It is relevant to various applications, such as action recognition [7, 43], person Re-ID [28], and human object interaction [35]. For tackling human pose estimation, most of the recent methods fall" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 256, + 545, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 256, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 306, + 256, + 545, + 376 + ], + "type": "text", + "content": "into two major categories: top-down methods and bottom-up methods. Top-down methods [24,32,33,39,44] generally use a human detector to detect all the people in the image, and then perform single-person pose estimation for each detected subject separately. In contrast, bottom-up methods [5,6,16,17,22,23,25,26] usually locate the body joints of all people in the image at the same time. Hence, bottom-up methods, the main focus of this paper, are often a more efficient choice compared to top-down methods, especially when there are many people in the input image [5]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "spans": [ + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": "In existing works, it is common to regard human pose estimation as a heatmap prediction problem, since this can preserve the spatial structure of the input image throughout the encoding and decoding process [12]. During the general optimization process, the groundtruth (GT) heatmaps " + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_g" + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": " are first constructed via putting 2D Gaussian blobs centered at the GT coordinates of the body joints. 
After that, these constructed GT heatmaps are used to supervise the predicted heatmaps " + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_p" + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": " via the overall L2 loss " + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "inline_equation", + "content": "L_2^{overall}" + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": " calculated (averaged) over the whole heatmap. Specifically, denoting the area of the heatmap as " + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "inline_equation", + "content": "L_2^{overall} = \\frac{\\|\\mathbf{H}_p - \\mathbf{H}_g\\|_2^2}{A}" + }, + { + "bbox": [ + 305, + 376, + 546, + 521 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 522, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 522, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 522, + 547, + 713 + ], + "type": "text", + "content": "We argue that using the overall L2 loss to supervise the predicted heatmap may not be the optimal choice in bottom-up methods where each heatmap often contains multiple body joints from the multiple people in various sub-regions, as shown in Fig. 1(b). This is because, a smaller overall L2 loss calculated over the whole heatmap cannot always lead the model to locate all the body joints across different sub-regions in the heatmap more accurately. As illustrated in Fig. 1(a), the predicted heatmap #2 has a smaller overall L2 loss compared to the predicted heatmap #1. However, the predicted heatmap #2 locates the body joint in the top-right sub-region wrongly, whereas the predicted heatmap #1 locates body joints in both the top-right and bottom-left sub-regions correctly. This is because, while the decrease of the overall L2 loss can be achieved when the L2 loss w.r.t. each sub-region either decreases or remains the same, this is not necessarily true for all regions." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 137, + 712 + ], + "type": "text", + "content": "*Corresponding Author" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 758 + ], + "type": "text", + "content": "13009" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 100, + 72, + 245, + 267 + ], + "blocks": [ + { + "bbox": [ + 100, + 72, + 245, + 267 + ], + "lines": [ + { + "bbox": [ + 100, + 72, + 245, + 267 + ], + "spans": [ + { + "bbox": [ + 100, + 72, + 245, + 267 + ], + "type": "image", + "image_path": "02e7d252ce814c9f2ba6b4c3168443053f5d3fa053626fd76dfe151096be69a8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 169, + 273, + 180, + 283 + ], + "lines": [ + { + "bbox": [ + 169, + 273, + 180, + 283 + ], + "spans": [ + { + "bbox": [ + 169, + 273, + 180, + 283 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 271, + 72, + 506, + 266 + ], + "blocks": [ + { + "bbox": [ + 271, + 72, + 506, + 266 + ], + "lines": [ + { + "bbox": [ + 271, + 72, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 271, + 72, + 506, + 266 + ], + "type": "image", + "image_path": "18735f821139ff761ecdb7c55bc60c0937b2ab52d3e8fc8c84dd5d5010284298.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 384, + 272, + 395, + 283 + ], + "lines": [ + { + "bbox": [ + 384, + 272, + 395, + 283 + ], + "spans": [ + { + "bbox": [ + 384, + 272, + 395, + 283 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 285, + 545, + 340 + ], + "lines": [ + { + "bbox": [ + 46, + 285, + 545, + 340 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 545, + 340 + ], + "type": "text", + "content": "Figure 1. (a) Illustration of heatmaps. The predicted heatmap #2 with a smaller overall L2 loss locates the body joint in the top-right sub-region wrongly, while the predicted heatmap #1 with a larger overall L2 loss locates body joints in both the top-right and bottom-left sub-regions correctly. (b) Output of a commonly used bottom-up method, HrHRNet-W32 [6]. As shown, it misses left ankle in the dashed sub-region of image (i) completely, and misidentifies right knee in the dashed sub-region of image (ii). This indicates that accurately localizing the body joints of multiple people in a single heatmap is a challenging problem. (Best viewed in color.)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 347, + 287, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 347, + 287, + 515 + ], + "spans": [ + { + "bbox": [ + 46, + 347, + 287, + 515 + ], + "type": "text", + "content": "same (e.g., from predicted heatmap #0 to predicted heatmap #1), it can also be achieved when there is a decrease of L2 loss w.r.t. 
certain sub-regions and an increase of L2 loss for some other sub-regions (e.g., from predicted heatmap #1 to predicted heatmap #2). This indicates that, in bottom-up methods, the decrease of the overall L2 loss does not always lead to a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time. Besides, we also show some results of a commonly used bottom-up method, HrHRNet-W32 [6], in Fig. 1(b). As shown, it may even miss or misidentify certain body joints when there are a number of people in the input image. This indicates that it is quite difficult to accurately locate all body joints of all people in the predicted heatmap." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 518, + 287, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 518, + 287, + 625 + ], + "spans": [ + { + "bbox": [ + 46, + 518, + 287, + 625 + ], + "type": "text", + "content": "To tackle the above-mentioned problem in bottom-up methods, in this paper, rather than using the overall L2 loss to supervise the whole heatmap, we instead aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To this end, from a new perspective, we express the predicted and GT heatmaps as characteristic functions, and minimize the difference between these functions, allowing different sub-regions of the predicted heatmap to be optimized at the same time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 630, + 287, + 713 + ], + "type": "text", + "content": "More specifically, we first construct two distributions respectively from the predicted heatmap and the GT heatmap. After that, we obtain two characteristic functions of these two distributions and optimize the heatmap prediction via minimizing the distance between these two characteristic functions. We analyze in Sec. 3.3 that the distance between the two characteristic functions is the upper bound of the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 347, + 545, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 347, + 545, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 545, + 407 + ], + "type": "text", + "content": "L2 losses w.r.t sub-regions in the predicted heatmap. Therefore, via minimizing the distance between the two characteristic functions, our method can locate body joints in different sub-regions more accurately at the same time, and thus achieve superior performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 407, + 545, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 545, + 515 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 545, + 515 + ], + "type": "text", + "content": "The contributions of our work are summarized as follows. 1) From a new perspective, we supervise the predicted heatmap using the distance between the characteristic functions of the predicted and GT heatmaps. 2) We analyze (in Sec. 3.3) that the L2 losses w.r.t. sub-regions of the predicted heatmap are upper-bounded by the distance between the characteristic functions. 3) Our proposed method achieves state-of-the-art performance on the evaluation benchmarks [19, 21]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 526, + 392, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 526, + 392, + 538 + ], + "spans": [ + { + "bbox": [ + 306, + 526, + 392, + 538 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 546, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 546, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 546, + 545, + 700 + ], + "type": "text", + "content": "Human Pose Estimation. Due to the wide range of applications, human pose estimation has received lots of attention [5, 6, 16, 17, 22-26, 29, 32, 33, 39, 44], and most of the recent methods fall into two categories: top-down methods and bottom-up methods. In top-down methods, a human detector is generally used to detect all the people in the image first, and then single-person pose estimation is conducted for each detected subject separately. The single-person pose estimation methods that are commonly used in top-down methods include Hourglass [24], Simple Baseline [39], HRNet [32], and HRFormaler [44], etc. Besides top-down methods, bottom-up methods [5, 6, 16, 17, 22, 23, 25, 26] have also attracted a lot of attention recently due to its efficiency [5]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "In bottom-up methods, most methods first detect all" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13010" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 215 + ], + "type": "text", + "content": "identity-free body joints over the whole input image, and then group them into different people. Among these methods, DeepCut and Person-Lab [14,26,27] incorporate offset fields into their methods, while Openpose and PifPaf [5,17] make use of part affinity fields in their methods. From another perspective, associate embedding [23] teaches the model to output the group assignments and the localization results of the body joints at the same time, and HGG [16] further combines graph neural networks on top of the associate embedding. Besides the above methods, there also exist some bottom-up methods [11,25,45] that directly regress the coordinates of body joints belonging to the same person." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 216, + 287, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 216, + 287, + 287 + ], + "spans": [ + { + "bbox": [ + 46, + 216, + 287, + 287 + ], + "type": "text", + "content": "Existing heatmap-based bottom-up methods often use an overall L2 loss calculated over the whole heatmap to optimize heatmap prediction. 
Differently, in this paper, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the difference between the characteristic functions of the predicted and GT heatmaps." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 288, + 287, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 288, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 288, + 287, + 443 + ], + "type": "text", + "content": "Characteristic Function. The characteristic function, a concept originally proposed in probability theory and statistics, has been studied in various areas [2,8,9,13,20,31,41] over the years, such as two-sample testing [8,9,13], generative adversarial nets [2,20], and few-shot classification [41]. Inspired by these works, in this paper, from a novel perspective, we propose to optimize the heatmap prediction for bottom-up human pose estimation via minimizing the distance between two characteristic functions. We theoretically analyze that the distance between the two characteristic functions respectively constructed from the predicted heatmap and the GT heatmap is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 453, + 102, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 453, + 102, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 453, + 102, + 464 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 473, + 287, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 473, + 287, + 604 + ], + "spans": [ + { + "bbox": [ + 46, + 473, + 287, + 604 + ], + "type": "text", + "content": "In bottom-up human pose estimation, as shown in Fig. 1(a), minimizing the overall L2 loss between the predicted heatmap and the GT heatmap cannot always lead the model to locate all the body joints across different sub-regions of the heatmap more accurately. In this work, we aim to optimize the body joints over sub-regions of the predicted heatmap at the same time. To achieve this, we propose a new bottom-up method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions constructed from the predicted and GT heatmaps." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 605, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 605, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 605, + 287, + 652 + ], + "type": "text", + "content": "Below, we first briefly introduce the characteristic function, and then discuss how we formulate the heatmap optimization process. After that, we show the theoretical analysis of our proposed method." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 659, + 231, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 659, + 231, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 659, + 231, + 672 + ], + "type": "text", + "content": "3.1. Revisiting Characteristic Function" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "The characteristic function is generally used in probability theory and statistics. 
Given an " + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "-dimensional distribution " + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": ", its corresponding characteristic function " + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\varphi_{D}" + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": " can be" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 73, + 351, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 73, + 351, + 83 + ], + "spans": [ + { + "bbox": [ + 306, + 73, + 351, + 83 + ], + "type": "text", + "content": "written as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 342, + 84, + 545, + 109 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 84, + 545, + 109 + ], + "spans": [ + { + "bbox": [ + 342, + 84, + 545, + 109 + ], + "type": "interline_equation", + "content": "\\varphi_ {D} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] = \\int_ {\\mathbb {R} ^ {N}} e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d D \\tag {1}", + "image_path": "1ecf85c1ec965792f6887c435c73b33650af0fd154fbc79c19d7e981e81667f9.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "spans": [ + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": " represents expectation, " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "i^2 = -1" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "\\langle \\cdot, \\cdot \\rangle" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": " represents dot product, " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": " is a random " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": "-dimensional vector, and " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": "-dimensional vector sampled from " + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": ". 
Note that the characteristic function always exists and has a one-to-one correspondence with the distribution. Besides, the characteristic function is the Fourier transform of the probability density function if the latter exists as well. Moreover, the characteristic function is always finite and bounded (" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "inline_equation", + "content": "|\\varphi_D(\\mathbf{t})| \\leq 1" + }, + { + "bbox": [ + 304, + 110, + 545, + 230 + ], + "type": "text", + "content": "). This makes calculation of the distance between two characteristic functions always meaningful." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 241, + 520, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 241, + 520, + 254 + ], + "spans": [ + { + "bbox": [ + 305, + 241, + 520, + 254 + ], + "type": "text", + "content": "3.2. Proposed Heatmap Optimization Process" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 260, + 545, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 260, + 545, + 342 + ], + "spans": [ + { + "bbox": [ + 304, + 260, + 545, + 342 + ], + "type": "text", + "content": "Below, we discuss how we formulate the heatmap optimization process for bottom-up human pose estimation via (1) constructing two distributions from the predicted heatmap and the GT heatmap respectively; (2) calculating characteristic functions from these two distributions; and (3) formulating the loss function as the distance between the two characteristic functions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": "Distribution Construction. Given an input image, for each type of body joints, we denote the corresponding predicted heatmap as " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{p}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " and the corresponding GT heatmap as " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{g}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": ". We propose to formulate the two distributions " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "D(H_{p})" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "D(H_{g})" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " from the two heatmaps " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{p}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{g}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " with the following two steps. 
(1) As distributions cannot hold negative probabilities, we first pass " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{p}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " through a relu activation function to make it non-negative. Note that " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{g}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " is already non-negative. (2) After that, as the sum of probabilities of each constructed distribution needs to be 1, we further normalize both the output of step (1) and " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "H_{g}" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": ". Hence, with the above two steps, we formulate " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "D(H_{p})" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "inline_equation", + "content": "D(H_{g})" + }, + { + "bbox": [ + 304, + 345, + 545, + 489 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 319, + 491, + 545, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 491, + 545, + 517 + ], + "spans": [ + { + "bbox": [ + 319, + 491, + 545, + 517 + ], + "type": "interline_equation", + "content": "D \\left(H _ {p}\\right) = \\frac {\\operatorname {r e l u} \\left(H _ {p}\\right)}{\\left\\| \\operatorname {r e l u} \\left(H _ {p}\\right) \\right\\| _ {1}}, \\quad D \\left(H _ {g}\\right) = \\frac {H _ {g}}{\\left\\| H _ {g} \\right\\| _ {1}} \\tag {2}", + "image_path": "fb0c1a90496d46f28b104600e1118cfa651422278e97d91188edd4c171fb16ac.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "content": "Characteristic Function Calculation. For every type of body joints, after formulating the two distributions " + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "inline_equation", + "content": "D(H_{p})" + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "inline_equation", + "content": "D(H_{g})" + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "content": ", we follow Eq. 
1 to calculate the two characteristic functions " + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "inline_equation", + "content": "\\varphi_{D(H_p)}(\\mathbf{t})" + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "inline_equation", + "content": "\\varphi_{D(H_g)}(\\mathbf{t})" + }, + { + "bbox": [ + 304, + 519, + 545, + 569 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 359, + 571, + 545, + 603 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 571, + 545, + 603 + ], + "spans": [ + { + "bbox": [ + 359, + 571, + 545, + 603 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\varphi_ {D \\left(H _ {p}\\right)} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D \\left(H _ {p}\\right)} \\left[ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\right], \\tag {3} \\\\ \\varphi_ {D (H _ {g})} (\\mathbf {t}) = E _ {\\mathbf {x} \\sim D (H _ {g})} [ e ^ {i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} ] \\\\ \\end{array}", + "image_path": "12cf7e7f09369b8d9a33236292963bfa1046294aaaf9d2abfc3ee43e7aeb9b99.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 605, + 545, + 700 + ], + "type": "text", + "content": "Loss Function Formulation. Above we discuss how we obtain the two characteristic functions w.r.t. the predicted heatmap and the GT heatmap for a single type of body joints. Note that in bottom-up human pose estimation, multiple types of body joints are required to be located at the same time. Here, we first discuss how we formulate the loss function for a single type of body joints, and then introduce the overall loss function for all types of body joints." 
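Before turning to the loss itself, the two construction steps of Eq. 2 and the characteristic functions of Eq. 3 can be sketched in a few lines of PyTorch; the helper name, the choice of pixel coordinates as the support of the 2-D distribution, and the numerical epsilon are assumptions of this illustration rather than details taken from the paper.

```python
import torch

def heatmap_char_fn(heatmap, ts):
    """Characteristic function of the distribution built from one heatmap (Eqs. 2-3).

    heatmap : (H, W) tensor, predicted or ground-truth heatmap for one joint type
    ts      : (M, 2) tensor of vectors t
    Returns a complex tensor of shape (M,).
    """
    H, W = heatmap.shape
    # Eq. 2: clamp negative values with ReLU, then normalise so the entries sum to 1
    p = torch.relu(heatmap).reshape(-1)
    p = p / p.sum().clamp_min(1e-12)
    # Support of the 2-D distribution: the pixel coordinates of the heatmap
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    coords = torch.stack([xs, ys], dim=-1).reshape(-1, 2).float()      # (H*W, 2)
    # Eq. 3: E_{x~D}[exp(i<t, x>)] as a weighted sum over all pixels
    dots = ts @ coords.T                                               # (M, H*W)
    return (torch.complex(torch.cos(dots), torch.sin(dots)) * p).sum(dim=1)
```

Calling this once on the predicted heatmap and once on the GT heatmap yields the two functions compared by the loss below; gradients flow back through the ReLU and the normalisation to the network output.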
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "To formulate the loss function for the " + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 317, + 701, + 545, + 713 + ], + "type": "text", + "content": "-th type of body" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13011" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "text", + "content": "joints, given the two characteristic functions " + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "inline_equation", + "content": "\\varphi_{D(H_p)}^k (\\mathbf{t})" + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "inline_equation", + "content": "\\varphi_{D(H_g)}^k (\\mathbf{t})" + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "text", + "content": ", we first write the loss function " + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 46, + 72, + 287, + 111 + ], + "type": "text", + "content": " as the distance between these two characteristic functions [2]:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 114, + 287, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 114, + 287, + 137 + ], + "spans": [ + { + "bbox": [ + 64, + 114, + 287, + 137 + ], + "type": "interline_equation", + "content": "L _ {k} = \\int_ {\\mathbb {R} ^ {2}} \\| \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\| _ {2} ^ {2} \\omega (\\mathbf {t}, \\eta) d \\mathbf {t} \\tag {4}", + "image_path": "a996f45774beb3d34862f4bcf52ca45a88033de548ee861ea297b4849adc851f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "spans": [ + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "\\omega (\\mathbf{t},\\eta)" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " is a weighting function. 
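When ω(·, η) is a probability density, as chosen immediately below, the integral in Eq. 4 is simply an expectation over t and can be estimated by sampling; the short sketch here is an illustrative Monte Carlo estimate (it reuses the hypothetical heatmap_char_fn helper from above and omits the constant factor introduced later), not the authors' implementation.

```python
import torch

def char_fn_distance(pred_heatmap, gt_heatmap, t_samples):
    """Monte Carlo estimate of Eq. 4, with t_samples drawn from the weighting density omega.

    t_samples : (M, 2) tensor of vectors t ~ omega(., eta)
    """
    diff = heatmap_char_fn(pred_heatmap, t_samples) - heatmap_char_fn(gt_heatmap, t_samples)
    # E_{t~omega}[ ||phi_p(t) - phi_g(t)||_2^2 ] ~= mean squared modulus over the samples
    return (diff.abs() ** 2).mean()
```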
Here we set " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "\\omega (\\mathbf{t},\\eta)" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " to be the probability density function of a uniform distribution in " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "B_{U}" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "B_{U} = [-U,U]\\times [-U,U]" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " is a finite predefined range and " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " is a hyperparameter. This means that, " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "\\omega (\\mathbf{t},\\eta) = \\frac{1}{4U^2}" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "\\mathbf{t}\\in B_U" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "inline_equation", + "content": "\\omega (\\mathbf{t},\\eta) = 0" + }, + { + "bbox": [ + 46, + 140, + 287, + 212 + ], + "type": "text", + "content": " otherwise. We thus further rewrite Eq. 4 as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 223, + 287, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 223, + 287, + 249 + ], + "spans": [ + { + "bbox": [ + 64, + 223, + 287, + 249 + ], + "type": "interline_equation", + "content": "L _ {k} = \\int_ {B _ {U}} \\| \\frac {1}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {5}", + "image_path": "420169d2b6151526f00593bc241e18e9c62ef1546bfa3c56573895628c691714.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 251, + 286, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 286, + 273 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 286, + 273 + ], + "type": "text", + "content": "Finally, from Eq. 
5, we formulate the loss function " + }, + { + "bbox": [ + 46, + 251, + 286, + 273 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 46, + 251, + 286, + 273 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 275, + 287, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 275, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 64, + 275, + 287, + 300 + ], + "type": "interline_equation", + "content": "L _ {k} = \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} \\tag {6}", + "image_path": "e8841547bf3b73e1bffa2ca847ea66c53478263a0cdd00a862c1a0c44f9f18e6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "spans": [ + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "inline_equation", + "content": "\\gamma = \\frac{U^2\\sqrt{A}}{\\pi^2}" + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "text", + "content": " is a constant coefficient and " + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "text", + "content": " is the area of the heatmap. Note that Eq. 6 is equivalent to Eq. 5 during the optimization process, as the efficacy of the added constant " + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 46, + 303, + 287, + 353 + ], + "type": "text", + "content": " can be achieved by adjusting the learning rate." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 354, + 286, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 354, + 286, + 377 + ], + "spans": [ + { + "bbox": [ + 45, + 354, + 286, + 377 + ], + "type": "text", + "content": "After getting the loss function for each type of body joints, we formulate the total loss for all types of joints as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 380, + 287, + 412 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 287, + 412 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 287, + 412 + ], + "type": "interline_equation", + "content": "L _ {t o t a l} = \\sum_ {k = 1} ^ {K} L _ {k} \\tag {7}", + "image_path": "87c19e62388a2401b82b345f28600d614d484ec1267f32ac932c12094a60f57d.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 414, + 266, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 414, + 266, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 414, + 266, + 426 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 414, + 266, + 426 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 47, + 414, + 266, + 426 + ], + "type": "text", + "content": " denotes the total number of body joint types." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 434, + 165, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 434, + 165, + 447 + ], + "spans": [ + { + "bbox": [ + 47, + 434, + 165, + 447 + ], + "type": "text", + "content": "3.3. 
Theoretical Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 453, + 287, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 453, + 287, + 502 + ], + "spans": [ + { + "bbox": [ + 46, + 453, + 287, + 502 + ], + "type": "text", + "content": "Below, we perform theoretical analysis to show the effectiveness of our method for bottom-up human pose estimation. Before going into the theorem, we first introduce a lemma that can facilitate the proof of the theorem." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "spans": [ + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": "Lemma 1. Let " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "\\varphi_{D}" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " be the characteristic function of a 2-dimensional distribution " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "R^{r} = [x_{1}^{lower}, x_{1}^{upper}] \\times [x_{2}^{lower}, x_{2}^{upper}]" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " a rectangular region, " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "R^{e} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times [x_{2}^{lower}, x_{2}^{upper}]\\cup [x_{1}^{lower}, x_{1}^{upper}] \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " the edges of this region, and " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "R^{v} = \\{x_{1}^{lower}, x_{1}^{upper}\\} \\times \\{x_{2}^{lower}, x_{2}^{upper}\\}" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " the vertices of this region. Let " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "B_{T} = [-T,T] \\times [-T,T]" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": ". Denote " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "[D]_{R}" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " the portion of the distribution " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "inline_equation", + "content": "[D]_{R^r}" + }, + { + "bbox": [ + 46, + 511, + 287, + 617 + ], + "type": "text", + "content": " can then be written as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 619, + 299, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 619, + 299, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 619, + 299, + 689 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {\\text {l o w e r}}} - e ^ {- i t _ {n} x _ {n} ^ {\\text {u p p e r}}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. \\varphi_ {D} (\\boldsymbol {t})\\right) d t _ {1} d t _ {2}) + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {8} \\\\ \\end{array}", + "image_path": "7445744b0b919b7dd75fd16afd2aa588a3c08167acf7813608d657f2a3da763a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\epsilon ([D]_{R^r}) = \\frac{[D]_{R^e}}{2} +\\frac{[D]_{R^v}}{4}" + }, + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "inline_equation", + "content": "dt_{1}dt_{2}" + }, + { + "bbox": [ + 46, + 687, + 287, + 713 + ], + "type": "text", + "content": " are calculated based on the Lebesgue measure." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 545, + 107 + ], + "type": "text", + "content": "The proof of Lemma 1 is provided in the supplementary. After introducing this lemma, we analyze our proposed method below." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "spans": [ + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": "Theorem 1. Let " + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "R_{sub}^{r}" + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": " be a random rectangular sub-region in the heatmap of the " + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": "-th type of body joints where " + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "\\left\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\right\\|_2^2" + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": " is relatively small compared to " + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "\\left\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\right\\|_2^2" + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": ". The relation between the L2 loss w.r.t. 
this sub-region and " + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "inline_equation", + "content": "L_k" + }, + { + "bbox": [ + 305, + 120, + 545, + 191 + ], + "type": "text", + "content": " can be written as:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 345, + 192, + 545, + 221 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 192, + 545, + 221 + ], + "spans": [ + { + "bbox": [ + 345, + 192, + 545, + 221 + ], + "type": "interline_equation", + "content": "\\frac {\\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2}}{\\lambda \\left(R _ {s u b} ^ {r}\\right)} \\leq L _ {k} \\tag {9}", + "image_path": "eefa82cd022daf8169d63865385421e38916dc2fca41a44fe2aa553a542bb269.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "spans": [ + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "inline_equation", + "content": "\\lambda(R_{sub}^{r})" + }, + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "text", + "content": " as the Lebesgue measure represents the area of " + }, + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "inline_equation", + "content": "R_{sub}^{r}" + }, + { + "bbox": [ + 305, + 222, + 545, + 247 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 258, + 544, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 258, + 544, + 281 + ], + "spans": [ + { + "bbox": [ + 305, + 258, + 544, + 281 + ], + "type": "text", + "content": "Proof. To prove Theorem 1, we first reformulate Lemma 1 as:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 282, + 563, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 282, + 563, + 379 + ], + "spans": [ + { + "bbox": [ + 306, + 282, + 563, + 379 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} [ D ] _ {R ^ {r}} = \\left(\\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\left(\\prod_ {n = 1} ^ {2} \\left(\\frac {e ^ {- i t _ {n} x _ {n} ^ {l o w e r}} - e ^ {- i t _ {n} x _ {n} ^ {u p p e r}}}{i t _ {n}}\\right)\\right.\\right. \\\\ \\left. 
\\varphi_ {D} (\\mathbf {t})\\right) d t _ {1} d t _ {2}) + \\epsilon \\left(\\left[ D \\right] _ {R ^ {r}}\\right) \\\\ = \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D} (\\mathbf {t}) \\int_ {R ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} + \\epsilon ([ D ] _ {R ^ {r}}) \\tag {10} \\\\ \\end{array}", + "image_path": "e396615fe9cd55e890d26e977506e57ee17486b529f17ea408671bd044f91a29.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "inline_equation", + "content": "d\\mathbf{t} = dt_1dt_2" + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "text", + "content": ", and both " + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "inline_equation", + "content": "d\\mathbf{x}" + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "inline_equation", + "content": "d\\mathbf{t}" + }, + { + "bbox": [ + 305, + 380, + 545, + 403 + ], + "type": "text", + "content": " are calculated based on the Lebesgue measure." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 306, + 405, + 545, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 405, + 545, + 419 + ], + "spans": [ + { + "bbox": [ + 306, + 405, + 545, + 419 + ], + "type": "text", + "content": "After that, we rewrite " + }, + { + "bbox": [ + 306, + 405, + 545, + 419 + ], + "type": "inline_equation", + "content": "\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sab}^r}\\| _2^2" + }, + { + "bbox": [ + 306, + 405, + 545, + 419 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 306, + 441, + 559, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 441, + 559, + 696 + ], + "spans": [ + { + "bbox": [ + 306, + 441, + 559, + 696 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right\\| _ {2} ^ {2} (11) \\\\ \\approx \\left\\| \\left[ D \\left(H _ {p}\\right) \\right] _ {R _ {s u b} ^ {r}} - \\left[ D \\left(H _ {g}\\right) \\right] _ {R _ {s u b} ^ {r}} \\right. 
(12) \\\\ - \\left(\\epsilon ([ D (H _ {p}) ] _ {R _ {s u b} ^ {r}}) - \\epsilon ([ D (H _ {g}) ] _ {R _ {s u b} ^ {r}})\\right) \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} (13) \\\\ - \\lim _ {T \\rightarrow \\infty} \\frac {1}{(2 \\pi) ^ {2}} \\int_ {B _ {T}} \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t}) \\int_ {R _ {s u b} ^ {r}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} \\\\ = \\| \\lim _ {T \\rightarrow \\infty} \\int_ {B _ {T}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\| _ {2} ^ {2} (14) \\\\ \\approx \\left\\| \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} d \\mathbf {x} d \\mathbf {t} \\right\\| _ {2} ^ {2} (15) \\\\ \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} e ^ {- i \\langle \\mathbf {t}, \\mathbf {x} \\rangle} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (16) \\\\ \\end{array}", + "image_path": "01ad8babb7c4a197f116b11ac0bb80facdf4b80b3df1cb0843d3d731804259ed.jpg" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 293, + 748, + 317, + 757 + ], + "type": "text", + "content": "13012" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 85, + 547, + 282 + ], + "blocks": [ + { + "bbox": [ + 122, + 70, + 470, + 82 + ], + "lines": [ + { + "bbox": [ + 122, + 70, + 470, + 82 + ], + "spans": [ + { + "bbox": [ + 122, + 70, + 470, + 82 + ], + "type": "text", + "content": "Table 1. Comparisons with bottom-up methods on the COCO val2017 set (single-scale testing)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 85, + 547, + 282 + ], + "lines": [ + { + "bbox": [ + 50, + 85, + 547, + 282 + ], + "spans": [ + { + "bbox": [ + 50, + 85, + 547, + 282 + ], + "type": "table", + "html": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
OpenPose [5]CVPR 2017VGG-19-61.084.967.556.369.3
HGG [16]ECCV 2020Hourglass51260.483.066.2--
PersonLab [26]ECCV 2018ResNet-152140166.586.271.962.373.2
PifPaf [17]CVPR 2019ResNet-152-67.4----
PETR [30]CVPR 2022-133367.487.074.961.775.9
DEKR [11]CVPR 2021HRNet-W4864071.088.377.466.778.5
PINet [37]NIPS 2021HRNet-W3251267.4----
CIR&QEM [40]AAAI 2022HRNet-W4864072.489.1-67.380.4
CID [36]CVPR 2022HRNet-W3251266.086.772.359.876.0
LOGP-CAP [42]CVPR 2022HRNet-W4864072.288.978.968.178.9
SWAHR [22]CVPR 2021HrHRNet-W3251268.987.874.963.077.4
SWAHR [22]CVPR 2021HrHRNet-W4864070.888.576.866.377.4
CenterAttention [4]ICCV 2021HrHRNet-W3251268.687.674.162.078.0
PoseTrans [15]ECCV 2022HrHRNet-W3251268.487.174.862.777.1
HrHRNet [6]CVPR 2020HrHRNet-W3251267.186.273.061.576.1
+ OursHrHRNet-W3251269.9(↑2.8)88.176.064.278.1
HrHRNet [6]CVPR 2020HrHRNet-W4864069.987.276.165.476.4
+ OursHrHRNet-W4864072.5(↑2.6)89.379.168.379.0
", + "image_path": "0f16cddc193ecab6c0f786f612699157d12ab70cc871936cab702075c9d01afe.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 50, + 299, + 547, + 414 + ], + "blocks": [ + { + "bbox": [ + 123, + 285, + 470, + 296 + ], + "lines": [ + { + "bbox": [ + 123, + 285, + 470, + 296 + ], + "spans": [ + { + "bbox": [ + 123, + 285, + 470, + 296 + ], + "type": "text", + "content": "Table 2. Comparisons with bottom-up methods on the COCO val2017 set (multi-scale testing)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 299, + 547, + 414 + ], + "lines": [ + { + "bbox": [ + 50, + 299, + 547, + 414 + ], + "spans": [ + { + "bbox": [ + 50, + 299, + 547, + 414 + ], + "type": "table", + "html": "
MethodVenueBackboneInput sizeAP\\(AP^{50}\\)\\(AP^{75}\\)\\(AP^M\\)\\(AP^L\\)
HGG [16]ECCV 2020Hourglass51268.386.775.8--
Point-Set Anchors [38]ECCV 2020HRNet-W4864069.888.876.3--
DEKR [11]CVPR 2021HRNet-W4864072.388.378.668.678.6
SWAHR [22]CVPR 2021HrHRNet-W3251271.488.977.866.378.9
SWAHR [22]CVPR 2021HrHRNet-W4864073.289.879.169.179.3
PoseTrans [15]ECCV 2022HrHRNet-W3251271.288.277.266.578.0
HrHRNet [6]CVPR 2020HrHRNet-W3251269.987.176.065.377.0
+ OursHrHRNet-W3251271.8(↑1.9)88.978.167.378.4
HrHRNet [6]CVPR 2020HrHRNet-W4864072.188.478.267.878.3
+ OursHrHRNet-W4864073.7(↑1.6)89.979.669.679.5
", + "image_path": "656ee330773926249188773bb24cfbd2f9fa8ce540e949484aeeee5249f30471.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 421, + 287, + 568 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 421, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 52, + 421, + 287, + 568 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\leq 4 U ^ {2} A \\int_ {B _ {U}} \\int_ {R _ {s u b} ^ {r}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {x} d \\mathbf {t} (17) \\\\ = 4 U ^ {2} A \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})}{(2 \\pi) ^ {2}} \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (18) \\\\ = \\int_ {R _ {s u b} ^ {r}} \\int_ {B _ {U}} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t})\\right) \\| _ {2} ^ {2} d \\mathbf {t} d \\mathbf {x} (19) \\\\ = L _ {k} \\lambda \\left(R _ {\\text {s u b}} ^ {r}\\right) (20) \\\\ \\end{array}", + "image_path": "a9ab9956edb2077b6f9f13b24c63265a2e21875024407f5786118ccc93c9f13d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "spans": [ + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "text", + "content": "where Eq. 12 holds since " + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "inline_equation", + "content": "\\| [D(H_p)]_{R_{sub}^e} - [D(H_g)]_{R_{sub}^e}\\| _2^2" + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "text", + "content": " is relatively small compared to " + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "inline_equation", + "content": "\\| [D(H_p)]_{R_{sub}^r} - [D(H_g)]_{R_{sub}^r}\\| _2^2" + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "text", + "content": ", Eq. 13 holds because of Eq. 10, Eq. 15 holds based on the analysis in the supplementary, Eq. 16 holds due to the continuity of L2 distance and the Cauchy-Schwarz inequality, Eq. 17 holds due to the fact that " + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "inline_equation", + "content": "\\| e^{-i\\langle \\mathbf{t},\\mathbf{x}\\rangle}\\| _2^2 = 1" + }, + { + "bbox": [ + 46, + 578, + 287, + 674 + ], + "type": "text", + "content": " and the Cauchy-Schwarz inequality, Eq. 18 holds due to Fubini's theorem." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 675, + 287, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 675, + 287, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 675, + 287, + 700 + ], + "type": "text", + "content": "We can then move " + }, + { + "bbox": [ + 46, + 675, + 287, + 700 + ], + "type": "inline_equation", + "content": "\\lambda(R_{sub}^r)" + }, + { + "bbox": [ + 46, + 675, + 287, + 700 + ], + "type": "text", + "content": " on the right hand side of Eq. 20 to the left hand side to get Theorem 1." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "text", + "content": "As shown in Theorem 1, for the sub-region " + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "inline_equation", + "content": "R_{sub}^{r}" + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "text", + "content": ", when the sum of the pixelwise L2 distances between the predicted and GT heatmaps over this entire sub-region is relatively large compared to only over its edges, " + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "text", + "content": " will be the upper bound of the L2 loss w.r.t. this sub-region. Because of this, via minimizing " + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 304, + 422, + 545, + 674 + ], + "type": "text", + "content": ", we can enable the L2 losses w.r.t. all such sub-regions to be smaller. Note that such sub-regions can be easily found, since the edge of a sub-region typically contains many less pixels compared to the entire sub-region in the first place. Furthermore, for sub-regions containing missed or inaccurate body joints in its center, which are precisely the erroneous predictions that need to be corrected, the sum of the pixelwise L2 distances over the entire sub-region will then be much larger compared to only over its edge. Therefore, our method can optimize the model to provide a more accurate localization result for the body joints in different sub-regions of the predicted heatmap at the same time, whereas the existing bottom-up methods, usually relying on the overall L2 loss, do not hold this property. Thus, our method can achieve superior performance for bottom-up human pose estimation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": "Note that during implementation, since " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " itself as an integral is not tractable, inspired by [2], we define " + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "inline_equation", + "content": "\\hat{L}_k" + }, + { + "bbox": [ + 305, + 689, + 545, + 713 + ], + "type": "text", + "content": " as a" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13013" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 85, + 547, + 291 + ], + "blocks": [ + { + "bbox": [ + 112, + 71, + 479, + 82 + ], + "lines": [ + { + "bbox": [ + 112, + 71, + 479, + 82 + ], + "spans": [ + { + "bbox": [ + 112, + 71, + 479, + 82 + ], + "type": "text", + "content": "Table 3. 
Comparisons with bottom-up methods on the COCO test-dev2017 set (single-scale testing)." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 85, + 547, + 291 + ], + "lines": [ + { + "bbox": [ + 49, + 85, + 547, + 291 + ], + "spans": [ + { + "bbox": [ + 49, + 85, + 547, + 291 + ], + "type": "table", + "html": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
OpenPose [5]CVPR 2017VGG-19-61.884.967.557.168.2
Hourglass [24]ECCV 2016Hourglass51256.681.861.849.867.0
Associative Embedding [23]NIPS 2017Hourglass51256.681.861.849.867.0
SPM [25]ICCV 2019Hourglass-66.988.572.962.673.1
MDN [34]CVPR 2020Hourglass-62.985.169.458.871.4
PersonLab [26]ECCV 2018ResNet-152140166.588.072.662.472.3
PifPaf [17]CVPR 2019ResNet-152-66.7--62.472.9
PETR [30]CVPR 2022SWin-L133370.591.578.765.278.0
DEKR [11]CVPR 2021HRNet-W4864070.089.477.365.776.9
PINet [37]NIPS 2021HRNet-W3251266.7----
CIR&QEM [40]AAAI 2022HRNet-W4864071.090.278.266.277.8
CID [36]CVPR 2022HRNet-W4864070.790.377.966.377.8
LOGP-CAP [42]CVPR 2022HRNet-W4864070.889.777.866.777.0
SWAHR [22]CVPR 2021HrHRNet-W4864070.289.976.965.277.0
CenterAttention [4]ICCV 2021HrHRNet-W4864069.689.776.064.976.3
PoseTrans [15]ECCV 2022HrHRNet-W3251267.488.373.962.175.1
HrHRNet [6]CVPR 2020HrHRNet-W3251266.487.572.861.274.2
+ OursHrHRNet-W3251268.9(↑2.5)89.275.763.776.1
HrHRNet [6]CVPR 2020HrHRNet-W4864068.488.275.164.474.2
+ OursHrHRNet-W4864071.1(↑2.7)90.478.266.977.2
", + "image_path": "15920ca6c29c7ea94c0cca90d001e98d3fccc10a1ec7c5f7f38167869b29c577.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 308, + 547, + 475 + ], + "blocks": [ + { + "bbox": [ + 113, + 293, + 478, + 304 + ], + "lines": [ + { + "bbox": [ + 113, + 293, + 478, + 304 + ], + "spans": [ + { + "bbox": [ + 113, + 293, + 478, + 304 + ], + "type": "text", + "content": "Table 4. Comparisons with bottom-up methods on the COCO test-dev2017 set (multi-scale testing)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 308, + 547, + 475 + ], + "lines": [ + { + "bbox": [ + 49, + 308, + 547, + 475 + ], + "spans": [ + { + "bbox": [ + 49, + 308, + 547, + 475 + ], + "type": "table", + "html": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Hourglass [24]ECCV 2016Hourglass51263.085.768.958.070.4
Associative Embedding [23]NIPS 2017Hourglass51263.085.768.958.070.4
HGG [16]ECCV 2020Hourglass51267.685.173.762.774.6
SimplePose [18]AAAI 2020IMHN51268.1--66.870.5
PersonLab [26]ECCV 2018-140168.789.075.464.175.5
PETR [30]CVPR 2022SWin-L133371.291.479.666.978.0
Point-Set Anchors [38]ECCV 2020HRNet-W4864068.789.976.364.875.3
DEKR [11]CVPR 2021HRNet-W4864071.089.278.067.176.9
CIR&QEM [40]AAAI 2022HRNet-W4864071.790.478.767.378.5
SWAHR [22]CVPR 2021HrHRNet-W4864072.090.778.867.877.7
CenterAttention [4]ICCV 2021HrHRNet-W4864071.190.577.566.976.7
PoseTrans [15]ECCV 2022HrHRNet-W3251269.989.377.065.276.2
HrHRNet [6]CVPR 2020HrHRNet-W3251269.089.075.864.475.2
+ OursHrHRNet-W3251270.8(↑1.8)90.177.866.077.3
HrHRNet [6]CVPR 2020HrHRNet-W4864070.589.377.266.675.8
+ OursHrHRNet-W4864072.3(↑1.8)91.579.867.978.2
", + "image_path": "772ce1316efd04ebc68d37c4a520a26d12e7371edab3bd47e3fa4b49ed9d0f2a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 484, + 168, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 484, + 168, + 496 + ], + "spans": [ + { + "bbox": [ + 47, + 484, + 168, + 496 + ], + "type": "text", + "content": "tractable alternative of " + }, + { + "bbox": [ + 47, + 484, + 168, + 496 + ], + "type": "inline_equation", + "content": "L_{k}" + }, + { + "bbox": [ + 47, + 484, + 168, + 496 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 60, + 498, + 287, + 530 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 498, + 287, + 530 + ], + "spans": [ + { + "bbox": [ + 60, + 498, + 287, + 530 + ], + "type": "interline_equation", + "content": "\\hat {L} _ {k} = \\sum_ {m = 1} ^ {M} \\| \\frac {\\gamma}{2 U} \\left(\\varphi_ {D (H _ {p})} ^ {k} (\\mathbf {t} _ {m}) - \\varphi_ {D (H _ {g})} ^ {k} (\\mathbf {t} _ {m})\\right) \\| _ {2} ^ {2} \\tag {21}", + "image_path": "1ff95c97eaff6f0a2bf589132e32b862693188aeac6dcd38fb0eb7ca4ce89770.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{t}_1,\\dots ,\\mathbf{t}_M\\}" + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "text", + "content": " denotes a set of " + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "text", + "content": " vectors randomly sampled from " + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "inline_equation", + "content": "B_{U}" + }, + { + "bbox": [ + 47, + 532, + 287, + 555 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 555, + 287, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 555, + 287, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 555, + 287, + 578 + ], + "type": "text", + "content": "The total loss " + }, + { + "bbox": [ + 47, + 555, + 287, + 578 + ], + "type": "inline_equation", + "content": "\\hat{L}_{total}" + }, + { + "bbox": [ + 47, + 555, + 287, + 578 + ], + "type": "text", + "content": " for all body joint types can then be written as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 578, + 287, + 610 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 287, + 610 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 287, + 610 + ], + "type": "interline_equation", + "content": "\\hat {L} _ {\\text {t o t a l}} = \\sum_ {k = 1} ^ {K} \\hat {L} _ {k} \\tag {22}", + "image_path": "5e302e3ea023d5c14406304bf35eb4a57dfb78aaf96bf20db65dd50299c8d97c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 623, + 205, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 623, + 205, + 636 + ], + "spans": [ + { + "bbox": [ + 47, + 623, + 205, + 636 + ], + "type": "text", + "content": "3.4. 
Overall Training and Testing" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 641, + 287, + 714 + ], + "type": "text", + "content": "Here we discuss the overall training and testing scheme of our method. Specifically, during training, we supervise the predicted heatmaps via the total loss in Eq. 22 instead of using the commonly used overall L2 loss, and following [6, 22, 23], we conduct grouping via associate embedding. During testing, we follow the evaluation procedure of" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "spans": [ + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "text", + "content": "previous works [6, 22] that conduct bottom-up human pose estimation. Note that in experiments, it is easy to implement " + }, + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "inline_equation", + "content": "\\hat{L}_k" + }, + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "text", + "content": " in Eq. 21, and we provide more details on how we implement " + }, + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "inline_equation", + "content": "\\hat{L}_k" + }, + { + "bbox": [ + 305, + 484, + 545, + 533 + ], + "type": "text", + "content": " in experiments in the supplementary." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 544, + 388, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 544, + 388, + 558 + ], + "spans": [ + { + "bbox": [ + 306, + 544, + 388, + 558 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 565, + 545, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 565, + 545, + 638 + ], + "spans": [ + { + "bbox": [ + 304, + 565, + 545, + 638 + ], + "type": "text", + "content": "To evaluate the effectiveness of our method for bottom-up human pose estimation, we conduct experiments on the COCO dataset [21] and the CrowdPose dataset [19]. Besides, we also test the effectiveness of our method on top-down methods in the supplementary. We conduct our experiments on RTX 3090 GPUs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 647, + 455, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 647, + 455, + 659 + ], + "spans": [ + { + "bbox": [ + 305, + 647, + 455, + 659 + ], + "type": "text", + "content": "4.1. COCO Keypoint Detection" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 713 + ], + "type": "text", + "content": "Dataset & evaluation metric. The COCO dataset [21] contains over 200k images, and in this dataset, each person instance is annotated with 17 body joints. 
This dataset consists of three subsets including COCO training set (57k" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "13014" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 85, + 547, + 269 + ], + "blocks": [ + { + "bbox": [ + 153, + 71, + 438, + 82 + ], + "lines": [ + { + "bbox": [ + 153, + 71, + 438, + 82 + ], + "spans": [ + { + "bbox": [ + 153, + 71, + 438, + 82 + ], + "type": "text", + "content": "Table 5. Comparisons with bottom-up methods on the CrowdPose testing set." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 85, + 547, + 269 + ], + "lines": [ + { + "bbox": [ + 49, + 85, + 547, + 269 + ], + "spans": [ + { + "bbox": [ + 49, + 85, + 547, + 269 + ], + "type": "table", + "html": "
MethodVenueBackboneInput sizeAP\\( AP^{50} \\)\\( AP^{75} \\)\\( APE \\)\\( APM \\)\\( AP^H \\)
w/ single-scale testing
OpenPose [5]CVPR 2017VGG-19----62.748.732.3
HrHRNet [6]CVPR 2020HrHRNet-W4864065.986.470.673.366.557.9
PETR [30]CVPR 2022--72.090.978.878.072.565.4
DEKR [11]CVPR 2021HRNet-W4864067.386.472.274.668.158.7
PINet [37]NIPS 2021HRNet-W3251268.988.774.775.469.661.5
CID [36]CVPR 2022HRNet-W4864072.390.877.978.773.064.8
SWAHR [22]CVPR 2021HrHRNet-W4864071.688.577.678.972.463.0
CenterAttention [4]ICCV 2021HrHRNet-W4864067.687.772.773.968.260.3
OursHrHRNet-W4864072.688.878.979.273.165.6
w/ multi-scale testing
HrHRNet [6]CVPR 2020HrHRNet-W4864067.687.472.675.868.158.9
DEKR [11]CVPR 2021HRNet-W4864068.085.573.476.668.858.4
PINet [37]NIPS 2021HRNet-W3251269.889.175.676.470.562.2
SWAHR [22]CVPR 2021HrHRNet-W4864073.890.579.981.274.764.7
CenterAttention [4]ICCV 2021HrHRNet-W4864069.488.674.676.670.061.5
OursHrHRNet-W4864074.190.780.281.374.965.1
", + "image_path": "a21c48680ca23caffa35f261f55784f258c39e6c55871b96d5a68619500087bc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "spans": [ + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "content": "images), COCO validation set (5k images), and COCO test-dev set (20k images). Following the train-test split of [22], we report results on the val2017 set and test-dev2017 set. Also following [22], we evaluate model performance using standard average precision (AP) calculated based on Object Keypoint Similarity (OKS) on this dataset, and report the following metrics: AP, " + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{50}" + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{75}" + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{\\mathrm{M}}" + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{\\mathrm{L}}" + }, + { + "bbox": [ + 46, + 278, + 287, + 361 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "spans": [ + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "content": "Implementation details. Following [4, 22], we use the HrHRNet [6] as the baseline, and apply our proposed method to the respective two backbones including HrHRNet-W32 and HrHRNet-W48. For these backbones, we follow their original training and testing configurations specified in [6]. Also following [6], we adopt three scales 0.5, 1, and 2 in multi-scale testing. To calculate " + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "inline_equation", + "content": "\\hat{L}_k" + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "content": " following Eq. 21, we set the number of samples " + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "content": " to 256 and the hyperparameter " + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "content": " w.r.t. the finite range " + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "inline_equation", + "content": "B_U" + }, + { + "bbox": [ + 46, + 361, + 287, + 481 + ], + "type": "text", + "content": " to 64 in our experiments." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 482, + 288, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 482, + 288, + 626 + ], + "spans": [ + { + "bbox": [ + 46, + 482, + 288, + 626 + ], + "type": "text", + "content": "Results. In Tab. 1 and Tab. 2, we report single-scale testing and multi-scale testing results on the COCO val2017 set. In Tab. 3 and Tab. 
4, we report single-scale testing and multi-scale testing results on the COCO test-dev2017 set. We observe that after applying our method on both HrHRNet-W32 and HrHRNet-W48, a significant performance improvement is achieved, demonstrating the effectiveness of our method. Moreover, we also compare our method with other state-of-the-art bottom-up human pose estimation methods. Compared to these methods, our method consistently achieves the highest AP score, further demonstrating the effectiveness of our method." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 635, + 124, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 635, + 124, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 635, + 124, + 647 + ], + "type": "text", + "content": "4.2. CrowdPose" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Dataset & evaluation metric. The CrowdPose dataset [19] contains about 20k images and 80k person instances, which are annotated with 14 body joints. This dataset consists of three subsets including CrowdPose training set (10k images), CrowdPose validation set (2k images), and Crowd-" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": "Pose testing set (8k images). Following the train-test split of [6, 22], we report results on the testing set. Also following [6, 22], we evaluate model performance using standard AP calculated based on OKS on the CrowdPose dataset, and report the following metrics: AP, " + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{50}" + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{75}" + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{\\mathrm{E}}" + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{\\mathrm{M}}" + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^{\\mathrm{H}}" + }, + { + "bbox": [ + 304, + 278, + 545, + 348 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "text", + "content": "Implementation details. On the CrowdPose dataset, we also use the HrHRNet [6] as the baseline, and we use HrHRNet-W48 as the backbone following [4,6,22]. We follow the original training and testing configurations specified in [6], and also follow [6] to adopt three scales 0.5, 1, and 2 in multi-scale testing. 
Besides, same as the experiments on the COCO dataset, we also set the number of samples " + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "text", + "content": " to 256 and the hyperparameter " + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "text", + "content": " w.r.t. the finite range " + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "inline_equation", + "content": "B_U" + }, + { + "bbox": [ + 304, + 350, + 546, + 456 + ], + "type": "text", + "content": " to 64 on the CrowdPose dataset." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 458, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 458, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 458, + 545, + 506 + ], + "type": "text", + "content": "Results. In Tab. 5, we report the single-scale testing and multi-scale testing results on the CrowdPose testing set. As shown, our method consistently achieves the highest AP score, demonstrating the effectiveness of our method." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 514, + 406, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 514, + 406, + 526 + ], + "spans": [ + { + "bbox": [ + 306, + 514, + 406, + 526 + ], + "type": "text", + "content": "4.3. Ablation Studies" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 533, + 545, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 533, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 304, + 533, + 545, + 569 + ], + "type": "text", + "content": "We conduct ablation studies on the COCO validation set via applying our proposed method on HrHRNet-W32 [6] with single-scale testing." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": "Impact of the number of samples " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": ". To calculate " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "\\hat{L}_k" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": " following Eq. 21, we need to set the number of samples " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": ", which we set to 256 in our experiments. We evaluate other choices of the number of samples " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": " in Tab. 6. As shown, all variants outperform the baseline method, and after the number of samples " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": " becomes larger than 256 the model performance becomes stabilized. 
Therefore, we set the number of samples " + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 304, + 569, + 545, + 665 + ], + "type": "text", + "content": " to be 256 in our experiments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": "Impact of the finite range " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "B_U" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " with different " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": ". We evaluate different choices of " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " in Tab. 7. As shown, all variants outperform the baseline method, and after the hyperparameter " + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 304, + 665, + 545, + 713 + ], + "type": "text", + "content": " becomes larger than 64, the model performance does" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13015" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 71, + 541, + 247 + ], + "blocks": [ + { + "bbox": [ + 49, + 71, + 541, + 247 + ], + "lines": [ + { + "bbox": [ + 49, + 71, + 541, + 247 + ], + "spans": [ + { + "bbox": [ + 49, + 71, + 541, + 247 + ], + "type": "table", + "html": "
(a)(b)(c)(d)
Body joint name:Right shouldersRight eyesRight elbowsLeft shoulders
Baseline (HrHRNet-W32) result:
Ours result:
", + "image_path": "f458311b391225e64cfcb67431098212a095bd3e912c0e465b102c6e1346bb88.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 248, + 546, + 293 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 248, + 546, + 293 + ], + "spans": [ + { + "bbox": [ + 46, + 248, + 546, + 293 + ], + "type": "text", + "content": "Figure 2. Qualitative results of our method and the baseline HrHRNet-W32 model [6]. As shown, the baseline method misses body joints (in (a) and (b)) or misidentifies body joints (in (c) and (d)) in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). Meanwhile, our method provides a more accurate localization result for the body joints of different people in different sub-regions of the predicted heatmap at the same time. More qualitative results are in the supplementary. (Best viewed in color.)" + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "type": "table", + "bbox": [ + 53, + 312, + 287, + 404 + ], + "blocks": [ + { + "bbox": [ + 75, + 298, + 258, + 309 + ], + "lines": [ + { + "bbox": [ + 75, + 298, + 258, + 309 + ], + "spans": [ + { + "bbox": [ + 75, + 298, + 258, + 309 + ], + "type": "text", + "content": "Table 6. Evaluation on the number of samples " + }, + { + "bbox": [ + 75, + 298, + 258, + 309 + ], + "type": "inline_equation", + "content": "M" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 312, + 287, + 404 + ], + "lines": [ + { + "bbox": [ + 53, + 312, + 287, + 404 + ], + "spans": [ + { + "bbox": [ + 53, + 312, + 287, + 404 + ], + "type": "table", + "html": "
MethodAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Baseline(HrHRNet-W32)67.186.273.061.576.1
4 samples67.986.973.862.476.9
16 samples68.987.574.863.577.4
64 samples69.687.975.663.977.8
256 samples69.988.176.064.278.1
1024 samples69.888.276.064.378.0
", + "image_path": "76c4f1689cbc34dfc011608c9c3d47b53e30757c89b142d30ea1f7184cf370c2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 412, + 287, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 287, + 436 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 287, + 436 + ], + "type": "text", + "content": "not enhance anymore. Thus, we set the hyperparameter " + }, + { + "bbox": [ + 47, + 412, + 287, + 436 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 47, + 412, + 287, + 436 + ], + "type": "text", + "content": " to be 64 in our experiments." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 53, + 463, + 287, + 554 + ], + "blocks": [ + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "lines": [ + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "spans": [ + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "type": "text", + "content": "Table 7. Evaluation on the hyperparameter " + }, + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "type": "text", + "content": " w.r.t. the finite range " + }, + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "type": "inline_equation", + "content": "{B}_{U}" + }, + { + "bbox": [ + 47, + 437, + 287, + 459 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 463, + 287, + 554 + ], + "lines": [ + { + "bbox": [ + 53, + 463, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 53, + 463, + 287, + 554 + ], + "type": "table", + "html": "
MethodAP\\( AP^{50} \\)\\( AP^{75} \\)\\( AP^M \\)\\( AP^L \\)
Baseline(HrHRNet-W32)67.186.273.061.576.1
U = 867.786.773.562.276.5
U = 1668.687.374.262.976.9
U = 3269.487.875.463.777.6
U = 6469.988.176.064.278.1
U = 12869.888.075.864.078.0
", + "image_path": "016223ba8db63ee2e4482179ead189db24e2ce9b7546e2e77d3f63ac46e9e2ca.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 558, + 287, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 558, + 287, + 677 + ], + "spans": [ + { + "bbox": [ + 46, + 558, + 287, + 677 + ], + "type": "text", + "content": "Training time. On the COCO dataset, we test the training time of our method that trains the backbone model (HrHRNet-W32 [6]) with the loss function in Eq. 22, and compare it with the training time of the baseline that trains the same network with the overall L2 loss. As shown in Tab. 8, though our method achieves much better performance, it brings only very little increase of the training time. Note that as we follow the same evaluation procedure of previous works [6, 22], the testing time with and without our proposed method are the same." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 677, + 287, + 713 + ], + "type": "text", + "content": "Qualitative results. Some qualitative results are shown in Fig. 2. As shown, the baseline method which uses the overall L2 loss to optimize the heatmap prediction can miss or" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 312, + 545, + 349 + ], + "blocks": [ + { + "bbox": [ + 350, + 298, + 501, + 310 + ], + "lines": [ + { + "bbox": [ + 350, + 298, + 501, + 310 + ], + "spans": [ + { + "bbox": [ + 350, + 298, + 501, + 310 + ], + "type": "text", + "content": "Table 8. Comparison of the training time." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 312, + 545, + 349 + ], + "lines": [ + { + "bbox": [ + 310, + 312, + 545, + 349 + ], + "spans": [ + { + "bbox": [ + 310, + 312, + 545, + 349 + ], + "type": "table", + "html": "
MethodTraining time per epochPerformance(AP)
Baseline(HrHRNet-W32)1.11h67.1
Baseline + Ours1.19h69.9
", + "image_path": "9729b02e2db8fdef0dbb2e980c6141e0f54b4e4daf4ac011243cc5291db407b7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 358, + 546, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 358, + 546, + 429 + ], + "spans": [ + { + "bbox": [ + 304, + 358, + 546, + 429 + ], + "type": "text", + "content": "get inaccurate body joints in some sub-regions of the predicted heatmap (see the sub-regions framed with dashed lines). In contrast, our method locates body joints of different people in different sub-regions of the predicted heatmap more accurately at the same time, demonstrating the effectiveness of our method." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 441, + 378, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 441, + 378, + 453 + ], + "spans": [ + { + "bbox": [ + 306, + 441, + 378, + 453 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 460, + 546, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 460, + 546, + 651 + ], + "spans": [ + { + "bbox": [ + 304, + 460, + 546, + 651 + ], + "type": "text", + "content": "In this paper, we have proposed a novel bottom-up human pose estimation method that optimizes the heatmap prediction via minimizing the distance between two characteristic functions respectively constructed from the predicted and GT heatmaps. We theoretically analyze that the distance between the two characteristic functions is the upper bound of the L2 losses w.r.t. sub-regions of the predicted heatmap. Thus, via minimizing the distance between the two characteristic functions, our method locates body joints in different sub-regions of the predicted heatmap more accurately at the same time. Our method achieves superior performance on the COCO dataset and the CrowdPose dataset. Besides, our method could potentially also be applied in other tasks such as multi-object 6D pose estimation [1], facial landmark extraction [3], and fingerprint minutiae detection [10]. We leave this as our future work." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 651, + 545, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 651, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 304, + 651, + 545, + 712 + ], + "type": "text", + "content": "Acknowledgement. This work is supported by MOE AcRF Tier 2 (Proposal ID: T2EP20222-0035), National Research Foundation Singapore under its AI Singapore Programme (AISG-100E-2020-065), and SUTD SKI Project (SKI 2021_02_06)." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 318, + 757 + ], + "type": "text", + "content": "13016" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 147 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 147 + ], + "type": "text", + "content": "[1] Arash Amini, Arul Selvam Periyasamy, and Sven Behnke. Yolopose: Transformer-based multi-object 6d pose estimation using keypoint regression. In Intelligent Autonomous Systems 17: Proceedings of the 17th International Conference IAS-17, pages 392–406. Springer, 2023. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "spans": [ + { + "bbox": [ + 53, + 148, + 288, + 203 + ], + "type": "text", + "content": "[2] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7478-7487, 2020. 3, 4, 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 288, + 237 + ], + "type": "text", + "content": "[3] Matteo Bodini. A review of facial landmark extraction in 2d images and videos using deep learning. *Big Data and Cognitive Computing*, 3(1):14, 2019. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 239, + 287, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 239, + 287, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 239, + 287, + 294 + ], + "type": "text", + "content": "[4] Guillem Brasó, Nikita Kister, and Laura Leal-Taixe. The center of attention: Center-keypoint grouping via attention for multi-person pose estimation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11853-11863, 2021. 5, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 295, + 287, + 350 + ], + "type": "text", + "content": "[5] Zhe Cao, Tomas Simon, Shih-En Wei, and Yaser Sheikh. Realtime multi-person 2d pose estimation using part affinity fields. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7291–7299, 2017. 
1, 2, 3, 5, 6, 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 352, + 287, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 352, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 53, + 352, + 287, + 418 + ], + "type": "text", + "content": "[6] Bowen Cheng, Bin Xiao, Jingdong Wang, Honghui Shi, Thomas S Huang, and Lei Zhang. Higherhrnet: Scale-aware representation learning for bottom-up human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5386-5395, 2020. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 419, + 288, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 419, + 288, + 475 + ], + "spans": [ + { + "bbox": [ + 53, + 419, + 288, + 475 + ], + "type": "text", + "content": "[7] Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, and Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 183-192, 2020. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 475, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 475, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 475, + 287, + 520 + ], + "type": "text", + "content": "[8] Kacper P Chwialkowski, Aaditya Ramdas, Dino Sejdinovic, and Arthur Gretton. Fast two-sample testing with analytic representations of probability measures. Advances in Neural Information Processing Systems, 28, 2015. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "spans": [ + { + "bbox": [ + 53, + 521, + 287, + 565 + ], + "type": "text", + "content": "[9] TW Epps and Kenneth J Singleton. An omnibus test for the two-sample problem using the empirical characteristic function. Journal of Statistical Computation and Simulation, 26(3-4):177-203, 1986. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 567, + 287, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 567, + 287, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 567, + 287, + 612 + ], + "type": "text", + "content": "[10] Yulin Feng and Ajay Kumar. Detecting locally, patching globally: An end-to-end framework for high speed and accurate detection of fingerprint minutiae. IEEE Transactions on Information Forensics and Security, 2023. 8" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 287, + 668 + ], + "type": "text", + "content": "[11] Zigang Geng, Ke Sun, Bin Xiao, Zhaoxiang Zhang, and Jingdong Wang. Bottom-up human pose estimation via disentangled keypoint regression. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14676-14686, 2021. 3, 5, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 287, + 712 + ], + "type": "text", + "content": "[12] Kerui Gu, Linlin Yang, and Angela Yao. Removing the bias of integral pose regression. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11067-11076, 2021. 1" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[13] CE Heathcote. A test of goodness of fit for symmetric random variables1. Australian Journal of Statistics, 14(2):172-181, 1972. 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 106, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 106, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 106, + 545, + 161 + ], + "type": "text", + "content": "[14] Eldar Insafutdinov, Leonid Pishchulin, Bjoern Andres, Mykhaylo Andriluka, and Bernt Schiele. Deepercut: A deeper, stronger, and faster multi-person pose estimation model. In European conference on computer vision, pages 34-50. Springer, 2016. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 162, + 545, + 205 + ], + "type": "text", + "content": "[15] Wentao Jiang, Sheng Jin, Wentao Liu, Chen Qian, Ping Luo, and Si Liu. Posetrans: A simple yet effective pose transformation augmentation for human pose estimation. arXiv preprint arXiv:2208.07755, 2022. 5, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 205, + 545, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 205, + 545, + 260 + ], + "spans": [ + { + "bbox": [ + 308, + 205, + 545, + 260 + ], + "type": "text", + "content": "[16] Sheng Jin, Wentao Liu, Enze Xie, Wenhai Wang, Chen Qian, Wanli Ouyang, and Ping Luo. Differentiable hierarchical graph grouping for multi-person pose estimation. In European Conference on Computer Vision, pages 718-734. Springer, 2020. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "spans": [ + { + "bbox": [ + 308, + 261, + 545, + 304 + ], + "type": "text", + "content": "[17] Sven Kreiss, Lorenzo Bertoni, and Alexandre Alahi. Pifpaf: Composite fields for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11977-11986, 2019. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "spans": [ + { + "bbox": [ + 308, + 305, + 545, + 358 + ], + "type": "text", + "content": "[18] Jia Li, Wen Su, and Zengfu Wang. Simple pose: Rethinking and improving a bottom-up approach for multi-person pose estimation. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 11354-11361, 2020. 
6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 360, + 545, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 360, + 545, + 415 + ], + "spans": [ + { + "bbox": [ + 308, + 360, + 545, + 415 + ], + "type": "text", + "content": "[19] Jiefeng Li, Can Wang, Hao Zhu, Yihuan Mao, Hao-Shu Fang, and Cewu Lu. Crowdpose: Efficient crowded scenes pose estimation and a new benchmark. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10863-10872, 2019. 2, 6, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 415, + 545, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 415, + 545, + 459 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 545, + 459 + ], + "type": "text", + "content": "[20] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "spans": [ + { + "bbox": [ + 308, + 459, + 545, + 514 + ], + "type": "text", + "content": "[21] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 2, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 514, + 545, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 514, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 308, + 514, + 545, + 580 + ], + "type": "text", + "content": "[22] Zhengxiong Luo, Zhicheng Wang, Yan Huang, Liang Wang, Tieniu Tan, and Erjin Zhou. Rethinking the heatmap regression for bottom-up human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13264-13273, 2021. 1, 2, 5, 6, 7, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 581, + 545, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 581, + 545, + 624 + ], + "spans": [ + { + "bbox": [ + 308, + 581, + 545, + 624 + ], + "type": "text", + "content": "[23] Alejandro Newell, Zhiao Huang, and Jia Deng. Associative embedding: End-to-end learning for joint detection and grouping. Advances in neural information processing systems, 30, 2017. 1, 2, 3, 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "spans": [ + { + "bbox": [ + 308, + 625, + 545, + 668 + ], + "type": "text", + "content": "[24] Alejandro Newell, Kaiyu Yang, and Jia Deng. Stacked hourglass networks for human pose estimation. In European conference on computer vision, pages 483-499. Springer, 2016. 1, 2, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 308, + 669, + 545, + 712 + ], + "type": "text", + "content": "[25] Xuecheng Nie, Jiashi Feng, Jianfeng Zhang, and Shuicheng Yan. Single-stage multi-person pose machines. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6951-6960, 2019. 
1, 2, 3, 6" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 748, + 317, + 757 + ], + "type": "text", + "content": "13017" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 72, + 287, + 139 + ], + "type": "text", + "content": "[26] George Papandreou, Tyler Zhu, Liang-Chieh Chen, Spyros Gidaris, Jonathan Tompson, and Kevin Murphy. Personlab: Person pose estimation and instance segmentation with a bottom-up, part-based, geometric embedding model. In Proceedings of the European conference on computer vision (ECCV), pages 269-286, 2018. 1, 2, 3, 5, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 140, + 288, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 140, + 288, + 205 + ], + "spans": [ + { + "bbox": [ + 48, + 140, + 288, + 205 + ], + "type": "text", + "content": "[27] Leonid Pishchulin, Eldar Insafutdinov, Siyu Tang, Bjoern Andres, Mykhaylo Andriluka, Peter V Gehler, and Bernt Schiele. Deepcut: Joint subset partition and labeling for multi person pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4929-4937, 2016. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 205, + 288, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 205, + 288, + 259 + ], + "spans": [ + { + "bbox": [ + 48, + 205, + 288, + 259 + ], + "type": "text", + "content": "[28] Xuelin Qian, Yanwei Fu, Tao Xiang, Wenxuan Wang, Jie Qiu, Yang Wu, Yu-Gang Jiang, and Xiangyang Xue. Pose-normalized image generation for person re-identification. In Proceedings of the European conference on computer vision (ECCV), pages 650–667, 2018. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 261, + 287, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 261, + 287, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 261, + 287, + 293 + ], + "type": "text", + "content": "[29] Haoxuan Qu, Li Xu, Yujun Cai, Lin Geng Foo, and Jun Liu. Heatmap distribution matching for human pose estimation. In Advances in Neural Information Processing Systems. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 294, + 287, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 287, + 347 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 287, + 347 + ], + "type": "text", + "content": "[30] Dahu Shi, Xing Wei, Liangqi Li, Ye Ren, and Wenming Tan. End-to-end multi-person pose estimation with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11069-11078, 2022. 5, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 349, + 287, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 349, + 287, + 392 + ], + "spans": [ + { + "bbox": [ + 48, + 349, + 287, + 392 + ], + "type": "text", + "content": "[31] Weibo Shu, Jia Wan, Kay Chen Tan, Sam Kwong, and Antoni B Chan. 
Crowd counting in the frequency domain. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19618-19627, 2022. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 393, + 287, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 393, + 287, + 446 + ], + "spans": [ + { + "bbox": [ + 48, + 393, + 287, + 446 + ], + "type": "text", + "content": "[32] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5693-5703, 2019. 1, 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "spans": [ + { + "bbox": [ + 48, + 449, + 287, + 491 + ], + "type": "text", + "content": "[33] Jonathan J. Thompson, Arjun Jain, Yann LeCun, and Christoph Bregler. Joint training of a convolutional network and a graphical model for human pose estimation. Advances in neural information processing systems, 27, 2014. 1, 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 492, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 492, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 492, + 287, + 546 + ], + "type": "text", + "content": "[34] Ali Varamesh and Tinne Tuytelaars. Mixture dense regression for object detection and human pose estimation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13083-13092. IEEE, 2020. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 548, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 548, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 548, + 287, + 601 + ], + "type": "text", + "content": "[35] Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9469-9478, 2019. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 603, + 287, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 603, + 287, + 646 + ], + "spans": [ + { + "bbox": [ + 48, + 603, + 287, + 646 + ], + "type": "text", + "content": "[36] Dongkai Wang and Shiliang Zhang. Contextual instance decoupling for robust multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11060-11068, 2022. 5, 6, 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 647, + 287, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 287, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 287, + 690 + ], + "type": "text", + "content": "[37] Dongkai Wang, Shiliang Zhang, and Gang Hua. Robust pose estimation in crowded scenes with direct pose-level inference. Advances in Neural Information Processing Systems, 34:6278-6289, 2021. 5, 6, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 48, + 691, + 287, + 713 + ], + "type": "text", + "content": "[38] Fangyun Wei, Xiao Sun, Hongyang Li, Jingdong Wang, and Stephen Lin. 
Point-set anchors for object detection, instance" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 547, + 409 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 326, + 73, + 545, + 95 + ], + "type": "text", + "content": "segmentation and pose estimation. In European Conference on Computer Vision, pages 527-544. Springer, 2020. 5, 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 547, + 139 + ], + "type": "text", + "content": "[39] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. In Proceedings of the European conference on computer vision (ECCV), pages 466-481, 2018. 1, 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 141, + 547, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 547, + 195 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 547, + 195 + ], + "type": "text", + "content": "[40] Yabo Xiao, Dongdong Yu, Xiao Juan Wang, Lei Jin, Guoli Wang, and Qian Zhang. Learning quality-aware representation for multi-person pose regression. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 2822-2830, 2022. 5, 6" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 547, + 251 + ], + "type": "text", + "content": "[41] Jiangtao Xie, Fei Long, Jiaming Lv, Qilong Wang, and Peihua Li. Joint distribution matters: Deep brownian distance covariance for few-shot classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7972-7981, 2022. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 253, + 545, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 253, + 545, + 306 + ], + "spans": [ + { + "bbox": [ + 307, + 253, + 545, + 306 + ], + "type": "text", + "content": "[42] Nan Xue, Tianfu Wu, Gui-Song Xia, and Liangpei Zhang. Learning local-global contextual adaptation for multi-person pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13065-13074, 2022. 5, 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "spans": [ + { + "bbox": [ + 307, + 308, + 545, + 352 + ], + "type": "text", + "content": "[43] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial temporal graph convolutional networks for skeleton-based action recognition. In Thirty-second AAAI conference on artificial intelligence, 2018. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 353, + 545, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 353, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 307, + 353, + 545, + 386 + ], + "type": "text", + "content": "[44] Yuhui Yuan, Rao Fu, Lang Huang, Weihong Lin, Chao Zhang, Xilin Chen, and Jingdong Wang. Hrformer: High-resolution transformer for dense prediction. 
2021. 1, 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 387, + 545, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 387, + 545, + 409 + ], + "spans": [ + { + "bbox": [ + 307, + 387, + 545, + 409 + ], + "type": "text", + "content": "[45] Xingyi Zhou, Dequan Wang, and Philipp Krahenbuhl. Objects as points. arXiv preprint arXiv:1904.07850, 2019. 3" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "spans": [ + { + "bbox": [ + 294, + 749, + 318, + 757 + ], + "type": "text", + "content": "13018" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_content_list.json b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7e6d25471df554e7974fd28eaba8a813e39d117b --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_content_list.json @@ -0,0 +1,1868 @@ +[ + { + "type": "text", + "text": "A Data-Based Perspective on Transfer Learning", + "text_level": 1, + "bbox": [ + 241, + 130, + 728, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Saachi Jain*", + "bbox": [ + 228, + 180, + 328, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MIT", + "bbox": [ + 256, + 199, + 295, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "saachij@mit.edu", + "bbox": [ + 187, + 217, + 367, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hadi Salman*", + "bbox": [ + 437, + 181, + 550, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MIT", + "bbox": [ + 470, + 199, + 509, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hady@mit.edu", + "bbox": [ + 418, + 217, + 562, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alaa Khaddaj *", + "bbox": [ + 640, + 181, + 763, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MIT", + "bbox": [ + 678, + 199, + 718, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "alaakah@mit.edu", + "bbox": [ + 614, + 217, + 782, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eric Wong", + "bbox": [ + 233, + 258, + 323, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Pennsylvania", + "bbox": [ + 171, + 277, + 387, + 294 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "exwong@seas.upenn.edu", + "bbox": [ + 155, + 296, + 403, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sung Min Park", + "bbox": [ + 467, + 258, + 589, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MIT", + "bbox": [ + 509, + 277, + 547, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sp765@mit.edu", + "bbox": [ + 452, + 296, + 606, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aleksander Mądry", + "bbox": [ + 660, + 258, + 808, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MIT", + "bbox": [ + 715, + 277, + 751, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "madry@mit.edu", + "bbox": [ + 653, 
+ 296, + 808, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 345, + 313, + 362 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "It is commonly believed that in transfer learning including more pre-training data translates into better performance. However, recent evidence suggests that removing data from the source dataset can actually help too. In this work, we take a closer look at the role of the source dataset's composition in transfer learning and present a framework for probing its impact on downstream performance. Our framework gives rise to new capabilities such as pinpointing transfer learning brittleness as well as detecting pathologies such as data-leakage and the presence of misleading examples in the source dataset. In particular, we demonstrate that removing detrimental datapoints identified by our framework indeed improves transfer learning performance from ImageNet on a variety of target tasks.", + "bbox": [ + 75, + 378, + 473, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 619, + 209, + 633 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Transfer learning enables us to adapt a model trained on a source dataset to perform better on a downstream target task. This technique is employed in a range of machine learning applications including radiology [23, 45], autonomous driving [11, 24], and satellite imagery analysis [44, 47]. Despite its successes, however, it is still not clear what the drivers of performance gains brought by transfer learning actually are.", + "bbox": [ + 75, + 641, + 470, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "So far, a dominant approach to studying these drivers focused on the role of the source model—i.e., the model trained on the source dataset. The corresponding works involve investigating the source model's architecture [23], accuracy [27], adversarial vulnerability [42, 43], and training procedure [21, 30]. This line of work makes it clear that the properties of the source model has a significant impact on", + "bbox": [ + 75, + 747, + 470, + 853 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "transfer learning. There is some evidence, however, that the source dataset might play an important role as well [18, 26, 38]. For example, several works have shown that while increasing the size of the source dataset generally boosts transfer learning performance, removing specific classes can help too [18, 26, 38]. All of this motivates a natural question:", + "bbox": [ + 500, + 348, + 893, + 439 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How can we pinpoint the exact impact of the source dataset in transfer learning?", + "bbox": [ + 500, + 458, + 893, + 488 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Our Contributions. In this paper, we present a framework for measuring and analyzing the impact of the source dataset's composition on transfer learning performance. To do this, our framework provides us with the ability to investigate the counterfactual impact on downstream predictions of including or excluding datapoints from the source dataset, drawing inspiration from classical supervised learning techniques such as influence functions [7, 13, 25] and datamodels [19]. 
Using our framework, we can:", + "bbox": [ + 496, + 517, + 895, + 654 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Pinpoint what parts of the source dataset are most utilized by the downstream task.", + "- Automatically extract granular subpopulations in the target dataset through projection of the fine-grained labels of the source dataset.", + "- Surface pathologies such as source-target data leakage and mislabelled source datapoints." + ], + "bbox": [ + 517, + 675, + 893, + 819 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We also demonstrate how our framework can be used to find detrimental subsets of ImageNet [9] that, when removed, give rise to better downstream performance on a variety of image classification tasks.", + "bbox": [ + 500, + 839, + 893, + 900 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 94, + 863, + 205, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Code is available at https://github.com/MadryLab/data-ansfer", + "bbox": [ + 96, + 875, + 470, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "3613", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2. A Data-Based Framework for Studying Transfer Learning", + "text_level": 1, + "bbox": [ + 76, + 89, + 470, + 125 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In order to pinpoint the role of the source dataset in transfer learning, we need to understand how the composition of that source dataset impacts the downstream model's performance. To do so, we draw inspiration from supervised machine learning approaches that study the impact of the training data on the model's subsequent predictions. In particular, these approaches capture this impact via studying (and approximating) the counterfactual effect of excluding certain training datapoints. This paradigm underlies a number of techniques, from influence functions [7, 13, 25], to datamodels [19], to data Shapley values [14, 22, 31].", + "bbox": [ + 75, + 132, + 472, + 297 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Now, to adapt this paradigm to our setting, we study the counterfactual effect of excluding datapoints from the source dataset on the downstream, target task predictions. In our framework, we will focus on the inclusion or exclusion of entire classes in the source dataset, as opposed to individual examples2. This is motivated by the fact that, intuitively, we expect these classes to be the ones that embody whole concepts and thus drive the formation of (transferred) features. 
We therefore anticipate the removal of entire classes to have a more measurable impact on the representation learned by the source model (and consequently on the downstream model's predictions).", + "bbox": [ + 75, + 299, + 473, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Once we have chosen to focus on removal of entire source classes, we can design counterfactual experiments to estimate their influences. A natural approach here, the leave-one-out method [7, 25], would involve removing each individual class from the source dataset separately and then measuring the change in the downstream model's predictions. However, in the transfer learning setting, we suspect that removing a single class from the source dataset won't significantly change the downstream model's performance. Thus, leave-one-out methodology may be able to capture meaningful influences only in rare cases. This is especially so as many common source datasets contain highly redundant classes. For example, ImageNet contains over 100 dog-breed classes. The removal of a single dog-breed class might thus have a negligible impact on transfer learning performance, but the removal of all of the dog classes might significantly change the features learned by the downstream model. For these reasons, we adapt the subsampling [13, 19] approach, which revolves around removing a random collection of source classes at once.", + "bbox": [ + 75, + 481, + 473, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Computing transfer influences. In the light of the above, our methodology for computing the influence of source classes on transfer learning performance involves training a large number of models with random subsets of the source", + "bbox": [ + 75, + 803, + 470, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Algorithm 1 Estimation of source dataset class influences on transfer learning performance.", + "bbox": [ + 500, + 90, + 890, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Require: Source dataset $\\mathcal{S} = \\cup_{k=1}^{K} \\mathcal{C}_k$ (with $K$ classes), a target dataset $\\mathcal{T} = (t_1, t_2, \\dots, t_n)$ , training algorithm $\\mathcal{A}$ , subset ratio $\\alpha$ , and number of models $m$", + "bbox": [ + 500, + 125, + 893, + 172 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {\\sum_ {i = 1} ^ {m} f _ {i} \\left(t _ {j} ; S _ {i}\\right) \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}{\\sum_ {i = 1} ^ {m} \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}\n$$\n", + "text_format": "latex", + "bbox": [ + 531, + 299, + 661, + 324 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: Sample $m$ random subsets $S_{1}, S_{2}, \\dots, S_{m} \\subset \\mathcal{S}$ of size $\\alpha \\cdot |\\mathcal{S}|$ :", + "2: for $i \\in 1$ to $m$ do", + "3: Train model $f_{i}$ by running algorithm $\\mathcal{A}$ on $S_{i}$", + "4: end for", + "5: for $k \\in 1$ to $K$ do", + "6: for $j\\in 1$ to $n$ do", + "7: $\\operatorname{Infl}[\\mathcal{C}_k \\to t_j] = \\frac{\\sum_{i=1}^m f_i(t_j; S_i) \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}}{\\sum_{i=1}^m \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}} -$", + "8: end for", + "9: end for", + "10: return Infl $\\left[\\mathcal{C}_k\\rightarrow t_j\\right]$ , for all $j\\in [n],k\\in [K]$" + ], + "bbox": [ + 504, + 172, + 890, + 368 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "classes removed, and fine-tuning these models on the target task. 
We then estimate the influence value of a source class $\\mathcal{C}$ on a target example $t$ as the expected difference in the transfer model's performance on example $t$ when class $\\mathcal{C}$ was either included in or excluded from the source dataset:", + "bbox": [ + 496, + 398, + 893, + 474 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {I n f l} [ \\mathcal {C} \\rightarrow t ] = \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\subset S ] - \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\not \\subset S ], \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 500, + 484, + 892, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $f(t; S)$ is the softmax output3 of a model trained on a subset $S$ of the source dataset. A positive influence value indicates that including the source class $\\mathcal{C}$ helps the model predict the target example $t$ correctly. On the other hand, a negative influence value suggests that the source class $\\mathcal{C}$ actually hurts the model's performance on the target example $t$ . We outline the overall procedure in Algorithm 1, and defer a detailed description of our approach to Appendix A.", + "bbox": [ + 496, + 531, + 893, + 652 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A note on computational costs. In order to compute transfer influences, we need to train a large number of source models, each on a fraction of the source dataset. Specifically, we pre-train 7,540 models on ImageNet, each on a randomly chosen $50\\%$ of the ImageNet dataset. This pre-training step needs to be performed only once though: these same models can then be used to fine-tune on each new target task. Overall, the whole process (training the source models and fine-tuning on target datasets) takes less than 20 days using 8 V100 GPUs4.", + "bbox": [ + 496, + 671, + 893, + 820 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Are so many models necessary? In Section A.5, we explore computing transfer influences with smaller numbers", + "bbox": [ + 496, + 821, + 893, + 853 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2In Section 4.3, we adapt our framework to calculate more granular influences of individual source examples too.", + "bbox": [ + 76, + 875, + 470, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "3We experiment with other outputs such as logits, margins, or correctness too. We discuss the corresponding results in Appendix B.", + "bbox": [ + 500, + 862, + 893, + 887 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "4Details are in Appendix A.", + "bbox": [ + 517, + 887, + 668, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "3614", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/235656645a456c8f109cbf63a2fda6094ec63d0324c222c795449cb3e854a7bb.jpg", + "image_caption": [ + "Figure 1. Most positive and negative ImageNet classes ordered based on their overall influence on the CIFAR-10 dataset. The top source classes (e.g., tailed frog and sorrel horse) turn out to be semantically relevant to the target classes (e.g., frog and horse)." + ], + "image_footnote": [], + "bbox": [ + 122, + 89, + 851, + 263 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5bbcf455e014cb3e5a303487377cb9e1f151c831431e5a96630b2e1cb1590206.jpg", + "image_caption": [ + "(a) CIFAR-10 results", + "Figure 2. 
Target task accuracies after removing the K most positively or negatively influential ImageNet classes from the source dataset. Mean/std are reported over 10 runs. (a) Results with CIFAR-10 as the target task after removing different numbers of classes from the source dataset. We also include baselines of using the full ImageNet dataset and removing random classes. One can note that, by removing negatively influential source classes, we can obtain a test accuracy that is $2.5\\%$ larger than what using the entire ImageNet dataset would yield. Results for other target tasks can be found in Appendix C. (b) Peak performances when removing the most negatively influential source classes across a range of other target tasks. We also compare against using the full ImageNet dataset or a subset of source classes that are semantically relevant to the target classes (defined via the WordNet hierarchy, see Appendix A for details)." + ], + "image_footnote": [], + "bbox": [ + 81, + 324, + 467, + 530 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/48f17def53eb892cdbb330e0f31676796ecd634445e365aa841699e565718edd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Target DatasetSource Dataset
Full ImageNetRemoving Bottom Infl.Semantically Relevant Classes
AIRCRAFT36.08 ± 1.0736.88 ± 0.74N/A
BIRDSNAP38.42 ± 0.4039.19 ± 0.3826.74 ± 0.31
CALTECH10186.69 ± 0.7987.03 ± 0.3082.28 ± 0.40
CALTECH25674.97 ± 0.2775.24 ± 0.2167.42 ± 0.39
CARS39.55 ± 0.3240.59 ± 0.5721.71 ± 0.40
CIFAR1081.16 ± 0.3083.64 ± 0.4075.53 ± 0.42
CIFAR10059.37 ± 0.5861.46 ± 0.5955.21 ± 0.52
FLOWERS82.92 ± 0.5282.89 ± 0.48N/A
FOOD56.19 ± 0.1456.85 ± 0.2739.36 ± 0.39
PETS83.41 ± 0.5587.59 ± 0.2487.16 ± 0.24
SUN39750.15 ± 0.2351.34 ± 0.29N/A
", + "bbox": [ + 501, + 348, + 890, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(b) Summary of 11 target tasks", + "bbox": [ + 622, + 534, + 767, + 546 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "of models. While using the full number of models provides the best results, training a much smaller number of models (e.g., 1000 models, taking slightly over 2.5 days on 8 V100 GPUs) still provides meaningful transfer influences. Thus in practice, one can choose the number of source models based on noise tolerance and computational budget. Further convergence results can be found in Appendix A.5.", + "bbox": [ + 75, + 681, + 470, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Identifying the Most Influential Classes of the Source Dataset", + "text_level": 1, + "bbox": [ + 76, + 808, + 470, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In Section 2, we presented a framework for pinpointing the role of the source dataset in transfer learning by estimating the influence of each source class on the target model's", + "bbox": [ + 76, + 854, + 472, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "predictions. Using these influences, we can now take a look at the classes from the source dataset that have the largest positive or negative impact on the overall transfer learning performance. We focus our analysis on the fixed-weights transfer learning setting (further results, including full model fine-tuning as well as generalization to other architectures, can be found in Appendix E).", + "bbox": [ + 496, + 681, + 893, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As one might expect, not all source classes have large influences. Figure 1 displays the most influential classes of ImageNet with CIFAR-10 as the target task. Notably, the most positively influential source classes turn out to be directly related to classes in the target task (e.g., the ImageNet label \"tailed frog\" is an instance of the CIFAR class \"frog\"). This trend holds across all of the target datasets and transfer", + "bbox": [ + 496, + 795, + 895, + 902 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3615", + "bbox": [ + 482, + 944, + 514, + 957 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/af69a0124de5c06b0afeb91305ad6cd5b33db58ebee1e57128634501170edbbd.jpg", + "image_caption": [ + "Figure 3. Most positive and negative influencing ImageNet classes for the CIFAR-10 class \"bird\". These are calculated by averaging the influence of each source class over all bird examples. We find that the most positively influencing ImageNet classes (e.g., \"ostrich\" and \"bustard\") are related to the CIFAR-10 class \"bird\". See Appendix E for results on other CIFAR-10 classes." + ], + "image_footnote": [], + "bbox": [ + 122, + 89, + 851, + 262 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d1390e88abe3fdca289ecbb34d0c8b4fc504ce154ff647c1ee48fed57f676841.jpg", + "image_caption": [ + "Figure 4. Projecting source labels onto the target dataset. For various target datasets (right), we display the images that were most positively influenced by various ImageNet classes in the source dataset (left). We find that the identified images from the target datasets look similar to the corresponding images in the source dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 338, + 795, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "learning settings we considered (see Appendix C). Interestingly, the source dataset also contains classes that are overall negatively influential for the target task, e.g., \"bookshop\" and \"jigsaw puzzle\" classes. (In Section 4, we will take a closer look at the factors that can cause a source class to be negatively influential for a target prediction.)", + "bbox": [ + 75, + 785, + 473, + 878 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "How important are the most influential source classes? We now remove each of the most influential classes from the source dataset to observe their actual impact on transfer learning performance (Figure 2a). As expected, removing the most positively influential classes severely degrades transfer learning performance as compared to removing random classes. This counterfactual experiment confirms that", + "bbox": [ + 498, + 785, + 895, + 893 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "3616", + "bbox": [ + 482, + 944, + 517, + 957 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d2264184567399bafc67d40f990002acaa9713315000ffaaa13db27892f3beec.jpg", + "image_caption": [ + "Figure 5. The CIFAR-10 images that were most positively (or negatively) influenced by the ImageNet classes \"starfish\" and \"rapeseed.\" CIFAR-10 images that are highly influenced by the \"starfish\" class have similar shapes, while those influenced by \"rapeseed\" class have yellow-green colors." + ], + "image_footnote": [], + "bbox": [ + 127, + 95, + 849, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "these classes are indeed important to the performance of transfer learning. On the other hand, removing the most negatively influential classes actually improves the overall transfer learning performance beyond what using the entire ImageNet dataset provides (see Figure 2b).", + "bbox": [ + 75, + 566, + 468, + 641 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Above, we noted that the top influential source classes are typically related to the classes in the target dataset. What happens if we only choose source classes that are semantically relevant to the classes of the target dataset? Indeed, [38] found that hand-picking such source datasets can sometimes boost transfer learning performance. For each target dataset, we select ImageNet classes that are semantically relevant to the target classes (using the WordNet hierarchy, see Appendix A). As shown in Figure 2b, choosing an optimal subset of classes via transfer influences substantially outperforms this baseline.", + "bbox": [ + 75, + 642, + 472, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Probing the Impact of the Source Dataset on Transfer Learning", + "text_level": 1, + "bbox": [ + 76, + 825, + 470, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Section 3, we developed a methodology for identifying source dataset classes that have the most impact on", + "bbox": [ + 76, + 869, + 472, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "transfer learning performance. 
Now, we demonstrate how this methodology can be extended into a framework for probing and understanding transfer learning, including: (1) identifying granular target subpopulations that correspond to source classes, (2) debugging transfer learning failures, and (3) detecting data leakage between the source and target datasets. We focus our demonstration of these capabilities on a commonly-used transfer learning setting: ImageNet to CIFAR-10 (experimental details are in Appendix A).", + "bbox": [ + 496, + 566, + 893, + 703 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Capability 1: Extracting target subpopulations by projecting source class labels", + "text_level": 1, + "bbox": [ + 498, + 710, + 890, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Imagine that we would like to find all the ostriches in the CIFAR-10 dataset. This is not an easy task as CIFAR-10 only has \"bird\" as a label, and thus lacks sufficiently fine-grained annotations. Luckily, however, ImageNet does contain an ostrich class! Our computed influences enable us to \"project\" this ostrich class annotation (and, more broadly, the fine-grained label hierarchy of our source dataset) to find this subpopulation of interest in the target dataset.", + "bbox": [ + 496, + 750, + 893, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Indeed, our examination from Section 3 suggests that the most positively influencing source classes are typically those", + "bbox": [ + 498, + 869, + 890, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "3617", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6d2ea1b2b94a28a4425c3a0896ca56ae1f95aa6d13736b307476d1adb709d499.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 90, + 272, + 185 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a9ebfc2f64cd6995d626a93161045698d0778ff49ce790f94ebb00b7b85121d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 279, + 95, + 514, + 234 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2a170b84bbf8e8b8c856d0f95e83999c96416ff2e03aeb908c2ff35cac705bf9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 526, + 98, + 805, + 220 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1845b6a248cb356bf82e8f11d5c59f70b443a942e8961b0cd4fca4f91bc1460d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 239, + 264, + 327 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8d9209c766037076e99394926fcabc0d351757387a495ac250a3f1dd2599555a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 276, + 244, + 511, + 385 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6f68316446faa07221a10558bc0324672f1bfa423e1b0cbe30aa0a07b33c395a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 244, + 805, + 363 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/79b6e4ba9a79234ec4b4b92619abdbd026a5488c0465fb6d4b98290c257ff180.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 163, + 391, + 285, + 494 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e542c2d8e5ce40027bcbe6da7b63261105424dddb7af76b0191a2da41a735002.jpg", + "image_caption": [ + "Figure 6. Pinpointing highly negatively influential source classes can help explain model mistakes. 
Left: For three CIFAR-10 images, we plot the most negatively influential source classes. Right: Over 20 runs, the fraction of times that our downstream model predicts each label for the given CIFAR-10 image. When the most negatively influential class is removed, the model predicts the correct label more frequently. More examples can be found in Appendix E." + ], + "image_footnote": [], + "bbox": [ + 285, + 396, + 514, + 532 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3ff9ce6bfa7e5910c4f1b25bcc8ef57b82273fc3f0158f91d370fa9da6b8381d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 531, + 398, + 805, + 529 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "that directly overlap with the target classes (see Figure 1). In particular, for our example, \"ostrich\" is highly positively influential for the \"bird\" class (see Figure 3). To find ostriches in the CIFAR-10 dataset, we thus need to simply surface the CIFAR-10 images which were most positively influenced by the \"ostrich\" source class (see Figure 4).", + "bbox": [ + 75, + 628, + 470, + 720 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It turns out that this type of projection approach can be applied more broadly. Even when the source class is not a direct sub-type of a target class, the downstream model can still leverage salient features from this class — such as shape or color — to predict on the target dataset. For such classes, projecting source labels can extract target subpopulations which share such features. To illustrate this, in Figure 5, we display the CIFAR-10 images that are highly influenced by the classes \"starfish\" and \"rapeseed\" (both of which do not directly appear in the CIFAR-10 dataset). For these classes, the most influenced CIFAR-10 images share the same shape", + "bbox": [ + 75, + 734, + 472, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(\"starfish\") or color (\"rapeseed\") as their ImageNet counterparts. More examples of such projections can be found in Appendix E.", + "bbox": [ + 496, + 628, + 893, + 675 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Capability 2: Debugging the failures of a transferred model", + "text_level": 1, + "bbox": [ + 498, + 691, + 890, + 723 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our framework enables us to also reason about the possible mistakes of the transferred model caused by source dataset classes. For example, consider the CIFAR-10 image of a dog in Figure 6, which our transfer learning model often mispredicts as a horse. Using our framework, we can demonstrate that this image is strongly negatively influenced by the source class \"sorrel horse.\" Thus, our downstream model may be misusing a feature introduced by this class. Indeed, once we remove \"sorrel horse\" from the source dataset, our model predicts the correct label more frequently. 
(See Appendix E for more examples, as well as a quantitative", + "bbox": [ + 496, + 734, + 893, + 902 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "3618", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/24c98d6b8ae189ac6d1ef9136bf53a842c3309af55ad190880401fdc820d1585.jpg", + "image_caption": [ + "ImageNet Images", + "speedboat" + ], + "image_footnote": [], + "bbox": [ + 200, + 128, + 267, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e85f26acb175ee3896045a408094ed3f5c3fcf58e04c2fd87c51f3f53e50e327.jpg", + "image_caption": [ + "Most Positively Influenced", + "tailed frog" + ], + "image_footnote": [], + "bbox": [ + 284, + 130, + 352, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b78187756e5a1e31815a5efcf9654ddfba46d0fe0d09885eda106dba278a588c.jpg", + "image_caption": [ + "warplane" + ], + "image_footnote": [], + "bbox": [ + 367, + 130, + 436, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c6ecb37cff6ebd59ed60cb24773e5d4d7b4c650fdd12f426f957687e164a443.jpg", + "image_caption": [ + "racer" + ], + "image_footnote": [], + "bbox": [ + 450, + 130, + 519, + 184 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/eed8fc068c21a53e01ae744ed7ab3c868b9ee0d231c698c69674f71c8f917c33.jpg", + "image_caption": [ + "CIFAR-10 Images", + "ship" + ], + "image_footnote": [], + "bbox": [ + 197, + 204, + 267, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e912c81e06a985a27646767bf43202c68e2b7794768dca975027ef80217bb3df.jpg", + "image_caption": [ + "frog" + ], + "image_footnote": [], + "bbox": [ + 284, + 205, + 352, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b0ea7a2e6aeb7e18b2eb5b3e683784d135c1b14802e08d9089a3297b547dbaeb.jpg", + "image_caption": [ + "airplane" + ], + "image_footnote": [], + "bbox": [ + 367, + 205, + 436, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8468c3c5b84e6aa89752f0c412ed708c3f429e4924326142a904beeeaaa9ebc5.jpg", + "image_caption": [ + "automobile" + ], + "image_footnote": [], + "bbox": [ + 450, + 205, + 519, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d639a1977a40d0b7f3e7b870929a2f33cf582ab313815455d2a2ed2c8df1d179.jpg", + "image_caption": [ + "Most Negatively Influenced", + "lawnmower" + ], + "image_footnote": [], + "bbox": [ + 563, + 126, + 633, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0f9b8a03d27b4c0fbad31470f2f14afcd3dc3e6ca2f5c8baa59b26de4837af60.jpg", + "image_caption": [ + "minivan" + ], + "image_footnote": [], + "bbox": [ + 651, + 130, + 720, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8f2f49548834a727623bfa1ac95596cd5443628dcc675c037f3bfd7c5c96f2da.jpg", + "image_caption": [ + "wing" + ], + "image_footnote": [], + "bbox": [ + 733, + 130, + 803, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/07faa20b87ec0f33fedc983fef21e0318efa232c8f59100ee0792dc3eddf838b.jpg", + "image_caption": [ + "book jacket" + ], + "image_footnote": [], + "bbox": [ + 818, + 130, + 885, + 183 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e8bbb4fd74bda0757b5385670c20d47e5b9fe2815e1bbaec6ec5bc3dbb996e29.jpg", + "image_caption": [ + "airplane" + ], + "image_footnote": [], + "bbox": [ + 565, + 205, + 633, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/49f1a3efd81fa4fbb77a767b1d63d74a0bc5f7fe04e1188d8207ccfb2defb722.jpg", + "image_caption": [ + "airplane" + ], + "image_footnote": [], + "bbox": [ + 651, + 205, + 720, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/82b6b7dd0adf53a900b3cbfca67ad0b90f25f8ce3c621b5d82dcb88da3996f78.jpg", + "image_caption": [ + "ship" + ], + "image_footnote": [], + "bbox": [ + 733, + 205, + 803, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/84b75cb17c9f7c53e4d9466d0f09618fb86aee0c58db53c3917fddb15d61d952.jpg", + "image_caption": [ + "deer" + ], + "image_footnote": [], + "bbox": [ + 816, + 205, + 885, + 258 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a955543a83359f02418fbd2bcd55c201e1c486a29303e620816cc2b0eee66faa.jpg", + "image_caption": [ + "ImageNet Images", + "ostrich" + ], + "image_footnote": [], + "bbox": [ + 197, + 299, + 267, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/59bb26ee10ff0f0936995aa2fd19a902af27c3ca52ef9b4ba4aa30b3783d2938.jpg", + "image_caption": [ + "warplane" + ], + "image_footnote": [], + "bbox": [ + 284, + 299, + 354, + 353 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/106a1f1cd9f8b82afa3b83193e2e220518d723fbc9334ee29e64f7b81bbae8cd.jpg", + "image_caption": [ + "sorrel horse" + ], + "image_footnote": [], + "bbox": [ + 367, + 299, + 437, + 353 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a05da6efa0c53f7f07bdcaf52b9fc1bb44d2287fe2d33ecd72e90cd0a6ffc9b.jpg", + "image_caption": [ + "moving van" + ], + "image_footnote": [], + "bbox": [ + 450, + 300, + 516, + 353 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/36982405a6bc07a564f1e52cb70dbad7622c60ae6f4221d54fc443c27343d493.jpg", + "image_caption": [ + "warplane" + ], + "image_footnote": [], + "bbox": [ + 563, + 299, + 632, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/29136a5aac4e2987454b183fae55b11d858ff68a485d87945697ea1b38004622.jpg", + "image_caption": [ + "beach wagon" + ], + "image_footnote": [], + "bbox": [ + 651, + 299, + 720, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1a718365002950bbaa65ab0fa2a019e22bb89cc0e25f47c28d0b7f214a6998dd.jpg", + "image_caption": [ + "warplane" + ], + "image_footnote": [], + "bbox": [ + 735, + 299, + 803, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6b8f7c776d9773a24ca4618d5e531c7666a8d3c3b9f9120f206f6c0bc0187488.jpg", + "image_caption": [ + "moving van" + ], + "image_footnote": [], + "bbox": [ + 816, + 299, + 885, + 352 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/168b80208ee4478b70c7e6e43c9a58cab1975c037919a5d5af4cf299466e0af6.jpg", + "image_caption": [ + "CIFAR-10 Images", + "bird", + "Figure 7. ImageNet training images with highest positive (left) or negative (right) example-wise (average) influences on CIFAR-10 test images. We find that ImageNet images that are highly positively influential often correspond to data leakage, while ImageNet images that are highly negatively influential are often either mislabeled, ambiguous, or otherwise misleading. For example, the presence of a flying lawn mower in the ImageNet dataset hurts the downstream performance on a similarly shaped airplane (boxed)." 
+ ], + "image_footnote": [], + "bbox": [ + 199, + 375, + 269, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/15fb82c4b3d8da541a211e1f93dc2f25ce18652dca8f2c43db29b3419d9b00a5.jpg", + "image_caption": [ + "airplane" + ], + "image_footnote": [], + "bbox": [ + 282, + 375, + 352, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a4451c709cfaf1ddf5f6b6599f60769b969d696137c3510ddac23067c4af97fe.jpg", + "image_caption": [ + "horse" + ], + "image_footnote": [], + "bbox": [ + 367, + 375, + 437, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b1b31a96d714600dfe02db18a53e4b69d7245597263a2e36a992b3d373946ecd.jpg", + "image_caption": [ + "truck" + ], + "image_footnote": [], + "bbox": [ + 450, + 375, + 517, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a537a378a4d35c2e687e6b5a5af15901f7a83988731b78b90f6c54fb7441152b.jpg", + "image_caption": [ + "ship" + ], + "image_footnote": [], + "bbox": [ + 563, + 375, + 633, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/22f15385166b2f6462e0365af68740d4b527886bfd5e9ca2e72de4a3fb2d48c1.jpg", + "image_caption": [ + "airplane" + ], + "image_footnote": [], + "bbox": [ + 651, + 375, + 720, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a49ef592bc4a2b211857e3026faec3cb114cf571122eaa86ca2a7b9e1b4706f0.jpg", + "image_caption": [ + "ship" + ], + "image_footnote": [], + "bbox": [ + 735, + 375, + 803, + 428 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fdfa218c1e16c1ab04e01a7ce42237db87132f2ca79b062c9cf5294b1ac5d61f.jpg", + "image_caption": [ + "automobile" + ], + "image_footnote": [], + "bbox": [ + 816, + 375, + 885, + 428 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "analysis of this experiment.)", + "bbox": [ + 76, + 542, + 264, + 556 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Capability 3: Detecting data leakage and misleading source examples", + "text_level": 1, + "bbox": [ + 76, + 571, + 468, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Thus far, we have focused on how the classes in the source dataset influence the predictions of the transferred model on target examples. In this section, we extend our analysis to the individual datapoints of the source dataset. We do so by adapting our approach to measure the influence of each individual source datapoint on each target datapoint. Further details on how these influences are computed can be found in Appendix D.", + "bbox": [ + 75, + 612, + 468, + 733 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 7 displays the ImageNet training examples that have highly positive or negative influences on CIFAR-10 test examples. We find that the source images that are highly positively influential are often instances of data leakage between the source training set and the target test set. On the other hand, the ImageNet images that are highly negatively influential are typically mislabeled, misleading, or otherwise surprising. For example, the presence of the ImageNet image of a flying lawn mower hurts the performance on a CIFAR-10 image of a regular (but similarly shaped) airplane (see Figure 7).", + "bbox": [ + 75, + 734, + 470, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5. Related Work", + "text_level": 1, + "bbox": [ + 500, + 540, + 640, + 556 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Transfer learning. 
Transfer learning is a technique commonly used in domains ranging from medical imaging [23, 36], language modeling [6], to object detection [5, 8, 15, 41]. Therefore, there has been considerable interest in understanding the drivers of transfer learning's success. For example, by performing transfer learning on block-shuffled images, [37] demonstrate that at least some of the benefits of transfer learning come from low-level image statistics of source data. There is also an important line of work studying transfer learning by investigating the relationship between different properties of the source model and performance on the target task [23, 27, 42, 43].", + "bbox": [ + 496, + 566, + 893, + 748 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The works that are the most relevant to ours are those which studied how modifying the source dataset can affect the downstream performance. For example, [26] showed that pre-training with an enormous source dataset (approximately 300 million) of noisily labeled images can outperform pretraining with ImageNet. [1, 18] investigated the importance of the number of classes and the number of images per class in transfer learning. Finally, [38] demonstrated that more pre-training data does not always help, and transfer learning can be sensitive to the choice of pre-training data. They also", + "bbox": [ + 496, + 750, + 893, + 901 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "3619", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "presented a framework for reweighting the source datapoints in order to boost transfer learning performance.", + "bbox": [ + 76, + 90, + 468, + 121 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Influence functions and datamodels. Influence functions are well-studied statistical tools that have been recently applied in machine learning settings [7, 17, 25]. For a given model, influence functions analyze the effect of a training input on the model's predictions by estimating the expected change in performance when this training input is added or removed. In order to apply this tool in machine learning, [25] propose estimating the influence functions using the Hessian of the loss function. A recent line of work estimates this quantity more efficiently by training on different subsets of the training set [13]. In a similar vein, [14] proposed running a Monte Carlo search to estimate the effect of every training input via Shapley values. More recently, [19] proposed datamodeling framework as an alternative way to estimate the effect of a training input on the models' prediction. Datamodels are represented using parametric functions (typically, linear functions) that aim to map a subset of the training set to the model's output.", + "bbox": [ + 76, + 148, + 472, + 420 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusions", + "text_level": 1, + "bbox": [ + 76, + 438, + 204, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we presented a new framework for examining the impact of the source dataset in transfer learning. Specifically, our approach estimates the influence of a source class (or datapoint) that captures how including that class (or datapoint) in the source dataset impacts the downstream model's predictions. 
Leveraging these estimates, we demonstrate that we can improve the transfer learning performance on a range of downstream tasks by identifying and removing detrimental datapoints from the source dataset. Furthermore, our framework enables us to identify granular subpopulations in the target dataset by projecting fine-grained labels from the source dataset, better understand model failures on the downstream task and detect potential data-leakages from the source to the downstream dataset. We believe our framework provides a new perspective on transfer learning: one that enables us to perform a fine-grained analysis of the impact of the source dataset.", + "bbox": [ + 75, + 465, + 472, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 739, + 174, + 756 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, and Stefan Carlsson. Factors of transferability for a generic convnet representation. IEEE transactions on pattern analysis and machine intelligence, 2015.", + "[2] Thomas Berg, Jiongxin Liu, Seung Woo Lee, Michelle L Alexander, David W Jacobs, and Peter N Belhumeur. Birdsnap: Large-scale fine-grained visual categorization of birds. In Proceedings of the IEEE" + ], + "bbox": [ + 84, + 765, + 468, + 901 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Conference on Computer Vision and Pattern Recognition, 2014.", + "[3] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, 2014.", + "[4] Emmanuel Candes, Yingying Fan, Lucas Janson, and Jinchi Lv. Panning for gold: model-x knockoffs for high dimensional controlled variable selection. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 80(3):551-577, 2018.", + "[5] Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE transactions on pattern analysis and machine intelligence, 2017.", + "[6] Alexis Conneau and Douwe Kiela. Senteval: An evaluation toolkit for universal sentence representations. Language Resources and Evaluation Conference (LREC), 2018.", + "[7] R Dennis Cook and Sanford Weisberg. *Residuals and influence in regression*. New York: Chapman and Hall, 1982.", + "[8] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks. In Advances in neural information processing systems (NeurIPS), 2016.", + "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Computer Vision and Pattern Recognition (CVPR), 2009.", + "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021.", + "[11] Shuyang Du, Haoli Guo, and Andrew Simpson. Self-driving car steering angle prediction based on image recognition. arXiv preprint arXiv:1912.05440, 2019.", + "[12] Li Fei-Fei, Rob Fergus, and Pietro Perona. 
Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pages 178–178. IEEE, 2004.", + "[13] Vitaly Feldman and Chiyuan Zhang. What neural networks memorize and why: Discovering the long tail via influence estimation. In Advances in Neural Information Processing Systems (NeurIPS), volume 33, pages 2881-2891, 2020.", + "[14] Amirata Ghorbani and James Zou. Data shapley: Eq-" + ], + "bbox": [ + 501, + 90, + 893, + 892 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "3620", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "suitable valuation of data for machine learning. In International Conference on Machine Learning (ICML), 2019.", + "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In computer vision and pattern recognition (CVPR), pages 580-587, 2014.", + "[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007.", + "[17] Frank R Hampel, Elvezio M Ronchetti, Peter J Rousseuw, and Werner A Stahel. Robust statistics: the approach based on influence functions, volume 196. John Wiley & Sons, 2011.", + "[18] Minyoung Huh, Pulkit Agrawal, and Alexei A Efros. What makes imagenet good for transfer learning? arXiv preprint arXiv:1608.08614, 2016.", + "[19] Andrew Ilyas, Sung Min Park, Logan Engstrom, Guillaume Leclerc, and Aleksander Madry. Datamodels: Predicting predictions from training data. In International Conference on Machine Learning (ICML), 2022.", + "[20] Saachi Jain, Hadi Salman, Eric Wong, Pengchuan Zhang, Vibhav Vineet, Sai Vermprala, and Aleksander Madry. Missingness bias in model debugging. In International Conference on Learning Representations, 2022.", + "[21] Yunhun Jang, Hankook Lee, Sung Ju Hwang, and Jinwoo Shin. Learning what and where to transfer. In International Conference on Machine Learning, pages 3030-3039. PMLR, 2019.", + "[22] Bojan Karlas, David Dao, Matteo Interlandi, Bo Li, Sebastian Schelter, Wentao Wu, and Ce Zhang. Data debugging with shapley importance over end-to-end machine learning pipelines. arXiv preprint arXiv:2204.11131, 2022.", + "[23] Alexander Ke, William Ellsworth, Oishi Banerjee, Andrew Y Ng, and Pranav Rajpurkar. Chextransfer: performance and parameter efficiency of imagenet models for chest x-ray interpretation. In Proceedings of the Conference on Health, Inference, and Learning, pages 116-124, 2021.", + "[24] Jiman Kim and Chanjong Park. End-to-end ego lane estimation based on sequential transfer learning for self-driving cars. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 30-38, 2017.", + "[25] Pang Wei Koh and Percy Liang. Understanding blackbox predictions via influence functions. In International Conference on Machine Learning, 2017.", + "[26] Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, and Neil Houlsby. Big transfer (bit): General visual representation learning. arXiv preprint arXiv:1912.11370, 2019.", + "[27] Simon Kornblith, Jonathon Shlens, and Quoc V Le. Do" + ], + "bbox": [ + 78, + 90, + 470, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "better imagenet models transfer better? 
In computer vision and pattern recognition (CVPR), 2019.", + "[28] Jonathan Krause, Jia Deng, Michael Stark, and Li Fei-Fei. Collecting a large-scale dataset of fine-grained cars. 2013.", + "[29] Alex Krizhevsky. Learning multiple layers of features from tiny images. In Technical report, 2009.", + "[30] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054, 2022.", + "[31] Yongchan Kwon and James Zou. Beta shapley: a unified and noise-reduced data valuation framework for machine learning. arXiv preprint arXiv:2110.14049, 2021.", + "[32] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022.", + "[33] Kaleel Mahmood, Rigel Mahmood, and Marten Van Dijk. On the robustness of vision transformers to adversarial examples. 2021.", + "[34] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013.", + "[35] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 1995.", + "[36] Romain Mormont, Pierre Geurts, and Raphael Marée. Comparison of deep transfer learning strategies for digital pathology. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018.", + "[37] Behnam Neyshabur, Hanie Sedghi, and Chiyuan Zhang. What is being transferred in transfer learning? Advances in neural information processing systems, 33:512-523, 2020.", + "[38] Jiquan Ngiam, Daiyi Peng, Vijay Vasudevan, Simon Kornblith, Quoc V Le, and Ruoming Pang. Domain adaptive transfer learning with specialist models. arXiv preprint arXiv:1811.07056, 2018.", + "[39] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, 2008.", + "[40] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pages 3498-3505. IEEE, 2012.", + "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems (NeurIPS), 2015.", + "[42] Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish" + ], + "bbox": [ + 503, + 90, + 893, + 888 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "3621", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems (NeurIPS), 2020.", + "[43] Francisco Utrera, Evan Kravitz, N. Benjamin Erickson, Rajiv Khanna, and Michael W. Mahoney. Adversarily-trained deep nets transfer better. In ArXiv preprint arXiv:2007.05869, 2020.", + "[44] Sherrie Wang, George Azzari, and David B Lobell. Crop type mapping without field-level labels: Random forest transfer and unsupervised clustering techniques. Remote sensing of environment, 222:303-317, 2019.", + "[45] Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, and Ronald M Summers. 
Chestx-ray8: Hospital-scale chest x-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2017.", + "[46] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In Computer Vision and Pattern Recognition (CVPR), 2010.", + "[47] Michael Xie, Neal Jean, Marshall Burke, David Lobell, and Stefano Ermon. Transfer learning from deep features for remote sensing and poverty mapping. In Thirtieth AAAI Conference on Artificial Intelligence, 2016." + ], + "bbox": [ + 78, + 90, + 470, + 506 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "3622", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_model.json b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3f9ada4158dcda88f3c6214df634a8e0b2d0d6fa --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_model.json @@ -0,0 +1,2849 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.243, + 0.131, + 0.73, + 0.154 + ], + "angle": 0, + "content": "A Data-Based Perspective on Transfer Learning" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.181, + 0.33, + 0.197 + ], + "angle": 0, + "content": "Saachi Jain*" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.2, + 0.297, + 0.214 + ], + "angle": 0, + "content": "MIT" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.218, + 0.368, + 0.234 + ], + "angle": 0, + "content": "saachij@mit.edu" + }, + { + "type": "text", + "bbox": [ + 0.439, + 0.182, + 0.551, + 0.198 + ], + "angle": 0, + "content": "Hadi Salman*" + }, + { + "type": "text", + "bbox": [ + 0.471, + 0.2, + 0.51, + 0.214 + ], + "angle": 0, + "content": "MIT" + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.218, + 0.563, + 0.234 + ], + "angle": 0, + "content": "hady@mit.edu" + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.182, + 0.764, + 0.199 + ], + "angle": 0, + "content": "Alaa Khaddaj *" + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.2, + 0.72, + 0.214 + ], + "angle": 0, + "content": "MIT" + }, + { + "type": "text", + "bbox": [ + 0.616, + 0.218, + 0.783, + 0.233 + ], + "angle": 0, + "content": "alaakah@mit.edu" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.26, + 0.324, + 0.277 + ], + "angle": 0, + "content": "Eric Wong" + }, + { + "type": "text", + "bbox": [ + 0.173, + 0.278, + 0.388, + 0.295 + ], + "angle": 0, + "content": "University of Pennsylvania" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.297, + 0.404, + 0.312 + ], + "angle": 0, + "content": "exwong@seas.upenn.edu" + }, + { + "type": "text", + "bbox": [ + 0.468, + 0.26, + 0.591, + 0.277 + ], + "angle": 0, + "content": "Sung Min Park" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.279, + 0.548, + 0.292 + ], + "angle": 0, + "content": "MIT" + }, + { + "type": "text", + "bbox": [ + 0.453, + 0.297, + 0.607, + 0.312 + ], + "angle": 0, + "content": "sp765@mit.edu" + }, + { + "type": "text", + "bbox": [ + 0.661, + 0.26, + 0.809, + 0.277 + ], + "angle": 0, + "content": "Aleksander Mądry" + }, + { + "type": "text", + "bbox": [ + 0.716, + 0.279, + 0.753, + 0.292 + ], + "angle": 0, + "content": "MIT" + }, + { + "type": "text", + "bbox": [ + 0.654, + 0.297, + 0.81, + 0.312 + ], + "angle": 0, + "content": "madry@mit.edu" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.347, + 0.314, + 0.363 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.379, + 0.474, + 0.591 + ], + "angle": 0, + "content": "It is commonly believed that in transfer learning including more pre-training data translates into better performance. However, recent evidence suggests that removing data from the source dataset can actually help too. In this work, we take a closer look at the role of the source dataset's composition in transfer learning and present a framework for probing its impact on downstream performance. Our framework gives rise to new capabilities such as pinpointing transfer learning brittleness as well as detecting pathologies such as data-leakage and the presence of misleading examples in the source dataset. In particular, we demonstrate that removing detrimental datapoints identified by our framework indeed improves transfer learning performance from ImageNet on a variety of target tasks." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.62, + 0.21, + 0.635 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.642, + 0.472, + 0.748 + ], + "angle": 0, + "content": "Transfer learning enables us to adapt a model trained on a source dataset to perform better on a downstream target task. This technique is employed in a range of machine learning applications including radiology [23, 45], autonomous driving [11, 24], and satellite imagery analysis [44, 47]. Despite its successes, however, it is still not clear what the drivers of performance gains brought by transfer learning actually are." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.748, + 0.472, + 0.854 + ], + "angle": 0, + "content": "So far, a dominant approach to studying these drivers focused on the role of the source model—i.e., the model trained on the source dataset. The corresponding works involve investigating the source model's architecture [23], accuracy [27], adversarial vulnerability [42, 43], and training procedure [21, 30]. This line of work makes it clear that the properties of the source model has a significant impact on" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.349, + 0.895, + 0.44 + ], + "angle": 0, + "content": "transfer learning. There is some evidence, however, that the source dataset might play an important role as well [18, 26, 38]. For example, several works have shown that while increasing the size of the source dataset generally boosts transfer learning performance, removing specific classes can help too [18, 26, 38]. All of this motivates a natural question:" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.459, + 0.894, + 0.489 + ], + "angle": 0, + "content": "How can we pinpoint the exact impact of the source dataset in transfer learning?" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.518, + 0.896, + 0.655 + ], + "angle": 0, + "content": "Our Contributions. In this paper, we present a framework for measuring and analyzing the impact of the source dataset's composition on transfer learning performance. To do this, our framework provides us with the ability to investigate the counterfactual impact on downstream predictions of including or excluding datapoints from the source dataset, drawing inspiration from classical supervised learning techniques such as influence functions [7, 13, 25] and datamodels [19]. Using our framework, we can:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.676, + 0.895, + 0.706 + ], + "angle": 0, + "content": "- Pinpoint what parts of the source dataset are most utilized by the downstream task." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.725, + 0.894, + 0.77 + ], + "angle": 0, + "content": "- Automatically extract granular subpopulations in the target dataset through projection of the fine-grained labels of the source dataset." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.789, + 0.893, + 0.82 + ], + "angle": 0, + "content": "- Surface pathologies such as source-target data leakage and mislabelled source datapoints." + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.676, + 0.895, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.84, + 0.895, + 0.901 + ], + "angle": 0, + "content": "We also demonstrate how our framework can be used to find detrimental subsets of ImageNet [9] that, when removed, give rise to better downstream performance on a variety of image classification tasks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.864, + 0.207, + 0.876 + ], + "angle": 0, + "content": "*Equal contribution." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.097, + 0.876, + 0.472, + 0.899 + ], + "angle": 0, + "content": "\\(^{1}\\)Code is available at https://github.com/MadryLab/data-ansfer" + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.864, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3613" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.077, + 0.09, + 0.471, + 0.126 + ], + "angle": 0, + "content": "2. A Data-Based Framework for Studying Transfer Learning" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.133, + 0.473, + 0.299 + ], + "angle": 0, + "content": "In order to pinpoint the role of the source dataset in transfer learning, we need to understand how the composition of that source dataset impacts the downstream model's performance. To do so, we draw inspiration from supervised machine learning approaches that study the impact of the training data on the model's subsequent predictions. In particular, these approaches capture this impact via studying (and approximating) the counterfactual effect of excluding certain training datapoints. This paradigm underlies a number of techniques, from influence functions [7, 13, 25], to datamodels [19], to data Shapley values [14, 22, 31]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.3, + 0.474, + 0.48 + ], + "angle": 0, + "content": "Now, to adapt this paradigm to our setting, we study the counterfactual effect of excluding datapoints from the source dataset on the downstream, target task predictions. In our framework, we will focus on the inclusion or exclusion of entire classes in the source dataset, as opposed to individual examples2. This is motivated by the fact that, intuitively, we expect these classes to be the ones that embody whole concepts and thus drive the formation of (transferred) features. We therefore anticipate the removal of entire classes to have a more measurable impact on the representation learned by the source model (and consequently on the downstream model's predictions)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.482, + 0.474, + 0.784 + ], + "angle": 0, + "content": "Once we have chosen to focus on removal of entire source classes, we can design counterfactual experiments to estimate their influences. A natural approach here, the leave-one-out method [7, 25], would involve removing each individual class from the source dataset separately and then measuring the change in the downstream model's predictions. However, in the transfer learning setting, we suspect that removing a single class from the source dataset won't significantly change the downstream model's performance. Thus, leave-one-out methodology may be able to capture meaningful influences only in rare cases. This is especially so as many common source datasets contain highly redundant classes. For example, ImageNet contains over 100 dog-breed classes. The removal of a single dog-breed class might thus have a negligible impact on transfer learning performance, but the removal of all of the dog classes might significantly change the features learned by the downstream model. For these reasons, we adapt the subsampling [13, 19] approach, which revolves around removing a random collection of source classes at once." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.804, + 0.471, + 0.866 + ], + "angle": 0, + "content": "Computing transfer influences. 
In the light of the above, our methodology for computing the influence of source classes on transfer learning performance involves training a large number of models with random subsets of the source" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.091, + 0.892, + 0.122 + ], + "angle": 0, + "content": "Algorithm 1 Estimation of source dataset class influences on transfer learning performance." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.126, + 0.894, + 0.173 + ], + "angle": 0, + "content": "Require: Source dataset \\(\\mathcal{S} = \\cup_{k=1}^{K} \\mathcal{C}_k\\) (with \\(K\\) classes), a target dataset \\(\\mathcal{T} = (t_1, t_2, \\dots, t_n)\\), training algorithm \\(\\mathcal{A}\\), subset ratio \\(\\alpha\\), and number of models \\(m\\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.174, + 0.892, + 0.203 + ], + "angle": 0, + "content": "1: Sample \\( m \\) random subsets \\( S_{1}, S_{2}, \\dots, S_{m} \\subset \\mathcal{S} \\) of size \\( \\alpha \\cdot |\\mathcal{S}| \\):" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.204, + 0.649, + 0.217 + ], + "angle": 0, + "content": "2: for \\( i \\in 1 \\) to \\( m \\) do" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.219, + 0.854, + 0.234 + ], + "angle": 0, + "content": "3: Train model \\( f_{i} \\) by running algorithm \\( \\mathcal{A} \\) on \\( S_{i} \\)" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.235, + 0.585, + 0.247 + ], + "angle": 0, + "content": "4: end for" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.249, + 0.652, + 0.262 + ], + "angle": 0, + "content": "5: for \\( k \\in 1 \\) to \\( K \\) do" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.264, + 0.671, + 0.279 + ], + "angle": 0, + "content": "6: for \\(j\\in 1\\) to \\(n\\) do" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.279, + 0.892, + 0.301 + ], + "angle": 0, + "content": "7: \\(\\operatorname{Infl}[\\mathcal{C}_k \\to t_j] = \\frac{\\sum_{i=1}^m f_i(t_j; S_i) \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}}{\\sum_{i=1}^m \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}} -\\)" + }, + { + "type": "equation", + "bbox": [ + 0.532, + 0.3, + 0.663, + 0.325 + ], + "angle": 0, + "content": "\\[\n\\frac {\\sum_ {i = 1} ^ {m} f _ {i} \\left(t _ {j} ; S _ {i}\\right) \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}{\\sum_ {i = 1} ^ {m} \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.325, + 0.608, + 0.336 + ], + "angle": 0, + "content": "8: end for" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.338, + 0.584, + 0.351 + ], + "angle": 0, + "content": "9: end for" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.352, + 0.83, + 0.369 + ], + "angle": 0, + "content": "10: return Infl \\(\\left[\\mathcal{C}_k\\rightarrow t_j\\right]\\) , for all \\(j\\in [n],k\\in [K]\\)" + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.174, + 0.892, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.399, + 0.894, + 0.476 + ], + "angle": 0, + "content": "classes removed, and fine-tuning these models on the target task. 
We then estimate the influence value of a source class \\(\\mathcal{C}\\) on a target example \\(t\\) as the expected difference in the transfer model's performance on example \\(t\\) when class \\(\\mathcal{C}\\) was either included in or excluded from the source dataset:" + }, + { + "type": "equation", + "bbox": [ + 0.5, + 0.486, + 0.893, + 0.519 + ], + "angle": 0, + "content": "\\[\n\\operatorname {I n f l} [ \\mathcal {C} \\rightarrow t ] = \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\subset S ] - \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\not \\subset S ], \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.532, + 0.895, + 0.654 + ], + "angle": 0, + "content": "where \\( f(t; S) \\) is the softmax output3 of a model trained on a subset \\( S \\) of the source dataset. A positive influence value indicates that including the source class \\( \\mathcal{C} \\) helps the model predict the target example \\( t \\) correctly. On the other hand, a negative influence value suggests that the source class \\( \\mathcal{C} \\) actually hurts the model's performance on the target example \\( t \\). We outline the overall procedure in Algorithm 1, and defer a detailed description of our approach to Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.672, + 0.895, + 0.821 + ], + "angle": 0, + "content": "A note on computational costs. In order to compute transfer influences, we need to train a large number of source models, each on a fraction of the source dataset. Specifically, we pre-train 7,540 models on ImageNet, each on a randomly chosen \\(50\\%\\) of the ImageNet dataset. This pre-training step needs to be performed only once though: these same models can then be used to fine-tune on each new target task. Overall, the whole process (training the source models and fine-tuning on target datasets) takes less than 20 days using 8 V100 GPUs4." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.823, + 0.894, + 0.854 + ], + "angle": 0, + "content": "Are so many models necessary? In Section A.5, we explore computing transfer influences with smaller numbers" + }, + { + "type": "page_footnote", + "bbox": [ + 0.077, + 0.875, + 0.471, + 0.9 + ], + "angle": 0, + "content": "2In Section 4.3, we adapt our framework to calculate more granular influences of individual source examples too." + }, + { + "type": "page_footnote", + "bbox": [ + 0.5, + 0.863, + 0.894, + 0.888 + ], + "angle": 0, + "content": "3We experiment with other outputs such as logits, margins, or correctness too. We discuss the corresponding results in Appendix B." + }, + { + "type": "page_footnote", + "bbox": [ + 0.519, + 0.888, + 0.669, + 0.901 + ], + "angle": 0, + "content": "4Details are in Appendix A." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.863, + 0.894, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3614" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.09, + 0.852, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.279, + 0.895, + 0.31 + ], + "angle": 0, + "content": "Figure 1. Most positive and negative ImageNet classes ordered based on their overall influence on the CIFAR-10 dataset. The top source classes (e.g., tailed frog and sorrel horse) turn out to be semantically relevant to the target classes (e.g., frog and horse)." 
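To make Eq. (1) and Algorithm 1 concrete, the following is a minimal NumPy sketch of the subsampling-based estimator. The array names are illustrative assumptions, not the authors' released code: outputs[i, j] holds the softmax output f_i(t_j; S_i) of the i-th subset-trained model on target example t_j, and included[i, k] records whether source class C_k was part of subset S_i.

import numpy as np

def class_influences(outputs: np.ndarray, included: np.ndarray) -> np.ndarray:
    # outputs:  (m, n) float array, outputs[i, j] = f_i(t_j; S_i)
    # included: (m, K) bool array, included[i, k] = True iff C_k was in S_i
    # returns:  (K, n) array with Infl[C_k -> t_j] as in Eq. (1)
    m, n = outputs.shape
    _, K = included.shape
    infl = np.empty((K, n))
    for k in range(K):
        with_class = outputs[included[:, k]].mean(axis=0)      # E[f(t; S) | C_k in S]
        without_class = outputs[~included[:, k]].mean(axis=0)  # E[f(t; S) | C_k not in S]
        infl[k] = with_class - without_class
    return infl

With a 50% subset ratio and thousands of subset-trained models, every source class is included in (and excluded from) many subsets, so both conditional means are well defined.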
+ }, + { + "type": "image", + "bbox": [ + 0.082, + 0.325, + 0.468, + 0.531 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.535, + 0.325, + 0.546 + ], + "angle": 0, + "content": "(a) CIFAR-10 results" + }, + { + "type": "table", + "bbox": [ + 0.502, + 0.349, + 0.891, + 0.533 + ], + "angle": 0, + "content": "
Target Dataset | Source Dataset
 | Full ImageNet | Removing Bottom Infl. | Semantically Relevant Classes
AIRCRAFT | 36.08 ± 1.07 | 36.88 ± 0.74 | N/A
BIRDSNAP | 38.42 ± 0.40 | 39.19 ± 0.38 | 26.74 ± 0.31
CALTECH101 | 86.69 ± 0.79 | 87.03 ± 0.30 | 82.28 ± 0.40
CALTECH256 | 74.97 ± 0.27 | 75.24 ± 0.21 | 67.42 ± 0.39
CARS | 39.55 ± 0.32 | 40.59 ± 0.57 | 21.71 ± 0.40
CIFAR10 | 81.16 ± 0.30 | 83.64 ± 0.40 | 75.53 ± 0.42
CIFAR100 | 59.37 ± 0.58 | 61.46 ± 0.59 | 55.21 ± 0.52
FLOWERS | 82.92 ± 0.52 | 82.89 ± 0.48 | N/A
FOOD | 56.19 ± 0.14 | 56.85 ± 0.27 | 39.36 ± 0.39
PETS | 83.41 ± 0.55 | 87.59 ± 0.24 | 87.16 ± 0.24
SUN397 | 50.15 ± 0.23 | 51.34 ± 0.29 | N/A
" + }, + { + "type": "table_caption", + "bbox": [ + 0.624, + 0.535, + 0.768, + 0.547 + ], + "angle": 0, + "content": "(b) Summary of 11 target tasks" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.558, + 0.895, + 0.657 + ], + "angle": 0, + "content": "Figure 2. Target task accuracies after removing the K most positively or negatively influential ImageNet classes from the source dataset. Mean/std are reported over 10 runs. (a) Results with CIFAR-10 as the target task after removing different numbers of classes from the source dataset. We also include baselines of using the full ImageNet dataset and removing random classes. One can note that, by removing negatively influential source classes, we can obtain a test accuracy that is \\(2.5\\%\\) larger than what using the entire ImageNet dataset would yield. Results for other target tasks can be found in Appendix C. (b) Peak performances when removing the most negatively influential source classes across a range of other target tasks. We also compare against using the full ImageNet dataset or a subset of source classes that are semantically relevant to the target classes (defined via the WordNet hierarchy, see Appendix A for details)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.682, + 0.471, + 0.789 + ], + "angle": 0, + "content": "of models. While using the full number of models provides the best results, training a much smaller number of models (e.g., 1000 models, taking slightly over 2.5 days on 8 V100 GPUs) still provides meaningful transfer influences. Thus in practice, one can choose the number of source models based on noise tolerance and computational budget. Further convergence results can be found in Appendix A.5." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.809, + 0.472, + 0.845 + ], + "angle": 0, + "content": "3. Identifying the Most Influential Classes of the Source Dataset" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.856, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In Section 2, we presented a framework for pinpointing the role of the source dataset in transfer learning by estimating the influence of each source class on the target model's" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.682, + 0.895, + 0.789 + ], + "angle": 0, + "content": "predictions. Using these influences, we can now take a look at the classes from the source dataset that have the largest positive or negative impact on the overall transfer learning performance. We focus our analysis on the fixed-weights transfer learning setting (further results, including full model fine-tuning as well as generalization to other architectures, can be found in Appendix E)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.796, + 0.897, + 0.903 + ], + "angle": 0, + "content": "As one might expect, not all source classes have large influences. Figure 1 displays the most influential classes of ImageNet with CIFAR-10 as the target task. Notably, the most positively influential source classes turn out to be directly related to classes in the target task (e.g., the ImageNet label \"tailed frog\" is an instance of the CIFAR class \"frog\"). 
This trend holds across all of the target datasets and transfer" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.958 + ], + "angle": 0, + "content": "3615" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.09, + 0.852, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.279, + 0.897, + 0.322 + ], + "angle": 0, + "content": "Figure 3. Most positive and negative influencing ImageNet classes for the CIFAR-10 class \"bird\". These are calculated by averaging the influence of each source class over all bird examples. We find that the most positively influencing ImageNet classes (e.g., \"ostrich\" and \"bustard\") are related to the CIFAR-10 class \"bird\". See Appendix E for results on other CIFAR-10 classes." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.339, + 0.797, + 0.701 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.717, + 0.895, + 0.76 + ], + "angle": 0, + "content": "Figure 4. Projecting source labels onto the target dataset. For various target datasets (right), we display the images that were most positively influenced by various ImageNet classes in the source dataset (left). We find that the identified images from the target datasets look similar to the corresponding images in the source dataset." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.786, + 0.475, + 0.879 + ], + "angle": 0, + "content": "learning settings we considered (see Appendix C). Interestingly, the source dataset also contains classes that are overall negatively influential for the target task, e.g., \"bookshop\" and \"jigsaw puzzle\" classes. (In Section 4, we will take a closer look at the factors that can cause a source class to be negatively influential for a target prediction.)" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.786, + 0.896, + 0.894 + ], + "angle": 0, + "content": "How important are the most influential source classes? We now remove each of the most influential classes from the source dataset to observe their actual impact on transfer learning performance (Figure 2a). As expected, removing the most positively influential classes severely degrades transfer learning performance as compared to removing random classes. This counterfactual experiment confirms that" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.519, + 0.958 + ], + "angle": 0, + "content": "3616" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.096, + 0.85, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.498, + 0.897, + 0.542 + ], + "angle": 0, + "content": "Figure 5. The CIFAR-10 images that were most positively (or negatively) influenced by the ImageNet classes \"starfish\" and \"rapeseed.\" CIFAR-10 images that are highly influenced by the \"starfish\" class have similar shapes, while those influenced by \"rapeseed\" class have yellow-green colors." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.567, + 0.47, + 0.642 + ], + "angle": 0, + "content": "these classes are indeed important to the performance of transfer learning. On the other hand, removing the most negatively influential classes actually improves the overall transfer learning performance beyond what using the entire ImageNet dataset provides (see Figure 2b)." 
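A rough sketch of the class-removal side of this counterfactual experiment, assuming per-class overall influences have already been computed; the names and the implied retraining step are placeholders rather than the authors' pipeline.

import numpy as np

def classes_to_keep(overall_infl, class_names, k_remove):
    # overall_infl: (K,) average influence of each source class on the target task
    # Drop the k_remove most negatively influential classes (the
    # "Removing Bottom Infl." setting of Figure 2b); dropping the most
    # positively influential ones instead would use order[::-1].
    order = np.argsort(overall_infl)            # ascending: most negative first
    dropped = {class_names[i] for i in order[:k_remove]}
    return [c for c in class_names if c not in dropped]

The returned class list defines the pruned source dataset on which a new source model would be pre-trained before fine-tuning and evaluating on the target task.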
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.643, + 0.473, + 0.81 + ], + "angle": 0, + "content": "Above, we noted that the top influential source classes are typically related to the classes in the target dataset. What happens if we only choose source classes that are semantically relevant to the classes of the target dataset? Indeed, [38] found that hand-picking such source datasets can sometimes boost transfer learning performance. For each target dataset, we select ImageNet classes that are semantically relevant to the target classes (using the WordNet hierarchy, see Appendix A). As shown in Figure 2b, choosing an optimal subset of classes via transfer influences substantially outperforms this baseline." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.827, + 0.471, + 0.863 + ], + "angle": 0, + "content": "4. Probing the Impact of the Source Dataset on Transfer Learning" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.473, + 0.903 + ], + "angle": 0, + "content": "In Section 3, we developed a methodology for identifying source dataset classes that have the most impact on" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.567, + 0.895, + 0.704 + ], + "angle": 0, + "content": "transfer learning performance. Now, we demonstrate how this methodology can be extended into a framework for probing and understanding transfer learning, including: (1) identifying granular target subpopulations that correspond to source classes, (2) debugging transfer learning failures, and (3) detecting data leakage between the source and target datasets. We focus our demonstration of these capabilities on a commonly-used transfer learning setting: ImageNet to CIFAR-10 (experimental details are in Appendix A)." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.711, + 0.892, + 0.743 + ], + "angle": 0, + "content": "4.1. Capability 1: Extracting target subpopulations by projecting source class labels" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.871 + ], + "angle": 0, + "content": "Imagine that we would like to find all the ostriches in the CIFAR-10 dataset. This is not an easy task as CIFAR-10 only has \"bird\" as a label, and thus lacks sufficiently fine-grained annotations. Luckily, however, ImageNet does contain an ostrich class! Our computed influences enable us to \"project\" this ostrich class annotation (and, more broadly, the fine-grained label hierarchy of our source dataset) to find this subpopulation of interest in the target dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.499, + 0.871, + 0.892, + 0.902 + ], + "angle": 0, + "content": "Indeed, our examination from Section 3 suggests that the most positively influencing source classes are typically those" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3617" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.164, + 0.092, + 0.273, + 0.186 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.28, + 0.097, + 0.516, + 0.235 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.099, + 0.807, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.241, + 0.265, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.246, + 0.512, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.245, + 0.807, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.164, + 0.392, + 0.287, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.397, + 0.516, + 0.533 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.399, + 0.807, + 0.53 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.547, + 0.895, + 0.604 + ], + "angle": 0, + "content": "Figure 6. Pinpointing highly negatively influential source classes can help explain model mistakes. Left: For three CIFAR-10 images, we plot the most negatively influential source classes. Right: Over 20 runs, the fraction of times that our downstream model predicts each label for the given CIFAR-10 image. When the most negatively influential class is removed, the model predicts the correct label more frequently. More examples can be found in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.63, + 0.471, + 0.722 + ], + "angle": 0, + "content": "that directly overlap with the target classes (see Figure 1). In particular, for our example, \"ostrich\" is highly positively influential for the \"bird\" class (see Figure 3). To find ostriches in the CIFAR-10 dataset, we thus need to simply surface the CIFAR-10 images which were most positively influenced by the \"ostrich\" source class (see Figure 4)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.473, + 0.903 + ], + "angle": 0, + "content": "It turns out that this type of projection approach can be applied more broadly. Even when the source class is not a direct sub-type of a target class, the downstream model can still leverage salient features from this class — such as shape or color — to predict on the target dataset. For such classes, projecting source labels can extract target subpopulations which share such features. To illustrate this, in Figure 5, we display the CIFAR-10 images that are highly influenced by the classes \"starfish\" and \"rapeseed\" (both of which do not directly appear in the CIFAR-10 dataset). For these classes, the most influenced CIFAR-10 images share the same shape" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.63, + 0.895, + 0.676 + ], + "angle": 0, + "content": "(\"starfish\") or color (\"rapeseed\") as their ImageNet counterparts. More examples of such projections can be found in Appendix E." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.693, + 0.892, + 0.724 + ], + "angle": 0, + "content": "4.2. 
Capability 2: Debugging the failures of a transferred model" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.735, + 0.895, + 0.903 + ], + "angle": 0, + "content": "Our framework enables us to also reason about the possible mistakes of the transferred model caused by source dataset classes. For example, consider the CIFAR-10 image of a dog in Figure 6, which our transfer learning model often mispredicts as a horse. Using our framework, we can demonstrate that this image is strongly negatively influenced by the source class \"sorrel horse.\" Thus, our downstream model may be misusing a feature introduced by this class. Indeed, once we remove \"sorrel horse\" from the source dataset, our model predicts the correct label more frequently. (See Appendix E for more examples, as well as a quantitative" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3618" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.098, + 0.484, + 0.117 + ], + "angle": 0, + "content": "Most Positively Influenced" + }, + { + "type": "image_caption", + "bbox": [ + 0.099, + 0.144, + 0.169, + 0.174 + ], + "angle": 0, + "content": "ImageNet Images" + }, + { + "type": "image", + "bbox": [ + 0.201, + 0.13, + 0.269, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.204, + 0.186, + 0.262, + 0.196 + ], + "angle": 0, + "content": "speedboat" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.131, + 0.354, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.292, + 0.186, + 0.349, + 0.196 + ], + "angle": 0, + "content": "tailed frog" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.131, + 0.437, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.186, + 0.428, + 0.196 + ], + "angle": 0, + "content": "warplane" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.131, + 0.521, + 0.185 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.471, + 0.187, + 0.5, + 0.196 + ], + "angle": 0, + "content": "racer" + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.219, + 0.165, + 0.248 + ], + "angle": 0, + "content": "CIFAR-10 Images" + }, + { + "type": "image", + "bbox": [ + 0.198, + 0.205, + 0.268, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.262, + 0.245, + 0.274 + ], + "angle": 0, + "content": "ship" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.206, + 0.354, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.262, + 0.332, + 0.274 + ], + "angle": 0, + "content": "frog" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.206, + 0.437, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.38, + 0.262, + 0.426, + 0.273 + ], + "angle": 0, + "content": "airplane" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.206, + 0.521, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.454, + 0.262, + 0.516, + 0.273 + ], + "angle": 0, + "content": "automobile" + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.098, + 0.856, + 0.117 + ], + "angle": 0, + "content": "Most Negatively Influenced" + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.127, + 0.635, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 
0.567, + 0.186, + 0.632, + 0.196 + ], + "angle": 0, + "content": "lawnmower" + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.131, + 0.721, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.186, + 0.708, + 0.196 + ], + "angle": 0, + "content": "minivan" + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.131, + 0.804, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.756, + 0.186, + 0.783, + 0.197 + ], + "angle": 0, + "content": "wing" + }, + { + "type": "image", + "bbox": [ + 0.819, + 0.131, + 0.886, + 0.184 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.821, + 0.186, + 0.883, + 0.197 + ], + "angle": 0, + "content": "book jacket" + }, + { + "type": "image", + "bbox": [ + 0.566, + 0.206, + 0.635, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.577, + 0.262, + 0.622, + 0.273 + ], + "angle": 0, + "content": "airplane" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.206, + 0.721, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.262, + 0.708, + 0.273 + ], + "angle": 0, + "content": "airplane" + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.206, + 0.804, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.262, + 0.781, + 0.274 + ], + "angle": 0, + "content": "ship" + }, + { + "type": "image", + "bbox": [ + 0.818, + 0.206, + 0.886, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.839, + 0.262, + 0.865, + 0.273 + ], + "angle": 0, + "content": "deer" + }, + { + "type": "image_caption", + "bbox": [ + 0.101, + 0.313, + 0.168, + 0.342 + ], + "angle": 0, + "content": "ImageNet Images" + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.3, + 0.268, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.355, + 0.252, + 0.366 + ], + "angle": 0, + "content": "ostrich" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.3, + 0.355, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.294, + 0.355, + 0.345, + 0.366 + ], + "angle": 0, + "content": "warplane" + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.3, + 0.439, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.37, + 0.355, + 0.435, + 0.366 + ], + "angle": 0, + "content": "sorrel horse" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.301, + 0.517, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.454, + 0.355, + 0.516, + 0.366 + ], + "angle": 0, + "content": "moving van" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.3, + 0.633, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.573, + 0.355, + 0.625, + 0.366 + ], + "angle": 0, + "content": "warplane" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.3, + 0.722, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.355, + 0.722, + 0.366 + ], + "angle": 0, + "content": "beach wagon" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.3, + 0.805, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.743, + 0.355, + 0.796, + 0.366 + ], + "angle": 0, + "content": "warplane" + }, + { + "type": "image", 
+ "bbox": [ + 0.818, + 0.3, + 0.887, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.82, + 0.355, + 0.882, + 0.366 + ], + "angle": 0, + "content": "moving van" + }, + { + "type": "image_caption", + "bbox": [ + 0.104, + 0.388, + 0.165, + 0.418 + ], + "angle": 0, + "content": "CIFAR-10 Images" + }, + { + "type": "image", + "bbox": [ + 0.2, + 0.376, + 0.27, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.432, + 0.245, + 0.442 + ], + "angle": 0, + "content": "bird" + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.376, + 0.354, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.432, + 0.342, + 0.443 + ], + "angle": 0, + "content": "airplane" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.376, + 0.439, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.386, + 0.432, + 0.419, + 0.443 + ], + "angle": 0, + "content": "horse" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.376, + 0.519, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.47, + 0.432, + 0.501, + 0.442 + ], + "angle": 0, + "content": "truck" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.376, + 0.635, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.588, + 0.432, + 0.611, + 0.443 + ], + "angle": 0, + "content": "ship" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.376, + 0.721, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.664, + 0.432, + 0.708, + 0.443 + ], + "angle": 0, + "content": "airplane" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.376, + 0.805, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.757, + 0.432, + 0.78, + 0.443 + ], + "angle": 0, + "content": "ship" + }, + { + "type": "image", + "bbox": [ + 0.818, + 0.376, + 0.887, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.821, + 0.432, + 0.882, + 0.442 + ], + "angle": 0, + "content": "automobile" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.46, + 0.894, + 0.517 + ], + "angle": 0, + "content": "Figure 7. ImageNet training images with highest positive (left) or negative (right) example-wise (average) influences on CIFAR-10 test images. We find that ImageNet images that are highly positively influential often correspond to data leakage, while ImageNet images that are highly negatively influential are often either mislabeled, ambiguous, or otherwise misleading. For example, the presence of a flying lawn mower in the ImageNet dataset hurts the downstream performance on a similarly shaped airplane (boxed)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.543, + 0.266, + 0.558 + ], + "angle": 0, + "content": "analysis of this experiment.)" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.572, + 0.469, + 0.604 + ], + "angle": 0, + "content": "4.3. Capability 3: Detecting data leakage and misleading source examples" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.613, + 0.47, + 0.734 + ], + "angle": 0, + "content": "Thus far, we have focused on how the classes in the source dataset influence the predictions of the transferred model on target examples. In this section, we extend our analysis to the individual datapoints of the source dataset. 
We do so by adapting our approach to measure the influence of each individual source datapoint on each target datapoint. Further details on how these influences are computed can be found in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.471, + 0.902 + ], + "angle": 0, + "content": "Figure 7 displays the ImageNet training examples that have highly positive or negative influences on CIFAR-10 test examples. We find that the source images that are highly positively influential are often instances of data leakage between the source training set and the target test set. On the other hand, the ImageNet images that are highly negatively influential are typically mislabeled, misleading, or otherwise surprising. For example, the presence of the ImageNet image of a flying lawn mower hurts the performance on a CIFAR-10 image of a regular (but similarly shaped) airplane (see Figure 7)." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.541, + 0.641, + 0.557 + ], + "angle": 0, + "content": "5. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.568, + 0.895, + 0.749 + ], + "angle": 0, + "content": "Transfer learning. Transfer learning is a technique commonly used in domains ranging from medical imaging [23, 36], language modeling [6], to object detection [5, 8, 15, 41]. Therefore, there has been considerable interest in understanding the drivers of transfer learning's success. For example, by performing transfer learning on block-shuffled images, [37] demonstrate that at least some of the benefits of transfer learning come from low-level image statistics of source data. There is also an important line of work studying transfer learning by investigating the relationship between different properties of the source model and performance on the target task [23, 27, 42, 43]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.895, + 0.902 + ], + "angle": 0, + "content": "The works that are the most relevant to ours are those which studied how modifying the source dataset can affect the downstream performance. For example, [26] showed that pre-training with an enormous source dataset (approximately 300 million) of noisily labeled images can outperform pretraining with ImageNet. [1, 18] investigated the importance of the number of classes and the number of images per class in transfer learning. Finally, [38] demonstrated that more pre-training data does not always help, and transfer learning can be sensitive to the choice of pre-training data. They also" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "3619" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "presented a framework for reweighting the source datapoints in order to boost transfer learning performance." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.149, + 0.473, + 0.421 + ], + "angle": 0, + "content": "Influence functions and datamodels. Influence functions are well-studied statistical tools that have been recently applied in machine learning settings [7, 17, 25]. For a given model, influence functions analyze the effect of a training input on the model's predictions by estimating the expected change in performance when this training input is added or removed. In order to apply this tool in machine learning, [25] propose estimating the influence functions using the Hessian of the loss function. 
A recent line of work estimates this quantity more efficiently by training on different subsets of the training set [13]. In a similar vein, [14] proposed running a Monte Carlo search to estimate the effect of every training input via Shapley values. More recently, [19] proposed datamodeling framework as an alternative way to estimate the effect of a training input on the models' prediction. Datamodels are represented using parametric functions (typically, linear functions) that aim to map a subset of the training set to the model's output." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.439, + 0.205, + 0.455 + ], + "angle": 0, + "content": "6. Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.466, + 0.473, + 0.724 + ], + "angle": 0, + "content": "In this work, we presented a new framework for examining the impact of the source dataset in transfer learning. Specifically, our approach estimates the influence of a source class (or datapoint) that captures how including that class (or datapoint) in the source dataset impacts the downstream model's predictions. Leveraging these estimates, we demonstrate that we can improve the transfer learning performance on a range of downstream tasks by identifying and removing detrimental datapoints from the source dataset. Furthermore, our framework enables us to identify granular subpopulations in the target dataset by projecting fine-grained labels from the source dataset, better understand model failures on the downstream task and detect potential data-leakages from the source to the downstream dataset. We believe our framework provides a new perspective on transfer learning: one that enables us to perform a fine-grained analysis of the impact of the source dataset." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.741, + 0.175, + 0.757 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.766, + 0.47, + 0.841 + ], + "angle": 0, + "content": "[1] Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, and Stefan Carlsson. Factors of transferability for a generic convnet representation. IEEE transactions on pattern analysis and machine intelligence, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.841, + 0.47, + 0.902 + ], + "angle": 0, + "content": "[2] Thomas Berg, Jiongxin Liu, Seung Woo Lee, Michelle L Alexander, David W Jacobs, and Peter N Belhumeur. Birdsnap: Large-scale fine-grained visual categorization of birds. In Proceedings of the IEEE" + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.766, + 0.47, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.536, + 0.092, + 0.895, + 0.12 + ], + "angle": 0, + "content": "Conference on Computer Vision and Pattern Recognition, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.121, + 0.895, + 0.18 + ], + "angle": 0, + "content": "[3] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In European conference on computer vision, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.18, + 0.895, + 0.255 + ], + "angle": 0, + "content": "[4] Emmanuel Candes, Yingying Fan, Lucas Janson, and Jinchi Lv. Panning for gold: model-x knockoffs for high dimensional controlled variable selection. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 80(3):551-577, 2018." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.255, + 0.895, + 0.344 + ], + "angle": 0, + "content": "[5] Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE transactions on pattern analysis and machine intelligence, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.344, + 0.895, + 0.403 + ], + "angle": 0, + "content": "[6] Alexis Conneau and Douwe Kiela. Senteval: An evaluation toolkit for universal sentence representations. Language Resources and Evaluation Conference (LREC), 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.403, + 0.894, + 0.446 + ], + "angle": 0, + "content": "[7] R Dennis Cook and Sanford Weisberg. *Residuals and influence in regression*. New York: Chapman and Hall, 1982." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.447, + 0.895, + 0.506 + ], + "angle": 0, + "content": "[8] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks. In Advances in neural information processing systems (NeurIPS), 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.506, + 0.895, + 0.566 + ], + "angle": 0, + "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Computer Vision and Pattern Recognition (CVPR), 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.566, + 0.894, + 0.669 + ], + "angle": 0, + "content": "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.669, + 0.894, + 0.713 + ], + "angle": 0, + "content": "[11] Shuyang Du, Haoli Guo, and Andrew Simpson. Self-driving car steering angle prediction based on image recognition. arXiv preprint arXiv:1912.05440, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.713, + 0.894, + 0.802 + ], + "angle": 0, + "content": "[12] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pages 178–178. IEEE, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.895, + 0.877 + ], + "angle": 0, + "content": "[13] Vitaly Feldman and Chiyuan Zhang. What neural networks memorize and why: Discovering the long tail via influence estimation. In Advances in Neural Information Processing Systems (NeurIPS), volume 33, pages 2881-2891, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.877, + 0.895, + 0.893 + ], + "angle": 0, + "content": "[14] Amirata Ghorbani and James Zou. Data shapley: Eq-" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.092, + 0.895, + 0.893 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "3620" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.092, + 0.471, + 0.135 + ], + "angle": 0, + "content": "suitable valuation of data for machine learning. 
In International Conference on Machine Learning (ICML), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.472, + 0.209 + ], + "angle": 0, + "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In computer vision and pattern recognition (CVPR), pages 580-587, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.211, + 0.471, + 0.24 + ], + "angle": 0, + "content": "[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.24, + 0.472, + 0.299 + ], + "angle": 0, + "content": "[17] Frank R Hampel, Elvezio M Ronchetti, Peter J Rousseuw, and Werner A Stahel. Robust statistics: the approach based on influence functions, volume 196. John Wiley & Sons, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.3, + 0.471, + 0.343 + ], + "angle": 0, + "content": "[18] Minyoung Huh, Pulkit Agrawal, and Alexei A Efros. What makes imagenet good for transfer learning? arXiv preprint arXiv:1608.08614, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.343, + 0.472, + 0.403 + ], + "angle": 0, + "content": "[19] Andrew Ilyas, Sung Min Park, Logan Engstrom, Guillaume Leclerc, and Aleksander Madry. Datamodels: Predicting predictions from training data. In International Conference on Machine Learning (ICML), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.403, + 0.471, + 0.475 + ], + "angle": 0, + "content": "[20] Saachi Jain, Hadi Salman, Eric Wong, Pengchuan Zhang, Vibhav Vineet, Sai Vermprala, and Aleksander Madry. Missingness bias in model debugging. In International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.476, + 0.471, + 0.535 + ], + "angle": 0, + "content": "[21] Yunhun Jang, Hankook Lee, Sung Ju Hwang, and Jinwoo Shin. Learning what and where to transfer. In International Conference on Machine Learning, pages 3030-3039. PMLR, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.535, + 0.471, + 0.609 + ], + "angle": 0, + "content": "[22] Bojan Karlas, David Dao, Matteo Interlandi, Bo Li, Sebastian Schelter, Wentao Wu, and Ce Zhang. Data debugging with shapley importance over end-to-end machine learning pipelines. arXiv preprint arXiv:2204.11131, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.609, + 0.471, + 0.699 + ], + "angle": 0, + "content": "[23] Alexander Ke, William Ellsworth, Oishi Banerjee, Andrew Y Ng, and Pranav Rajpurkar. Chextransfer: performance and parameter efficiency of imagenet models for chest x-ray interpretation. In Proceedings of the Conference on Health, Inference, and Learning, pages 116-124, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.699, + 0.471, + 0.773 + ], + "angle": 0, + "content": "[24] Jiman Kim and Chanjong Park. End-to-end ego lane estimation based on sequential transfer learning for self-driving cars. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 30-38, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.471, + 0.817 + ], + "angle": 0, + "content": "[25] Pang Wei Koh and Percy Liang. Understanding blackbox predictions via influence functions. In International Conference on Machine Learning, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.817, + 0.471, + 0.877 + ], + "angle": 0, + "content": "[26] Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, and Neil Houlsby. Big transfer (bit): General visual representation learning. arXiv preprint arXiv:1912.11370, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.877, + 0.471, + 0.89 + ], + "angle": 0, + "content": "[27] Simon Kornblith, Jonathon Shlens, and Quoc V Le. Do" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.536, + 0.092, + 0.892, + 0.122 + ], + "angle": 0, + "content": "better imagenet models transfer better? In computer vision and pattern recognition (CVPR), 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.122, + 0.894, + 0.165 + ], + "angle": 0, + "content": "[28] Jonathan Krause, Jia Deng, Michael Stark, and Li Fei-Fei. Collecting a large-scale dataset of fine-grained cars. 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.166, + 0.892, + 0.195 + ], + "angle": 0, + "content": "[29] Alex Krizhevsky. Learning multiple layers of features from tiny images. In Technical report, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.195, + 0.894, + 0.254 + ], + "angle": 0, + "content": "[30] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.254, + 0.894, + 0.312 + ], + "angle": 0, + "content": "[31] Yongchan Kwon and James Zou. Beta shapley: a unified and noise-reduced data valuation framework for machine learning. arXiv preprint arXiv:2110.14049, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.313, + 0.894, + 0.372 + ], + "angle": 0, + "content": "[32] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.372, + 0.892, + 0.416 + ], + "angle": 0, + "content": "[33] Kaleel Mahmood, Rigel Mahmood, and Marten Van Dijk. On the robustness of vision transformers to adversarial examples. 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.416, + 0.892, + 0.475 + ], + "angle": 0, + "content": "[34] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.476, + 0.892, + 0.504 + ], + "angle": 0, + "content": "[35] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 1995." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.505, + 0.892, + 0.578 + ], + "angle": 0, + "content": "[36] Romain Mormont, Pierre Geurts, and Raphael Marée. Comparison of deep transfer learning strategies for digital pathology. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.579, + 0.892, + 0.637 + ], + "angle": 0, + "content": "[37] Behnam Neyshabur, Hanie Sedghi, and Chiyuan Zhang. What is being transferred in transfer learning? Advances in neural information processing systems, 33:512-523, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.638, + 0.892, + 0.697 + ], + "angle": 0, + "content": "[38] Jiquan Ngiam, Daiyi Peng, Vijay Vasudevan, Simon Kornblith, Quoc V Le, and Ruoming Pang. Domain adaptive transfer learning with specialist models. arXiv preprint arXiv:1811.07056, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.697, + 0.892, + 0.757 + ], + "angle": 0, + "content": "[39] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.757, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[40] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pages 3498-3505. IEEE, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.815, + 0.892, + 0.874 + ], + "angle": 0, + "content": "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems (NeurIPS), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.504, + 0.874, + 0.892, + 0.889 + ], + "angle": 0, + "content": "[42] Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.092, + 0.894, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "3621" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.092, + 0.47, + 0.151 + ], + "angle": 0, + "content": "Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems (NeurIPS), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.152, + 0.472, + 0.211 + ], + "angle": 0, + "content": "[43] Francisco Utrera, Evan Kravitz, N. Benjamin Erickson, Rajiv Khanna, and Michael W. Mahoney. Adversarily-trained deep nets transfer better. In ArXiv preprint arXiv:2007.05869, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.211, + 0.472, + 0.27 + ], + "angle": 0, + "content": "[44] Sherrie Wang, George Azzari, and David B Lobell. Crop type mapping without field-level labels: Random forest transfer and unsupervised clustering techniques. Remote sensing of environment, 222:303-317, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.27, + 0.472, + 0.375 + ], + "angle": 0, + "content": "[45] Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, and Ronald M Summers. Chestx-ray8: Hospital-scale chest x-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.375, + 0.472, + 0.434 + ], + "angle": 0, + "content": "[46] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In Computer Vision and Pattern Recognition (CVPR), 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.434, + 0.472, + 0.507 + ], + "angle": 0, + "content": "[47] Michael Xie, Neal Jean, Marshall Burke, David Lobell, and Stefano Ermon. Transfer learning from deep features for remote sensing and poverty mapping. 
In Thirtieth AAAI Conference on Artificial Intelligence, 2016." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.472, + 0.507 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.956 + ], + "angle": 0, + "content": "3622" + } + ] +] \ No newline at end of file diff --git a/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_origin.pdf b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..de782cd92f00aab18563095c615ebc28f07af66f --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/b077d70d-8608-4443-a4ce-0c29fda55f28_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcc746e7cf15b8e28083cf16c5da5e68ee845bbd99118ba066d00b7edf02addb +size 2861834 diff --git a/2023/A Data-Based Perspective on Transfer Learning/full.md b/2023/A Data-Based Perspective on Transfer Learning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7237b4635c5a0f8a74b317b4255853466a2e3a13 --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/full.md @@ -0,0 +1,362 @@ +# A Data-Based Perspective on Transfer Learning + +Saachi Jain* + +MIT + +saachij@mit.edu + +Hadi Salman* + +MIT + +hady@mit.edu + +Alaa Khaddaj * + +MIT + +alaakah@mit.edu + +Eric Wong + +University of Pennsylvania + +exwong@seas.upenn.edu + +Sung Min Park + +MIT + +sp765@mit.edu + +Aleksander Mądry + +MIT + +madry@mit.edu + +# Abstract + +It is commonly believed that in transfer learning including more pre-training data translates into better performance. However, recent evidence suggests that removing data from the source dataset can actually help too. In this work, we take a closer look at the role of the source dataset's composition in transfer learning and present a framework for probing its impact on downstream performance. Our framework gives rise to new capabilities such as pinpointing transfer learning brittleness as well as detecting pathologies such as data-leakage and the presence of misleading examples in the source dataset. In particular, we demonstrate that removing detrimental datapoints identified by our framework indeed improves transfer learning performance from ImageNet on a variety of target tasks. + +# 1. Introduction + +Transfer learning enables us to adapt a model trained on a source dataset to perform better on a downstream target task. This technique is employed in a range of machine learning applications including radiology [23, 45], autonomous driving [11, 24], and satellite imagery analysis [44, 47]. Despite its successes, however, it is still not clear what the drivers of performance gains brought by transfer learning actually are. + +So far, a dominant approach to studying these drivers focused on the role of the source model—i.e., the model trained on the source dataset. The corresponding works involve investigating the source model's architecture [23], accuracy [27], adversarial vulnerability [42, 43], and training procedure [21, 30]. This line of work makes it clear that the properties of the source model has a significant impact on + +transfer learning. There is some evidence, however, that the source dataset might play an important role as well [18, 26, 38]. 
For example, several works have shown that while increasing the size of the source dataset generally boosts transfer learning performance, removing specific classes can help too [18, 26, 38]. All of this motivates a natural question: + +How can we pinpoint the exact impact of the source dataset in transfer learning? + +Our Contributions. In this paper, we present a framework for measuring and analyzing the impact of the source dataset's composition on transfer learning performance. To do this, our framework provides us with the ability to investigate the counterfactual impact on downstream predictions of including or excluding datapoints from the source dataset, drawing inspiration from classical supervised learning techniques such as influence functions [7, 13, 25] and datamodels [19]. Using our framework, we can: + +- Pinpoint what parts of the source dataset are most utilized by the downstream task. +- Automatically extract granular subpopulations in the target dataset through projection of the fine-grained labels of the source dataset. +- Surface pathologies such as source-target data leakage and mislabelled source datapoints. + +We also demonstrate how our framework can be used to find detrimental subsets of ImageNet [9] that, when removed, give rise to better downstream performance on a variety of image classification tasks. + +# 2. A Data-Based Framework for Studying Transfer Learning + +In order to pinpoint the role of the source dataset in transfer learning, we need to understand how the composition of that source dataset impacts the downstream model's performance. To do so, we draw inspiration from supervised machine learning approaches that study the impact of the training data on the model's subsequent predictions. In particular, these approaches capture this impact via studying (and approximating) the counterfactual effect of excluding certain training datapoints. This paradigm underlies a number of techniques, from influence functions [7, 13, 25], to datamodels [19], to data Shapley values [14, 22, 31]. + +Now, to adapt this paradigm to our setting, we study the counterfactual effect of excluding datapoints from the source dataset on the downstream, target task predictions. In our framework, we will focus on the inclusion or exclusion of entire classes in the source dataset, as opposed to individual examples2. This is motivated by the fact that, intuitively, we expect these classes to be the ones that embody whole concepts and thus drive the formation of (transferred) features. We therefore anticipate the removal of entire classes to have a more measurable impact on the representation learned by the source model (and consequently on the downstream model's predictions). + +Once we have chosen to focus on removal of entire source classes, we can design counterfactual experiments to estimate their influences. A natural approach here, the leave-one-out method [7, 25], would involve removing each individual class from the source dataset separately and then measuring the change in the downstream model's predictions. However, in the transfer learning setting, we suspect that removing a single class from the source dataset won't significantly change the downstream model's performance. Thus, leave-one-out methodology may be able to capture meaningful influences only in rare cases. This is especially so as many common source datasets contain highly redundant classes. For example, ImageNet contains over 100 dog-breed classes. 
The removal of a single dog-breed class might thus have a negligible impact on transfer learning performance, but the removal of all of the dog classes might significantly change the features learned by the downstream model. For these reasons, we adapt the subsampling [13, 19] approach, which revolves around removing a random collection of source classes at once.

Computing transfer influences. In the light of the above, our methodology for computing the influence of source classes on transfer learning performance involves training a large number of models with random subsets of the source classes removed, and fine-tuning these models on the target task. We then estimate the influence value of a source class $\mathcal{C}$ on a target example $t$ as the expected difference in the transfer model's performance on example $t$ when class $\mathcal{C}$ was either included in or excluded from the source dataset:

$$
\operatorname{Infl}[\mathcal{C} \rightarrow t] = \mathbb{E}_{S}[f(t; S) \mid \mathcal{C} \subset S] - \mathbb{E}_{S}[f(t; S) \mid \mathcal{C} \not\subset S], \tag{1}
$$

where $f(t; S)$ is the softmax output of a model trained on a subset $S$ of the source dataset. A positive influence value indicates that including the source class $\mathcal{C}$ helps the model predict the target example $t$ correctly. On the other hand, a negative influence value suggests that the source class $\mathcal{C}$ actually hurts the model's performance on the target example $t$. We outline the overall procedure in Algorithm 1, and defer a detailed description of our approach to Appendix A.

Algorithm 1 Estimation of source dataset class influences on transfer learning performance.

Require: Source dataset $\mathcal{S} = \cup_{k=1}^{K} \mathcal{C}_k$ (with $K$ classes), a target dataset $\mathcal{T} = (t_1, t_2, \dots, t_n)$, training algorithm $\mathcal{A}$, subset ratio $\alpha$, and number of models $m$

1: Sample $m$ random subsets $S_{1}, S_{2}, \dots, S_{m} \subset \mathcal{S}$ of size $\alpha \cdot |\mathcal{S}|$
2: for $i \in 1$ to $m$ do
3: Train model $f_{i}$ by running algorithm $\mathcal{A}$ on $S_{i}$
4: end for
5: for $k \in 1$ to $K$ do
6: for $j \in 1$ to $n$ do
7: $\operatorname{Infl}[\mathcal{C}_k \rightarrow t_j] = \frac{\sum_{i=1}^{m} f_i(t_j; S_i)\, \mathbb{1}_{\mathcal{C}_k \subset S_i}}{\sum_{i=1}^{m} \mathbb{1}_{\mathcal{C}_k \subset S_i}} - \frac{\sum_{i=1}^{m} f_i(t_j; S_i)\, \mathbb{1}_{\mathcal{C}_k \not\subset S_i}}{\sum_{i=1}^{m} \mathbb{1}_{\mathcal{C}_k \not\subset S_i}}$
8: end for
9: end for
10: return $\operatorname{Infl}[\mathcal{C}_k \rightarrow t_j]$, for all $j \in [n], k \in [K]$

A note on computational costs. In order to compute transfer influences, we need to train a large number of source models, each on a fraction of the source dataset. Specifically, we pre-train 7,540 models on ImageNet, each on a randomly chosen $50\%$ of the ImageNet dataset. This pre-training step needs to be performed only once though: these same models can then be used to fine-tune on each new target task. Overall, the whole process (training the source models and fine-tuning on target datasets) takes less than 20 days using 8 V100 GPUs.

Are so many models necessary? In Section A.5, we explore computing transfer influences with smaller numbers

![](images/235656645a456c8f109cbf63a2fda6094ec63d0324c222c795449cb3e854a7bb.jpg)
Figure 1.
Most positive and negative ImageNet classes ordered based on their overall influence on the CIFAR-10 dataset. The top source classes (e.g., tailed frog and sorrel horse) turn out to be semantically relevant to the target classes (e.g., frog and horse). + +![](images/5bbcf455e014cb3e5a303487377cb9e1f151c831431e5a96630b2e1cb1590206.jpg) +(a) CIFAR-10 results +Figure 2. Target task accuracies after removing the K most positively or negatively influential ImageNet classes from the source dataset. Mean/std are reported over 10 runs. (a) Results with CIFAR-10 as the target task after removing different numbers of classes from the source dataset. We also include baselines of using the full ImageNet dataset and removing random classes. One can note that, by removing negatively influential source classes, we can obtain a test accuracy that is $2.5\%$ larger than what using the entire ImageNet dataset would yield. Results for other target tasks can be found in Appendix C. (b) Peak performances when removing the most negatively influential source classes across a range of other target tasks. We also compare against using the full ImageNet dataset or a subset of source classes that are semantically relevant to the target classes (defined via the WordNet hierarchy, see Appendix A for details). + +
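To make Algorithm 1 and Eq. (1) concrete, the snippet below is a minimal NumPy sketch of the influence-estimation step, assuming the $m$ subsampled source models have already been trained and fine-tuned on the target task. The array names (`class_masks`, `target_outputs`) and the toy sizes are illustrative placeholders rather than the paper's released code; in practice the outputs come from the 7,540 pre-trained ImageNet models described in Section 2.

```python
import numpy as np

# Toy sizes (the paper uses m = 7,540 models, K = 1,000 ImageNet classes,
# and n = number of target examples; small values keep the sketch cheap).
m, K, n = 200, 10, 50
alpha = 0.5
rng = np.random.default_rng(0)

# class_masks[i, k] = 1 if source class k was kept in the subset S_i used to
# train model f_i; each subset keeps a fraction alpha of the source classes.
class_masks = np.zeros((m, K))
n_keep = int(alpha * K)
for i in range(m):
    kept = rng.choice(K, size=n_keep, replace=False)
    class_masks[i, kept] = 1.0

# target_outputs[i, j] = f_i(t_j; S_i): model i's (softmax) output on target
# example t_j after fine-tuning. Random stand-in values for illustration.
target_outputs = rng.random((m, n))

def class_influences(class_masks, target_outputs):
    """Estimate Infl[C_k -> t_j] as in Algorithm 1: mean output over models
    whose source subset contained class k, minus mean output over models
    whose subset did not."""
    included = class_masks            # (m, K)
    excluded = 1.0 - class_masks      # (m, K)
    sum_incl = included.T @ target_outputs   # (K, n)
    sum_excl = excluded.T @ target_outputs   # (K, n)
    mean_incl = sum_incl / included.sum(axis=0, keepdims=True).T
    mean_excl = sum_excl / excluded.sum(axis=0, keepdims=True).T
    return mean_incl - mean_excl      # (K, n): Infl[C_k -> t_j]

infl = class_influences(class_masks, target_outputs)

# Overall influence of each source class on the target task (as in Figure 1):
# average over target examples, then rank classes.
overall = infl.mean(axis=1)
print("most positively influential classes:", np.argsort(overall)[::-1][:3])
print("most negatively influential classes:", np.argsort(overall)[:3])
```

Ranking source classes by this overall influence is what produces Figure 1, and re-training after removing the most negatively influential classes is the counterfactual summarized in Figure 2 and in the table below.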
| Target Dataset | Source: Full ImageNet | Source: Removing Bottom Infl. | Source: Semantically Relevant Classes |
|---|---|---|---|
| AIRCRAFT | 36.08 ± 1.07 | 36.88 ± 0.74 | N/A |
| BIRDSNAP | 38.42 ± 0.40 | 39.19 ± 0.38 | 26.74 ± 0.31 |
| CALTECH101 | 86.69 ± 0.79 | 87.03 ± 0.30 | 82.28 ± 0.40 |
| CALTECH256 | 74.97 ± 0.27 | 75.24 ± 0.21 | 67.42 ± 0.39 |
| CARS | 39.55 ± 0.32 | 40.59 ± 0.57 | 21.71 ± 0.40 |
| CIFAR10 | 81.16 ± 0.30 | 83.64 ± 0.40 | 75.53 ± 0.42 |
| CIFAR100 | 59.37 ± 0.58 | 61.46 ± 0.59 | 55.21 ± 0.52 |
| FLOWERS | 82.92 ± 0.52 | 82.89 ± 0.48 | N/A |
| FOOD | 56.19 ± 0.14 | 56.85 ± 0.27 | 39.36 ± 0.39 |
| PETS | 83.41 ± 0.55 | 87.59 ± 0.24 | 87.16 ± 0.24 |
| SUN397 | 50.15 ± 0.23 | 51.34 ± 0.29 | N/A |
+ +(b) Summary of 11 target tasks + +of models. While using the full number of models provides the best results, training a much smaller number of models (e.g., 1000 models, taking slightly over 2.5 days on 8 V100 GPUs) still provides meaningful transfer influences. Thus in practice, one can choose the number of source models based on noise tolerance and computational budget. Further convergence results can be found in Appendix A.5. + +# 3. Identifying the Most Influential Classes of the Source Dataset + +In Section 2, we presented a framework for pinpointing the role of the source dataset in transfer learning by estimating the influence of each source class on the target model's + +predictions. Using these influences, we can now take a look at the classes from the source dataset that have the largest positive or negative impact on the overall transfer learning performance. We focus our analysis on the fixed-weights transfer learning setting (further results, including full model fine-tuning as well as generalization to other architectures, can be found in Appendix E). + +As one might expect, not all source classes have large influences. Figure 1 displays the most influential classes of ImageNet with CIFAR-10 as the target task. Notably, the most positively influential source classes turn out to be directly related to classes in the target task (e.g., the ImageNet label "tailed frog" is an instance of the CIFAR class "frog"). This trend holds across all of the target datasets and transfer + +![](images/af69a0124de5c06b0afeb91305ad6cd5b33db58ebee1e57128634501170edbbd.jpg) +Figure 3. Most positive and negative influencing ImageNet classes for the CIFAR-10 class "bird". These are calculated by averaging the influence of each source class over all bird examples. We find that the most positively influencing ImageNet classes (e.g., "ostrich" and "bustard") are related to the CIFAR-10 class "bird". See Appendix E for results on other CIFAR-10 classes. + +![](images/d1390e88abe3fdca289ecbb34d0c8b4fc504ce154ff647c1ee48fed57f676841.jpg) +Figure 4. Projecting source labels onto the target dataset. For various target datasets (right), we display the images that were most positively influenced by various ImageNet classes in the source dataset (left). We find that the identified images from the target datasets look similar to the corresponding images in the source dataset. + +learning settings we considered (see Appendix C). Interestingly, the source dataset also contains classes that are overall negatively influential for the target task, e.g., "bookshop" and "jigsaw puzzle" classes. (In Section 4, we will take a closer look at the factors that can cause a source class to be negatively influential for a target prediction.) + +How important are the most influential source classes? We now remove each of the most influential classes from the source dataset to observe their actual impact on transfer learning performance (Figure 2a). As expected, removing the most positively influential classes severely degrades transfer learning performance as compared to removing random classes. This counterfactual experiment confirms that + +![](images/d2264184567399bafc67d40f990002acaa9713315000ffaaa13db27892f3beec.jpg) +Figure 5. The CIFAR-10 images that were most positively (or negatively) influenced by the ImageNet classes "starfish" and "rapeseed." CIFAR-10 images that are highly influenced by the "starfish" class have similar shapes, while those influenced by "rapeseed" class have yellow-green colors. 
+ +these classes are indeed important to the performance of transfer learning. On the other hand, removing the most negatively influential classes actually improves the overall transfer learning performance beyond what using the entire ImageNet dataset provides (see Figure 2b). + +Above, we noted that the top influential source classes are typically related to the classes in the target dataset. What happens if we only choose source classes that are semantically relevant to the classes of the target dataset? Indeed, [38] found that hand-picking such source datasets can sometimes boost transfer learning performance. For each target dataset, we select ImageNet classes that are semantically relevant to the target classes (using the WordNet hierarchy, see Appendix A). As shown in Figure 2b, choosing an optimal subset of classes via transfer influences substantially outperforms this baseline. + +# 4. Probing the Impact of the Source Dataset on Transfer Learning + +In Section 3, we developed a methodology for identifying source dataset classes that have the most impact on + +transfer learning performance. Now, we demonstrate how this methodology can be extended into a framework for probing and understanding transfer learning, including: (1) identifying granular target subpopulations that correspond to source classes, (2) debugging transfer learning failures, and (3) detecting data leakage between the source and target datasets. We focus our demonstration of these capabilities on a commonly-used transfer learning setting: ImageNet to CIFAR-10 (experimental details are in Appendix A). + +# 4.1. Capability 1: Extracting target subpopulations by projecting source class labels + +Imagine that we would like to find all the ostriches in the CIFAR-10 dataset. This is not an easy task as CIFAR-10 only has "bird" as a label, and thus lacks sufficiently fine-grained annotations. Luckily, however, ImageNet does contain an ostrich class! Our computed influences enable us to "project" this ostrich class annotation (and, more broadly, the fine-grained label hierarchy of our source dataset) to find this subpopulation of interest in the target dataset. + +Indeed, our examination from Section 3 suggests that the most positively influencing source classes are typically those + +![](images/6d2ea1b2b94a28a4425c3a0896ca56ae1f95aa6d13736b307476d1adb709d499.jpg) + +![](images/a9ebfc2f64cd6995d626a93161045698d0778ff49ce790f94ebb00b7b85121d0.jpg) + +![](images/2a170b84bbf8e8b8c856d0f95e83999c96416ff2e03aeb908c2ff35cac705bf9.jpg) + +![](images/1845b6a248cb356bf82e8f11d5c59f70b443a942e8961b0cd4fca4f91bc1460d.jpg) + +![](images/8d9209c766037076e99394926fcabc0d351757387a495ac250a3f1dd2599555a.jpg) + +![](images/6f68316446faa07221a10558bc0324672f1bfa423e1b0cbe30aa0a07b33c395a.jpg) + +![](images/79b6e4ba9a79234ec4b4b92619abdbd026a5488c0465fb6d4b98290c257ff180.jpg) + +![](images/e542c2d8e5ce40027bcbe6da7b63261105424dddb7af76b0191a2da41a735002.jpg) +Figure 6. Pinpointing highly negatively influential source classes can help explain model mistakes. Left: For three CIFAR-10 images, we plot the most negatively influential source classes. Right: Over 20 runs, the fraction of times that our downstream model predicts each label for the given CIFAR-10 image. When the most negatively influential class is removed, the model predicts the correct label more frequently. More examples can be found in Appendix E. 
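As a rough illustration of how Capabilities 1 and 2 query the influences once they are computed, the sketch below surfaces the target images most positively influenced by a chosen source class (the projection used to find ostriches above) and lists the most negatively influential source classes for a single target image (as in Figure 6). The `infl` matrix and the short class list here are synthetic placeholders, not the actual computed influences.

```python
import numpy as np

# infl[k, j] = Infl[C_k -> t_j], estimated as in Algorithm 1 (Section 2).
# Toy stand-in values; in practice this comes from the subsampled models.
rng = np.random.default_rng(0)
source_classes = ["ostrich", "tailed frog", "sorrel horse", "rapeseed", "bookshop"]
infl = rng.normal(size=(len(source_classes), 1000))  # 1,000 target examples

def project_source_class(infl, source_classes, class_name, top=10):
    """Capability 1: target examples most positively influenced by a source class."""
    k = source_classes.index(class_name)
    return np.argsort(infl[k])[::-1][:top]            # indices of target images

def blame_source_classes(infl, source_classes, target_idx, top=3):
    """Capability 2: source classes most negatively influencing one target example."""
    order = np.argsort(infl[:, target_idx])           # ascending: most negative first
    return [(source_classes[k], infl[k, target_idx]) for k in order[:top]]

# E.g., surface likely "ostrich" images inside the coarse CIFAR-10 "bird" class,
# or ask which source classes hurt a frequently misclassified image.
print(project_source_class(infl, source_classes, "ostrich"))
print(blame_source_classes(infl, source_classes, target_idx=7))
```

Both queries only index into the precomputed influence matrix, so once the subsampled source models have been trained, this kind of exploration is essentially free.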
+ +![](images/3ff9ce6bfa7e5910c4f1b25bcc8ef57b82273fc3f0158f91d370fa9da6b8381d.jpg) + +that directly overlap with the target classes (see Figure 1). In particular, for our example, "ostrich" is highly positively influential for the "bird" class (see Figure 3). To find ostriches in the CIFAR-10 dataset, we thus need to simply surface the CIFAR-10 images which were most positively influenced by the "ostrich" source class (see Figure 4). + +It turns out that this type of projection approach can be applied more broadly. Even when the source class is not a direct sub-type of a target class, the downstream model can still leverage salient features from this class — such as shape or color — to predict on the target dataset. For such classes, projecting source labels can extract target subpopulations which share such features. To illustrate this, in Figure 5, we display the CIFAR-10 images that are highly influenced by the classes "starfish" and "rapeseed" (both of which do not directly appear in the CIFAR-10 dataset). For these classes, the most influenced CIFAR-10 images share the same shape + +("starfish") or color ("rapeseed") as their ImageNet counterparts. More examples of such projections can be found in Appendix E. + +# 4.2. Capability 2: Debugging the failures of a transferred model + +Our framework enables us to also reason about the possible mistakes of the transferred model caused by source dataset classes. For example, consider the CIFAR-10 image of a dog in Figure 6, which our transfer learning model often mispredicts as a horse. Using our framework, we can demonstrate that this image is strongly negatively influenced by the source class "sorrel horse." Thus, our downstream model may be misusing a feature introduced by this class. Indeed, once we remove "sorrel horse" from the source dataset, our model predicts the correct label more frequently. 
(See Appendix E for more examples, as well as a quantitative + +![](images/24c98d6b8ae189ac6d1ef9136bf53a842c3309af55ad190880401fdc820d1585.jpg) +ImageNet Images +speedboat + +![](images/e85f26acb175ee3896045a408094ed3f5c3fcf58e04c2fd87c51f3f53e50e327.jpg) +Most Positively Influenced +tailed frog + +![](images/b78187756e5a1e31815a5efcf9654ddfba46d0fe0d09885eda106dba278a588c.jpg) +warplane + +![](images/6c6ecb37cff6ebd59ed60cb24773e5d4d7b4c650fdd12f426f957687e164a443.jpg) +racer + +![](images/eed8fc068c21a53e01ae744ed7ab3c868b9ee0d231c698c69674f71c8f917c33.jpg) +CIFAR-10 Images +ship + +![](images/e912c81e06a985a27646767bf43202c68e2b7794768dca975027ef80217bb3df.jpg) +frog + +![](images/b0ea7a2e6aeb7e18b2eb5b3e683784d135c1b14802e08d9089a3297b547dbaeb.jpg) +airplane + +![](images/8468c3c5b84e6aa89752f0c412ed708c3f429e4924326142a904beeeaaa9ebc5.jpg) +automobile + +![](images/d639a1977a40d0b7f3e7b870929a2f33cf582ab313815455d2a2ed2c8df1d179.jpg) +Most Negatively Influenced +lawnmower + +![](images/0f9b8a03d27b4c0fbad31470f2f14afcd3dc3e6ca2f5c8baa59b26de4837af60.jpg) +minivan + +![](images/8f2f49548834a727623bfa1ac95596cd5443628dcc675c037f3bfd7c5c96f2da.jpg) +wing + +![](images/07faa20b87ec0f33fedc983fef21e0318efa232c8f59100ee0792dc3eddf838b.jpg) +book jacket + +![](images/e8bbb4fd74bda0757b5385670c20d47e5b9fe2815e1bbaec6ec5bc3dbb996e29.jpg) +airplane + +![](images/49f1a3efd81fa4fbb77a767b1d63d74a0bc5f7fe04e1188d8207ccfb2defb722.jpg) +airplane + +![](images/82b6b7dd0adf53a900b3cbfca67ad0b90f25f8ce3c621b5d82dcb88da3996f78.jpg) +ship + +![](images/84b75cb17c9f7c53e4d9466d0f09618fb86aee0c58db53c3917fddb15d61d952.jpg) +deer + +![](images/a955543a83359f02418fbd2bcd55c201e1c486a29303e620816cc2b0eee66faa.jpg) +ImageNet Images +ostrich + +![](images/59bb26ee10ff0f0936995aa2fd19a902af27c3ca52ef9b4ba4aa30b3783d2938.jpg) +warplane + +![](images/106a1f1cd9f8b82afa3b83193e2e220518d723fbc9334ee29e64f7b81bbae8cd.jpg) +sorrel horse + +![](images/3a05da6efa0c53f7f07bdcaf52b9fc1bb44d2287fe2d33ecd72e90cd0a6ffc9b.jpg) +moving van + +![](images/36982405a6bc07a564f1e52cb70dbad7622c60ae6f4221d54fc443c27343d493.jpg) +warplane + +![](images/29136a5aac4e2987454b183fae55b11d858ff68a485d87945697ea1b38004622.jpg) +beach wagon + +![](images/1a718365002950bbaa65ab0fa2a019e22bb89cc0e25f47c28d0b7f214a6998dd.jpg) +warplane + +![](images/6b8f7c776d9773a24ca4618d5e531c7666a8d3c3b9f9120f206f6c0bc0187488.jpg) +moving van + +![](images/168b80208ee4478b70c7e6e43c9a58cab1975c037919a5d5af4cf299466e0af6.jpg) +CIFAR-10 Images +bird +Figure 7. ImageNet training images with highest positive (left) or negative (right) example-wise (average) influences on CIFAR-10 test images. We find that ImageNet images that are highly positively influential often correspond to data leakage, while ImageNet images that are highly negatively influential are often either mislabeled, ambiguous, or otherwise misleading. For example, the presence of a flying lawn mower in the ImageNet dataset hurts the downstream performance on a similarly shaped airplane (boxed). 
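The example-wise influences summarized in the caption above also give a cheap automatic screen for the leakage and mislabeling issues discussed in Section 4.3 below. The sketch is a toy illustration, not the paper's pipeline: `ex_infl` stands in for the per-example influence matrix described in Appendix D, and the planted entry and outlier threshold are arbitrary.

```python
import numpy as np

rng = np.random.default_rng(0)
# Stand-in for example-wise influences: rows = ImageNet training images,
# columns = CIFAR-10 test images (shapes shrunk for the demo).
ex_infl = rng.normal(scale=0.01, size=(5_000, 1_000))
ex_infl[123, 456] = 0.9        # plant one suspiciously strong pair

# Source/target pairs with extreme influence magnitude are candidates for
# data leakage (large positive values, near-duplicates) or misleading /
# mislabeled source images (large negative values), as in Figure 7.
threshold = 8 * ex_infl.std()
for src, tgt in zip(*np.nonzero(np.abs(ex_infl) > threshold)):
    print(f"Inspect ImageNet image {src} vs. CIFAR-10 test image {tgt} "
          f"(influence {ex_infl[src, tgt]:+.3f})")
```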
+ +![](images/15fb82c4b3d8da541a211e1f93dc2f25ce18652dca8f2c43db29b3419d9b00a5.jpg) +airplane + +![](images/a4451c709cfaf1ddf5f6b6599f60769b969d696137c3510ddac23067c4af97fe.jpg) +horse + +![](images/b1b31a96d714600dfe02db18a53e4b69d7245597263a2e36a992b3d373946ecd.jpg) +truck + +![](images/a537a378a4d35c2e687e6b5a5af15901f7a83988731b78b90f6c54fb7441152b.jpg) +ship + +![](images/22f15385166b2f6462e0365af68740d4b527886bfd5e9ca2e72de4a3fb2d48c1.jpg) +airplane + +![](images/a49ef592bc4a2b211857e3026faec3cb114cf571122eaa86ca2a7b9e1b4706f0.jpg) +ship + +![](images/fdfa218c1e16c1ab04e01a7ce42237db87132f2ca79b062c9cf5294b1ac5d61f.jpg) +automobile + +analysis of this experiment.) + +# 4.3. Capability 3: Detecting data leakage and misleading source examples + +Thus far, we have focused on how the classes in the source dataset influence the predictions of the transferred model on target examples. In this section, we extend our analysis to the individual datapoints of the source dataset. We do so by adapting our approach to measure the influence of each individual source datapoint on each target datapoint. Further details on how these influences are computed can be found in Appendix D. + +Figure 7 displays the ImageNet training examples that have highly positive or negative influences on CIFAR-10 test examples. We find that the source images that are highly positively influential are often instances of data leakage between the source training set and the target test set. On the other hand, the ImageNet images that are highly negatively influential are typically mislabeled, misleading, or otherwise surprising. For example, the presence of the ImageNet image of a flying lawn mower hurts the performance on a CIFAR-10 image of a regular (but similarly shaped) airplane (see Figure 7). + +# 5. Related Work + +Transfer learning. Transfer learning is a technique commonly used in domains ranging from medical imaging [23, 36], language modeling [6], to object detection [5, 8, 15, 41]. Therefore, there has been considerable interest in understanding the drivers of transfer learning's success. For example, by performing transfer learning on block-shuffled images, [37] demonstrate that at least some of the benefits of transfer learning come from low-level image statistics of source data. There is also an important line of work studying transfer learning by investigating the relationship between different properties of the source model and performance on the target task [23, 27, 42, 43]. + +The works that are the most relevant to ours are those which studied how modifying the source dataset can affect the downstream performance. For example, [26] showed that pre-training with an enormous source dataset (approximately 300 million) of noisily labeled images can outperform pretraining with ImageNet. [1, 18] investigated the importance of the number of classes and the number of images per class in transfer learning. Finally, [38] demonstrated that more pre-training data does not always help, and transfer learning can be sensitive to the choice of pre-training data. They also + +presented a framework for reweighting the source datapoints in order to boost transfer learning performance. + +Influence functions and datamodels. Influence functions are well-studied statistical tools that have been recently applied in machine learning settings [7, 17, 25]. 
For a given model, influence functions analyze the effect of a training input on the model's predictions by estimating the expected change in performance when this training input is added or removed. In order to apply this tool in machine learning, [25] propose estimating the influence functions using the Hessian of the loss function. A recent line of work estimates this quantity more efficiently by training on different subsets of the training set [13]. In a similar vein, [14] proposed running a Monte Carlo search to estimate the effect of every training input via Shapley values. More recently, [19] proposed the datamodeling framework as an alternative way to estimate the effect of a training input on the model's predictions. Datamodels are represented using parametric functions (typically, linear functions) that aim to map a subset of the training set to the model's output.

# 6. Conclusions

In this work, we presented a new framework for examining the impact of the source dataset in transfer learning. Specifically, our approach estimates, for each source class (or datapoint), an influence that captures how including that class (or datapoint) in the source dataset impacts the downstream model's predictions. Leveraging these estimates, we demonstrate that we can improve transfer learning performance on a range of downstream tasks by identifying and removing detrimental datapoints from the source dataset. Furthermore, our framework enables us to identify granular subpopulations in the target dataset by projecting fine-grained labels from the source dataset, to better understand model failures on the downstream task, and to detect potential data leakage from the source to the target dataset. We believe our framework provides a new perspective on transfer learning: one that enables us to perform a fine-grained analysis of the impact of the source dataset.

# References

[1] Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, and Stefan Carlsson. Factors of transferability for a generic convnet representation. IEEE transactions on pattern analysis and machine intelligence, 2015.
[2] Thomas Berg, Jiongxin Liu, Seung Woo Lee, Michelle L Alexander, David W Jacobs, and Peter N Belhumeur. Birdsnap: Large-scale fine-grained visual categorization of birds. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2014.
[3] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101: Mining discriminative components with random forests. In European conference on computer vision, 2014.
[4] Emmanuel Candes, Yingying Fan, Lucas Janson, and Jinchi Lv. Panning for gold: model-x knockoffs for high dimensional controlled variable selection. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 80(3):551-577, 2018.
[5] Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE transactions on pattern analysis and machine intelligence, 2017.
[6] Alexis Conneau and Douwe Kiela. Senteval: An evaluation toolkit for universal sentence representations. Language Resources and Evaluation Conference (LREC), 2018.
[7] R Dennis Cook and Sanford Weisberg. *Residuals and influence in regression*. New York: Chapman and Hall, 1982.
[8] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks.
In Advances in neural information processing systems (NeurIPS), 2016.
[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In Computer Vision and Pattern Recognition (CVPR), 2009.
[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021.
[11] Shuyang Du, Haoli Guo, and Andrew Simpson. Self-driving car steering angle prediction based on image recognition. arXiv preprint arXiv:1912.05440, 2019.
[12] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories. In 2004 conference on computer vision and pattern recognition workshop, pages 178–178. IEEE, 2004.
[13] Vitaly Feldman and Chiyuan Zhang. What neural networks memorize and why: Discovering the long tail via influence estimation. In Advances in Neural Information Processing Systems (NeurIPS), volume 33, pages 2881-2891, 2020.
[14] Amirata Ghorbani and James Zou. Data shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning (ICML), 2019.
[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In computer vision and pattern recognition (CVPR), pages 580-587, 2014.
[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007.
[17] Frank R Hampel, Elvezio M Ronchetti, Peter J Rousseeuw, and Werner A Stahel. Robust statistics: the approach based on influence functions, volume 196. John Wiley & Sons, 2011.
[18] Minyoung Huh, Pulkit Agrawal, and Alexei A Efros. What makes imagenet good for transfer learning? arXiv preprint arXiv:1608.08614, 2016.
[19] Andrew Ilyas, Sung Min Park, Logan Engstrom, Guillaume Leclerc, and Aleksander Madry. Datamodels: Predicting predictions from training data. In International Conference on Machine Learning (ICML), 2022.
[20] Saachi Jain, Hadi Salman, Eric Wong, Pengchuan Zhang, Vibhav Vineet, Sai Vemprala, and Aleksander Madry. Missingness bias in model debugging. In International Conference on Learning Representations, 2022.
[21] Yunhun Jang, Hankook Lee, Sung Ju Hwang, and Jinwoo Shin. Learning what and where to transfer. In International Conference on Machine Learning, pages 3030-3039. PMLR, 2019.
[22] Bojan Karlas, David Dao, Matteo Interlandi, Bo Li, Sebastian Schelter, Wentao Wu, and Ce Zhang. Data debugging with shapley importance over end-to-end machine learning pipelines. arXiv preprint arXiv:2204.11131, 2022.
[23] Alexander Ke, William Ellsworth, Oishi Banerjee, Andrew Y Ng, and Pranav Rajpurkar. Chextransfer: performance and parameter efficiency of imagenet models for chest x-ray interpretation. In Proceedings of the Conference on Health, Inference, and Learning, pages 116-124, 2021.
[24] Jiman Kim and Chanjong Park. End-to-end ego lane estimation based on sequential transfer learning for self-driving cars. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 30-38, 2017.
[25] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions.
In International Conference on Machine Learning, 2017.
[26] Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, and Neil Houlsby. Big transfer (bit): General visual representation learning. arXiv preprint arXiv:1912.11370, 2019.
[27] Simon Kornblith, Jonathon Shlens, and Quoc V Le. Do better imagenet models transfer better? In computer vision and pattern recognition (CVPR), 2019.
[28] Jonathan Krause, Jia Deng, Michael Stark, and Li Fei-Fei. Collecting a large-scale dataset of fine-grained cars. 2013.
[29] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009.
[30] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054, 2022.
[31] Yongchan Kwon and James Zou. Beta shapley: a unified and noise-reduced data valuation framework for machine learning. arXiv preprint arXiv:2110.14049, 2021.
[32] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022.
[33] Kaleel Mahmood, Rigel Mahmood, and Marten Van Dijk. On the robustness of vision transformers to adversarial examples. 2021.
[34] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013.
[35] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 1995.
[36] Romain Mormont, Pierre Geurts, and Raphael Marée. Comparison of deep transfer learning strategies for digital pathology. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018.
[37] Behnam Neyshabur, Hanie Sedghi, and Chiyuan Zhang. What is being transferred in transfer learning? Advances in neural information processing systems, 33:512-523, 2020.
[38] Jiquan Ngiam, Daiyi Peng, Vijay Vasudevan, Simon Kornblith, Quoc V Le, and Ruoming Pang. Domain adaptive transfer learning with specialist models. arXiv preprint arXiv:1811.07056, 2018.
[39] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, 2008.
[40] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pages 3498-3505. IEEE, 2012.
[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems (NeurIPS), 2015.
[42] Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems (NeurIPS), 2020.
[43] Francisco Utrera, Evan Kravitz, N. Benjamin Erickson, Rajiv Khanna, and Michael W. Mahoney. Adversarially-trained deep nets transfer better. arXiv preprint arXiv:2007.05869, 2020.
[44] Sherrie Wang, George Azzari, and David B Lobell. Crop type mapping without field-level labels: Random forest transfer and unsupervised clustering techniques. Remote sensing of environment, 222:303-317, 2019.
[45] Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, and Ronald M Summers.
Chestx-ray8: Hospital-scale chest x-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2017. +[46] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In Computer Vision and Pattern Recognition (CVPR), 2010. +[47] Michael Xie, Neal Jean, Marshall Burke, David Lobell, and Stefano Ermon. Transfer learning from deep features for remote sensing and poverty mapping. In Thirtieth AAAI Conference on Artificial Intelligence, 2016. \ No newline at end of file diff --git a/2023/A Data-Based Perspective on Transfer Learning/images.zip b/2023/A Data-Based Perspective on Transfer Learning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..e533fd8eabc042605011b7ca3bc02737c21af156 --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:085bc131df1b5d5fdc4b70d366770b60231efa49bd569b1b89af1e3a5a37e111 +size 615975 diff --git a/2023/A Data-Based Perspective on Transfer Learning/layout.json b/2023/A Data-Based Perspective on Transfer Learning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..945ecbb99b29d5d94abb69a94b3fbaa7198361e4 --- /dev/null +++ b/2023/A Data-Based Perspective on Transfer Learning/layout.json @@ -0,0 +1,9595 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 148, + 103, + 446, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 103, + 446, + 121 + ], + "spans": [ + { + "bbox": [ + 148, + 103, + 446, + 121 + ], + "type": "text", + "content": "A Data-Based Perspective on Transfer Learning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 143, + 201, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 143, + 201, + 156 + ], + "spans": [ + { + "bbox": [ + 140, + 143, + 201, + 156 + ], + "type": "text", + "content": "Saachi Jain*" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 157, + 158, + 181, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 158, + 181, + 169 + ], + "spans": [ + { + "bbox": [ + 157, + 158, + 181, + 169 + ], + "type": "text", + "content": "MIT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 172, + 225, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 172, + 225, + 185 + ], + "spans": [ + { + "bbox": [ + 115, + 172, + 225, + 185 + ], + "type": "text", + "content": "saachij@mit.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 268, + 144, + 337, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 144, + 337, + 156 + ], + "spans": [ + { + "bbox": [ + 268, + 144, + 337, + 156 + ], + "type": "text", + "content": "Hadi Salman*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 288, + 158, + 312, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 288, + 158, + 312, + 169 + ], + "spans": [ + { + "bbox": [ + 288, + 158, + 312, + 169 + ], + "type": "text", + "content": "MIT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 256, + 172, + 344, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 172, + 344, + 185 + ], + "spans": [ + { + "bbox": [ + 256, + 172, + 344, + 185 + ], + "type": "text", + "content": "hady@mit.edu" + } + ] + } + ], + 
"index": 8 + }, + { + "bbox": [ + 392, + 144, + 467, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 144, + 467, + 157 + ], + "spans": [ + { + "bbox": [ + 392, + 144, + 467, + 157 + ], + "type": "text", + "content": "Alaa Khaddaj *" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 415, + 158, + 440, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 158, + 440, + 169 + ], + "spans": [ + { + "bbox": [ + 415, + 158, + 440, + 169 + ], + "type": "text", + "content": "MIT" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 376, + 172, + 479, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 172, + 479, + 184 + ], + "spans": [ + { + "bbox": [ + 376, + 172, + 479, + 184 + ], + "type": "text", + "content": "alaakah@mit.edu" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 143, + 205, + 198, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 205, + 198, + 219 + ], + "spans": [ + { + "bbox": [ + 143, + 205, + 198, + 219 + ], + "type": "text", + "content": "Eric Wong" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 220, + 237, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 237, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 237, + 233 + ], + "type": "text", + "content": "University of Pennsylvania" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 95, + 235, + 247, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 235, + 247, + 247 + ], + "spans": [ + { + "bbox": [ + 95, + 235, + 247, + 247 + ], + "type": "text", + "content": "exwong@seas.upenn.edu" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 286, + 205, + 361, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 205, + 361, + 219 + ], + "spans": [ + { + "bbox": [ + 286, + 205, + 361, + 219 + ], + "type": "text", + "content": "Sung Min Park" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 220, + 335, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 220, + 335, + 231 + ], + "spans": [ + { + "bbox": [ + 312, + 220, + 335, + 231 + ], + "type": "text", + "content": "MIT" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 277, + 235, + 371, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 235, + 371, + 247 + ], + "spans": [ + { + "bbox": [ + 277, + 235, + 371, + 247 + ], + "type": "text", + "content": "sp765@mit.edu" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 404, + 205, + 495, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 404, + 205, + 495, + 219 + ], + "spans": [ + { + "bbox": [ + 404, + 205, + 495, + 219 + ], + "type": "text", + "content": "Aleksander Mądry" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 438, + 220, + 460, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 220, + 460, + 231 + ], + "spans": [ + { + "bbox": [ + 438, + 220, + 460, + 231 + ], + "type": "text", + "content": "MIT" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 400, + 235, + 495, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 235, + 495, + 247 + ], + "spans": [ + { + "bbox": [ + 400, + 235, + 495, + 247 + ], + "type": "text", + "content": "madry@mit.edu" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 143, + 274, + 192, + 287 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 274, + 
192, + 287 + ], + "spans": [ + { + "bbox": [ + 143, + 274, + 192, + 287 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 46, + 300, + 290, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 300, + 290, + 468 + ], + "spans": [ + { + "bbox": [ + 46, + 300, + 290, + 468 + ], + "type": "text", + "content": "It is commonly believed that in transfer learning including more pre-training data translates into better performance. However, recent evidence suggests that removing data from the source dataset can actually help too. In this work, we take a closer look at the role of the source dataset's composition in transfer learning and present a framework for probing its impact on downstream performance. Our framework gives rise to new capabilities such as pinpointing transfer learning brittleness as well as detecting pathologies such as data-leakage and the presence of misleading examples in the source dataset. In particular, we demonstrate that removing detrimental datapoints identified by our framework indeed improves transfer learning performance from ImageNet on a variety of target tasks." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 47, + 491, + 128, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 128, + 502 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 128, + 502 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 46, + 508, + 288, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 508, + 288, + 592 + ], + "spans": [ + { + "bbox": [ + 46, + 508, + 288, + 592 + ], + "type": "text", + "content": "Transfer learning enables us to adapt a model trained on a source dataset to perform better on a downstream target task. This technique is employed in a range of machine learning applications including radiology [23, 45], autonomous driving [11, 24], and satellite imagery analysis [44, 47]. Despite its successes, however, it is still not clear what the drivers of performance gains brought by transfer learning actually are." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 46, + 592, + 288, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 592, + 288, + 676 + ], + "spans": [ + { + "bbox": [ + 46, + 592, + 288, + 676 + ], + "type": "text", + "content": "So far, a dominant approach to studying these drivers focused on the role of the source model—i.e., the model trained on the source dataset. The corresponding works involve investigating the source model's architecture [23], accuracy [27], adversarial vulnerability [42, 43], and training procedure [21, 30]. This line of work makes it clear that the properties of the source model has a significant impact on" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 306, + 276, + 547, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 276, + 547, + 348 + ], + "spans": [ + { + "bbox": [ + 306, + 276, + 547, + 348 + ], + "type": "text", + "content": "transfer learning. There is some evidence, however, that the source dataset might play an important role as well [18, 26, 38]. For example, several works have shown that while increasing the size of the source dataset generally boosts transfer learning performance, removing specific classes can help too [18, 26, 38]. 
All of this motivates a natural question:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 363, + 547, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 363, + 547, + 387 + ], + "spans": [ + { + "bbox": [ + 306, + 363, + 547, + 387 + ], + "type": "text", + "content": "How can we pinpoint the exact impact of the source dataset in transfer learning?" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 410, + 548, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 410, + 548, + 518 + ], + "spans": [ + { + "bbox": [ + 304, + 410, + 548, + 518 + ], + "type": "text", + "content": "Our Contributions. In this paper, we present a framework for measuring and analyzing the impact of the source dataset's composition on transfer learning performance. To do this, our framework provides us with the ability to investigate the counterfactual impact on downstream predictions of including or excluding datapoints from the source dataset, drawing inspiration from classical supervised learning techniques such as influence functions [7, 13, 25] and datamodels [19]. Using our framework, we can:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 535, + 547, + 649 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 317, + 535, + 547, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 535, + 547, + 559 + ], + "spans": [ + { + "bbox": [ + 317, + 535, + 547, + 559 + ], + "type": "text", + "content": "- Pinpoint what parts of the source dataset are most utilized by the downstream task." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 574, + 547, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 574, + 547, + 609 + ], + "spans": [ + { + "bbox": [ + 317, + 574, + 547, + 609 + ], + "type": "text", + "content": "- Automatically extract granular subpopulations in the target dataset through projection of the fine-grained labels of the source dataset." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 624, + 546, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 624, + 546, + 649 + ], + "spans": [ + { + "bbox": [ + 317, + 624, + 546, + 649 + ], + "type": "text", + "content": "- Surface pathologies such as source-target data leakage and mislabelled source datapoints." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 306, + 665, + 547, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 665, + 547, + 713 + ], + "spans": [ + { + "bbox": [ + 306, + 665, + 547, + 713 + ], + "type": "text", + "content": "We also demonstrate how our framework can be used to find detrimental subsets of ImageNet [9] that, when removed, give rise to better downstream performance on a variety of image classification tasks." 
+ } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 684, + 126, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 684, + 126, + 693 + ], + "spans": [ + { + "bbox": [ + 58, + 684, + 126, + 693 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 59, + 693, + 288, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 693, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 59, + 693, + 288, + 712 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 59, + 693, + 288, + 712 + ], + "type": "text", + "content": "Code is available at https://github.com/MadryLab/data-ansfer" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3613" + } + ] + } + ], + "index": 37 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 71, + 288, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 71, + 288, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 71, + 288, + 99 + ], + "type": "text", + "content": "2. A Data-Based Framework for Studying Transfer Learning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 105, + 289, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 105, + 289, + 236 + ], + "spans": [ + { + "bbox": [ + 46, + 105, + 289, + 236 + ], + "type": "text", + "content": "In order to pinpoint the role of the source dataset in transfer learning, we need to understand how the composition of that source dataset impacts the downstream model's performance. To do so, we draw inspiration from supervised machine learning approaches that study the impact of the training data on the model's subsequent predictions. In particular, these approaches capture this impact via studying (and approximating) the counterfactual effect of excluding certain training datapoints. This paradigm underlies a number of techniques, from influence functions [7, 13, 25], to datamodels [19], to data Shapley values [14, 22, 31]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 237, + 290, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 237, + 290, + 380 + ], + "spans": [ + { + "bbox": [ + 46, + 237, + 290, + 380 + ], + "type": "text", + "content": "Now, to adapt this paradigm to our setting, we study the counterfactual effect of excluding datapoints from the source dataset on the downstream, target task predictions. 
In our framework, we will focus on the inclusion or exclusion of entire classes in the source dataset, as opposed to individual examples2. This is motivated by the fact that, intuitively, we expect these classes to be the ones that embody whole concepts and thus drive the formation of (transferred) features. We therefore anticipate the removal of entire classes to have a more measurable impact on the representation learned by the source model (and consequently on the downstream model's predictions)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 381, + 290, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 381, + 290, + 620 + ], + "spans": [ + { + "bbox": [ + 46, + 381, + 290, + 620 + ], + "type": "text", + "content": "Once we have chosen to focus on removal of entire source classes, we can design counterfactual experiments to estimate their influences. A natural approach here, the leave-one-out method [7, 25], would involve removing each individual class from the source dataset separately and then measuring the change in the downstream model's predictions. However, in the transfer learning setting, we suspect that removing a single class from the source dataset won't significantly change the downstream model's performance. Thus, leave-one-out methodology may be able to capture meaningful influences only in rare cases. This is especially so as many common source datasets contain highly redundant classes. For example, ImageNet contains over 100 dog-breed classes. The removal of a single dog-breed class might thus have a negligible impact on transfer learning performance, but the removal of all of the dog classes might significantly change the features learned by the downstream model. For these reasons, we adapt the subsampling [13, 19] approach, which revolves around removing a random collection of source classes at once." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 636, + 288, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 636, + 288, + 685 + ], + "spans": [ + { + "bbox": [ + 46, + 636, + 288, + 685 + ], + "type": "text", + "content": "Computing transfer influences. In the light of the above, our methodology for computing the influence of source classes on transfer learning performance involves training a large number of models with random subsets of the source" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 306, + 72, + 545, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 545, + 96 + ], + "type": "text", + "content": "Algorithm 1 Estimation of source dataset class influences on transfer learning performance." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "spans": [ + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": "Require: Source dataset " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{S} = \\cup_{k=1}^{K} \\mathcal{C}_k" + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": " classes), a target dataset " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = (t_1, t_2, \\dots, t_n)" + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": ", training algorithm " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": ", subset ratio " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "text", + "content": ", and number of models " + }, + { + "bbox": [ + 306, + 99, + 547, + 137 + ], + "type": "inline_equation", + "content": "m" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 325, + 237, + 405, + 257 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 237, + 405, + 257 + ], + "spans": [ + { + "bbox": [ + 325, + 237, + 405, + 257 + ], + "type": "interline_equation", + "content": "\\frac {\\sum_ {i = 1} ^ {m} f _ {i} \\left(t _ {j} ; S _ {i}\\right) \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}{\\sum_ {i = 1} ^ {m} \\mathbb {1} _ {C _ {k} \\nsubseteq S _ {i}}}", + "image_path": "8b820300a523226060786a1659b7e6caeb18189658bc584315a6650addff0a1d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 137, + 545, + 292 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "spans": [ + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "text", + "content": "1: Sample " + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "text", + "content": " random subsets " + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "inline_equation", + "content": "S_{1}, S_{2}, \\dots, S_{m} \\subset \\mathcal{S}" + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "inline_equation", + "content": "\\alpha \\cdot |\\mathcal{S}|" + }, + { + "bbox": [ + 312, + 137, + 545, + 160 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "spans": [ + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "type": "text", + "content": "2: for " + }, + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "type": "inline_equation", + "content": "i \\in 1" + }, + { + "bbox": [ + 312, + 161, + 397, + 171 
+ ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 312, + 161, + 397, + 171 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "spans": [ + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "text", + "content": "3: Train model " + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "text", + "content": " by running algorithm " + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 312, + 173, + 522, + 185 + ], + "type": "inline_equation", + "content": "S_{i}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 186, + 358, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 186, + 358, + 195 + ], + "spans": [ + { + "bbox": [ + 312, + 186, + 358, + 195 + ], + "type": "text", + "content": "4: end for" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "spans": [ + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "text", + "content": "5: for " + }, + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "inline_equation", + "content": "k \\in 1" + }, + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 312, + 197, + 399, + 207 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "spans": [ + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "text", + "content": "6: for " + }, + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "inline_equation", + "content": "j\\in 1" + }, + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 312, + 209, + 410, + 220 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 312, + 220, + 545, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 220, + 545, + 238 + ], + "spans": [ + { + "bbox": [ + 312, + 220, + 545, + 238 + ], + "type": "text", + "content": "7: " + }, + { + "bbox": [ + 312, + 220, + 545, + 238 + ], + "type": "inline_equation", + "content": "\\operatorname{Infl}[\\mathcal{C}_k \\to t_j] = \\frac{\\sum_{i=1}^m f_i(t_j; S_i) \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}}{\\sum_{i=1}^m \\mathbb{1}_{\\mathcal{C}_k \\subset s_i}} -" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 257, + 372, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 257, + 372, + 266 + ], + "spans": [ + { + "bbox": [ + 312, + 257, + 372, + 266 + ], + "type": "text", + "content": "8: end for" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 267, + 
357, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 267, + 357, + 277 + ], + "spans": [ + { + "bbox": [ + 312, + 267, + 357, + 277 + ], + "type": "text", + "content": "9: end for" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "spans": [ + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "type": "text", + "content": "10: return Infl " + }, + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "type": "inline_equation", + "content": "\\left[\\mathcal{C}_k\\rightarrow t_j\\right]" + }, + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "type": "text", + "content": " , for all " + }, + { + "bbox": [ + 309, + 278, + 507, + 292 + ], + "type": "inline_equation", + "content": "j\\in [n],k\\in [K]" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "content": "classes removed, and fine-tuning these models on the target task. We then estimate the influence value of a source class " + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "content": " on a target example " + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "content": " as the expected difference in the transfer model's performance on example " + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "content": " when class " + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 316, + 547, + 376 + ], + "type": "text", + "content": " was either included in or excluded from the source dataset:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 384, + 546, + 411 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 384, + 546, + 411 + ], + "spans": [ + { + "bbox": [ + 306, + 384, + 546, + 411 + ], + "type": "interline_equation", + "content": "\\operatorname {I n f l} [ \\mathcal {C} \\rightarrow t ] = \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\subset S ] - \\mathbb {E} _ {S} [ f (t; S) \\mid \\mathcal {C} \\not \\subset S ], \\tag {1}", + "image_path": "cfcf72ebe80dbe8047ee1bfc0e1ce66059be6e63460ba9ecd931e047a5347703.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "spans": [ + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "f(t; S)" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": " is the softmax output3 of a model trained on a subset " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": " of the source dataset. 
A positive influence value indicates that including the source class " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": " helps the model predict the target example " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": " correctly. On the other hand, a negative influence value suggests that the source class " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": " actually hurts the model's performance on the target example " + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 421, + 547, + 517 + ], + "type": "text", + "content": ". We outline the overall procedure in Algorithm 1, and defer a detailed description of our approach to Appendix A." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 532, + 547, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 532, + 547, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 532, + 547, + 650 + ], + "type": "text", + "content": "A note on computational costs. In order to compute transfer influences, we need to train a large number of source models, each on a fraction of the source dataset. Specifically, we pre-train 7,540 models on ImageNet, each on a randomly chosen " + }, + { + "bbox": [ + 304, + 532, + 547, + 650 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 304, + 532, + 547, + 650 + ], + "type": "text", + "content": " of the ImageNet dataset. This pre-training step needs to be performed only once though: these same models can then be used to fine-tune on each new target task. Overall, the whole process (training the source models and fine-tuning on target datasets) takes less than 20 days using 8 V100 GPUs4." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 651, + 547, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 651, + 547, + 676 + ], + "spans": [ + { + "bbox": [ + 304, + 651, + 547, + 676 + ], + "type": "text", + "content": "Are so many models necessary? In Section A.5, we explore computing transfer influences with smaller numbers" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 288, + 712 + ], + "type": "text", + "content": "2In Section 4.3, we adapt our framework to calculate more granular influences of individual source examples too." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 306, + 683, + 547, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 683, + 547, + 703 + ], + "spans": [ + { + "bbox": [ + 306, + 683, + 547, + 703 + ], + "type": "text", + "content": "3We experiment with other outputs such as logits, margins, or correctness too. We discuss the corresponding results in Appendix B." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 703, + 409, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 703, + 409, + 713 + ], + "spans": [ + { + "bbox": [ + 317, + 703, + 409, + 713 + ], + "type": "text", + "content": "4Details are in Appendix A." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "3614" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 71, + 521, + 209 + ], + "blocks": [ + { + "bbox": [ + 75, + 71, + 521, + 209 + ], + "lines": [ + { + "bbox": [ + 75, + 71, + 521, + 209 + ], + "spans": [ + { + "bbox": [ + 75, + 71, + 521, + 209 + ], + "type": "image", + "image_path": "235656645a456c8f109cbf63a2fda6094ec63d0324c222c795449cb3e854a7bb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 220, + 547, + 245 + ], + "lines": [ + { + "bbox": [ + 46, + 220, + 547, + 245 + ], + "spans": [ + { + "bbox": [ + 46, + 220, + 547, + 245 + ], + "type": "text", + "content": "Figure 1. Most positive and negative ImageNet classes ordered based on their overall influence on the CIFAR-10 dataset. The top source classes (e.g., tailed frog and sorrel horse) turn out to be semantically relevant to the target classes (e.g., frog and horse)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 50, + 257, + 286, + 420 + ], + "blocks": [ + { + "bbox": [ + 50, + 257, + 286, + 420 + ], + "lines": [ + { + "bbox": [ + 50, + 257, + 286, + 420 + ], + "spans": [ + { + "bbox": [ + 50, + 257, + 286, + 420 + ], + "type": "image", + "image_path": "5bbcf455e014cb3e5a303487377cb9e1f151c831431e5a96630b2e1cb1590206.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 423, + 198, + 432 + ], + "lines": [ + { + "bbox": [ + 137, + 423, + 198, + 432 + ], + "spans": [ + { + "bbox": [ + 137, + 423, + 198, + 432 + ], + "type": "text", + "content": "(a) CIFAR-10 results" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 45, + 441, + 547, + 520 + ], + "lines": [ + { + "bbox": [ + 45, + 441, + 547, + 520 + ], + "spans": [ + { + "bbox": [ + 45, + 441, + 547, + 520 + ], + "type": "text", + "content": "Figure 2. Target task accuracies after removing the K most positively or negatively influential ImageNet classes from the source dataset. Mean/std are reported over 10 runs. (a) Results with CIFAR-10 as the target task after removing different numbers of classes from the source dataset. We also include baselines of using the full ImageNet dataset and removing random classes. One can note that, by removing negatively influential source classes, we can obtain a test accuracy that is " + }, + { + "bbox": [ + 45, + 441, + 547, + 520 + ], + "type": "inline_equation", + "content": "2.5\\%" + }, + { + "bbox": [ + 45, + 441, + 547, + 520 + ], + "type": "text", + "content": " larger than what using the entire ImageNet dataset would yield. Results for other target tasks can be found in Appendix C. (b) Peak performances when removing the most negatively influential source classes across a range of other target tasks. 
We also compare against using the full ImageNet dataset or a subset of source classes that are semantically relevant to the target classes (defined via the WordNet hierarchy, see Appendix A for details)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 307, + 276, + 545, + 422 + ], + "blocks": [ + { + "bbox": [ + 307, + 276, + 545, + 422 + ], + "lines": [ + { + "bbox": [ + 307, + 276, + 545, + 422 + ], + "spans": [ + { + "bbox": [ + 307, + 276, + 545, + 422 + ], + "type": "table", + "html": "
Target DatasetSource Dataset
Full ImageNetRemoving Bottom Infl.Semantically Relevant Classes
AIRCRAFT36.08 ± 1.0736.88 ± 0.74N/A
BIRDSNAP38.42 ± 0.4039.19 ± 0.3826.74 ± 0.31
CALTECH10186.69 ± 0.7987.03 ± 0.3082.28 ± 0.40
CALTECH25674.97 ± 0.2775.24 ± 0.2167.42 ± 0.39
CARS39.55 ± 0.3240.59 ± 0.5721.71 ± 0.40
CIFAR1081.16 ± 0.3083.64 ± 0.4075.53 ± 0.42
CIFAR10059.37 ± 0.5861.46 ± 0.5955.21 ± 0.52
FLOWERS82.92 ± 0.5282.89 ± 0.48N/A
FOOD56.19 ± 0.1456.85 ± 0.2739.36 ± 0.39
PETS83.41 ± 0.5587.59 ± 0.2487.16 ± 0.24
SUN39750.15 ± 0.2351.34 ± 0.29N/A
", + "image_path": "48f17def53eb892cdbb330e0f31676796ecd634445e365aa841699e565718edd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 381, + 423, + 470, + 433 + ], + "lines": [ + { + "bbox": [ + 381, + 423, + 470, + 433 + ], + "spans": [ + { + "bbox": [ + 381, + 423, + 470, + 433 + ], + "type": "text", + "content": "(b) Summary of 11 target tasks" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 46, + 540, + 288, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 540, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 46, + 540, + 288, + 624 + ], + "type": "text", + "content": "of models. While using the full number of models provides the best results, training a much smaller number of models (e.g., 1000 models, taking slightly over 2.5 days on 8 V100 GPUs) still provides meaningful transfer influences. Thus in practice, one can choose the number of source models based on noise tolerance and computational budget. Further convergence results can be found in Appendix A.5." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 640, + 288, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 640, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 47, + 640, + 288, + 669 + ], + "type": "text", + "content": "3. Identifying the Most Influential Classes of the Source Dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 677, + 289, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 289, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 289, + 715 + ], + "type": "text", + "content": "In Section 2, we presented a framework for pinpointing the role of the source dataset in transfer learning by estimating the influence of each source class on the target model's" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 540, + 547, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 540, + 547, + 624 + ], + "spans": [ + { + "bbox": [ + 304, + 540, + 547, + 624 + ], + "type": "text", + "content": "predictions. Using these influences, we can now take a look at the classes from the source dataset that have the largest positive or negative impact on the overall transfer learning performance. We focus our analysis on the fixed-weights transfer learning setting (further results, including full model fine-tuning as well as generalization to other architectures, can be found in Appendix E)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 630, + 548, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 630, + 548, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 630, + 548, + 715 + ], + "type": "text", + "content": "As one might expect, not all source classes have large influences. Figure 1 displays the most influential classes of ImageNet with CIFAR-10 as the target task. Notably, the most positively influential source classes turn out to be directly related to classes in the target task (e.g., the ImageNet label \"tailed frog\" is an instance of the CIFAR class \"frog\"). 
As one might expect, not all source classes have large influences. Figure 1 displays the most influential classes of ImageNet with CIFAR-10 as the target task. Notably, the most positively influential source classes turn out to be directly related to classes in the target task (e.g., the ImageNet label "tailed frog" is an instance of the CIFAR class "frog"). This trend holds across all of the target datasets and transfer learning settings we considered (see Appendix C). Interestingly, the source dataset also contains classes that are overall negatively influential for the target task, e.g., the "bookshop" and "jigsaw puzzle" classes. (In Section 4, we will take a closer look at the factors that can cause a source class to be negatively influential for a target prediction.)

Figure 3. Most positive and negative influencing ImageNet classes for the CIFAR-10 class "bird". These are calculated by averaging the influence of each source class over all bird examples. We find that the most positively influencing ImageNet classes (e.g., "ostrich" and "bustard") are related to the CIFAR-10 class "bird". See Appendix E for results on other CIFAR-10 classes.

Figure 4. Projecting source labels onto the target dataset. For various target datasets (right), we display the images that were most positively influenced by various ImageNet classes in the source dataset (left). We find that the identified images from the target datasets look similar to the corresponding images in the source dataset.

How important are the most influential source classes? We now remove each of the most influential classes from the source dataset to observe their actual impact on transfer learning performance (Figure 2a). As expected, removing the most positively influential classes severely degrades transfer learning performance as compared to removing random classes. This counterfactual experiment confirms that these classes are indeed important to the performance of transfer learning. On the other hand, removing the most negatively influential classes actually improves the overall transfer learning performance beyond what using the entire ImageNet dataset provides (see Figure 2b).

Figure 5. The CIFAR-10 images that were most positively (or negatively) influenced by the ImageNet classes "starfish" and "rapeseed." CIFAR-10 images that are highly influenced by the "starfish" class have similar shapes, while those influenced by the "rapeseed" class have yellow-green colors.

Above, we noted that the top influential source classes are typically related to the classes in the target dataset. What happens if we only choose source classes that are semantically relevant to the classes of the target dataset? Indeed, [38] found that hand-picking such source datasets can sometimes boost transfer learning performance. For each target dataset, we select ImageNet classes that are semantically relevant to the target classes (using the WordNet hierarchy, see Appendix A). As shown in Figure 2b, choosing an optimal subset of classes via transfer influences substantially outperforms this baseline.
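Given per-class overall influences, the class-selection counterfactuals discussed above can be set up roughly as follows; the helper name and the reuse of `overall_influence` from the earlier sketch are our own illustrative choices.

```python
import numpy as np

def keep_after_dropping_most_negative(overall_influence: np.ndarray, k: int) -> np.ndarray:
    """Indices of source classes to keep after removing the k most negatively
    influential ones (overall_influence[c] = average influence of class c on
    target-task performance)."""
    order = np.argsort(overall_influence)          # most negative first
    dropped = set(order[:k].tolist())
    return np.array([c for c in range(overall_influence.shape[0]) if c not in dropped])

# The counterfactual in Figure 2a is the mirror image: dropping the k most
# *positively* influential classes (order[::-1][:k]) should hurt transfer
# performance far more than dropping k random classes.
```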
4. Probing the Impact of the Source Dataset on Transfer Learning

In Section 3, we developed a methodology for identifying source dataset classes that have the most impact on transfer learning performance. Now, we demonstrate how this methodology can be extended into a framework for probing and understanding transfer learning, including: (1) identifying granular target subpopulations that correspond to source classes, (2) debugging transfer learning failures, and (3) detecting data leakage between the source and target datasets. We focus our demonstration of these capabilities on a commonly used transfer learning setting: ImageNet to CIFAR-10 (experimental details are in Appendix A).

4.1. Capability 1: Extracting target subpopulations by projecting source class labels

Imagine that we would like to find all the ostriches in the CIFAR-10 dataset. This is not an easy task, as CIFAR-10 only has "bird" as a label and thus lacks sufficiently fine-grained annotations. Luckily, however, ImageNet does contain an ostrich class! Our computed influences enable us to "project" this ostrich class annotation (and, more broadly, the fine-grained label hierarchy of our source dataset) to find this subpopulation of interest in the target dataset.
Indeed, our examination from Section 3 suggests that the most positively influencing source classes are typically those that directly overlap with the target classes (see Figure 1). In particular, for our example, "ostrich" is highly positively influential for the "bird" class (see Figure 3). To find ostriches in the CIFAR-10 dataset, we thus need to simply surface the CIFAR-10 images which were most positively influenced by the "ostrich" source class (see Figure 4).

It turns out that this type of projection approach can be applied more broadly. Even when the source class is not a direct sub-type of a target class, the downstream model can still leverage salient features from this class, such as shape or color, to predict on the target dataset. For such classes, projecting source labels can extract target subpopulations which share such features. To illustrate this, in Figure 5, we display the CIFAR-10 images that are highly influenced by the classes "starfish" and "rapeseed" (neither of which directly appears in the CIFAR-10 dataset). For these classes, the most influenced CIFAR-10 images share the same shape ("starfish") or color ("rapeseed") as their ImageNet counterparts. More examples of such projections can be found in Appendix E.
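A minimal sketch of this projection step, reusing the influence matrix from the earlier sketch; `class_to_idx` is a hypothetical mapping from ImageNet class names to class indices.

```python
import numpy as np

def project_source_class(infl: np.ndarray, source_class: int, top_k: int = 10) -> np.ndarray:
    """Indices of the target examples most positively influenced by one source class.

    infl: (n_source_classes, n_target_examples) influence matrix, as estimated earlier.
    """
    return np.argsort(infl[source_class])[::-1][:top_k]

# e.g., surface likely ostriches among CIFAR-10 "bird" images:
# ostrich_like = project_source_class(infl, class_to_idx["ostrich"])
```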
Figure 6. Pinpointing highly negatively influential source classes can help explain model mistakes. Left: For three CIFAR-10 images, we plot the most negatively influential source classes. Right: Over 20 runs, the fraction of times that our downstream model predicts each label for the given CIFAR-10 image. When the most negatively influential class is removed, the model predicts the correct label more frequently. More examples can be found in Appendix E.

4.2. Capability 2: Debugging the failures of a transferred model

Our framework also enables us to reason about the possible mistakes of the transferred model caused by source dataset classes. For example, consider the CIFAR-10 image of a dog in Figure 6, which our transfer learning model often mispredicts as a horse. Using our framework, we can demonstrate that this image is strongly negatively influenced by the source class "sorrel horse." Thus, our downstream model may be misusing a feature introduced by this class. Indeed, once we remove "sorrel horse" from the source dataset, our model predicts the correct label more frequently. (See Appendix E for more examples, as well as a quantitative analysis of this experiment.)
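The same influence matrix supports a simple debugging query; the sketch below (with illustrative names) surfaces the most negatively influential source classes for one misclassified target image.

```python
import numpy as np

def most_negative_source_classes(infl: np.ndarray, target_idx: int, top_k: int = 5) -> np.ndarray:
    """Source classes with the most negative influence on a single target example.

    infl: (n_source_classes, n_target_examples) influence matrix from before.
    """
    return np.argsort(infl[:, target_idx])[:top_k]

# For the dog image that keeps being predicted as "horse", this would surface
# candidates such as "sorrel horse"; removing that class from the source
# dataset and re-running the transfer pipeline is the counterfactual check.
```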
"image_caption" + }, + { + "bbox": [ + 174, + 103, + 216, + 145 + ], + "lines": [ + { + "bbox": [ + 174, + 103, + 216, + 145 + ], + "spans": [ + { + "bbox": [ + 174, + 103, + 216, + 145 + ], + "type": "image", + "image_path": "e85f26acb175ee3896045a408094ed3f5c3fcf58e04c2fd87c51f3f53e50e327.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 178, + 147, + 213, + 155 + ], + "lines": [ + { + "bbox": [ + 178, + 147, + 213, + 155 + ], + "spans": [ + { + "bbox": [ + 178, + 147, + 213, + 155 + ], + "type": "text", + "content": "tailed frog" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 225, + 103, + 267, + 145 + ], + "blocks": [ + { + "bbox": [ + 225, + 103, + 267, + 145 + ], + "lines": [ + { + "bbox": [ + 225, + 103, + 267, + 145 + ], + "spans": [ + { + "bbox": [ + 225, + 103, + 267, + 145 + ], + "type": "image", + "image_path": "b78187756e5a1e31815a5efcf9654ddfba46d0fe0d09885eda106dba278a588c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 147, + 261, + 155 + ], + "lines": [ + { + "bbox": [ + 230, + 147, + 261, + 155 + ], + "spans": [ + { + "bbox": [ + 230, + 147, + 261, + 155 + ], + "type": "text", + "content": "warplane" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 276, + 103, + 318, + 146 + ], + "blocks": [ + { + "bbox": [ + 276, + 103, + 318, + 146 + ], + "lines": [ + { + "bbox": [ + 276, + 103, + 318, + 146 + ], + "spans": [ + { + "bbox": [ + 276, + 103, + 318, + 146 + ], + "type": "image", + "image_path": "6c6ecb37cff6ebd59ed60cb24773e5d4d7b4c650fdd12f426f957687e164a443.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 288, + 148, + 306, + 155 + ], + "lines": [ + { + "bbox": [ + 288, + 148, + 306, + 155 + ], + "spans": [ + { + "bbox": [ + 288, + 148, + 306, + 155 + ], + "type": "text", + "content": "racer" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 121, + 162, + 164, + 205 + ], + "blocks": [ + { + "bbox": [ + 63, + 173, + 100, + 196 + ], + "lines": [ + { + "bbox": [ + 63, + 173, + 100, + 196 + ], + "spans": [ + { + "bbox": [ + 63, + 173, + 100, + 196 + ], + "type": "text", + "content": "CIFAR-10 Images" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 121, + 162, + 164, + 205 + ], + "lines": [ + { + "bbox": [ + 121, + 162, + 164, + 205 + ], + "spans": [ + { + "bbox": [ + 121, + 162, + 164, + 205 + ], + "type": "image", + "image_path": "eed8fc068c21a53e01ae744ed7ab3c868b9ee0d231c698c69674f71c8f917c33.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 207, + 149, + 217 + ], + "lines": [ + { + "bbox": [ + 135, + 207, + 149, + 217 + ], + "spans": [ + { + "bbox": [ + 135, + 207, + 149, + 217 + ], + "type": "text", + "content": "ship" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 174, + 163, + 216, + 205 + ], + "blocks": [ + { + "bbox": [ + 174, + 163, + 216, + 205 + ], + "lines": [ + { + "bbox": [ + 174, + 163, + 216, + 205 + ], + "spans": [ + { + "bbox": [ + 174, + 163, + 216, + 205 + ], + "type": "image", + "image_path": "e912c81e06a985a27646767bf43202c68e2b7794768dca975027ef80217bb3df.jpg" + } + 
] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 188, + 207, + 203, + 217 + ], + "lines": [ + { + "bbox": [ + 188, + 207, + 203, + 217 + ], + "spans": [ + { + "bbox": [ + 188, + 207, + 203, + 217 + ], + "type": "text", + "content": "frog" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 225, + 163, + 267, + 205 + ], + "blocks": [ + { + "bbox": [ + 225, + 163, + 267, + 205 + ], + "lines": [ + { + "bbox": [ + 225, + 163, + 267, + 205 + ], + "spans": [ + { + "bbox": [ + 225, + 163, + 267, + 205 + ], + "type": "image", + "image_path": "b0ea7a2e6aeb7e18b2eb5b3e683784d135c1b14802e08d9089a3297b547dbaeb.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 232, + 207, + 260, + 216 + ], + "lines": [ + { + "bbox": [ + 232, + 207, + 260, + 216 + ], + "spans": [ + { + "bbox": [ + 232, + 207, + 260, + 216 + ], + "type": "text", + "content": "airplane" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 276, + 163, + 318, + 205 + ], + "blocks": [ + { + "bbox": [ + 276, + 163, + 318, + 205 + ], + "lines": [ + { + "bbox": [ + 276, + 163, + 318, + 205 + ], + "spans": [ + { + "bbox": [ + 276, + 163, + 318, + 205 + ], + "type": "image", + "image_path": "8468c3c5b84e6aa89752f0c412ed708c3f429e4924326142a904beeeaaa9ebc5.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 207, + 315, + 216 + ], + "lines": [ + { + "bbox": [ + 277, + 207, + 315, + 216 + ], + "spans": [ + { + "bbox": [ + 277, + 207, + 315, + 216 + ], + "type": "text", + "content": "automobile" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 345, + 100, + 388, + 145 + ], + "blocks": [ + { + "bbox": [ + 364, + 77, + 523, + 92 + ], + "lines": [ + { + "bbox": [ + 364, + 77, + 523, + 92 + ], + "spans": [ + { + "bbox": [ + 364, + 77, + 523, + 92 + ], + "type": "text", + "content": "Most Negatively Influenced" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 345, + 100, + 388, + 145 + ], + "lines": [ + { + "bbox": [ + 345, + 100, + 388, + 145 + ], + "spans": [ + { + "bbox": [ + 345, + 100, + 388, + 145 + ], + "type": "image", + "image_path": "d639a1977a40d0b7f3e7b870929a2f33cf582ab313815455d2a2ed2c8df1d179.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 147, + 386, + 155 + ], + "lines": [ + { + "bbox": [ + 347, + 147, + 386, + 155 + ], + "spans": [ + { + "bbox": [ + 347, + 147, + 386, + 155 + ], + "type": "text", + "content": "lawnmower" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 399, + 103, + 441, + 145 + ], + "blocks": [ + { + "bbox": [ + 399, + 103, + 441, + 145 + ], + "lines": [ + { + "bbox": [ + 399, + 103, + 441, + 145 + ], + "spans": [ + { + "bbox": [ + 399, + 103, + 441, + 145 + ], + "type": "image", + "image_path": "0f9b8a03d27b4c0fbad31470f2f14afcd3dc3e6ca2f5c8baa59b26de4837af60.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 147, + 433, + 155 + ], + "lines": [ + { + "bbox": [ + 406, + 147, + 433, + 155 + ], + "spans": [ + { + "bbox": [ + 406, + 147, + 433, + 155 + ], + "type": "text", + "content": "minivan" + } 
+ ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 449, + 103, + 492, + 145 + ], + "blocks": [ + { + "bbox": [ + 449, + 103, + 492, + 145 + ], + "lines": [ + { + "bbox": [ + 449, + 103, + 492, + 145 + ], + "spans": [ + { + "bbox": [ + 449, + 103, + 492, + 145 + ], + "type": "image", + "image_path": "8f2f49548834a727623bfa1ac95596cd5443628dcc675c037f3bfd7c5c96f2da.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 462, + 147, + 479, + 156 + ], + "lines": [ + { + "bbox": [ + 462, + 147, + 479, + 156 + ], + "spans": [ + { + "bbox": [ + 462, + 147, + 479, + 156 + ], + "type": "text", + "content": "wing" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 501, + 103, + 542, + 145 + ], + "blocks": [ + { + "bbox": [ + 501, + 103, + 542, + 145 + ], + "lines": [ + { + "bbox": [ + 501, + 103, + 542, + 145 + ], + "spans": [ + { + "bbox": [ + 501, + 103, + 542, + 145 + ], + "type": "image", + "image_path": "07faa20b87ec0f33fedc983fef21e0318efa232c8f59100ee0792dc3eddf838b.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 502, + 147, + 540, + 156 + ], + "lines": [ + { + "bbox": [ + 502, + 147, + 540, + 156 + ], + "spans": [ + { + "bbox": [ + 502, + 147, + 540, + 156 + ], + "type": "text", + "content": "book jacket" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 346, + 163, + 388, + 205 + ], + "blocks": [ + { + "bbox": [ + 346, + 163, + 388, + 205 + ], + "lines": [ + { + "bbox": [ + 346, + 163, + 388, + 205 + ], + "spans": [ + { + "bbox": [ + 346, + 163, + 388, + 205 + ], + "type": "image", + "image_path": "e8bbb4fd74bda0757b5385670c20d47e5b9fe2815e1bbaec6ec5bc3dbb996e29.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 353, + 207, + 380, + 216 + ], + "lines": [ + { + "bbox": [ + 353, + 207, + 380, + 216 + ], + "spans": [ + { + "bbox": [ + 353, + 207, + 380, + 216 + ], + "type": "text", + "content": "airplane" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 399, + 163, + 441, + 205 + ], + "blocks": [ + { + "bbox": [ + 399, + 163, + 441, + 205 + ], + "lines": [ + { + "bbox": [ + 399, + 163, + 441, + 205 + ], + "spans": [ + { + "bbox": [ + 399, + 163, + 441, + 205 + ], + "type": "image", + "image_path": "49f1a3efd81fa4fbb77a767b1d63d74a0bc5f7fe04e1188d8207ccfb2defb722.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 207, + 433, + 216 + ], + "lines": [ + { + "bbox": [ + 406, + 207, + 433, + 216 + ], + "spans": [ + { + "bbox": [ + 406, + 207, + 433, + 216 + ], + "type": "text", + "content": "airplane" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 449, + 163, + 492, + 205 + ], + "blocks": [ + { + "bbox": [ + 449, + 163, + 492, + 205 + ], + "lines": [ + { + "bbox": [ + 449, + 163, + 492, + 205 + ], + "spans": [ + { + "bbox": [ + 449, + 163, + 492, + 205 + ], + "type": "image", + "image_path": "82b6b7dd0adf53a900b3cbfca67ad0b90f25f8ce3c621b5d82dcb88da3996f78.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 207, + 477, + 217 + ], + 
"lines": [ + { + "bbox": [ + 463, + 207, + 477, + 217 + ], + "spans": [ + { + "bbox": [ + 463, + 207, + 477, + 217 + ], + "type": "text", + "content": "ship" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 500, + 163, + 542, + 205 + ], + "blocks": [ + { + "bbox": [ + 500, + 163, + 542, + 205 + ], + "lines": [ + { + "bbox": [ + 500, + 163, + 542, + 205 + ], + "spans": [ + { + "bbox": [ + 500, + 163, + 542, + 205 + ], + "type": "image", + "image_path": "84b75cb17c9f7c53e4d9466d0f09618fb86aee0c58db53c3917fddb15d61d952.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 513, + 207, + 529, + 216 + ], + "lines": [ + { + "bbox": [ + 513, + 207, + 529, + 216 + ], + "spans": [ + { + "bbox": [ + 513, + 207, + 529, + 216 + ], + "type": "text", + "content": "deer" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 121, + 237, + 164, + 279 + ], + "blocks": [ + { + "bbox": [ + 61, + 247, + 102, + 270 + ], + "lines": [ + { + "bbox": [ + 61, + 247, + 102, + 270 + ], + "spans": [ + { + "bbox": [ + 61, + 247, + 102, + 270 + ], + "type": "text", + "content": "ImageNet Images" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 121, + 237, + 164, + 279 + ], + "lines": [ + { + "bbox": [ + 121, + 237, + 164, + 279 + ], + "spans": [ + { + "bbox": [ + 121, + 237, + 164, + 279 + ], + "type": "image", + "image_path": "a955543a83359f02418fbd2bcd55c201e1c486a29303e620816cc2b0eee66faa.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 281, + 154, + 289 + ], + "lines": [ + { + "bbox": [ + 130, + 281, + 154, + 289 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 154, + 289 + ], + "type": "text", + "content": "ostrich" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 174, + 237, + 217, + 280 + ], + "blocks": [ + { + "bbox": [ + 174, + 237, + 217, + 280 + ], + "lines": [ + { + "bbox": [ + 174, + 237, + 217, + 280 + ], + "spans": [ + { + "bbox": [ + 174, + 237, + 217, + 280 + ], + "type": "image", + "image_path": "59bb26ee10ff0f0936995aa2fd19a902af27c3ca52ef9b4ba4aa30b3783d2938.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 281, + 211, + 289 + ], + "lines": [ + { + "bbox": [ + 179, + 281, + 211, + 289 + ], + "spans": [ + { + "bbox": [ + 179, + 281, + 211, + 289 + ], + "type": "text", + "content": "warplane" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 225, + 237, + 268, + 280 + ], + "blocks": [ + { + "bbox": [ + 225, + 237, + 268, + 280 + ], + "lines": [ + { + "bbox": [ + 225, + 237, + 268, + 280 + ], + "spans": [ + { + "bbox": [ + 225, + 237, + 268, + 280 + ], + "type": "image", + "image_path": "106a1f1cd9f8b82afa3b83193e2e220518d723fbc9334ee29e64f7b81bbae8cd.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 281, + 266, + 289 + ], + "lines": [ + { + "bbox": [ + 226, + 281, + 266, + 289 + ], + "spans": [ + { + "bbox": [ + 226, + 281, + 266, + 289 + ], + "type": "text", + "content": "sorrel horse" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": 
[ + 276, + 238, + 316, + 280 + ], + "blocks": [ + { + "bbox": [ + 276, + 238, + 316, + 280 + ], + "lines": [ + { + "bbox": [ + 276, + 238, + 316, + 280 + ], + "spans": [ + { + "bbox": [ + 276, + 238, + 316, + 280 + ], + "type": "image", + "image_path": "3a05da6efa0c53f7f07bdcaf52b9fc1bb44d2287fe2d33ecd72e90cd0a6ffc9b.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 281, + 315, + 289 + ], + "lines": [ + { + "bbox": [ + 277, + 281, + 315, + 289 + ], + "spans": [ + { + "bbox": [ + 277, + 281, + 315, + 289 + ], + "type": "text", + "content": "moving van" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 345, + 237, + 387, + 279 + ], + "blocks": [ + { + "bbox": [ + 345, + 237, + 387, + 279 + ], + "lines": [ + { + "bbox": [ + 345, + 237, + 387, + 279 + ], + "spans": [ + { + "bbox": [ + 345, + 237, + 387, + 279 + ], + "type": "image", + "image_path": "36982405a6bc07a564f1e52cb70dbad7622c60ae6f4221d54fc443c27343d493.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 281, + 382, + 289 + ], + "lines": [ + { + "bbox": [ + 350, + 281, + 382, + 289 + ], + "spans": [ + { + "bbox": [ + 350, + 281, + 382, + 289 + ], + "type": "text", + "content": "warplane" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 399, + 237, + 441, + 279 + ], + "blocks": [ + { + "bbox": [ + 399, + 237, + 441, + 279 + ], + "lines": [ + { + "bbox": [ + 399, + 237, + 441, + 279 + ], + "spans": [ + { + "bbox": [ + 399, + 237, + 441, + 279 + ], + "type": "image", + "image_path": "29136a5aac4e2987454b183fae55b11d858ff68a485d87945697ea1b38004622.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 281, + 441, + 289 + ], + "lines": [ + { + "bbox": [ + 397, + 281, + 441, + 289 + ], + "spans": [ + { + "bbox": [ + 397, + 281, + 441, + 289 + ], + "type": "text", + "content": "beach wagon" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_caption" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 450, + 237, + 492, + 279 + ], + "blocks": [ + { + "bbox": [ + 450, + 237, + 492, + 279 + ], + "lines": [ + { + "bbox": [ + 450, + 237, + 492, + 279 + ], + "spans": [ + { + "bbox": [ + 450, + 237, + 492, + 279 + ], + "type": "image", + "image_path": "1a718365002950bbaa65ab0fa2a019e22bb89cc0e25f47c28d0b7f214a6998dd.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 454, + 281, + 487, + 289 + ], + "lines": [ + { + "bbox": [ + 454, + 281, + 487, + 289 + ], + "spans": [ + { + "bbox": [ + 454, + 281, + 487, + 289 + ], + "type": "text", + "content": "warplane" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 500, + 237, + 542, + 279 + ], + "blocks": [ + { + "bbox": [ + 500, + 237, + 542, + 279 + ], + "lines": [ + { + "bbox": [ + 500, + 237, + 542, + 279 + ], + "spans": [ + { + "bbox": [ + 500, + 237, + 542, + 279 + ], + "type": "image", + "image_path": "6b8f7c776d9773a24ca4618d5e531c7666a8d3c3b9f9120f206f6c0bc0187488.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 501, + 281, + 539, + 289 + ], + "lines": [ + { + "bbox": [ + 501, + 281, + 539, + 289 + ], + "spans": [ + { + "bbox": [ + 501, + 281, + 539, + 289 + ], + "type": 
"text", + "content": "moving van" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 122, + 297, + 165, + 339 + ], + "blocks": [ + { + "bbox": [ + 63, + 307, + 100, + 331 + ], + "lines": [ + { + "bbox": [ + 63, + 307, + 100, + 331 + ], + "spans": [ + { + "bbox": [ + 63, + 307, + 100, + 331 + ], + "type": "text", + "content": "CIFAR-10 Images" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 122, + 297, + 165, + 339 + ], + "lines": [ + { + "bbox": [ + 122, + 297, + 165, + 339 + ], + "spans": [ + { + "bbox": [ + 122, + 297, + 165, + 339 + ], + "type": "image", + "image_path": "168b80208ee4478b70c7e6e43c9a58cab1975c037919a5d5af4cf299466e0af6.jpg" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 342, + 149, + 350 + ], + "lines": [ + { + "bbox": [ + 135, + 342, + 149, + 350 + ], + "spans": [ + { + "bbox": [ + 135, + 342, + 149, + 350 + ], + "type": "text", + "content": "bird" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 364, + 547, + 409 + ], + "lines": [ + { + "bbox": [ + 46, + 364, + 547, + 409 + ], + "spans": [ + { + "bbox": [ + 46, + 364, + 547, + 409 + ], + "type": "text", + "content": "Figure 7. ImageNet training images with highest positive (left) or negative (right) example-wise (average) influences on CIFAR-10 test images. We find that ImageNet images that are highly positively influential often correspond to data leakage, while ImageNet images that are highly negatively influential are often either mislabeled, ambiguous, or otherwise misleading. For example, the presence of a flying lawn mower in the ImageNet dataset hurts the downstream performance on a similarly shaped airplane (boxed)." 
+ } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_caption" + } + ], + "index": 54 + }, + { + "type": "image", + "bbox": [ + 173, + 297, + 216, + 339 + ], + "blocks": [ + { + "bbox": [ + 173, + 297, + 216, + 339 + ], + "lines": [ + { + "bbox": [ + 173, + 297, + 216, + 339 + ], + "spans": [ + { + "bbox": [ + 173, + 297, + 216, + 339 + ], + "type": "image", + "image_path": "15fb82c4b3d8da541a211e1f93dc2f25ce18652dca8f2c43db29b3419d9b00a5.jpg" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 342, + 209, + 350 + ], + "lines": [ + { + "bbox": [ + 181, + 342, + 209, + 350 + ], + "spans": [ + { + "bbox": [ + 181, + 342, + 209, + 350 + ], + "type": "text", + "content": "airplane" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_caption" + } + ], + "index": 56 + }, + { + "type": "image", + "bbox": [ + 225, + 297, + 268, + 339 + ], + "blocks": [ + { + "bbox": [ + 225, + 297, + 268, + 339 + ], + "lines": [ + { + "bbox": [ + 225, + 297, + 268, + 339 + ], + "spans": [ + { + "bbox": [ + 225, + 297, + 268, + 339 + ], + "type": "image", + "image_path": "a4451c709cfaf1ddf5f6b6599f60769b969d696137c3510ddac23067c4af97fe.jpg" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 342, + 256, + 350 + ], + "lines": [ + { + "bbox": [ + 236, + 342, + 256, + 350 + ], + "spans": [ + { + "bbox": [ + 236, + 342, + 256, + 350 + ], + "type": "text", + "content": "horse" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_caption" + } + ], + "index": 58 + }, + { + "type": "image", + "bbox": [ + 276, + 297, + 317, + 339 + ], + "blocks": [ + { + "bbox": [ + 276, + 297, + 317, + 339 + ], + "lines": [ + { + "bbox": [ + 276, + 297, + 317, + 339 + ], + "spans": [ + { + "bbox": [ + 276, + 297, + 317, + 339 + ], + "type": "image", + "image_path": "b1b31a96d714600dfe02db18a53e4b69d7245597263a2e36a992b3d373946ecd.jpg" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 287, + 342, + 306, + 350 + ], + "lines": [ + { + "bbox": [ + 287, + 342, + 306, + 350 + ], + "spans": [ + { + "bbox": [ + 287, + 342, + 306, + 350 + ], + "type": "text", + "content": "truck" + } + ] + } + ], + "index": 61, + "angle": 0, + "type": "image_caption" + } + ], + "index": 60 + }, + { + "type": "image", + "bbox": [ + 345, + 297, + 388, + 339 + ], + "blocks": [ + { + "bbox": [ + 345, + 297, + 388, + 339 + ], + "lines": [ + { + "bbox": [ + 345, + 297, + 388, + 339 + ], + "spans": [ + { + "bbox": [ + 345, + 297, + 388, + 339 + ], + "type": "image", + "image_path": "a537a378a4d35c2e687e6b5a5af15901f7a83988731b78b90f6c54fb7441152b.jpg" + } + ] + } + ], + "index": 62, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 342, + 373, + 350 + ], + "lines": [ + { + "bbox": [ + 359, + 342, + 373, + 350 + ], + "spans": [ + { + "bbox": [ + 359, + 342, + 373, + 350 + ], + "type": "text", + "content": "ship" + } + ] + } + ], + "index": 63, + "angle": 0, + "type": "image_caption" + } + ], + "index": 62 + }, + { + "type": "image", + "bbox": [ + 399, + 297, + 441, + 339 + ], + "blocks": [ + { + "bbox": [ + 399, + 297, + 441, + 339 + ], + "lines": [ + { + "bbox": [ + 399, + 297, + 441, + 339 + ], + "spans": [ + { + "bbox": [ + 399, + 297, + 441, + 339 + ], + "type": "image", + "image_path": "22f15385166b2f6462e0365af68740d4b527886bfd5e9ca2e72de4a3fb2d48c1.jpg" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 342, + 433, + 350 + ], + "lines": [ 
+ { + "bbox": [ + 406, + 342, + 433, + 350 + ], + "spans": [ + { + "bbox": [ + 406, + 342, + 433, + 350 + ], + "type": "text", + "content": "airplane" + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_caption" + } + ], + "index": 64 + }, + { + "type": "image", + "bbox": [ + 450, + 297, + 492, + 339 + ], + "blocks": [ + { + "bbox": [ + 450, + 297, + 492, + 339 + ], + "lines": [ + { + "bbox": [ + 450, + 297, + 492, + 339 + ], + "spans": [ + { + "bbox": [ + 450, + 297, + 492, + 339 + ], + "type": "image", + "image_path": "a49ef592bc4a2b211857e3026faec3cb114cf571122eaa86ca2a7b9e1b4706f0.jpg" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 463, + 342, + 477, + 350 + ], + "lines": [ + { + "bbox": [ + 463, + 342, + 477, + 350 + ], + "spans": [ + { + "bbox": [ + 463, + 342, + 477, + 350 + ], + "type": "text", + "content": "ship" + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_caption" + } + ], + "index": 66 + }, + { + "type": "image", + "bbox": [ + 500, + 297, + 542, + 339 + ], + "blocks": [ + { + "bbox": [ + 500, + 297, + 542, + 339 + ], + "lines": [ + { + "bbox": [ + 500, + 297, + 542, + 339 + ], + "spans": [ + { + "bbox": [ + 500, + 297, + 542, + 339 + ], + "type": "image", + "image_path": "fdfa218c1e16c1ab04e01a7ce42237db87132f2ca79b062c9cf5294b1ac5d61f.jpg" + } + ] + } + ], + "index": 68, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 502, + 342, + 539, + 350 + ], + "lines": [ + { + "bbox": [ + 502, + 342, + 539, + 350 + ], + "spans": [ + { + "bbox": [ + 502, + 342, + 539, + 350 + ], + "type": "text", + "content": "automobile" + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + } + ], + "index": 68 + }, + { + "bbox": [ + 47, + 430, + 162, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 162, + 441 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 162, + 441 + ], + "type": "text", + "content": "analysis of this experiment.)" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 47, + 453, + 287, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 453, + 287, + 478 + ], + "spans": [ + { + "bbox": [ + 47, + 453, + 287, + 478 + ], + "type": "text", + "content": "4.3. Capability 3: Detecting data leakage and misleading source examples" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 46, + 485, + 287, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 485, + 287, + 581 + ], + "spans": [ + { + "bbox": [ + 46, + 485, + 287, + 581 + ], + "type": "text", + "content": "Thus far, we have focused on how the classes in the source dataset influence the predictions of the transferred model on target examples. In this section, we extend our analysis to the individual datapoints of the source dataset. We do so by adapting our approach to measure the influence of each individual source datapoint on each target datapoint. Further details on how these influences are computed can be found in Appendix D." + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 288, + 714 + ], + "type": "text", + "content": "Figure 7 displays the ImageNet training examples that have highly positive or negative influences on CIFAR-10 test examples. 
5. Related Work

Transfer learning. Transfer learning is a technique commonly used in domains ranging from medical imaging [23, 36] and language modeling [6] to object detection [5, 8, 15, 41]. Therefore, there has been considerable interest in understanding the drivers of transfer learning's success. For example, by performing transfer learning on block-shuffled images, [37] demonstrate that at least some of the benefits of transfer learning come from low-level image statistics of the source data. There is also an important line of work studying transfer learning by investigating the relationship between different properties of the source model and performance on the target task [23, 27, 42, 43].

The works most relevant to ours are those that studied how modifying the source dataset can affect downstream performance. For example, [26] showed that pre-training on an enormous source dataset (approximately 300 million noisily labeled images) can outperform pre-training on ImageNet. [1, 18] investigated the importance of the number of classes and the number of images per class in transfer learning. Finally, [38] demonstrated that more pre-training data does not always help, and that transfer learning can be sensitive to the choice of pre-training data. They also presented a framework for reweighting the source datapoints in order to boost transfer learning performance.
Influence functions and datamodels. Influence functions are well-studied statistical tools that have recently been applied in machine learning settings [7, 17, 25]. For a given model, influence functions analyze the effect of a training input on the model's predictions by estimating the expected change in performance when this training input is added or removed. In order to apply this tool in machine learning, [25] propose estimating influence functions using the Hessian of the loss function. A recent line of work estimates this quantity more efficiently by training on different subsets of the training set [13]. In a similar vein, [14] proposed running a Monte Carlo search to estimate the effect of every training input via Shapley values. More recently, [19] proposed the datamodeling framework as an alternative way to estimate the effect of a training input on the model's predictions. Datamodels are represented using parametric functions (typically linear functions) that aim to map a subset of the training set to the model's output.
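For concreteness, the following sketch illustrates the datamodel idea described above: fitting a linear map from a training-subset indicator to the model's output. It uses ordinary least squares for brevity and is only an illustration, not the estimator of [19], which fits sparse linear models.

```python
import numpy as np

def fit_linear_datamodels(masks: np.ndarray, outputs: np.ndarray) -> np.ndarray:
    """Fit one linear datamodel per target example: a linear map from the
    binary source-class inclusion mask to the downstream model's output.

    masks:   (n_models, n_source_classes) 0/1 inclusion matrix.
    outputs: (n_models, n_target_examples) downstream outputs.

    Returns an (n_source_classes, n_target_examples) weight matrix; w[c, t] is
    an alternative estimate of the influence of class c on target example t.
    """
    X = np.hstack([masks, np.ones((masks.shape[0], 1))])  # append a bias column
    coef, *_ = np.linalg.lstsq(X, outputs, rcond=None)    # (n_classes + 1, n_targets)
    return coef[:-1]                                       # drop the bias row
```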
6. Conclusions

In this work, we presented a new framework for examining the impact of the source dataset in transfer learning. Specifically, our approach estimates the influence of a source class (or datapoint), which captures how including that class (or datapoint) in the source dataset impacts the downstream model's predictions. Leveraging these estimates, we demonstrate that we can improve the transfer learning performance on a range of downstream tasks by identifying and removing detrimental datapoints from the source dataset. Furthermore, our framework enables us to identify granular subpopulations in the target dataset by projecting fine-grained labels from the source dataset, better understand model failures on the downstream task, and detect potential data leakage from the source to the downstream dataset. We believe our framework provides a new perspective on transfer learning: one that enables us to perform a fine-grained analysis of the impact of the source dataset.

References

[1] Hossein Azizpour, Ali Sharif Razavian, Josephine Sullivan, Atsuto Maki, and Stefan Carlsson. Factors of transferability for a generic convnet representation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2015.
[2] Thomas Berg, Jiongxin Liu, Seung Woo Lee, Michelle L Alexander, David W Jacobs, and Peter N Belhumeur. Birdsnap: Large-scale fine-grained visual categorization of birds. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2014.
[3] Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101: Mining discriminative components with random forests. In European Conference on Computer Vision, 2014.
[4] Emmanuel Candes, Yingying Fan, Lucas Janson, and Jinchi Lv. Panning for gold: model-X knockoffs for high dimensional controlled variable selection. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 80(3):551-577, 2018.
[5] Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017.
[6] Alexis Conneau and Douwe Kiela. SentEval: An evaluation toolkit for universal sentence representations. Language Resources and Evaluation Conference (LREC), 2018.
[7] R Dennis Cook and Sanford Weisberg. Residuals and Influence in Regression. New York: Chapman and Hall, 1982.
[8] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-FCN: Object detection via region-based fully convolutional networks. In Advances in Neural Information Processing Systems (NeurIPS), 2016.
[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In Computer Vision and Pattern Recognition (CVPR), 2009.
[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021.
[11] Shuyang Du, Haoli Guo, and Andrew Simpson. Self-driving car steering angle prediction based on image recognition. arXiv preprint arXiv:1912.05440, 2019.
[12] Li Fei-Fei, Rob Fergus, and Pietro Perona. Learning generative visual models from few training examples: An incremental Bayesian approach tested on 101 object categories. In 2004 Conference on Computer Vision and Pattern Recognition Workshop, pages 178-178. IEEE, 2004.
[13] Vitaly Feldman and Chiyuan Zhang. What neural networks memorize and why: Discovering the long tail via influence estimation. In Advances in Neural Information Processing Systems (NeurIPS), volume 33, pages 2881-2891, 2020.
[14] Amirata Ghorbani and James Zou. Data Shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning (ICML), 2019.
[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Computer Vision and Pattern Recognition (CVPR), pages 580-587, 2014.
[16] Gregory Griffin, Alex Holub, and Pietro Perona. Caltech-256 object category dataset. 2007.
[17] Frank R Hampel, Elvezio M Ronchetti, Peter J Rousseuw, and Werner A Stahel. Robust Statistics: The Approach Based on Influence Functions, volume 196. John Wiley & Sons, 2011.
[18] Minyoung Huh, Pulkit Agrawal, and Alexei A Efros. What makes ImageNet good for transfer learning? arXiv preprint arXiv:1608.08614, 2016.
[19] Andrew Ilyas, Sung Min Park, Logan Engstrom, Guillaume Leclerc, and Aleksander Madry. Datamodels: Predicting predictions from training data. In International Conference on Machine Learning (ICML), 2022.
[20] Saachi Jain, Hadi Salman, Eric Wong, Pengchuan Zhang, Vibhav Vineet, Sai Vemprala, and Aleksander Madry. Missingness bias in model debugging. In International Conference on Learning Representations, 2022.
[21] Yunhun Jang, Hankook Lee, Sung Ju Hwang, and Jinwoo Shin. Learning what and where to transfer. In International Conference on Machine Learning, pages 3030-3039. PMLR, 2019.
[22] Bojan Karlas, David Dao, Matteo Interlandi, Bo Li, Sebastian Schelter, Wentao Wu, and Ce Zhang. Data debugging with Shapley importance over end-to-end machine learning pipelines. arXiv preprint arXiv:2204.11131, 2022.
[23] Alexander Ke, William Ellsworth, Oishi Banerjee, Andrew Y Ng, and Pranav Rajpurkar. CheXtransfer: Performance and parameter efficiency of ImageNet models for chest X-ray interpretation. In Proceedings of the Conference on Health, Inference, and Learning, pages 116-124, 2021.
[24] Jiman Kim and Chanjong Park. End-to-end ego lane estimation based on sequential transfer learning for self-driving cars. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 30-38, 2017.
[25] Pang Wei Koh and Percy Liang. Understanding black-box predictions via influence functions. In International Conference on Machine Learning, 2017.
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 647, + 288, + 694 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 647, + 288, + 694 + ], + "spans": [ + { + "bbox": [ + 48, + 647, + 288, + 694 + ], + "type": "text", + "content": "[26] Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, and Neil Houlsby. Big transfer (bit): General visual representation learning. arXiv preprint arXiv:1912.11370, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 694, + 288, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 694, + 288, + 704 + ], + "spans": [ + { + "bbox": [ + 48, + 694, + 288, + 704 + ], + "type": "text", + "content": "[27] Simon Kornblith, Jonathon Shlens, and Quoc V Le. Do" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 308, + 72, + 547, + 704 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 328, + 72, + 545, + 96 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 72, + 545, + 96 + ], + "spans": [ + { + "bbox": [ + 328, + 72, + 545, + 96 + ], + "type": "text", + "content": "better imagenet models transfer better? In computer vision and pattern recognition (CVPR), 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 96, + 547, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 96, + 547, + 130 + ], + "spans": [ + { + "bbox": [ + 308, + 96, + 547, + 130 + ], + "type": "text", + "content": "[28] Jonathan Krause, Jia Deng, Michael Stark, and Li Fei-Fei. Collecting a large-scale dataset of fine-grained cars. 2013." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 131, + 545, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 131, + 545, + 154 + ], + "spans": [ + { + "bbox": [ + 308, + 131, + 545, + 154 + ], + "type": "text", + "content": "[29] Alex Krizhevsky. Learning multiple layers of features from tiny images. In Technical report, 2009." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 154, + 547, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 154, + 547, + 201 + ], + "spans": [ + { + "bbox": [ + 308, + 154, + 547, + 201 + ], + "type": "text", + "content": "[30] Ananya Kumar, Aditi Raghunathan, Robbie Jones, Tengyu Ma, and Percy Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054, 2022." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 201, + 547, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 201, + 547, + 247 + ], + "spans": [ + { + "bbox": [ + 308, + 201, + 547, + 247 + ], + "type": "text", + "content": "[31] Yongchan Kwon and James Zou. Beta shapley: a unified and noise-reduced data valuation framework for machine learning. arXiv preprint arXiv:2110.14049, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 247, + 547, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 247, + 547, + 294 + ], + "spans": [ + { + "bbox": [ + 308, + 247, + 547, + 294 + ], + "type": "text", + "content": "[32] Guillaume Leclerc, Andrew Ilyas, Logan Engstrom, Sung Min Park, Hadi Salman, and Aleksander Madry. ffcv. https://github.com/libffcv/ffcv/, 2022." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 294, + 545, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 294, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 308, + 294, + 545, + 329 + ], + "type": "text", + "content": "[33] Kaleel Mahmood, Rigel Mahmood, and Marten Van Dijk. On the robustness of vision transformers to adversarial examples. 2021." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 329, + 545, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 329, + 545, + 376 + ], + "spans": [ + { + "bbox": [ + 308, + 329, + 545, + 376 + ], + "type": "text", + "content": "[34] Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 376, + 545, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 376, + 545, + 399 + ], + "spans": [ + { + "bbox": [ + 308, + 376, + 545, + 399 + ], + "type": "text", + "content": "[35] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 1995." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 308, + 399, + 545, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 399, + 545, + 457 + ], + "spans": [ + { + "bbox": [ + 308, + 399, + 545, + 457 + ], + "type": "text", + "content": "[36] Romain Mormont, Pierre Geurts, and Raphael Marée. Comparison of deep transfer learning strategies for digital pathology. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2018." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 308, + 458, + 545, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 458, + 545, + 504 + ], + "spans": [ + { + "bbox": [ + 308, + 458, + 545, + 504 + ], + "type": "text", + "content": "[37] Behnam Neyshabur, Hanie Sedghi, and Chiyuan Zhang. What is being transferred in transfer learning? Advances in neural information processing systems, 33:512-523, 2020." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 308, + 505, + 545, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 505, + 545, + 552 + ], + "spans": [ + { + "bbox": [ + 308, + 505, + 545, + 552 + ], + "type": "text", + "content": "[38] Jiquan Ngiam, Daiyi Peng, Vijay Vasudevan, Simon Kornblith, Quoc V Le, and Ruoming Pang. Domain adaptive transfer learning with specialist models. arXiv preprint arXiv:1811.07056, 2018." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 308, + 552, + 545, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 552, + 545, + 599 + ], + "spans": [ + { + "bbox": [ + 308, + 552, + 545, + 599 + ], + "type": "text", + "content": "[39] Maria-Elena Nilsback and Andrew Zisserman. Automated flower classification over a large number of classes. In 2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing, 2008." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 308, + 599, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 599, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 308, + 599, + 545, + 645 + ], + "type": "text", + "content": "[40] Omkar M Parkhi, Andrea Vedaldi, Andrew Zisserman, and CV Jawahar. Cats and dogs. In 2012 IEEE conference on computer vision and pattern recognition, pages 3498-3505. 
IEEE, 2012." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 308, + 645, + 545, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 645, + 545, + 692 + ], + "spans": [ + { + "bbox": [ + 308, + 645, + 545, + 692 + ], + "type": "text", + "content": "[41] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems (NeurIPS), 2015." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 308, + 692, + 545, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 692, + 545, + 704 + ], + "spans": [ + { + "bbox": [ + 308, + 692, + 545, + 704 + ], + "type": "text", + "content": "[42] Hadi Salman, Andrew Ilyas, Logan Engstrom, Ashish" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "3621" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 288, + 401 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 69, + 72, + 287, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 287, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 287, + 119 + ], + "type": "text", + "content": "Kapoor, and Aleksander Madry. Do adversarially robust imagenet models transfer better? In Advances in Neural Information Processing Systems (NeurIPS), 2020." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 120, + 288, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 120, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 48, + 120, + 288, + 167 + ], + "type": "text", + "content": "[43] Francisco Utrera, Evan Kravitz, N. Benjamin Erickson, Rajiv Khanna, and Michael W. Mahoney. Adversarily-trained deep nets transfer better. In ArXiv preprint arXiv:2007.05869, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 167, + 288, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 167, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 49, + 167, + 288, + 213 + ], + "type": "text", + "content": "[44] Sherrie Wang, George Azzari, and David B Lobell. Crop type mapping without field-level labels: Random forest transfer and unsupervised clustering techniques. Remote sensing of environment, 222:303-317, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 213, + 288, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 213, + 288, + 297 + ], + "spans": [ + { + "bbox": [ + 49, + 213, + 288, + 297 + ], + "type": "text", + "content": "[45] Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, and Ronald M Summers. Chestx-ray8: Hospital-scale chest x-ray database and benchmarks on weakly-supervised classification and localization of common thorax diseases. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), 2017." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 297, + 288, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 297, + 288, + 343 + ], + "spans": [ + { + "bbox": [ + 49, + 297, + 288, + 343 + ], + "type": "text", + "content": "[46] Jianxiong Xiao, James Hays, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Sun database: Large-scale scene recognition from abbey to zoo. In Computer Vision and Pattern Recognition (CVPR), 2010." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 343, + 288, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 343, + 288, + 401 + ], + "spans": [ + { + "bbox": [ + 49, + 343, + 288, + 401 + ], + "type": "text", + "content": "[47] Michael Xie, Neal Jean, Marshall Burke, David Lobell, and Stefano Ermon. Transfer learning from deep features for remote sensing and poverty mapping. In Thirtieth AAAI Conference on Artificial Intelligence, 2016." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "3622" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_content_list.json b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0a71da2f35da33e9f5c46e0ee9828f2029b43c31 --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_content_list.json @@ -0,0 +1,1759 @@ +[ + { + "type": "text", + "text": "A Dynamic Multi-Scale Voxel Flow Network for Video Prediction", + "text_level": 1, + "bbox": [ + 155, + 130, + 816, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaotao Hu $^{1,2}$ Zhwei Huang $^{2}$ Ailin Huang $^{2,3}$ Jun Xu $^{4,*}$ Shuchang Zhou $^{2,*}$ $^{1}$ College of Computer Science, Nankai University $^{2}$ Megvii Technology \n $^{3}$ Wuhan University $^{4}$ School of Statistics and Data Science, Nankai University", + "bbox": [ + 148, + 179, + 828, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{huxiaotao, huangzhewei, huangailin, zhoushuchang}@megvii.com, nankaimathxujun@gmail.com https://huxiaotaostasy.github.io/DMVFN/", + "bbox": [ + 96, + 236, + 874, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 304, + 313, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The performance of video prediction has been greatly boosted by advanced deep neural networks. However, most of the current methods suffer from large model sizes and require extra inputs, e.g., semantic/depth maps, for promising performance. For efficiency consideration, in this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to achieve better video prediction performance at lower computational costs with only RGB images, than previous methods. The core of our DMVFN is a differentiable routing module that can effectively perceive the motion scales of video frames. 
Once trained, our DMVFN selects adaptive sub-networks for different inputs at the inference stage. Experiments on several benchmarks demonstrate that our DMVFN is an order of magnitude faster than Deep Voxel Flow [35] and surpasses the state-of-the-art iterative-based OPT [63] on generated image quality.", + "bbox": [ + 73, + 335, + 473, + 580 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 76, + 607, + 209, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Video prediction aims to predict future video frames from the current ones. The task potentially benefits the study on representation learning [40] and downstream forecasting tasks such as human motion prediction [39], autonomous driving [6], and climate change [48], etc. During the last decade, video prediction has been increasingly studied in both academia and industry community [5, 7].", + "bbox": [ + 75, + 633, + 468, + 739 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Video prediction is challenging because of the diverse and complex motion patterns in the wild, in which accurate motion estimation plays a crucial role [35, 37, 58]. Early methods [37, 58] along this direction mainly utilize recurrent neural networks [19] to capture temporal motion information for video prediction. To achieve robust long-term prediction, the works of [41, 59, 62] additionally exploit the semantic or instance segmentation maps of video frames for semantically coherent motion estimation in complex scenes.", + "bbox": [ + 75, + 739, + 468, + 877 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6dc7efb43097abff1d9318fcdb362bba0ffca038bd707eec0cfd1dc410e60dfb.jpg", + "image_caption": [ + "Figure 1. Average MS-SSIM and GFLOPs of different video prediction methods on Cityscapes [9]. The parameter amounts are provided in brackets. DMVFN outperforms previous methods in terms of image quality, parameter amount, and GFLOPs." + ], + "image_footnote": [], + "bbox": [ + 504, + 303, + 890, + 539 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, the semantic or instance maps may not always be available in practical scenarios, which limits the application scope of these video prediction methods [41,59,62]. To improve the prediction capability while avoiding extra inputs, the method of OPT [63] utilizes only RGB images to estimate the optical flow of video motions in an optimization manner with impressive performance. However, its inference speed is largely bogged down mainly by the computational costs of pre-trained optical flow model [54] and frame interpolation model [22] used in the iterative generation.", + "bbox": [ + 496, + 638, + 893, + 790 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The motions of different objects between two adjacent frames are usually of different scales. This is especially evident in high-resolution videos with meticulous details [49]. The spatial resolution is also of huge differences in real-world video prediction applications. To this end, it is essential yet challenging to develop a single model for multiscale motion estimation. An early attempt is to extract", + "bbox": [ + 496, + 794, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. 
Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 810, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding authors.", + "bbox": [ + 94, + 886, + 228, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "6121", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "multi-scale motion cues in different receptive fields by employing the encoder-decoder architecture [35], but in practice it is not flexible enough to deal with complex motions.", + "bbox": [ + 75, + 90, + 467, + 136 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to explicitly model the complex motion cues of diverse scales between adjacent video frames by dynamic optical flow estimation. Our DMVFN is consisted of several Multi-scale Voxel Flow Blocks (MVFBs), which are stacked in a sequential manner. On top of MVFBs, a light-weight Routing Module is proposed to adaptively generate a routing vector according to the input frames, and to dynamically select a subnetwork for efficient future frame prediction. We conduct experiments on four benchmark datasets, including Cityscapes [9], KITTI [12], DAVIS17 [43], and ViceoTest [69], to demonstrate the comprehensive advantages of our DMVFN over representative video prediction methods in terms of visual quality, parameter amount, and computational efficiency measured by floating point operations (FLOPs). A glimpse of comparison results by different methods is provided in Figure 1. One can see that our DMVFN achieves much better performance in terms of accuracy and efficiency on the Cityscapes [9] dataset. Extensive ablation studies validate the effectiveness of the components in our DMVFN for video prediction.", + "bbox": [ + 75, + 136, + 468, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are mainly three-fold:", + "bbox": [ + 96, + 469, + 442, + 482 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We design a light-weight DMVFN to accurately predict future frames with only RGB frames as inputs. Our DMVFN is consisted of new MVFB blocks that can model different motion scales in real-world videos.", + "- We propose an effective Routing Module to dynamically select a suitable sub-network according to the input frames. The proposed Routing Module is end-to-end trained along with our main network DMVFN.", + "- Experiments on four benchmarks show that our DMVFN achieves state-of-the-art results while being an order of magnitude faster than previous methods." + ], + "bbox": [ + 96, + 489, + 467, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 76, + 686, + 217, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Video Prediction", + "text_level": 1, + "bbox": [ + 76, + 710, + 240, + 724 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Early video prediction methods [35, 37, 58] only utilize RGB frames as inputs. For example, PredNet [37] learns an unsupervised neural network, with each layer making local predictions and forwarding deviations from those predictions to subsequent network layers. 
MCNet [58] decomposes the input frames into motion and content components, which are processed by two separate encoders. DVF [35] is a fully-convolutional encoder-decoder network synthesizing intermediate and future frames by approximating voxel flow for motion estimation. Later, extra information is exploited by video prediction methods in pursuit of better", + "bbox": [ + 75, + 734, + 467, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "performance. For example, the methods of Vid2vid [59], Seg2vid [41], HVP [32], and SADM [2] require additional semantic maps or human pose information for better video prediction results. Additionally, Qi et al. [44] used extra depth maps and semantic maps to explicitly inference scene dynamics in 3D space. FVS [62] separates the inputs into foreground objects and background areas by semantic and instance maps, and uses a spatial transformer to predict the motion of foreground objects. In this paper, we develop a light-weight and efficient video prediction network that requires only sRGB images as the inputs.", + "bbox": [ + 496, + 90, + 890, + 257 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Optical Flow", + "text_level": 1, + "bbox": [ + 500, + 268, + 633, + 284 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optical flow estimation aims to predict the per-pixel motion between adjacent frames. Deep learning-based optical flow methods [17,29,38,53,54] have been considerably advanced ever since Flownet [11], a pioneering work to learn optical flow network from synthetic data. Flownet2.0 [25] improves the accuracy of optical flow estimation by stacking sub-networks for iterative refinement. A coarse-to-fine spatial pyramid network is employed in SPynet [46] to estimate optical flow at multiple scales. PWC-Net [53] employs feature warping operation at different resolutions and uses a cost volume layer to refine the estimated flow at each resolution. RAFT [54] is a lightweight recurrent network sharing weights during the iterative learning process. FlowFormer [21] utilizes an encoder to output latent tokens and a recurrent decoder to decode features, while refining the estimated flow iteratively. In video synthesis, optical flow for downstream tasks [22, 35, 68, 69, 72] is also a hot research topic. Based on these approaches, we aim to design a flow estimation network that can adaptively operate based on each sample for the video prediction task.", + "bbox": [ + 496, + 292, + 890, + 594 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3. Dynamic Network", + "text_level": 1, + "bbox": [ + 500, + 604, + 676, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The design of dynamic networks is mainly divided into three categories: spatial-wise, temporal-wise, and sample-wise [16]. Spatial-wise dynamic networks perform adaptive operations in different spatial regions to reduce computational redundancy with comparable performance [20, 47, 57]. In addition to the spatial dimension, dynamic processing can also be applied in the temporal dimension. Temporal-wise dynamic networks [52, 64, 70] improve the inference efficiency by performing less or no computation on unimportant sequence frames. To handle the input in a data-driven manner, sample-wise dynamic networks adaptively adjust network structures to side-off the extra computation [56, 60], or adaptively change the network parameters to improve the performance [10, 18, 51, 76]. 
Designing and training a dynamic network is not trivial since it is difficult to directly enable a model with complex topology connections. We need to design a well-structured and robust model before considering its dynamic mechanism. In this paper,", + "bbox": [ + 496, + 628, + 890, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "6122", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0fc09876a02da94c26a51e557970913b2d8d61d9dc263948db00d9b38c7d8f75.jpg", + "image_caption": [ + "(a) Voxel Flow-based Image Fusion" + ], + "image_footnote": [], + "bbox": [ + 122, + 88, + 444, + 219 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b76c9cc72387c1175de1d856c59ddbef47d4c19965d05183878bb6d6577eded8.jpg", + "image_caption": [ + "(b) DMVFN" + ], + "image_footnote": [], + "bbox": [ + 119, + 248, + 467, + 353 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/229ec29b64f11faf740246e7d45b364695f0da07d4b24ac609c9d69d8b11b42c.jpg", + "image_caption": [ + "(c) MVFB", + "(d) Routing Module", + "Figure 2. Overview of the proposed Dynamic Multi-scale Voxel Flow Network (DMVFN). $(a)$ : To predict a future frame, we use the voxel flow [35] to guide the pixel fusion of the input frames. The voxel flow contains the prediction of object motion and occlusion. $(b)$ : DMVFN contains several MVFBs with decreasing scaling factor $S^i$ . According to the routing vector $v$ estimated by a Routing Module, a sub-network is selected to process the input image. $(c)$ : Each MVFB has a scaling factor $S^i$ , which means that the motion path is performed on images whose sizes are $1 / S^i$ of the original. $(d)$ : Two consecutive frames are fed into several neural layers and a Differentiable Bernoulli sample to generate the hard routing vector." + ], + "image_footnote": [], + "bbox": [ + 472, + 87, + 851, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "we propose a module to dynamically perceive the motion magnitude of input frames to select the network structure.", + "bbox": [ + 75, + 498, + 468, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Methodology", + "text_level": 1, + "bbox": [ + 76, + 542, + 210, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Background", + "text_level": 1, + "bbox": [ + 76, + 568, + 207, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video prediction. Given a sequence of past $t$ frames $\\{I_i\\in \\mathbb{R}^{h\\times w\\times 3}|i = 1,\\dots ,t\\}$ , video prediction aims to predict the future frames $\\{\\tilde{I}_{t + 1},\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}$ . The inputs of our video prediction model are only the two consecutive frames $I_{t - 1}$ and $I_{t}$ . We concentrate on predicting $\\tilde{I}_{t + 1}$ , and iteratively predict future frames $\\{\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}$ in a similar manner. Denote the video prediction model as $G_{\\theta}(I_{t - 1},I_t)$ , where $\\theta$ is the set of model parameters to be learned, the learning objective is to minimize the difference between $\\tilde{I}_{t + 1} = G_{\\theta}(I_{t - 1},I_t)$ and the \"ground truth\" $I_{t + 1}$ .", + "bbox": [ + 75, + 590, + 468, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Voxel flow. Considering the local consistency in space-time, the pixels of a generated future frame come from nearby regions of the previous frames [69, 75]. 
In video prediction task, researchers estimate optical flow $\\mathbf{f}_{t + 1\\rightarrow t}$ from $I_{t + 1}$ to $I_{t}$ [35]. And the corresponding frame is obtained using the pixel-wise backward warping [26] (denoted as $\\overleftarrow{\\mathcal{W}}$ ). In addition, to deal with the occlusion, some methods [28, 35] further introduce a fusion map $\\mathbf{m}$ to fuse the pixels of $I_{t}$ and $I_{t - 1}$ . The final predicted frame is obtained", + "bbox": [ + 75, + 762, + 468, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "by the following formulation (Figure 2 $(a)$ ):", + "bbox": [ + 498, + 498, + 792, + 513 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {I} _ {t + 1 \\leftarrow t - 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, \\mathbf {f} _ {t + 1 \\rightarrow t - 1}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 523, + 890, + 544 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {I} _ {t + 1 \\leftarrow t} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t}, \\mathbf {f} _ {t + 1 \\rightarrow t}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 566, + 890, + 585 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {I} _ {t + 1} = \\hat {I} _ {t + 1 \\leftarrow t - 1} \\times \\mathbf {m} + \\hat {I} _ {t + 1 \\leftarrow t} \\times (1 - \\mathbf {m}). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 545, + 606, + 890, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $\\hat{I}_{t+1 \\leftarrow t}$ and $\\hat{I}_{t+1 \\leftarrow t-1}$ are intermediate warped images. To simplify notations, we refer to the optical flows $\\mathbf{f}_{t+1 \\rightarrow t}, \\mathbf{f}_{t+1 \\rightarrow t-1}$ and the fusion map $\\mathbf{m}$ collectively as the voxel flow $\\mathbf{F}_{t+1}$ , similar to the notations in [35]. The above equations can be simplified to the following form:", + "bbox": [ + 496, + 628, + 890, + 705 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {I} _ {t + 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, I _ {t}, \\mathbf {F} _ {t + 1}\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 715, + 890, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Dynamic Multi-Scale Voxel Flow Network", + "text_level": 1, + "bbox": [ + 498, + 756, + 857, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "MVFB. To estimate the voxel flow, DVF [35] assumes that all optical flows are locally linear and temporally symmetric around the targeted time, which may be unreasonable for large-scale motions. To address the object position changing issue [22] in adjacent frames, OPT [63] uses flow reversal layer [68] to convert forward flows to backward flows. 
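Before the MVFB design is introduced below, the warp-and-fuse step of Eqns. (1)-(3) can be made concrete with a short PyTorch sketch. This is a minimal illustration rather than the paper's released code: the (N, C, H, W) tensor layout, the pixel-unit flow convention, and the helper names `backward_warp` / `fuse` are assumptions.

```python
# Minimal sketch of voxel-flow warping and fusion (Eqns. (1)-(3)); not the released code.
import torch
import torch.nn.functional as F

def backward_warp(img, flow):
    """Backward-warp img (N,C,H,W) with a dense flow (N,2,H,W) given in pixels."""
    _, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    base = torch.stack((xs, ys), dim=0).float().to(img.device)   # (2,H,W), x first
    coords = base.unsqueeze(0) + flow                            # sample at x + f(x)
    grid_x = 2.0 * coords[:, 0] / max(w - 1, 1) - 1.0            # normalize to [-1, 1]
    grid_y = 2.0 * coords[:, 1] / max(h - 1, 1) - 1.0
    grid = torch.stack((grid_x, grid_y), dim=-1)                 # (N,H,W,2) for grid_sample
    return F.grid_sample(img, grid, align_corners=True)

def fuse(i_prev, i_cur, f_to_prev, f_to_cur, m):
    """Eqn. (3): blend the two warped frames with the fusion map m in [0, 1]."""
    return backward_warp(i_prev, f_to_prev) * m + backward_warp(i_cur, f_to_cur) * (1 - m)

if __name__ == "__main__":
    i_prev, i_cur = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
    zero_flow = torch.zeros(1, 2, 64, 64)                        # zero flow = identity warp
    m = torch.full((1, 1, 64, 64), 0.5)
    print(fuse(i_prev, i_cur, zero_flow, zero_flow, m).shape)    # torch.Size([1, 3, 64, 64])
```

With zero flows the warp reduces to the identity, so the toy call simply averages the two inputs; in the network, the flows and the fusion map are what the MVFBs described next learn to predict.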
We aim to estimate voxel flow end-to-end without introducing new components and unreasonable constraints.", + "bbox": [ + 496, + 779, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "6123", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f59e3d5d7093a275e2f4bd42f9e09d29bac48e89b75633ed29c7b57648462a68.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 99, + 88, + 276, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e291b529e57bb3fff07bd03becf4ca8a342f76b0b5905f22557f989afdaff33a.jpg", + "image_caption": [ + "DVF" + ], + "image_footnote": [], + "bbox": [ + 277, + 88, + 395, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4020023bc6d8f37032c16b5d81dc1b822d6370c94242fb14dab60404db8bd024.jpg", + "image_caption": [ + "DYAN", + "Figure 3. Visual comparison of $(t + 1)$ -th frame predicted from $t$ -th and $(t - 1)$ -th frames on the DAVIS17-Val [43]." + ], + "image_footnote": [], + "bbox": [ + 395, + 88, + 514, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/29b9326dbf78209ac643263e212efe256ed0690376355ba49318fa9c7e8a56b4.jpg", + "image_caption": [ + "OPT" + ], + "image_footnote": [], + "bbox": [ + 514, + 88, + 633, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f7c76414e3fb2116b4609b85664a59171dd8f2b3a26b2d6a949bf26d73b8bb0f.jpg", + "image_caption": [ + "DMVFN" + ], + "image_footnote": [], + "bbox": [ + 633, + 88, + 751, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4845f3b7a0c8ac6b91a14e5ee6a7c085035ee994ef026c62bffa882475cc7745.jpg", + "image_caption": [ + "GT" + ], + "image_footnote": [], + "bbox": [ + 751, + 88, + 870, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We denote the $i$ -th MVFB as $f_{MVFB}^{i}(\\cdot)$ . It learns to approximate target voxel flow $\\mathbf{F}_{t+1}^{i}$ by taking two frames $I_{t-1}$ and $I_{t}$ , the synthesized frame $\\tilde{I}_{t+1}^{i-1}$ , and the voxel flow estimated by previous blocks $\\mathbf{F}_{t+1}^{i-1}$ as inputs.", + "bbox": [ + 76, + 316, + 468, + 380 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The architecture of our MVFB is shown in Figure 2 $(c)$ . To capture the large motion while retaining the original spatial information, we construct a two-branch network structure [71]. This design inherits from pyramidal optical flow estimation [46, 53]. In the motion path, the input is downsampled by a scaling factor $S^i$ to facilitate the expansion of the receptive field. Another spatial path operates at high resolution to complement the spatial information. We denote $\\tilde{I}_{t + 1}^{i}$ as the output of the $i$ -th MVFB. Formally,", + "bbox": [ + 75, + 381, + 468, + 518 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = f _ {\\mathrm {M V F B}} ^ {i} \\left(I _ {t - 1}, I _ {t}, \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, S ^ {i}\\right). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 116, + 529, + 468, + 549 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The initial values of $\\tilde{I}_{t+1}^{0}$ and $\\mathbf{F}_{t+1}^{0}$ are set to zero. As illustrated in Figure 2 (b), our DMVFN contains 9 MVFBs. 
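As a rough sketch of the two-branch structure in Fig. 2(c) and Eqn. (5): the motion path runs on inputs downsampled by the scaling factor S^i, the spatial path runs at full resolution, and a small head predicts a residual update to the voxel flow. The class name `MVFBSketch`, the channel widths, the conv depths, and the 5-channel voxel-flow layout (two 2-channel flows plus a fusion map) are assumptions for illustration, not the released architecture; the refined flow would then be turned into the next predicted frame via the warp-and-fuse step of Eqn. (3).

```python
# Illustrative two-branch MVFB-style block (cf. Eqn. (5) and Fig. 2(c)); widths, depths,
# and the 5-channel voxel-flow layout are assumptions, not the paper's exact design.
import torch
import torch.nn as nn
import torch.nn.functional as F

class MVFBSketch(nn.Module):
    def __init__(self, in_ch=3 + 3 + 3 + 5, hidden=32):   # I_{t-1}, I_t, current prediction, voxel flow
        super().__init__()
        self.motion = nn.Sequential(                        # low-resolution motion path
            nn.Conv2d(in_ch, hidden, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(hidden, hidden, 3, padding=1), nn.ReLU(inplace=True),
        )
        self.spatial = nn.Sequential(                       # full-resolution spatial path
            nn.Conv2d(in_ch, hidden, 3, padding=1), nn.ReLU(inplace=True),
        )
        self.head = nn.Conv2d(2 * hidden, 5, 3, padding=1)  # residual voxel-flow update

    def forward(self, i_prev, i_cur, pred, voxel_flow, scale):
        x = torch.cat([i_prev, i_cur, pred, voxel_flow], dim=1)
        h, w = x.shape[-2:]
        small = F.interpolate(x, scale_factor=1.0 / scale, mode="bilinear", align_corners=False)
        motion = F.interpolate(self.motion(small), size=(h, w), mode="bilinear", align_corners=False)
        residual = self.head(torch.cat([motion, self.spatial(x)], dim=1))
        return voxel_flow + residual                        # refined voxel flow F^{i}_{t+1}

if __name__ == "__main__":
    block = MVFBSketch()
    frames = [torch.rand(1, 3, 64, 64) for _ in range(3)]
    out = block(*frames, torch.zeros(1, 5, 64, 64), scale=4)
    print(out.shape)                                        # torch.Size([1, 5, 64, 64])
```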
To generate a future frame, we iteratively refine a voxel flow [35] and fuse the pixels of the input frames.", + "bbox": [ + 75, + 561, + 468, + 622 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Many optical flow estimation methods predict the flow field on a small image, and then refine it on a large image [53, 67]. For simplicity and intuition, we consider decreasing scaling factor sequences. Finally, the scaling factors is experimentally set as $[4, 4, 4, 2, 2, 2, 1, 1, 1]$ .", + "bbox": [ + 75, + 623, + 468, + 699 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "DMVFN. Different pairs of adjacent frames have diverse motion scales and different computational demands. An intuitive idea is to adaptively select dynamic architectures conditioned on each input. We then perform dynamic routing within the super network (the whole architecture) [16], including multiple possible paths. DMVFN saves redundant computation for samples with small-scale motion and preserves the representation ability for large-scale motion.", + "bbox": [ + 75, + 718, + 468, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To make our DMVFN end-to-end trainable, we design a differentiable Routing Module containing a tiny neural network to estimate routing vector $v$ for each input sample. Based on this vector, our DMVFN dynamically selects a", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "sub-network to process the input data. As the Figure 2 $(b)$ shows, some blocks are skipped during inference.", + "bbox": [ + 498, + 316, + 888, + 345 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Different from some dynamic network methods that can only continuously select the first several blocks ( $n$ options) [4, 55], DMVFN is able to choose paths freely ( $2^n$ options). DMVFN trains different sub-networks in the super network with various possible inference paths and uses dynamic routing inside the super network during inference to reduce redundant computation while maintaining the performance. A dynamic routing vector $v \\in \\{0,1\\}^n$ is predicted by the proposed Routing Module. For the $i$ -th MVFN block of DMVFN, we denote $v_i$ as the reference of whether processing the reached voxel flow $\\mathbf{F}_{t+1}^{i-1}$ and the reached predicted frame $\\tilde{I}_{t+1}^{i-1}$ . The path $f_{\\mathrm{MVFB}}^i$ to the $i$ -th block from the last block will be activated only when $v_i = 1$ . Formally,", + "bbox": [ + 496, + 347, + 890, + 546 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = \\left\\{ \\begin{array}{l l} f _ {\\mathrm {M V F B}} ^ {i} \\left(\\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}\\right), & v _ {i} = 1 \\\\ \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, & v _ {i} = 0. \\end{array} \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 539, + 556, + 890, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During the training phase, to enable the backpropagation of Eqn. (6), we use $v_{i}$ and $(1 - v_{i})$ as the weights of the two branches and average their outputs.", + "bbox": [ + 496, + 614, + 890, + 660 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the iterative scheme of our DMVFN, each MVFB essentially refines the current voxel flow estimation to a new one. 
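The gating of Eqn. (6), together with the soft blend used during training described just above, amounts to the control flow sketched below. The callables in `blocks` and the single-tensor `state` are placeholders standing in for the MVFBs and the (frame, voxel flow) pair; this illustrates the routing behaviour, not the actual interface.

```python
# Sketch of the dynamic routing behaviour around Eqn. (6); blocks/state are placeholders.
import torch

def run_routed_blocks(blocks, state, routing, training=False):
    """blocks: callables that refine `state`; routing: length-n tensor, hard {0,1}
    at inference and soft values in [0, 1] during training."""
    for block, v in zip(blocks, routing):
        if training:
            state = v * block(state) + (1.0 - v) * state   # differentiable blend of both branches
        elif v > 0.5:
            state = block(state)                           # activated block refines the state
        # otherwise the block is skipped and the state passes through unchanged
    return state

if __name__ == "__main__":
    dummy_blocks = [lambda s, k=i: s + 0.1 * (k + 1) for i in range(9)]   # stand-ins for 9 MVFBs
    routing = torch.tensor([1., 1., 0., 1., 0., 0., 1., 0., 1.])
    print(run_routed_blocks(dummy_blocks, torch.zeros(1), routing))       # only active blocks contribute
```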
This special property allows our DMVFN to skip some MVFBs for every pair of input frames. Here, we design a differentiable and efficient routing module for learning to trade-off each MVFB block. This is achieved by predicting a routing vector $v \\in \\{0,1\\}^n$ to identify the proper sub-network (e.g., 0 for deactivated MVFBs, 1 for activated MVFBs). We implement the routing module by a small neural network ( $\\sim 1/6$ GFLOPs of the super network), and show its architecture in Figure 2 (d). It learns to predict the probability $\\tilde{v}$ of choosing MVFBs by:", + "bbox": [ + 496, + 660, + 890, + 840 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {v} = \\operatorname {L i n e a r} (\\operatorname {A v g P o o l i n g} (\\operatorname {C o n v s} (I _ {t - 1}, I _ {t}))), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 549, + 848, + 890, + 864 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nv = \\text {B e r n o u l l i - S a m p l i n g} (\\tilde {v}). \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 885, + 890, + 901 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "6124", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/5785c949229a8a5a025c47428805b7c291cb8d4e19110fc3d84a0376bbbf090f.jpg", + "table_caption": [ + "Table 1. Quantitative results of different methods on the Cityscapes [9], and KITTI [12] datasets. \"RGB\", \"F\", \"S\" and \"T\" denote the video frames, optical flow, semantic map, and instance map, respectively. We denote our DMVFN without routing module as \"DMVFN (w/o r)\". FVS [62] integrates a segmentation model [77] on KITTI [12] to obtain the semantic maps. \"N/A\" means not available." + ], + "table_footnote": [], + "table_body": "
MethodInputsCityscapes-Train→Cityscapes-Test [9]KITTI-Train→KITTI-Test [12]
GFLOPsMS-SSIM (×10-2) ↑ t+1t+3t+5LPIPS (×10-2) ↓ t+1t+3t+5GFLOPsMS-SSIM (×10-2) ↑ t+1t+3t+5LPIPS (×10-2) ↓ t+1t+3t+5
Vid2vid [59]RGB+S603.7988.1680.5575.1310.5815.9220.14N/AN/AN/AN/AN/AN/AN/AN/A
Seg2vid [41]RGB+S455.8488.32N/A61.639.69N/A25.99N/AN/AN/AN/AN/AN/AN/AN/A
FVS [62]RGB+S+I1891.6589.1081.1375.688.5012.9816.50768.9679.2867.6560.7718.4824.6130.49
SADM [2]RGB+S+FN/A95.99N/A83.517.67N/A14.93N/A83.0672.4464.7214.4124.5831.16
PredNet [37]RGB62.6284.0379.2575.2125.9929.9936.0325.4456.2651.4747.5655.3558.6662.95
MCNET [58]RGB502.8089.6978.0770.5818.8831.3437.34204.2675.3563.5255.4824.0531.7137.39
DVF [35]RGB409.7883.8576.2371.1117.3724.0528.79166.4753.9346.9942.6232.4737.4341.59
CorrWise [13]RGB944.2992.80N/A83.908.50N/A15.00383.6282.00N/A66.7017.20N/A25.90
OPT [63]RGB313482.1594.5486.8980.406.4612.5017.83127431.7182.7169.5061.0912.3420.2926.35
DMVFN (w/o r)RGB24.5195.2987.9181.485.6010.4814.919.9688.0676.5368.2910.7019.2826.13
DMVFNRGB12.7195.7389.2483.455.5810.4714.825.1588.5378.0170.5210.7419.2726.05
", + "bbox": [ + 98, + 142, + 874, + 320 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Differentiable Routing. To train the proposed Routing Module, we need to constrain the probability values to prevent the model from falling into trivial solutions (e.g., select all blocks). On the other hand, we allow this module to participate in the gradient calculation to achieve end-to-end training. We introduce the Gumbel Softmax [27] and the Straight-Through Estimator (STE) [3] to tackle this issue.", + "bbox": [ + 75, + 345, + 468, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One popular method to make the routing probability $\\tilde{v}$ learnable is the Gumbel Softmax technique [24, 27]. By treating the selection of each MVFB as a binary classification task, the soft dynamic routing vector $v\\in \\mathbb{R}^n$ is", + "bbox": [ + 75, + 452, + 470, + 513 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nv _ {i} = \\frac {\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right)}{\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right) + \\exp \\left(\\frac {1}{\\tau} \\left(2 - \\tilde {v} _ {i} - G _ {i}\\right)\\right)}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 96, + 520, + 468, + 559 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $i = 1, \\dots, n$ , $G_{i} \\in \\mathbb{R}$ is Gumbel noise following the Gumbel(0,1) distribution, and $\\tau$ is a temperature parameter. We start at a very high temperature to ensure that all possible paths become candidates, and then the temperature is attenuated to a small value to approximate one-hot distribution. To encourage the sum of the routing vectors $\\{v_{i}\\}_{i=1}^{n}$ to be small, we add the regularization term $\\left(\\frac{1}{n} \\sum_{i=1}^{n} v_{i}\\right)$ to the final loss function. However, we experimentally find that our DMVFN usually converges to an input-independent structure when temperature decreases. We conjecture that the control of the temperature parameter $\\tau$ and the design of the regularization term require further study.", + "bbox": [ + 75, + 565, + 468, + 746 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Inspired by previous research on low-bit width neural networks [23, 74], we adopt STE for Bernoulli Sampling (STEBS) to make the binary dynamic routing vector differentiable. An STE can be regarded as an operator that has arbitrary forward and backward operations. Formally,", + "bbox": [ + 75, + 747, + 468, + 823 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {w} _ {i} = \\min (\\beta \\times n \\times \\sigma (\\tilde {v} _ {i}) / \\sum_ {i} ^ {n} \\sigma (\\tilde {v} _ {i}), 1), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 135, + 830, + 468, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "STE Forward: $v_{i}\\sim$ Bernoulli $(\\tilde{w}_i)$ (11)", + "bbox": [ + 148, + 883, + 468, + 902 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\text {S T E B a c k w a r d}: \\frac {\\partial o}{\\partial \\tilde {w}} = \\frac {\\partial o}{\\partial v}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 356, + 890, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\sigma$ is the Sigmoid function and we denote the objective function as $o$ . 
We use the well-defined gradient $\\frac{\\partial o}{\\partial v}$ as an approximation for $\\frac{\\partial o}{\\partial \\tilde{w}}$ to construct the backward pass. In Eqn. (10), we normalize the sample rate. During training, $\\beta$ is fixed at 0.5. We can adjust the hyper-parameter $\\beta$ to control the complexity in the inference phase.", + "bbox": [ + 496, + 390, + 893, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Implementation Details", + "text_level": 1, + "bbox": [ + 500, + 491, + 718, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Loss function. Our training loss $L_{total}$ is the sum of the reconstruction losses of outputs of each block $I_{t + 1}^{i}$ :", + "bbox": [ + 496, + 513, + 890, + 546 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {t o t a l} = \\sum_ {i = 1} ^ {n} \\gamma^ {n - i} d \\left(\\tilde {I} _ {t + 1} ^ {i}, I _ {t + 1}\\right), \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 586, + 556, + 890, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $d$ is the $\\ell_1$ loss calculated on the Laplacian pyramid representations [42] extracted from each pair of images. And we set $\\gamma = 0.8$ in our experiments following [54].", + "bbox": [ + 496, + 604, + 890, + 651 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training strategy. Our DMVFN is trained on $224 \\times 224$ image patches. The batch size is set as 64. We employ the AdamW optimizer [30, 36] with a weight decay of $10^{-4}$ . We use a cosine annealing strategy to reduce the learning rate from $10^{-4}$ to $10^{-5}$ . Our model is trained on four 2080Ti GPUs for 300 epochs, which takes about 35 hours.", + "bbox": [ + 496, + 667, + 893, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 500, + 772, + 633, + 789 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Dataset and Metric", + "text_level": 1, + "bbox": [ + 500, + 796, + 684, + 811 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset. We use several datasets in the experiments:", + "bbox": [ + 500, + 819, + 859, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Cityscapes dataset [9] contains 3,475 driving videos with resolution of $2048 \\times 1024$ . We use 2,945 videos for training (Cityscapes-Train) and 500 videos in Cityscapes dataset [9] for testing (Cityscapes-Test).", + "bbox": [ + 496, + 839, + 893, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "6125", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f62fb5504e40abb953a28ac64a1d0347ff9c59843dd4fa7d8f1be8caff7e2317.jpg", + "table_caption": [ + "Table 2. Quantitative results on the DAVIS17-Val [43] and Vimeo90K-Test [69] benchmarks. We denote DMVFN without routing as \"DMVFN (w/o r)\". \"N/A\" means not available." + ], + "table_footnote": [], + "table_body": "
MethodUCF101-Train→DAVIS17-ValUCF101-Train→Vimeo90K-Test
GFLOPs ↓MS-SSIM (×10-2) ↑ t+1t+3LPIPS (×10-2) ↓ t+1t+3GFLOPs ↓MS-SSIM (×10-2) ↑ t+1LPIPS (×10-2) ↓ t+1
DVF [35]324.1568.6155.4723.2334.2289.6492.117.73
DYAN [34]130.1278.9670.4113.0921.43N/AN/AN/A
OPT [63]165312.8083.2673.8511.4018.2145716.2096.753.59
DMVFN (w/o r)19.3984.8175.059.4116.245.3697.243.30
DMVFN9.9683.9774.819.9617.282.7797.013.69
", + "bbox": [ + 99, + 128, + 867, + 258 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/56c72b869fbd430a17843e2c62d63c580540d90393ef8845f5d27046fa2f5bb3.jpg", + "image_caption": [ + "Figure 4. Prediction comparison on KITTI. The yellow line is aligned with the car in the ground truth. The results show that previous methods (DVF [35], FVS [62], and OPT [63]) cannot accurately predict the car's location in the long-term prediction. The motion predicted by our DMVFN is the most similar to the ground truth, while the errors of other methods grow larger with time. The fences predicted by DMVFN remain vertical when moving." + ], + "image_footnote": [], + "bbox": [ + 89, + 280, + 455, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "KITTI dataset [12] contains 28 driving videos with resolution of $375 \\times 1242$ . 24 videos in KITTI dataset are used for training (KITTI-Train) and the remaining four videos in KITTI dataset are used for testing (KITTI-Test).", + "bbox": [ + 75, + 708, + 468, + 768 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "UCF101 [50] dataset contains 13,320 videos under 101 different action categories with resolution of $240\\times 320$ . We only use the training subset of UCF101 [50].", + "bbox": [ + 75, + 773, + 468, + 820 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Vimeo90K [69] dataset has 51,312 triplets for training, where each triplet contains three consecutive video frames with resolution of $256 \\times 448$ . There are 3,782 triplets in the Vimeo90K testing set. We denote the training and testing subsets as Video-Train and Video-Test, respectively.", + "bbox": [ + 75, + 824, + 468, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "DAVIS17 [43] has videos with resolution around $854 \\times 480$ . We use the DAVIS17-Val containing 30 videos as test set. Configurations. We have four experimental configurations following previous works [34, 35, 63]:", + "bbox": [ + 498, + 282, + 890, + 345 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Cityscapes-Train $\\rightarrow$ Cityscapes-Test", + "KITTI-Train $\\rightarrow$ KITTI-Test", + "UCF101→DAVIS17-Val", + "UCF101 $\\rightarrow$ Vimeo-Test" + ], + "bbox": [ + 517, + 359, + 767, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, the left and right sides of the arrow represent the training set and the test set, respectively. For a fair comparison with other methods that are not tailored for high resolution videos, we follow the setting in [62] and resize the images in Cityscapes [9] to $1024 \\times 512$ and images in KITTI [12] to $256 \\times 832$ , respectively. During inference of Cityscapes [9] and KITTI [12], we predict the next five frames. We predict the next three frames for DAVIS17-Val [43] and next one frame for Video-Test [69], respectively. Note that OPT [63] is an optimization-based approach and uses pretrained RAFT [54] and RIFE [22] models. RIFE [22] and RAFT [54] are trained on the Video-Train dataset [69] and the Flying Chairs dataset [11], respectively.", + "bbox": [ + 496, + 441, + 890, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics. Following previous works [63], we use Multi-Scale Structural Similarity Index Measure (MSSSIM) [61] and a perceptual metric LPIPS [73] for quantitative evaluation. 
To measure the model complexity, we calculate the GFLOPs.", + "bbox": [ + 496, + 638, + 890, + 714 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. Comparison to State-of-the-Arts", + "text_level": 1, + "bbox": [ + 500, + 724, + 784, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our DMVFN with state-of-the-art video prediction methods. These methods fall into two categories: the methods requiring only RGB images as input (e.g., PredNet [37], MCNET [58], DVF [35], CorrWise [13], OPT [63]) and the methods requiring extra information as input (e.g., Vid2vid [59], Seg2vid [41], FVS [62], SADM [2]).", + "bbox": [ + 496, + 750, + 890, + 854 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quantitative results. The quantitative results are reported in Table 1 and Table 2. When calculating the GFLOPs of OPT [63], the number of iterations is set as 3,000. In", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6126", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3f0733e866e576f4d0e58151b4572ae6af34dbdc8fb0b18269c865f2665ccaa7.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 86, + 88, + 367, + 217 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/39725d0d7e1000ea5e1de7127b2b1616a228e7669411d738c8dd911c01cbc418.jpg", + "image_caption": [ + "(b)" + ], + "image_footnote": [], + "bbox": [ + 375, + 88, + 591, + 217 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/08080ea454f757a59be66a65cdc509558e3ac92d93123950f63248238175048b.jpg", + "image_caption": [ + "(c)" + ], + "image_footnote": [], + "bbox": [ + 604, + 88, + 883, + 217 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4c8a4c27bf3832b14061b616cb0ab93ca2ae743d6b999ccf8921122aa33b3b90.jpg", + "image_caption": [ + "Figure 5. (a): Average usage rate on videos with different motion magnitudes. \"Fast\": tested on Videox-Fast. \"Medium\": tested on Vimeo-Medium. \"Slow\": tested on Videox-Slow. (b): Difference between \"Fast\"/\"Slow\" and \"Medium\" of (a). (c): Averaged usage rate on different time intervals between two input frames from Videox-Slow. \"Int\": time interval.", + "$t - 1$" + ], + "image_footnote": [], + "bbox": [ + 106, + 311, + 205, + 392 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3d4ed97f0fc99b344c776ee05da6abd26ec989a8e76a6701f109f015efc2fb44.jpg", + "image_caption": [ + "t", + "Figure 6. Visual effect comparison in the Viceo-Test [69] dataset. our DMVFN faithfully reproduces the motion of the hand and the head with less distortion and artifacts." + ], + "image_footnote": [], + "bbox": [ + 106, + 411, + 205, + 491 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a68218ddcd92346e21efec19d1a79797112fb3802e836176981175cf5eb873d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 325, + 442, + 404 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/737a7e019ba2a9232a98e72cc2e09a2628da9a7eaa613f08940afc7d6fb84fa1.jpg", + "image_caption": [ + "$t + 1$ $t + 3$" + ], + "image_footnote": [], + "bbox": [ + 223, + 402, + 442, + 482 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "terms of MS-SSIM and LPIPS, our DMVFN achieves much better results than the other methods in both short-term and long-term video prediction tasks. 
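A hedged sketch of how per-frame complexity and latency could be measured for any of the compared two-frame predictors is given below. It relies on the third-party `thop` profiler; `model`, the input resolution, and the iteration counts are placeholders, not the paper's benchmarking script.

```python
# Illustrative FLOPs / latency measurement; `model` stands in for any predictor
# that maps two stacked RGB frames to the next frame. Assumes the `thop` package.
import time
import torch
from thop import profile

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Conv2d(6, 3, 3, padding=1).to(device).eval()  # stand-in predictor
x = torch.rand(1, 6, 512, 1024, device=device)                 # two stacked RGB frames

macs, params = profile(model, inputs=(x,))
print(f"MACs: {macs / 1e9:.2f} G, params: {params / 1e6:.2f} M "
      "(GFLOPs conventions differ; some works report 2 x MACs)")

# Average latency over repeated runs, with GPU synchronisation where applicable.
with torch.no_grad():
    for _ in range(10):                      # warm-up
        model(x)
    if device == "cuda":
        torch.cuda.synchronize()
    start = time.time()
    for _ in range(100):
        model(x)
    if device == "cuda":
        torch.cuda.synchronize()
print(f"avg latency: {(time.time() - start) / 100 * 1000:.1f} ms / frame")
```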
The GFLOPs of our DMVFN is considerably smaller than the comparison methods. These results show the proposed routing strategy reduces almost half the number of GFLOPs while maintaining comparable performance. Because the decrease of GFLOPs is not strictly linear with the actual latency [45], we measure the running speed on Titan 2080Ti. For predicting a 720P frame, DVF [35] spends 0.130s on average, while our DMVFN only needs 0.023s on average.", + "bbox": [ + 75, + 569, + 468, + 734 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e6f0f564d22f3d724ca7a29a9ef72a6a0b79521cefbb962a5b128719ac62bcf9.jpg", + "table_caption": [ + "Table 3. Comparison between DMVFN and STRPM." + ], + "table_footnote": [], + "table_body": "
MethodUCF SportsHuman3.6M
t+1t+6t+1t+4
PSNR↑ / LPIPS↓PSNR↑ / LPIPS↓PSNR↑ / LPIPS↓PSNR↑ / LPIPS↓
STRPM28.54 / 20.6920.59 / 41.1133.32 / 9.7429.01 / 10.44
DMVFN30.05 / 10.2422.67 / 22.5035.07 / 7.4829.56 / 9.74
", + "bbox": [ + 81, + 762, + 464, + 829 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "More comparison. The quantitative results compared with STRPM [8] are reported in Table 3. We train our DMVFN in UCFSports and Human3.6M datasets following the setting in [8]. We also measure the average running speed", + "bbox": [ + 75, + 840, + 468, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "on TITAN 2080Ti. To predict a $1024 \\times 1024$ frame, our DMVFN is averagely $4.06 \\times$ faster than STRPM [8].", + "bbox": [ + 498, + 314, + 890, + 345 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Qualitative results on different datasets are shown in Figure 3, Figure 4 and Figure 6. As we can see, the frames predicted by our DMVFN exhibit better temporal continuity and are more consistent with the ground truth than those by the other methods. Our DMVFN is able to predict correct motion while preserving the shape and texture of objects.", + "bbox": [ + 498, + 345, + 890, + 436 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 500, + 450, + 651, + 465 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Here, we perform extensive ablation studies to further study the effectiveness of components in our DMVFN. The experiments are performed on the Cityscapes [9] and KITTI [12] datasets unless otherwise specified.", + "bbox": [ + 498, + 474, + 890, + 535 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1) How effective is the proposed Routing Module? As suggested in [65, 66], we divide the Vimeo-90K [69] test set into three subsets: Vimeo-Fast, Vimeo-Medium, and Vimeo-Slow, which correspond to the motion range. To verify that our DMVFN can perceive motion scales and adaptively choose the proper sub-networks, we retrain our DMVFN on the Vimeo-Train [69] using the same training strategy in §3.3. We calculate the averaged usage rate of each MVFB on three test subsets. From Figures 5 (a) and 5 (b), we observe that our DMVFN prefers to select MVFBs with large scale (e.g., 4x) for two frames with large motion. There are two MVFBs with clearly smaller selection probability. We believe this reflects the inductive bias of our DMVFN on different combinations of scaling factors.", + "bbox": [ + 498, + 537, + 892, + 748 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To further verify that our DMVFN also perceives the size of the time interval, we test our DMVFN on the two frames with different time intervals (but still in the same video). We choose Vimeo-Slow as the test set, and set the time intervals as 1, 3, and 5. The results are shown in Figure 5 $(c)$ . We observe that our DMVFN prefers large-scale blocks on long-interval inputs, and small-scale blocks on short-interval inputs. This verifies that our DMVFN can perceive temporal information and dynamically select different sub-networks to handle the input frames with different time intervals.", + "bbox": [ + 496, + 750, + 890, + 900 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "6127", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To further study how the MVFBs are selected, we select 103 video sequences (contain a high-speed moving car and a relatively static background) from the KITTI dataset, denoted as KITTI-A. As shown in Table 4, on the KITTI-A dataset, our DMVFN prefers to choose MVFBs with large scaling factors to capture large movements. 
The flow estimation for static backgrounds is straightforward, while the large motion dominates the choice of our DMVFN.", + "bbox": [ + 75, + 90, + 470, + 210 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/fb64041f5933989dfeb58b006b0d642139c7e7041f25764be09b4830030e7206.jpg", + "table_caption": [ + "Table 4. Average usage rate $\\left( {10}^{-2}\\right)$ of MVFBs in our DMVFN." + ], + "table_footnote": [], + "table_body": "
Scale444222111
KITTI-A80.9534.2226.7081.1973.9144.9055.340.490
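For completeness, a small sketch of how such per-block usage rates could be tallied from the hard routing vectors is given below; `route()` is a stand-in for the routing module's 0/1 output per sample, and the dataset size simply mirrors the 103 KITTI-A sequences mentioned above.

```python
# Illustrative tally of average MVFB usage rates (cf. Table 4 and Figure 5).
# `route()` is a placeholder for the routing module's hard 0/1 output per sample.
import numpy as np

def route(frame_pair):
    # placeholder: a real implementation would run the routing network
    return np.random.randint(0, 2, size=9)

test_pairs = [object()] * 103          # e.g. the 103 KITTI-A sequences
usage = np.zeros(9)
for pair in test_pairs:
    usage += route(pair)
usage_rate = usage / len(test_pairs)   # fraction of samples activating each MVFB
print((usage_rate * 100).round(2))     # reported as x10^-2 in Table 4
```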
", + "bbox": [ + 84, + 241, + 465, + 275 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/2eb77f5f091e373a8296da3d5f90aa01ebe24e7f001a4f7d192d020bd14e8d62.jpg", + "table_caption": [ + "Table 5. Routing Module based on STEBS is effective. The evaluation metric is MS-SSIM $(\\times 10^{-2})$" + ], + "table_footnote": [], + "table_body": "
SettingCityscapesKITTI
t+1t+3t+5t+1t+3t+5
Copy last frame76.9568.8264.4558.3148.9944.16
w/o routing95.2987.9181.4888.0676.5368.29
Random91.9782.1170.0581.3169.8962.42
Gumbel Softmax95.0587.5779.5487.4275.5665.83
STEBS95.7389.2483.4588.5378.0170.52
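The "STEBS" row above refers to straight-through Bernoulli sampling. A minimal PyTorch sketch of the generic trick is shown below: sample a hard 0/1 routing vector in the forward pass while letting gradients flow to the underlying probabilities as if sampling were the identity. This is a generic illustration, not the authors' exact implementation.

```python
# Straight-through Bernoulli sampling: hard 0/1 routing in the forward pass,
# identity gradient to the probabilities in the backward pass (generic sketch).
import torch

def ste_bernoulli(prob):
    """prob: tensor of per-block activation probabilities in [0, 1]."""
    hard = torch.bernoulli(prob)              # forward: hard 0/1 sample
    return hard + prob - prob.detach()        # backward: d(hard)/d(prob) = 1

probs = torch.full((9,), 0.7, requires_grad=True)
v = ste_bernoulli(probs)                      # binary routing vector
loss = v.sum() / v.numel()                    # e.g. a cost regulariser on usage
loss.backward()
print(v, probs.grad)                          # gradients reach the probabilities
```

Unlike Gumbel Softmax, which relaxes the binary decision with a temperature-controlled softmax, this keeps the forward decision strictly binary while still providing a (biased) gradient signal.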
", + "bbox": [ + 81, + 338, + 465, + 445 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2) How to design the Routing Module? A trivial solution is to process the routing probability $p$ with Gumbel Softmax. The comparison results of our DMVFNs with different differentiable routing methods are summarized in Table 5. Our DMVFN with STEBS outperforms the DMVFN variant with Gumbel Softmax on MS-SSIM, especially for long-term prediction. The DMVFN variant with Gumbel Softmax usually degenerates to a fixed and static structure. We also compare with the DMVFN randomly selecting each MVFB with probability 0.5 (denoted as \"Random\") and that without routing module (denoted as \"w/o routing\").", + "bbox": [ + 75, + 459, + 468, + 625 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c76042f224820397a6cd8bf19de79107ade7f3193736d170c038df008cfe74e5.jpg", + "table_caption": [ + "Table 6. Results of our DMVFN with different scaling factor settings. The evaluation metric is MS-SSIM $(\\times 10^{-2})$" + ], + "table_footnote": [], + "table_body": "
Setting in DMVFNCityscapesKITTI
t+1t+3t+5t+1t+3t+5
[1]94.7087.2680.9387.6476.7168.76
[2,1]95.3087.9382.0287.9777.2369.58
[4,2,1]95.7389.2483.4588.5378.0170.52
", + "bbox": [ + 81, + 676, + 465, + 765 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3) How to set the scaling factors? We evaluate our DMVFN with different scaling factors. We use three non-increasing factor sequences of “[1, 1, 1, 1, 1, 1, 1, 1, 1]”, “[2, 2, 2, 2, 2, 1, 1, 1, 1]” and “[4, 4, 4, 2, 2, 2, 1, 1, 1]”, denoted as “[1]”, “[2, 1]” and “[4, 2, 1]”, respectively. The results are listed in Table 6. Our DMVFN with “[4, 2, 1]” performs better than that with “[2, 1]” and “[1]”. The gap is more obvious on longer-term future frames.", + "bbox": [ + 75, + 779, + 467, + 900 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4f3ecade755763a7255e4d18f696f7d3556e94ec11563de0af4341b04971be4d.jpg", + "table_caption": [ + "Table 7. Spatial path is effective in our DMVFN. The evaluation metric is MS-SSIM $(\\times 10^{-2})$" + ], + "table_footnote": [], + "table_body": "
SettingCityscapesKITTI
t+1t+3t+5t+1t+3t+5
w/o r, w/o path94.9987.5980.9887.7576.2267.86
w/o r95.2987.9181.4888.0676.5368.29
w/o path95.5588.8983.0388.2977.5369.86
DMVFN95.7389.2483.4588.5378.0170.52
", + "bbox": [ + 504, + 128, + 888, + 227 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4) How effective is the spatial path? To verify the effectiveness of the spatial path in our DMVFN, we compare it with the DMVFN without spatial path (denoted as \"w/o path\"). The results listed in Table 7 show our DMVFN enjoys better performance with the spatial path, no matter with or without the routing module (denoted as \"w/o r\").", + "bbox": [ + 496, + 252, + 890, + 343 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 500, + 357, + 617, + 372 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we developed an efficient Dynamic Multiscale Voxel Flow Network (DMVFN) that excels previous video prediction methods on dealing with complex motions of different scales. With the proposed routing module, our DMVFN adaptively activates different sub-networks based on the input frames, improving the prediction performance while reducing the computation costs. Experiments on diverse benchmark datasets demonstrated that our DMVFN achieves state-of-the-art performance with greatly reduced computation burden. We believe our DMVFN can provide general insights for long-term prediction, video frame synthesis, and representation learning [14, 15]. We hope our DMVFN will inspire further research in light-weight video processing and make video prediction more accessible for downstream tasks such as CODEC for streaming video.", + "bbox": [ + 496, + 382, + 890, + 608 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our DMVFN can be improved at several aspects. Firstly, iteratively predicting future frames suffers from accumulate errors. This issue may be addressed by further bringing explicit temporal modeling [22, 31, 66, 68] to our DMVFN. Secondly, our DMVFN simply selects the nodes in a chain network topology, which can be improved by exploring more complex topology. For example, our routing module can be extended to automatically determine the scaling factors for parallel branches [33]. Thirdly, forecast uncertainty modeling is more of an extrapolation abiding to past flow information, especially considering bifurcation, which exceeds the current capability of our DMVFN. We believe that research on long-term forecast uncertainty may uncover deeper interplay with dynamic modeling methods [1, 14].", + "bbox": [ + 496, + 609, + 890, + 819 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgements. We sincerely thank Wen Heng for his exploration on neural architecture search at Megvii Research and Tianyuan Zhang for meaningful suggestions. This work is supported in part by the National Natural Science Foundation of China (No. 62002176 and 62176068).", + "bbox": [ + 496, + 819, + 890, + 895 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "6128", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Adil Kaan Akan, Erkut Erdem, Aykut Erdem, and Fatma Güney. Slamp: Stochastic latent appearance and motion prediction. In ICCV, 2021. 8", + "[2] Xinzhu Bei, Yanchao Yang, and Stefano Soatto. Learning semantic-aware dynamics for video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2021. 2, 5, 6", + "[3] Yoshua Bengio, Nicholas Léonard, and Aaron Courville. 
Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013. 5", + "[4] Tolga Bolukbasi, Joseph Wang, Ofer Dekel, and Venkatesh Saligrama. Adaptive neural networks for efficient inference. In Inf. Conf. Mach. Learn., 2017. 4", + "[5] Wonmin Byeon, Qin Wang, Rupesh Kumar Srivastava, and Petros Koumoutsakos. Contextvp: Fully context-aware video prediction. In Eur. Conf. Comput. Vis., 2018. 1", + "[6] Lluis Castrejon, Nicolas Ballas, and Aaron Courville. Improved conditional vrnns for video prediction. In Int. Conf. Comput. Vis., 2019. 1", + "[7] Rohan Chandra, Uttaran Bhattacharya, Aniket Bera, and Dinesh Manocha. Traphic: Trajectory prediction in dense and heterogeneous traffic using weighted interactions. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1", + "[8] Zheng Chang, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. Strpm: A spatiotemporal residual predictive model for high-resolution video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 7", + "[9] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 1, 2, 5, 6, 7", + "[10] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Int. Conf. Comput. Vis., 2017. 2", + "[11] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Int. Conf. Comput. Vis., 2015. 2, 6", + "[12] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. I. J. Robotics Res., 2013. 2, 5, 6, 7", + "[13] Daniel Geng, Max Hamilton, and Andrew Owens. Comparing correspondences: Video prediction with correspondence-wise losses. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 5, 6", + "[14] David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018. 8", + "[15] Danijar Hafner, Jurgis Pasukonis, Jimmy Ba, and Timothy Lillicrap. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023. 8", + "[16] Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. Dynamic neural networks: A survey. In IEEE Trans. Pattern Anal. Mach. Intell., 2021. 2, 4" + ], + "bbox": [ + 78, + 114, + 470, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Yunhui Han, Kunming Luo, Ao Luo, Jiangyu Liu, Haoqiang Fan, Guiming Luo, and Shuaicheng Liu. Realflow: Embased realistic optical flow dataset generation from videos. In ECCV, 2022. 2", + "[18] Adam W Harley, Konstantinos G Derpanis, and Iasonas Kokkinos. Segmentation-aware convolutional networks using local attention masks. In Int. Conf. Comput. Vis., 2017. 2", + "[19] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. In Neural Comput., 1997. 1", + "[20] Xiaotao Hu, Jun Xu, Shuhang Gu, Ming-Ming Cheng, and Li Liu. Restore globally, refine locally: A mask-guided scheme to accelerate super-resolution networks. In Eur. Conf. Comput. Vis., 2022. 2", + "[21] Zhaoyang Huang, Xiaoyu Shi, Chao Zhang, Qiang Wang, Ka Chun Cheung, Hongwei Qin, Jifeng Dai, and Hongsheng Li. Flowformer: A transformer architecture for optical flow. In Eur. Conf. Comput. Vis., 2022. 
2", + "[22] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Eur. Conf. Comput. Vis., 2022, 1, 2, 3, 6, 8", + "[23] Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Quantized neural networks: Training neural networks with low precision weights and activations. In J. Mach. Learn. Res., 2017. 5", + "[24] Ryan Humble, Maying Shen, Jorge Albericio Latorre, Eric Darve, and Jose Alvarez. Soft masking for cost-constrained channel pruning. In Eur. Conf. Comput. Vis., 2022. 5", + "[25] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. Flownet 2.0: Evolution of optical flow estimation with deep networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2", + "[26] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Adv. Neural Inform. Process. Syst., 2015. 3", + "[27] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In Int. Conf. Learn. Represent., 2017. 5", + "[28] Huaizu Jiang, Deqing Sun, Varun Jampani, Ming-Hsuan Yang, Erik Learned-Miller, and Jan Kautz. Super slomo: High quality estimation of multiple intermediate frames for video interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 3", + "[29] Rico Jonschkowski, Austin Stone, Jonathan T Barron, Ariel Gordon, Kurt Konolige, and Anelia Angelova. What matters in unsupervised optical flow. In ECCV, 2020. 2", + "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Int. Conf. Learn. Represent., 2015. 5", + "[31] Lingtong Kong, Boyuan Jiang, Donghao Luo, Wenqing Chu, Xiaoming Huang, Ying Tai, Chengjie Wang, and Jie Yang. Ifrnet: Intermediate feature refine network for efficient frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 8", + "[32] Wonkwang Lee, Whie Jung, Han Zhang, Ting Chen, Jing Yu Koh, Thomas Huang, Hyungsuk Yoon, Honglak Lee, and" + ], + "bbox": [ + 501, + 92, + 890, + 901 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "6129", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Seunghoon Hong. Revisiting hierarchical approach for persistent long-term video prediction. In Int. Conf. Learn. Represent., 2021. 2", + "[33] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Int. Conf. Learn. Represent., 2019. 8", + "[34] Wenqian Liu, Abhishek Sharma, Octavia Camps, and Mario Sznaier. Dyan: A dynamical atoms-based network for video prediction. In Eur. Conf. Comput. Vis., 2018. 6", + "[35] Ziwei Liu, Raymond A Yeh, Xiaou Tang, Yiming Liu, and Aseem Agarwala. Video frame synthesis using deep voxel flow. In Int. Conf. Comput. Vis., 2017. 1, 2, 3, 4, 5, 6, 7", + "[36] Ilya Loshchilov and F. Hutter. Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101, 2017. 5", + "[37] William Lotter, Gabriel Kreiman, and David Cox. Deep predictive coding networks for video prediction and unsupervised learning. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6", + "[38] Kunming Luo, Chuan Wang, Shuaicheng Liu, Haoqiang Fan, Jue Wang, and Jian Sun. Upflow: Upsampling pyramid for unsupervised optical flow learning. In CVPR, 2021. 2", + "[39] Julieta Martinez, Michael J Black, and Javier Romero. On human motion prediction using recurrent neural networks. In IEEE Conf. Comput. Vis. 
Pattern Recog., 2017. 1", + "[40] Sergiu Oprea, Pablo Martinez-Gonzalez, Alberto Garcia-Garcia, John Alejandro Castro-Vargas, Sergio Orts-Escolano, Jose Garcia-Rodriguez, and Antonis Argyros. A review on deep learning techniques for video prediction. In IEEE Trans. Pattern Anal. Mach. Intell., 2020. 1", + "[41] Junting Pan, Chengyu Wang, Xu Jia, Jing Shao, Lu Sheng, Junjie Yan, and Xiaogang Wang. Video generation from single semantic label map. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1, 2, 5, 6", + "[42] Sylvain Paris, Samuel W Hasinoff, and Jan Kautz. Local laplacian filters: edge-aware image processing with a laplacian pyramid. ACM Trans. Graph., 2011. 5", + "[43] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 davis challenge on video object segmentation. arXiv preprint arXiv:1704.00675, 2017. 2, 4, 6", + "[44] Xiaojuan Qi, Zhengzhe Liu, Qifeng Chen, and Jiaya Jia. 3d motion decomposition for rgbd future dynamic scene synthesis. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2", + "[45] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólár. Designing network design spaces. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7", + "[46] Anurag Ranjan and Michael J Black. Optical flow estimation using a spatial pyramid network. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2, 4", + "[47] Mengye Ren, Andrei Pokrovsky, Bin Yang, and Raquel Urtasun. Sbnet: Sparse blocks network for fast inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2", + "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. In Adv. Neural Inform. Process. Syst., 2015. 1" + ], + "bbox": [ + 78, + 90, + 468, + 898 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[49] Hyeonjun Sim, Jihyong Oh, and Munchurl Kim. Xvfi: extreme video frame interpolation. In Int. Conf. Comput. Vis., 2021. 1", + "[50] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. A dataset of 101 human action classes from videos in the wild. *Cent. Res. Comput. Vis.*, 2012. 6", + "[51] Hang Su, Varun Jampani, Deqing Sun, Orazio Gallo, Erik Learned-Miller, and Jan Kautz. Pixel-adaptive convolutional neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2", + "[52] Yu-Chuan Su and Kristen Grauman. Leaving some stones unturned: dynamic feature prioritization for activity detection in streaming video. In *Eur. Conf. Comput. Vis.*, 2016. 2", + "[53] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and cost volume. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2, 4", + "[54] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Eur. Conf. Comput. Vis., 2020, 1, 2, 5, 6", + "[55] Surat Teerapittayanon, Bradley McDanel, and Hsiang-Tsung Kung. Branchynet: Fast inference via early exiting from deep neural networks. In Int. Conf. Pattern Recog., 2016. 4", + "[56] Andreas Veit and Serge Belongie. Convolutional networks with adaptive inference graphs. In Eur. Conf. Comput. Vis., 2018. 2", + "[57] Thomas Verelst and Tinne Tuytelaars. Dynamic convolutions: Exploiting spatial sparsity for faster inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 2", + "[58] Ruben Villegas, Jimei Yang, Seunghoon Hong, Xunyu Lin, and Honglak Lee. 
Decomposing motion and content for natural video sequence prediction. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6", + "[59] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In Adv. Neural Inform. Process. Syst., 2018. 1, 2, 5, 6", + "[60] Xin Wang, Fisher Yu, Zi-Yi Dou, Trevor Darrell, and Joseph E Gonzalez. Skipnet: Learning dynamic routing in convolutional networks. In Eur. Conf. Comput. Vis., 2018. 2", + "[61] Zhou Wang, Eero P Simoncelli, and Alan C Bovik. Multiscale structural similarity for image quality assessment. In Asilomar Conf. Signals Syst. Comput., 2003. 6", + "[62] Yue Wu, Rongrong Gao, Jaesik Park, and Qifeng Chen. Future video synthesis with object motion prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 1, 2, 5, 6", + "[63] Yue Wu, Qiang Wen, and Qifeng Chen. Optimizing video prediction via video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 1, 3, 5, 6", + "[64] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2", + "[65] Xiaoyu Xiang, Yapeng Tian, Yulun Zhang, Yun Fu, Jan P Allebach, and Chenliang Xu. Zooming slow-mo: Fast and" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "6130", + "bbox": [ + 482, + 945, + 514, + 955 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "accurate one-stage space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7", + "[66] Gang Xu, Jun Xu, Zhen Li, Liang Wang, Xing Sun, and Ming-Ming Cheng. Temporal modulation network for controllable space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., June 2021. 7, 8", + "[67] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, and Dacheng Tao. Gmflow: Learning optical flow via global matching. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 4", + "[68] Xiangyu Xu, Li Siyao, Wenxiu Sun, Qian Yin, and Ming-Hsuan Yang. Quadratic video interpolation. In Adv. Neural Inform. Process. Syst., 2019. 2, 3, 8", + "[69] Tianfan Xue, Baian Chen, Jiajun Wu, Donglai Wei, and William T Freeman. Video enhancement with task-oriented flow. In Int. J. Comput. Vis., 2019. 2, 3, 6, 7", + "[70] Serena Yeung, Olga Russakovsky, Greg Mori, and Li Fei-Fei. End-to-end learning of action detection from frame glimpses in videos. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 2", + "[71] Changqian Yu, Jingbo Wang, Chao Peng, Changxin Gao, Gang Yu, and Nong Sang. Bisenet: Bilateral segmentation network for real-time semantic segmentation. In Eur. Conf. Comput. Vis., 2018. 4", + "[72] Guozhen Zhang, Yuhan Zhu, Haonan Wang, Youxin Chen, Gangshan Wu, and Limin Wang. Extracting motion and appearance via inter-frame attention for efficient video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 2", + "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 6", + "[74] Shuchang Zhou, Yuxin Wu, Zekun Ni, Xinyu Zhou, He Wen, and Yuheng Zou. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint arXiv:1606.06160, 2016. 5", + "[75] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Malik, and Alexei A Efros. 
View synthesis by appearance flow. In Eur. Conf. Comput. Vis., 2016. 3", + "[76] Xizhou Zhu, Han Hu, Stephen Lin, and Jifeng Dai. Deformable convnets v2: More deformable, better results. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2", + "[77] Yi Zhu, Karan Sapra, Fitsum A Reda, Kevin J Shih, Shawn Newsam, Andrew Tao, and Bryan Catanzaro. Improving semantic segmentation via video propagation and label relaxation. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 5" + ], + "bbox": [ + 78, + 92, + 468, + 757 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "6131", + "bbox": [ + 482, + 945, + 513, + 955 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_model.json b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3d0618cf2fc3241b48b8598d623e493a1179545d --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_model.json @@ -0,0 +1,2807 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.812, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + }, + { + "type": "title", + "bbox": [ + 0.156, + 0.131, + 0.817, + 0.152 + ], + "angle": 0, + "content": "A Dynamic Multi-Scale Voxel Flow Network for Video Prediction" + }, + { + "type": "text", + "bbox": [ + 0.149, + 0.18, + 0.829, + 0.235 + ], + "angle": 0, + "content": "Xiaotao Hu\\(^{1,2}\\) Zhwei Huang\\(^{2}\\) Ailin Huang\\(^{2,3}\\) Jun Xu\\(^{4,*}\\) Shuchang Zhou\\(^{2,*}\\) \n\\(^{1}\\)College of Computer Science, Nankai University \\(^{2}\\)Megvii Technology \n\\(^{3}\\)Wuhan University \\(^{4}\\)School of Statistics and Data Science, Nankai University" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.237, + 0.875, + 0.27 + ], + "angle": 0, + "content": "{huxiaotao, huangzhewei, huangailin, zhoushuchang}@megvii.com, nankaimathxujun@gmail.com https://huxiaotaostasy.github.io/DMVFN/" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.305, + 0.314, + 0.32 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.337, + 0.474, + 0.581 + ], + "angle": 0, + "content": "The performance of video prediction has been greatly boosted by advanced deep neural networks. However, most of the current methods suffer from large model sizes and require extra inputs, e.g., semantic/depth maps, for promising performance. For efficiency consideration, in this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to achieve better video prediction performance at lower computational costs with only RGB images, than previous methods. The core of our DMVFN is a differentiable routing module that can effectively perceive the motion scales of video frames. Once trained, our DMVFN selects adaptive sub-networks for different inputs at the inference stage. 
Experiments on several benchmarks demonstrate that our DMVFN is an order of magnitude faster than Deep Voxel Flow [35] and surpasses the state-of-the-art iterative-based OPT [63] on generated image quality." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.608, + 0.21, + 0.625 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.634, + 0.47, + 0.74 + ], + "angle": 0, + "content": "Video prediction aims to predict future video frames from the current ones. The task potentially benefits the study on representation learning [40] and downstream forecasting tasks such as human motion prediction [39], autonomous driving [6], and climate change [48], etc. During the last decade, video prediction has been increasingly studied in both academia and industry community [5, 7]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.74, + 0.47, + 0.878 + ], + "angle": 0, + "content": "Video prediction is challenging because of the diverse and complex motion patterns in the wild, in which accurate motion estimation plays a crucial role [35, 37, 58]. Early methods [37, 58] along this direction mainly utilize recurrent neural networks [19] to capture temporal motion information for video prediction. To achieve robust long-term prediction, the works of [41, 59, 62] additionally exploit the semantic or instance segmentation maps of video frames for semantically coherent motion estimation in complex scenes." + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.304, + 0.891, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.499, + 0.549, + 0.895, + 0.606 + ], + "angle": 0, + "content": "Figure 1. Average MS-SSIM and GFLOPs of different video prediction methods on Cityscapes [9]. The parameter amounts are provided in brackets. DMVFN outperforms previous methods in terms of image quality, parameter amount, and GFLOPs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.895, + 0.791 + ], + "angle": 0, + "content": "However, the semantic or instance maps may not always be available in practical scenarios, which limits the application scope of these video prediction methods [41,59,62]. To improve the prediction capability while avoiding extra inputs, the method of OPT [63] utilizes only RGB images to estimate the optical flow of video motions in an optimization manner with impressive performance. However, its inference speed is largely bogged down mainly by the computational costs of pre-trained optical flow model [54] and frame interpolation model [22] used in the iterative generation." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The motions of different objects between two adjacent frames are usually of different scales. This is especially evident in high-resolution videos with meticulous details [49]. The spatial resolution is also of huge differences in real-world video prediction applications. To this end, it is essential yet challenging to develop a single model for multiscale motion estimation. An early attempt is to extract" + }, + { + "type": "page_footnote", + "bbox": [ + 0.096, + 0.887, + 0.229, + 0.9 + ], + "angle": 0, + "content": "*Corresponding authors." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "6121" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.468, + 0.137 + ], + "angle": 0, + "content": "multi-scale motion cues in different receptive fields by employing the encoder-decoder architecture [35], but in practice it is not flexible enough to deal with complex motions." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.137, + 0.47, + 0.468 + ], + "angle": 0, + "content": "In this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to explicitly model the complex motion cues of diverse scales between adjacent video frames by dynamic optical flow estimation. Our DMVFN is consisted of several Multi-scale Voxel Flow Blocks (MVFBs), which are stacked in a sequential manner. On top of MVFBs, a light-weight Routing Module is proposed to adaptively generate a routing vector according to the input frames, and to dynamically select a subnetwork for efficient future frame prediction. We conduct experiments on four benchmark datasets, including Cityscapes [9], KITTI [12], DAVIS17 [43], and ViceoTest [69], to demonstrate the comprehensive advantages of our DMVFN over representative video prediction methods in terms of visual quality, parameter amount, and computational efficiency measured by floating point operations (FLOPs). A glimpse of comparison results by different methods is provided in Figure 1. One can see that our DMVFN achieves much better performance in terms of accuracy and efficiency on the Cityscapes [9] dataset. Extensive ablation studies validate the effectiveness of the components in our DMVFN for video prediction." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.47, + 0.444, + 0.483 + ], + "angle": 0, + "content": "In summary, our contributions are mainly three-fold:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.491, + 0.468, + 0.552 + ], + "angle": 0, + "content": "- We design a light-weight DMVFN to accurately predict future frames with only RGB frames as inputs. Our DMVFN is consisted of new MVFB blocks that can model different motion scales in real-world videos." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.561, + 0.468, + 0.621 + ], + "angle": 0, + "content": "- We propose an effective Routing Module to dynamically select a suitable sub-network according to the input frames. The proposed Routing Module is end-to-end trained along with our main network DMVFN." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.63, + 0.468, + 0.675 + ], + "angle": 0, + "content": "- Experiments on four benchmarks show that our DMVFN achieves state-of-the-art results while being an order of magnitude faster than previous methods." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.491, + 0.468, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.687, + 0.218, + 0.702 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.712, + 0.241, + 0.726 + ], + "angle": 0, + "content": "2.1. Video Prediction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.468, + 0.901 + ], + "angle": 0, + "content": "Early video prediction methods [35, 37, 58] only utilize RGB frames as inputs. For example, PredNet [37] learns an unsupervised neural network, with each layer making local predictions and forwarding deviations from those predictions to subsequent network layers. 
MCNet [58] decomposes the input frames into motion and content components, which are processed by two separate encoders. DVF [35] is a fully-convolutional encoder-decoder network synthesizing intermediate and future frames by approximating voxel flow for motion estimation. Later, extra information is exploited by video prediction methods in pursuit of better" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.258 + ], + "angle": 0, + "content": "performance. For example, the methods of Vid2vid [59], Seg2vid [41], HVP [32], and SADM [2] require additional semantic maps or human pose information for better video prediction results. Additionally, Qi et al. [44] used extra depth maps and semantic maps to explicitly inference scene dynamics in 3D space. FVS [62] separates the inputs into foreground objects and background areas by semantic and instance maps, and uses a spatial transformer to predict the motion of foreground objects. In this paper, we develop a light-weight and efficient video prediction network that requires only sRGB images as the inputs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.269, + 0.635, + 0.285 + ], + "angle": 0, + "content": "2.2. Optical Flow" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.293, + 0.892, + 0.595 + ], + "angle": 0, + "content": "Optical flow estimation aims to predict the per-pixel motion between adjacent frames. Deep learning-based optical flow methods [17,29,38,53,54] have been considerably advanced ever since Flownet [11], a pioneering work to learn optical flow network from synthetic data. Flownet2.0 [25] improves the accuracy of optical flow estimation by stacking sub-networks for iterative refinement. A coarse-to-fine spatial pyramid network is employed in SPynet [46] to estimate optical flow at multiple scales. PWC-Net [53] employs feature warping operation at different resolutions and uses a cost volume layer to refine the estimated flow at each resolution. RAFT [54] is a lightweight recurrent network sharing weights during the iterative learning process. FlowFormer [21] utilizes an encoder to output latent tokens and a recurrent decoder to decode features, while refining the estimated flow iteratively. In video synthesis, optical flow for downstream tasks [22, 35, 68, 69, 72] is also a hot research topic. Based on these approaches, we aim to design a flow estimation network that can adaptively operate based on each sample for the video prediction task." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.606, + 0.677, + 0.621 + ], + "angle": 0, + "content": "2.3. Dynamic Network" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.901 + ], + "angle": 0, + "content": "The design of dynamic networks is mainly divided into three categories: spatial-wise, temporal-wise, and sample-wise [16]. Spatial-wise dynamic networks perform adaptive operations in different spatial regions to reduce computational redundancy with comparable performance [20, 47, 57]. In addition to the spatial dimension, dynamic processing can also be applied in the temporal dimension. Temporal-wise dynamic networks [52, 64, 70] improve the inference efficiency by performing less or no computation on unimportant sequence frames. To handle the input in a data-driven manner, sample-wise dynamic networks adaptively adjust network structures to side-off the extra computation [56, 60], or adaptively change the network parameters to improve the performance [10, 18, 51, 76]. 
Designing and training a dynamic network is not trivial since it is difficult to directly enable a model with complex topology connections. We need to design a well-structured and robust model before considering its dynamic mechanism. In this paper," + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6122" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.089, + 0.446, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.176, + 0.226, + 0.402, + 0.241 + ], + "angle": 0, + "content": "(a) Voxel Flow-based Image Fusion" + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.249, + 0.468, + 0.354 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.362, + 0.33, + 0.377 + ], + "angle": 0, + "content": "(b) DMVFN" + }, + { + "type": "image", + "bbox": [ + 0.473, + 0.088, + 0.852, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.363, + 0.621, + 0.377 + ], + "angle": 0, + "content": "(c) MVFB" + }, + { + "type": "image_caption", + "bbox": [ + 0.708, + 0.362, + 0.838, + 0.377 + ], + "angle": 0, + "content": "(d) Routing Module" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.389, + 0.892, + 0.472 + ], + "angle": 0, + "content": "Figure 2. Overview of the proposed Dynamic Multi-scale Voxel Flow Network (DMVFN). \\((a)\\): To predict a future frame, we use the voxel flow [35] to guide the pixel fusion of the input frames. The voxel flow contains the prediction of object motion and occlusion. \\((b)\\): DMVFN contains several MVFBs with decreasing scaling factor \\(S^i\\). According to the routing vector \\(v\\) estimated by a Routing Module, a sub-network is selected to process the input image. \\((c)\\): Each MVFB has a scaling factor \\(S^i\\), which means that the motion path is performed on images whose sizes are \\(1 / S^i\\) of the original. \\((d)\\): Two consecutive frames are fed into several neural layers and a Differentiable Bernoulli sample to generate the hard routing vector." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.499, + 0.469, + 0.529 + ], + "angle": 0, + "content": "we propose a module to dynamically perceive the motion magnitude of input frames to select the network structure." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.544, + 0.212, + 0.56 + ], + "angle": 0, + "content": "3. Methodology" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.569, + 0.208, + 0.584 + ], + "angle": 0, + "content": "3.1. Background" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.592, + 0.47, + 0.744 + ], + "angle": 0, + "content": "Video prediction. Given a sequence of past \\(t\\) frames \\(\\{I_i\\in \\mathbb{R}^{h\\times w\\times 3}|i = 1,\\dots ,t\\}\\), video prediction aims to predict the future frames \\(\\{\\tilde{I}_{t + 1},\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}\\). The inputs of our video prediction model are only the two consecutive frames \\(I_{t - 1}\\) and \\(I_{t}\\). We concentrate on predicting \\(\\tilde{I}_{t + 1}\\), and iteratively predict future frames \\(\\{\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}\\) in a similar manner. Denote the video prediction model as \\(G_{\\theta}(I_{t - 1},I_t)\\), where \\(\\theta\\) is the set of model parameters to be learned, the learning objective is to minimize the difference between \\(\\tilde{I}_{t + 1} = G_{\\theta}(I_{t - 1},I_t)\\) and the \"ground truth\" \\(I_{t + 1}\\)." 
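The problem setting above (predict the next frame from two frames, then roll forward) can be illustrated with a short autoregressive loop; `predictor` is only a placeholder for a learned model such as G_θ, instantiated here as a copy-last-frame baseline.

```python
# Illustrative autoregressive rollout: each prediction is fed back as the newest
# input frame. `predictor` stands in for G_theta(I_{t-1}, I_t).
import torch

def predictor(prev, cur):
    return cur.clone()               # placeholder: copy-last-frame baseline

I_tm1 = torch.rand(1, 3, 256, 832)   # frame I_{t-1}
I_t   = torch.rand(1, 3, 256, 832)   # frame I_t

preds = []
prev, cur = I_tm1, I_t
for _ in range(5):                   # e.g. the next five frames on KITTI
    nxt = predictor(prev, cur)
    preds.append(nxt)
    prev, cur = cur, nxt             # shift the two-frame window forward
```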
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.763, + 0.47, + 0.901 + ], + "angle": 0, + "content": "Voxel flow. Considering the local consistency in space-time, the pixels of a generated future frame come from nearby regions of the previous frames [69, 75]. In video prediction task, researchers estimate optical flow \\(\\mathbf{f}_{t + 1\\rightarrow t}\\) from \\(I_{t + 1}\\) to \\(I_{t}\\) [35]. And the corresponding frame is obtained using the pixel-wise backward warping [26] (denoted as \\(\\overleftarrow{\\mathcal{W}}\\)). In addition, to deal with the occlusion, some methods [28, 35] further introduce a fusion map \\(\\mathbf{m}\\) to fuse the pixels of \\(I_{t}\\) and \\(I_{t - 1}\\). The final predicted frame is obtained" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.499, + 0.793, + 0.515 + ], + "angle": 0, + "content": "by the following formulation (Figure 2 \\((a)\\)):" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.524, + 0.892, + 0.545 + ], + "angle": 0, + "content": "\\[\n\\hat {I} _ {t + 1 \\leftarrow t - 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, \\mathbf {f} _ {t + 1 \\rightarrow t - 1}\\right), \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.567, + 0.891, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\hat {I} _ {t + 1 \\leftarrow t} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t}, \\mathbf {f} _ {t + 1 \\rightarrow t}\\right), \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.546, + 0.607, + 0.891, + 0.625 + ], + "angle": 0, + "content": "\\[\n\\tilde {I} _ {t + 1} = \\hat {I} _ {t + 1 \\leftarrow t - 1} \\times \\mathbf {m} + \\hat {I} _ {t + 1 \\leftarrow t} \\times (1 - \\mathbf {m}). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.629, + 0.892, + 0.707 + ], + "angle": 0, + "content": "Here, \\(\\hat{I}_{t+1 \\leftarrow t}\\) and \\(\\hat{I}_{t+1 \\leftarrow t-1}\\) are intermediate warped images. To simplify notations, we refer to the optical flows \\(\\mathbf{f}_{t+1 \\rightarrow t}, \\mathbf{f}_{t+1 \\rightarrow t-1}\\) and the fusion map \\(\\mathbf{m}\\) collectively as the voxel flow \\(\\mathbf{F}_{t+1}\\), similar to the notations in [35]. The above equations can be simplified to the following form:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.716, + 0.891, + 0.737 + ], + "angle": 0, + "content": "\\[\n\\tilde {I} _ {t + 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, I _ {t}, \\mathbf {F} _ {t + 1}\\right). \\tag {4}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.757, + 0.858, + 0.773 + ], + "angle": 0, + "content": "3.2. Dynamic Multi-Scale Voxel Flow Network" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.78, + 0.892, + 0.901 + ], + "angle": 0, + "content": "MVFB. To estimate the voxel flow, DVF [35] assumes that all optical flows are locally linear and temporally symmetric around the targeted time, which may be unreasonable for large-scale motions. To address the object position changing issue [22] in adjacent frames, OPT [63] uses flow reversal layer [68] to convert forward flows to backward flows. We aim to estimate voxel flow end-to-end without introducing new components and unreasonable constraints." 
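A hedged sketch of the voxel-flow fusion in Eqns. (1)-(3) follows: backward-warp both input frames with their estimated flows and blend them with the fusion map m. It uses `torch.nn.functional.grid_sample` for the backward warp; the flow and mask tensors here are placeholders, not outputs of the actual network.

```python
# Backward warping + fusion-map blending (cf. Eqns. (1)-(3)); placeholder tensors.
import torch
import torch.nn.functional as F

def backward_warp(img, flow):
    """img: (N,3,H,W); flow: (N,2,H,W) in pixels, pointing from target to source."""
    n, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    grid = torch.stack((xs, ys), dim=0).float().unsqueeze(0) + flow   # sample locations
    # normalise to [-1, 1] for grid_sample, which expects (N,H,W,2) ordered as (x, y)
    grid_x = 2.0 * grid[:, 0] / (w - 1) - 1.0
    grid_y = 2.0 * grid[:, 1] / (h - 1) - 1.0
    grid = torch.stack((grid_x, grid_y), dim=-1)
    return F.grid_sample(img, grid, mode="bilinear", align_corners=True)

I_tm1, I_t = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
f_to_tm1, f_to_t = torch.zeros(1, 2, 64, 64), torch.zeros(1, 2, 64, 64)  # flows
m = torch.full((1, 1, 64, 64), 0.5)                                      # fusion map

I_hat_tm1 = backward_warp(I_tm1, f_to_tm1)
I_hat_t   = backward_warp(I_t, f_to_t)
I_pred    = I_hat_tm1 * m + I_hat_t * (1 - m)     # Eqn. (3)
```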
+ }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6123" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.1, + 0.089, + 0.277, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.178, + 0.267, + 0.201, + 0.278 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.089, + 0.396, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.267, + 0.354, + 0.279 + ], + "angle": 0, + "content": "DVF" + }, + { + "type": "image", + "bbox": [ + 0.397, + 0.089, + 0.516, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.434, + 0.267, + 0.478, + 0.279 + ], + "angle": 0, + "content": "DYAN" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.089, + 0.634, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.559, + 0.268, + 0.59, + 0.279 + ], + "angle": 0, + "content": "OPT" + }, + { + "type": "image", + "bbox": [ + 0.634, + 0.089, + 0.752, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.268, + 0.721, + 0.279 + ], + "angle": 0, + "content": "DMVFN" + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.089, + 0.871, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.8, + 0.268, + 0.822, + 0.279 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.123, + 0.289, + 0.843, + 0.305 + ], + "angle": 0, + "content": "Figure 3. Visual comparison of \\((t + 1)\\)-th frame predicted from \\(t\\)-th and \\((t - 1)\\)-th frames on the DAVIS17-Val [43]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.317, + 0.469, + 0.381 + ], + "angle": 0, + "content": "We denote the \\(i\\)-th MVFB as \\(f_{MVFB}^{i}(\\cdot)\\). It learns to approximate target voxel flow \\(\\mathbf{F}_{t+1}^{i}\\) by taking two frames \\(I_{t-1}\\) and \\(I_{t}\\), the synthesized frame \\(\\tilde{I}_{t+1}^{i-1}\\), and the voxel flow estimated by previous blocks \\(\\mathbf{F}_{t+1}^{i-1}\\) as inputs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.382, + 0.47, + 0.519 + ], + "angle": 0, + "content": "The architecture of our MVFB is shown in Figure 2 \\((c)\\). To capture the large motion while retaining the original spatial information, we construct a two-branch network structure [71]. This design inherits from pyramidal optical flow estimation [46, 53]. In the motion path, the input is downsampled by a scaling factor \\(S^i\\) to facilitate the expansion of the receptive field. Another spatial path operates at high resolution to complement the spatial information. We denote \\(\\tilde{I}_{t + 1}^{i}\\) as the output of the \\(i\\)-th MVFB. Formally," + }, + { + "type": "equation", + "bbox": [ + 0.117, + 0.53, + 0.469, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = f _ {\\mathrm {M V F B}} ^ {i} \\left(I _ {t - 1}, I _ {t}, \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, S ^ {i}\\right). \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.563, + 0.469, + 0.623 + ], + "angle": 0, + "content": "The initial values of \\(\\tilde{I}_{t+1}^{0}\\) and \\(\\mathbf{F}_{t+1}^{0}\\) are set to zero. As illustrated in Figure 2 (b), our DMVFN contains 9 MVFBs. 
To generate a future frame, we iteratively refine a voxel flow [35] and fuse the pixels of the input frames." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.624, + 0.469, + 0.7 + ], + "angle": 0, + "content": "Many optical flow estimation methods predict the flow field on a small image, and then refine it on a large image [53, 67]. For simplicity and intuition, we consider decreasing scaling factor sequences. Finally, the scaling factors is experimentally set as \\([4, 4, 4, 2, 2, 2, 1, 1, 1]\\)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.719, + 0.469, + 0.84 + ], + "angle": 0, + "content": "DMVFN. Different pairs of adjacent frames have diverse motion scales and different computational demands. An intuitive idea is to adaptively select dynamic architectures conditioned on each input. We then perform dynamic routing within the super network (the whole architecture) [16], including multiple possible paths. DMVFN saves redundant computation for samples with small-scale motion and preserves the representation ability for large-scale motion." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.469, + 0.901 + ], + "angle": 0, + "content": "To make our DMVFN end-to-end trainable, we design a differentiable Routing Module containing a tiny neural network to estimate routing vector \\( v \\) for each input sample. Based on this vector, our DMVFN dynamically selects a" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.318, + 0.89, + 0.347 + ], + "angle": 0, + "content": "sub-network to process the input data. As the Figure 2 \\((b)\\) shows, some blocks are skipped during inference." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.348, + 0.892, + 0.547 + ], + "angle": 0, + "content": "Different from some dynamic network methods that can only continuously select the first several blocks (\\(n\\) options) [4, 55], DMVFN is able to choose paths freely (\\(2^n\\) options). DMVFN trains different sub-networks in the super network with various possible inference paths and uses dynamic routing inside the super network during inference to reduce redundant computation while maintaining the performance. A dynamic routing vector \\(v \\in \\{0,1\\}^n\\) is predicted by the proposed Routing Module. For the \\(i\\)-th MVFN block of DMVFN, we denote \\(v_i\\) as the reference of whether processing the reached voxel flow \\(\\mathbf{F}_{t+1}^{i-1}\\) and the reached predicted frame \\(\\tilde{I}_{t+1}^{i-1}\\). The path \\(f_{\\mathrm{MVFB}}^i\\) to the \\(i\\)-th block from the last block will be activated only when \\(v_i = 1\\). Formally," + }, + { + "type": "equation", + "bbox": [ + 0.54, + 0.557, + 0.891, + 0.614 + ], + "angle": 0, + "content": "\\[\n\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = \\left\\{ \\begin{array}{l l} f _ {\\mathrm {M V F B}} ^ {i} \\left(\\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}\\right), & v _ {i} = 1 \\\\ \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, & v _ {i} = 0. \\end{array} \\right. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.616, + 0.892, + 0.661 + ], + "angle": 0, + "content": "During the training phase, to enable the backpropagation of Eqn. (6), we use \\( v_{i} \\) and \\( (1 - v_{i}) \\) as the weights of the two branches and average their outputs." 
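A compact sketch of the skip-or-process rule of Eqn. (6), with the soft weighting used during training, is given below: when v_i = 0 the i-th block's output reduces to its input, so the block is effectively bypassed. The block and the routing vector are placeholders for illustration only.

```python
# Skip-or-process composition of refinement blocks (cf. Eqn. (6)); during
# training the two branches are blended with weights v_i and 1 - v_i.
import torch
import torch.nn as nn

class DummyBlock(nn.Module):
    """Stand-in for one MVFB: refines a running estimate."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 3, padding=1)
    def forward(self, x):
        return x + self.conv(x)

blocks = nn.ModuleList(DummyBlock() for _ in range(9))
v = torch.tensor([1., 1., 0., 1., 0., 1., 1., 0., 1.])   # routing vector (placeholder)

x = torch.rand(1, 3, 64, 64)           # running estimate (frame/flow stand-in)
for vi, block in zip(v, blocks):
    x = vi * block(x) + (1 - vi) * x   # vi = 0 leaves the estimate unchanged
```

At inference, the weighted sum can be replaced by a plain `if vi:` check so that deactivated blocks are skipped entirely and their computation is saved.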
+ }, + { + "type": "text", + "bbox": [ + 0.498, + 0.661, + 0.892, + 0.842 + ], + "angle": 0, + "content": "In the iterative scheme of our DMVFN, each MVFB essentially refines the current voxel flow estimation to a new one. This special property allows our DMVFN to skip some MVFBs for every pair of input frames. Here, we design a differentiable and efficient routing module for learning to trade-off each MVFB block. This is achieved by predicting a routing vector \\( v \\in \\{0,1\\}^n \\) to identify the proper sub-network (e.g., 0 for deactivated MVFBs, 1 for activated MVFBs). We implement the routing module by a small neural network (\\( \\sim 1/6 \\) GFLOPs of the super network), and show its architecture in Figure 2 (d). It learns to predict the probability \\( \\tilde{v} \\) of choosing MVFBs by:" + }, + { + "type": "equation", + "bbox": [ + 0.55, + 0.849, + 0.891, + 0.865 + ], + "angle": 0, + "content": "\\[\n\\tilde {v} = \\operatorname {L i n e a r} (\\operatorname {A v g P o o l i n g} (\\operatorname {C o n v s} (I _ {t - 1}, I _ {t}))), \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.886, + 0.891, + 0.902 + ], + "angle": 0, + "content": "\\[\nv = \\text {B e r n o u l l i - S a m p l i n g} (\\tilde {v}). \\tag {8}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6124" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.133 + ], + "angle": 0, + "content": "Table 1. Quantitative results of different methods on the Cityscapes [9], and KITTI [12] datasets. \"RGB\", \"F\", \"S\" and \"T\" denote the video frames, optical flow, semantic map, and instance map, respectively. We denote our DMVFN without routing module as \"DMVFN (w/o r)\". FVS [62] integrates a segmentation model [77] on KITTI [12] to obtain the semantic maps. \"N/A\" means not available." + }, + { + "type": "table", + "bbox": [ + 0.099, + 0.143, + 0.875, + 0.321 + ], + "angle": 0, + "content": "
Method | Inputs | Cityscapes-Train→Cityscapes-Test [9] | KITTI-Train→KITTI-Test [12]
GFLOPs | MS-SSIM (×10⁻²) ↑ t+1 | t+3 | t+5 | LPIPS (×10⁻²) ↓ t+1 | t+3 | t+5 | GFLOPs | MS-SSIM (×10⁻²) ↑ t+1 | t+3 | t+5 | LPIPS (×10⁻²) ↓ t+1 | t+3 | t+5
Vid2vid [59] | RGB+S | 603.79 | 88.16 | 80.55 | 75.13 | 10.58 | 15.92 | 20.14 | N/A | N/A | N/A | N/A | N/A | N/A | N/A
Seg2vid [41] | RGB+S | 455.84 | 88.32 | N/A | 61.63 | 9.69 | N/A | 25.99 | N/A | N/A | N/A | N/A | N/A | N/A | N/A
FVS [62] | RGB+S+I | 1891.65 | 89.10 | 81.13 | 75.68 | 8.50 | 12.98 | 16.50 | 768.96 | 79.28 | 67.65 | 60.77 | 18.48 | 24.61 | 30.49
SADM [2] | RGB+S+F | N/A | 95.99 | N/A | 83.51 | 7.67 | N/A | 14.93 | N/A | 83.06 | 72.44 | 64.72 | 14.41 | 24.58 | 31.16
PredNet [37] | RGB | 62.62 | 84.03 | 79.25 | 75.21 | 25.99 | 29.99 | 36.03 | 25.44 | 56.26 | 51.47 | 47.56 | 55.35 | 58.66 | 62.95
MCNET [58] | RGB | 502.80 | 89.69 | 78.07 | 70.58 | 18.88 | 31.34 | 37.34 | 204.26 | 75.35 | 63.52 | 55.48 | 24.05 | 31.71 | 37.39
DVF [35] | RGB | 409.78 | 83.85 | 76.23 | 71.11 | 17.37 | 24.05 | 28.79 | 166.47 | 53.93 | 46.99 | 42.62 | 32.47 | 37.43 | 41.59
CorrWise [13] | RGB | 944.29 | 92.80 | N/A | 83.90 | 8.50 | N/A | 15.00 | 383.62 | 82.00 | N/A | 66.70 | 17.20 | N/A | 25.90
OPT [63] | RGB | 313482.15 | 94.54 | 86.89 | 80.40 | 6.46 | 12.50 | 17.83 | 127431.71 | 82.71 | 69.50 | 61.09 | 12.34 | 20.29 | 26.35
DMVFN (w/o r) | RGB | 24.51 | 95.29 | 87.91 | 81.48 | 5.60 | 10.48 | 14.91 | 9.96 | 88.06 | 76.53 | 68.29 | 10.70 | 19.28 | 26.13
DMVFN | RGB | 12.71 | 95.73 | 89.24 | 83.45 | 5.58 | 10.47 | 14.82 | 5.15 | 88.53 | 78.01 | 70.52 | 10.74 | 19.27 | 26.05
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.347, + 0.47, + 0.452 + ], + "angle": 0, + "content": "Differentiable Routing. To train the proposed Routing Module, we need to constrain the probability values to prevent the model from falling into trivial solutions (e.g., select all blocks). On the other hand, we allow this module to participate in the gradient calculation to achieve end-to-end training. We introduce the Gumbel Softmax [27] and the Straight-Through Estimator (STE) [3] to tackle this issue." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.453, + 0.472, + 0.514 + ], + "angle": 0, + "content": "One popular method to make the routing probability \\(\\tilde{v}\\) learnable is the Gumbel Softmax technique [24, 27]. By treating the selection of each MVFB as a binary classification task, the soft dynamic routing vector \\(v\\in \\mathbb{R}^n\\) is" + }, + { + "type": "equation", + "bbox": [ + 0.097, + 0.521, + 0.47, + 0.56 + ], + "angle": 0, + "content": "\\[\nv _ {i} = \\frac {\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right)}{\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right) + \\exp \\left(\\frac {1}{\\tau} \\left(2 - \\tilde {v} _ {i} - G _ {i}\\right)\\right)}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.566, + 0.47, + 0.747 + ], + "angle": 0, + "content": "where \\( i = 1, \\dots, n \\), \\( G_{i} \\in \\mathbb{R} \\) is Gumbel noise following the Gumbel(0,1) distribution, and \\( \\tau \\) is a temperature parameter. We start at a very high temperature to ensure that all possible paths become candidates, and then the temperature is attenuated to a small value to approximate one-hot distribution. To encourage the sum of the routing vectors \\( \\{v_{i}\\}_{i=1}^{n} \\) to be small, we add the regularization term \\( \\left(\\frac{1}{n} \\sum_{i=1}^{n} v_{i}\\right) \\) to the final loss function. However, we experimentally find that our DMVFN usually converges to an input-independent structure when temperature decreases. We conjecture that the control of the temperature parameter \\( \\tau \\) and the design of the regularization term require further study." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.748, + 0.47, + 0.824 + ], + "angle": 0, + "content": "Inspired by previous research on low-bit width neural networks [23, 74], we adopt STE for Bernoulli Sampling (STEBS) to make the binary dynamic routing vector differentiable. An STE can be regarded as an operator that has arbitrary forward and backward operations. Formally," + }, + { + "type": "equation", + "bbox": [ + 0.137, + 0.832, + 0.47, + 0.871 + ], + "angle": 0, + "content": "\\[\n\\tilde {w} _ {i} = \\min (\\beta \\times n \\times \\sigma (\\tilde {v} _ {i}) / \\sum_ {i} ^ {n} \\sigma (\\tilde {v} _ {i}), 1), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.885, + 0.47, + 0.903 + ], + "angle": 0, + "content": "STE Forward: \\(v_{i}\\sim\\) Bernoulli \\((\\tilde{w}_i)\\) (11)" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.357, + 0.892, + 0.388 + ], + "angle": 0, + "content": "\\[\n\\text {S T E B a c k w a r d}: \\frac {\\partial o}{\\partial \\tilde {w}} = \\frac {\\partial o}{\\partial v}, \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.391, + 0.895, + 0.485 + ], + "angle": 0, + "content": "where \\(\\sigma\\) is the Sigmoid function and we denote the objective function as \\(o\\). 
We use the well-defined gradient \\(\\frac{\\partial o}{\\partial v}\\) as an approximation for \\(\\frac{\\partial o}{\\partial \\tilde{w}}\\) to construct the backward pass. In Eqn. (10), we normalize the sample rate. During training, \\(\\beta\\) is fixed at 0.5. We can adjust the hyper-parameter \\(\\beta\\) to control the complexity in the inference phase." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.492, + 0.719, + 0.508 + ], + "angle": 0, + "content": "3.3. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.515, + 0.892, + 0.547 + ], + "angle": 0, + "content": "Loss function. Our training loss \\( L_{total} \\) is the sum of the reconstruction losses of outputs of each block \\( I_{t + 1}^{i} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.587, + 0.557, + 0.892, + 0.597 + ], + "angle": 0, + "content": "\\[\nL _ {t o t a l} = \\sum_ {i = 1} ^ {n} \\gamma^ {n - i} d \\left(\\tilde {I} _ {t + 1} ^ {i}, I _ {t + 1}\\right), \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.605, + 0.892, + 0.652 + ], + "angle": 0, + "content": "where \\(d\\) is the \\(\\ell_1\\) loss calculated on the Laplacian pyramid representations [42] extracted from each pair of images. And we set \\(\\gamma = 0.8\\) in our experiments following [54]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.668, + 0.894, + 0.76 + ], + "angle": 0, + "content": "Training strategy. Our DMVFN is trained on \\(224 \\times 224\\) image patches. The batch size is set as 64. We employ the AdamW optimizer [30, 36] with a weight decay of \\(10^{-4}\\). We use a cosine annealing strategy to reduce the learning rate from \\(10^{-4}\\) to \\(10^{-5}\\). Our model is trained on four 2080Ti GPUs for 300 epochs, which takes about 35 hours." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.773, + 0.634, + 0.79 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.797, + 0.686, + 0.812 + ], + "angle": 0, + "content": "4.1. Dataset and Metric" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.82, + 0.86, + 0.836 + ], + "angle": 0, + "content": "Dataset. We use several datasets in the experiments:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.84, + 0.894, + 0.902 + ], + "angle": 0, + "content": "Cityscapes dataset [9] contains 3,475 driving videos with resolution of \\(2048 \\times 1024\\). We use 2,945 videos for training (Cityscapes-Train) and 500 videos in Cityscapes dataset [9] for testing (Cityscapes-Test)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6125" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.892, + 0.118 + ], + "angle": 0, + "content": "Table 2. Quantitative results on the DAVIS17-Val [43] and Vimeo90K-Test [69] benchmarks. We denote DMVFN without routing as \"DMVFN (w/o r)\". \"N/A\" means not available." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.129, + 0.868, + 0.26 + ], + "angle": 0, + "content": "
Method | UCF101-Train→DAVIS17-Val | UCF101-Train→Vimeo90K-Test
GFLOPs ↓ | MS-SSIM (×10⁻²) ↑ t+1 | t+3 | LPIPS (×10⁻²) ↓ t+1 | t+3 | GFLOPs ↓ | MS-SSIM (×10⁻²) ↑ t+1 | LPIPS (×10⁻²) ↓ t+1
DVF [35] | 324.15 | 68.61 | 55.47 | 23.23 | 34.22 | 89.64 | 92.11 | 7.73
DYAN [34] | 130.12 | 78.96 | 70.41 | 13.09 | 21.43 | N/A | N/A | N/A
OPT [63] | 165312.80 | 83.26 | 73.85 | 11.40 | 18.21 | 45716.20 | 96.75 | 3.59
DMVFN (w/o r) | 19.39 | 84.81 | 75.05 | 9.41 | 16.24 | 5.36 | 97.24 | 3.30
DMVFN | 9.96 | 83.97 | 74.81 | 9.96 | 17.28 | 2.77 | 97.01 | 3.69
" + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.281, + 0.457, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.584, + 0.47, + 0.681 + ], + "angle": 0, + "content": "Figure 4. Prediction comparison on KITTI. The yellow line is aligned with the car in the ground truth. The results show that previous methods (DVF [35], FVS [62], and OPT [63]) cannot accurately predict the car's location in the long-term prediction. The motion predicted by our DMVFN is the most similar to the ground truth, while the errors of other methods grow larger with time. The fences predicted by DMVFN remain vertical when moving." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.709, + 0.47, + 0.77 + ], + "angle": 0, + "content": "KITTI dataset [12] contains 28 driving videos with resolution of \\(375 \\times 1242\\). 24 videos in KITTI dataset are used for training (KITTI-Train) and the remaining four videos in KITTI dataset are used for testing (KITTI-Test)." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.774, + 0.47, + 0.821 + ], + "angle": 0, + "content": "UCF101 [50] dataset contains 13,320 videos under 101 different action categories with resolution of \\(240\\times 320\\). We only use the training subset of UCF101 [50]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.825, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Vimeo90K [69] dataset has 51,312 triplets for training, where each triplet contains three consecutive video frames with resolution of \\(256 \\times 448\\). There are 3,782 triplets in the Vimeo90K testing set. We denote the training and testing subsets as Video-Train and Video-Test, respectively." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.284, + 0.892, + 0.346 + ], + "angle": 0, + "content": "DAVIS17 [43] has videos with resolution around \\(854 \\times 480\\). We use the DAVIS17-Val containing 30 videos as test set. Configurations. We have four experimental configurations following previous works [34, 35, 63]:" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.36, + 0.768, + 0.375 + ], + "angle": 0, + "content": "- Cityscapes-Train \\(\\rightarrow\\) Cityscapes-Test" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.378, + 0.713, + 0.392 + ], + "angle": 0, + "content": "KITTI-Train \\(\\rightarrow\\) KITTI-Test" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.396, + 0.701, + 0.41 + ], + "angle": 0, + "content": "UCF101→DAVIS17-Val" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.414, + 0.687, + 0.428 + ], + "angle": 0, + "content": "UCF101 \\(\\rightarrow\\) Vimeo-Test" + }, + { + "type": "list", + "bbox": [ + 0.519, + 0.36, + 0.768, + 0.428 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.443, + 0.892, + 0.639 + ], + "angle": 0, + "content": "Here, the left and right sides of the arrow represent the training set and the test set, respectively. For a fair comparison with other methods that are not tailored for high resolution videos, we follow the setting in [62] and resize the images in Cityscapes [9] to \\(1024 \\times 512\\) and images in KITTI [12] to \\(256 \\times 832\\), respectively. During inference of Cityscapes [9] and KITTI [12], we predict the next five frames. We predict the next three frames for DAVIS17-Val [43] and next one frame for Video-Test [69], respectively. Note that OPT [63] is an optimization-based approach and uses pretrained RAFT [54] and RIFE [22] models. 
RIFE [22] and RAFT [54] are trained on the Video-Train dataset [69] and the Flying Chairs dataset [11], respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.64, + 0.892, + 0.715 + ], + "angle": 0, + "content": "Evaluation metrics. Following previous works [63], we use Multi-Scale Structural Similarity Index Measure (MSSSIM) [61] and a perceptual metric LPIPS [73] for quantitative evaluation. To measure the model complexity, we calculate the GFLOPs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.726, + 0.785, + 0.743 + ], + "angle": 0, + "content": "4.2. Comparison to State-of-the-Arts" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.855 + ], + "angle": 0, + "content": "We compare our DMVFN with state-of-the-art video prediction methods. These methods fall into two categories: the methods requiring only RGB images as input (e.g., PredNet [37], MCNET [58], DVF [35], CorrWise [13], OPT [63]) and the methods requiring extra information as input (e.g., Vid2vid [59], Seg2vid [41], FVS [62], SADM [2])." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Quantitative results. The quantitative results are reported in Table 1 and Table 2. When calculating the GFLOPs of OPT [63], the number of iterations is set as 3,000. In" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6126" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.088, + 0.089, + 0.368, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.222, + 0.237, + 0.235 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.377, + 0.089, + 0.592, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.476, + 0.222, + 0.495, + 0.234 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image", + "bbox": [ + 0.605, + 0.089, + 0.885, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.735, + 0.222, + 0.753, + 0.234 + ], + "angle": 0, + "content": "(c)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.247, + 0.895, + 0.29 + ], + "angle": 0, + "content": "Figure 5. (a): Average usage rate on videos with different motion magnitudes. \"Fast\": tested on Videox-Fast. \"Medium\": tested on Vimeo-Medium. \"Slow\": tested on Videox-Slow. (b): Difference between \"Fast\"/\"Slow\" and \"Medium\" of (a). (c): Averaged usage rate on different time intervals between two input frames from Videox-Slow. \"Int\": time interval." 
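The usage statistics reported in Figure 5 and Table 4 can be gathered by running the routing module over a set of frame pairs and averaging the binary routing vectors per block; the sketch below shows this bookkeeping, with the `routing_module` interface assumed for illustration.

```python
import torch

@torch.no_grad()
def average_usage_rate(routing_module, frame_pairs):
    """Average how often each MVFB is selected over (I_{t-1}, I_t) pairs.
    `routing_module` is assumed to return a (batch, n_blocks) binary tensor."""
    counts, total = None, 0
    for i_prev, i_cur in frame_pairs:
        v = routing_module(i_prev, i_cur)                        # binary routing vectors
        counts = v.sum(dim=0) if counts is None else counts + v.sum(dim=0)
        total += v.shape[0]
    return counts.float() / total                                # per-block usage rate in [0, 1]
```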
+ }, + { + "type": "image", + "bbox": [ + 0.107, + 0.313, + 0.207, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.138, + 0.395, + 0.172, + 0.407 + ], + "angle": 0, + "content": "\\(t - 1\\)" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.412, + 0.206, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.151, + 0.497, + 0.16, + 0.506 + ], + "angle": 0, + "content": "t" + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.326, + 0.443, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.404, + 0.443, + 0.483 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.28, + 0.487, + 0.412, + 0.5 + ], + "angle": 0, + "content": "\\(t + 1\\) \\(t + 3\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.517, + 0.47, + 0.558 + ], + "angle": 0, + "content": "Figure 6. Visual effect comparison in the Viceo-Test [69] dataset. our DMVFN faithfully reproduces the motion of the hand and the head with less distortion and artifacts." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.57, + 0.47, + 0.736 + ], + "angle": 0, + "content": "terms of MS-SSIM and LPIPS, our DMVFN achieves much better results than the other methods in both short-term and long-term video prediction tasks. The GFLOPs of our DMVFN is considerably smaller than the comparison methods. These results show the proposed routing strategy reduces almost half the number of GFLOPs while maintaining comparable performance. Because the decrease of GFLOPs is not strictly linear with the actual latency [45], we measure the running speed on Titan 2080Ti. For predicting a 720P frame, DVF [35] spends 0.130s on average, while our DMVFN only needs 0.023s on average." + }, + { + "type": "table_caption", + "bbox": [ + 0.106, + 0.741, + 0.439, + 0.755 + ], + "angle": 0, + "content": "Table 3. Comparison between DMVFN and STRPM." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.763, + 0.465, + 0.83 + ], + "angle": 0, + "content": "
Method | UCF Sports | Human3.6M
t+1 | t+6 | t+1 | t+4
PSNR↑ / LPIPS↓ | PSNR↑ / LPIPS↓ | PSNR↑ / LPIPS↓ | PSNR↑ / LPIPS↓
STRPM | 28.54 / 20.69 | 20.59 / 41.11 | 33.32 / 9.74 | 29.01 / 10.44
DMVFN | 30.05 / 10.24 | 22.67 / 22.50 | 35.07 / 7.48 | 29.56 / 9.74
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.841, + 0.47, + 0.902 + ], + "angle": 0, + "content": "More comparison. The quantitative results compared with STRPM [8] are reported in Table 3. We train our DMVFN in UCFSports and Human3.6M datasets following the setting in [8]. We also measure the average running speed" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.315, + 0.892, + 0.346 + ], + "angle": 0, + "content": "on TITAN 2080Ti. To predict a \\(1024 \\times 1024\\) frame, our DMVFN is averagely \\(4.06 \\times\\) faster than STRPM [8]." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.347, + 0.892, + 0.438 + ], + "angle": 0, + "content": "Qualitative results on different datasets are shown in Figure 3, Figure 4 and Figure 6. As we can see, the frames predicted by our DMVFN exhibit better temporal continuity and are more consistent with the ground truth than those by the other methods. Our DMVFN is able to predict correct motion while preserving the shape and texture of objects." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.451, + 0.653, + 0.467 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.476, + 0.892, + 0.536 + ], + "angle": 0, + "content": "Here, we perform extensive ablation studies to further study the effectiveness of components in our DMVFN. The experiments are performed on the Cityscapes [9] and KITTI [12] datasets unless otherwise specified." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.538, + 0.893, + 0.749 + ], + "angle": 0, + "content": "1) How effective is the proposed Routing Module? As suggested in [65, 66], we divide the Vimeo-90K [69] test set into three subsets: Vimeo-Fast, Vimeo-Medium, and Vimeo-Slow, which correspond to the motion range. To verify that our DMVFN can perceive motion scales and adaptively choose the proper sub-networks, we retrain our DMVFN on the Vimeo-Train [69] using the same training strategy in §3.3. We calculate the averaged usage rate of each MVFB on three test subsets. From Figures 5 (a) and 5 (b), we observe that our DMVFN prefers to select MVFBs with large scale (e.g., 4x) for two frames with large motion. There are two MVFBs with clearly smaller selection probability. We believe this reflects the inductive bias of our DMVFN on different combinations of scaling factors." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.75, + 0.892, + 0.901 + ], + "angle": 0, + "content": "To further verify that our DMVFN also perceives the size of the time interval, we test our DMVFN on the two frames with different time intervals (but still in the same video). We choose Vimeo-Slow as the test set, and set the time intervals as 1, 3, and 5. The results are shown in Figure 5 \\((c)\\). We observe that our DMVFN prefers large-scale blocks on long-interval inputs, and small-scale blocks on short-interval inputs. This verifies that our DMVFN can perceive temporal information and dynamically select different sub-networks to handle the input frames with different time intervals." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6127" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.471, + 0.212 + ], + "angle": 0, + "content": "To further study how the MVFBs are selected, we select 103 video sequences (contain a high-speed moving car and a relatively static background) from the KITTI dataset, denoted as KITTI-A. 
As shown in Table 4, on the KITTI-A dataset, our DMVFN prefers to choose MVFBs with large scaling factors to capture large movements. The flow estimation for static backgrounds is straightforward, while the large motion dominates the choice of our DMVFN." + }, + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.216, + 0.468, + 0.23 + ], + "angle": 0, + "content": "Table 4. Average usage rate \\( \\left( {10}^{-2}\\right) \\) of MVFBs in our DMVFN." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.242, + 0.466, + 0.276 + ], + "angle": 0, + "content": "
Scale | 4 | 4 | 4 | 2 | 2 | 2 | 1 | 1 | 1
KITTI-A | 80.95 | 34.22 | 26.70 | 81.19 | 73.91 | 44.90 | 55.34 | 0.49 | 0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.299, + 0.468, + 0.327 + ], + "angle": 0, + "content": "Table 5. Routing Module based on STEBS is effective. The evaluation metric is MS-SSIM \\((\\times 10^{-2})\\)" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.339, + 0.466, + 0.446 + ], + "angle": 0, + "content": "
Setting | Cityscapes | KITTI
t+1 | t+3 | t+5 | t+1 | t+3 | t+5
Copy last frame | 76.95 | 68.82 | 64.45 | 58.31 | 48.99 | 44.16
w/o routing | 95.29 | 87.91 | 81.48 | 88.06 | 76.53 | 68.29
Random | 91.97 | 82.11 | 70.05 | 81.31 | 69.89 | 62.42
Gumbel Softmax | 95.05 | 87.57 | 79.54 | 87.42 | 75.56 | 65.83
STEBS | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.46, + 0.47, + 0.626 + ], + "angle": 0, + "content": "2) How to design the Routing Module? A trivial solution is to process the routing probability \\( p \\) with Gumbel Softmax. The comparison results of our DMVFNs with different differentiable routing methods are summarized in Table 5. Our DMVFN with STEBS outperforms the DMVFN variant with Gumbel Softmax on MS-SSIM, especially for long-term prediction. The DMVFN variant with Gumbel Softmax usually degenerates to a fixed and static structure. We also compare with the DMVFN randomly selecting each MVFB with probability 0.5 (denoted as \"Random\") and that without routing module (denoted as \"w/o routing\")." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.637, + 0.468, + 0.665 + ], + "angle": 0, + "content": "Table 6. Results of our DMVFN with different scaling factor settings. The evaluation metric is MS-SSIM \\((\\times 10^{-2})\\)" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.677, + 0.466, + 0.766 + ], + "angle": 0, + "content": "
Setting in DMVFN | Cityscapes | KITTI
t+1 | t+3 | t+5 | t+1 | t+3 | t+5
[1] | 94.70 | 87.26 | 80.93 | 87.64 | 76.71 | 68.76
[2,1] | 95.30 | 87.93 | 82.02 | 87.97 | 77.23 | 69.58
[4,2,1] | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.78, + 0.468, + 0.901 + ], + "angle": 0, + "content": "3) How to set the scaling factors? We evaluate our DMVFN with different scaling factors. We use three non-increasing factor sequences of “[1, 1, 1, 1, 1, 1, 1, 1, 1]”, “[2, 2, 2, 2, 2, 1, 1, 1, 1]” and “[4, 4, 4, 2, 2, 2, 1, 1, 1]”, denoted as “[1]”, “[2, 1]” and “[4, 2, 1]”, respectively. The results are listed in Table 6. Our DMVFN with “[4, 2, 1]” performs better than that with “[2, 1]” and “[1]”. The gap is more obvious on longer-term future frames." + }, + { + "type": "table_caption", + "bbox": [ + 0.5, + 0.09, + 0.892, + 0.117 + ], + "angle": 0, + "content": "Table 7. Spatial path is effective in our DMVFN. The evaluation metric is MS-SSIM \\((\\times 10^{-2})\\)" + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.129, + 0.889, + 0.228 + ], + "angle": 0, + "content": "
Setting | Cityscapes | KITTI
t+1 | t+3 | t+5 | t+1 | t+3 | t+5
w/o r, w/o path | 94.99 | 87.59 | 80.98 | 87.75 | 76.22 | 67.86
w/o r | 95.29 | 87.91 | 81.48 | 88.06 | 76.53 | 68.29
w/o path | 95.55 | 88.89 | 83.03 | 88.29 | 77.53 | 69.86
DMVFN | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.253, + 0.892, + 0.344 + ], + "angle": 0, + "content": "4) How effective is the spatial path? To verify the effectiveness of the spatial path in our DMVFN, we compare it with the DMVFN without spatial path (denoted as \"w/o path\"). The results listed in Table 7 show our DMVFN enjoys better performance with the spatial path, no matter with or without the routing module (denoted as \"w/o r\")." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.358, + 0.618, + 0.373 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.383, + 0.892, + 0.609 + ], + "angle": 0, + "content": "In this work, we developed an efficient Dynamic Multiscale Voxel Flow Network (DMVFN) that excels previous video prediction methods on dealing with complex motions of different scales. With the proposed routing module, our DMVFN adaptively activates different sub-networks based on the input frames, improving the prediction performance while reducing the computation costs. Experiments on diverse benchmark datasets demonstrated that our DMVFN achieves state-of-the-art performance with greatly reduced computation burden. We believe our DMVFN can provide general insights for long-term prediction, video frame synthesis, and representation learning [14, 15]. We hope our DMVFN will inspire further research in light-weight video processing and make video prediction more accessible for downstream tasks such as CODEC for streaming video." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.61, + 0.892, + 0.82 + ], + "angle": 0, + "content": "Our DMVFN can be improved at several aspects. Firstly, iteratively predicting future frames suffers from accumulate errors. This issue may be addressed by further bringing explicit temporal modeling [22, 31, 66, 68] to our DMVFN. Secondly, our DMVFN simply selects the nodes in a chain network topology, which can be improved by exploring more complex topology. For example, our routing module can be extended to automatically determine the scaling factors for parallel branches [33]. Thirdly, forecast uncertainty modeling is more of an extrapolation abiding to past flow information, especially considering bifurcation, which exceeds the current capability of our DMVFN. We believe that research on long-term forecast uncertainty may uncover deeper interplay with dynamic modeling methods [1, 14]." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.82, + 0.892, + 0.896 + ], + "angle": 0, + "content": "Acknowledgements. We sincerely thank Wen Heng for his exploration on neural architecture search at Megvii Research and Tianyuan Zhang for meaningful suggestions. This work is supported in part by the National Natural Science Foundation of China (No. 62002176 and 62176068)." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6128" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.156 + ], + "angle": 0, + "content": "[1] Adil Kaan Akan, Erkut Erdem, Aykut Erdem, and Fatma Güney. Slamp: Stochastic latent appearance and motion prediction. In ICCV, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.157, + 0.472, + 0.2 + ], + "angle": 0, + "content": "[2] Xinzhu Bei, Yanchao Yang, and Stefano Soatto. Learning semantic-aware dynamics for video prediction. In IEEE Conf. Comput. Vis. 
Pattern Recog., 2021. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.471, + 0.255 + ], + "angle": 0, + "content": "[3] Yoshua Bengio, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.471, + 0.297 + ], + "angle": 0, + "content": "[4] Tolga Bolukbasi, Joseph Wang, Ofer Dekel, and Venkatesh Saligrama. Adaptive neural networks for efficient inference. In Inf. Conf. Mach. Learn., 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.298, + 0.471, + 0.34 + ], + "angle": 0, + "content": "[5] Wonmin Byeon, Qin Wang, Rupesh Kumar Srivastava, and Petros Koumoutsakos. Contextvp: Fully context-aware video prediction. In Eur. Conf. Comput. Vis., 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.34, + 0.471, + 0.382 + ], + "angle": 0, + "content": "[6] Lluis Castrejon, Nicolas Ballas, and Aaron Courville. Improved conditional vrnns for video prediction. In Int. Conf. Comput. Vis., 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.471, + 0.437 + ], + "angle": 0, + "content": "[7] Rohan Chandra, Uttaran Bhattacharya, Aniket Bera, and Dinesh Manocha. Traphic: Trajectory prediction in dense and heterogeneous traffic using weighted interactions. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.471, + 0.494 + ], + "angle": 0, + "content": "[8] Zheng Chang, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. Strpm: A spatiotemporal residual predictive model for high-resolution video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.495, + 0.471, + 0.564 + ], + "angle": 0, + "content": "[9] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 1, 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.565, + 0.471, + 0.606 + ], + "angle": 0, + "content": "[10] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Int. Conf. Comput. Vis., 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.607, + 0.471, + 0.675 + ], + "angle": 0, + "content": "[11] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Int. Conf. Comput. Vis., 2015. 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.676, + 0.471, + 0.717 + ], + "angle": 0, + "content": "[12] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. I. J. Robotics Res., 2013. 2, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.718, + 0.471, + 0.773 + ], + "angle": 0, + "content": "[13] Daniel Geng, Max Hamilton, and Andrew Owens. Comparing correspondences: Video prediction with correspondence-wise losses. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.774, + 0.471, + 0.803 + ], + "angle": 0, + "content": "[14] David Ha and Jürgen Schmidhuber. World models. 
arXiv preprint arXiv:1803.10122, 2018. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.804, + 0.471, + 0.845 + ], + "angle": 0, + "content": "[15] Danijar Hafner, Jurgis Pasukonis, Jimmy Ba, and Timothy Lillicrap. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.846, + 0.471, + 0.899 + ], + "angle": 0, + "content": "[16] Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. Dynamic neural networks: A survey. In IEEE Trans. Pattern Anal. Mach. Intell., 2021. 2, 4" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.472, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.147 + ], + "angle": 0, + "content": "[17] Yunhui Han, Kunming Luo, Ao Luo, Jiangyu Liu, Haoqiang Fan, Guiming Luo, and Shuaicheng Liu. Realflow: Embased realistic optical flow dataset generation from videos. In ECCV, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.149, + 0.892, + 0.204 + ], + "angle": 0, + "content": "[18] Adam W Harley, Konstantinos G Derpanis, and Iasonas Kokkinos. Segmentation-aware convolutional networks using local attention masks. In Int. Conf. Comput. Vis., 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.205, + 0.892, + 0.234 + ], + "angle": 0, + "content": "[19] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. In Neural Comput., 1997. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.234, + 0.892, + 0.29 + ], + "angle": 0, + "content": "[20] Xiaotao Hu, Jun Xu, Shuhang Gu, Ming-Ming Cheng, and Li Liu. Restore globally, refine locally: A mask-guided scheme to accelerate super-resolution networks. In Eur. Conf. Comput. Vis., 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.347 + ], + "angle": 0, + "content": "[21] Zhaoyang Huang, Xiaoyu Shi, Chao Zhang, Qiang Wang, Ka Chun Cheung, Hongwei Qin, Jifeng Dai, and Hongsheng Li. Flowformer: A transformer architecture for optical flow. In Eur. Conf. Comput. Vis., 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.892, + 0.403 + ], + "angle": 0, + "content": "[22] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Eur. Conf. Comput. Vis., 2022, 1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.404, + 0.892, + 0.46 + ], + "angle": 0, + "content": "[23] Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Quantized neural networks: Training neural networks with low precision weights and activations. In J. Mach. Learn. Res., 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.461, + 0.892, + 0.503 + ], + "angle": 0, + "content": "[24] Ryan Humble, Maying Shen, Jorge Albericio Latorre, Eric Darve, and Jose Alvarez. Soft masking for cost-constrained channel pruning. In Eur. Conf. Comput. Vis., 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.56 + ], + "angle": 0, + "content": "[25] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. Flownet 2.0: Evolution of optical flow estimation with deep networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.892, + 0.603 + ], + "angle": 0, + "content": "[26] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. 
Spatial transformer networks. In Adv. Neural Inform. Process. Syst., 2015. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.604, + 0.892, + 0.645 + ], + "angle": 0, + "content": "[27] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In Int. Conf. Learn. Represent., 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.646, + 0.892, + 0.716 + ], + "angle": 0, + "content": "[28] Huaizu Jiang, Deqing Sun, Varun Jampani, Ming-Hsuan Yang, Erik Learned-Miller, and Jan Kautz. Super slomo: High quality estimation of multiple intermediate frames for video interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.717, + 0.892, + 0.758 + ], + "angle": 0, + "content": "[29] Rico Jonschkowski, Austin Stone, Jonathan T Barron, Ariel Gordon, Kurt Konolige, and Anelia Angelova. What matters in unsupervised optical flow. In ECCV, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.759, + 0.892, + 0.8 + ], + "angle": 0, + "content": "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Int. Conf. Learn. Represent., 2015. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.802, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[31] Lingtong Kong, Boyuan Jiang, Donghao Luo, Wenqing Chu, Xiaoming Huang, Ying Tai, Chengjie Wang, and Jie Yang. Ifrnet: Intermediate feature refine network for efficient frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.872, + 0.892, + 0.902 + ], + "angle": 0, + "content": "[32] Wonkwang Lee, Whie Jung, Han Zhang, Ting Chen, Jing Yu Koh, Thomas Huang, Hyungsuk Yoon, Honglak Lee, and" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "6129" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.092, + 0.47, + 0.134 + ], + "angle": 0, + "content": "Seunghoon Hong. Revisiting hierarchical approach for persistent long-term video prediction. In Int. Conf. Learn. Represent., 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.136, + 0.47, + 0.176 + ], + "angle": 0, + "content": "[33] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Int. Conf. Learn. Represent., 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.47, + 0.22 + ], + "angle": 0, + "content": "[34] Wenqian Liu, Abhishek Sharma, Octavia Camps, and Mario Sznaier. Dyan: A dynamical atoms-based network for video prediction. In Eur. Conf. Comput. Vis., 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.222, + 0.47, + 0.262 + ], + "angle": 0, + "content": "[35] Ziwei Liu, Raymond A Yeh, Xiaou Tang, Yiming Liu, and Aseem Agarwala. Video frame synthesis using deep voxel flow. In Int. Conf. Comput. Vis., 2017. 1, 2, 3, 4, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.47, + 0.303 + ], + "angle": 0, + "content": "[36] Ilya Loshchilov and F. Hutter. Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.307, + 0.47, + 0.359 + ], + "angle": 0, + "content": "[37] William Lotter, Gabriel Kreiman, and David Cox. Deep predictive coding networks for video prediction and unsupervised learning. In Int. Conf. Learn. 
Represent., 2017. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.363, + 0.469, + 0.404 + ], + "angle": 0, + "content": "[38] Kunming Luo, Chuan Wang, Shuaicheng Liu, Haoqiang Fan, Jue Wang, and Jian Sun. Upflow: Upsampling pyramid for unsupervised optical flow learning. In CVPR, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.406, + 0.469, + 0.447 + ], + "angle": 0, + "content": "[39] Julieta Martinez, Michael J Black, and Javier Romero. On human motion prediction using recurrent neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.449, + 0.469, + 0.516 + ], + "angle": 0, + "content": "[40] Sergiu Oprea, Pablo Martinez-Gonzalez, Alberto Garcia-Garcia, John Alejandro Castro-Vargas, Sergio Orts-Escolano, Jose Garcia-Rodriguez, and Antonis Argyros. A review on deep learning techniques for video prediction. In IEEE Trans. Pattern Anal. Mach. Intell., 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.519, + 0.469, + 0.573 + ], + "angle": 0, + "content": "[41] Junting Pan, Chengyu Wang, Xu Jia, Jing Shao, Lu Sheng, Junjie Yan, and Xiaogang Wang. Video generation from single semantic label map. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.576, + 0.469, + 0.616 + ], + "angle": 0, + "content": "[42] Sylvain Paris, Samuel W Hasinoff, and Jan Kautz. Local laplacian filters: edge-aware image processing with a laplacian pyramid. ACM Trans. Graph., 2011. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.618, + 0.469, + 0.672 + ], + "angle": 0, + "content": "[43] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 davis challenge on video object segmentation. arXiv preprint arXiv:1704.00675, 2017. 2, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.675, + 0.469, + 0.716 + ], + "angle": 0, + "content": "[44] Xiaojuan Qi, Zhengzhe Liu, Qifeng Chen, and Jiaya Jia. 3d motion decomposition for rgbd future dynamic scene synthesis. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.718, + 0.469, + 0.759 + ], + "angle": 0, + "content": "[45] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólár. Designing network design spaces. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.761, + 0.469, + 0.801 + ], + "angle": 0, + "content": "[46] Anurag Ranjan and Michael J Black. Optical flow estimation using a spatial pyramid network. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.804, + 0.469, + 0.844 + ], + "angle": 0, + "content": "[47] Mengye Ren, Andrei Pokrovsky, Bin Yang, and Raquel Urtasun. Sbnet: Sparse blocks network for fast inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.846, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. In Adv. Neural Inform. Process. Syst., 2015. 
1" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.133 + ], + "angle": 0, + "content": "[49] Hyeonjun Sim, Jihyong Oh, and Munchurl Kim. Xvfi: extreme video frame interpolation. In Int. Conf. Comput. Vis., 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.136, + 0.892, + 0.176 + ], + "angle": 0, + "content": "[50] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. A dataset of 101 human action classes from videos in the wild. *Cent. Res. Comput. Vis.*, 2012. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.179, + 0.892, + 0.232 + ], + "angle": 0, + "content": "[51] Hang Su, Varun Jampani, Deqing Sun, Orazio Gallo, Erik Learned-Miller, and Jan Kautz. Pixel-adaptive convolutional neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.235, + 0.892, + 0.287 + ], + "angle": 0, + "content": "[52] Yu-Chuan Su and Kristen Grauman. Leaving some stones unturned: dynamic feature prioritization for activity detection in streaming video. In *Eur. Conf. Comput. Vis.*, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.291, + 0.892, + 0.345 + ], + "angle": 0, + "content": "[53] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and cost volume. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.348, + 0.892, + 0.388 + ], + "angle": 0, + "content": "[54] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Eur. Conf. Comput. Vis., 2020, 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.391, + 0.892, + 0.444 + ], + "angle": 0, + "content": "[55] Surat Teerapittayanon, Bradley McDanel, and Hsiang-Tsung Kung. Branchynet: Fast inference via early exiting from deep neural networks. In Int. Conf. Pattern Recog., 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.447, + 0.892, + 0.487 + ], + "angle": 0, + "content": "[56] Andreas Veit and Serge Belongie. Convolutional networks with adaptive inference graphs. In Eur. Conf. Comput. Vis., 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.49, + 0.892, + 0.531 + ], + "angle": 0, + "content": "[57] Thomas Verelst and Tinne Tuytelaars. Dynamic convolutions: Exploiting spatial sparsity for faster inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.533, + 0.892, + 0.587 + ], + "angle": 0, + "content": "[58] Ruben Villegas, Jimei Yang, Seunghoon Hong, Xunyu Lin, and Honglak Lee. Decomposing motion and content for natural video sequence prediction. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.59, + 0.892, + 0.643 + ], + "angle": 0, + "content": "[59] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In Adv. Neural Inform. Process. Syst., 2018. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.646, + 0.892, + 0.687 + ], + "angle": 0, + "content": "[60] Xin Wang, Fisher Yu, Zi-Yi Dou, Trevor Darrell, and Joseph E Gonzalez. Skipnet: Learning dynamic routing in convolutional networks. In Eur. Conf. Comput. Vis., 2018. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.689, + 0.892, + 0.73 + ], + "angle": 0, + "content": "[61] Zhou Wang, Eero P Simoncelli, and Alan C Bovik. Multiscale structural similarity for image quality assessment. In Asilomar Conf. Signals Syst. Comput., 2003. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.732, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[62] Yue Wu, Rongrong Gao, Jaesik Park, and Qifeng Chen. Future video synthesis with object motion prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 1, 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.774, + 0.892, + 0.814 + ], + "angle": 0, + "content": "[63] Yue Wu, Qiang Wen, and Qifeng Chen. Optimizing video prediction via video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 1, 3, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[64] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.873, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[65] Xiaoyu Xiang, Yapeng Tian, Yulun Zhang, Yun Fu, Jan P Allebach, and Chenliang Xu. Zooming slow-mo: Fast and" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.516, + 0.956 + ], + "angle": 0, + "content": "6130" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.093, + 0.468, + 0.12 + ], + "angle": 0, + "content": "accurate one-stage space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.469, + 0.177 + ], + "angle": 0, + "content": "[66] Gang Xu, Jun Xu, Zhen Li, Liang Wang, Xing Sun, and Ming-Ming Cheng. Temporal modulation network for controllable space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., June 2021. 7, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.232 + ], + "angle": 0, + "content": "[67] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, and Dacheng Tao. Gmflow: Learning optical flow via global matching. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.235, + 0.469, + 0.276 + ], + "angle": 0, + "content": "[68] Xiangyu Xu, Li Siyao, Wenxiu Sun, Qian Yin, and Ming-Hsuan Yang. Quadratic video interpolation. In Adv. Neural Inform. Process. Syst., 2019. 2, 3, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.278, + 0.469, + 0.319 + ], + "angle": 0, + "content": "[69] Tianfan Xue, Baian Chen, Jiajun Wu, Donglai Wei, and William T Freeman. Video enhancement with task-oriented flow. In Int. J. Comput. Vis., 2019. 2, 3, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.321, + 0.469, + 0.375 + ], + "angle": 0, + "content": "[70] Serena Yeung, Olga Russakovsky, Greg Mori, and Li Fei-Fei. End-to-end learning of action detection from frame glimpses in videos. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.378, + 0.469, + 0.432 + ], + "angle": 0, + "content": "[71] Changqian Yu, Jingbo Wang, Chao Peng, Changxin Gao, Gang Yu, and Nong Sang. Bisenet: Bilateral segmentation network for real-time semantic segmentation. In Eur. Conf. Comput. 
Vis., 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.434, + 0.469, + 0.501 + ], + "angle": 0, + "content": "[72] Guozhen Zhang, Yuhan Zhu, Haonan Wang, Youxin Chen, Gangshan Wu, and Limin Wang. Extracting motion and appearance via inter-frame attention for efficient video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.504, + 0.469, + 0.559 + ], + "angle": 0, + "content": "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.562, + 0.469, + 0.615 + ], + "angle": 0, + "content": "[74] Shuchang Zhou, Yuxin Wu, Zekun Ni, Xinyu Zhou, He Wen, and Yuheng Zou. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint arXiv:1606.06160, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.618, + 0.469, + 0.657 + ], + "angle": 0, + "content": "[75] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Malik, and Alexei A Efros. View synthesis by appearance flow. In Eur. Conf. Comput. Vis., 2016. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.661, + 0.469, + 0.702 + ], + "angle": 0, + "content": "[76] Xizhou Zhu, Han Hu, Stephen Lin, and Jifeng Dai. Deformable convnets v2: More deformable, better results. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.704, + 0.469, + 0.758 + ], + "angle": 0, + "content": "[77] Yi Zhu, Karan Sapra, Fitsum A Reda, Kevin J Shih, Shawn Newsam, Andrew Tao, and Bryan Catanzaro. Improving semantic segmentation via video propagation and label relaxation. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 
5" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.093, + 0.469, + 0.758 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.946, + 0.514, + 0.956 + ], + "angle": 0, + "content": "6131" + } + ] +] \ No newline at end of file diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_origin.pdf b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c9e42d14e3a9bb83f4c1f8bce3476300dc5d6ecf --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/932e5c1f-279d-4c41-943b-431182e5f76a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93198ad5c347d61d5dee9ef7017dd6ff0cf0eb9d5338f6dc96891c6dc7266563 +size 6312525 diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/full.md b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..99289bab706faa3a7ddd6cfe95b4234a1ee7a2c8 --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/full.md @@ -0,0 +1,387 @@ +# A Dynamic Multi-Scale Voxel Flow Network for Video Prediction + +Xiaotao Hu $^{1,2}$ Zhwei Huang $^{2}$ Ailin Huang $^{2,3}$ Jun Xu $^{4,*}$ Shuchang Zhou $^{2,*}$ $^{1}$ College of Computer Science, Nankai University $^{2}$ Megvii Technology + $^{3}$ Wuhan University $^{4}$ School of Statistics and Data Science, Nankai University + +{huxiaotao, huangzhewei, huangailin, zhoushuchang}@megvii.com, nankaimathxujun@gmail.com https://huxiaotaostasy.github.io/DMVFN/ + +# Abstract + +The performance of video prediction has been greatly boosted by advanced deep neural networks. However, most of the current methods suffer from large model sizes and require extra inputs, e.g., semantic/depth maps, for promising performance. For efficiency consideration, in this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to achieve better video prediction performance at lower computational costs with only RGB images, than previous methods. The core of our DMVFN is a differentiable routing module that can effectively perceive the motion scales of video frames. Once trained, our DMVFN selects adaptive sub-networks for different inputs at the inference stage. Experiments on several benchmarks demonstrate that our DMVFN is an order of magnitude faster than Deep Voxel Flow [35] and surpasses the state-of-the-art iterative-based OPT [63] on generated image quality. + +# 1. Introduction + +Video prediction aims to predict future video frames from the current ones. The task potentially benefits the study on representation learning [40] and downstream forecasting tasks such as human motion prediction [39], autonomous driving [6], and climate change [48], etc. During the last decade, video prediction has been increasingly studied in both academia and industry community [5, 7]. + +Video prediction is challenging because of the diverse and complex motion patterns in the wild, in which accurate motion estimation plays a crucial role [35, 37, 58]. Early methods [37, 58] along this direction mainly utilize recurrent neural networks [19] to capture temporal motion information for video prediction. 
To achieve robust long-term prediction, the works of [41, 59, 62] additionally exploit the semantic or instance segmentation maps of video frames for semantically coherent motion estimation in complex scenes. + +![](images/6dc7efb43097abff1d9318fcdb362bba0ffca038bd707eec0cfd1dc410e60dfb.jpg) +Figure 1. Average MS-SSIM and GFLOPs of different video prediction methods on Cityscapes [9]. The parameter amounts are provided in brackets. DMVFN outperforms previous methods in terms of image quality, parameter amount, and GFLOPs. + +However, the semantic or instance maps may not always be available in practical scenarios, which limits the application scope of these video prediction methods [41,59,62]. To improve the prediction capability while avoiding extra inputs, the method of OPT [63] utilizes only RGB images to estimate the optical flow of video motions in an optimization manner with impressive performance. However, its inference speed is largely bogged down mainly by the computational costs of pre-trained optical flow model [54] and frame interpolation model [22] used in the iterative generation. + +The motions of different objects between two adjacent frames are usually of different scales. This is especially evident in high-resolution videos with meticulous details [49]. The spatial resolution is also of huge differences in real-world video prediction applications. To this end, it is essential yet challenging to develop a single model for multiscale motion estimation. An early attempt is to extract + +multi-scale motion cues in different receptive fields by employing the encoder-decoder architecture [35], but in practice it is not flexible enough to deal with complex motions. + +In this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to explicitly model the complex motion cues of diverse scales between adjacent video frames by dynamic optical flow estimation. Our DMVFN is consisted of several Multi-scale Voxel Flow Blocks (MVFBs), which are stacked in a sequential manner. On top of MVFBs, a light-weight Routing Module is proposed to adaptively generate a routing vector according to the input frames, and to dynamically select a subnetwork for efficient future frame prediction. We conduct experiments on four benchmark datasets, including Cityscapes [9], KITTI [12], DAVIS17 [43], and ViceoTest [69], to demonstrate the comprehensive advantages of our DMVFN over representative video prediction methods in terms of visual quality, parameter amount, and computational efficiency measured by floating point operations (FLOPs). A glimpse of comparison results by different methods is provided in Figure 1. One can see that our DMVFN achieves much better performance in terms of accuracy and efficiency on the Cityscapes [9] dataset. Extensive ablation studies validate the effectiveness of the components in our DMVFN for video prediction. + +In summary, our contributions are mainly three-fold: + +- We design a light-weight DMVFN to accurately predict future frames with only RGB frames as inputs. Our DMVFN is consisted of new MVFB blocks that can model different motion scales in real-world videos. +- We propose an effective Routing Module to dynamically select a suitable sub-network according to the input frames. The proposed Routing Module is end-to-end trained along with our main network DMVFN. +- Experiments on four benchmarks show that our DMVFN achieves state-of-the-art results while being an order of magnitude faster than previous methods. + +# 2. Related Work + +# 2.1. 
Video Prediction + +Early video prediction methods [35, 37, 58] only utilize RGB frames as inputs. For example, PredNet [37] learns an unsupervised neural network, with each layer making local predictions and forwarding deviations from those predictions to subsequent network layers. MCNet [58] decomposes the input frames into motion and content components, which are processed by two separate encoders. DVF [35] is a fully-convolutional encoder-decoder network synthesizing intermediate and future frames by approximating voxel flow for motion estimation. Later, extra information is exploited by video prediction methods in pursuit of better + +performance. For example, the methods of Vid2vid [59], Seg2vid [41], HVP [32], and SADM [2] require additional semantic maps or human pose information for better video prediction results. Additionally, Qi et al. [44] used extra depth maps and semantic maps to explicitly inference scene dynamics in 3D space. FVS [62] separates the inputs into foreground objects and background areas by semantic and instance maps, and uses a spatial transformer to predict the motion of foreground objects. In this paper, we develop a light-weight and efficient video prediction network that requires only sRGB images as the inputs. + +# 2.2. Optical Flow + +Optical flow estimation aims to predict the per-pixel motion between adjacent frames. Deep learning-based optical flow methods [17,29,38,53,54] have been considerably advanced ever since Flownet [11], a pioneering work to learn optical flow network from synthetic data. Flownet2.0 [25] improves the accuracy of optical flow estimation by stacking sub-networks for iterative refinement. A coarse-to-fine spatial pyramid network is employed in SPynet [46] to estimate optical flow at multiple scales. PWC-Net [53] employs feature warping operation at different resolutions and uses a cost volume layer to refine the estimated flow at each resolution. RAFT [54] is a lightweight recurrent network sharing weights during the iterative learning process. FlowFormer [21] utilizes an encoder to output latent tokens and a recurrent decoder to decode features, while refining the estimated flow iteratively. In video synthesis, optical flow for downstream tasks [22, 35, 68, 69, 72] is also a hot research topic. Based on these approaches, we aim to design a flow estimation network that can adaptively operate based on each sample for the video prediction task. + +# 2.3. Dynamic Network + +The design of dynamic networks is mainly divided into three categories: spatial-wise, temporal-wise, and sample-wise [16]. Spatial-wise dynamic networks perform adaptive operations in different spatial regions to reduce computational redundancy with comparable performance [20, 47, 57]. In addition to the spatial dimension, dynamic processing can also be applied in the temporal dimension. Temporal-wise dynamic networks [52, 64, 70] improve the inference efficiency by performing less or no computation on unimportant sequence frames. To handle the input in a data-driven manner, sample-wise dynamic networks adaptively adjust network structures to side-off the extra computation [56, 60], or adaptively change the network parameters to improve the performance [10, 18, 51, 76]. Designing and training a dynamic network is not trivial since it is difficult to directly enable a model with complex topology connections. We need to design a well-structured and robust model before considering its dynamic mechanism. 
In this paper, we propose a module to dynamically perceive the motion magnitude of the input frames to select the network structure.

![](images/0fc09876a02da94c26a51e557970913b2d8d61d9dc263948db00d9b38c7d8f75.jpg)
(a) Voxel Flow-based Image Fusion

![](images/b76c9cc72387c1175de1d856c59ddbef47d4c19965d05183878bb6d6577eded8.jpg)
(b) DMVFN

![](images/229ec29b64f11faf740246e7d45b364695f0da07d4b24ac609c9d69d8b11b42c.jpg)
(c) MVFB
(d) Routing Module
Figure 2. Overview of the proposed Dynamic Multi-scale Voxel Flow Network (DMVFN). $(a)$: To predict a future frame, we use the voxel flow [35] to guide the pixel fusion of the input frames. The voxel flow contains the prediction of object motion and occlusion. $(b)$: DMVFN contains several MVFBs with decreasing scaling factor $S^i$. According to the routing vector $v$ estimated by a Routing Module, a sub-network is selected to process the input image. $(c)$: Each MVFB has a scaling factor $S^i$, which means that the motion path is performed on images whose sizes are $1/S^i$ of the original. $(d)$: Two consecutive frames are fed into several neural layers and a differentiable Bernoulli sampler to generate the hard routing vector.

# 3. Methodology

# 3.1. Background

Video prediction. Given a sequence of past $t$ frames $\{I_i\in \mathbb{R}^{h\times w\times 3}\,|\,i = 1,\dots ,t\}$, video prediction aims to predict the future frames $\{\tilde{I}_{t + 1},\tilde{I}_{t + 2},\tilde{I}_{t + 3},\ldots \}$. The inputs of our video prediction model are only the two consecutive frames $I_{t - 1}$ and $I_{t}$. We concentrate on predicting $\tilde{I}_{t + 1}$, and iteratively predict the further future frames $\{\tilde{I}_{t + 2},\tilde{I}_{t + 3},\ldots \}$ in a similar manner. Denote the video prediction model as $G_{\theta}(I_{t - 1},I_t)$, where $\theta$ is the set of model parameters to be learned. The learning objective is to minimize the difference between $\tilde{I}_{t + 1} = G_{\theta}(I_{t - 1},I_t)$ and the "ground truth" $I_{t + 1}$.

Voxel flow. Considering the local consistency in space-time, the pixels of a generated future frame come from nearby regions of the previous frames [69, 75]. In the video prediction task, researchers estimate the optical flow $\mathbf{f}_{t + 1\rightarrow t}$ from $I_{t + 1}$ to $I_{t}$ [35]. The corresponding frame is then obtained using pixel-wise backward warping [26] (denoted as $\overleftarrow{\mathcal{W}}$). In addition, to deal with occlusion, some methods [28, 35] further introduce a fusion map $\mathbf{m}$ to fuse the pixels of $I_{t}$ and $I_{t - 1}$. The final predicted frame is obtained by the following formulation (Figure 2 $(a)$):

$$
\hat{I}_{t+1 \leftarrow t-1} = \overleftarrow{\mathcal{W}}\left(I_{t-1}, \mathbf{f}_{t+1 \rightarrow t-1}\right), \tag{1}
$$

$$
\hat{I}_{t+1 \leftarrow t} = \overleftarrow{\mathcal{W}}\left(I_{t}, \mathbf{f}_{t+1 \rightarrow t}\right), \tag{2}
$$

$$
\tilde{I}_{t+1} = \hat{I}_{t+1 \leftarrow t-1} \times \mathbf{m} + \hat{I}_{t+1 \leftarrow t} \times (1 - \mathbf{m}). \tag{3}
$$

Here, $\hat{I}_{t+1 \leftarrow t}$ and $\hat{I}_{t+1 \leftarrow t-1}$ are intermediate warped images. To simplify notations, we refer to the optical flows $\mathbf{f}_{t+1 \rightarrow t}, \mathbf{f}_{t+1 \rightarrow t-1}$ and the fusion map $\mathbf{m}$ collectively as the voxel flow $\mathbf{F}_{t+1}$, similar to the notations in [35].
The above equations can be simplified to the following form:

$$
\tilde{I}_{t+1} = \overleftarrow{\mathcal{W}}\left(I_{t-1}, I_{t}, \mathbf{F}_{t+1}\right). \tag{4}
$$

# 3.2. Dynamic Multi-Scale Voxel Flow Network

MVFB. To estimate the voxel flow, DVF [35] assumes that all optical flows are locally linear and temporally symmetric around the targeted time, which may be unreasonable for large-scale motions. To address the object position changing issue [22] in adjacent frames, OPT [63] uses a flow reversal layer [68] to convert forward flows to backward flows. We aim to estimate the voxel flow end-to-end, without introducing new components or unreasonable constraints.

![](images/f59e3d5d7093a275e2f4bd42f9e09d29bac48e89b75633ed29c7b57648462a68.jpg)
GT

![](images/e291b529e57bb3fff07bd03becf4ca8a342f76b0b5905f22557f989afdaff33a.jpg)
DVF

![](images/4020023bc6d8f37032c16b5d81dc1b822d6370c94242fb14dab60404db8bd024.jpg)
DYAN

![](images/29b9326dbf78209ac643263e212efe256ed0690376355ba49318fa9c7e8a56b4.jpg)
OPT

![](images/f7c76414e3fb2116b4609b85664a59171dd8f2b3a26b2d6a949bf26d73b8bb0f.jpg)
DMVFN

![](images/4845f3b7a0c8ac6b91a14e5ee6a7c085035ee994ef026c62bffa882475cc7745.jpg)
GT
Figure 3. Visual comparison of the $(t + 1)$-th frame predicted from the $t$-th and $(t - 1)$-th frames on DAVIS17-Val [43].

We denote the $i$-th MVFB as $f_{\mathrm{MVFB}}^{i}(\cdot)$. It learns to approximate the target voxel flow $\mathbf{F}_{t+1}^{i}$ by taking the two frames $I_{t-1}$ and $I_{t}$, the synthesized frame $\tilde{I}_{t+1}^{i-1}$, and the voxel flow estimated by previous blocks $\mathbf{F}_{t+1}^{i-1}$ as inputs.

The architecture of our MVFB is shown in Figure 2 $(c)$. To capture large motions while retaining the original spatial information, we construct a two-branch network structure [71]. This design inherits from pyramidal optical flow estimation [46, 53]. In the motion path, the input is downsampled by a scaling factor $S^i$ to facilitate the expansion of the receptive field. Another spatial path operates at high resolution to complement the spatial information. We denote $\tilde{I}_{t + 1}^{i}$ as the output of the $i$-th MVFB. Formally,

$$
\tilde{I}_{t+1}^{i}, \mathbf{F}_{t+1}^{i} = f_{\mathrm{MVFB}}^{i}\left(I_{t-1}, I_{t}, \tilde{I}_{t+1}^{i-1}, \mathbf{F}_{t+1}^{i-1}, S^{i}\right). \tag{5}
$$

The initial values of $\tilde{I}_{t+1}^{0}$ and $\mathbf{F}_{t+1}^{0}$ are set to zero. As illustrated in Figure 2 (b), our DMVFN contains 9 MVFBs. To generate a future frame, we iteratively refine a voxel flow [35] and fuse the pixels of the input frames.

Many optical flow estimation methods predict the flow field on a small image, and then refine it on a large image [53, 67]. For simplicity and intuition, we consider decreasing scaling factor sequences. Finally, the scaling factors are experimentally set to $[4, 4, 4, 2, 2, 2, 1, 1, 1]$.

DMVFN. Different pairs of adjacent frames have diverse motion scales and different computational demands. An intuitive idea is to adaptively select dynamic architectures conditioned on each input. We therefore perform dynamic routing within the super network (the whole architecture) [16], which contains multiple possible paths. DMVFN saves redundant computation for samples with small-scale motion and preserves the representation ability for large-scale motion.
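Before turning to the routing mechanism, note that the frame synthesis performed from a voxel flow is exactly the backward warping and fusion of Eqns. (1)-(3). Below is a minimal PyTorch sketch; the tensor shapes and the sampling convention of `grid_sample` are illustration choices, not the paper's released code.

```python
import torch
import torch.nn.functional as F


def backward_warp(img, flow):
    """Backward-warp img (N,C,H,W) with a flow (N,2,H,W) given in pixels."""
    n, _, h, w = img.shape
    ys, xs = torch.meshgrid(
        torch.arange(h, device=img.device, dtype=img.dtype),
        torch.arange(w, device=img.device, dtype=img.dtype),
        indexing="ij",
    )
    # Each target pixel (x, y) samples the source image at (x + u, y + v).
    x = xs.unsqueeze(0) + flow[:, 0]
    y = ys.unsqueeze(0) + flow[:, 1]
    # grid_sample expects sampling coordinates normalized to [-1, 1].
    grid = torch.stack((2.0 * x / (w - 1) - 1.0, 2.0 * y / (h - 1) - 1.0), dim=-1)
    return F.grid_sample(img, grid, mode="bilinear",
                         padding_mode="border", align_corners=True)


def fuse(i_tm1, i_t, flow_to_tm1, flow_to_t, m):
    """Eqns. (1)-(3): blend the two warped frames with the fusion map m in [0, 1]."""
    warped_tm1 = backward_warp(i_tm1, flow_to_tm1)   # \hat{I}_{t+1 <- t-1}
    warped_t = backward_warp(i_t, flow_to_t)         # \hat{I}_{t+1 <- t}
    return warped_tm1 * m + warped_t * (1.0 - m)     # \tilde{I}_{t+1}
```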
To make our DMVFN end-to-end trainable, we design a differentiable Routing Module containing a tiny neural network to estimate a routing vector $v$ for each input sample. Based on this vector, our DMVFN dynamically selects a sub-network to process the input data. As Figure 2 $(b)$ shows, some blocks are skipped during inference.

Different from some dynamic network methods that can only select the first several blocks consecutively ($n$ options) [4, 55], DMVFN is able to choose paths freely ($2^n$ options). DMVFN trains different sub-networks in the super network with various possible inference paths, and uses dynamic routing inside the super network during inference to reduce redundant computation while maintaining the performance. A dynamic routing vector $v \in \{0,1\}^n$ is predicted by the proposed Routing Module. For the $i$-th MVFB of DMVFN, $v_i$ indicates whether to process the incoming voxel flow $\mathbf{F}_{t+1}^{i-1}$ and the incoming predicted frame $\tilde{I}_{t+1}^{i-1}$. The path $f_{\mathrm{MVFB}}^i$ from the previous block to the $i$-th block is activated only when $v_i = 1$. Formally,

$$
\tilde{I}_{t+1}^{i}, \mathbf{F}_{t+1}^{i} = \begin{cases} f_{\mathrm{MVFB}}^{i}\left(\tilde{I}_{t+1}^{i-1}, \mathbf{F}_{t+1}^{i-1}\right), & v_{i} = 1, \\ \tilde{I}_{t+1}^{i-1}, \mathbf{F}_{t+1}^{i-1}, & v_{i} = 0. \end{cases} \tag{6}
$$

During the training phase, to enable the backpropagation of Eqn. (6), we use $v_{i}$ and $(1 - v_{i})$ as the weights of the two branches and average their outputs.

In the iterative scheme of our DMVFN, each MVFB essentially refines the current voxel flow estimate into a new one. This special property allows our DMVFN to skip some MVFBs for every pair of input frames. Here, we design a differentiable and efficient Routing Module that learns whether each MVFB should be executed. This is achieved by predicting a routing vector $v \in \{0,1\}^n$ to identify the proper sub-network (e.g., 0 for deactivated MVFBs, 1 for activated MVFBs). We implement the Routing Module with a small neural network ($\sim 1/6$ of the GFLOPs of the super network), and show its architecture in Figure 2 (d). It learns to predict the probability $\tilde{v}$ of choosing each MVFB by:

$$
\tilde{v} = \operatorname{Linear}(\operatorname{AvgPooling}(\operatorname{Convs}(I_{t-1}, I_{t}))), \tag{7}
$$

$$
v = \text{Bernoulli-Sampling}(\tilde{v}). \tag{8}
$$

Table 1. Quantitative results of different methods on the Cityscapes [9] and KITTI [12] datasets. "RGB", "F", "S", and "I" denote video frames, optical flow, semantic maps, and instance maps, respectively. We denote our DMVFN without the routing module as "DMVFN (w/o r)". FVS [62] integrates a segmentation model [77] on KITTI [12] to obtain the semantic maps. "N/A" means not available.
Cityscapes-Train→Cityscapes-Test [9]:

| Method | Inputs | GFLOPs | MS-SSIM ($\times 10^{-2}$) ↑ (t+1 / t+3 / t+5) | LPIPS ($\times 10^{-2}$) ↓ (t+1 / t+3 / t+5) |
| --- | --- | --- | --- | --- |
| Vid2vid [59] | RGB+S | 603.79 | 88.16 / 80.55 / 75.13 | 10.58 / 15.92 / 20.14 |
| Seg2vid [41] | RGB+S | 455.84 | 88.32 / N/A / 61.63 | 9.69 / N/A / 25.99 |
| FVS [62] | RGB+S+I | 1891.65 | 89.10 / 81.13 / 75.68 | 8.50 / 12.98 / 16.50 |
| SADM [2] | RGB+S+F | N/A | 95.99 / N/A / 83.51 | 7.67 / N/A / 14.93 |
| PredNet [37] | RGB | 62.62 | 84.03 / 79.25 / 75.21 | 25.99 / 29.99 / 36.03 |
| MCNET [58] | RGB | 502.80 | 89.69 / 78.07 / 70.58 | 18.88 / 31.34 / 37.34 |
| DVF [35] | RGB | 409.78 | 83.85 / 76.23 / 71.11 | 17.37 / 24.05 / 28.79 |
| CorrWise [13] | RGB | 944.29 | 92.80 / N/A / 83.90 | 8.50 / N/A / 15.00 |
| OPT [63] | RGB | 313482.15 | 94.54 / 86.89 / 80.40 | 6.46 / 12.50 / 17.83 |
| DMVFN (w/o r) | RGB | 24.51 | 95.29 / 87.91 / 81.48 | 5.60 / 10.48 / 14.91 |
| DMVFN | RGB | 12.71 | 95.73 / 89.24 / 83.45 | 5.58 / 10.47 / 14.82 |

KITTI-Train→KITTI-Test [12]:

| Method | Inputs | GFLOPs | MS-SSIM ($\times 10^{-2}$) ↑ (t+1 / t+3 / t+5) | LPIPS ($\times 10^{-2}$) ↓ (t+1 / t+3 / t+5) |
| --- | --- | --- | --- | --- |
| Vid2vid [59] | RGB+S | N/A | N/A | N/A |
| Seg2vid [41] | RGB+S | N/A | N/A | N/A |
| FVS [62] | RGB+S+I | 768.96 | 79.28 / 67.65 / 60.77 | 18.48 / 24.61 / 30.49 |
| SADM [2] | RGB+S+F | N/A | 83.06 / 72.44 / 64.72 | 14.41 / 24.58 / 31.16 |
| PredNet [37] | RGB | 25.44 | 56.26 / 51.47 / 47.56 | 55.35 / 58.66 / 62.95 |
| MCNET [58] | RGB | 204.26 | 75.35 / 63.52 / 55.48 | 24.05 / 31.71 / 37.39 |
| DVF [35] | RGB | 166.47 | 53.93 / 46.99 / 42.62 | 32.47 / 37.43 / 41.59 |
| CorrWise [13] | RGB | 383.62 | 82.00 / N/A / 66.70 | 17.20 / N/A / 25.90 |
| OPT [63] | RGB | 127431.71 | 82.71 / 69.50 / 61.09 | 12.34 / 20.29 / 26.35 |
| DMVFN (w/o r) | RGB | 9.96 | 88.06 / 76.53 / 68.29 | 10.70 / 19.28 / 26.13 |
| DMVFN | RGB | 5.15 | 88.53 / 78.01 / 70.52 | 10.74 / 19.27 / 26.05 |
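To illustrate how the routing vector of Eqn. (6) gates each block, here is a minimal sketch of one gated MVFB step. The block interface is simplified (the full block also receives $I_{t-1}$, $I_t$, and the scaling factor $S^i$), and the soft blend during training follows the description given above Eqn. (6).

```python
import torch


def gated_mvfb_step(mvfb, pred_prev, flow_prev, v_i, training):
    """Eqn. (6): run the i-th MVFB only when its routing entry v_i is active.

    mvfb: callable mapping (pred_prev, flow_prev) -> (pred_new, flow_new).
    v_i : scalar tensor, hard {0, 1} at inference, relaxed to [0, 1] in training.
    """
    if training:
        # Training: blend both branches with weights v_i and (1 - v_i),
        # so gradients reach the routing decision.
        pred_new, flow_new = mvfb(pred_prev, flow_prev)
        pred = v_i * pred_new + (1.0 - v_i) * pred_prev
        flow = v_i * flow_new + (1.0 - v_i) * flow_prev
        return pred, flow
    # Inference: skip the block entirely when v_i = 0 to save computation.
    if float(v_i) > 0.5:
        return mvfb(pred_prev, flow_prev)
    return pred_prev, flow_prev
```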
Differentiable Routing. To train the proposed Routing Module, we need to constrain the probability values to prevent the model from falling into trivial solutions (e.g., selecting all blocks). On the other hand, we allow this module to participate in the gradient calculation to achieve end-to-end training. We introduce the Gumbel Softmax [27] and the Straight-Through Estimator (STE) [3] to tackle this issue.

One popular method to make the routing probability $\tilde{v}$ learnable is the Gumbel Softmax technique [24, 27]. By treating the selection of each MVFB as a binary classification task, the soft dynamic routing vector $v\in \mathbb{R}^n$ is

$$
v_{i} = \frac{\exp\left(\frac{1}{\tau}\left(\tilde{v}_{i} + G_{i}\right)\right)}{\exp\left(\frac{1}{\tau}\left(\tilde{v}_{i} + G_{i}\right)\right) + \exp\left(\frac{1}{\tau}\left(2 - \tilde{v}_{i} - G_{i}\right)\right)}, \tag{9}
$$

where $i = 1, \dots, n$, $G_{i} \in \mathbb{R}$ is Gumbel noise following the Gumbel(0, 1) distribution, and $\tau$ is a temperature parameter. We start at a very high temperature to ensure that all possible paths become candidates, and then attenuate the temperature to a small value to approximate a one-hot distribution. To encourage the sum of the routing entries $\{v_{i}\}_{i=1}^{n}$ to be small, we add the regularization term $\left(\frac{1}{n} \sum_{i=1}^{n} v_{i}\right)$ to the final loss function. However, we experimentally find that our DMVFN usually converges to an input-independent structure as the temperature decreases. We conjecture that the control of the temperature parameter $\tau$ and the design of the regularization term require further study.

Inspired by previous research on low-bit-width neural networks [23, 74], we adopt an STE for Bernoulli Sampling (STEBS) to make the binary dynamic routing vector differentiable. An STE can be regarded as an operator that has arbitrary forward and backward operations. Formally,

$$
\tilde{w}_{i} = \min\left(\beta \times n \times \sigma(\tilde{v}_{i}) \Big/ \sum_{j=1}^{n} \sigma(\tilde{v}_{j}),\ 1\right), \tag{10}
$$

$$
\text{STE Forward: } v_{i} \sim \text{Bernoulli}(\tilde{w}_{i}), \tag{11}
$$

$$
\text{STE Backward: } \frac{\partial o}{\partial \tilde{w}} = \frac{\partial o}{\partial v}, \tag{12}
$$

where $\sigma$ is the sigmoid function and $o$ denotes the objective function. We use the well-defined gradient $\frac{\partial o}{\partial v}$ as an approximation of $\frac{\partial o}{\partial \tilde{w}}$ to construct the backward pass. In Eqn. (10), we normalize the sampling rate. During training, $\beta$ is fixed at 0.5. We can adjust the hyper-parameter $\beta$ to control the complexity in the inference phase.

# 3.3. Implementation Details

Loss function. Our training loss $L_{\mathrm{total}}$ is the sum of the reconstruction losses of the output $\tilde{I}_{t+1}^{i}$ of each block:

$$
L_{\mathrm{total}} = \sum_{i=1}^{n} \gamma^{n-i}\, d\left(\tilde{I}_{t+1}^{i}, I_{t+1}\right), \tag{13}
$$

where $d$ is the $\ell_1$ loss calculated on the Laplacian pyramid representations [42] extracted from each pair of images. We set $\gamma = 0.8$ in our experiments, following [54].

Training strategy. Our DMVFN is trained on $224 \times 224$ image patches. The batch size is set to 64. We employ the AdamW optimizer [30, 36] with a weight decay of $10^{-4}$. We use a cosine annealing strategy to reduce the learning rate from $10^{-4}$ to $10^{-5}$. Our model is trained on four 2080Ti GPUs for 300 epochs, which takes about 35 hours.
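To make the routing mechanism of §3.2 concrete, below is a minimal PyTorch-style sketch of the Routing Module with the straight-through Bernoulli sampling of Eqns. (7) and (10)-(12). The convolutional stem, channel widths, and layer count are illustrative assumptions, not the released architecture.

```python
import torch
import torch.nn as nn


class RoutingModule(nn.Module):
    """Sketch: predict a binary routing vector v in {0,1}^n from two frames."""

    def __init__(self, n_blocks=9, beta=0.5, channels=32):
        super().__init__()
        self.n, self.beta = n_blocks, beta
        # Illustrative stem; the paper only specifies "Convs + AvgPooling + Linear".
        self.convs = nn.Sequential(
            nn.Conv2d(6, channels, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(channels, channels, 3, stride=2, padding=1), nn.ReLU(),
        )
        self.linear = nn.Linear(channels, n_blocks)

    def forward(self, i_tm1, i_t):
        feat = self.convs(torch.cat((i_tm1, i_t), dim=1))
        v_tilde = self.linear(feat.mean(dim=(2, 3)))            # Eqn. (7)
        p = torch.sigmoid(v_tilde)
        # Eqn. (10): rescale so that, on average, about beta * n blocks are kept.
        w = torch.clamp(self.beta * self.n * p / p.sum(dim=1, keepdim=True), max=1.0)
        v_hard = torch.bernoulli(w)                              # Eqn. (11), forward
        # Straight-through estimator (Eqn. (12)): the forward pass uses v_hard,
        # while the backward pass routes the gradient to w unchanged.
        return v_hard.detach() + w - w.detach()
```

At inference time, changing `beta` trades prediction quality for computation, as noted above.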
# 4. Experiments

# 4.1. Dataset and Metric

Dataset. We use several datasets in the experiments:

The Cityscapes dataset [9] contains 3,475 driving videos with a resolution of $2048 \times 1024$. We use 2,945 videos for training (Cityscapes-Train) and 500 videos for testing (Cityscapes-Test).

Table 2. Quantitative results on the DAVIS17-Val [43] and Vimeo90K-Test [69] benchmarks. We denote DMVFN without routing as "DMVFN (w/o r)". "N/A" means not available.
UCF101-Train→DAVIS17-Val:

| Method | GFLOPs ↓ | MS-SSIM ($\times 10^{-2}$) ↑ (t+1 / t+3) | LPIPS ($\times 10^{-2}$) ↓ (t+1 / t+3) |
| --- | --- | --- | --- |
| DVF [35] | 324.15 | 68.61 / 55.47 | 23.23 / 34.22 |
| DYAN [34] | 130.12 | 78.96 / 70.41 | 13.09 / 21.43 |
| OPT [63] | 165312.80 | 83.26 / 73.85 | 11.40 / 18.21 |
| DMVFN (w/o r) | 19.39 | 84.81 / 75.05 | 9.41 / 16.24 |
| DMVFN | 9.96 | 83.97 / 74.81 | 9.96 / 17.28 |

UCF101-Train→Vimeo90K-Test:

| Method | GFLOPs ↓ | MS-SSIM ($\times 10^{-2}$) ↑ (t+1) | LPIPS ($\times 10^{-2}$) ↓ (t+1) |
| --- | --- | --- | --- |
| DVF [35] | 89.64 | 92.11 | 7.73 |
| DYAN [34] | N/A | N/A | N/A |
| OPT [63] | 45716.20 | 96.75 | 3.59 |
| DMVFN (w/o r) | 5.36 | 97.24 | 3.30 |
| DMVFN | 2.77 | 97.01 | 3.69 |
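Tables 1 and 2 report MS-SSIM and LPIPS. A hedged sketch of how these two metrics can be computed for a batch of predicted and ground-truth frames, assuming the third-party pytorch_msssim and lpips packages (which are not part of this paper's code):

```python
import lpips                        # perceptual metric of Zhang et al. [73]
from pytorch_msssim import ms_ssim  # multi-scale SSIM of Wang et al. [61]

lpips_fn = lpips.LPIPS(net="alex")


def evaluate_pair(pred, gt):
    """pred, gt: (N, 3, H, W) tensors in [0, 1]; returns (MS-SSIM, LPIPS)."""
    msssim = ms_ssim(pred, gt, data_range=1.0).item()
    # LPIPS expects inputs scaled to [-1, 1]; lower values are better.
    lp = lpips_fn(pred * 2 - 1, gt * 2 - 1).mean().item()
    return msssim, lp
```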
![](images/56c72b869fbd430a17843e2c62d63c580540d90393ef8845f5d27046fa2f5bb3.jpg)
Figure 4. Prediction comparison on KITTI. The yellow line is aligned with the car in the ground truth. The results show that previous methods (DVF [35], FVS [62], and OPT [63]) cannot accurately predict the car's location in long-term prediction. The motion predicted by our DMVFN is the most similar to the ground truth, while the errors of the other methods grow larger with time. The fences predicted by DMVFN remain vertical when moving.

The KITTI dataset [12] contains 28 driving videos with a resolution of $375 \times 1242$. 24 videos are used for training (KITTI-Train) and the remaining four videos are used for testing (KITTI-Test).

The UCF101 dataset [50] contains 13,320 videos under 101 different action categories, with a resolution of $240\times 320$. We only use the training subset of UCF101 [50].

The Vimeo90K dataset [69] has 51,312 triplets for training, where each triplet contains three consecutive video frames with a resolution of $256 \times 448$. There are 3,782 triplets in the Vimeo90K testing set. We denote the training and testing subsets as Vimeo-Train and Vimeo-Test, respectively.

DAVIS17 [43] has videos with resolutions around $854 \times 480$. We use DAVIS17-Val, which contains 30 videos, as the test set.

Configurations. We have four experimental configurations following previous works [34, 35, 63]:

- Cityscapes-Train $\rightarrow$ Cityscapes-Test
- KITTI-Train $\rightarrow$ KITTI-Test
- UCF101 $\rightarrow$ DAVIS17-Val
- UCF101 $\rightarrow$ Vimeo-Test

Here, the left and right sides of the arrow represent the training set and the test set, respectively. For a fair comparison with other methods that are not tailored for high-resolution videos, we follow the setting in [62] and resize the images in Cityscapes [9] to $1024 \times 512$ and the images in KITTI [12] to $256 \times 832$, respectively. During inference on Cityscapes [9] and KITTI [12], we predict the next five frames. We predict the next three frames for DAVIS17-Val [43] and the next one frame for Vimeo-Test [69], respectively. Note that OPT [63] is an optimization-based approach and uses pretrained RAFT [54] and RIFE [22] models. RIFE [22] and RAFT [54] are trained on the Vimeo-Train dataset [69] and the Flying Chairs dataset [11], respectively.

Evaluation metrics. Following previous works [63], we use the Multi-Scale Structural Similarity Index Measure (MS-SSIM) [61] and a perceptual metric, LPIPS [73], for quantitative evaluation. To measure the model complexity, we calculate the GFLOPs.

# 4.2. Comparison to State-of-the-Arts

We compare our DMVFN with state-of-the-art video prediction methods. These methods fall into two categories: methods requiring only RGB images as input (e.g., PredNet [37], MCNET [58], DVF [35], CorrWise [13], OPT [63]) and methods requiring extra information as input (e.g., Vid2vid [59], Seg2vid [41], FVS [62], SADM [2]).

Quantitative results. The quantitative results are reported in Table 1 and Table 2. When calculating the GFLOPs of OPT [63], the number of iterations is set as 3,000.

![](images/3f0733e866e576f4d0e58151b4572ae6af34dbdc8fb0b18269c865f2665ccaa7.jpg)
(a)

![](images/39725d0d7e1000ea5e1de7127b2b1616a228e7669411d738c8dd911c01cbc418.jpg)
(b)

![](images/08080ea454f757a59be66a65cdc509558e3ac92d93123950f63248238175048b.jpg)
(c)

![](images/4c8a4c27bf3832b14061b616cb0ab93ca2ae743d6b999ccf8921122aa33b3b90.jpg)
Figure 5. (a): Average usage rate on videos with different motion magnitudes. "Fast": tested on Vimeo-Fast. "Medium": tested on Vimeo-Medium. "Slow": tested on Vimeo-Slow. (b): Difference between "Fast"/"Slow" and "Medium" in (a). (c): Average usage rate for different time intervals between the two input frames from Vimeo-Slow. "Int": time interval.

$t - 1$

![](images/3d4ed97fc99b344c776ee05da6abd26ec989a8e76a6701f109f015efc2fb44.jpg)
$t$
Figure 6. Visual effect comparison on the Vimeo-Test [69] dataset. Our DMVFN faithfully reproduces the motion of the hand and the head with less distortion and fewer artifacts.

![](images/3a68218ddcd92346e21efec19d1a79797112fb3802e836176981175cf5eb873d.jpg)

![](images/737a7e019ba2a9232a98e72cc2e09a2628da9a7eaa613f08940afc7d6fb84fa1.jpg)
$t + 1$ $t + 3$

In terms of MS-SSIM and LPIPS, our DMVFN achieves much better results than the other methods in both short-term and long-term video prediction tasks. The GFLOPs of our DMVFN are considerably smaller than those of the comparison methods. These results show that the proposed routing strategy reduces the GFLOPs by almost half while maintaining comparable performance. Because the decrease in GFLOPs is not strictly linear with the actual latency [45], we also measure the running speed on a Titan 2080Ti. For predicting a 720P frame, DVF [35] spends 0.130s on average, while our DMVFN only needs 0.023s on average.

Table 3. Comparison between DMVFN and STRPM.
| Method | UCF Sports t+1 (PSNR↑ / LPIPS↓) | UCF Sports t+6 (PSNR↑ / LPIPS↓) | Human3.6M t+1 (PSNR↑ / LPIPS↓) | Human3.6M t+4 (PSNR↑ / LPIPS↓) |
| --- | --- | --- | --- | --- |
| STRPM | 28.54 / 20.69 | 20.59 / 41.11 | 33.32 / 9.74 | 29.01 / 10.44 |
| DMVFN | 30.05 / 10.24 | 22.67 / 22.50 | 35.07 / 7.48 | 29.56 / 9.74 |
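The wall-clock numbers quoted in this section (e.g., 0.023s per 720P frame for DMVFN) depend on careful GPU timing. Below is a minimal sketch of such a measurement, assuming a hypothetical `model(frame_a, frame_b)` interface; the warm-up and synchronization steps are standard practice rather than details specified in the paper.

```python
import time
import torch


@torch.no_grad()
def average_latency(model, frame_a, frame_b, warmup=10, iters=100):
    """Return the mean per-frame inference time in seconds on the current GPU."""
    for _ in range(warmup):          # warm-up runs exclude one-time initialization
        model(frame_a, frame_b)
    torch.cuda.synchronize()         # make sure all queued kernels have finished
    start = time.time()
    for _ in range(iters):
        model(frame_a, frame_b)
    torch.cuda.synchronize()
    return (time.time() - start) / iters
```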
More comparison. The quantitative results compared with STRPM [8] are reported in Table 3. We train our DMVFN on the UCF Sports and Human3.6M datasets following the setting in [8]. We also measure the average running speed on a TITAN 2080Ti. To predict a $1024 \times 1024$ frame, our DMVFN is on average $4.06\times$ faster than STRPM [8].

Qualitative results on different datasets are shown in Figure 3, Figure 4, and Figure 6. As we can see, the frames predicted by our DMVFN exhibit better temporal continuity and are more consistent with the ground truth than those of the other methods. Our DMVFN is able to predict correct motion while preserving the shape and texture of objects.

# 4.3. Ablation Study

Here, we perform extensive ablation studies to further study the effectiveness of the components in our DMVFN. The experiments are performed on the Cityscapes [9] and KITTI [12] datasets unless otherwise specified.

1) How effective is the proposed Routing Module? As suggested in [65, 66], we divide the Vimeo-90K [69] test set into three subsets: Vimeo-Fast, Vimeo-Medium, and Vimeo-Slow, which correspond to different motion magnitudes. To verify that our DMVFN can perceive motion scales and adaptively choose the proper sub-networks, we retrain our DMVFN on Vimeo-Train [69] using the same training strategy as in §3.3. We calculate the average usage rate of each MVFB on the three test subsets. From Figures 5 (a) and 5 (b), we observe that our DMVFN prefers to select MVFBs with a large scaling factor (e.g., $4\times$) for two frames with large motion. There are two MVFBs with clearly smaller selection probability. We believe this reflects the inductive bias of our DMVFN on different combinations of scaling factors.

To further verify that our DMVFN also perceives the size of the time interval, we test our DMVFN on two frames with different time intervals (but still in the same video). We choose Vimeo-Slow as the test set, and set the time intervals as 1, 3, and 5. The results are shown in Figure 5 $(c)$. We observe that our DMVFN prefers large-scale blocks on long-interval inputs, and small-scale blocks on short-interval inputs. This verifies that our DMVFN can perceive temporal information and dynamically select different sub-networks to handle input frames with different time intervals.

To further study how the MVFBs are selected, we select 103 video sequences (each containing a high-speed moving car and a relatively static background) from the KITTI dataset, denoted as KITTI-A. As shown in Table 4, on the KITTI-A dataset, our DMVFN prefers to choose MVFBs with large scaling factors to capture large movements. The flow estimation for static backgrounds is straightforward, while the large motion dominates the choice of our DMVFN.

Table 4. Average usage rate ($\times 10^{-2}$) of MVFBs in our DMVFN.
| Scale | 4 | 4 | 4 | 2 | 2 | 2 | 1 | 1 | 1 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| KITTI-A | 80.95 | 34.22 | 26.70 | 81.19 | 73.91 | 44.90 | 55.34 | 0.49 | 0 |
Table 5. The Routing Module based on STEBS is effective. The evaluation metric is MS-SSIM ($\times 10^{-2}$).
| Setting | Cityscapes t+1 | Cityscapes t+3 | Cityscapes t+5 | KITTI t+1 | KITTI t+3 | KITTI t+5 |
| --- | --- | --- | --- | --- | --- | --- |
| Copy last frame | 76.95 | 68.82 | 64.45 | 58.31 | 48.99 | 44.16 |
| w/o routing | 95.29 | 87.91 | 81.48 | 88.06 | 76.53 | 68.29 |
| Random | 91.97 | 82.11 | 70.05 | 81.31 | 69.89 | 62.42 |
| Gumbel Softmax | 95.05 | 87.57 | 79.54 | 87.42 | 75.56 | 65.83 |
| STEBS | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52 |
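For reference, the "Gumbel Softmax" baseline compared in Table 5 relaxes each block choice as in Eqn. (9). A minimal sketch; the clamping used for numerical stability is an implementation assumption.

```python
import torch


def gumbel_noise(shape):
    """Sample standard Gumbel(0, 1) noise."""
    u = torch.rand(shape).clamp(1e-9, 1.0 - 1e-9)
    return -torch.log(-torch.log(u))


def soft_routing_gumbel(v_tilde, tau):
    """Binary Gumbel-Softmax relaxation of Eqn. (9) for each block probability."""
    g = gumbel_noise(v_tilde.shape)
    pos = torch.exp((v_tilde + g) / tau)
    neg = torch.exp((2.0 - v_tilde - g) / tau)
    return pos / (pos + neg)
```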
2) How to design the Routing Module? A trivial solution is to process the routing probability $\tilde{v}$ with Gumbel Softmax. The comparison results of our DMVFN with different differentiable routing methods are summarized in Table 5. Our DMVFN with STEBS outperforms the DMVFN variant with Gumbel Softmax on MS-SSIM, especially for long-term prediction. The DMVFN variant with Gumbel Softmax usually degenerates to a fixed and static structure. We also compare with the DMVFN variant that randomly selects each MVFB with probability 0.5 (denoted as "Random") and the one without a routing module (denoted as "w/o routing").

Table 6. Results of our DMVFN with different scaling factor settings. The evaluation metric is MS-SSIM ($\times 10^{-2}$).
| Setting in DMVFN | Cityscapes t+1 | Cityscapes t+3 | Cityscapes t+5 | KITTI t+1 | KITTI t+3 | KITTI t+5 |
| --- | --- | --- | --- | --- | --- | --- |
| [1] | 94.70 | 87.26 | 80.93 | 87.64 | 76.71 | 68.76 |
| [2, 1] | 95.30 | 87.93 | 82.02 | 87.97 | 77.23 | 69.58 |
| [4, 2, 1] | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52 |
3) How to set the scaling factors? We evaluate our DMVFN with different scaling factors. We use three non-increasing factor sequences, "[1, 1, 1, 1, 1, 1, 1, 1, 1]", "[2, 2, 2, 2, 2, 1, 1, 1, 1]", and "[4, 4, 4, 2, 2, 2, 1, 1, 1]", denoted as "[1]", "[2, 1]", and "[4, 2, 1]", respectively. The results are listed in Table 6. Our DMVFN with "[4, 2, 1]" performs better than that with "[2, 1]" or "[1]". The gap is more obvious for longer-term future frames.

Table 7. The spatial path is effective in our DMVFN. The evaluation metric is MS-SSIM ($\times 10^{-2}$).
| Setting | Cityscapes t+1 | Cityscapes t+3 | Cityscapes t+5 | KITTI t+1 | KITTI t+3 | KITTI t+5 |
| --- | --- | --- | --- | --- | --- | --- |
| w/o r, w/o path | 94.99 | 87.59 | 80.98 | 87.75 | 76.22 | 67.86 |
| w/o r | 95.29 | 87.91 | 81.48 | 88.06 | 76.53 | 68.29 |
| w/o path | 95.55 | 88.89 | 83.03 | 88.29 | 77.53 | 69.86 |
| DMVFN | 95.73 | 89.24 | 83.45 | 88.53 | 78.01 | 70.52 |
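Table 7 ablates the spatial path inside each MVFB. For reference, below is a minimal sketch of the two-branch MVFB design described in §3.2: a motion path that runs at $1/S^i$ resolution to enlarge the receptive field, plus a full-resolution spatial path. All channel widths and layer counts here are illustrative assumptions, not the released architecture.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class MVFB(nn.Module):
    """Sketch of one Multi-scale Voxel Flow Block with scaling factor s."""

    def __init__(self, s, in_ch=14, ch=64):
        super().__init__()
        self.s = s
        # Motion path: runs on inputs downsampled by the scaling factor s.
        self.motion = nn.Sequential(
            nn.Conv2d(in_ch, ch, 3, padding=1), nn.PReLU(),
            nn.Conv2d(ch, ch, 3, padding=1), nn.PReLU(),
        )
        # Spatial path: keeps the full resolution to preserve spatial detail.
        self.spatial = nn.Sequential(nn.Conv2d(in_ch, ch // 4, 3, padding=1), nn.PReLU())
        # Head predicts a voxel-flow residual: two 2-channel flows plus a fusion map.
        self.head = nn.Conv2d(ch + ch // 4, 5, 3, padding=1)

    def forward(self, x, flow_prev):
        # x: concatenation of I_{t-1}, I_t, the previous prediction, and the previous
        # voxel flow (hence the assumed 14 input channels); flow_prev: (N, 5, H, W).
        small = F.interpolate(x, scale_factor=1.0 / self.s, mode="bilinear",
                              align_corners=False)
        motion_feat = F.interpolate(self.motion(small), size=x.shape[-2:],
                                    mode="bilinear", align_corners=False)
        feat = torch.cat((motion_feat, self.spatial(x)), dim=1)
        # The refined voxel flow; the fused frame is then obtained via Eqns. (1)-(3).
        return flow_prev + self.head(feat)
```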
4) How effective is the spatial path? To verify the effectiveness of the spatial path in our DMVFN, we compare it with the DMVFN without the spatial path (denoted as "w/o path"). The results listed in Table 7 show that our DMVFN enjoys better performance with the spatial path, both with and without the routing module (denoted as "w/o r").

# 5. Conclusion

In this work, we developed an efficient Dynamic Multi-scale Voxel Flow Network (DMVFN) that surpasses previous video prediction methods in dealing with complex motions of different scales. With the proposed routing module, our DMVFN adaptively activates different sub-networks based on the input frames, improving the prediction performance while reducing the computation costs. Experiments on diverse benchmark datasets demonstrated that our DMVFN achieves state-of-the-art performance with a greatly reduced computation burden. We believe our DMVFN can provide general insights for long-term prediction, video frame synthesis, and representation learning [14, 15]. We hope our DMVFN will inspire further research in light-weight video processing and make video prediction more accessible for downstream tasks such as codecs for streaming video.

Our DMVFN can be improved in several aspects. Firstly, iteratively predicting future frames suffers from accumulated errors. This issue may be addressed by further bringing explicit temporal modeling [22, 31, 66, 68] into our DMVFN. Secondly, our DMVFN simply selects the nodes in a chain network topology, which can be improved by exploring more complex topologies. For example, our routing module can be extended to automatically determine the scaling factors for parallel branches [33]. Thirdly, our current forecasts are essentially extrapolations of past flow information; modeling forecast uncertainty, especially when the future motion bifurcates, exceeds the current capability of our DMVFN. We believe that research on long-term forecast uncertainty may uncover a deeper interplay with dynamic modeling methods [1, 14].

Acknowledgements. We sincerely thank Wen Heng for his exploration of neural architecture search at Megvii Research and Tianyuan Zhang for meaningful suggestions. This work is supported in part by the National Natural Science Foundation of China (No. 62002176 and 62176068).

# References

[1] Adil Kaan Akan, Erkut Erdem, Aykut Erdem, and Fatma Güney. Slamp: Stochastic latent appearance and motion prediction. In ICCV, 2021. 8
[2] Xinzhu Bei, Yanchao Yang, and Stefano Soatto. Learning semantic-aware dynamics for video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2021. 2, 5, 6
[3] Yoshua Bengio, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013. 5
[4] Tolga Bolukbasi, Joseph Wang, Ofer Dekel, and Venkatesh Saligrama. Adaptive neural networks for efficient inference. In Int. Conf. Mach. Learn., 2017. 4
[5] Wonmin Byeon, Qin Wang, Rupesh Kumar Srivastava, and Petros Koumoutsakos. Contextvp: Fully context-aware video prediction. In Eur. Conf. Comput. Vis., 2018. 1
[6] Lluis Castrejon, Nicolas Ballas, and Aaron Courville. Improved conditional vrnns for video prediction. In Int. Conf. Comput. Vis., 2019. 1
[7] Rohan Chandra, Uttaran Bhattacharya, Aniket Bera, and Dinesh Manocha. Traphic: Trajectory prediction in dense and heterogeneous traffic using weighted interactions. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1
[8] Zheng Chang, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao.
Strpm: A spatiotemporal residual predictive model for high-resolution video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 7 +[9] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 1, 2, 5, 6, 7 +[10] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Int. Conf. Comput. Vis., 2017. 2 +[11] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Int. Conf. Comput. Vis., 2015. 2, 6 +[12] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. I. J. Robotics Res., 2013. 2, 5, 6, 7 +[13] Daniel Geng, Max Hamilton, and Andrew Owens. Comparing correspondences: Video prediction with correspondence-wise losses. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 5, 6 +[14] David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018. 8 +[15] Danijar Hafner, Jurgis Pasukonis, Jimmy Ba, and Timothy Lillicrap. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023. 8 +[16] Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. Dynamic neural networks: A survey. In IEEE Trans. Pattern Anal. Mach. Intell., 2021. 2, 4 + +[17] Yunhui Han, Kunming Luo, Ao Luo, Jiangyu Liu, Haoqiang Fan, Guiming Luo, and Shuaicheng Liu. Realflow: Embased realistic optical flow dataset generation from videos. In ECCV, 2022. 2 +[18] Adam W Harley, Konstantinos G Derpanis, and Iasonas Kokkinos. Segmentation-aware convolutional networks using local attention masks. In Int. Conf. Comput. Vis., 2017. 2 +[19] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. In Neural Comput., 1997. 1 +[20] Xiaotao Hu, Jun Xu, Shuhang Gu, Ming-Ming Cheng, and Li Liu. Restore globally, refine locally: A mask-guided scheme to accelerate super-resolution networks. In Eur. Conf. Comput. Vis., 2022. 2 +[21] Zhaoyang Huang, Xiaoyu Shi, Chao Zhang, Qiang Wang, Ka Chun Cheung, Hongwei Qin, Jifeng Dai, and Hongsheng Li. Flowformer: A transformer architecture for optical flow. In Eur. Conf. Comput. Vis., 2022. 2 +[22] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Eur. Conf. Comput. Vis., 2022, 1, 2, 3, 6, 8 +[23] Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Quantized neural networks: Training neural networks with low precision weights and activations. In J. Mach. Learn. Res., 2017. 5 +[24] Ryan Humble, Maying Shen, Jorge Albericio Latorre, Eric Darve, and Jose Alvarez. Soft masking for cost-constrained channel pruning. In Eur. Conf. Comput. Vis., 2022. 5 +[25] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. Flownet 2.0: Evolution of optical flow estimation with deep networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2 +[26] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Adv. Neural Inform. Process. Syst., 2015. 3 +[27] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In Int. Conf. Learn. Represent., 2017. 
5 +[28] Huaizu Jiang, Deqing Sun, Varun Jampani, Ming-Hsuan Yang, Erik Learned-Miller, and Jan Kautz. Super slomo: High quality estimation of multiple intermediate frames for video interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 3 +[29] Rico Jonschkowski, Austin Stone, Jonathan T Barron, Ariel Gordon, Kurt Konolige, and Anelia Angelova. What matters in unsupervised optical flow. In ECCV, 2020. 2 +[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Int. Conf. Learn. Represent., 2015. 5 +[31] Lingtong Kong, Boyuan Jiang, Donghao Luo, Wenqing Chu, Xiaoming Huang, Ying Tai, Chengjie Wang, and Jie Yang. Ifrnet: Intermediate feature refine network for efficient frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 8 +[32] Wonkwang Lee, Whie Jung, Han Zhang, Ting Chen, Jing Yu Koh, Thomas Huang, Hyungsuk Yoon, Honglak Lee, and + +Seunghoon Hong. Revisiting hierarchical approach for persistent long-term video prediction. In Int. Conf. Learn. Represent., 2021. 2 +[33] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Int. Conf. Learn. Represent., 2019. 8 +[34] Wenqian Liu, Abhishek Sharma, Octavia Camps, and Mario Sznaier. Dyan: A dynamical atoms-based network for video prediction. In Eur. Conf. Comput. Vis., 2018. 6 +[35] Ziwei Liu, Raymond A Yeh, Xiaou Tang, Yiming Liu, and Aseem Agarwala. Video frame synthesis using deep voxel flow. In Int. Conf. Comput. Vis., 2017. 1, 2, 3, 4, 5, 6, 7 +[36] Ilya Loshchilov and F. Hutter. Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101, 2017. 5 +[37] William Lotter, Gabriel Kreiman, and David Cox. Deep predictive coding networks for video prediction and unsupervised learning. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6 +[38] Kunming Luo, Chuan Wang, Shuaicheng Liu, Haoqiang Fan, Jue Wang, and Jian Sun. Upflow: Upsampling pyramid for unsupervised optical flow learning. In CVPR, 2021. 2 +[39] Julieta Martinez, Michael J Black, and Javier Romero. On human motion prediction using recurrent neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 1 +[40] Sergiu Oprea, Pablo Martinez-Gonzalez, Alberto Garcia-Garcia, John Alejandro Castro-Vargas, Sergio Orts-Escolano, Jose Garcia-Rodriguez, and Antonis Argyros. A review on deep learning techniques for video prediction. In IEEE Trans. Pattern Anal. Mach. Intell., 2020. 1 +[41] Junting Pan, Chengyu Wang, Xu Jia, Jing Shao, Lu Sheng, Junjie Yan, and Xiaogang Wang. Video generation from single semantic label map. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1, 2, 5, 6 +[42] Sylvain Paris, Samuel W Hasinoff, and Jan Kautz. Local laplacian filters: edge-aware image processing with a laplacian pyramid. ACM Trans. Graph., 2011. 5 +[43] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 davis challenge on video object segmentation. arXiv preprint arXiv:1704.00675, 2017. 2, 4, 6 +[44] Xiaojuan Qi, Zhengzhe Liu, Qifeng Chen, and Jiaya Jia. 3d motion decomposition for rgbd future dynamic scene synthesis. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2 +[45] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólár. Designing network design spaces. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7 +[46] Anurag Ranjan and Michael J Black. Optical flow estimation using a spatial pyramid network. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 
2, 4 +[47] Mengye Ren, Andrei Pokrovsky, Bin Yang, and Raquel Urtasun. Sbnet: Sparse blocks network for fast inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2 +[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. In Adv. Neural Inform. Process. Syst., 2015. 1 + +[49] Hyeonjun Sim, Jihyong Oh, and Munchurl Kim. Xvfi: extreme video frame interpolation. In Int. Conf. Comput. Vis., 2021. 1 +[50] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. A dataset of 101 human action classes from videos in the wild. *Cent. Res. Comput. Vis.*, 2012. 6 +[51] Hang Su, Varun Jampani, Deqing Sun, Orazio Gallo, Erik Learned-Miller, and Jan Kautz. Pixel-adaptive convolutional neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2 +[52] Yu-Chuan Su and Kristen Grauman. Leaving some stones unturned: dynamic feature prioritization for activity detection in streaming video. In *Eur. Conf. Comput. Vis.*, 2016. 2 +[53] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and cost volume. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2, 4 +[54] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Eur. Conf. Comput. Vis., 2020, 1, 2, 5, 6 +[55] Surat Teerapittayanon, Bradley McDanel, and Hsiang-Tsung Kung. Branchynet: Fast inference via early exiting from deep neural networks. In Int. Conf. Pattern Recog., 2016. 4 +[56] Andreas Veit and Serge Belongie. Convolutional networks with adaptive inference graphs. In Eur. Conf. Comput. Vis., 2018. 2 +[57] Thomas Verelst and Tinne Tuytelaars. Dynamic convolutions: Exploiting spatial sparsity for faster inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 2 +[58] Ruben Villegas, Jimei Yang, Seunghoon Hong, Xunyu Lin, and Honglak Lee. Decomposing motion and content for natural video sequence prediction. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6 +[59] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In Adv. Neural Inform. Process. Syst., 2018. 1, 2, 5, 6 +[60] Xin Wang, Fisher Yu, Zi-Yi Dou, Trevor Darrell, and Joseph E Gonzalez. Skipnet: Learning dynamic routing in convolutional networks. In Eur. Conf. Comput. Vis., 2018. 2 +[61] Zhou Wang, Eero P Simoncelli, and Alan C Bovik. Multiscale structural similarity for image quality assessment. In Asilomar Conf. Signals Syst. Comput., 2003. 6 +[62] Yue Wu, Rongrong Gao, Jaesik Park, and Qifeng Chen. Future video synthesis with object motion prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 1, 2, 5, 6 +[63] Yue Wu, Qiang Wen, and Qifeng Chen. Optimizing video prediction via video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 1, 3, 5, 6 +[64] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2 +[65] Xiaoyu Xiang, Yapeng Tian, Yulun Zhang, Yun Fu, Jan P Allebach, and Chenliang Xu. Zooming slow-mo: Fast and + +accurate one-stage space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7 +[66] Gang Xu, Jun Xu, Zhen Li, Liang Wang, Xing Sun, and Ming-Ming Cheng. Temporal modulation network for controllable space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., June 2021. 
7, 8 +[67] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, and Dacheng Tao. Gmflow: Learning optical flow via global matching. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 4 +[68] Xiangyu Xu, Li Siyao, Wenxiu Sun, Qian Yin, and Ming-Hsuan Yang. Quadratic video interpolation. In Adv. Neural Inform. Process. Syst., 2019. 2, 3, 8 +[69] Tianfan Xue, Baian Chen, Jiajun Wu, Donglai Wei, and William T Freeman. Video enhancement with task-oriented flow. In Int. J. Comput. Vis., 2019. 2, 3, 6, 7 +[70] Serena Yeung, Olga Russakovsky, Greg Mori, and Li Fei-Fei. End-to-end learning of action detection from frame glimpses in videos. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 2 +[71] Changqian Yu, Jingbo Wang, Chao Peng, Changxin Gao, Gang Yu, and Nong Sang. Bisenet: Bilateral segmentation network for real-time semantic segmentation. In Eur. Conf. Comput. Vis., 2018. 4 +[72] Guozhen Zhang, Yuhan Zhu, Haonan Wang, Youxin Chen, Gangshan Wu, and Limin Wang. Extracting motion and appearance via inter-frame attention for efficient video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 2 +[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 6 +[74] Shuchang Zhou, Yuxin Wu, Zekun Ni, Xinyu Zhou, He Wen, and Yuheng Zou. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint arXiv:1606.06160, 2016. 5 +[75] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Malik, and Alexei A Efros. View synthesis by appearance flow. In Eur. Conf. Comput. Vis., 2016. 3 +[76] Xizhou Zhu, Han Hu, Stephen Lin, and Jifeng Dai. Deformable convnets v2: More deformable, better results. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2 +[77] Yi Zhu, Karan Sapra, Fitsum A Reda, Kevin J Shih, Shawn Newsam, Andrew Tao, and Bryan Catanzaro. Improving semantic segmentation via video propagation and label relaxation. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 
5 \ No newline at end of file diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/images.zip b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..9647e493277c3203b98137d4846e8f5aa9e460a5 --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dda4911fe69ea74eb609dd4406116644c438adbf612dc936791799253c0b6c2c +size 716423 diff --git a/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/layout.json b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..623f1d21cc8f02ff7c1328db67e2b0277c1d5387 --- /dev/null +++ b/2023/A Dynamic Multi-Scale Voxel Flow Network for Video Prediction/layout.json @@ -0,0 +1,10992 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 95, + 103, + 500, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 103, + 500, + 120 + ], + "spans": [ + { + "bbox": [ + 95, + 103, + 500, + 120 + ], + "type": "text", + "content": "A Dynamic Multi-Scale Voxel Flow Network for Video Prediction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "spans": [ + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": "Xiaotao Hu" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": " Zhwei Huang" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": " Ailin Huang" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": " Jun Xu" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{4,*}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": " Shuchang Zhou" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{2,*}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": "College of Computer Science, Nankai University " + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": "Megvii Technology \n" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": "Wuhan University " + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 91, + 142, + 507, + 186 + ], + "type": "text", + "content": "School of Statistics and Data Science, Nankai University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 187, + 535, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 187, + 535, + 213 + ], + "spans": [ + 
{ + "bbox": [ + 59, + 187, + 535, + 213 + ], + "type": "text", + "content": "{huxiaotao, huangzhewei, huangailin, zhoushuchang}@megvii.com, nankaimathxujun@gmail.com https://huxiaotaostasy.github.io/DMVFN/" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "spans": [ + { + "bbox": [ + 143, + 241, + 192, + 253 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 266, + 290, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 266, + 290, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 266, + 290, + 460 + ], + "type": "text", + "content": "The performance of video prediction has been greatly boosted by advanced deep neural networks. However, most of the current methods suffer from large model sizes and require extra inputs, e.g., semantic/depth maps, for promising performance. For efficiency consideration, in this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to achieve better video prediction performance at lower computational costs with only RGB images, than previous methods. The core of our DMVFN is a differentiable routing module that can effectively perceive the motion scales of video frames. Once trained, our DMVFN selects adaptive sub-networks for different inputs at the inference stage. Experiments on several benchmarks demonstrate that our DMVFN is an order of magnitude faster than Deep Voxel Flow [35] and surpasses the state-of-the-art iterative-based OPT [63] on generated image quality." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 481, + 128, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 128, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 128, + 495 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 502, + 287, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 502, + 287, + 586 + ], + "spans": [ + { + "bbox": [ + 46, + 502, + 287, + 586 + ], + "type": "text", + "content": "Video prediction aims to predict future video frames from the current ones. The task potentially benefits the study on representation learning [40] and downstream forecasting tasks such as human motion prediction [39], autonomous driving [6], and climate change [48], etc. During the last decade, video prediction has been increasingly studied in both academia and industry community [5, 7]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 586, + 287, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 586, + 287, + 695 + ], + "spans": [ + { + "bbox": [ + 46, + 586, + 287, + 695 + ], + "type": "text", + "content": "Video prediction is challenging because of the diverse and complex motion patterns in the wild, in which accurate motion estimation plays a crucial role [35, 37, 58]. Early methods [37, 58] along this direction mainly utilize recurrent neural networks [19] to capture temporal motion information for video prediction. To achieve robust long-term prediction, the works of [41, 59, 62] additionally exploit the semantic or instance segmentation maps of video frames for semantically coherent motion estimation in complex scenes." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 240, + 545, + 427 + ], + "blocks": [ + { + "bbox": [ + 309, + 240, + 545, + 427 + ], + "lines": [ + { + "bbox": [ + 309, + 240, + 545, + 427 + ], + "spans": [ + { + "bbox": [ + 309, + 240, + 545, + 427 + ], + "type": "image", + "image_path": "6dc7efb43097abff1d9318fcdb362bba0ffca038bd707eec0cfd1dc410e60dfb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 305, + 434, + 547, + 479 + ], + "lines": [ + { + "bbox": [ + 305, + 434, + 547, + 479 + ], + "spans": [ + { + "bbox": [ + 305, + 434, + 547, + 479 + ], + "type": "text", + "content": "Figure 1. Average MS-SSIM and GFLOPs of different video prediction methods on Cityscapes [9]. The parameter amounts are provided in brackets. DMVFN outperforms previous methods in terms of image quality, parameter amount, and GFLOPs." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 506, + 547, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 547, + 626 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 547, + 626 + ], + "type": "text", + "content": "However, the semantic or instance maps may not always be available in practical scenarios, which limits the application scope of these video prediction methods [41,59,62]. To improve the prediction capability while avoiding extra inputs, the method of OPT [63] utilizes only RGB images to estimate the optical flow of video motions in an optimization manner with impressive performance. However, its inference speed is largely bogged down mainly by the computational costs of pre-trained optical flow model [54] and frame interpolation model [22] used in the iterative generation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": "The motions of different objects between two adjacent frames are usually of different scales. This is especially evident in high-resolution videos with meticulous details [49]. The spatial resolution is also of huge differences in real-world video prediction applications. To this end, it is essential yet challenging to develop a single model for multiscale motion estimation. An early attempt is to extract" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 496, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 702, + 140, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 702, + 140, + 712 + ], + "spans": [ + { + "bbox": [ + 58, + 702, + 140, + 712 + ], + "type": "text", + "content": "*Corresponding authors." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "6121" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 286, + 108 + ], + "type": "text", + "content": "multi-scale motion cues in different receptive fields by employing the encoder-decoder architecture [35], but in practice it is not flexible enough to deal with complex motions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 108, + 287, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 108, + 287, + 370 + ], + "spans": [ + { + "bbox": [ + 46, + 108, + 287, + 370 + ], + "type": "text", + "content": "In this paper, we propose a Dynamic Multi-scale Voxel Flow Network (DMVFN) to explicitly model the complex motion cues of diverse scales between adjacent video frames by dynamic optical flow estimation. Our DMVFN is consisted of several Multi-scale Voxel Flow Blocks (MVFBs), which are stacked in a sequential manner. On top of MVFBs, a light-weight Routing Module is proposed to adaptively generate a routing vector according to the input frames, and to dynamically select a subnetwork for efficient future frame prediction. We conduct experiments on four benchmark datasets, including Cityscapes [9], KITTI [12], DAVIS17 [43], and ViceoTest [69], to demonstrate the comprehensive advantages of our DMVFN over representative video prediction methods in terms of visual quality, parameter amount, and computational efficiency measured by floating point operations (FLOPs). A glimpse of comparison results by different methods is provided in Figure 1. One can see that our DMVFN achieves much better performance in terms of accuracy and efficiency on the Cityscapes [9] dataset. Extensive ablation studies validate the effectiveness of the components in our DMVFN for video prediction." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 372, + 271, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 372, + 271, + 382 + ], + "spans": [ + { + "bbox": [ + 59, + 372, + 271, + 382 + ], + "type": "text", + "content": "In summary, our contributions are mainly three-fold:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 388, + 286, + 534 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 59, + 388, + 286, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 388, + 286, + 437 + ], + "spans": [ + { + "bbox": [ + 59, + 388, + 286, + 437 + ], + "type": "text", + "content": "- We design a light-weight DMVFN to accurately predict future frames with only RGB frames as inputs. Our DMVFN is consisted of new MVFB blocks that can model different motion scales in real-world videos." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 444, + 286, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 444, + 286, + 491 + ], + "spans": [ + { + "bbox": [ + 59, + 444, + 286, + 491 + ], + "type": "text", + "content": "- We propose an effective Routing Module to dynamically select a suitable sub-network according to the input frames. The proposed Routing Module is end-to-end trained along with our main network DMVFN." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 498, + 286, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 498, + 286, + 534 + ], + "spans": [ + { + "bbox": [ + 59, + 498, + 286, + 534 + ], + "type": "text", + "content": "- Experiments on four benchmarks show that our DMVFN achieves state-of-the-art results while being an order of magnitude faster than previous methods." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 47, + 544, + 133, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 133, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 133, + 555 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 147, + 574 + ], + "type": "text", + "content": "2.1. Video Prediction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 286, + 713 + ], + "type": "text", + "content": "Early video prediction methods [35, 37, 58] only utilize RGB frames as inputs. For example, PredNet [37] learns an unsupervised neural network, with each layer making local predictions and forwarding deviations from those predictions to subsequent network layers. MCNet [58] decomposes the input frames into motion and content components, which are processed by two separate encoders. DVF [35] is a fully-convolutional encoder-decoder network synthesizing intermediate and future frames by approximating voxel flow for motion estimation. Later, extra information is exploited by video prediction methods in pursuit of better" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 204 + ], + "type": "text", + "content": "performance. For example, the methods of Vid2vid [59], Seg2vid [41], HVP [32], and SADM [2] require additional semantic maps or human pose information for better video prediction results. Additionally, Qi et al. [44] used extra depth maps and semantic maps to explicitly inference scene dynamics in 3D space. FVS [62] separates the inputs into foreground objects and background areas by semantic and instance maps, and uses a spatial transformer to predict the motion of foreground objects. In this paper, we develop a light-weight and efficient video prediction network that requires only sRGB images as the inputs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 213, + 388, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 213, + 388, + 225 + ], + "spans": [ + { + "bbox": [ + 306, + 213, + 388, + 225 + ], + "type": "text", + "content": "2.2. 
Optical Flow" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 232, + 545, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 545, + 471 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 545, + 471 + ], + "type": "text", + "content": "Optical flow estimation aims to predict the per-pixel motion between adjacent frames. Deep learning-based optical flow methods [17,29,38,53,54] have been considerably advanced ever since Flownet [11], a pioneering work to learn optical flow network from synthetic data. Flownet2.0 [25] improves the accuracy of optical flow estimation by stacking sub-networks for iterative refinement. A coarse-to-fine spatial pyramid network is employed in SPynet [46] to estimate optical flow at multiple scales. PWC-Net [53] employs feature warping operation at different resolutions and uses a cost volume layer to refine the estimated flow at each resolution. RAFT [54] is a lightweight recurrent network sharing weights during the iterative learning process. FlowFormer [21] utilizes an encoder to output latent tokens and a recurrent decoder to decode features, while refining the estimated flow iteratively. In video synthesis, optical flow for downstream tasks [22, 35, 68, 69, 72] is also a hot research topic. Based on these approaches, we aim to design a flow estimation network that can adaptively operate based on each sample for the video prediction task." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 479, + 414, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 479, + 414, + 491 + ], + "spans": [ + { + "bbox": [ + 306, + 479, + 414, + 491 + ], + "type": "text", + "content": "2.3. Dynamic Network" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 713 + ], + "type": "text", + "content": "The design of dynamic networks is mainly divided into three categories: spatial-wise, temporal-wise, and sample-wise [16]. Spatial-wise dynamic networks perform adaptive operations in different spatial regions to reduce computational redundancy with comparable performance [20, 47, 57]. In addition to the spatial dimension, dynamic processing can also be applied in the temporal dimension. Temporal-wise dynamic networks [52, 64, 70] improve the inference efficiency by performing less or no computation on unimportant sequence frames. To handle the input in a data-driven manner, sample-wise dynamic networks adaptively adjust network structures to side-off the extra computation [56, 60], or adaptively change the network parameters to improve the performance [10, 18, 51, 76]. Designing and training a dynamic network is not trivial since it is difficult to directly enable a model with complex topology connections. We need to design a well-structured and robust model before considering its dynamic mechanism. 
In this paper," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6122" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 70, + 272, + 174 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 272, + 174 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 272, + 174 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 272, + 174 + ], + "type": "image", + "image_path": "0fc09876a02da94c26a51e557970913b2d8d61d9dc263948db00d9b38c7d8f75.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 107, + 178, + 246, + 190 + ], + "lines": [ + { + "bbox": [ + 107, + 178, + 246, + 190 + ], + "spans": [ + { + "bbox": [ + 107, + 178, + 246, + 190 + ], + "type": "text", + "content": "(a) Voxel Flow-based Image Fusion" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 73, + 197, + 286, + 280 + ], + "blocks": [ + { + "bbox": [ + 73, + 197, + 286, + 280 + ], + "lines": [ + { + "bbox": [ + 73, + 197, + 286, + 280 + ], + "spans": [ + { + "bbox": [ + 73, + 197, + 286, + 280 + ], + "type": "image", + "image_path": "b76c9cc72387c1175de1d856c59ddbef47d4c19965d05183878bb6d6577eded8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 286, + 201, + 298 + ], + "lines": [ + { + "bbox": [ + 151, + 286, + 201, + 298 + ], + "spans": [ + { + "bbox": [ + 151, + 286, + 201, + 298 + ], + "type": "text", + "content": "(b) DMVFN" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 289, + 69, + 521, + 281 + ], + "blocks": [ + { + "bbox": [ + 289, + 69, + 521, + 281 + ], + "lines": [ + { + "bbox": [ + 289, + 69, + 521, + 281 + ], + "spans": [ + { + "bbox": [ + 289, + 69, + 521, + 281 + ], + "type": "image", + "image_path": "229ec29b64f11faf740246e7d45b364695f0da07d4b24ac609c9d69d8b11b42c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 287, + 380, + 298 + ], + "lines": [ + { + "bbox": [ + 336, + 287, + 380, + 298 + ], + "spans": [ + { + "bbox": [ + 336, + 287, + 380, + 298 + ], + "type": "text", + "content": "(c) MVFB" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 286, + 512, + 298 + ], + "lines": [ + { + "bbox": [ + 433, + 286, + 512, + 298 + ], + "spans": [ + { + "bbox": [ + 433, + 286, + 512, + 298 + ], + "type": "text", + "content": "(d) Routing Module" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "lines": [ + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "spans": [ + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": "Figure 2. Overview of the proposed Dynamic Multi-scale Voxel Flow Network (DMVFN). " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ": To predict a future frame, we use the voxel flow [35] to guide the pixel fusion of the input frames. 
The voxel flow contains the prediction of object motion and occlusion. " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ": DMVFN contains several MVFBs with decreasing scaling factor " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "S^i" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ". According to the routing vector " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": " estimated by a Routing Module, a sub-network is selected to process the input image. " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ": Each MVFB has a scaling factor " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "S^i" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ", which means that the motion path is performed on images whose sizes are " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "1 / S^i" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": " of the original. " + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "inline_equation", + "content": "(d)" + }, + { + "bbox": [ + 46, + 308, + 545, + 373 + ], + "type": "text", + "content": ": Two consecutive frames are fed into several neural layers and a Differentiable Bernoulli sample to generate the hard routing vector." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 395, + 287, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 395, + 287, + 418 + ], + "spans": [ + { + "bbox": [ + 46, + 395, + 287, + 418 + ], + "type": "text", + "content": "we propose a module to dynamically perceive the motion magnitude of input frames to select the network structure." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 430, + 129, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 129, + 443 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 129, + 443 + ], + "type": "text", + "content": "3. Methodology" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 450, + 127, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 450, + 127, + 462 + ], + "spans": [ + { + "bbox": [ + 47, + 450, + 127, + 462 + ], + "type": "text", + "content": "3.1. Background" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "spans": [ + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": "Video prediction. 
Given a sequence of past " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": " frames " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\{I_i\\in \\mathbb{R}^{h\\times w\\times 3}|i = 1,\\dots ,t\\}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": ", video prediction aims to predict the future frames " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\{\\tilde{I}_{t + 1},\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": ". The inputs of our video prediction model are only the two consecutive frames " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "I_{t - 1}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": ". We concentrate on predicting " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t + 1}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": ", and iteratively predict future frames " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\{\\tilde{I}_{t + 2},\\tilde{I}_{t + 3},\\ldots \\}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": " in a similar manner. Denote the video prediction model as " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "G_{\\theta}(I_{t - 1},I_t)" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": " is the set of model parameters to be learned, the learning objective is to minimize the difference between " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t + 1} = G_{\\theta}(I_{t - 1},I_t)" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": " and the \"ground truth\" " + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "inline_equation", + "content": "I_{t + 1}" + }, + { + "bbox": [ + 46, + 468, + 287, + 589 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": "Voxel flow. Considering the local consistency in space-time, the pixels of a generated future frame come from nearby regions of the previous frames [69, 75]. 
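The iterative rollout described in the video prediction formulation above (predict the next frame from the two latest frames, then feed the prediction back in) can be sketched in a few lines. This is a minimal illustration; `model` is a hypothetical stand-in for G_theta, not the paper's network.

```python
# Hedged sketch of the iterative multi-step prediction loop; `model` stands in for
# G_theta(I_{t-1}, I_t) and is an assumption, not the authors' implementation.
def rollout(model, i_prev, i_cur, steps):
    preds = []
    for _ in range(steps):
        nxt = model(i_prev, i_cur)      # \tilde{I}_{t+1} = G_theta(I_{t-1}, I_t)
        preds.append(nxt)
        i_prev, i_cur = i_cur, nxt      # the prediction becomes the newest input frame
    return preds                        # [\tilde{I}_{t+1}, \tilde{I}_{t+2}, ...]
```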
In video prediction task, researchers estimate optical flow " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{t + 1\\rightarrow t}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "I_{t + 1}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": " [35]. And the corresponding frame is obtained using the pixel-wise backward warping [26] (denoted as " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\overleftarrow{\\mathcal{W}}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": "). In addition, to deal with the occlusion, some methods [28, 35] further introduce a fusion map " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": " to fuse the pixels of " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "inline_equation", + "content": "I_{t - 1}" + }, + { + "bbox": [ + 46, + 604, + 287, + 713 + ], + "type": "text", + "content": ". The final predicted frame is obtained" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 395, + 485, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 395, + 485, + 407 + ], + "spans": [ + { + "bbox": [ + 305, + 395, + 485, + 407 + ], + "type": "text", + "content": "by the following formulation (Figure 2 " + }, + { + "bbox": [ + 305, + 395, + 485, + 407 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 305, + 395, + 485, + 407 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 357, + 415, + 545, + 431 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 415, + 545, + 431 + ], + "spans": [ + { + "bbox": [ + 357, + 415, + 545, + 431 + ], + "type": "interline_equation", + "content": "\\hat {I} _ {t + 1 \\leftarrow t - 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, \\mathbf {f} _ {t + 1 \\rightarrow t - 1}\\right), \\tag {1}", + "image_path": "db399839e74b528d3fd55d9513365969049abfd05a63f92f3133158682485e16.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 372, + 449, + 545, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 449, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 372, + 449, + 545, + 464 + ], + "type": "interline_equation", + "content": "\\hat {I} _ {t + 1 \\leftarrow t} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t}, \\mathbf {f} _ {t + 1 \\rightarrow t}\\right), \\tag {2}", + "image_path": "c00c91f04560c19d83d311d708ce035c20cc32427b7ec19e2189446ff65850a4.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 334, + 480, + 545, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 480, + 545, + 495 + ], + "spans": [ + { + "bbox": [ + 334, + 480, + 545, + 495 + ], + "type": 
"interline_equation", + "content": "\\tilde {I} _ {t + 1} = \\hat {I} _ {t + 1 \\leftarrow t - 1} \\times \\mathbf {m} + \\hat {I} _ {t + 1 \\leftarrow t} \\times (1 - \\mathbf {m}). \\tag {3}", + "image_path": "8f86b8955676ee78ecdb15934090b5b1ea1a6b25dd02518d6ca4aaa46c06dadd.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "inline_equation", + "content": "\\hat{I}_{t+1 \\leftarrow t}" + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "inline_equation", + "content": "\\hat{I}_{t+1 \\leftarrow t-1}" + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": " are intermediate warped images. To simplify notations, we refer to the optical flows " + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{t+1 \\rightarrow t}, \\mathbf{f}_{t+1 \\rightarrow t-1}" + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": " and the fusion map " + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": " collectively as the voxel flow " + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{t+1}" + }, + { + "bbox": [ + 304, + 498, + 545, + 559 + ], + "type": "text", + "content": ", similar to the notations in [35]. The above equations can be simplified to the following form:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 370, + 567, + 545, + 583 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 567, + 545, + 583 + ], + "spans": [ + { + "bbox": [ + 370, + 567, + 545, + 583 + ], + "type": "interline_equation", + "content": "\\tilde {I} _ {t + 1} = \\overleftarrow {\\mathcal {W}} \\left(I _ {t - 1}, I _ {t}, \\mathbf {F} _ {t + 1}\\right). \\tag {4}", + "image_path": "92a0045dad84be40d5eac671fea52fbf46bd77637904ab8f76ab78f15228cc53.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 599, + 525, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 599, + 525, + 612 + ], + "spans": [ + { + "bbox": [ + 305, + 599, + 525, + 612 + ], + "type": "text", + "content": "3.2. Dynamic Multi-Scale Voxel Flow Network" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 617, + 545, + 713 + ], + "type": "text", + "content": "MVFB. To estimate the voxel flow, DVF [35] assumes that all optical flows are locally linear and temporally symmetric around the targeted time, which may be unreasonable for large-scale motions. To address the object position changing issue [22] in adjacent frames, OPT [63] uses flow reversal layer [68] to convert forward flows to backward flows. We aim to estimate voxel flow end-to-end without introducing new components and unreasonable constraints." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6123" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 70, + 169, + 205 + ], + "blocks": [ + { + "bbox": [ + 61, + 70, + 169, + 205 + ], + "lines": [ + { + "bbox": [ + 61, + 70, + 169, + 205 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 169, + 205 + ], + "type": "image", + "image_path": "f59e3d5d7093a275e2f4bd42f9e09d29bac48e89b75633ed29c7b57648462a68.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 108, + 211, + 123, + 220 + ], + "lines": [ + { + "bbox": [ + 108, + 211, + 123, + 220 + ], + "spans": [ + { + "bbox": [ + 108, + 211, + 123, + 220 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 170, + 70, + 242, + 205 + ], + "blocks": [ + { + "bbox": [ + 170, + 70, + 242, + 205 + ], + "lines": [ + { + "bbox": [ + 170, + 70, + 242, + 205 + ], + "spans": [ + { + "bbox": [ + 170, + 70, + 242, + 205 + ], + "type": "image", + "image_path": "e291b529e57bb3fff07bd03becf4ca8a342f76b0b5905f22557f989afdaff33a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 196, + 211, + 216, + 220 + ], + "lines": [ + { + "bbox": [ + 196, + 211, + 216, + 220 + ], + "spans": [ + { + "bbox": [ + 196, + 211, + 216, + 220 + ], + "type": "text", + "content": "DVF" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 242, + 70, + 315, + 205 + ], + "blocks": [ + { + "bbox": [ + 242, + 70, + 315, + 205 + ], + "lines": [ + { + "bbox": [ + 242, + 70, + 315, + 205 + ], + "spans": [ + { + "bbox": [ + 242, + 70, + 315, + 205 + ], + "type": "image", + "image_path": "4020023bc6d8f37032c16b5d81dc1b822d6370c94242fb14dab60404db8bd024.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 265, + 211, + 292, + 220 + ], + "lines": [ + { + "bbox": [ + 265, + 211, + 292, + 220 + ], + "spans": [ + { + "bbox": [ + 265, + 211, + 292, + 220 + ], + "type": "text", + "content": "DYAN" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "lines": [ + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "spans": [ + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "text", + "content": "Figure 3. Visual comparison of " + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "inline_equation", + "content": "(t + 1)" + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "text", + "content": "-th frame predicted from " + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "text", + "content": "-th and " + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "inline_equation", + "content": "(t - 1)" + }, + { + "bbox": [ + 75, + 228, + 515, + 241 + ], + "type": "text", + "content": "-th frames on the DAVIS17-Val [43]." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 315, + 70, + 388, + 205 + ], + "blocks": [ + { + "bbox": [ + 315, + 70, + 388, + 205 + ], + "lines": [ + { + "bbox": [ + 315, + 70, + 388, + 205 + ], + "spans": [ + { + "bbox": [ + 315, + 70, + 388, + 205 + ], + "type": "image", + "image_path": "29b9326dbf78209ac643263e212efe256ed0690376355ba49318fa9c7e8a56b4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 212, + 361, + 220 + ], + "lines": [ + { + "bbox": [ + 342, + 212, + 361, + 220 + ], + "spans": [ + { + "bbox": [ + 342, + 212, + 361, + 220 + ], + "type": "text", + "content": "OPT" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 388, + 70, + 460, + 205 + ], + "blocks": [ + { + "bbox": [ + 388, + 70, + 460, + 205 + ], + "lines": [ + { + "bbox": [ + 388, + 70, + 460, + 205 + ], + "spans": [ + { + "bbox": [ + 388, + 70, + 460, + 205 + ], + "type": "image", + "image_path": "f7c76414e3fb2116b4609b85664a59171dd8f2b3a26b2d6a949bf26d73b8bb0f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 212, + 441, + 220 + ], + "lines": [ + { + "bbox": [ + 406, + 212, + 441, + 220 + ], + "spans": [ + { + "bbox": [ + 406, + 212, + 441, + 220 + ], + "type": "text", + "content": "DMVFN" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 460, + 70, + 533, + 205 + ], + "blocks": [ + { + "bbox": [ + 460, + 70, + 533, + 205 + ], + "lines": [ + { + "bbox": [ + 460, + 70, + 533, + 205 + ], + "spans": [ + { + "bbox": [ + 460, + 70, + 533, + 205 + ], + "type": "image", + "image_path": "4845f3b7a0c8ac6b91a14e5ee6a7c085035ee994ef026c62bffa882475cc7745.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 489, + 212, + 503, + 220 + ], + "lines": [ + { + "bbox": [ + 489, + 212, + 503, + 220 + ], + "spans": [ + { + "bbox": [ + 489, + 212, + 503, + 220 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": "We denote the " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": "-th MVFB as " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "f_{MVFB}^{i}(\\cdot)" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": ". 
It learns to approximate target voxel flow " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{t+1}^{i}" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": " by taking two frames " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "I_{t-1}" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": ", the synthesized frame " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t+1}^{i-1}" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": ", and the voxel flow estimated by previous blocks " + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{t+1}^{i-1}" + }, + { + "bbox": [ + 47, + 251, + 287, + 301 + ], + "type": "text", + "content": " as inputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "spans": [ + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "content": "The architecture of our MVFB is shown in Figure 2 " + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "content": ". To capture the large motion while retaining the original spatial information, we construct a two-branch network structure [71]. This design inherits from pyramidal optical flow estimation [46, 53]. In the motion path, the input is downsampled by a scaling factor " + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "inline_equation", + "content": "S^i" + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "content": " to facilitate the expansion of the receptive field. Another spatial path operates at high resolution to complement the spatial information. We denote " + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t + 1}^{i}" + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "content": " as the output of the " + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 302, + 287, + 411 + ], + "type": "text", + "content": "-th MVFB. Formally," + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 71, + 419, + 287, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 419, + 287, + 435 + ], + "spans": [ + { + "bbox": [ + 71, + 419, + 287, + 435 + ], + "type": "interline_equation", + "content": "\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = f _ {\\mathrm {M V F B}} ^ {i} \\left(I _ {t - 1}, I _ {t}, \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, S ^ {i}\\right). 
\\tag {5}", + "image_path": "1b47df92e120554afd839ac6712662545f43de58a50b632caf9c9ea879b13bd5.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "spans": [ + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "text", + "content": "The initial values of " + }, + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t+1}^{0}" + }, + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{t+1}^{0}" + }, + { + "bbox": [ + 46, + 445, + 287, + 493 + ], + "type": "text", + "content": " are set to zero. As illustrated in Figure 2 (b), our DMVFN contains 9 MVFBs. To generate a future frame, we iteratively refine a voxel flow [35] and fuse the pixels of the input frames." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 494, + 287, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 494, + 287, + 554 + ], + "spans": [ + { + "bbox": [ + 46, + 494, + 287, + 554 + ], + "type": "text", + "content": "Many optical flow estimation methods predict the flow field on a small image, and then refine it on a large image [53, 67]. For simplicity and intuition, we consider decreasing scaling factor sequences. Finally, the scaling factors is experimentally set as " + }, + { + "bbox": [ + 46, + 494, + 287, + 554 + ], + "type": "inline_equation", + "content": "[4, 4, 4, 2, 2, 2, 1, 1, 1]" + }, + { + "bbox": [ + 46, + 494, + 287, + 554 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 569, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 569, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 569, + 287, + 665 + ], + "type": "text", + "content": "DMVFN. Different pairs of adjacent frames have diverse motion scales and different computational demands. An intuitive idea is to adaptively select dynamic architectures conditioned on each input. We then perform dynamic routing within the super network (the whole architecture) [16], including multiple possible paths. DMVFN saves redundant computation for samples with small-scale motion and preserves the representation ability for large-scale motion." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "To make our DMVFN end-to-end trainable, we design a differentiable Routing Module containing a tiny neural network to estimate routing vector " + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": " for each input sample. Based on this vector, our DMVFN dynamically selects a" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 251, + 544, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 251, + 544, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 251, + 544, + 274 + ], + "type": "text", + "content": "sub-network to process the input data. 
As the Figure 2 " + }, + { + "bbox": [ + 305, + 251, + 544, + 274 + ], + "type": "inline_equation", + "content": "(b)" + }, + { + "bbox": [ + 305, + 251, + 544, + 274 + ], + "type": "text", + "content": " shows, some blocks are skipped during inference." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": "Different from some dynamic network methods that can only continuously select the first several blocks (" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " options) [4, 55], DMVFN is able to choose paths freely (" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "2^n" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " options). DMVFN trains different sub-networks in the super network with various possible inference paths and uses dynamic routing inside the super network during inference to reduce redundant computation while maintaining the performance. A dynamic routing vector " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "v \\in \\{0,1\\}^n" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " is predicted by the proposed Routing Module. For the " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": "-th MVFN block of DMVFN, we denote " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " as the reference of whether processing the reached voxel flow " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{t+1}^{i-1}" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " and the reached predicted frame " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{t+1}^{i-1}" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": ". The path " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{MVFB}}^i" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": " to the " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": "-th block from the last block will be activated only when " + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "inline_equation", + "content": "v_i = 1" + }, + { + "bbox": [ + 304, + 275, + 545, + 433 + ], + "type": "text", + "content": ". 
Formally," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 330, + 441, + 545, + 486 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 441, + 545, + 486 + ], + "spans": [ + { + "bbox": [ + 330, + 441, + 545, + 486 + ], + "type": "interline_equation", + "content": "\\tilde {I} _ {t + 1} ^ {i}, \\mathbf {F} _ {t + 1} ^ {i} = \\left\\{ \\begin{array}{l l} f _ {\\mathrm {M V F B}} ^ {i} \\left(\\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}\\right), & v _ {i} = 1 \\\\ \\tilde {I} _ {t + 1} ^ {i - 1}, \\mathbf {F} _ {t + 1} ^ {i - 1}, & v _ {i} = 0. \\end{array} \\right. \\tag {6}", + "image_path": "0f6a6073dc9aec24d8fb6bd93532d0fccfb33e66de538cb9a20e4bcf952f8b1f.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "spans": [ + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "text", + "content": "During the training phase, to enable the backpropagation of Eqn. (6), we use " + }, + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "inline_equation", + "content": "v_{i}" + }, + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "inline_equation", + "content": "(1 - v_{i})" + }, + { + "bbox": [ + 304, + 487, + 545, + 523 + ], + "type": "text", + "content": " as the weights of the two branches and average their outputs." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "spans": [ + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "text", + "content": "In the iterative scheme of our DMVFN, each MVFB essentially refines the current voxel flow estimation to a new one. This special property allows our DMVFN to skip some MVFBs for every pair of input frames. Here, we design a differentiable and efficient routing module for learning to trade-off each MVFB block. This is achieved by predicting a routing vector " + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "inline_equation", + "content": "v \\in \\{0,1\\}^n" + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "text", + "content": " to identify the proper sub-network (e.g., 0 for deactivated MVFBs, 1 for activated MVFBs). We implement the routing module by a small neural network (" + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "inline_equation", + "content": "\\sim 1/6" + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "text", + "content": " GFLOPs of the super network), and show its architecture in Figure 2 (d). 
It learns to predict the probability " + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "inline_equation", + "content": "\\tilde{v}" + }, + { + "bbox": [ + 304, + 523, + 545, + 666 + ], + "type": "text", + "content": " of choosing MVFBs by:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 336, + 672, + 545, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 672, + 545, + 685 + ], + "spans": [ + { + "bbox": [ + 336, + 672, + 545, + 685 + ], + "type": "interline_equation", + "content": "\\tilde {v} = \\operatorname {L i n e a r} (\\operatorname {A v g P o o l i n g} (\\operatorname {C o n v s} (I _ {t - 1}, I _ {t}))), \\tag {7}", + "image_path": "6bd2ab1f9f8397e80a61c2fec6b6870c20f26de955061a84a10de50e919caf0b.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 367, + 701, + 545, + 714 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 367, + 701, + 545, + 714 + ], + "type": "interline_equation", + "content": "v = \\text {B e r n o u l l i - S a m p l i n g} (\\tilde {v}). \\tag {8}", + "image_path": "70f4cdf2da03234693eb9ac08a895bd347565d9c33894e04afe42b32bef2d3bd.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6124" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 60, + 113, + 535, + 254 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 105 + ], + "type": "text", + "content": "Table 1. Quantitative results of different methods on the Cityscapes [9], and KITTI [12] datasets. \"RGB\", \"F\", \"S\" and \"T\" denote the video frames, optical flow, semantic map, and instance map, respectively. We denote our DMVFN without routing module as \"DMVFN (w/o r)\". FVS [62] integrates a segmentation model [77] on KITTI [12] to obtain the semantic maps. \"N/A\" means not available." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 60, + 113, + 535, + 254 + ], + "lines": [ + { + "bbox": [ + 60, + 113, + 535, + 254 + ], + "spans": [ + { + "bbox": [ + 60, + 113, + 535, + 254 + ], + "type": "table", + "html": "
MethodInputsCityscapes-Train→Cityscapes-Test [9]KITTI-Train→KITTI-Test [12]
GFLOPsMS-SSIM (×10-2) ↑ t+1t+3t+5LPIPS (×10-2) ↓ t+1t+3t+5GFLOPsMS-SSIM (×10-2) ↑ t+1t+3t+5LPIPS (×10-2) ↓ t+1t+3t+5
Vid2vid [59]RGB+S603.7988.1680.5575.1310.5815.9220.14N/AN/AN/AN/AN/AN/AN/AN/A
Seg2vid [41]RGB+S455.8488.32N/A61.639.69N/A25.99N/AN/AN/AN/AN/AN/AN/AN/A
FVS [62]RGB+S+I1891.6589.1081.1375.688.5012.9816.50768.9679.2867.6560.7718.4824.6130.49
SADM [2]RGB+S+FN/A95.99N/A83.517.67N/A14.93N/A83.0672.4464.7214.4124.5831.16
PredNet [37]RGB62.6284.0379.2575.2125.9929.9936.0325.4456.2651.4747.5655.3558.6662.95
MCNET [58]RGB502.8089.6978.0770.5818.8831.3437.34204.2675.3563.5255.4824.0531.7137.39
DVF [35]RGB409.7883.8576.2371.1117.3724.0528.79166.4753.9346.9942.6232.4737.4341.59
CorrWise [13]RGB944.2992.80N/A83.908.50N/A15.00383.6282.00N/A66.7017.20N/A25.90
OPT [63]RGB313482.1594.5486.8980.406.4612.5017.83127431.7182.7169.5061.0912.3420.2926.35
DMVFN (w/o r)RGB24.5195.2987.9181.485.6010.4814.919.9688.0676.5368.2910.7019.2826.13
DMVFNRGB12.7195.7389.2483.455.5810.4714.825.1588.5378.0170.5210.7419.2726.05
", + "image_path": "5785c949229a8a5a025c47428805b7c291cb8d4e19110fc3d84a0376bbbf090f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 274, + 287, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 274, + 287, + 357 + ], + "spans": [ + { + "bbox": [ + 46, + 274, + 287, + 357 + ], + "type": "text", + "content": "Differentiable Routing. To train the proposed Routing Module, we need to constrain the probability values to prevent the model from falling into trivial solutions (e.g., select all blocks). On the other hand, we allow this module to participate in the gradient calculation to achieve end-to-end training. We introduce the Gumbel Softmax [27] and the Straight-Through Estimator (STE) [3] to tackle this issue." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "spans": [ + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "text", + "content": "One popular method to make the routing probability " + }, + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "inline_equation", + "content": "\\tilde{v}" + }, + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "text", + "content": " learnable is the Gumbel Softmax technique [24, 27]. By treating the selection of each MVFB as a binary classification task, the soft dynamic routing vector " + }, + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "inline_equation", + "content": "v\\in \\mathbb{R}^n" + }, + { + "bbox": [ + 46, + 358, + 288, + 407 + ], + "type": "text", + "content": " is" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 412, + 287, + 443 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 412, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 59, + 412, + 287, + 443 + ], + "type": "interline_equation", + "content": "v _ {i} = \\frac {\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right)}{\\exp \\left(\\frac {1}{\\tau} \\left(\\tilde {v} _ {i} + G _ {i}\\right)\\right) + \\exp \\left(\\frac {1}{\\tau} \\left(2 - \\tilde {v} _ {i} - G _ {i}\\right)\\right)}, \\tag {9}", + "image_path": "f9dae84c16bad0d23565af8239599b2393fd653bfbafd1892b21156301746c40.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "spans": [ + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "i = 1, \\dots, n" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "G_{i} \\in \\mathbb{R}" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": " is Gumbel noise following the Gumbel(0,1) distribution, and " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": " is a temperature parameter. We start at a very high temperature to ensure that all possible paths become candidates, and then the temperature is attenuated to a small value to approximate one-hot distribution. 
To encourage the sum of the routing vectors " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\{v_{i}\\}_{i=1}^{n}" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": " to be small, we add the regularization term " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\left(\\frac{1}{n} \\sum_{i=1}^{n} v_{i}\\right)" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": " to the final loss function. However, we experimentally find that our DMVFN usually converges to an input-independent structure when temperature decreases. We conjecture that the control of the temperature parameter " + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 46, + 448, + 287, + 591 + ], + "type": "text", + "content": " and the design of the regularization term require further study." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 592, + 287, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 592, + 287, + 652 + ], + "spans": [ + { + "bbox": [ + 46, + 592, + 287, + 652 + ], + "type": "text", + "content": "Inspired by previous research on low-bit width neural networks [23, 74], we adopt STE for Bernoulli Sampling (STEBS) to make the binary dynamic routing vector differentiable. An STE can be regarded as an operator that has arbitrary forward and backward operations. Formally," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 658, + 287, + 689 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 658, + 287, + 689 + ], + "spans": [ + { + "bbox": [ + 83, + 658, + 287, + 689 + ], + "type": "interline_equation", + "content": "\\tilde {w} _ {i} = \\min (\\beta \\times n \\times \\sigma (\\tilde {v} _ {i}) / \\sum_ {i} ^ {n} \\sigma (\\tilde {v} _ {i}), 1), \\tag {10}", + "image_path": "af004e1f2880123bc64fafadd53efaae15c23039cc9a5c4499ea31860d15c483.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "text", + "content": "STE Forward: " + }, + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "inline_equation", + "content": "v_{i}\\sim" + }, + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "text", + "content": " Bernoulli " + }, + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "inline_equation", + "content": "(\\tilde{w}_i)" + }, + { + "bbox": [ + 91, + 700, + 287, + 715 + ], + "type": "text", + "content": " (11)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 282, + 545, + 307 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 282, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 365, + 282, + 545, + 307 + ], + "type": "interline_equation", + "content": "\\text {S T E B a c k w a r d}: \\frac {\\partial o}{\\partial \\tilde {w}} = \\frac {\\partial o}{\\partial v}, \\tag {12}", + "image_path": "22499b36045c3b590ce5da6e35e39a45a6efb99cb374999f0d9c2bb89be28be8.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "spans": [ + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": "where " + }, + { + 
"bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": " is the Sigmoid function and we denote the objective function as " + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": ". We use the well-defined gradient " + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\frac{\\partial o}{\\partial v}" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": " as an approximation for " + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\frac{\\partial o}{\\partial \\tilde{w}}" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": " to construct the backward pass. In Eqn. (10), we normalize the sample rate. During training, " + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": " is fixed at 0.5. We can adjust the hyper-parameter " + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 304, + 309, + 547, + 384 + ], + "type": "text", + "content": " to control the complexity in the inference phase." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 389, + 440, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 389, + 440, + 402 + ], + "spans": [ + { + "bbox": [ + 306, + 389, + 440, + 402 + ], + "type": "text", + "content": "3.3. Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "text", + "content": "Loss function. 
Our training loss " + }, + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "inline_equation", + "content": "L_{total}" + }, + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "text", + "content": " is the sum of the reconstruction losses of outputs of each block " + }, + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "inline_equation", + "content": "I_{t + 1}^{i}" + }, + { + "bbox": [ + 304, + 407, + 545, + 433 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 359, + 441, + 545, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 359, + 441, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 359, + 441, + 545, + 472 + ], + "type": "interline_equation", + "content": "L _ {t o t a l} = \\sum_ {i = 1} ^ {n} \\gamma^ {n - i} d \\left(\\tilde {I} _ {t + 1} ^ {i}, I _ {t + 1}\\right), \\tag {13}", + "image_path": "fe2234daf0a9b9980d9dc1f1abb8b16679868ec9ea61498ad519d6587915cb9a.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "spans": [ + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "text", + "content": " loss calculated on the Laplacian pyramid representations [42] extracted from each pair of images. And we set " + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "inline_equation", + "content": "\\gamma = 0.8" + }, + { + "bbox": [ + 304, + 479, + 545, + 516 + ], + "type": "text", + "content": " in our experiments following [54]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "spans": [ + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "content": "Training strategy. Our DMVFN is trained on " + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "content": " image patches. The batch size is set as 64. We employ the AdamW optimizer [30, 36] with a weight decay of " + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "content": ". We use a cosine annealing strategy to reduce the learning rate from " + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "inline_equation", + "content": "10^{-5}" + }, + { + "bbox": [ + 304, + 529, + 547, + 601 + ], + "type": "text", + "content": ". Our model is trained on four 2080Ti GPUs for 300 epochs, which takes about 35 hours." 
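A compact sketch of the objective in Eq. (13) and the optimizer/scheduler setup described above. Here `d` defaults to a plain L1 distance; the actual Laplacian-pyramid L1 of [42] is not reproduced, and the placeholder model exists only to make the snippet self-contained.

```python
# Sketch of Eq. (13) plus the stated AdamW / cosine-annealing schedule (1e-4 -> 1e-5).
import torch

def total_loss(block_preds, target, gamma=0.8, d=None):
    """block_preds: per-block outputs [\tilde{I}^1_{t+1}, ..., \tilde{I}^n_{t+1}]."""
    d = d or (lambda a, b: (a - b).abs().mean())      # plain L1 stand-in for the pyramid loss
    n = len(block_preds)
    return sum(gamma ** (n - i) * d(p, target) for i, p in enumerate(block_preds, 1))

model = torch.nn.Conv2d(6, 3, 3, padding=1)           # placeholder network, not DMVFN
opt = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=300, eta_min=1e-5)
```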
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 306, + 612, + 388, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 612, + 388, + 625 + ], + "spans": [ + { + "bbox": [ + 306, + 612, + 388, + 625 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 631, + 419, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 631, + 419, + 643 + ], + "spans": [ + { + "bbox": [ + 306, + 631, + 419, + 643 + ], + "type": "text", + "content": "4.1. Dataset and Metric" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 306, + 649, + 526, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 649, + 526, + 662 + ], + "spans": [ + { + "bbox": [ + 306, + 649, + 526, + 662 + ], + "type": "text", + "content": "Dataset. We use several datasets in the experiments:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": "Cityscapes dataset [9] contains 3,475 driving videos with resolution of " + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "inline_equation", + "content": "2048 \\times 1024" + }, + { + "bbox": [ + 304, + 665, + 547, + 714 + ], + "type": "text", + "content": ". We use 2,945 videos for training (Cityscapes-Train) and 500 videos in Cityscapes dataset [9] for testing (Cityscapes-Test)." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6125" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 102, + 531, + 205 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 545, + 93 + ], + "type": "text", + "content": "Table 2. Quantitative results on the DAVIS17-Val [43] and Vimeo90K-Test [69] benchmarks. We denote DMVFN without routing as \"DMVFN (w/o r)\". \"N/A\" means not available." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 102, + 531, + 205 + ], + "lines": [ + { + "bbox": [ + 61, + 102, + 531, + 205 + ], + "spans": [ + { + "bbox": [ + 61, + 102, + 531, + 205 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Method</td><td colspan="5">UCF101-Train→DAVIS17-Val</td><td colspan="3">UCF101-Train→Vimeo90K-Test</td></tr>
<tr><td>GFLOPs ↓</td><td>MS-SSIM (×10-2) ↑ t+1</td><td>MS-SSIM (×10-2) ↑ t+3</td><td>LPIPS (×10-2) ↓ t+1</td><td>LPIPS (×10-2) ↓ t+3</td><td>GFLOPs ↓</td><td>MS-SSIM (×10-2) ↑ t+1</td><td>LPIPS (×10-2) ↓ t+1</td></tr>
<tr><td>DVF [35]</td><td>324.15</td><td>68.61</td><td>55.47</td><td>23.23</td><td>34.22</td><td>89.64</td><td>92.11</td><td>7.73</td></tr>
<tr><td>DYAN [34]</td><td>130.12</td><td>78.96</td><td>70.41</td><td>13.09</td><td>21.43</td><td>N/A</td><td>N/A</td><td>N/A</td></tr>
<tr><td>OPT [63]</td><td>165312.80</td><td>83.26</td><td>73.85</td><td>11.40</td><td>18.21</td><td>45716.20</td><td>96.75</td><td>3.59</td></tr>
<tr><td>DMVFN (w/o r)</td><td>19.39</td><td>84.81</td><td>75.05</td><td>9.41</td><td>16.24</td><td>5.36</td><td>97.24</td><td>3.30</td></tr>
<tr><td>DMVFN</td><td>9.96</td><td>83.97</td><td>74.81</td><td>9.96</td><td>17.28</td><td>2.77</td><td>97.01</td><td>3.69</td></tr></table>
", + "image_path": "f62fb5504e40abb953a28ac64a1d0347ff9c59843dd4fa7d8f1be8caff7e2317.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 55, + 222, + 279, + 453 + ], + "blocks": [ + { + "bbox": [ + 55, + 222, + 279, + 453 + ], + "lines": [ + { + "bbox": [ + 55, + 222, + 279, + 453 + ], + "spans": [ + { + "bbox": [ + 55, + 222, + 279, + 453 + ], + "type": "image", + "image_path": "56c72b869fbd430a17843e2c62d63c580540d90393ef8845f5d27046fa2f5bb3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 462, + 287, + 539 + ], + "lines": [ + { + "bbox": [ + 46, + 462, + 287, + 539 + ], + "spans": [ + { + "bbox": [ + 46, + 462, + 287, + 539 + ], + "type": "text", + "content": "Figure 4. Prediction comparison on KITTI. The yellow line is aligned with the car in the ground truth. The results show that previous methods (DVF [35], FVS [62], and OPT [63]) cannot accurately predict the car's location in the long-term prediction. The motion predicted by our DMVFN is the most similar to the ground truth, while the errors of other methods grow larger with time. The fences predicted by DMVFN remain vertical when moving." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 561, + 287, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 561, + 287, + 609 + ], + "spans": [ + { + "bbox": [ + 46, + 561, + 287, + 609 + ], + "type": "text", + "content": "KITTI dataset [12] contains 28 driving videos with resolution of " + }, + { + "bbox": [ + 46, + 561, + 287, + 609 + ], + "type": "inline_equation", + "content": "375 \\times 1242" + }, + { + "bbox": [ + 46, + 561, + 287, + 609 + ], + "type": "text", + "content": ". 24 videos in KITTI dataset are used for training (KITTI-Train) and the remaining four videos in KITTI dataset are used for testing (KITTI-Test)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 613, + 287, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 613, + 287, + 650 + ], + "spans": [ + { + "bbox": [ + 46, + 613, + 287, + 650 + ], + "type": "text", + "content": "UCF101 [50] dataset contains 13,320 videos under 101 different action categories with resolution of " + }, + { + "bbox": [ + 46, + 613, + 287, + 650 + ], + "type": "inline_equation", + "content": "240\\times 320" + }, + { + "bbox": [ + 46, + 613, + 287, + 650 + ], + "type": "text", + "content": ". We only use the training subset of UCF101 [50]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": "Vimeo90K [69] dataset has 51,312 triplets for training, where each triplet contains three consecutive video frames with resolution of " + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "inline_equation", + "content": "256 \\times 448" + }, + { + "bbox": [ + 46, + 653, + 287, + 715 + ], + "type": "text", + "content": ". There are 3,782 triplets in the Vimeo90K testing set. We denote the training and testing subsets as Video-Train and Video-Test, respectively." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 224, + 545, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 224, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 224, + 545, + 274 + ], + "type": "text", + "content": "DAVIS17 [43] has videos with resolution around " + }, + { + "bbox": [ + 305, + 224, + 545, + 274 + ], + "type": "inline_equation", + "content": "854 \\times 480" + }, + { + "bbox": [ + 305, + 224, + 545, + 274 + ], + "type": "text", + "content": ". We use the DAVIS17-Val containing 30 videos as test set. Configurations. We have four experimental configurations following previous works [34, 35, 63]:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 285, + 470, + 338 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 317, + 285, + 470, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 285, + 470, + 297 + ], + "spans": [ + { + "bbox": [ + 317, + 285, + 470, + 297 + ], + "type": "text", + "content": "- Cityscapes-Train " + }, + { + "bbox": [ + 317, + 285, + 470, + 297 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 317, + 285, + 470, + 297 + ], + "type": "text", + "content": " Cityscapes-Test" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 299, + 436, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 299, + 436, + 310 + ], + "spans": [ + { + "bbox": [ + 318, + 299, + 436, + 310 + ], + "type": "text", + "content": "KITTI-Train " + }, + { + "bbox": [ + 318, + 299, + 436, + 310 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 318, + 299, + 436, + 310 + ], + "type": "text", + "content": " KITTI-Test" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 318, + 313, + 429, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 313, + 429, + 324 + ], + "spans": [ + { + "bbox": [ + 318, + 313, + 429, + 324 + ], + "type": "text", + "content": "UCF101→DAVIS17-Val" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 327, + 420, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 327, + 420, + 338 + ], + "spans": [ + { + "bbox": [ + 318, + 327, + 420, + 338 + ], + "type": "text", + "content": "UCF101 " + }, + { + "bbox": [ + 318, + 327, + 420, + 338 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 318, + 327, + 420, + 338 + ], + "type": "text", + "content": " Vimeo-Test" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "spans": [ + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": "Here, the left and right sides of the arrow represent the training set and the test set, respectively. 
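The four train→test configurations above can be restated as a small mapping for reference; the dictionary below is only an illustrative summary, not an identifier from the paper's code.

```python
# Training set -> test set, as listed above.
EVAL_CONFIGS = {
    "cityscapes": ("Cityscapes-Train", "Cityscapes-Test"),
    "kitti":      ("KITTI-Train",      "KITTI-Test"),
    "davis":      ("UCF101-Train",     "DAVIS17-Val"),
    "vimeo":      ("UCF101-Train",     "Vimeo-Test"),
}
```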
For a fair comparison with other methods that are not tailored for high resolution videos, we follow the setting in [62] and resize the images in Cityscapes [9] to " + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "inline_equation", + "content": "1024 \\times 512" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": " and images in KITTI [12] to " + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "inline_equation", + "content": "256 \\times 832" + }, + { + "bbox": [ + 304, + 350, + 545, + 506 + ], + "type": "text", + "content": ", respectively. During inference of Cityscapes [9] and KITTI [12], we predict the next five frames. We predict the next three frames for DAVIS17-Val [43] and next one frame for Video-Test [69], respectively. Note that OPT [63] is an optimization-based approach and uses pretrained RAFT [54] and RIFE [22] models. RIFE [22] and RAFT [54] are trained on the Video-Train dataset [69] and the Flying Chairs dataset [11], respectively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 506, + 545, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 545, + 566 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 545, + 566 + ], + "type": "text", + "content": "Evaluation metrics. Following previous works [63], we use Multi-Scale Structural Similarity Index Measure (MSSSIM) [61] and a perceptual metric LPIPS [73] for quantitative evaluation. To measure the model complexity, we calculate the GFLOPs." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 574, + 480, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 574, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 306, + 574, + 480, + 588 + ], + "type": "text", + "content": "4.2. Comparison to State-of-the-Arts" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 677 + ], + "type": "text", + "content": "We compare our DMVFN with state-of-the-art video prediction methods. These methods fall into two categories: the methods requiring only RGB images as input (e.g., PredNet [37], MCNET [58], DVF [35], CorrWise [13], OPT [63]) and the methods requiring extra information as input (e.g., Vid2vid [59], Seg2vid [41], FVS [62], SADM [2])." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "Quantitative results. The quantitative results are reported in Table 1 and Table 2. When calculating the GFLOPs of OPT [63], the number of iterations is set as 3,000. 
In" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6126" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 70, + 225, + 172 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 225, + 172 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 225, + 172 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 225, + 172 + ], + "type": "image", + "image_path": "3f0733e866e576f4d0e58151b4572ae6af34dbdc8fb0b18269c865f2665ccaa7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 175, + 145, + 186 + ], + "lines": [ + { + "bbox": [ + 133, + 175, + 145, + 186 + ], + "spans": [ + { + "bbox": [ + 133, + 175, + 145, + 186 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 230, + 70, + 362, + 172 + ], + "blocks": [ + { + "bbox": [ + 230, + 70, + 362, + 172 + ], + "lines": [ + { + "bbox": [ + 230, + 70, + 362, + 172 + ], + "spans": [ + { + "bbox": [ + 230, + 70, + 362, + 172 + ], + "type": "image", + "image_path": "39725d0d7e1000ea5e1de7127b2b1616a228e7669411d738c8dd911c01cbc418.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 291, + 175, + 302, + 185 + ], + "lines": [ + { + "bbox": [ + 291, + 175, + 302, + 185 + ], + "spans": [ + { + "bbox": [ + 291, + 175, + 302, + 185 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 370, + 70, + 541, + 172 + ], + "blocks": [ + { + "bbox": [ + 370, + 70, + 541, + 172 + ], + "lines": [ + { + "bbox": [ + 370, + 70, + 541, + 172 + ], + "spans": [ + { + "bbox": [ + 370, + 70, + 541, + 172 + ], + "type": "image", + "image_path": "08080ea454f757a59be66a65cdc509558e3ac92d93123950f63248238175048b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 449, + 175, + 460, + 185 + ], + "lines": [ + { + "bbox": [ + 449, + 175, + 460, + 185 + ], + "spans": [ + { + "bbox": [ + 449, + 175, + 460, + 185 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 65, + 247, + 126, + 311 + ], + "blocks": [ + { + "bbox": [ + 46, + 195, + 547, + 229 + ], + "lines": [ + { + "bbox": [ + 46, + 195, + 547, + 229 + ], + "spans": [ + { + "bbox": [ + 46, + 195, + 547, + 229 + ], + "type": "text", + "content": "Figure 5. (a): Average usage rate on videos with different motion magnitudes. \"Fast\": tested on Videox-Fast. \"Medium\": tested on Vimeo-Medium. \"Slow\": tested on Videox-Slow. (b): Difference between \"Fast\"/\"Slow\" and \"Medium\" of (a). (c): Averaged usage rate on different time intervals between two input frames from Videox-Slow. \"Int\": time interval." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 65, + 247, + 126, + 311 + ], + "lines": [ + { + "bbox": [ + 65, + 247, + 126, + 311 + ], + "spans": [ + { + "bbox": [ + 65, + 247, + 126, + 311 + ], + "type": "image", + "image_path": "4c8a4c27bf3832b14061b616cb0ab93ca2ae743d6b999ccf8921122aa33b3b90.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 84, + 312, + 105, + 322 + ], + "lines": [ + { + "bbox": [ + 84, + 312, + 105, + 322 + ], + "spans": [ + { + "bbox": [ + 84, + 312, + 105, + 322 + ], + "type": "inline_equation", + "content": "t - 1" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 65, + 326, + 126, + 389 + ], + "blocks": [ + { + "bbox": [ + 65, + 326, + 126, + 389 + ], + "lines": [ + { + "bbox": [ + 65, + 326, + 126, + 389 + ], + "spans": [ + { + "bbox": [ + 65, + 326, + 126, + 389 + ], + "type": "image", + "image_path": "3d4ed97f0fc99b344c776ee05da6abd26ec989a8e76a6701f109f015efc2fb44.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 92, + 393, + 97, + 400 + ], + "lines": [ + { + "bbox": [ + 92, + 393, + 97, + 400 + ], + "spans": [ + { + "bbox": [ + 92, + 393, + 97, + 400 + ], + "type": "text", + "content": "t" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 46, + 409, + 287, + 441 + ], + "lines": [ + { + "bbox": [ + 46, + 409, + 287, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 409, + 287, + 441 + ], + "type": "text", + "content": "Figure 6. Visual effect comparison in the Viceo-Test [69] dataset. our DMVFN faithfully reproduces the motion of the hand and the head with less distortion and artifacts." 
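Because the network maps two observed frames to the next one, the t+3 and t+5 results discussed around Figures 4 and 6 come from feeding predictions back as inputs. A minimal rollout sketch is given below; the model(prev, cur) call signature is an assumption for illustration.

```python
import torch

@torch.no_grad()
def rollout(model, frame_prev, frame_cur, steps=5):
    # Autoregressively predict `steps` future frames from two observed frames;
    # steps=5 matches the Cityscapes/KITTI evaluation, steps=3 the DAVIS17 one.
    preds = []
    prev, cur = frame_prev, frame_cur
    for _ in range(steps):
        nxt = model(prev, cur)   # predict I_{t+1} from (I_{t-1}, I_t)
        preds.append(nxt)
        prev, cur = cur, nxt     # accumulated error grows with the horizon
    return preds
```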
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 137, + 258, + 271, + 320 + ], + "blocks": [ + { + "bbox": [ + 137, + 258, + 271, + 320 + ], + "lines": [ + { + "bbox": [ + 137, + 258, + 271, + 320 + ], + "spans": [ + { + "bbox": [ + 137, + 258, + 271, + 320 + ], + "type": "image", + "image_path": "3a68218ddcd92346e21efec19d1a79797112fb3802e836176981175cf5eb873d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 137, + 319, + 271, + 382 + ], + "blocks": [ + { + "bbox": [ + 137, + 319, + 271, + 382 + ], + "lines": [ + { + "bbox": [ + 137, + 319, + 271, + 382 + ], + "spans": [ + { + "bbox": [ + 137, + 319, + 271, + 382 + ], + "type": "image", + "image_path": "737a7e019ba2a9232a98e72cc2e09a2628da9a7eaa613f08940afc7d6fb84fa1.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 385, + 252, + 396 + ], + "lines": [ + { + "bbox": [ + 171, + 385, + 252, + 396 + ], + "spans": [ + { + "bbox": [ + 171, + 385, + 252, + 396 + ], + "type": "inline_equation", + "content": "t + 1" + }, + { + "bbox": [ + 171, + 385, + 252, + 396 + ], + "type": "inline_equation", + "content": "t + 3" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 451, + 287, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 287, + 582 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 287, + 582 + ], + "type": "text", + "content": "terms of MS-SSIM and LPIPS, our DMVFN achieves much better results than the other methods in both short-term and long-term video prediction tasks. The GFLOPs of our DMVFN is considerably smaller than the comparison methods. These results show the proposed routing strategy reduces almost half the number of GFLOPs while maintaining comparable performance. Because the decrease of GFLOPs is not strictly linear with the actual latency [45], we measure the running speed on Titan 2080Ti. For predicting a 720P frame, DVF [35] spends 0.130s on average, while our DMVFN only needs 0.023s on average." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 50, + 604, + 284, + 657 + ], + "blocks": [ + { + "bbox": [ + 64, + 586, + 268, + 597 + ], + "lines": [ + { + "bbox": [ + 64, + 586, + 268, + 597 + ], + "spans": [ + { + "bbox": [ + 64, + 586, + 268, + 597 + ], + "type": "text", + "content": "Table 3. Comparison between DMVFN and STRPM." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 604, + 284, + 657 + ], + "lines": [ + { + "bbox": [ + 50, + 604, + 284, + 657 + ], + "spans": [ + { + "bbox": [ + 50, + 604, + 284, + 657 + ], + "type": "table", + "html": "
<table><tr><td rowspan="3">Method</td><td colspan="2">UCF Sports</td><td colspan="2">Human3.6M</td></tr>
<tr><td>t+1</td><td>t+6</td><td>t+1</td><td>t+4</td></tr>
<tr><td>PSNR↑ / LPIPS↓</td><td>PSNR↑ / LPIPS↓</td><td>PSNR↑ / LPIPS↓</td><td>PSNR↑ / LPIPS↓</td></tr>
<tr><td>STRPM</td><td>28.54 / 20.69</td><td>20.59 / 41.11</td><td>33.32 / 9.74</td><td>29.01 / 10.44</td></tr>
<tr><td>DMVFN</td><td>30.05 / 10.24</td><td>22.67 / 22.50</td><td>35.07 / 7.48</td><td>29.56 / 9.74</td></tr></table>
", + "image_path": "e6f0f564d22f3d724ca7a29a9ef72a6a0b79521cefbb962a5b128719ac62bcf9.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 287, + 714 + ], + "type": "text", + "content": "More comparison. The quantitative results compared with STRPM [8] are reported in Table 3. We train our DMVFN in UCFSports and Human3.6M datasets following the setting in [8]. We also measure the average running speed" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "text", + "content": "on TITAN 2080Ti. To predict a " + }, + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "text", + "content": " frame, our DMVFN is averagely " + }, + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "inline_equation", + "content": "4.06 \\times" + }, + { + "bbox": [ + 305, + 249, + 545, + 274 + ], + "type": "text", + "content": " faster than STRPM [8]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 274, + 545, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 274, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 305, + 274, + 545, + 346 + ], + "type": "text", + "content": "Qualitative results on different datasets are shown in Figure 3, Figure 4 and Figure 6. As we can see, the frames predicted by our DMVFN exhibit better temporal continuity and are more consistent with the ground truth than those by the other methods. Our DMVFN is able to predict correct motion while preserving the shape and texture of objects." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 306, + 357, + 399, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 357, + 399, + 369 + ], + "spans": [ + { + "bbox": [ + 306, + 357, + 399, + 369 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 376, + 545, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 376, + 545, + 424 + ], + "spans": [ + { + "bbox": [ + 305, + 376, + 545, + 424 + ], + "type": "text", + "content": "Here, we perform extensive ablation studies to further study the effectiveness of components in our DMVFN. The experiments are performed on the Cityscapes [9] and KITTI [12] datasets unless otherwise specified." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 426, + 546, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 426, + 546, + 593 + ], + "spans": [ + { + "bbox": [ + 305, + 426, + 546, + 593 + ], + "type": "text", + "content": "1) How effective is the proposed Routing Module? As suggested in [65, 66], we divide the Vimeo-90K [69] test set into three subsets: Vimeo-Fast, Vimeo-Medium, and Vimeo-Slow, which correspond to the motion range. To verify that our DMVFN can perceive motion scales and adaptively choose the proper sub-networks, we retrain our DMVFN on the Vimeo-Train [69] using the same training strategy in §3.3. We calculate the averaged usage rate of each MVFB on three test subsets. 
From Figures 5 (a) and 5 (b), we observe that our DMVFN prefers to select MVFBs with large scale (e.g., 4x) for two frames with large motion. There are two MVFBs with clearly smaller selection probability. We believe this reflects the inductive bias of our DMVFN on different combinations of scaling factors." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": "To further verify that our DMVFN also perceives the size of the time interval, we test our DMVFN on the two frames with different time intervals (but still in the same video). We choose Vimeo-Slow as the test set, and set the time intervals as 1, 3, and 5. The results are shown in Figure 5 " + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 304, + 594, + 545, + 713 + ], + "type": "text", + "content": ". We observe that our DMVFN prefers large-scale blocks on long-interval inputs, and small-scale blocks on short-interval inputs. This verifies that our DMVFN can perceive temporal information and dynamically select different sub-networks to handle the input frames with different time intervals." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6127" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 288, + 167 + ], + "type": "text", + "content": "To further study how the MVFBs are selected, we select 103 video sequences (contain a high-speed moving car and a relatively static background) from the KITTI dataset, denoted as KITTI-A. As shown in Table 4, on the KITTI-A dataset, our DMVFN prefers to choose MVFBs with large scaling factors to capture large movements. The flow estimation for static backgrounds is straightforward, while the large motion dominates the choice of our DMVFN." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 191, + 285, + 218 + ], + "blocks": [ + { + "bbox": [ + 47, + 171, + 286, + 182 + ], + "lines": [ + { + "bbox": [ + 47, + 171, + 286, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 171, + 286, + 182 + ], + "type": "text", + "content": "Table 4. Average usage rate " + }, + { + "bbox": [ + 47, + 171, + 286, + 182 + ], + "type": "inline_equation", + "content": "\\left( {10}^{-2}\\right)" + }, + { + "bbox": [ + 47, + 171, + 286, + 182 + ], + "type": "text", + "content": " of MVFBs in our DMVFN." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 191, + 285, + 218 + ], + "lines": [ + { + "bbox": [ + 52, + 191, + 285, + 218 + ], + "spans": [ + { + "bbox": [ + 52, + 191, + 285, + 218 + ], + "type": "table", + "html": "
<table><tr><td>Scale</td><td>4</td><td>4</td><td>4</td><td>2</td><td>2</td><td>2</td><td>1</td><td>1</td><td>1</td></tr>
<tr><td>KITTI-A</td><td>80.95</td><td>34.22</td><td>26.70</td><td>81.19</td><td>73.91</td><td>44.90</td><td>55.34</td><td>0.49</td><td>0</td></tr></table>
", + "image_path": "fb64041f5933989dfeb58b006b0d642139c7e7041f25764be09b4830030e7206.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 50, + 268, + 285, + 353 + ], + "blocks": [ + { + "bbox": [ + 46, + 236, + 286, + 258 + ], + "lines": [ + { + "bbox": [ + 46, + 236, + 286, + 258 + ], + "spans": [ + { + "bbox": [ + 46, + 236, + 286, + 258 + ], + "type": "text", + "content": "Table 5. Routing Module based on STEBS is effective. The evaluation metric is MS-SSIM " + }, + { + "bbox": [ + 46, + 236, + 286, + 258 + ], + "type": "inline_equation", + "content": "(\\times 10^{-2})" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 268, + 285, + 353 + ], + "lines": [ + { + "bbox": [ + 50, + 268, + 285, + 353 + ], + "spans": [ + { + "bbox": [ + 50, + 268, + 285, + 353 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Setting</td><td colspan="3">Cityscapes</td><td colspan="3">KITTI</td></tr>
<tr><td>t+1</td><td>t+3</td><td>t+5</td><td>t+1</td><td>t+3</td><td>t+5</td></tr>
<tr><td>Copy last frame</td><td>76.95</td><td>68.82</td><td>64.45</td><td>58.31</td><td>48.99</td><td>44.16</td></tr>
<tr><td>w/o routing</td><td>95.29</td><td>87.91</td><td>81.48</td><td>88.06</td><td>76.53</td><td>68.29</td></tr>
<tr><td>Random</td><td>91.97</td><td>82.11</td><td>70.05</td><td>81.31</td><td>69.89</td><td>62.42</td></tr>
<tr><td>Gumbel Softmax</td><td>95.05</td><td>87.57</td><td>79.54</td><td>87.42</td><td>75.56</td><td>65.83</td></tr>
<tr><td>STEBS</td><td>95.73</td><td>89.24</td><td>83.45</td><td>88.53</td><td>78.01</td><td>70.52</td></tr></table>
", + "image_path": "2eb77f5f091e373a8296da3d5f90aa01ebe24e7f001a4f7d192d020bd14e8d62.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 364, + 287, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 364, + 287, + 495 + ], + "spans": [ + { + "bbox": [ + 46, + 364, + 287, + 495 + ], + "type": "text", + "content": "2) How to design the Routing Module? A trivial solution is to process the routing probability " + }, + { + "bbox": [ + 46, + 364, + 287, + 495 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 46, + 364, + 287, + 495 + ], + "type": "text", + "content": " with Gumbel Softmax. The comparison results of our DMVFNs with different differentiable routing methods are summarized in Table 5. Our DMVFN with STEBS outperforms the DMVFN variant with Gumbel Softmax on MS-SSIM, especially for long-term prediction. The DMVFN variant with Gumbel Softmax usually degenerates to a fixed and static structure. We also compare with the DMVFN randomly selecting each MVFB with probability 0.5 (denoted as \"Random\") and that without routing module (denoted as \"w/o routing\")." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 50, + 536, + 285, + 606 + ], + "blocks": [ + { + "bbox": [ + 46, + 504, + 286, + 526 + ], + "lines": [ + { + "bbox": [ + 46, + 504, + 286, + 526 + ], + "spans": [ + { + "bbox": [ + 46, + 504, + 286, + 526 + ], + "type": "text", + "content": "Table 6. Results of our DMVFN with different scaling factor settings. The evaluation metric is MS-SSIM " + }, + { + "bbox": [ + 46, + 504, + 286, + 526 + ], + "type": "inline_equation", + "content": "(\\times 10^{-2})" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 536, + 285, + 606 + ], + "lines": [ + { + "bbox": [ + 50, + 536, + 285, + 606 + ], + "spans": [ + { + "bbox": [ + 50, + 536, + 285, + 606 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Setting in DMVFN</td><td colspan="3">Cityscapes</td><td colspan="3">KITTI</td></tr>
<tr><td>t+1</td><td>t+3</td><td>t+5</td><td>t+1</td><td>t+3</td><td>t+5</td></tr>
<tr><td>[1]</td><td>94.70</td><td>87.26</td><td>80.93</td><td>87.64</td><td>76.71</td><td>68.76</td></tr>
<tr><td>[2,1]</td><td>95.30</td><td>87.93</td><td>82.02</td><td>87.97</td><td>77.23</td><td>69.58</td></tr>
<tr><td>[4,2,1]</td><td>95.73</td><td>89.24</td><td>83.45</td><td>88.53</td><td>78.01</td><td>70.52</td></tr></table>
", + "image_path": "c76042f224820397a6cd8bf19de79107ade7f3193736d170c038df008cfe74e5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 617, + 286, + 713 + ], + "type": "text", + "content": "3) How to set the scaling factors? We evaluate our DMVFN with different scaling factors. We use three non-increasing factor sequences of “[1, 1, 1, 1, 1, 1, 1, 1, 1]”, “[2, 2, 2, 2, 2, 1, 1, 1, 1]” and “[4, 4, 4, 2, 2, 2, 1, 1, 1]”, denoted as “[1]”, “[2, 1]” and “[4, 2, 1]”, respectively. The results are listed in Table 6. Our DMVFN with “[4, 2, 1]” performs better than that with “[2, 1]” and “[1]”. The gap is more obvious on longer-term future frames." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 309, + 102, + 544, + 180 + ], + "blocks": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "lines": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "spans": [ + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "type": "text", + "content": "Table 7. Spatial path is effective in our DMVFN. The evaluation metric is MS-SSIM " + }, + { + "bbox": [ + 306, + 71, + 545, + 92 + ], + "type": "inline_equation", + "content": "(\\times 10^{-2})" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 102, + 544, + 180 + ], + "lines": [ + { + "bbox": [ + 309, + 102, + 544, + 180 + ], + "spans": [ + { + "bbox": [ + 309, + 102, + 544, + 180 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Setting</td><td colspan="3">Cityscapes</td><td colspan="3">KITTI</td></tr>
<tr><td>t+1</td><td>t+3</td><td>t+5</td><td>t+1</td><td>t+3</td><td>t+5</td></tr>
<tr><td>w/o r, w/o path</td><td>94.99</td><td>87.59</td><td>80.98</td><td>87.75</td><td>76.22</td><td>67.86</td></tr>
<tr><td>w/o r</td><td>95.29</td><td>87.91</td><td>81.48</td><td>88.06</td><td>76.53</td><td>68.29</td></tr>
<tr><td>w/o path</td><td>95.55</td><td>88.89</td><td>83.03</td><td>88.29</td><td>77.53</td><td>69.86</td></tr>
<tr><td>DMVFN</td><td>95.73</td><td>89.24</td><td>83.45</td><td>88.53</td><td>78.01</td><td>70.52</td></tr></table>
", + "image_path": "4f3ecade755763a7255e4d18f696f7d3556e94ec11563de0af4341b04971be4d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 200, + 545, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 200, + 545, + 272 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 545, + 272 + ], + "type": "text", + "content": "4) How effective is the spatial path? To verify the effectiveness of the spatial path in our DMVFN, we compare it with the DMVFN without spatial path (denoted as \"w/o path\"). The results listed in Table 7 show our DMVFN enjoys better performance with the spatial path, no matter with or without the routing module (denoted as \"w/o r\")." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 283, + 378, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 283, + 378, + 295 + ], + "spans": [ + { + "bbox": [ + 306, + 283, + 378, + 295 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 303, + 545, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 303, + 545, + 482 + ], + "spans": [ + { + "bbox": [ + 304, + 303, + 545, + 482 + ], + "type": "text", + "content": "In this work, we developed an efficient Dynamic Multiscale Voxel Flow Network (DMVFN) that excels previous video prediction methods on dealing with complex motions of different scales. With the proposed routing module, our DMVFN adaptively activates different sub-networks based on the input frames, improving the prediction performance while reducing the computation costs. Experiments on diverse benchmark datasets demonstrated that our DMVFN achieves state-of-the-art performance with greatly reduced computation burden. We believe our DMVFN can provide general insights for long-term prediction, video frame synthesis, and representation learning [14, 15]. We hope our DMVFN will inspire further research in light-weight video processing and make video prediction more accessible for downstream tasks such as CODEC for streaming video." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 483, + 545, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 483, + 545, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 483, + 545, + 649 + ], + "type": "text", + "content": "Our DMVFN can be improved at several aspects. Firstly, iteratively predicting future frames suffers from accumulate errors. This issue may be addressed by further bringing explicit temporal modeling [22, 31, 66, 68] to our DMVFN. Secondly, our DMVFN simply selects the nodes in a chain network topology, which can be improved by exploring more complex topology. For example, our routing module can be extended to automatically determine the scaling factors for parallel branches [33]. Thirdly, forecast uncertainty modeling is more of an extrapolation abiding to past flow information, especially considering bifurcation, which exceeds the current capability of our DMVFN. We believe that research on long-term forecast uncertainty may uncover deeper interplay with dynamic modeling methods [1, 14]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 649, + 545, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 649, + 545, + 709 + ], + "spans": [ + { + "bbox": [ + 304, + 649, + 545, + 709 + ], + "type": "text", + "content": "Acknowledgements. 
We sincerely thank Wen Heng for his exploration on neural architecture search at Megvii Research and Tianyuan Zhang for meaningful suggestions. This work is supported in part by the National Natural Science Foundation of China (No. 62002176 and 62176068)." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6128" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 288, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 123 + ], + "type": "text", + "content": "[1] Adil Kaan Akan, Erkut Erdem, Aykut Erdem, and Fatma Güney. Slamp: Stochastic latent appearance and motion prediction. In ICCV, 2021. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 124, + 288, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 124, + 288, + 158 + ], + "spans": [ + { + "bbox": [ + 53, + 124, + 288, + 158 + ], + "type": "text", + "content": "[2] Xinzhu Bei, Yanchao Yang, and Stefano Soatto. Learning semantic-aware dynamics for video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2021. 2, 5, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 288, + 201 + ], + "type": "text", + "content": "[3] Yoshua Bengio, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 288, + 235 + ], + "type": "text", + "content": "[4] Tolga Bolukbasi, Joseph Wang, Ofer Dekel, and Venkatesh Saligrama. Adaptive neural networks for efficient inference. In Inf. Conf. Mach. Learn., 2017. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 236, + 288, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 288, + 269 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 288, + 269 + ], + "type": "text", + "content": "[5] Wonmin Byeon, Qin Wang, Rupesh Kumar Srivastava, and Petros Koumoutsakos. Contextvp: Fully context-aware video prediction. In Eur. Conf. Comput. Vis., 2018. 1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 269, + 288, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 269, + 288, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 269, + 288, + 302 + ], + "type": "text", + "content": "[6] Lluis Castrejon, Nicolas Ballas, and Aaron Courville. 
Improved conditional vrnns for video prediction. In Int. Conf. Comput. Vis., 2019. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 302, + 288, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 288, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 288, + 346 + ], + "type": "text", + "content": "[7] Rohan Chandra, Uttaran Bhattacharya, Aniket Bera, and Dinesh Manocha. Traphic: Trajectory prediction in dense and heterogeneous traffic using weighted interactions. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 346, + 288, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 288, + 391 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 288, + 391 + ], + "type": "text", + "content": "[8] Zheng Chang, Xinfeng Zhang, Shanshe Wang, Siwei Ma, and Wen Gao. Strpm: A spatiotemporal residual predictive model for high-resolution video prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 392, + 288, + 446 + ], + "type": "text", + "content": "[9] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 1, 2, 5, 6, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 447, + 288, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 447, + 288, + 479 + ], + "spans": [ + { + "bbox": [ + 48, + 447, + 288, + 479 + ], + "type": "text", + "content": "[10] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Int. Conf. Comput. Vis., 2017. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 480, + 288, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 480, + 288, + 534 + ], + "spans": [ + { + "bbox": [ + 48, + 480, + 288, + 534 + ], + "type": "text", + "content": "[11] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Int. Conf. Comput. Vis., 2015. 2, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 535, + 288, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 535, + 288, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 535, + 288, + 567 + ], + "type": "text", + "content": "[12] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. I. J. Robotics Res., 2013. 2, 5, 6, 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 568, + 288, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 288, + 612 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 288, + 612 + ], + "type": "text", + "content": "[13] Daniel Geng, Max Hamilton, and Andrew Owens. Comparing correspondences: Video prediction with correspondence-wise losses. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 
5, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 613, + 288, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 613, + 288, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 613, + 288, + 635 + ], + "type": "text", + "content": "[14] David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018. 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 636, + 288, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 288, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 288, + 669 + ], + "type": "text", + "content": "[15] Danijar Hafner, Jurgis Pasukonis, Jimmy Ba, and Timothy Lillicrap. Mastering diverse domains through world models. arXiv preprint arXiv:2301.04104, 2023. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 288, + 712 + ], + "type": "text", + "content": "[16] Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. Dynamic neural networks: A survey. In IEEE Trans. Pattern Anal. Mach. Intell., 2021. 2, 4" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 714 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 116 + ], + "type": "text", + "content": "[17] Yunhui Han, Kunming Luo, Ao Luo, Jiangyu Liu, Haoqiang Fan, Guiming Luo, and Shuaicheng Liu. Realflow: Embased realistic optical flow dataset generation from videos. In ECCV, 2022. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 545, + 161 + ], + "type": "text", + "content": "[18] Adam W Harley, Konstantinos G Derpanis, and Iasonas Kokkinos. Segmentation-aware convolutional networks using local attention masks. In Int. Conf. Comput. Vis., 2017. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 162, + 545, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 162, + 545, + 185 + ], + "spans": [ + { + "bbox": [ + 307, + 162, + 545, + 185 + ], + "type": "text", + "content": "[19] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. In Neural Comput., 1997. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "spans": [ + { + "bbox": [ + 307, + 185, + 545, + 229 + ], + "type": "text", + "content": "[20] Xiaotao Hu, Jun Xu, Shuhang Gu, Ming-Ming Cheng, and Li Liu. Restore globally, refine locally: A mask-guided scheme to accelerate super-resolution networks. In Eur. Conf. Comput. Vis., 2022. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 274 + ], + "type": "text", + "content": "[21] Zhaoyang Huang, Xiaoyu Shi, Chao Zhang, Qiang Wang, Ka Chun Cheung, Hongwei Qin, Jifeng Dai, and Hongsheng Li. 
Flowformer: A transformer architecture for optical flow. In Eur. Conf. Comput. Vis., 2022. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 545, + 319 + ], + "type": "text", + "content": "[22] Zhewei Huang, Tianyuan Zhang, Wen Heng, Boxin Shi, and Shuchang Zhou. Real-time intermediate flow estimation for video frame interpolation. In Eur. Conf. Comput. Vis., 2022, 1, 2, 3, 6, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 319, + 545, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 545, + 364 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 545, + 364 + ], + "type": "text", + "content": "[23] Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Quantized neural networks: Training neural networks with low precision weights and activations. In J. Mach. Learn. Res., 2017. 5" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 398 + ], + "type": "text", + "content": "[24] Ryan Humble, Maying Shen, Jorge Albericio Latorre, Eric Darve, and Jose Alvarez. Soft masking for cost-constrained channel pruning. In Eur. Conf. Comput. Vis., 2022. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 443 + ], + "type": "text", + "content": "[25] Eddy Ilg, Nikolaus Mayer, Tonmoy Saikia, Margret Keuper, Alexey Dosovitskiy, and Thomas Brox. Flownet 2.0: Evolution of optical flow estimation with deep networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 444, + 545, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 545, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 545, + 477 + ], + "type": "text", + "content": "[26] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Adv. Neural Inform. Process. Syst., 2015. 3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "spans": [ + { + "bbox": [ + 307, + 478, + 545, + 510 + ], + "type": "text", + "content": "[27] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In Int. Conf. Learn. Represent., 2017. 5" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 511, + 545, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 511, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 307, + 511, + 545, + 567 + ], + "type": "text", + "content": "[28] Huaizu Jiang, Deqing Sun, Varun Jampani, Ming-Hsuan Yang, Erik Learned-Miller, and Jan Kautz. Super slomo: High quality estimation of multiple intermediate frames for video interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 
3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 567, + 545, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 567, + 545, + 600 + ], + "spans": [ + { + "bbox": [ + 307, + 567, + 545, + 600 + ], + "type": "text", + "content": "[29] Rico Jonschkowski, Austin Stone, Jonathan T Barron, Ariel Gordon, Kurt Konolige, and Anelia Angelova. What matters in unsupervised optical flow. In ECCV, 2020. 2" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 601, + 545, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 601, + 545, + 633 + ], + "spans": [ + { + "bbox": [ + 307, + 601, + 545, + 633 + ], + "type": "text", + "content": "[30] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Int. Conf. Learn. Represent., 2015. 5" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 635, + 545, + 689 + ], + "type": "text", + "content": "[31] Lingtong Kong, Boyuan Jiang, Donghao Luo, Wenqing Chu, Xiaoming Huang, Ying Tai, Chengjie Wang, and Jie Yang. Ifrnet: Intermediate feature refine network for efficient frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 8" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 690, + 545, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 690, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 690, + 545, + 714 + ], + "type": "text", + "content": "[32] Wonkwang Lee, Whie Jung, Han Zhang, Ting Chen, Jing Yu Koh, Thomas Huang, Hyungsuk Yoon, Honglak Lee, and" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "6129" + } + ] + } + ], + "index": 35 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "spans": [ + { + "bbox": [ + 66, + 72, + 287, + 106 + ], + "type": "text", + "content": "Seunghoon Hong. Revisiting hierarchical approach for persistent long-term video prediction. In Int. Conf. Learn. Represent., 2021. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 107, + 287, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 107, + 287, + 139 + ], + "spans": [ + { + "bbox": [ + 48, + 107, + 287, + 139 + ], + "type": "text", + "content": "[33] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Int. Conf. Learn. Represent., 2019. 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "text", + "content": "[34] Wenqian Liu, Abhishek Sharma, Octavia Camps, and Mario Sznaier. Dyan: A dynamical atoms-based network for video prediction. In Eur. Conf. Comput. Vis., 2018. 
6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 175, + 287, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 207 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 207 + ], + "type": "text", + "content": "[35] Ziwei Liu, Raymond A Yeh, Xiaou Tang, Yiming Liu, and Aseem Agarwala. Video frame synthesis using deep voxel flow. In Int. Conf. Comput. Vis., 2017. 1, 2, 3, 4, 5, 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 209, + 287, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 239 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 239 + ], + "type": "text", + "content": "[36] Ilya Loshchilov and F. Hutter. Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101, 2017. 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 243, + 287, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 243, + 287, + 284 + ], + "spans": [ + { + "bbox": [ + 48, + 243, + 287, + 284 + ], + "type": "text", + "content": "[37] William Lotter, Gabriel Kreiman, and David Cox. Deep predictive coding networks for video prediction and unsupervised learning. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 287, + 287, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 287, + 287, + 319 + ], + "spans": [ + { + "bbox": [ + 48, + 287, + 287, + 319 + ], + "type": "text", + "content": "[38] Kunming Luo, Chuan Wang, Shuaicheng Liu, Haoqiang Fan, Jue Wang, and Jian Sun. Upflow: Upsampling pyramid for unsupervised optical flow learning. In CVPR, 2021. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 321, + 287, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 321, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 48, + 321, + 287, + 354 + ], + "type": "text", + "content": "[39] Julieta Martinez, Michael J Black, and Javier Romero. On human motion prediction using recurrent neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 355, + 287, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 355, + 287, + 408 + ], + "spans": [ + { + "bbox": [ + 48, + 355, + 287, + 408 + ], + "type": "text", + "content": "[40] Sergiu Oprea, Pablo Martinez-Gonzalez, Alberto Garcia-Garcia, John Alejandro Castro-Vargas, Sergio Orts-Escolano, Jose Garcia-Rodriguez, and Antonis Argyros. A review on deep learning techniques for video prediction. In IEEE Trans. Pattern Anal. Mach. Intell., 2020. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "spans": [ + { + "bbox": [ + 48, + 411, + 287, + 453 + ], + "type": "text", + "content": "[41] Junting Pan, Chengyu Wang, Xu Jia, Jing Shao, Lu Sheng, Junjie Yan, and Xiaogang Wang. Video generation from single semantic label map. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 1, 2, 5, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 456, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 456, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 456, + 287, + 487 + ], + "type": "text", + "content": "[42] Sylvain Paris, Samuel W Hasinoff, and Jan Kautz. 
Local laplacian filters: edge-aware image processing with a laplacian pyramid. ACM Trans. Graph., 2011. 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 489, + 287, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 489, + 287, + 532 + ], + "spans": [ + { + "bbox": [ + 48, + 489, + 287, + 532 + ], + "type": "text", + "content": "[43] Jordi Pont-Tuset, Federico Perazzi, Sergi Caelles, Pablo Arbeláez, Alex Sorkine-Hornung, and Luc Van Gool. The 2017 davis challenge on video object segmentation. arXiv preprint arXiv:1704.00675, 2017. 2, 4, 6" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 534, + 287, + 567 + ], + "type": "text", + "content": "[44] Xiaojuan Qi, Zhengzhe Liu, Qifeng Chen, and Jiaya Jia. 3d motion decomposition for rgbd future dynamic scene synthesis. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 48, + 568, + 287, + 601 + ], + "type": "text", + "content": "[45] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dólár. Designing network design spaces. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "spans": [ + { + "bbox": [ + 48, + 602, + 287, + 634 + ], + "type": "text", + "content": "[46] Anurag Ranjan and Michael J Black. Optical flow estimation using a spatial pyramid network. In IEEE Conf. Comput. Vis. Pattern Recog., 2017. 2, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 287, + 668 + ], + "type": "text", + "content": "[47] Mengye Ren, Andrei Pokrovsky, Bin Yang, and Raquel Urtasun. Sbnet: Sparse blocks network for fast inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 287, + 712 + ], + "type": "text", + "content": "[48] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-Kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. In Adv. Neural Inform. Process. Syst., 2015. 1" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 105 + ], + "type": "text", + "content": "[49] Hyeonjun Sim, Jihyong Oh, and Munchurl Kim. Xvfi: extreme video frame interpolation. In Int. Conf. Comput. Vis., 2021. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "spans": [ + { + "bbox": [ + 307, + 107, + 545, + 139 + ], + "type": "text", + "content": "[50] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. A dataset of 101 human action classes from videos in the wild. *Cent. Res. Comput. Vis.*, 2012. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 141, + 545, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 141, + 545, + 183 + ], + "spans": [ + { + "bbox": [ + 307, + 141, + 545, + 183 + ], + "type": "text", + "content": "[51] Hang Su, Varun Jampani, Deqing Sun, Orazio Gallo, Erik Learned-Miller, and Jan Kautz. Pixel-adaptive convolutional neural networks. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 186, + 545, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 545, + 227 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 545, + 227 + ], + "type": "text", + "content": "[52] Yu-Chuan Su and Kristen Grauman. Leaving some stones unturned: dynamic feature prioritization for activity detection in streaming video. In *Eur. Conf. Comput. Vis.*, 2016. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 545, + 273 + ], + "type": "text", + "content": "[53] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and cost volume. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 2, 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 275, + 545, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 275, + 545, + 307 + ], + "spans": [ + { + "bbox": [ + 307, + 275, + 545, + 307 + ], + "type": "text", + "content": "[54] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Eur. Conf. Comput. Vis., 2020, 1, 2, 5, 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 309, + 545, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 309, + 545, + 351 + ], + "spans": [ + { + "bbox": [ + 307, + 309, + 545, + 351 + ], + "type": "text", + "content": "[55] Surat Teerapittayanon, Bradley McDanel, and Hsiang-Tsung Kung. Branchynet: Fast inference via early exiting from deep neural networks. In Int. Conf. Pattern Recog., 2016. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 354, + 545, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 354, + 545, + 385 + ], + "spans": [ + { + "bbox": [ + 307, + 354, + 545, + 385 + ], + "type": "text", + "content": "[56] Andreas Veit and Serge Belongie. Convolutional networks with adaptive inference graphs. In Eur. Conf. Comput. Vis., 2018. 2" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 545, + 420 + ], + "type": "text", + "content": "[57] Thomas Verelst and Tinne Tuytelaars. Dynamic convolutions: Exploiting spatial sparsity for faster inference. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 422, + 545, + 464 + ], + "type": "text", + "content": "[58] Ruben Villegas, Jimei Yang, Seunghoon Hong, Xunyu Lin, and Honglak Lee. Decomposing motion and content for natural video sequence prediction. In Int. Conf. Learn. Represent., 2017. 1, 2, 5, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 467, + 545, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 467, + 545, + 509 + ], + "spans": [ + { + "bbox": [ + 307, + 467, + 545, + 509 + ], + "type": "text", + "content": "[59] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In Adv. Neural Inform. Process. Syst., 2018. 1, 2, 5, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "spans": [ + { + "bbox": [ + 307, + 511, + 545, + 544 + ], + "type": "text", + "content": "[60] Xin Wang, Fisher Yu, Zi-Yi Dou, Trevor Darrell, and Joseph E Gonzalez. Skipnet: Learning dynamic routing in convolutional networks. In Eur. Conf. Comput. Vis., 2018. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 545, + 545, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 545, + 545, + 578 + ], + "spans": [ + { + "bbox": [ + 307, + 545, + 545, + 578 + ], + "type": "text", + "content": "[61] Zhou Wang, Eero P Simoncelli, and Alan C Bovik. Multiscale structural similarity for image quality assessment. In Asilomar Conf. Signals Syst. Comput., 2003. 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 579, + 545, + 611 + ], + "type": "text", + "content": "[62] Yue Wu, Rongrong Gao, Jaesik Park, and Qifeng Chen. Future video synthesis with object motion prediction. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 1, 2, 5, 6" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 644 + ], + "type": "text", + "content": "[63] Yue Wu, Qiang Wen, and Qifeng Chen. Optimizing video prediction via video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 1, 3, 5, 6" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "text", + "content": "[64] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 691, + 545, + 712 + ], + "type": "text", + "content": "[65] Xiaoyu Xiang, Yapeng Tian, Yulun Zhang, Yun Fu, Jan P Allebach, and Chenliang Xu. 
Zooming slow-mo: Fast and" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "6130" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 73, + 287, + 600 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 73, + 286, + 95 + ], + "type": "text", + "content": "accurate one-stage space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., 2020. 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[66] Gang Xu, Jun Xu, Zhen Li, Liang Wang, Xing Sun, and Ming-Ming Cheng. Temporal modulation network for controllable space-time video super-resolution. In IEEE Conf. Comput. Vis. Pattern Recog., June 2021. 7, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 183 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 183 + ], + "type": "text", + "content": "[67] Haofei Xu, Jing Zhang, Jianfei Cai, Hamid Rezatofighi, and Dacheng Tao. Gmflow: Learning optical flow via global matching. In IEEE Conf. Comput. Vis. Pattern Recog., 2022. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "spans": [ + { + "bbox": [ + 48, + 186, + 287, + 218 + ], + "type": "text", + "content": "[68] Xiangyu Xu, Li Siyao, Wenxiu Sun, Qian Yin, and Ming-Hsuan Yang. Quadratic video interpolation. In Adv. Neural Inform. Process. Syst., 2019. 2, 3, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 220, + 287, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 220, + 287, + 252 + ], + "spans": [ + { + "bbox": [ + 48, + 220, + 287, + 252 + ], + "type": "text", + "content": "[69] Tianfan Xue, Baian Chen, Jiajun Wu, Donglai Wei, and William T Freeman. Video enhancement with task-oriented flow. In Int. J. Comput. Vis., 2019. 2, 3, 6, 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 254, + 287, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 254, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 48, + 254, + 287, + 297 + ], + "type": "text", + "content": "[70] Serena Yeung, Olga Russakovsky, Greg Mori, and Li Fei-Fei. End-to-end learning of action detection from frame glimpses in videos. In IEEE Conf. Comput. Vis. Pattern Recog., 2016. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "spans": [ + { + "bbox": [ + 48, + 299, + 287, + 342 + ], + "type": "text", + "content": "[71] Changqian Yu, Jingbo Wang, Chao Peng, Changxin Gao, Gang Yu, and Nong Sang. 
Bisenet: Bilateral segmentation network for real-time semantic segmentation. In Eur. Conf. Comput. Vis., 2018. 4" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 343, + 287, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 343, + 287, + 396 + ], + "spans": [ + { + "bbox": [ + 48, + 343, + 287, + 396 + ], + "type": "text", + "content": "[72] Guozhen Zhang, Yuhan Zhu, Haonan Wang, Youxin Chen, Gangshan Wu, and Limin Wang. Extracting motion and appearance via inter-frame attention for efficient video frame interpolation. In IEEE Conf. Comput. Vis. Pattern Recog., 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 399, + 287, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 399, + 287, + 442 + ], + "spans": [ + { + "bbox": [ + 48, + 399, + 287, + 442 + ], + "type": "text", + "content": "[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE Conf. Comput. Vis. Pattern Recog., 2018. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 445, + 287, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 445, + 287, + 487 + ], + "spans": [ + { + "bbox": [ + 48, + 445, + 287, + 487 + ], + "type": "text", + "content": "[74] Shuchang Zhou, Yuxin Wu, Zekun Ni, Xinyu Zhou, He Wen, and Yuheng Zou. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint arXiv:1606.06160, 2016. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 489, + 287, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 489, + 287, + 520 + ], + "spans": [ + { + "bbox": [ + 48, + 489, + 287, + 520 + ], + "type": "text", + "content": "[75] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Malik, and Alexei A Efros. View synthesis by appearance flow. In Eur. Conf. Comput. Vis., 2016. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 523, + 287, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 523, + 287, + 555 + ], + "spans": [ + { + "bbox": [ + 48, + 523, + 287, + 555 + ], + "type": "text", + "content": "[76] Xizhou Zhu, Han Hu, Stephen Lin, and Jifeng Dai. Deformable convnets v2: More deformable, better results. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 557, + 287, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 557, + 287, + 600 + ], + "spans": [ + { + "bbox": [ + 48, + 557, + 287, + 600 + ], + "type": "text", + "content": "[77] Yi Zhu, Karan Sapra, Fitsum A Reda, Kevin J Shih, Shawn Newsam, Andrew Tao, and Bryan Catanzaro. Improving semantic segmentation via video propagation and label relaxation. In IEEE Conf. Comput. Vis. Pattern Recog., 2019. 
5" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 314, + 757 + ], + "type": "text", + "content": "6131" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_content_list.json b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..bf9d2f6f807aa68668dd5df03e43a2777d09675f --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_content_list.json @@ -0,0 +1,1854 @@ +[ + { + "type": "text", + "text": "A General Regret Bound of Preconditioned Gradient Method for DNN Training", + "text_level": 1, + "bbox": [ + 83, + 130, + 887, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongwei Yong Ying Sun Lei Zhang The Hong Kong Polytechnic University", + "bbox": [ + 320, + 181, + 648, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hongwei.yong@polyu.edu.hk, {csysun, cslzhang}@comp.polyu.edu.hk", + "bbox": [ + 205, + 219, + 761, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 233, + 268, + 313, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While adaptive learning rate methods, such as Adam, have achieved remarkable improvement in optimizing Deep Neural Networks (DNNs), they consider only the diagonal elements of the full preconditioned matrix. Though the full-matrix preconditioned gradient methods theoretically have a lower regret bound, they are impractical for use to train DNNs because of the high complexity. In this paper, we present a general regret bound with a constrained full-matrix preconditioned gradient, and show that the updating formula of the preconditioner can be derived by solving a cone-constrained optimization problem. With the block-diagonal and Kronecker-factorized constraints, a specific guide function can be obtained. By minimizing the upper bound of the guide function, we develop a new DNN optimizer, termed AdaBK. A series of techniques, including statistics updating, dampening, efficient matrix inverse root computation, and gradient amplitude preservation, are developed to make AdaBK effective and efficient to implement. The proposed AdaBK can be readily embedded into many existing DNN optimizers, e.g., SGDM and AdamW, and the corresponding SGDM_BK and AdamW_BK algorithms demonstrate significant improvements over existing DNN optimizers on benchmark vision tasks, including image classification, object detection and segmentation. The code is publicly available at https://github.com/Yonghongwei/AdaBK.", + "bbox": [ + 75, + 300, + 472, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 76, + 723, + 209, + 739 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Stochastic gradient descent (SGD) [26] and its variants [21, 23], which update the parameters along the opposite of their gradient directions, have achieved great success in optimizing deep neural networks (DNNs) [14, 24]. Instead of using a uniform learning rate for different parameters, Duchi et al. [5] proposed the AdaGrad method, which adopts an adaptive learning rate for each parameter, and proved that AdaGrad can achieve lower regret bound than SGD. Following AdaGrad, a class of adaptive learning rate gradient descent methods has been proposed. For example,", + "bbox": [ + 75, + 750, + 468, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "RMSProp [30] and AdaDelta [35] introduce the exponential moving average to replace the sum of second-order statistics of the gradient for computing the adaptive learning rate. Adam [15] further adopts the momentum into the gradient, and AdamW [22] employs a weight-decoupled strategy to improve the generalization performance. RAdam [18], Adabelief [38] and Ranger [19,32,37] are proposed to accelerate training and improve the generalization capability over Adam. The adaptive learning rate methods have become the mainstream DNN optimizers.", + "bbox": [ + 496, + 268, + 893, + 421 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In addition to AdaGrad, Duchi et al. [5] provided a full-matrix preconditioned gradient descent (PGD) method that adopts the matrix $\\mathbf{H}_T = (\\sum_{t=1}^T \\mathbf{g}_t \\mathbf{g}_t^\\top)^{\\frac{1}{2}}$ to adjust the gradient $\\mathbf{g}_T$ , where $t$ denotes the iteration number and $T$ is the number of the current iteration. It has been proved [5] that the preconditioned gradient $\\mathbf{H}_T^{-1} \\mathbf{g}_T$ has a lower regret bound than the adaptive learning rate methods that only consider the diagonal elements of $\\mathbf{H}_T$ . However, the full-matrix preconditioned gradient is impractical to use due to its high dimension, which limits its application to DNN optimization. Various works have been reported to solve this problem in parameter space by adding some structural constraints on the full-matrix $\\mathbf{H}_T$ . For instances, GGT [1] stores only the gradients of recent iterations so that the matrix inverse root can be computed efficiently by fast low-rank computation tricks. Yun et al. [34] proposed a mini-block diagonal matrix framework to reduce the cost through coordinate partitioning and grouping strategies. Gupta et al. [9] proposed to extend AdaGrad with Kronecker products of full-matrix preconditioners to make it more efficient in DNN training. Besides, natural gradient approaches [6, 7], which adopt the approximations of the Fisher matrix to correct the descent direction, can also be regarded as full-matrix preconditioners.", + "bbox": [ + 496, + 426, + 893, + 789 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The existing constrained PGD (CPGD) methods, however, are heuristic since manually designed approximations to the full matrix $\\pmb{H}_T$ are employed in them, while their influence on the regret bound is unknown. By far, they lack a general regret-bound theory that can guide us to design the full-matrix preconditioned gradient methods. 
On the other hand, the practicality and effectiveness of these precondi", + "bbox": [ + 496, + 794, + 893, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "CVF", + "bbox": [ + 106, + 2, + 181, + 42 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore.", + "bbox": [ + 236, + 0, + 807, + 46 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "7866", + "bbox": [ + 482, + 944, + 516, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tioner methods are also an issue, which prevents them from being widely used in training DNNs.", + "bbox": [ + 75, + 90, + 468, + 121 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the above-mentioned issues, in this paper we present a theorem to connect the regret bound of the constrained full-matrix preconditioner with a guide function. By minimizing the guide function under the constraints, an updating formula of the preconditioned gradient can be derived. That is, optimizing the guide function of the preconditioner will minimize its regret bound at the same time, while different constraints can yield different updating formulas. With the commonly-used constraints on DNN preconditioners, such as the block-diagonal and Kronecker-factorized constraints [7, 9], specific guide functions can be obtained. By minimizing the upper bound of the guide function, a new optimizer, namely AdaBK, is derived.", + "bbox": [ + 75, + 123, + 467, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We further propose a series of techniques, including statistics updating, dampening, efficient matrix inverse root computation and gradient norm recovery, to make AdaBK more practical to use for DNN optimization. By embedding AdaBK into SGDM and AdamW (or Adam), we develop two new DNN optimizers, SGDM_BK and AdamW_BK. With acceptable extra computation and memory cost, they achieve significant performance gain in convergence speed and generalization capability over state-of-the-art DNN optimizers, as demonstrated in our experiments in image classification, object detection and segmentation.", + "bbox": [ + 75, + 321, + 467, + 488 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For a better understanding of our proposed regret bound and the developed DNN optimizer, in Fig. 1, we illustrate the existing major DNN optimizers and their relationships. SGD and its momentum version (SGDM) apply the same learning rate to all parameters based on their gradient descent directions. The adaptive learning rate methods assign different learning rates to different parameters by using second-order information of the gradients, achieving better convergence performance. The adaptive learning rate methods can be viewed as special cases of PGD methods by considering only the diagonal elements of the full preconditioned matrix of gradients. Our method belongs to the class of PGD methods, while our proposed general regret bound of constrained PGD methods can be applied to the PGD optimizers under different constraints, including AdaGrad, Full-Matrix AdaGrad and our AdaBK.", + "bbox": [ + 75, + 489, + 467, + 731 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Notation system. 
We denote by $\\boldsymbol{w}_t$ and $\\boldsymbol{g}_t$ the weight vector and its gradient of a DNN model in the $t$ -th iteration. Denote by $\\boldsymbol{g}_{t,i}$ the gradient of the $i$ -th sample in a batch in the $t$ -th iteration, we have $\\boldsymbol{g}_t = \\frac{1}{n}\\sum_{i=1}^n\\boldsymbol{g}_{t,i}$ , where $n$ is the batch size. The notations $A \\succeq 0$ and $A \\succ 0$ for a matrix $A$ denote that $A$ is symmetric positive semidefinite (PSD) and symmetric positive definite, respectively. $A \\succeq B$ or $A - B \\succeq 0$ means that $A - B$ is PSD. $\\operatorname{Tr}(A)$ represents the trace of the matrix $A$ . For a PSD matrix $A$ , $A^\\alpha = U\\Sigma^\\alpha U^\\top$ , where $U\\Sigma U^\\top$ is the Singular Value Decomposition (SVD) of $A$ . $||\\boldsymbol{x}||_A = \\sqrt{\\boldsymbol{x}^\\top\\boldsymbol{A}\\boldsymbol{x}}$ is the Maha", + "bbox": [ + 75, + 734, + 467, + 901 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ed9490d0f611aa6f626fe3ae514f1f3953edb31975c0d66540d093ecc18bfa8f.jpg", + "image_caption": [ + "Figure 1. Illustration of the main DNN optimizers." + ], + "image_footnote": [], + "bbox": [ + 498, + 92, + 890, + 345 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "lanobis norm of $\\pmb{x}$ induced by PSD matrix $\\mathbf{A}$ , and its dual norm is $\\| \\pmb{x} \\|_{\\pmb{A}}^{*} = \\sqrt{\\pmb{x}^{\\top} \\pmb{A}^{-1} \\pmb{x}}$ . $\\pmb{A} \\otimes \\pmb{B}$ means the Kronecker product of $\\pmb{A}$ and $\\pmb{B}$ , while $\\pmb{A} \\odot \\pmb{B}$ and $\\pmb{A}^{\\odot \\alpha}$ are the element-wise matrix product and element-wise power operation, respectively. $\\operatorname{Diag}(\\pmb{x})$ is a diagonal matrix with diagonal vector $\\pmb{x}$ , and $\\operatorname{vec}(\\cdot)$ denotes the vectorization function.", + "bbox": [ + 498, + 400, + 890, + 492 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Background", + "text_level": 1, + "bbox": [ + 500, + 511, + 627, + 527 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Online Convex Optimization", + "text_level": 1, + "bbox": [ + 500, + 534, + 754, + 550 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The online convex optimization framework [10, 28] remains the most powerful and popular tool to analyze DNN optimization algorithms, including AdaGrad [5], Adam [15], Shampoo [9], etc. Given an arbitrary, unknown sequence of convex loss functions $\\{f_1(\\pmb{w}),\\dots,f_t(\\pmb{w}),\\dots,f_T(\\pmb{w})\\}$ , we aim to optimize the weight $\\pmb{w}_t$ in the $t$ -th iteration, and evaluate it on the loss function $f_{t}(\\pmb{w})$ . The goal of our optimization process is to minimize the regret, which is defined as follows [10, 28]:", + "bbox": [ + 498, + 555, + 890, + 690 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nR (T) = \\sum_ {t = 1} ^ {T} \\left(f _ {t} \\left(\\boldsymbol {w} _ {t}\\right) - f _ {t} (\\hat {\\boldsymbol {w}})\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 690, + 890, + 718 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\hat{\\boldsymbol{w}} = \\arg \\min_{\\boldsymbol{w}} \\sum_{t=1}^{T} f_t(\\boldsymbol{w})$ . Generally speaking, a lower regret bound means a more effective learning process.", + "bbox": [ + 498, + 718, + 890, + 747 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. 
Regret Bound of Preconditioned Gradient", + "text_level": 1, + "bbox": [ + 500, + 758, + 857, + 773 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As in previous works [5, 9], an online mirror descent with an adaptive time-dependent regularization is adopted for online convex learning. In the $t$ -th iteration, suppose we have obtained the gradient $\\pmb{g}_t = \\nabla f_t(\\pmb{w}_t)$ , then given a PSD matrix $\\pmb{H}_t\\succeq \\mathbf{0}$ , the parameters are updated by optimizing the following objective function:", + "bbox": [ + 498, + 780, + 890, + 869 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {w} _ {t + 1} = \\arg \\min _ {\\boldsymbol {w}} \\eta \\boldsymbol {g} _ {t} ^ {\\top} \\boldsymbol {w} + \\frac {1}{2} \\left\\| \\boldsymbol {w} - \\boldsymbol {w} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 550, + 869, + 890, + 897 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "7867", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The solution of Eq. (2) is exactly a preconditioned gradient descent step, which is", + "bbox": [ + 75, + 90, + 468, + 119 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {w} _ {t + 1} = \\boldsymbol {w} _ {t} - \\eta \\boldsymbol {H} _ {t} ^ {- 1} \\boldsymbol {g} _ {t}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 119, + 467, + 136 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Duchi et al. [5] have provided a regret bound for online mirror descent, as shown in Lemma 1:", + "bbox": [ + 75, + 137, + 467, + 165 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lemma 1 [5, 9] For any sequence of matrices $H_T \\succeq \\ldots \\succeq H_1 \\succeq 0$ , the regret of online mirror descent holds that", + "bbox": [ + 76, + 167, + 468, + 198 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} R (T) \\leq \\frac {1}{2 \\eta} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {w} _ {t} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2} - \\left\\| \\boldsymbol {w} _ {t + 1} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}\\right) \\\\ + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 210, + 468, + 282 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "If we further assume $D = \\max_{t\\leq T}||\\pmb{w}_t - \\hat{\\pmb{w}} ||_2$ , then we have", + "bbox": [ + 76, + 284, + 468, + 299 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nR (T) \\leq \\frac {D ^ {2}}{2 \\eta} \\operatorname {T r} \\left(\\boldsymbol {H} _ {T}\\right) + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 299, + 468, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to find a proper sequence of PSD matrices $\\{\\pmb{H}_1, \\pmb{H}_2, \\dots, \\pmb{H}_T\\}$ to minimize the regret bound in Eq (4) or (5). Duchi et al. [5] suggested to adopt $\\pmb{H}_T = (\\sum_{t=1}^{T} \\pmb{g}_t \\pmb{g}_t^\\top)^{\\frac{1}{2}}$ as the full matrix regularization matrix. 
However, it is hard to directly use it for DNN optimization due to the high dimension of parameter space. Therefore, Duchi et al. simplified this full-matrix $\\pmb{H}_T$ with its diagonal elements, resulting in the AdaGrad algorithm [5].", + "bbox": [ + 75, + 325, + 468, + 446 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. A General Regret Bound for Constrained Preconditioned Gradient", + "text_level": 1, + "bbox": [ + 76, + 462, + 468, + 496 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. The General Regret Bound", + "text_level": 1, + "bbox": [ + 76, + 505, + 320, + 522 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Directly adopting a full-matrix $H_{t}$ is absurd for optimizing a DNN because it is hard or even prohibitive to compute and store such a high-dimensional matrix. Hence, we need to reduce the dimension of $H_{t}$ with a constraint set $\\Psi$ , e.g., the set of the block-diagonal matrices [5]. In this section, we aim to construct a general and practical full-matrix regularization term in Eq. (2) to achieve the low regret bound in Eq. (4). For a general constraint set $\\Psi \\subseteq \\mathbb{R}^{d\\times d}$ , if it is a cone (i.e., $\\forall x\\in \\Psi ,\\theta >0,\\theta x\\in \\Psi$ holds), we have the following Theorem 1 and Lemma 2, whose proofs can be found in the supplementary materials.", + "bbox": [ + 75, + 529, + 468, + 695 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theorem 1 For any cone constraint $\\Psi \\subseteq \\mathbb{R}^{d\\times d}$ , we define a guide function $F_{T}(S)$ on $\\Psi$ as", + "bbox": [ + 75, + 696, + 468, + 727 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nF _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 727, + 468, + 753 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and then define the matrix $\\mathbf{H}_T$ as", + "bbox": [ + 76, + 750, + 302, + 763 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {H} _ {T} = C _ {T} \\boldsymbol {S} _ {T}, \\quad \\boldsymbol {S} _ {T} = \\arg \\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S}), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 86, + 763, + 467, + 786 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $C_T = \\sqrt{F_T(S_T)}$ . The regret of online mirror descent holds that", + "bbox": [ + 75, + 787, + 467, + 816 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} R (T) \\leq \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) C _ {T} \\tag {8} \\\\ = \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) \\sqrt {\\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0} , T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S})}. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 816, + 468, + 885 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The above theorem reveals that minimizing the guide", + "bbox": [ + 96, + 886, + 468, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "function $F_{T}(S)$ on cone $\\Psi$ will minimize the regret bound of the preconditioned gradient descent algorithm simultaneously. 
More importantly, given a cone constraint $\\Psi$ , the optimal $H_{T} = C_{T} S_{T}$ that achieves the lowest regret bound can be obtained by optimizing Eq. (7). From Theorem 1, we can know that the regret $R(T) \\leq O(\\sqrt{\\min_{S \\in \\Psi, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)})$ . If two cones satisfy $\\Psi_{1} \\subseteq \\Psi_{2}$ , we have $\\sqrt{\\min_{S \\in \\Psi_{2}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)} \\leq \\sqrt{\\min_{S \\in \\Psi_{1}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)}$ . This also explains why full-matrix regularization can achieve the lowest regret bound. In addition, we have the following lemma:", + "bbox": [ + 496, + 90, + 890, + 256 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lemma 2 Suppose that $\\Psi$ is the set of either diagonal matrices or full-matrices, according to the definition of $S_{T}$ and $H_{T}$ in Eq. (7), we have", + "bbox": [ + 496, + 257, + 890, + 301 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {H} _ {T} = \\operatorname {D i a g} \\left(\\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\odot \\boldsymbol {g} _ {t}\\right) ^ {\\odot \\frac {1}{2}}\\right), \\quad \\boldsymbol {H} _ {T} = \\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) ^ {\\frac {1}{2}}. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 504, + 301, + 890, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "From Lemma 2, we can easily see that the diagonal and full matrices used in AdaGrad [5] are two special cases of the results in Theorem 1.", + "bbox": [ + 496, + 356, + 890, + 400 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Layer-wise Block-diagonal Constraint", + "text_level": 1, + "bbox": [ + 498, + 411, + 826, + 428 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In practice, we need to choose a proper constraint set $\\Psi$ to regularize the structure of matrix $H_{T}$ . The diagonal constraint is the simplest constraint. However, it results in a very low effective dimension of $H_{T}$ so that the regret bound is high. We aim to find a more effective and practical constraint set over $H_{T}$ for DNN optimization.", + "bbox": [ + 496, + 435, + 890, + 525 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instead of considering the full-matrix regularization of all parameters, one can consider the full-matrix regularization of parameters within one DNN layer. Similar ideas have been adopted in KFAC [7] and Shampoo [9], which assume that the matrix $H_{T}$ has a block diagonal structure and each sub-block matrix is used for one layer of a DNN. Suppose matrices $S_{l}$ and $H_{l}$ are for the $l$ -th layer, and $g_{l}$ is the gradient of weight in the $l$ -th layer, in order to obtain the updating formula with block-diagonal constraint, we could minimize the guide function $F_{T}(S)$ . There is", + "bbox": [ + 496, + 526, + 890, + 676 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nF _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2} = \\sum_ {l = 1} ^ {L} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {l, t} \\right\\| _ {\\boldsymbol {S} _ {l}} ^ {*}\\right) ^ {2}. 
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 676, + 890, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The above equation shows that the original optimization problem can be divided into a number of $L$ sub-problems, and we can solve these sub-problems independently. For the convenience of expression, we omit the subscript $l$ and analyze the sub-problem within one layer of a DNN in the following development.", + "bbox": [ + 496, + 715, + 890, + 806 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Kronecker-factorized Constraint", + "text_level": 1, + "bbox": [ + 500, + 816, + 787, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Because the dimension of the parameter space of one DNN layer can still be very high, we need to further constrain the structure of $H_{T}$ . The Kronecker-factorized constraint can be used to significantly reduce the parameter di", + "bbox": [ + 496, + 840, + 890, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "7868", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "mension within one layer [7, 9]. To be specific, for a fully-connected layer with weight $\\pmb{W} \\in \\mathbb{R}^{C_{out} \\times C_{in}}$ and $\\pmb{w} = \\mathrm{vec}(\\pmb{W})$ , its corresponding gradient is $\\pmb{G} \\in \\mathbb{R}^{C_{out} \\times C_{in}}$ and $\\pmb{g} = \\mathrm{vec}(\\pmb{G})$ . Let $\\pmb{S} = \\pmb{S}_1 \\otimes \\pmb{S}_2$ , where $\\pmb{S}_1 \\in \\mathbb{R}^{C_{out} \\times C_{out}}$ , $\\pmb{S}_2 \\in \\mathbb{R}^{C_{in} \\times C_{in}}$ and $\\pmb{S} \\in \\mathbb{R}^{C_{in} C_{out} \\times C_{in} C_{out}}$ , and $\\otimes$ is Kronecker product. Since $(S_1 \\otimes S_2)^{-1} = S_1^{-1} \\otimes S_2^{-1}$ , what we need to minimize becomes", + "bbox": [ + 76, + 90, + 472, + 195 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} F _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S} _ {1} \\otimes \\boldsymbol {S} _ {2}} ^ {*}\\right) ^ {2} = \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} ^ {\\top} \\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\boldsymbol {g} _ {t} \\\\ = \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) \\tag {11} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 195, + 483, + 266 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "under the constraints $\\{S_1, S_2 \\succeq 0, \\operatorname{Tr}(S_1) \\leq 1, \\operatorname{Tr}(S_2) \\leq 1\\}$ .", + "bbox": [ + 76, + 267, + 464, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Nevertheless, directly minimizing the $F_{T}(\\mathbf{S})$ in Eq. (11) is still difficult, and we construct an upper bound of $F_{T}(\\mathbf{S})$ to minimize. 
Since $\\pmb{g} = \\frac{1}{n}\\sum_{i=1}^{n}\\pmb{g}_{i}$ , where $\\pmb{g}_{i}$ is the gradient of sample $i$ and $n$ is the batch size, and $\\pmb{g}_{i} = \\mathrm{vec}(\\delta_{i}\\pmb{x}_{i}^{T}) = \\delta_{i} \\otimes \\pmb{x}_{i}$ , where $\\pmb{x}_{i}$ is the input feature and $\\delta_{i}$ is the output feature gradient of sample $i$ , we have the following lemma.", + "bbox": [ + 76, + 284, + 468, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Lemma 3 Denote by $L_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\delta_{ti} \\delta_{ti}^{\\top}$ and $\\mathbf{R}_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\mathbf{x}_{ti} \\mathbf{x}_{ti}^{\\top}$ , there is", + "bbox": [ + 76, + 375, + 468, + 407 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} F _ {T} (\\boldsymbol {S}) \\leq \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\frac {1}{n} \\sum_ {t = 1} ^ {T} \\sum_ {i = 1} ^ {n} \\boldsymbol {g} _ {t i} \\boldsymbol {g} _ {t i} ^ {\\top}\\right) \\tag {12} \\\\ \\leq \\frac {1}{n} T r (\\boldsymbol {S} _ {1} ^ {- 1} \\boldsymbol {L} _ {T}) T r (\\boldsymbol {S} _ {2} ^ {- 1} \\boldsymbol {R} _ {T}). \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 407, + 468, + 477 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We minimize the upper bound of $F_{T}(S)$ defined in Lemma 3. One can see that the upper bound can be divided into two independent problems w.r.t. $S_{1}$ and $S_{2}$ , respectively, which are", + "bbox": [ + 76, + 478, + 468, + 537 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {S _ {1} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {1}) \\leq 1} \\operatorname {T r} \\left(S _ {1} ^ {- 1} L _ {T}\\right) \\text {a n d} \\min _ {S _ {2} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {2}) \\leq 1} \\operatorname {T r} \\left(S _ {2} ^ {- 1} R _ {T}\\right). \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 76, + 537, + 468, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To solve the above problem, we have the following lemma: Lemma 4 If $A \\succ 0$ , we have:", + "bbox": [ + 76, + 575, + 467, + 604 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\arg \\min _ {\\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} T r (\\boldsymbol {S} ^ {- 1} \\boldsymbol {A}) = \\boldsymbol {A} ^ {\\frac {1}{2}} / T r (\\boldsymbol {A} ^ {\\frac {1}{2}}). \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 604, + 468, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proofs of Lemma 3 and Lemma 4 can be found in the supplementary materials. According to Lemma 4, we know that the solution of Eq. (13) is $S_{1,T} = L_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(L_{T}^{\\frac{1}{2}})$ and $S_{2,T} = R_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(R_{T}^{\\frac{1}{2}})$ . In practice, $L_{T}$ and $R_{T}$ will be added with a dampening term $\\epsilon I$ to ensure that they are symmetric and positive definite. Without considering the magnitude of $H_{T}$ , we can set", + "bbox": [ + 76, + 631, + 468, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\pmb{H}_{T} = \\pmb{H}_{1,T}\\otimes \\pmb{H}_{2,T},\\pmb{H}_{1,T} = \\pmb{L}_{T}^{\\frac{1}{2}},\\pmb{H}_{2,T} = \\pmb{R}_{T}^{\\frac{1}{2}}.$ (15) Then according to the property of Kronecker product, the online mirror descent updating formula in Eq. 
(3) becomes", + "bbox": [ + 76, + 744, + 468, + 792 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {W} _ {t + 1} = \\boldsymbol {W} _ {t} - \\eta \\boldsymbol {H} _ {1, t} ^ {- 1} \\boldsymbol {G} _ {t} \\boldsymbol {H} _ {2, t} ^ {- 1}. \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 792, + 467, + 808 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We ignore the magnitude of $H_{T}$ here because it will have no impact on the result after we introduce a gradient norm recovery operation in the algorithm, which will be described in the next section.", + "bbox": [ + 76, + 809, + 468, + 867 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, the proposed vanilla optimizer, termed AdaBK, is summarized in Algorithm 1.", + "bbox": [ + 76, + 869, + 468, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Detailed Implementation", + "text_level": 1, + "bbox": [ + 500, + 90, + 733, + 106 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed AdaBK in Algorithm 1 involves the calculation of matrix inverse root, which may be unstable and inefficient. For an efficient and effective implementation of AdaBK in training DNNs, we propose a series of techniques.", + "bbox": [ + 496, + 116, + 890, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Efficient Matrix Inverse Root. As shown in Algorithm 1, we need to calculate the matrix inverse root of $L_{t}$ and $R_{t}$ . Traditional approaches usually use SVD to calculate it. Notwithstanding, SVD is inefficient and the existing deep learning frameworks (e.g., PyTorch) do not implement SVD on GPU well, making the training unstable or even not converging. Instead of using SVD, we adopt the Schur-Newton algorithm [8] to compute the matrix inverse root. For matrix $A$ , let $Y_{0} = A / \\operatorname{Tr}(A)$ and $Z_{0} = I$ . The Schur-Newton algorithm adopts the following iterations:", + "bbox": [ + 496, + 193, + 892, + 344 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol {T} _ {k} = \\frac {1}{2} \\left(3 \\boldsymbol {I} - \\boldsymbol {Z} _ {k - 1} \\boldsymbol {Y} _ {k - 1}\\right); \\\\ \\boldsymbol {Y} _ {k} = \\boldsymbol {Y} _ {k - 1} \\boldsymbol {T} _ {k}, \\boldsymbol {Z} _ {k} = \\boldsymbol {T} _ {k} \\boldsymbol {Z} _ {k - 1}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 514, + 344, + 890, + 390 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then we have $A^{\\frac{1}{2}} \\approx Y_K \\sqrt{\\operatorname{Tr}(A)}$ , $A^{-\\frac{1}{2}} \\approx Z_K / \\sqrt{\\operatorname{Tr}(A)}$ . In practice, we find that setting $K = 10$ can achieve good enough precision for our problem.", + "bbox": [ + 496, + 390, + 890, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Statistics Updating. In Algorithm 1, $L_{t}$ and $R_{t}$ accumulate the statistics of output feature gradient $\\Delta_{t}$ and input feature $X_{t}$ , respectively. Hence the amplitude of $L_{t}$ and $R_{t}$ will increase during training. After certain iterations, the effective learning rate will become small, making the learning process inefficient. To solve this issue, we use the exponential moving average of $L_{t}$ and $R_{t}$ . Meanwhile, it is unnecessary to compute $L_{t}, R_{t}$ , and their inverse root in each iteration. 
Two hyper-parameters $T_{s}$ and $T_{ir}$ are introduced to control the frequency of updating $L_{t}$ and $R_{t}$ and their inverse root, respectively. This infrequent statistics updating strategy can significantly improve efficiency with a little performance drop. We use two additional statistics $\\widehat{L}_{t}$ and $\\widehat{R}_{t}$ to restore the matrix inverse root of $L_{t}$ and $R_{t}$ (please refer to Algorithm 2).", + "bbox": [ + 496, + 438, + 892, + 665 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Dampening Strategy. When the dimensions of $\\Delta_t$ and $X_t$ are high, $L_t$ and $R_t$ tend to be singular matrices with large condition numbers. A dampening term $\\epsilon I$ should be added into $L_t$ and $R_t$ to improve their condition number and enhance the stability of computing inverse root. As in [33], we adopt an adaptive dampening parameter $\\epsilon \\lambda_{max}$ where $\\lambda_{max}$ is the max singular value of the matrix $L_t$ or $R_t$ . With this setting, the condition number will be $\\frac{\\lambda_{max} + \\epsilon \\lambda_{max}}{\\lambda_{min} + \\epsilon \\lambda_{max}} \\leq \\frac{1 + \\epsilon}{\\epsilon}$ , bounded by a value determined by $\\epsilon$ . Meanwhile, the maximum singular value of the symmetric matrix ( $L_t$ or $R_t$ ) can be efficiently obtained by the power iteration method [2] as follows:", + "bbox": [ + 496, + 667, + 892, + 848 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{ \\begin{array}{l} \\boldsymbol {v} _ {k} = \\boldsymbol {A} \\boldsymbol {u} _ {k - 1}, \\\\ \\boldsymbol {u} _ {k} = \\boldsymbol {v} _ {k} / | | \\boldsymbol {v} _ {k} | | _ {2}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 861, + 890, + 898 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "7869", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: AdaBK (Adaptive Regularization with Block-diagonal and Kronecker-factorized Constraints)" + ], + "code_body": "Input: $W_{0},L_{0} = \\epsilon I_{C_{out}},R_{0} = \\epsilon I_{C_{in}},\\eta$ Output: $W_{T}$ \n1 for $t = 1:T$ do \n2 Receive $X_{t} = [\\pmb{x}_{ti}]_{i = 1}^{n}$ by forward propagation; \n3 Receive $\\Delta_t = [\\delta_{ti}]_i^n$ by backward propagation; \n4 Compute gradient $G_{t}$ . \n5 Update preconditioners: \n6 $L_{t} = L_{t - 1} + \\Delta_{t}\\Delta_{t}^{\\top};$ \n7 $R_{t} = R_{t - 1} + X_{t}X_{t}^{\\top};$ \n8 Update weight: \n $W_{t + 1} = W_t - \\eta L_t^{-\\frac{1}{2}}G_tR_t^{-\\frac{1}{2}};$ \n9 end", + "bbox": [ + 83, + 141, + 419, + 315 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use $\\lambda_{max} \\approx ||\\pmb{v}_K||_2$ for our proposed adaptive dampening and set $K$ to 10 in our implementation.", + "bbox": [ + 75, + 356, + 468, + 386 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Gradient Norm Recovery. Since the amplitude of the preconditioned gradient $L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}}$ may significantly differ from the amplitude of original $G_{t}$ , the optimal learning rate and weight decay will also differ from the original optimizer. It is expected that the well-tuned hyperparameters in current optimizers (e.g., SGDM, AdamW) can be directly used in our proposed AdaBK optimizer without further hyper-parameter tuning. 
To this end, we follow the strategy in [33] to re-scale the amplitude of the preconditioned gradient $\\widehat{G}_{t} = L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}}$ to the original gradient $G_{t}$ by multiplying it with a scaling factor, i.e.,", + "bbox": [ + 75, + 386, + 468, + 559 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {G}} _ {t} = \\widehat {\\boldsymbol {G}} _ {t} \\frac {\\left| \\left| \\boldsymbol {G} _ {t} \\right| \\right| _ {2}}{\\left| \\left| \\widehat {\\boldsymbol {G}} _ {t} \\right| \\right| _ {2}}. \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 561, + 468, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It is easy to know that $\\tilde{G}_t$ and $G_{t}$ have the same $L_{2}$ norm. With gradient norm recovery, the proposed AdaBK method can be easily embedded into existing optimizers without much extra hyperparameter tuning.", + "bbox": [ + 75, + 598, + 468, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Convolutional Layer. We have discussed the optimization of FC layers in Section 3. For the Conv layer, the derivation process is similar. The convolution operation can be formulated as matrix multiplication with the $im2col$ operation [31, 36], and then the Conv layer can be viewed as an FC layer with $\\mathfrak{A} = \\mathcal{U}_1(W)\\mathfrak{X}$ , where $\\mathfrak{A}$ and $\\mathfrak{X}$ are the output and input features after $im2col$ operation, and $\\mathcal{U}_1(\\cdot)$ is the mode 1 unfold operation of a tensor. For example, for a convolution weight $\\mathbf{W} \\in \\mathbb{R}^{C_{out} \\times C_{in} \\times k_1 \\times k_2}$ , we have $\\mathcal{U}_1(\\mathbf{W}) \\in \\mathbb{R}^{C_{out} \\times C_{in} k_1 k_2}$ . $\\mathcal{U}_1(\\mathbf{W})$ can be considered as the weight of the FC layer, and the remaining computation is the same as the FC layer.", + "bbox": [ + 75, + 659, + 468, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Embedding AdaBK into SGDM and AdamW.** With the above-introduced techniques, a more efficient and practical implementation of AdaBK can be obtained. 
The one-step preconditioned gradient of AdaBK is summarized in", + "bbox": [ + 75, + 839, + 468, + 900 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: One Step Preconditioned Gradient of AdaBK" + ], + "code_body": "Input: $T_{s}, T_{ir}, \\alpha, \\epsilon, \\beta, L_{t-1}, R_{t-1}, \\widehat{L}_{t-1}, \\widehat{R}_{t-1}, X_{t} = [x_{ti}]_{i=1}^{n}$ , $\\Delta_{t} = [\\delta_{ti}]_{i=1}^{n}$ , $G_{t} = \\nabla W_{t} \\mathcal{L}$ \nOutput: $\\tilde{G}_{t}$ \n1 if $t \\% T_{s} = 0$ then \n2 $\\begin{array}{r}L_{t} = \\alpha L_{t-1} + (1 - \\alpha)\\Delta_{t}\\Delta_{t}^{\\top};\\\\ R_{t} = \\alpha R_{t-1} + (1 - \\alpha)X_{t}X_{t}^{\\top};\\end{array}$ \n3 else \n4 end \n7 if $t \\% T_{ir} = 0$ then \n8 $\\begin{array}{r}\\mathrm{Compute~}\\lambda_{max}^{L}\\mathrm{~and~}\\lambda_{max}^{R}\\mathrm{~by~Power~Iteration;}\\\\ \\mathrm{Compute~}\\widehat{L}_{t} = (L_{t} + \\lambda_{max}^{L}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~and}\\\\ \\widehat{R}_{t} = (R_{t} + \\lambda_{max}^{R}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~by~Schur-Newton~Iteration~Eq.~(17)};\\end{array}$ \n9 else \n11 $\\begin{array}{r}\\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbb{T}}_ {i t}-\\frac{\\partial}{\\partial x _ {i t}},\\quad\\forall i,\\forall t.\\end{array}$", + "bbox": [ + 500, + 125, + 880, + 344 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 2. For a FC layer, the complexity of AdaBK is $T(O(\\frac{C_{in}^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2 + C_{out}^2)N}{T_s}) + O(C_{in}C_{out}(C_{in} + C_{out})))$ , where $T$ is the total number of iterations. For a Conv layer, its complexity is $T(O(\\frac{C_{in}^3k_1^3k_2^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2k_1^2k_2^2 + C_{out}^2)N}{T_s}) + O(C_{in}k_1k_2C_{out}(C_{in}k_1k_2 + C_{out})))$ . In our implementation, $T_s$ and $T_{ir}$ are set to 200 and 2000, respectively, and the complexity is acceptable. 
In practice, it only costs $10\\% \\sim 25\\%$ additional training time.", + "bbox": [ + 496, + 356, + 890, + 487 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "AdaBK can be embedded into many existing optimizers. In this paper, we embed it into the two commonly used DNN optimizers, i.e., SGDM and AdamW (or Adam), and name the obtained new optimizers as SGDM_BK and AdamW_BK accordingly. The detailed algorithms of SGDM_BK and AdamW_BK are summarized in the supplementary materials.", + "bbox": [ + 496, + 489, + 890, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 500, + 617, + 632, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate the proposed SGDM_BK and AdamW_BK optimizers on typical vision tasks, including image classification (on CIFAR100/CIFAR10 [16] and ImageNet [27]), object detection and segmentation (on COCO [17]). For the hyper-parameters of SGDM_BK and AdamW_BK, we set $\\alpha = 0.9$ , $T_s = 200$ , $T_{ir} = 2000$ , and $\\epsilon = 0.00001$ throughout the experiments if not specified. Ablation studies on hyper-parameter selection can be found in the supplementary material. All experiments are conducted under the Pytorch 1.11 framework with NVIDIA GeForce RTX 2080Ti and 3090 Ti GPUs.", + "bbox": [ + 496, + 645, + 890, + 810 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Image Classification", + "text_level": 1, + "bbox": [ + 500, + 829, + 691, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the image classification task, we compare SGDM_BK and AdamW_BK with the representative and state-of-the-art DNN optimizers, including SGDM, AdamW [22], Ada", + "bbox": [ + 496, + 854, + 890, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "7870", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/1e458a83e48bff462a72c215eabc418ab8b6b1a0106bd004682fd243e8f21a3d.jpg", + "table_caption": [ + "Table 1. Testing accuracies (%) on CIFAR100/CIFAR10. The best and second best results are highlighted in bold and italic fonts, respectively. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + ], + "table_footnote": [], + "table_body": "
CIFAR100
OptimizerSGDMAdamWAdagradRAdamAdabeliefShampooKFACWSGDMSGDM_BKAdamW_BK
ResNet1877.20 ± .3077.23 ± .1071.55 ± .2577.05 ± .1577.43 ± .3671.81 ± .4078.25 ± .2379.28 ± .2779.30 ± .07 (↑2.10)78.66 ± .34 (↑1.43)
ResNet5077.78 ± .4378.10 ± .1772.20 ± .1578.20 ± .1579.08 ± .2371.31 ± .5379.25 ± .2680.90 ± .2381.26 ± .20 (↑3.48)80.15 ± .19 (↑2.05)
VGG1170.80 ± .2971.20 ± .2967.70 ± .1871.08 ± .2472.45 ± .1663.56 ± .4472.75 ± .3173.42 ± .2873.89 ± .13 (↑3.09)73.09 ± .29 (↑1.89)
VGG1970.94 ± .3270.26 ± .2363.30 ± .5873.01 ± .2072.39 ± .2765.62 ± .5673.87 ± .4374.82 ± .2375.10 ± .13 (↑4.16)74.27 ± .25 (↑4.01)
DenseNet12179.53 ± .1978.05 ± .2671.27 ± .7978.65 ± .0579.88 ± .0874.95 ± .4279.84 ± .3381.23 ± .1081.18 ± .27 (↑1.65)79.93 ± .23 (↑1.88)
CIFAR10
ResNet1895.10 ± .0794.80 ± .1092.83 ± .1294.70 ± .1895.12 ± .1492.94 ± .2795.01 ± .1295.43 ± .0895.44 ± .12 (↑0.34)95.22 ± .13 (↑0.42)
ResNet5094.75 ± .3094.72 ± .1092.55 ± .3994.72 ± .1095.35 ± .0592.61 ± .2795.43 ± .1695.80 ± .1595.86 ± .05 (↑1.11)95.40 ± .07 (↑0.68)
VGG1192.17 ± .1992.02 ± .0890.25 ± .2592.00 ± .1892.45 ± .1889.01 ± .2992.82 ± .1192.95 ± .2093.14 ± .26 (↑0.97)92.96 ± .07 (↑0.94)
VGG1993.61 ± .0693.40 ± .0491.28 ± .1493.57 ± .1193.58 ± .1290.62 ± .3293.47 ± .0993.91 ± .1994.03 ± .15 (↑0.42)93.94 ± .10 (↑0.54)
DenseNet12195.37 ± .1794.80 ± .0792.95 ± .2395.02 ± .0895.37 ± .0494.37 ± .3695.18 ± .2295.72 ± .1495.70 ± .13 (↑0.33)95.40 ± .04 (↑0.60)
", + "bbox": [ + 80, + 125, + 890, + 260 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/95d5f8a360e7c716612609b9293b9013e21361a49fc4fd77af00cfb6266402c3.jpg", + "image_caption": [ + "Figure 2. Training loss curves (loss vs. epoch and loss vs. time) of SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 before 60 epochs." + ], + "image_footnote": [], + "bbox": [ + 81, + 273, + 279, + 426 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/342c6df23d7af2849d785567183a0e67e17121371633e6f349bd1565c9b0530d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 273, + 483, + 426 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e6f4a094088fc4b52cba4d7286a65c07b6c6751b262e4a8636b43bcf09b7d936.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 486, + 273, + 686, + 426 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/72a9d11cb4697d5f6359940bc160b3616e4d5d8d935f4e88871b8bf1d6fd6d89.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 687, + 273, + 887, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "grad [5], RAdam [19]1, and Adabelief [38]2, Shampoo [9]3, KFAC [7] [9]4, WSGDM [33]5. We tune learning rate and weight decay for each optimizer with grid search and the detailed settings for different optimizers can be found in the supplementary material.", + "bbox": [ + 75, + 491, + 468, + 568 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on CIFAR100/10: We first testify the effectiveness of SGDM_BK and AdamW_BK with different DNN models on CIFAR100/CIFAR10 [16], including ResNet18, ResNet50 [12], VGG11 VGG19 [29] and DenseNet-121 [13]. All the DNN models are trained for 200 epochs with batch size 128 on one GPU. The learning rate is multiplied by 0.1 for every 60 epochs. The experiments are repeated 4 times and the results are reported in a \"mean $\\pm$ std\" format in Table 1. We can see that SGDM_BK and AdamW_BK achieve significant improvements over SGDM and AdamW, which are $1.44\\% \\sim 4.16\\%$ and $1.43\\% \\sim 4.01\\%$ on CIFAR100, and $0.28\\% \\sim 1.11\\%$ and $0.42\\% \\sim 0.94\\%$ on CIFAR10, respectively. They also surpass other compared optimizers for most of the used backbone networks.", + "bbox": [ + 75, + 569, + 470, + 795 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 2 shows the curves of training loss vs. epoch and training loss vs. time for SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 backbones before 60 epochs. One can see that SGDM_BK and AdamW_BK can significantly speed up the training process of SGDM and AdamW, respectively. Since SGDM_BK and AdamW_BK cost additional time in each iteration, for a fair comparison, we also show the curves of training loss vs. time. One can see that they still have great advantages over the original SGDM and AdamW.", + "bbox": [ + 496, + 492, + 892, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results on ImageNet-1k: To testify that SGDM_BK and AdamW_BK can also work well on large-scale datasets, we evaluate them on ImageNet-1k [27], which contains 1000 categories with 1.28 million images for training and 50K images for validation. ResNet18 and ResNet50 are selected as the backbone models with training batch size 256 on 4 GPUs, and the training settings follow the work in [3, 38]. The learning rate is multiplied by 0.1 for every 30 epochs. 
SGDM_BK and AdamW_BK adopt the same learning rate and weight decay as SGDM and AdamW, respectively. The top 1 accuracies on the validation set are reported in Table 2. One can see that SGDM_BK and AdamW_BK perform better than others. Meanwhile, we plot the training and validation accuracy curves in Figure 3, from which we see that the proposed AdaBK technique can largely speed up the training process.", + "bbox": [ + 496, + 643, + 893, + 883 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We also evaluate the proposed optimizer on Swin", + "bbox": [ + 517, + 885, + 890, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1https://github.com/LiyuanLucasLiu/RAdam", + "bbox": [ + 96, + 806, + 374, + 820 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "2https : / / github . com / jintang - zhuang / Adabelief - Optimizer", + "bbox": [ + 78, + 821, + 467, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3 https://github.com/moskomule/shampoo.pytorch", + "bbox": [ + 96, + 842, + 405, + 853 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "4 https://github.com/alecwangcq/KFAC-Pytorch", + "bbox": [ + 96, + 854, + 392, + 864 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "5 https://github.com/Yonghongwei/W-SGDM-and-W-Adam", + "bbox": [ + 96, + 866, + 436, + 876 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "The model can be downloaded at https://github.com/weiaicunzai/pytorch-cifar100.", + "bbox": [ + 78, + 877, + 467, + 898 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "7871", + "bbox": [ + 482, + 944, + 513, + 955 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/0591e86f56914f831cfe5f436840f6971540ddc2f2cc27cd48bfaf600224af72.jpg", + "table_caption": [ + "Table 2. Top 1 accuracy (%) on the validation set of ImageNet-1k. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + ], + "table_footnote": [], + "table_body": "
OptimizerSGDMAdamWAdagradRAdamAdabeliefShampooKFACWSGDMSGDM_BKAdamW_BK
ResNet1870.4970.0162.2269.9270.0864.4569.6271.4371.59 (↑1.10)71.63 (↑1.62)
ResNet5076.3176.0269.3876.1276.2270.1176.3677.4877.62 (↑1.31)77.22 (↑1.10)
", + "bbox": [ + 81, + 121, + 888, + 167 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1f8f79953f2cfcfc736cabc6a9c4c433abbcae3e6536c60505982b20eb1d5e42.jpg", + "image_caption": [ + "Figure 3. Training and validation accuracy curves of SGDM, SGDM_BK, AdamW and AdamW_BK on ImageNet-1k with ResNet18 and ResNet50 backbones." + ], + "image_footnote": [], + "bbox": [ + 84, + 183, + 285, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/af82e3214ce84bc5a3024da478fa12b056a663a0e82d29b6cc990b42897a12b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 183, + 483, + 337 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/33fc8a4f43f004b3ae7f1e2d9898d9d0ffe10d3ec650adacac282db493b85408.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 183, + 684, + 335 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d64f394059c8fa4a7ac7c40f47b579034fb45d0875459760cb7d399532f1c38f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 686, + 183, + 888, + 335 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/8c7e519dc96d5939cbd2fc9ceeeb9c42f449300b3542655f2ff63dc54bd4ec58.jpg", + "table_caption": [ + "Table 3. Top 1 accuracy (%) on the validation set of ImageNet-1k." + ], + "table_footnote": [], + "table_body": "
OptimizerAdamWAdamW_BK
Swin-T81.1881.79 (↑0.61)
Swin-B83.0283.14 (↑0.12)
", + "bbox": [ + 151, + 409, + 392, + 454 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "transformer [20] backbone. We compare AdamW_BK with their default optimizer AdamW. The configurations follow the settings of the official MMClassification toolbox7. The results are shown in Table 3. We can see AdamW_BK can also achieve certain performance gain over AdamW.", + "bbox": [ + 75, + 468, + 468, + 546 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2. Detection and Segmentation", + "text_level": 1, + "bbox": [ + 76, + 555, + 328, + 571 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We then evaluate SGDM_BK and AdamW_BK on COCO [17] detection and segmentation tasks to show that they can work well beyond classification tasks and can be used to fine-tune pre-trained models. The models are pre-trained on ImageNet1k and fine-tuned on COCO train2017 (118K images), and then evaluated on COCO val2017 (40K images). The latest version of MMDetection toolbox [4] is used as to train our models. We test SGDM_BK and AdamW_BK by Faster-RCNN [25] and Mask-RCNN [11] with various backbones, including ResNet50 (R50), ResNet101 (R101) and Swin transformer [20].", + "bbox": [ + 75, + 579, + 468, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As mentioned in Section 4, with the gradient norm recovery operation, we can directly adopt the same hyperparameters (i.e., learning rate and weight decay) of SGDM and AdamW into SGDM_BK and AdamW_BK, respectively. To be specific, for R50 and R101 backbones, we compare the proposed optimizer with SGDM, WSGDM and AdamW. The learning rate and weight decay are set to", + "bbox": [ + 75, + 760, + 468, + 867 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "0.02 and 0.0001 for SGDM, WSGDM and SGDM_BK, and 0.0001 and 0.2 for AdamW and AdamW_BK, respectively.", + "bbox": [ + 500, + 392, + 890, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For the Swin transformer backbone, the learning rate and weight decay are set to 0.0001 and 0.02 for AdamW and AdamW_BK, respectively. The learning rate schedule is 1X for Faster-RCNN. Other configurations follow the settings of the official MMDetection toolbox8. For the default optimizers, we use their official results9. This experiment is conducted on NVIDIA GeForce RTX 3090 Ti GPUs.", + "bbox": [ + 496, + 424, + 892, + 530 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4 lists the Average Precision (AP) of object detection by Faster-RCNN. It can be seen that the models trained by SGDM_BK and AdamW_BK achieve clear performance gains of $1.6\\% \\sim 2.2\\%$ AP for R50 and R101 backbones. Fig. 4 shows the training loss curves of Faster-RCNN with ResNet50 backbone. One can see that SGDM_BK and AdamW_BK accelerate the training process over SGDM and AdamW. Table 5 shows the $\\mathrm{AP}^b$ of detection and $\\mathrm{AP}^m$ of segmentation by Mask-RCNN. We can see that SGDM_BK and AdamW_BK gain $1.5\\% \\sim 2.2\\%$ $\\mathrm{AP}^b$ and $1.2\\% \\sim 2.2\\%$ $\\mathrm{AP}^m$ for R50 and R101 backbones over SGDM and AdamW, respectively. For Swin transformer backbone, AdamW_BK also improves $0.7\\% \\sim 0.9\\%$ $\\mathrm{AP}^b$ and $0.3\\% \\sim 0.9\\%$ $\\mathrm{AP}^m$ over AdamW. Meanwhile, compared with WSGDM, the proposed SGDM_BK also outperforms it with $0.2\\% \\sim 0.6\\%$ AP gain. Moreover, Fig. 5 plots the training loss curves of Faster-RCNN with ResNet50, Swin-T (1X) and Swin-S (3X). 
The proposed SGDM_BK and AdamW_BK accelerate the train", + "bbox": [ + 496, + 531, + 893, + 820 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "8https://github.com/open-mmlab/mmdetection", + "bbox": [ + 514, + 834, + 808, + 847 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "9Please refer to https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn,https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn,and https://github.com/open-mmlab/mmdetection/tree/master/ configs/swin.", + "bbox": [ + 500, + 847, + 890, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "7https://github.com/open-mmlab/mmclassification/tree/master/confers/swin_transformer", + "bbox": [ + 76, + 875, + 467, + 898 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7872", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/3d3453b603220ca1bdb2cb73bfb8ad10a1fed481a0799d6769feb0c40afc88ec.jpg", + "table_caption": [ + "Table 4. Detection results of Faster-RCNN on COCO. $\\Delta$ means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + ], + "table_footnote": [], + "table_body": "
BackboneAlgorithmAPAP.5AP.75APsAPmAPl
R50SGDM*37.458.140.421.241.048.1
WSGDM39.460.643.123.142.950.7
SGDM.BK39.660.742.822.642.952.2
Δ↑2.2↑2.6↑2.4↑1.4↑1.9↑4.1
AdamW37.858.741.022.141.249.2
AdamW.BK39.460.342.922.542.852.3
Δ↑1.6↑1.6↑1.9↑0.4↑1.6↑3.1
R101SGDM*39.460.143.122.443.751.1
WSGDM41.161.645.124.045.254.3
SGDM.BK41.662.345.324.945.655.2
Δ↑2.2↑2.2↑2.2↑2.5↑1.9↑4.1
AdamW40.160.643.822.944.152.8
AdamW.BK41.762.145.524.445.456.2
Δ↑1.6↑1.5↑1.7↑1.5↑1.3↑3.4
", + "bbox": [ + 80, + 145, + 486, + 280 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/79bae9a7aa2dcd6bbb68846c9bdaa70ef766c6f63e971a9d28bd92026621d3e4.jpg", + "table_caption": [ + "Table 5. Detection and segmentation results of Mask-RCNN on COCO. $\\Delta$ means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + ], + "table_footnote": [], + "table_body": "
BackboneLr scheduleAlgorithm\\( {\\mathrm{{AP}}}^{b} \\)\\( {\\mathrm{{AP}}}_{5}^{b} \\)\\( {\\mathrm{{AP}}}_{7.5}^{b} \\)\\( {\\mathrm{{AP}}}^{m} \\)\\( {\\mathrm{{AP}}}_{5}^{m} \\)\\( {\\mathrm{{AP}}}_{7.5}^{m} \\)
R501XSGDM*38.258.841.434.755.737.2
W-SGDM39.860.843.436.457.638.9
SGDM_BK40.461.343.936.958.339.6
Δ↑2.2↑2.5↑2.5↑2.2↑2.6↑2.4
AdamW37.858.741.035.456.238.0
AdamW_BK40.060.643.536.758.039.3
Δ↑2.2↑1.9↑2.5↑1.3↑1.8↑1.3
R1001XSGDM*40.060.544.036.157.538.6
W-SGDM41.762.545.537.959.440.8
SGDM_BK42.262.946.138.160.040.7
Δ↑2.2↑2.4↑2.1↑2.0↑2.5↑2.1
AdamW40.761.144.637.258.440.1
AdamW_BK42.262.546.038.459.941.2
Δ↑1.5↑1.4↑1.4↑1.2↑1.5↑1.1
Swin-T1XAdamW*42.765.246.839.362.242.2
AdamW_BK43.665.947.840.263.143.1
Δ↑0.9↑0.7↑1.0↑0.9↑0.9↑0.9
Swin-T3XAdamW*46.068.250.341.665.344.7
AdamW_BK46.868.851.442.466.145.6
Δ↑0.8↑0.6↑1.1↑0.8↑0.8↑0.9
Swin-S3XAdamW*48.269.852.843.267.046.1
AdamW_BK48.970.453.843.567.446.8
Δ↑0.7↑0.6↑1.0↑0.3↑0.4↑0.7
", + "bbox": [ + 80, + 349, + 486, + 529 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ing process clearly. The results on COCO demonstrate that the proposed SGDM_BK and AdamW_BK can be easily adopted into the downstream tasks without additional hyper-parameter tuning.", + "bbox": [ + 75, + 575, + 468, + 637 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3. Memory Usage and Training Time", + "text_level": 1, + "bbox": [ + 76, + 643, + 379, + 660 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For full-matrix adaptive optimizers, one important concern is the training cost, including memory usage and training time. Here we compare the memory and time cost of our optimizers with SGDM [23], AdamW [22] and AdaGrad [5] on CIFAR100. ResNet50 is used as the backbone and one GeForce RTX 2080Ti GPU is used. The results are reported in Table 6. One can see that the embedding of AdaBK slightly increases the memory usage and training time ( $10\\% \\sim 25\\%$ extra training time and memory usage). Compared to the improvement of performance, the extra cost is affordable and worthwhile.", + "bbox": [ + 75, + 667, + 470, + 833 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 76, + 844, + 194, + 859 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work presented a general regret bound for the constrained full-matrix preconditioned gradient methods for", + "bbox": [ + 76, + 869, + 468, + 902 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b1b169f9eeaaae4e9972c07fe7d8ebb5fc3c9d1e987f1b1b2125d7553be6532e.jpg", + "image_caption": [ + "Figure 4. Training loss curves of ResNet50." + ], + "image_footnote": [], + "bbox": [ + 491, + 92, + 689, + 252 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/978f79c9f7191e093db6e1bef859a4fdd2c5551cb6ac45ca52e7fdc3a4ce5c9a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 692, + 95, + 888, + 251 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0d9ae172bab8d9066c7c413888d91765f50880626e31a70f410309dfb0def852.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 491, + 287, + 676, + 409 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e0b2a34f9902cd702eb05dd0b97a81a67c59f7b6488ec8ec9539bbab16179041.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 287, + 857, + 409 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/f8ff2e0f833104299d4ffbb23f3b23038df0be2f88d6f1049955713c9e1c211e.jpg", + "image_caption": [ + "Figure 5. Training loss curves of Mask-RCNN." + ], + "image_footnote": [], + "bbox": [ + 491, + 410, + 676, + 539 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/98a0f94ae891fabe8b52f2139e5ec9ded803730b82ceedab7712846a3fe7c56c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 676, + 410, + 857, + 539 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e1f4bba1265477c9c0de23327ac4ed6b1a3dbef5f05f21fce1f13227460f43af.jpg", + "table_caption": [ + "Table 6. Memory cost (MiB) and training time (h) of different optimizers with ResNet50." + ], + "table_footnote": [], + "table_body": "
OptimizerSGDMAdamWAdagradSGDM_BKAdamW_BK
Memory58675883586565256535
Time3.423.483.464.144.20
", + "bbox": [ + 504, + 604, + 887, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "DNN optimization. Different from previous full-matrix preconditioned methods, where the parameter update formulas are designed heuristically, we proved that given a cone constraint on the full-matrix preconditioner, the corresponding parameter update formula can be obtained by optimizing a guide function. Based on our theoretical analysis, we derived a specific guide function with the layer-wise block-diagonal constraint and Kronecker-factorized constraint. Through optimizing an upper bound of the guide function, a new preconditioned optimization algorithm, namely AdaBK, was obtained. We embedded AdaBK into two widely used optimizers, i.e., SGDM and AdamW, and the experimental results on image classification, object detection and segmentation tasks demonstrated that AdaBK can significantly improve the DNN optimization performance with only $10\\% \\sim 25\\%$ extra computation cost.", + "bbox": [ + 496, + 657, + 890, + 900 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "7873", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 78, + 89, + 173, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Naman Agarwal, Brian Bullins, Xinyi Chen, Elad Hazan, Karan Singh, Cyril Zhang, and Yi Zhang. Efficient full-matrix adaptive regularization. In International Conference on Machine Learning, pages 102-110. PMLR, 2019. 1", + "[2] Richard L Burden, J Douglas Faires, and Annette M Burden. Numerical analysis. Cengage learning, 2015. 4", + "[3] Jinghui Chen, Dongruo Zhou, Yiqi Tang, Ziyan Yang, Yuan Cao, and Quanquan Gu. Closing the generalization gap of adaptive gradient methods in training deep neural networks. arXiv preprint arXiv:1806.06763, 2018. 6", + "[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 7", + "[5] John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(7), 2011. 1, 2, 3, 6, 8", + "[6] Thomas George, César Laurent, Xavier Bouthillier, Nicolas Ballas, and Pascal Vincent. Fast approximate natural gradient descent in a kronecker-factored eigenbasis. arXiv preprint arXiv:1806.03884, 2018. 1", + "[7] Roger Grosse and James Martens. A kronecker-factored approximate fisher matrix for convolution layers. In International Conference on Machine Learning, pages 573-582. PMLR, 2016. 1, 2, 3, 4, 6", + "[8] Chun-Hua Guo and Nicholas J Higham. A schur-newton method for the matrix\\boldsymbol{\\mathrm{boldmath}} $\\mathfrak{p}$ th root and its inverse. SIAM Journal on Matrix Analysis and Applications, 28(3):788-804, 2006. 4", + "[9] Vineet Gupta, Tomer Koren, and Yoram Singer. Shampoo: Preconditioned stochastic tensor optimization. In International Conference on Machine Learning, pages 1842-1850. PMLR, 2018. 1, 2, 3, 4, 6", + "[10] Elad Hazan et al. Introduction to online convex optimization. Foundations and Trends® in Optimization, 2(3-4):157-325, 2016. 2", + "[11] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 
7", + "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6", + "[13] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 6", + "[14] Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. Label propagation for deep semi-supervised learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5070-5079, 2019. 1", + "[15] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 1, 2" + ], + "bbox": [ + 78, + 114, + 468, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 6", + "[17] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5, 7", + "[18] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1", + "[19] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1, 6", + "[20] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7", + "[21] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 1", + "[22] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 1, 5, 8", + "[23] Ning Qian. On the momentum term in gradient descent learning algorithms. Neural networks, 12(1):145-151, 1999. 1, 8", + "[24] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1", + "[25] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 7", + "[26] Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pages 400-407, 1951. 1", + "[27] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. 5, 6", + "[28] Shai Shalev-Shwartz et al. Online learning and online convex optimization. Foundations and Trends® in Machine Learning, 4(2):107-194, 2012. 2", + "[29] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. 
arXiv preprint arXiv:1409.1556, 2014. 6", + "[30] Tijmen Tieleman and Geoffrey Hinton. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4:26-31, 2012. 1", + "[31] Chengxi Ye, Matthew Evanusa, Hua He, Anton Mitrokhin, Tom Goldstein, James A Yorke, Cornelia Fermüller, and" + ], + "bbox": [ + 501, + 92, + 890, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "7874", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yiannis Aloimonos. Network deconvolution. arXiv preprint arXiv:1905.11926, 2019. 5", + "[32] Hongwei Yong, Jianqiang Huang, Xiansheng Hua, and Lei Zhang. Gradient centralization: A new optimization technique for deep neural networks. In European Conference on Computer Vision, pages 635-652. Springer, 2020. 1", + "[33] Hongwei Yong and Lei Zhang. An embedded feature whitening approach to deep neural network optimization. In the European Conference on Computer Vision, 2022. 4, 5, 6", + "[34] Jihun Yun, Aurelie C Lozano, and Eunho Yang. Stochastic gradient methods with block diagonal matrix adaptation. arXiv preprint arXiv:1905.10757, 2019. 1", + "[35] Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012. 1", + "[36] Huishuai Zhang, Wei Chen, and Tie-Yan Liu. Train feedforward neural network with layer-wise adaptive rate via approximating back-matching propagation. arXiv preprint arXiv:1802.09750, 2018. 5", + "[37] Michael R Zhang, James Lucas, Geoffrey Hinton, and Jimmy Ba. Lookahead optimizer: k steps forward, 1 step back. arXiv preprint arXiv:1907.08610, 2019. 1", + "[38] Juntang Zhuang, Tommy Tang, Yifan Ding, Sekhar C Tatikonda, Nicha Dvornek, Xenophon Papademetris, and James Duncan. Adbelief optimizer: Adapting stepsizes by the belief in observed gradients. Advances in neural information processing systems, 33:18795-18806, 2020. 1, 6" + ], + "bbox": [ + 78, + 90, + 468, + 460 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "7875", + "bbox": [ + 482, + 944, + 514, + 955 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_model.json b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d005ecc19acd8d232a3f13b41a32a8d67b610db6 --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_model.json @@ -0,0 +1,2310 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.107, + 0.003, + 0.182, + 0.043 + ], + "angle": 0, + "content": "CVF" + }, + { + "type": "header", + "bbox": [ + 0.237, + 0.001, + 0.808, + 0.047 + ], + "angle": 0, + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.131, + 0.888, + 0.154 + ], + "angle": 0, + "content": "A General Regret Bound of Preconditioned Gradient Method for DNN Training" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.182, + 0.649, + 0.22 + ], + "angle": 0, + "content": "Hongwei Yong Ying Sun Lei Zhang The Hong Kong Polytechnic University" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.22, + 0.763, + 0.234 + ], + "angle": 0, + "content": "hongwei.yong@polyu.edu.hk, {csysun, cslzhang}@comp.polyu.edu.hk" + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.269, + 0.314, + 0.285 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.301, + 0.473, + 0.695 + ], + "angle": 0, + "content": "While adaptive learning rate methods, such as Adam, have achieved remarkable improvement in optimizing Deep Neural Networks (DNNs), they consider only the diagonal elements of the full preconditioned matrix. Though the full-matrix preconditioned gradient methods theoretically have a lower regret bound, they are impractical for use to train DNNs because of the high complexity. In this paper, we present a general regret bound with a constrained full-matrix preconditioned gradient, and show that the updating formula of the preconditioner can be derived by solving a cone-constrained optimization problem. With the block-diagonal and Kronecker-factorized constraints, a specific guide function can be obtained. By minimizing the upper bound of the guide function, we develop a new DNN optimizer, termed AdaBK. A series of techniques, including statistics updating, dampening, efficient matrix inverse root computation, and gradient amplitude preservation, are developed to make AdaBK effective and efficient to implement. The proposed AdaBK can be readily embedded into many existing DNN optimizers, e.g., SGDM and AdamW, and the corresponding SGDM_BK and AdamW_BK algorithms demonstrate significant improvements over existing DNN optimizers on benchmark vision tasks, including image classification, object detection and segmentation. The code is publicly available at https://github.com/Yonghongwei/AdaBK." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.724, + 0.21, + 0.74 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.75, + 0.47, + 0.903 + ], + "angle": 0, + "content": "Stochastic gradient descent (SGD) [26] and its variants [21, 23], which update the parameters along the opposite of their gradient directions, have achieved great success in optimizing deep neural networks (DNNs) [14, 24]. Instead of using a uniform learning rate for different parameters, Duchi et al. [5] proposed the AdaGrad method, which adopts an adaptive learning rate for each parameter, and proved that AdaGrad can achieve lower regret bound than SGD. Following AdaGrad, a class of adaptive learning rate gradient descent methods has been proposed. For example," + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.27, + 0.895, + 0.422 + ], + "angle": 0, + "content": "RMSProp [30] and AdaDelta [35] introduce the exponential moving average to replace the sum of second-order statistics of the gradient for computing the adaptive learning rate. Adam [15] further adopts the momentum into the gradient, and AdamW [22] employs a weight-decoupled strategy to improve the generalization performance. RAdam [18], Adabelief [38] and Ranger [19,32,37] are proposed to accelerate training and improve the generalization capability over Adam. 
The adaptive learning rate methods have become the mainstream DNN optimizers." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.427, + 0.895, + 0.79 + ], + "angle": 0, + "content": "In addition to AdaGrad, Duchi et al. [5] provided a full-matrix preconditioned gradient descent (PGD) method that adopts the matrix \\( \\mathbf{H}_T = (\\sum_{t=1}^T \\mathbf{g}_t \\mathbf{g}_t^\\top)^{\\frac{1}{2}} \\) to adjust the gradient \\( \\mathbf{g}_T \\), where \\( t \\) denotes the iteration number and \\( T \\) is the number of the current iteration. It has been proved [5] that the preconditioned gradient \\( \\mathbf{H}_T^{-1} \\mathbf{g}_T \\) has a lower regret bound than the adaptive learning rate methods that only consider the diagonal elements of \\( \\mathbf{H}_T \\). However, the full-matrix preconditioned gradient is impractical to use due to its high dimension, which limits its application to DNN optimization. Various works have been reported to solve this problem in parameter space by adding some structural constraints on the full-matrix \\( \\mathbf{H}_T \\). For instances, GGT [1] stores only the gradients of recent iterations so that the matrix inverse root can be computed efficiently by fast low-rank computation tricks. Yun et al. [34] proposed a mini-block diagonal matrix framework to reduce the cost through coordinate partitioning and grouping strategies. Gupta et al. [9] proposed to extend AdaGrad with Kronecker products of full-matrix preconditioners to make it more efficient in DNN training. Besides, natural gradient approaches [6, 7], which adopt the approximations of the Fisher matrix to correct the descent direction, can also be regarded as full-matrix preconditioners." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.795, + 0.895, + 0.903 + ], + "angle": 0, + "content": "The existing constrained PGD (CPGD) methods, however, are heuristic since manually designed approximations to the full matrix \\( \\pmb{H}_T \\) are employed in them, while their influence on the regret bound is unknown. By far, they lack a general regret-bound theory that can guide us to design the full-matrix preconditioned gradient methods. On the other hand, the practicality and effectiveness of these precondi" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.517, + 0.957 + ], + "angle": 0, + "content": "7866" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.122 + ], + "angle": 0, + "content": "tioner methods are also an issue, which prevents them from being widely used in training DNNs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.124, + 0.468, + 0.32 + ], + "angle": 0, + "content": "To address the above-mentioned issues, in this paper we present a theorem to connect the regret bound of the constrained full-matrix preconditioner with a guide function. By minimizing the guide function under the constraints, an updating formula of the preconditioned gradient can be derived. That is, optimizing the guide function of the preconditioner will minimize its regret bound at the same time, while different constraints can yield different updating formulas. With the commonly-used constraints on DNN preconditioners, such as the block-diagonal and Kronecker-factorized constraints [7, 9], specific guide functions can be obtained. By minimizing the upper bound of the guide function, a new optimizer, namely AdaBK, is derived." 
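To fix intuition for the gap between diagonal and full-matrix preconditioning that motivates this work, the following toy NumPy example (ours, not the paper's) builds both the diagonal (AdaGrad-style) preconditioner and the full-matrix preconditioner $H_T = (\sum_t g_t g_t^\top)^{1/2}$ of Duchi et al. [5] from a stream of gradients and applies one preconditioned step $w - \eta H^{-1} g$.

```python
import numpy as np

rng = np.random.default_rng(0)
d, eta, eps = 5, 0.1, 1e-8
grads = rng.standard_normal((100, d))            # a toy stream of gradients g_1, ..., g_T

# Diagonal (AdaGrad-style) preconditioner: only the diagonal of sum_t g_t g_t^T is kept
h_diag = np.sqrt((grads ** 2).sum(axis=0)) + eps

# Full-matrix preconditioner: H_T = (sum_t g_t g_t^T)^{1/2}, via an eigendecomposition
S = grads.T @ grads
lam, U = np.linalg.eigh(S)
H_full = (U * np.sqrt(np.clip(lam, 0.0, None))) @ U.T + eps * np.eye(d)

# One preconditioned step w <- w - eta * H^{-1} g with the latest gradient
w, g = np.zeros(d), grads[-1]
w_diag = w - eta * g / h_diag                    # per-coordinate adaptive learning rate
w_full = w - eta * np.linalg.solve(H_full, g)    # full-matrix preconditioned gradient
print(w_diag, w_full)
```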
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.322, + 0.468, + 0.489 + ], + "angle": 0, + "content": "We further propose a series of techniques, including statistics updating, dampening, efficient matrix inverse root computation and gradient norm recovery, to make AdaBK more practical to use for DNN optimization. By embedding AdaBK into SGDM and AdamW (or Adam), we develop two new DNN optimizers, SGDM_BK and AdamW_BK. With acceptable extra computation and memory cost, they achieve significant performance gain in convergence speed and generalization capability over state-of-the-art DNN optimizers, as demonstrated in our experiments in image classification, object detection and segmentation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.491, + 0.468, + 0.732 + ], + "angle": 0, + "content": "For a better understanding of our proposed regret bound and the developed DNN optimizer, in Fig. 1, we illustrate the existing major DNN optimizers and their relationships. SGD and its momentum version (SGDM) apply the same learning rate to all parameters based on their gradient descent directions. The adaptive learning rate methods assign different learning rates to different parameters by using second-order information of the gradients, achieving better convergence performance. The adaptive learning rate methods can be viewed as special cases of PGD methods by considering only the diagonal elements of the full preconditioned matrix of gradients. Our method belongs to the class of PGD methods, while our proposed general regret bound of constrained PGD methods can be applied to the PGD optimizers under different constraints, including AdaGrad, Full-Matrix AdaGrad and our AdaBK." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.735, + 0.468, + 0.902 + ], + "angle": 0, + "content": "Notation system. We denote by \\( \\boldsymbol{w}_t \\) and \\( \\boldsymbol{g}_t \\) the weight vector and its gradient of a DNN model in the \\( t \\)-th iteration. Denote by \\( \\boldsymbol{g}_{t,i} \\) the gradient of the \\( i \\)-th sample in a batch in the \\( t \\)-th iteration, we have \\( \\boldsymbol{g}_t = \\frac{1}{n}\\sum_{i=1}^n\\boldsymbol{g}_{t,i} \\), where \\( n \\) is the batch size. The notations \\( A \\succeq 0 \\) and \\( A \\succ 0 \\) for a matrix \\( A \\) denote that \\( A \\) is symmetric positive semidefinite (PSD) and symmetric positive definite, respectively. \\( A \\succeq B \\) or \\( A - B \\succeq 0 \\) means that \\( A - B \\) is PSD. \\( \\operatorname{Tr}(A) \\) represents the trace of the matrix \\( A \\). For a PSD matrix \\( A \\), \\( A^\\alpha = U\\Sigma^\\alpha U^\\top \\), where \\( U\\Sigma U^\\top \\) is the Singular Value Decomposition (SVD) of \\( A \\). \\( ||\\boldsymbol{x}||_A = \\sqrt{\\boldsymbol{x}^\\top\\boldsymbol{A}\\boldsymbol{x}} \\) is the Maha" + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.093, + 0.892, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.544, + 0.357, + 0.846, + 0.371 + ], + "angle": 0, + "content": "Figure 1. Illustration of the main DNN optimizers." + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.401, + 0.892, + 0.493 + ], + "angle": 0, + "content": "lanobis norm of \\(\\pmb{x}\\) induced by PSD matrix \\(\\mathbf{A}\\), and its dual norm is \\(\\| \\pmb{x} \\|_{\\pmb{A}}^{*} = \\sqrt{\\pmb{x}^{\\top} \\pmb{A}^{-1} \\pmb{x}}\\). 
\\(\\pmb{A} \\otimes \\pmb{B}\\) means the Kronecker product of \\(\\pmb{A}\\) and \\(\\pmb{B}\\), while \\(\\pmb{A} \\odot \\pmb{B}\\) and \\(\\pmb{A}^{\\odot \\alpha}\\) are the element-wise matrix product and element-wise power operation, respectively. \\(\\operatorname{Diag}(\\pmb{x})\\) is a diagonal matrix with diagonal vector \\(\\pmb{x}\\), and \\(\\operatorname{vec}(\\cdot)\\) denotes the vectorization function." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.512, + 0.628, + 0.529 + ], + "angle": 0, + "content": "2. Background" + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.535, + 0.756, + 0.551 + ], + "angle": 0, + "content": "2.1. Online Convex Optimization" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.556, + 0.892, + 0.691 + ], + "angle": 0, + "content": "The online convex optimization framework [10, 28] remains the most powerful and popular tool to analyze DNN optimization algorithms, including AdaGrad [5], Adam [15], Shampoo [9], etc. Given an arbitrary, unknown sequence of convex loss functions \\(\\{f_1(\\pmb{w}),\\dots,f_t(\\pmb{w}),\\dots,f_T(\\pmb{w})\\}\\), we aim to optimize the weight \\(\\pmb{w}_t\\) in the \\(t\\)-th iteration, and evaluate it on the loss function \\(f_{t}(\\pmb{w})\\). The goal of our optimization process is to minimize the regret, which is defined as follows [10, 28]:" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.691, + 0.891, + 0.719 + ], + "angle": 0, + "content": "\\[\nR (T) = \\sum_ {t = 1} ^ {T} \\left(f _ {t} \\left(\\boldsymbol {w} _ {t}\\right) - f _ {t} (\\hat {\\boldsymbol {w}})\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.719, + 0.891, + 0.748 + ], + "angle": 0, + "content": "where \\(\\hat{\\boldsymbol{w}} = \\arg \\min_{\\boldsymbol{w}} \\sum_{t=1}^{T} f_t(\\boldsymbol{w})\\). Generally speaking, a lower regret bound means a more effective learning process." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.759, + 0.858, + 0.775 + ], + "angle": 0, + "content": "2.2. Regret Bound of Preconditioned Gradient" + }, + { + "type": "text", + "bbox": [ + 0.499, + 0.781, + 0.892, + 0.871 + ], + "angle": 0, + "content": "As in previous works [5, 9], an online mirror descent with an adaptive time-dependent regularization is adopted for online convex learning. In the \\(t\\)-th iteration, suppose we have obtained the gradient \\(\\pmb{g}_t = \\nabla f_t(\\pmb{w}_t)\\), then given a PSD matrix \\(\\pmb{H}_t\\succeq \\mathbf{0}\\), the parameters are updated by optimizing the following objective function:" + }, + { + "type": "equation", + "bbox": [ + 0.552, + 0.871, + 0.891, + 0.898 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {w} _ {t + 1} = \\arg \\min _ {\\boldsymbol {w}} \\eta \\boldsymbol {g} _ {t} ^ {\\top} \\boldsymbol {w} + \\frac {1}{2} \\left\\| \\boldsymbol {w} - \\boldsymbol {w} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}. \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7867" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.076, + 0.092, + 0.47, + 0.12 + ], + "angle": 0, + "content": "The solution of Eq. (2) is exactly a preconditioned gradient descent step, which is" + }, + { + "type": "equation", + "bbox": [ + 0.191, + 0.121, + 0.468, + 0.137 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {w} _ {t + 1} = \\boldsymbol {w} _ {t} - \\eta \\boldsymbol {H} _ {t} ^ {- 1} \\boldsymbol {g} _ {t}. 
\\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.138, + 0.468, + 0.166 + ], + "angle": 0, + "content": "Duchi et al. [5] have provided a regret bound for online mirror descent, as shown in Lemma 1:" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.168, + 0.469, + 0.199 + ], + "angle": 0, + "content": "Lemma 1 [5, 9] For any sequence of matrices \\( H_T \\succeq \\ldots \\succeq H_1 \\succeq 0 \\), the regret of online mirror descent holds that" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.211, + 0.469, + 0.284 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} R (T) \\leq \\frac {1}{2 \\eta} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {w} _ {t} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2} - \\left\\| \\boldsymbol {w} _ {t + 1} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}\\right) \\\\ + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.285, + 0.469, + 0.3 + ], + "angle": 0, + "content": "If we further assume \\( D = \\max_{t\\leq T}||\\pmb{w}_t - \\hat{\\pmb{w}} ||_2 \\), then we have" + }, + { + "type": "equation", + "bbox": [ + 0.135, + 0.3, + 0.469, + 0.327 + ], + "angle": 0, + "content": "\\[\nR (T) \\leq \\frac {D ^ {2}}{2 \\eta} \\operatorname {T r} \\left(\\boldsymbol {H} _ {T}\\right) + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.327, + 0.469, + 0.448 + ], + "angle": 0, + "content": "Our goal is to find a proper sequence of PSD matrices \\(\\{\\pmb{H}_1, \\pmb{H}_2, \\dots, \\pmb{H}_T\\}\\) to minimize the regret bound in Eq (4) or (5). Duchi et al. [5] suggested to adopt \\(\\pmb{H}_T = (\\sum_{t=1}^{T} \\pmb{g}_t \\pmb{g}_t^\\top)^{\\frac{1}{2}}\\) as the full matrix regularization matrix. However, it is hard to directly use it for DNN optimization due to the high dimension of parameter space. Therefore, Duchi et al. simplified this full-matrix \\(\\pmb{H}_T\\) with its diagonal elements, resulting in the AdaGrad algorithm [5]." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.463, + 0.469, + 0.497 + ], + "angle": 0, + "content": "3. A General Regret Bound for Constrained Preconditioned Gradient" + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.506, + 0.321, + 0.523 + ], + "angle": 0, + "content": "3.1. The General Regret Bound" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.53, + 0.469, + 0.696 + ], + "angle": 0, + "content": "Directly adopting a full-matrix \\( H_{t} \\) is absurd for optimizing a DNN because it is hard or even prohibitive to compute and store such a high-dimensional matrix. Hence, we need to reduce the dimension of \\( H_{t} \\) with a constraint set \\( \\Psi \\), e.g., the set of the block-diagonal matrices [5]. In this section, we aim to construct a general and practical full-matrix regularization term in Eq. (2) to achieve the low regret bound in Eq. (4). For a general constraint set \\( \\Psi \\subseteq \\mathbb{R}^{d\\times d} \\), if it is a cone (i.e., \\( \\forall x\\in \\Psi ,\\theta >0,\\theta x\\in \\Psi \\) holds), we have the following Theorem 1 and Lemma 2, whose proofs can be found in the supplementary materials." 
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.697, + 0.469, + 0.728 + ], + "angle": 0, + "content": "Theorem 1 For any cone constraint \\(\\Psi \\subseteq \\mathbb{R}^{d\\times d}\\), we define a guide function \\(F_{T}(S)\\) on \\(\\Psi\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.728, + 0.469, + 0.754 + ], + "angle": 0, + "content": "\\[\nF _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.751, + 0.303, + 0.765 + ], + "angle": 0, + "content": "and then define the matrix \\(\\mathbf{H}_T\\) as" + }, + { + "type": "equation", + "bbox": [ + 0.088, + 0.765, + 0.468, + 0.787 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {H} _ {T} = C _ {T} \\boldsymbol {S} _ {T}, \\quad \\boldsymbol {S} _ {T} = \\arg \\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S}), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.789, + 0.468, + 0.818 + ], + "angle": 0, + "content": "where \\( C_T = \\sqrt{F_T(S_T)} \\). The regret of online mirror descent holds that" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.817, + 0.469, + 0.886 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} R (T) \\leq \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) C _ {T} \\tag {8} \\\\ = \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) \\sqrt {\\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0} , T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S})}. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.887, + 0.469, + 0.902 + ], + "angle": 0, + "content": "The above theorem reveals that minimizing the guide" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.092, + 0.892, + 0.257 + ], + "angle": 0, + "content": "function \\( F_{T}(S) \\) on cone \\( \\Psi \\) will minimize the regret bound of the preconditioned gradient descent algorithm simultaneously. More importantly, given a cone constraint \\( \\Psi \\), the optimal \\( H_{T} = C_{T} S_{T} \\) that achieves the lowest regret bound can be obtained by optimizing Eq. (7). From Theorem 1, we can know that the regret \\( R(T) \\leq O(\\sqrt{\\min_{S \\in \\Psi, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)}) \\). If two cones satisfy \\( \\Psi_{1} \\subseteq \\Psi_{2} \\), we have \\( \\sqrt{\\min_{S \\in \\Psi_{2}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)} \\leq \\sqrt{\\min_{S \\in \\Psi_{1}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)} \\). This also explains why full-matrix regularization can achieve the lowest regret bound. In addition, we have the following lemma:" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.258, + 0.892, + 0.303 + ], + "angle": 0, + "content": "Lemma 2 Suppose that \\(\\Psi\\) is the set of either diagonal matrices or full-matrices, according to the definition of \\(S_{T}\\) and \\(H_{T}\\) in Eq. (7), we have" + }, + { + "type": "equation", + "bbox": [ + 0.506, + 0.303, + 0.891, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {H} _ {T} = \\operatorname {D i a g} \\left(\\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\odot \\boldsymbol {g} _ {t}\\right) ^ {\\odot \\frac {1}{2}}\\right), \\quad \\boldsymbol {H} _ {T} = \\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) ^ {\\frac {1}{2}}. 
\\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.401 + ], + "angle": 0, + "content": "From Lemma 2, we can easily see that the diagonal and full matrices used in AdaGrad [5] are two special cases of the results in Theorem 1." + }, + { + "type": "title", + "bbox": [ + 0.499, + 0.412, + 0.828, + 0.429 + ], + "angle": 0, + "content": "3.2. Layer-wise Block-diagonal Constraint" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.436, + 0.892, + 0.526 + ], + "angle": 0, + "content": "In practice, we need to choose a proper constraint set \\(\\Psi\\) to regularize the structure of matrix \\(H_{T}\\). The diagonal constraint is the simplest constraint. However, it results in a very low effective dimension of \\(H_{T}\\) so that the regret bound is high. We aim to find a more effective and practical constraint set over \\(H_{T}\\) for DNN optimization." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.527, + 0.892, + 0.677 + ], + "angle": 0, + "content": "Instead of considering the full-matrix regularization of all parameters, one can consider the full-matrix regularization of parameters within one DNN layer. Similar ideas have been adopted in KFAC [7] and Shampoo [9], which assume that the matrix \\( H_{T} \\) has a block diagonal structure and each sub-block matrix is used for one layer of a DNN. Suppose matrices \\( S_{l} \\) and \\( H_{l} \\) are for the \\( l \\)-th layer, and \\( g_{l} \\) is the gradient of weight in the \\( l \\)-th layer, in order to obtain the updating formula with block-diagonal constraint, we could minimize the guide function \\( F_{T}(S) \\). There is" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.677, + 0.891, + 0.717 + ], + "angle": 0, + "content": "\\[\nF _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2} = \\sum_ {l = 1} ^ {L} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {l, t} \\right\\| _ {\\boldsymbol {S} _ {l}} ^ {*}\\right) ^ {2}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.717, + 0.892, + 0.807 + ], + "angle": 0, + "content": "The above equation shows that the original optimization problem can be divided into a number of \\( L \\) sub-problems, and we can solve these sub-problems independently. For the convenience of expression, we omit the subscript \\( l \\) and analyze the sub-problem within one layer of a DNN in the following development." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.817, + 0.788, + 0.832 + ], + "angle": 0, + "content": "3.3. Kronecker-factorized Constraint" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.841, + 0.892, + 0.901 + ], + "angle": 0, + "content": "Because the dimension of the parameter space of one DNN layer can still be very high, we need to further constrain the structure of \\( H_{T} \\). The Kronecker-factorized constraint can be used to significantly reduce the parameter di" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7868" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.077, + 0.092, + 0.473, + 0.196 + ], + "angle": 0, + "content": "mension within one layer [7, 9]. To be specific, for a fully-connected layer with weight \\( \\pmb{W} \\in \\mathbb{R}^{C_{out} \\times C_{in}} \\) and \\( \\pmb{w} = \\mathrm{vec}(\\pmb{W}) \\), its corresponding gradient is \\( \\pmb{G} \\in \\mathbb{R}^{C_{out} \\times C_{in}} \\) and \\( \\pmb{g} = \\mathrm{vec}(\\pmb{G}) \\). 
Let \\( \\pmb{S} = \\pmb{S}_1 \\otimes \\pmb{S}_2 \\), where \\( \\pmb{S}_1 \\in \\mathbb{R}^{C_{out} \\times C_{out}} \\), \\( \\pmb{S}_2 \\in \\mathbb{R}^{C_{in} \\times C_{in}} \\) and \\( \\pmb{S} \\in \\mathbb{R}^{C_{in} C_{out} \\times C_{in} C_{out}} \\), and \\( \\otimes \\) is Kronecker product. Since \\( (S_1 \\otimes S_2)^{-1} = S_1^{-1} \\otimes S_2^{-1} \\), what we need to minimize becomes" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.196, + 0.484, + 0.267 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} F _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S} _ {1} \\otimes \\boldsymbol {S} _ {2}} ^ {*}\\right) ^ {2} = \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} ^ {\\top} \\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\boldsymbol {g} _ {t} \\\\ = \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) \\tag {11} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.268, + 0.465, + 0.283 + ], + "angle": 0, + "content": "under the constraints \\(\\{S_1, S_2 \\succeq 0, \\operatorname{Tr}(S_1) \\leq 1, \\operatorname{Tr}(S_2) \\leq 1\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.285, + 0.469, + 0.375 + ], + "angle": 0, + "content": "Nevertheless, directly minimizing the \\( F_{T}(\\mathbf{S}) \\) in Eq. (11) is still difficult, and we construct an upper bound of \\( F_{T}(\\mathbf{S}) \\) to minimize. Since \\( \\pmb{g} = \\frac{1}{n}\\sum_{i=1}^{n}\\pmb{g}_{i} \\), where \\( \\pmb{g}_{i} \\) is the gradient of sample \\( i \\) and \\( n \\) is the batch size, and \\( \\pmb{g}_{i} = \\mathrm{vec}(\\delta_{i}\\pmb{x}_{i}^{T}) = \\delta_{i} \\otimes \\pmb{x}_{i} \\), where \\( \\pmb{x}_{i} \\) is the input feature and \\( \\delta_{i} \\) is the output feature gradient of sample \\( i \\), we have the following lemma." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.376, + 0.469, + 0.408 + ], + "angle": 0, + "content": "Lemma 3 Denote by \\(L_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\delta_{ti} \\delta_{ti}^{\\top}\\) and \\(\\mathbf{R}_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\mathbf{x}_{ti} \\mathbf{x}_{ti}^{\\top}\\), there is" + }, + { + "type": "equation", + "bbox": [ + 0.106, + 0.408, + 0.469, + 0.478 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} F _ {T} (\\boldsymbol {S}) \\leq \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\frac {1}{n} \\sum_ {t = 1} ^ {T} \\sum_ {i = 1} ^ {n} \\boldsymbol {g} _ {t i} \\boldsymbol {g} _ {t i} ^ {\\top}\\right) \\tag {12} \\\\ \\leq \\frac {1}{n} T r (\\boldsymbol {S} _ {1} ^ {- 1} \\boldsymbol {L} _ {T}) T r (\\boldsymbol {S} _ {2} ^ {- 1} \\boldsymbol {R} _ {T}). \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.479, + 0.469, + 0.538 + ], + "angle": 0, + "content": "We minimize the upper bound of \\( F_{T}(S) \\) defined in Lemma 3. One can see that the upper bound can be divided into two independent problems w.r.t. 
\\( S_{1} \\) and \\( S_{2} \\), respectively, which are" + }, + { + "type": "equation", + "bbox": [ + 0.078, + 0.538, + 0.469, + 0.576 + ], + "angle": 0, + "content": "\\[\n\\min _ {S _ {1} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {1}) \\leq 1} \\operatorname {T r} \\left(S _ {1} ^ {- 1} L _ {T}\\right) \\text {a n d} \\min _ {S _ {2} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {2}) \\leq 1} \\operatorname {T r} \\left(S _ {2} ^ {- 1} R _ {T}\\right). \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.577, + 0.468, + 0.605 + ], + "angle": 0, + "content": "To solve the above problem, we have the following lemma: Lemma 4 If \\( A \\succ 0 \\), we have:" + }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.605, + 0.469, + 0.63 + ], + "angle": 0, + "content": "\\[\n\\arg \\min _ {\\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} T r (\\boldsymbol {S} ^ {- 1} \\boldsymbol {A}) = \\boldsymbol {A} ^ {\\frac {1}{2}} / T r (\\boldsymbol {A} ^ {\\frac {1}{2}}). \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.632, + 0.469, + 0.745 + ], + "angle": 0, + "content": "The proofs of Lemma 3 and Lemma 4 can be found in the supplementary materials. According to Lemma 4, we know that the solution of Eq. (13) is \\( S_{1,T} = L_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(L_{T}^{\\frac{1}{2}}) \\) and \\( S_{2,T} = R_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(R_{T}^{\\frac{1}{2}}) \\). In practice, \\( L_{T} \\) and \\( R_{T} \\) will be added with a dampening term \\( \\epsilon I \\) to ensure that they are symmetric and positive definite. Without considering the magnitude of \\( H_{T} \\), we can set" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.745, + 0.469, + 0.793 + ], + "angle": 0, + "content": "\\(\\pmb{H}_{T} = \\pmb{H}_{1,T}\\otimes \\pmb{H}_{2,T},\\pmb{H}_{1,T} = \\pmb{L}_{T}^{\\frac{1}{2}},\\pmb{H}_{2,T} = \\pmb{R}_{T}^{\\frac{1}{2}}.\\) (15) Then according to the property of Kronecker product, the online mirror descent updating formula in Eq. (3) becomes" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.793, + 0.468, + 0.809 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {W} _ {t + 1} = \\boldsymbol {W} _ {t} - \\eta \\boldsymbol {H} _ {1, t} ^ {- 1} \\boldsymbol {G} _ {t} \\boldsymbol {H} _ {2, t} ^ {- 1}. \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.81, + 0.469, + 0.868 + ], + "angle": 0, + "content": "We ignore the magnitude of \\( H_{T} \\) here because it will have no impact on the result after we introduce a gradient norm recovery operation in the algorithm, which will be described in the next section." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.469, + 0.902 + ], + "angle": 0, + "content": "Finally, the proposed vanilla optimizer, termed AdaBK, is summarized in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.091, + 0.734, + 0.107 + ], + "angle": 0, + "content": "4. Detailed Implementation" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.117, + 0.892, + 0.192 + ], + "angle": 0, + "content": "The proposed AdaBK in Algorithm 1 involves the calculation of matrix inverse root, which may be unstable and inefficient. For an efficient and effective implementation of AdaBK in training DNNs, we propose a series of techniques." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.194, + 0.893, + 0.345 + ], + "angle": 0, + "content": "Efficient Matrix Inverse Root. As shown in Algorithm 1, we need to calculate the matrix inverse root of \\( L_{t} \\) and \\( R_{t} \\). 
Traditional approaches usually use SVD to calculate it. Notwithstanding, SVD is inefficient and the existing deep learning frameworks (e.g., PyTorch) do not implement SVD on GPU well, making the training unstable or even not converging. Instead of using SVD, we adopt the Schur-Newton algorithm [8] to compute the matrix inverse root. For matrix \\( A \\), let \\( Y_{0} = A / \\operatorname{Tr}(A) \\) and \\( Z_{0} = I \\). The Schur-Newton algorithm adopts the following iterations:" + }, + { + "type": "equation", + "bbox": [ + 0.515, + 0.345, + 0.892, + 0.391 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\boldsymbol {T} _ {k} = \\frac {1}{2} \\left(3 \\boldsymbol {I} - \\boldsymbol {Z} _ {k - 1} \\boldsymbol {Y} _ {k - 1}\\right); \\\\ \\boldsymbol {Y} _ {k} = \\boldsymbol {Y} _ {k - 1} \\boldsymbol {T} _ {k}, \\boldsymbol {Z} _ {k} = \\boldsymbol {T} _ {k} \\boldsymbol {Z} _ {k - 1}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {17}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.391, + 0.892, + 0.438 + ], + "angle": 0, + "content": "Then we have \\(A^{\\frac{1}{2}} \\approx Y_K \\sqrt{\\operatorname{Tr}(A)}\\), \\(A^{-\\frac{1}{2}} \\approx Z_K / \\sqrt{\\operatorname{Tr}(A)}\\). In practice, we find that setting \\(K = 10\\) can achieve good enough precision for our problem." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.439, + 0.893, + 0.666 + ], + "angle": 0, + "content": "Statistics Updating. In Algorithm 1, \\( L_{t} \\) and \\( R_{t} \\) accumulate the statistics of output feature gradient \\( \\Delta_{t} \\) and input feature \\( X_{t} \\), respectively. Hence the amplitude of \\( L_{t} \\) and \\( R_{t} \\) will increase during training. After certain iterations, the effective learning rate will become small, making the learning process inefficient. To solve this issue, we use the exponential moving average of \\( L_{t} \\) and \\( R_{t} \\). Meanwhile, it is unnecessary to compute \\( L_{t}, R_{t} \\), and their inverse root in each iteration. Two hyper-parameters \\( T_{s} \\) and \\( T_{ir} \\) are introduced to control the frequency of updating \\( L_{t} \\) and \\( R_{t} \\) and their inverse root, respectively. This infrequent statistics updating strategy can significantly improve efficiency with a little performance drop. We use two additional statistics \\( \\widehat{L}_{t} \\) and \\( \\widehat{R}_{t} \\) to restore the matrix inverse root of \\( L_{t} \\) and \\( R_{t} \\) (please refer to Algorithm 2)." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.668, + 0.893, + 0.849 + ], + "angle": 0, + "content": "Dampening Strategy. When the dimensions of \\(\\Delta_t\\) and \\(X_t\\) are high, \\(L_t\\) and \\(R_t\\) tend to be singular matrices with large condition numbers. A dampening term \\(\\epsilon I\\) should be added into \\(L_t\\) and \\(R_t\\) to improve their condition number and enhance the stability of computing inverse root. As in [33], we adopt an adaptive dampening parameter \\(\\epsilon \\lambda_{max}\\) where \\(\\lambda_{max}\\) is the max singular value of the matrix \\(L_t\\) or \\(R_t\\). With this setting, the condition number will be \\(\\frac{\\lambda_{max} + \\epsilon \\lambda_{max}}{\\lambda_{min} + \\epsilon \\lambda_{max}} \\leq \\frac{1 + \\epsilon}{\\epsilon}\\), bounded by a value determined by \\(\\epsilon\\). 
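A minimal PyTorch sketch of the Schur-Newton iteration in Eq. (17), which the dampened matrices above are fed into, might look as follows (function and variable names are ours; \( K = 10 \) as in the paper, and \( A \) is assumed symmetric positive definite after dampening):

```python
import torch

def schur_newton_inv_root(A, K=10):
    # Coupled iteration of Eq. (17) for a symmetric positive-definite A.
    d = A.shape[0]
    I = torch.eye(d, dtype=A.dtype, device=A.device)
    trace = A.trace()
    Y = A / trace                  # Y_0 = A / Tr(A), spectrum in (0, 1]
    Z = I.clone()                  # Z_0 = I
    for _ in range(K):
        T = 0.5 * (3.0 * I - Z @ Y)
        Y = Y @ T                  # Y_k -> (A / Tr(A))^{1/2}
        Z = T @ Z                  # Z_k -> (A / Tr(A))^{-1/2}
    return Y * trace.sqrt(), Z / trace.sqrt()   # approx. A^{1/2}, A^{-1/2}
```

Only matrix multiplications are involved, which is why this iteration is far more GPU-friendly than an SVD.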
Meanwhile, the maximum singular value of the symmetric matrix (\\(L_t\\) or \\(R_t\\)) can be efficiently obtained by the power iteration method [2] as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.862, + 0.892, + 0.899 + ], + "angle": 0, + "content": "\\[\n\\left\\{ \\begin{array}{l} \\boldsymbol {v} _ {k} = \\boldsymbol {A} \\boldsymbol {u} _ {k - 1}, \\\\ \\boldsymbol {u} _ {k} = \\boldsymbol {v} _ {k} / | | \\boldsymbol {v} _ {k} | | _ {2}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {18}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7869" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.084, + 0.095, + 0.427, + 0.14 + ], + "angle": 0, + "content": "Algorithm 1: AdaBK (Adaptive Regularization with Block-diagonal and Kronecker-factorized Constraints)" + }, + { + "type": "algorithm", + "bbox": [ + 0.084, + 0.142, + 0.42, + 0.316 + ], + "angle": 0, + "content": "Input: \\(W_{0},L_{0} = \\epsilon I_{C_{out}},R_{0} = \\epsilon I_{C_{in}},\\eta\\) Output: \\(W_{T}\\) \n1 for \\(t = 1:T\\) do \n2 Receive \\(X_{t} = [\\pmb{x}_{ti}]_{i = 1}^{n}\\) by forward propagation; \n3 Receive \\(\\Delta_t = [\\delta_{ti}]_i^n\\) by backward propagation; \n4 Compute gradient \\(G_{t}\\) . \n5 Update preconditioners: \n6 \\(L_{t} = L_{t - 1} + \\Delta_{t}\\Delta_{t}^{\\top};\\) \n7 \\(R_{t} = R_{t - 1} + X_{t}X_{t}^{\\top};\\) \n8 Update weight: \n\\(W_{t + 1} = W_t - \\eta L_t^{-\\frac{1}{2}}G_tR_t^{-\\frac{1}{2}};\\) \n9 end" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.357, + 0.47, + 0.387 + ], + "angle": 0, + "content": "We use \\(\\lambda_{max} \\approx ||\\pmb{v}_K||_2\\) for our proposed adaptive dampening and set \\(K\\) to 10 in our implementation." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.47, + 0.56 + ], + "angle": 0, + "content": "Gradient Norm Recovery. Since the amplitude of the preconditioned gradient \\( L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}} \\) may significantly differ from the amplitude of original \\( G_{t} \\), the optimal learning rate and weight decay will also differ from the original optimizer. It is expected that the well-tuned hyperparameters in current optimizers (e.g., SGDM, AdamW) can be directly used in our proposed AdaBK optimizer without further hyper-parameter tuning. To this end, we follow the strategy in [33] to re-scale the amplitude of the preconditioned gradient \\( \\widehat{G}_{t} = L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}} \\) to the original gradient \\( G_{t} \\) by multiplying it with a scaling factor, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.562, + 0.469, + 0.596 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {G}} _ {t} = \\widehat {\\boldsymbol {G}} _ {t} \\frac {\\left| \\left| \\boldsymbol {G} _ {t} \\right| \\right| _ {2}}{\\left| \\left| \\widehat {\\boldsymbol {G}} _ {t} \\right| \\right| _ {2}}. \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.599, + 0.47, + 0.659 + ], + "angle": 0, + "content": "It is easy to know that \\(\\tilde{G}_t\\) and \\(G_{t}\\) have the same \\(L_{2}\\) norm. With gradient norm recovery, the proposed AdaBK method can be easily embedded into existing optimizers without much extra hyperparameter tuning." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.66, + 0.469, + 0.84 + ], + "angle": 0, + "content": "Convolutional Layer. We have discussed the optimization of FC layers in Section 3. 
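Before turning to convolutional layers, the pieces above can be assembled into a rough sketch of the one-step preconditioned gradient that Algorithm 2 formalizes. The state layout and helper names below are our own assumptions, an exact eigendecomposition stands in for the Schur-Newton iteration to keep the sketch short, and the state is assumed to initially hold \( L \) and \( R \) (e.g., \( \epsilon I \)) with the first call made at \( t = 0 \) so the cached inverse roots get created.

```python
import torch

def lambda_max(A, K=10):
    # Power iteration of Eq. (18) for the largest singular value of PSD A.
    u = torch.randn(A.shape[0], dtype=A.dtype, device=A.device)
    u = u / u.norm()
    v = u
    for _ in range(K):
        v = A @ u
        u = v / v.norm()
    return v.norm()

def inv_root(A):
    # Exact A^{-1/2}; the paper uses the Schur-Newton iteration instead.
    vals, vecs = torch.linalg.eigh(A)
    return vecs @ torch.diag(vals.clamp_min(1e-12).rsqrt()) @ vecs.T

def adabk_preconditioned_grad(state, X, Delta, G, t,
                              alpha=0.9, eps=1e-5, Ts=200, Tir=2000):
    # One-step preconditioned gradient in the spirit of Algorithm 2.
    if t % Ts == 0:    # infrequent EMA statistics updating
        state['L'] = alpha * state['L'] + (1 - alpha) * Delta @ Delta.T
        state['R'] = alpha * state['R'] + (1 - alpha) * X @ X.T
    if t % Tir == 0:   # infrequent inverse-root refresh with adaptive dampening
        for key in ('L', 'R'):
            A = state[key]
            I = torch.eye(A.shape[0], dtype=A.dtype, device=A.device)
            state[key + '_inv_root'] = inv_root(A + eps * lambda_max(A) * I)
    G_hat = state['L_inv_root'] @ G @ state['R_inv_root']   # precondition
    return G_hat * (G.norm() / (G_hat.norm() + 1e-12))      # norm recovery, Eq. (19)
```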
For the Conv layer, the derivation process is similar. The convolution operation can be formulated as matrix multiplication with the \\(im2col\\) operation [31, 36], and then the Conv layer can be viewed as an FC layer with \\(\\mathfrak{A} = \\mathcal{U}_1(W)\\mathfrak{X}\\), where \\(\\mathfrak{A}\\) and \\(\\mathfrak{X}\\) are the output and input features after \\(im2col\\) operation, and \\(\\mathcal{U}_1(\\cdot)\\) is the mode 1 unfold operation of a tensor. For example, for a convolution weight \\(\\mathbf{W} \\in \\mathbb{R}^{C_{out} \\times C_{in} \\times k_1 \\times k_2}\\), we have \\(\\mathcal{U}_1(\\mathbf{W}) \\in \\mathbb{R}^{C_{out} \\times C_{in} k_1 k_2}\\). \\(\\mathcal{U}_1(\\mathbf{W})\\) can be considered as the weight of the FC layer, and the remaining computation is the same as the FC layer." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.84, + 0.47, + 0.901 + ], + "angle": 0, + "content": "**Embedding AdaBK into SGDM and AdamW.** With the above-introduced techniques, a more efficient and practical implementation of AdaBK can be obtained. The one-step preconditioned gradient of AdaBK is summarized in" + }, + { + "type": "code_caption", + "bbox": [ + 0.507, + 0.095, + 0.848, + 0.123 + ], + "angle": 0, + "content": "Algorithm 2: One Step Preconditioned Gradient of AdaBK" + }, + { + "type": "algorithm", + "bbox": [ + 0.5, + 0.125, + 0.882, + 0.345 + ], + "angle": 0, + "content": "Input: \\(T_{s}, T_{ir}, \\alpha, \\epsilon, \\beta, L_{t-1}, R_{t-1}, \\widehat{L}_{t-1}, \\widehat{R}_{t-1}, X_{t} = [x_{ti}]_{i=1}^{n}\\), \\(\\Delta_{t} = [\\delta_{ti}]_{i=1}^{n}\\), \\(G_{t} = \\nabla W_{t} \\mathcal{L}\\) \nOutput: \\(\\tilde{G}_{t}\\) \n1 if \\(t \\% T_{s} = 0\\) then \n2 \\(\\begin{array}{r}L_{t} = \\alpha L_{t-1} + (1 - \\alpha)\\Delta_{t}\\Delta_{t}^{\\top};\\\\ R_{t} = \\alpha R_{t-1} + (1 - \\alpha)X_{t}X_{t}^{\\top};\\end{array}\\) \n3 else \n4 end \n7 if \\(t \\% T_{ir} = 0\\) then \n8 \\(\\begin{array}{r}\\mathrm{Compute~}\\lambda_{max}^{L}\\mathrm{~and~}\\lambda_{max}^{R}\\mathrm{~by~Power~Iteration;}\\\\ \\mathrm{Compute~}\\widehat{L}_{t} = (L_{t} + \\lambda_{max}^{L}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~and}\\\\ \\widehat{R}_{t} = (R_{t} + \\lambda_{max}^{R}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~by~Schur-Newton~Iteration~Eq.~(17)};\\end{array}\\) \n9 else \n11 \\(\\begin{array}{r}\\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{L}_{t-1};\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t} = \\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[G_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{G}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_{t}|[V|\\widehat{\\mathbf{G}}_{t}||2];\\\\ 
\\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbf{G}}_ {t}|[V|\\widehat{\\mathbf{G}}_ {t}||2];\\\\ \\widehat{L}_{t}=\\widehat{\\mathbb{T}}_ {i t}-\\frac{\\partial}{\\partial x _ {i t}},\\quad\\forall i,\\forall t.\\end{array}\\)" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.357, + 0.892, + 0.488 + ], + "angle": 0, + "content": "Algorithm 2. For a FC layer, the complexity of AdaBK is \\( T(O(\\frac{C_{in}^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2 + C_{out}^2)N}{T_s}) + O(C_{in}C_{out}(C_{in} + C_{out}))) \\), where \\( T \\) is the total number of iterations. For a Conv layer, its complexity is \\( T(O(\\frac{C_{in}^3k_1^3k_2^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2k_1^2k_2^2 + C_{out}^2)N}{T_s}) + O(C_{in}k_1k_2C_{out}(C_{in}k_1k_2 + C_{out}))) \\). In our implementation, \\( T_s \\) and \\( T_{ir} \\) are set to 200 and 2000, respectively, and the complexity is acceptable. In practice, it only costs \\( 10\\% \\sim 25\\% \\) additional training time." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.49, + 0.892, + 0.596 + ], + "angle": 0, + "content": "AdaBK can be embedded into many existing optimizers. In this paper, we embed it into the two commonly used DNN optimizers, i.e., SGDM and AdamW (or Adam), and name the obtained new optimizers as SGDM_BK and AdamW_BK accordingly. The detailed algorithms of SGDM_BK and AdamW_BK are summarized in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.618, + 0.633, + 0.635 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.646, + 0.892, + 0.811 + ], + "angle": 0, + "content": "We evaluate the proposed SGDM_BK and AdamW_BK optimizers on typical vision tasks, including image classification (on CIFAR100/CIFAR10 [16] and ImageNet [27]), object detection and segmentation (on COCO [17]). For the hyper-parameters of SGDM_BK and AdamW_BK, we set \\(\\alpha = 0.9\\), \\(T_s = 200\\), \\(T_{ir} = 2000\\), and \\(\\epsilon = 0.00001\\) throughout the experiments if not specified. Ablation studies on hyper-parameter selection can be found in the supplementary material. All experiments are conducted under the Pytorch 1.11 framework with NVIDIA GeForce RTX 2080Ti and 3090 Ti GPUs." + }, + { + "type": "title", + "bbox": [ + 0.5, + 0.83, + 0.692, + 0.845 + ], + "angle": 0, + "content": "5.1. Image Classification" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.856, + 0.892, + 0.901 + ], + "angle": 0, + "content": "In the image classification task, we compare SGDM_BK and AdamW_BK with the representative and state-of-the-art DNN optimizers, including SGDM, AdamW [22], Ada" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7870" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.089, + 0.895, + 0.12 + ], + "angle": 0, + "content": "Table 1. Testing accuracies (%) on CIFAR100/CIFAR10. The best and second best results are highlighted in bold and italic fonts, respectively. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.125, + 0.891, + 0.261 + ], + "angle": 0, + "content": "
CIFAR100
OptimizerSGDMAdamWAdagradRAdamAdabeliefShampooKFACWSGDMSGDM_BKAdamW_BK
ResNet1877.20 ± .3077.23 ± .1071.55 ± .2577.05 ± .1577.43 ± .3671.81 ± .4078.25 ± .2379.28 ± .2779.30 ± .07 (↑2.10)78.66 ± .34 (↑1.43)
ResNet5077.78 ± .4378.10 ± .1772.20 ± .1578.20 ± .1579.08 ± .2371.31 ± .5379.25 ± .2680.90 ± .2381.26 ± .20 (↑3.48)80.15 ± .19 (↑2.05)
VGG1170.80 ± .2971.20 ± .2967.70 ± .1871.08 ± .2472.45 ± .1663.56 ± .4472.75 ± .3173.42 ± .2873.89 ± .13 (↑3.09)73.09 ± .29 (↑1.89)
VGG1970.94 ± .3270.26 ± .2363.30 ± .5873.01 ± .2072.39 ± .2765.62 ± .5673.87 ± .4374.82 ± .2375.10 ± .13 (↑4.16)74.27 ± .25 (↑4.01)
DenseNet12179.53 ± .1978.05 ± .2671.27 ± .7978.65 ± .0579.88 ± .0874.95 ± .4279.84 ± .3381.23 ± .1081.18 ± .27 (↑1.65)79.93 ± .23 (↑1.88)
CIFAR10
ResNet1895.10 ± .0794.80 ± .1092.83 ± .1294.70 ± .1895.12 ± .1492.94 ± .2795.01 ± .1295.43 ± .0895.44 ± .12 (↑0.34)95.22 ± .13 (↑0.42)
ResNet5094.75 ± .3094.72 ± .1092.55 ± .3994.72 ± .1095.35 ± .0592.61 ± .2795.43 ± .1695.80 ± .1595.86 ± .05 (↑1.11)95.40 ± .07 (↑0.68)
VGG1192.17 ± .1992.02 ± .0890.25 ± .2592.00 ± .1892.45 ± .1889.01 ± .2992.82 ± .1192.95 ± .2093.14 ± .26 (↑0.97)92.96 ± .07 (↑0.94)
VGG1993.61 ± .0693.40 ± .0491.28 ± .1493.57 ± .1193.58 ± .1290.62 ± .3293.47 ± .0993.91 ± .1994.03 ± .15 (↑0.42)93.94 ± .10 (↑0.54)
DenseNet12195.37 ± .1794.80 ± .0792.95 ± .2395.02 ± .0895.37 ± .0494.37 ± .3695.18 ± .2295.72 ± .1495.70 ± .13 (↑0.33)95.40 ± .04 (↑0.60)
" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.274, + 0.28, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.274, + 0.484, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.487, + 0.275, + 0.687, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.688, + 0.275, + 0.888, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.438, + 0.895, + 0.467 + ], + "angle": 0, + "content": "Figure 2. Training loss curves (loss vs. epoch and loss vs. time) of SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 before 60 epochs." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.492, + 0.47, + 0.569 + ], + "angle": 0, + "content": "grad [5], RAdam [19]1, and Adabelief [38]2, Shampoo [9]3, KFAC [7] [9]4, WSGDM [33]5. We tune learning rate and weight decay for each optimizer with grid search and the detailed settings for different optimizers can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.57, + 0.471, + 0.796 + ], + "angle": 0, + "content": "Results on CIFAR100/10: We first testify the effectiveness of SGDM_BK and AdamW_BK with different DNN models on CIFAR100/CIFAR10 [16], including ResNet18, ResNet50 [12], VGG11 VGG19 [29] and DenseNet-121 [13]. All the DNN models are trained for 200 epochs with batch size 128 on one GPU. The learning rate is multiplied by 0.1 for every 60 epochs. The experiments are repeated 4 times and the results are reported in a \"mean \\(\\pm\\) std\" format in Table 1. We can see that SGDM_BK and AdamW_BK achieve significant improvements over SGDM and AdamW, which are \\(1.44\\% \\sim 4.16\\%\\) and \\(1.43\\% \\sim 4.01\\%\\) on CIFAR100, and \\(0.28\\% \\sim 1.11\\%\\) and \\(0.42\\% \\sim 0.94\\%\\) on CIFAR10, respectively. They also surpass other compared optimizers for most of the used backbone networks." + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.808, + 0.375, + 0.821 + ], + "angle": 0, + "content": "1https://github.com/LiyuanLucasLiu/RAdam" + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.822, + 0.468, + 0.842 + ], + "angle": 0, + "content": "2https : / / github . com / jintang - zhuang / Adabelief - Optimizer" + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.843, + 0.406, + 0.854 + ], + "angle": 0, + "content": "3 https://github.com/moskomule/shampoo.pytorch" + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.855, + 0.393, + 0.866 + ], + "angle": 0, + "content": "4 https://github.com/alecwangcq/KFAC-Pytorch" + }, + { + "type": "page_footnote", + "bbox": [ + 0.098, + 0.867, + 0.437, + 0.877 + ], + "angle": 0, + "content": "5 https://github.com/Yonghongwei/W-SGDM-and-W-Adam" + }, + { + "type": "page_footnote", + "bbox": [ + 0.08, + 0.878, + 0.468, + 0.9 + ], + "angle": 0, + "content": "The model can be downloaded at https://github.com/weiaicunzai/pytorch-cifar100." + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.808, + 0.468, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.493, + 0.893, + 0.644 + ], + "angle": 0, + "content": "Figure 2 shows the curves of training loss vs. epoch and training loss vs. time for SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 backbones before 60 epochs. 
One can see that SGDM_BK and AdamW_BK can significantly speed up the training process of SGDM and AdamW, respectively. Since SGDM_BK and AdamW_BK cost additional time in each iteration, for a fair comparison, we also show the curves of training loss vs. time. One can see that they still have great advantages over the original SGDM and AdamW." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.644, + 0.895, + 0.885 + ], + "angle": 0, + "content": "Results on ImageNet-1k: To testify that SGDM_BK and AdamW_BK can also work well on large-scale datasets, we evaluate them on ImageNet-1k [27], which contains 1000 categories with 1.28 million images for training and 50K images for validation. ResNet18 and ResNet50 are selected as the backbone models with training batch size 256 on 4 GPUs, and the training settings follow the work in [3, 38]. The learning rate is multiplied by 0.1 for every 30 epochs. SGDM_BK and AdamW_BK adopt the same learning rate and weight decay as SGDM and AdamW, respectively. The top 1 accuracies on the validation set are reported in Table 2. One can see that SGDM_BK and AdamW_BK perform better than others. Meanwhile, we plot the training and validation accuracy curves in Figure 3, from which we see that the proposed AdaBK technique can largely speed up the training process." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.886, + 0.892, + 0.902 + ], + "angle": 0, + "content": "We also evaluate the proposed optimizer on Swin" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.514, + 0.957 + ], + "angle": 0, + "content": "7871" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.895, + 0.118 + ], + "angle": 0, + "content": "Table 2. Top 1 accuracy (%) on the validation set of ImageNet-1k. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.122, + 0.89, + 0.168 + ], + "angle": 0, + "content": "
OptimizerSGDMAdamWAdagradRAdamAdabeliefShampooKFACWSGDMSGDM_BKAdamW_BK
ResNet1870.4970.0162.2269.9270.0864.4569.6271.4371.59 (↑1.10)71.63 (↑1.62)
ResNet5076.3176.0269.3876.1276.2270.1176.3677.4877.62 (↑1.31)77.22 (↑1.10)
" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.184, + 0.287, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.184, + 0.485, + 0.338 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.485, + 0.184, + 0.685, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.687, + 0.184, + 0.89, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.349, + 0.893, + 0.378 + ], + "angle": 0, + "content": "Figure 3. Training and validation accuracy curves of SGDM, SGDM_BK, AdamW and AdamW_BK on ImageNet-1k with ResNet18 and ResNet50 backbones." + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.392, + 0.468, + 0.406 + ], + "angle": 0, + "content": "Table 3. Top 1 accuracy (%) on the validation set of ImageNet-1k." + }, + { + "type": "table", + "bbox": [ + 0.152, + 0.41, + 0.393, + 0.455 + ], + "angle": 0, + "content": "
OptimizerAdamWAdamW_BK
Swin-T81.1881.79 (↑0.61)
Swin-B83.0283.14 (↑0.12)
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.469, + 0.47, + 0.547 + ], + "angle": 0, + "content": "transformer [20] backbone. We compare AdamW_BK with their default optimizer AdamW. The configurations follow the settings of the official MMClassification toolbox7. The results are shown in Table 3. We can see AdamW_BK can also achieve certain performance gain over AdamW." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.556, + 0.33, + 0.572 + ], + "angle": 0, + "content": "5.2. Detection and Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.58, + 0.47, + 0.76 + ], + "angle": 0, + "content": "We then evaluate SGDM_BK and AdamW_BK on COCO [17] detection and segmentation tasks to show that they can work well beyond classification tasks and can be used to fine-tune pre-trained models. The models are pre-trained on ImageNet1k and fine-tuned on COCO train2017 (118K images), and then evaluated on COCO val2017 (40K images). The latest version of MMDetection toolbox [4] is used as to train our models. We test SGDM_BK and AdamW_BK by Faster-RCNN [25] and Mask-RCNN [11] with various backbones, including ResNet50 (R50), ResNet101 (R101) and Swin transformer [20]." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.761, + 0.47, + 0.868 + ], + "angle": 0, + "content": "As mentioned in Section 4, with the gradient norm recovery operation, we can directly adopt the same hyperparameters (i.e., learning rate and weight decay) of SGDM and AdamW into SGDM_BK and AdamW_BK, respectively. To be specific, for R50 and R101 backbones, we compare the proposed optimizer with SGDM, WSGDM and AdamW. The learning rate and weight decay are set to" + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.393, + 0.892, + 0.424 + ], + "angle": 0, + "content": "0.02 and 0.0001 for SGDM, WSGDM and SGDM_BK, and 0.0001 and 0.2 for AdamW and AdamW_BK, respectively." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.425, + 0.893, + 0.531 + ], + "angle": 0, + "content": "For the Swin transformer backbone, the learning rate and weight decay are set to 0.0001 and 0.02 for AdamW and AdamW_BK, respectively. The learning rate schedule is 1X for Faster-RCNN. Other configurations follow the settings of the official MMDetection toolbox8. For the default optimizers, we use their official results9. This experiment is conducted on NVIDIA GeForce RTX 3090 Ti GPUs." + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.532, + 0.895, + 0.821 + ], + "angle": 0, + "content": "Table 4 lists the Average Precision (AP) of object detection by Faster-RCNN. It can be seen that the models trained by SGDM_BK and AdamW_BK achieve clear performance gains of \\(1.6\\% \\sim 2.2\\%\\) AP for R50 and R101 backbones. Fig. 4 shows the training loss curves of Faster-RCNN with ResNet50 backbone. One can see that SGDM_BK and AdamW_BK accelerate the training process over SGDM and AdamW. Table 5 shows the \\(\\mathrm{AP}^b\\) of detection and \\(\\mathrm{AP}^m\\) of segmentation by Mask-RCNN. We can see that SGDM_BK and AdamW_BK gain \\(1.5\\% \\sim 2.2\\%\\) \\(\\mathrm{AP}^b\\) and \\(1.2\\% \\sim 2.2\\%\\) \\(\\mathrm{AP}^m\\) for R50 and R101 backbones over SGDM and AdamW, respectively. For Swin transformer backbone, AdamW_BK also improves \\(0.7\\% \\sim 0.9\\%\\) \\(\\mathrm{AP}^b\\) and \\(0.3\\% \\sim 0.9\\%\\) \\(\\mathrm{AP}^m\\) over AdamW. Meanwhile, compared with WSGDM, the proposed SGDM_BK also outperforms it with \\(0.2\\% \\sim 0.6\\%\\) AP gain. Moreover, Fig. 
5 plots the training loss curves of Faster-RCNN with ResNet50, Swin-T (1X) and Swin-S (3X). The proposed SGDM_BK and AdamW_BK accelerate the train" + }, + { + "type": "page_footnote", + "bbox": [ + 0.516, + 0.835, + 0.81, + 0.848 + ], + "angle": 0, + "content": "8https://github.com/open-mmlab/mmdetection" + }, + { + "type": "page_footnote", + "bbox": [ + 0.501, + 0.848, + 0.892, + 0.9 + ], + "angle": 0, + "content": "9Please refer to https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn,https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn,and https://github.com/open-mmlab/mmdetection/tree/master/ configs/swin." + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.835, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.077, + 0.876, + 0.468, + 0.9 + ], + "angle": 0, + "content": "7https://github.com/open-mmlab/mmclassification/tree/master/confers/swin_transformer" + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7872" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.089, + 0.489, + 0.132 + ], + "angle": 0, + "content": "Table 4. Detection results of Faster-RCNN on COCO. \\(\\Delta\\) means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.146, + 0.487, + 0.281 + ], + "angle": 0, + "content": "
BackboneAlgorithmAP\( \mathrm{AP}_{.5} \)\( \mathrm{AP}_{.75} \)\( \mathrm{AP}_{s} \)\( \mathrm{AP}_{m} \)\( \mathrm{AP}_{l} \)
R50SGDM*37.458.140.421.241.048.1
WSGDM39.460.643.123.142.950.7
SGDM_BK39.660.742.822.642.952.2
Δ↑2.2↑2.6↑2.4↑1.4↑1.9↑4.1
AdamW37.858.741.022.141.249.2
AdamW_BK39.460.342.922.542.852.3
Δ↑1.6↑1.6↑1.9↑0.4↑1.6↑3.1
R101SGDM*39.460.143.122.443.751.1
WSGDM41.161.645.124.045.254.3
SGDM_BK41.662.345.324.945.655.2
Δ↑2.2↑2.2↑2.2↑2.5↑1.9↑4.1
AdamW40.160.643.822.944.152.8
AdamW_BK41.762.145.524.445.456.2
Δ↑1.6↑1.5↑1.7↑1.5↑1.3↑3.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.294, + 0.489, + 0.337 + ], + "angle": 0, + "content": "Table 5. Detection and segmentation results of Mask-RCNN on COCO. \\(\\Delta\\) means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.351, + 0.487, + 0.53 + ], + "angle": 0, + "content": "
BackboneLr scheduleAlgorithm\( \mathrm{AP}^{b} \)\( \mathrm{AP}_{.5}^{b} \)\( \mathrm{AP}_{.75}^{b} \)\( \mathrm{AP}^{m} \)\( \mathrm{AP}_{.5}^{m} \)\( \mathrm{AP}_{.75}^{m} \)
R501XSGDM*38.258.841.434.755.737.2
W-SGDM39.860.843.436.457.638.9
SGDM_BK40.461.343.936.958.339.6
Δ↑2.2↑2.5↑2.5↑2.2↑2.6↑2.4
AdamW37.858.741.035.456.238.0
AdamW_BK40.060.643.536.758.039.3
Δ↑2.2↑1.9↑2.5↑1.3↑1.8↑1.3
R1011XSGDM*40.060.544.036.157.538.6
W-SGDM41.762.545.537.959.440.8
SGDM_BK42.262.946.138.160.040.7
Δ↑2.2↑2.4↑2.1↑2.0↑2.5↑2.1
AdamW40.761.144.637.258.440.1
AdamW_BK42.262.546.038.459.941.2
Δ↑1.5↑1.4↑1.4↑1.2↑1.5↑1.1
Swin-T1XAdamW*42.765.246.839.362.242.2
AdamW_BK43.665.947.840.263.143.1
Δ↑0.9↑0.7↑1.0↑0.9↑0.9↑0.9
Swin-T3XAdamW*46.068.250.341.665.344.7
AdamW_BK46.868.851.442.466.145.6
Δ↑0.8↑0.6↑1.1↑0.8↑0.8↑0.9
Swin-S3XAdamW*48.269.852.843.267.046.1
AdamW_BK48.970.453.843.567.446.8
Δ↑0.7↑0.6↑1.0↑0.3↑0.4↑0.7
" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.576, + 0.47, + 0.638 + ], + "angle": 0, + "content": "ing process clearly. The results on COCO demonstrate that the proposed SGDM_BK and AdamW_BK can be easily adopted into the downstream tasks without additional hyper-parameter tuning." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.644, + 0.38, + 0.661 + ], + "angle": 0, + "content": "5.3. Memory Usage and Training Time" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.668, + 0.471, + 0.834 + ], + "angle": 0, + "content": "For full-matrix adaptive optimizers, one important concern is the training cost, including memory usage and training time. Here we compare the memory and time cost of our optimizers with SGDM [23], AdamW [22] and AdaGrad [5] on CIFAR100. ResNet50 is used as the backbone and one GeForce RTX 2080Ti GPU is used. The results are reported in Table 6. One can see that the embedding of AdaBK slightly increases the memory usage and training time (\\(10\\% \\sim 25\\%\\) extra training time and memory usage). Compared to the improvement of performance, the extra cost is affordable and worthwhile." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.845, + 0.196, + 0.861 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.871, + 0.47, + 0.903 + ], + "angle": 0, + "content": "This work presented a general regret bound for the constrained full-matrix preconditioned gradient methods for" + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.093, + 0.691, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.096, + 0.889, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.558, + 0.255, + 0.822, + 0.269 + ], + "angle": 0, + "content": "Figure 4. Training loss curves of ResNet50." + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.288, + 0.677, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.289, + 0.859, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.492, + 0.411, + 0.677, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.678, + 0.411, + 0.859, + 0.54 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.534, + 0.543, + 0.816, + 0.557 + ], + "angle": 0, + "content": "Figure 5. Training loss curves of Mask-RCNN." + }, + { + "type": "table_caption", + "bbox": [ + 0.499, + 0.574, + 0.892, + 0.602 + ], + "angle": 0, + "content": "Table 6. Memory cost (MiB) and training time (h) of different optimizers with ResNet50." + }, + { + "type": "table", + "bbox": [ + 0.505, + 0.606, + 0.888, + 0.645 + ], + "angle": 0, + "content": "
OptimizerSGDMAdamWAdagradSGDM_BKAdamW_BK
Memory58675883586565256535
Time3.423.483.464.144.20
" + }, + { + "type": "text", + "bbox": [ + 0.498, + 0.659, + 0.892, + 0.901 + ], + "angle": 0, + "content": "DNN optimization. Different from previous full-matrix preconditioned methods, where the parameter update formulas are designed heuristically, we proved that given a cone constraint on the full-matrix preconditioner, the corresponding parameter update formula can be obtained by optimizing a guide function. Based on our theoretical analysis, we derived a specific guide function with the layer-wise block-diagonal constraint and Kronecker-factorized constraint. Through optimizing an upper bound of the guide function, a new preconditioned optimization algorithm, namely AdaBK, was obtained. We embedded AdaBK into two widely used optimizers, i.e., SGDM and AdamW, and the experimental results on image classification, object detection and segmentation tasks demonstrated that AdaBK can significantly improve the DNN optimization performance with only \\(10\\% \\sim 25\\%\\) extra computation cost." + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.957 + ], + "angle": 0, + "content": "7873" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.08, + 0.09, + 0.174, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.115, + 0.47, + 0.171 + ], + "angle": 0, + "content": "[1] Naman Agarwal, Brian Bullins, Xinyi Chen, Elad Hazan, Karan Singh, Cyril Zhang, and Yi Zhang. Efficient full-matrix adaptive regularization. In International Conference on Machine Learning, pages 102-110. PMLR, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.469, + 0.199 + ], + "angle": 0, + "content": "[2] Richard L Burden, J Douglas Faires, and Annette M Burden. Numerical analysis. Cengage learning, 2015. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.2, + 0.47, + 0.255 + ], + "angle": 0, + "content": "[3] Jinghui Chen, Dongruo Zhou, Yiqi Tang, Ziyan Yang, Yuan Cao, and Quanquan Gu. Closing the generalization gap of adaptive gradient methods in training deep neural networks. arXiv preprint arXiv:1806.06763, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.256, + 0.469, + 0.323 + ], + "angle": 0, + "content": "[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.326, + 0.469, + 0.38 + ], + "angle": 0, + "content": "[5] John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(7), 2011. 1, 2, 3, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.469, + 0.436 + ], + "angle": 0, + "content": "[6] Thomas George, César Laurent, Xavier Bouthillier, Nicolas Ballas, and Pascal Vincent. Fast approximate natural gradient descent in a kronecker-factored eigenbasis. arXiv preprint arXiv:1806.03884, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.438, + 0.469, + 0.492 + ], + "angle": 0, + "content": "[7] Roger Grosse and James Martens. A kronecker-factored approximate fisher matrix for convolution layers. In International Conference on Machine Learning, pages 573-582. PMLR, 2016. 
1, 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.494, + 0.469, + 0.548 + ], + "angle": 0, + "content": "[8] Chun-Hua Guo and Nicholas J Higham. A schur-newton method for the matrix\\boldsymbol{\\mathrm{boldmath}}\\(\\mathfrak{p}\\)th root and its inverse. SIAM Journal on Matrix Analysis and Applications, 28(3):788-804, 2006. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.55, + 0.469, + 0.605 + ], + "angle": 0, + "content": "[9] Vineet Gupta, Tomer Koren, and Yoram Singer. Shampoo: Preconditioned stochastic tensor optimization. In International Conference on Machine Learning, pages 1842-1850. PMLR, 2018. 1, 2, 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.607, + 0.469, + 0.647 + ], + "angle": 0, + "content": "[10] Elad Hazan et al. Introduction to online convex optimization. Foundations and Trends® in Optimization, 2(3-4):157-325, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.649, + 0.469, + 0.69 + ], + "angle": 0, + "content": "[11] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.691, + 0.469, + 0.746 + ], + "angle": 0, + "content": "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.747, + 0.469, + 0.803 + ], + "angle": 0, + "content": "[13] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.804, + 0.469, + 0.858 + ], + "angle": 0, + "content": "[14] Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. Label propagation for deep semi-supervised learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5070-5079, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.859, + 0.469, + 0.9 + ], + "angle": 0, + "content": "[15] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.115, + 0.47, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.12 + ], + "angle": 0, + "content": "[16] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.122, + 0.892, + 0.19 + ], + "angle": 0, + "content": "[17] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.192, + 0.892, + 0.246 + ], + "angle": 0, + "content": "[18] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.249, + 0.892, + 0.303 + ], + "angle": 0, + "content": "[19] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.305, + 0.892, + 0.374 + ], + "angle": 0, + "content": "[20] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.376, + 0.892, + 0.416 + ], + "angle": 0, + "content": "[21] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.419, + 0.892, + 0.459 + ], + "angle": 0, + "content": "[22] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 1, 5, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.461, + 0.892, + 0.502 + ], + "angle": 0, + "content": "[23] Ning Qian. On the momentum term in gradient descent learning algorithms. Neural networks, 12(1):145-151, 1999. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.504, + 0.892, + 0.559 + ], + "angle": 0, + "content": "[24] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.561, + 0.892, + 0.616 + ], + "angle": 0, + "content": "[25] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.617, + 0.892, + 0.657 + ], + "angle": 0, + "content": "[26] Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pages 400-407, 1951. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.66, + 0.892, + 0.729 + ], + "angle": 0, + "content": "[27] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.731, + 0.892, + 0.772 + ], + "angle": 0, + "content": "[28] Shai Shalev-Shwartz et al. Online learning and online convex optimization. Foundations and Trends® in Machine Learning, 4(2):107-194, 2012. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.774, + 0.892, + 0.815 + ], + "angle": 0, + "content": "[29] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.817, + 0.892, + 0.87 + ], + "angle": 0, + "content": "[30] Tijmen Tieleman and Geoffrey Hinton. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4:26-31, 2012. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.503, + 0.872, + 0.892, + 0.9 + ], + "angle": 0, + "content": "[31] Chengxi Ye, Matthew Evanusa, Hua He, Anton Mitrokhin, Tom Goldstein, James A Yorke, Cornelia Fermüller, and" + }, + { + "type": "list", + "bbox": [ + 0.503, + 0.093, + 0.892, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "7874" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.11, + 0.092, + 0.47, + 0.12 + ], + "angle": 0, + "content": "Yiannis Aloimonos. Network deconvolution. arXiv preprint arXiv:1905.11926, 2019. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.122, + 0.47, + 0.178 + ], + "angle": 0, + "content": "[32] Hongwei Yong, Jianqiang Huang, Xiansheng Hua, and Lei Zhang. Gradient centralization: A new optimization technique for deep neural networks. In European Conference on Computer Vision, pages 635-652. Springer, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.179, + 0.469, + 0.22 + ], + "angle": 0, + "content": "[33] Hongwei Yong and Lei Zhang. An embedded feature whitening approach to deep neural network optimization. In the European Conference on Computer Vision, 2022. 4, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.222, + 0.469, + 0.263 + ], + "angle": 0, + "content": "[34] Jihun Yun, Aurelie C Lozano, and Eunho Yang. Stochastic gradient methods with block diagonal matrix adaptation. arXiv preprint arXiv:1905.10757, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.264, + 0.469, + 0.292 + ], + "angle": 0, + "content": "[35] Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.293, + 0.469, + 0.347 + ], + "angle": 0, + "content": "[36] Huishuai Zhang, Wei Chen, and Tie-Yan Liu. Train feedforward neural network with layer-wise adaptive rate via approximating back-matching propagation. arXiv preprint arXiv:1802.09750, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.349, + 0.469, + 0.39 + ], + "angle": 0, + "content": "[37] Michael R Zhang, James Lucas, Geoffrey Hinton, and Jimmy Ba. Lookahead optimizer: k steps forward, 1 step back. arXiv preprint arXiv:1907.08610, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.392, + 0.469, + 0.461 + ], + "angle": 0, + "content": "[38] Juntang Zhuang, Tommy Tang, Yifan Ding, Sekhar C Tatikonda, Nicha Dvornek, Xenophon Papademetris, and James Duncan. Adbelief optimizer: Adapting stepsizes by the belief in observed gradients. Advances in neural information processing systems, 33:18795-18806, 2020. 
1, 6" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.092, + 0.47, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.483, + 0.945, + 0.516, + 0.956 + ], + "angle": 0, + "content": "7875" + } + ] +] \ No newline at end of file diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_origin.pdf b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2465024d0a9143a75fa79681badeb31df5fab768 --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/a806573e-912a-4e15-8891-1f914fce477d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd2d38baaaeff1bc5c3c61215c911892f3d3c12b9afb157e80f716aa490c4d80 +size 1165486 diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/full.md b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/full.md new file mode 100644 index 0000000000000000000000000000000000000000..abad8244853ebcf91356c1549f484ab7860eb99a --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/full.md @@ -0,0 +1,375 @@ +# A General Regret Bound of Preconditioned Gradient Method for DNN Training + +Hongwei Yong Ying Sun Lei Zhang The Hong Kong Polytechnic University + +hongwei.yong@polyu.edu.hk, {csysun, cslzhang}@comp.polyu.edu.hk + +# Abstract + +While adaptive learning rate methods, such as Adam, have achieved remarkable improvement in optimizing Deep Neural Networks (DNNs), they consider only the diagonal elements of the full preconditioned matrix. Though the full-matrix preconditioned gradient methods theoretically have a lower regret bound, they are impractical for use to train DNNs because of the high complexity. In this paper, we present a general regret bound with a constrained full-matrix preconditioned gradient, and show that the updating formula of the preconditioner can be derived by solving a cone-constrained optimization problem. With the block-diagonal and Kronecker-factorized constraints, a specific guide function can be obtained. By minimizing the upper bound of the guide function, we develop a new DNN optimizer, termed AdaBK. A series of techniques, including statistics updating, dampening, efficient matrix inverse root computation, and gradient amplitude preservation, are developed to make AdaBK effective and efficient to implement. The proposed AdaBK can be readily embedded into many existing DNN optimizers, e.g., SGDM and AdamW, and the corresponding SGDM_BK and AdamW_BK algorithms demonstrate significant improvements over existing DNN optimizers on benchmark vision tasks, including image classification, object detection and segmentation. The code is publicly available at https://github.com/Yonghongwei/AdaBK. + +# 1. Introduction + +Stochastic gradient descent (SGD) [26] and its variants [21, 23], which update the parameters along the opposite of their gradient directions, have achieved great success in optimizing deep neural networks (DNNs) [14, 24]. Instead of using a uniform learning rate for different parameters, Duchi et al. [5] proposed the AdaGrad method, which adopts an adaptive learning rate for each parameter, and proved that AdaGrad can achieve lower regret bound than SGD. 
Following AdaGrad, a class of adaptive learning rate gradient descent methods has been proposed. For example, + +RMSProp [30] and AdaDelta [35] introduce the exponential moving average to replace the sum of second-order statistics of the gradient for computing the adaptive learning rate. Adam [15] further adopts the momentum into the gradient, and AdamW [22] employs a weight-decoupled strategy to improve the generalization performance. RAdam [18], Adabelief [38] and Ranger [19,32,37] are proposed to accelerate training and improve the generalization capability over Adam. The adaptive learning rate methods have become the mainstream DNN optimizers. + +In addition to AdaGrad, Duchi et al. [5] provided a full-matrix preconditioned gradient descent (PGD) method that adopts the matrix $\mathbf{H}_T = (\sum_{t=1}^T \mathbf{g}_t \mathbf{g}_t^\top)^{\frac{1}{2}}$ to adjust the gradient $\mathbf{g}_T$ , where $t$ denotes the iteration number and $T$ is the number of the current iteration. It has been proved [5] that the preconditioned gradient $\mathbf{H}_T^{-1} \mathbf{g}_T$ has a lower regret bound than the adaptive learning rate methods that only consider the diagonal elements of $\mathbf{H}_T$ . However, the full-matrix preconditioned gradient is impractical to use due to its high dimension, which limits its application to DNN optimization. Various works have been reported to solve this problem in parameter space by adding some structural constraints on the full-matrix $\mathbf{H}_T$ . For instances, GGT [1] stores only the gradients of recent iterations so that the matrix inverse root can be computed efficiently by fast low-rank computation tricks. Yun et al. [34] proposed a mini-block diagonal matrix framework to reduce the cost through coordinate partitioning and grouping strategies. Gupta et al. [9] proposed to extend AdaGrad with Kronecker products of full-matrix preconditioners to make it more efficient in DNN training. Besides, natural gradient approaches [6, 7], which adopt the approximations of the Fisher matrix to correct the descent direction, can also be regarded as full-matrix preconditioners. + +The existing constrained PGD (CPGD) methods, however, are heuristic since manually designed approximations to the full matrix $\pmb{H}_T$ are employed in them, while their influence on the regret bound is unknown. By far, they lack a general regret-bound theory that can guide us to design the full-matrix preconditioned gradient methods. On the other hand, the practicality and effectiveness of these precondi + +tioner methods are also an issue, which prevents them from being widely used in training DNNs. + +To address the above-mentioned issues, in this paper we present a theorem to connect the regret bound of the constrained full-matrix preconditioner with a guide function. By minimizing the guide function under the constraints, an updating formula of the preconditioned gradient can be derived. That is, optimizing the guide function of the preconditioner will minimize its regret bound at the same time, while different constraints can yield different updating formulas. With the commonly-used constraints on DNN preconditioners, such as the block-diagonal and Kronecker-factorized constraints [7, 9], specific guide functions can be obtained. By minimizing the upper bound of the guide function, a new optimizer, namely AdaBK, is derived. 
+ +We further propose a series of techniques, including statistics updating, dampening, efficient matrix inverse root computation and gradient norm recovery, to make AdaBK more practical to use for DNN optimization. By embedding AdaBK into SGDM and AdamW (or Adam), we develop two new DNN optimizers, SGDM_BK and AdamW_BK. With acceptable extra computation and memory cost, they achieve significant performance gain in convergence speed and generalization capability over state-of-the-art DNN optimizers, as demonstrated in our experiments in image classification, object detection and segmentation. + +For a better understanding of our proposed regret bound and the developed DNN optimizer, in Fig. 1, we illustrate the existing major DNN optimizers and their relationships. SGD and its momentum version (SGDM) apply the same learning rate to all parameters based on their gradient descent directions. The adaptive learning rate methods assign different learning rates to different parameters by using second-order information of the gradients, achieving better convergence performance. The adaptive learning rate methods can be viewed as special cases of PGD methods by considering only the diagonal elements of the full preconditioned matrix of gradients. Our method belongs to the class of PGD methods, while our proposed general regret bound of constrained PGD methods can be applied to the PGD optimizers under different constraints, including AdaGrad, Full-Matrix AdaGrad and our AdaBK. + +Notation system. We denote by $\boldsymbol{w}_t$ and $\boldsymbol{g}_t$ the weight vector and its gradient of a DNN model in the $t$ -th iteration. Denote by $\boldsymbol{g}_{t,i}$ the gradient of the $i$ -th sample in a batch in the $t$ -th iteration, we have $\boldsymbol{g}_t = \frac{1}{n}\sum_{i=1}^n\boldsymbol{g}_{t,i}$ , where $n$ is the batch size. The notations $A \succeq 0$ and $A \succ 0$ for a matrix $A$ denote that $A$ is symmetric positive semidefinite (PSD) and symmetric positive definite, respectively. $A \succeq B$ or $A - B \succeq 0$ means that $A - B$ is PSD. $\operatorname{Tr}(A)$ represents the trace of the matrix $A$ . For a PSD matrix $A$ , $A^\alpha = U\Sigma^\alpha U^\top$ , where $U\Sigma U^\top$ is the Singular Value Decomposition (SVD) of $A$ . $||\boldsymbol{x}||_A = \sqrt{\boldsymbol{x}^\top\boldsymbol{A}\boldsymbol{x}}$ is the Maha + +![](images/ed9490d0f611aa6f626fe3ae514f1f3953edb31975c0d66540d093ecc18bfa8f.jpg) +Figure 1. Illustration of the main DNN optimizers. + +lanobis norm of $\pmb{x}$ induced by PSD matrix $\mathbf{A}$ , and its dual norm is $\| \pmb{x} \|_{\pmb{A}}^{*} = \sqrt{\pmb{x}^{\top} \pmb{A}^{-1} \pmb{x}}$ . $\pmb{A} \otimes \pmb{B}$ means the Kronecker product of $\pmb{A}$ and $\pmb{B}$ , while $\pmb{A} \odot \pmb{B}$ and $\pmb{A}^{\odot \alpha}$ are the element-wise matrix product and element-wise power operation, respectively. $\operatorname{Diag}(\pmb{x})$ is a diagonal matrix with diagonal vector $\pmb{x}$ , and $\operatorname{vec}(\cdot)$ denotes the vectorization function. + +# 2. Background + +# 2.1. Online Convex Optimization + +The online convex optimization framework [10, 28] remains the most powerful and popular tool to analyze DNN optimization algorithms, including AdaGrad [5], Adam [15], Shampoo [9], etc. Given an arbitrary, unknown sequence of convex loss functions $\{f_1(\pmb{w}),\dots,f_t(\pmb{w}),\dots,f_T(\pmb{w})\}$ , we aim to optimize the weight $\pmb{w}_t$ in the $t$ -th iteration, and evaluate it on the loss function $f_{t}(\pmb{w})$ . 
The goal of our optimization process is to minimize the regret, which is defined as follows [10, 28]: + +$$ +R (T) = \sum_ {t = 1} ^ {T} \left(f _ {t} \left(\boldsymbol {w} _ {t}\right) - f _ {t} (\hat {\boldsymbol {w}})\right), \tag {1} +$$ + +where $\hat{\boldsymbol{w}} = \arg \min_{\boldsymbol{w}} \sum_{t=1}^{T} f_t(\boldsymbol{w})$ . Generally speaking, a lower regret bound means a more effective learning process. + +# 2.2. Regret Bound of Preconditioned Gradient + +As in previous works [5, 9], an online mirror descent with an adaptive time-dependent regularization is adopted for online convex learning. In the $t$ -th iteration, suppose we have obtained the gradient $\pmb{g}_t = \nabla f_t(\pmb{w}_t)$ , then given a PSD matrix $\pmb{H}_t\succeq \mathbf{0}$ , the parameters are updated by optimizing the following objective function: + +$$ +\boldsymbol {w} _ {t + 1} = \arg \min _ {\boldsymbol {w}} \eta \boldsymbol {g} _ {t} ^ {\top} \boldsymbol {w} + \frac {1}{2} \left\| \boldsymbol {w} - \boldsymbol {w} _ {t} \right\| _ {\boldsymbol {H} _ {t}} ^ {2}. \tag {2} +$$ + +The solution of Eq. (2) is exactly a preconditioned gradient descent step, which is + +$$ +\boldsymbol {w} _ {t + 1} = \boldsymbol {w} _ {t} - \eta \boldsymbol {H} _ {t} ^ {- 1} \boldsymbol {g} _ {t}. \tag {3} +$$ + +Duchi et al. [5] have provided a regret bound for online mirror descent, as shown in Lemma 1: + +Lemma 1 [5, 9] For any sequence of matrices $H_T \succeq \ldots \succeq H_1 \succeq 0$ , the regret of online mirror descent holds that + +$$ +\begin{array}{l} R (T) \leq \frac {1}{2 \eta} \sum_ {t = 1} ^ {T} \left(\left\| \boldsymbol {w} _ {t} - \hat {\boldsymbol {w}} \right\| _ {\boldsymbol {H} _ {t}} ^ {2} - \left\| \boldsymbol {w} _ {t + 1} - \hat {\boldsymbol {w}} \right\| _ {\boldsymbol {H} _ {t}} ^ {2}\right) \\ + \frac {\eta}{2} \sum_ {t = 1} ^ {T} \left(\left\| \boldsymbol {g} _ {t} \right\| _ {\boldsymbol {H} _ {t}} ^ {*}\right) ^ {2}. \tag {4} \\ \end{array} +$$ + +If we further assume $D = \max_{t\leq T}||\pmb{w}_t - \hat{\pmb{w}} ||_2$ , then we have + +$$ +R (T) \leq \frac {D ^ {2}}{2 \eta} \operatorname {T r} \left(\boldsymbol {H} _ {T}\right) + \frac {\eta}{2} \sum_ {t = 1} ^ {T} \left(\left\| \boldsymbol {g} _ {t} \right\| _ {\boldsymbol {H} _ {t}} ^ {*}\right) ^ {2}. \tag {5} +$$ + +Our goal is to find a proper sequence of PSD matrices $\{\pmb{H}_1, \pmb{H}_2, \dots, \pmb{H}_T\}$ to minimize the regret bound in Eq (4) or (5). Duchi et al. [5] suggested to adopt $\pmb{H}_T = (\sum_{t=1}^{T} \pmb{g}_t \pmb{g}_t^\top)^{\frac{1}{2}}$ as the full matrix regularization matrix. However, it is hard to directly use it for DNN optimization due to the high dimension of parameter space. Therefore, Duchi et al. simplified this full-matrix $\pmb{H}_T$ with its diagonal elements, resulting in the AdaGrad algorithm [5]. + +# 3. A General Regret Bound for Constrained Preconditioned Gradient + +# 3.1. The General Regret Bound + +Directly adopting a full-matrix $H_{t}$ is absurd for optimizing a DNN because it is hard or even prohibitive to compute and store such a high-dimensional matrix. Hence, we need to reduce the dimension of $H_{t}$ with a constraint set $\Psi$ , e.g., the set of the block-diagonal matrices [5]. In this section, we aim to construct a general and practical full-matrix regularization term in Eq. (2) to achieve the low regret bound in Eq. (4). 
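To make the cost argument concrete, here is a small illustrative sketch (our code, not the paper's) of the preconditioned step in Eq. (3), once with the full matrix $\mathbf{H}_T = (\sum_{t=1}^T \mathbf{g}_t \mathbf{g}_t^\top)^{\frac{1}{2}}$ suggested by Duchi et al. and once with its diagonal simplification used in AdaGrad; the quadratic memory and cubic per-step cost of the former are precisely what a constraint set $\Psi$ is meant to avoid.

```python
import torch

def full_matrix_step(w, g, GG_sum, lr, eps=1e-8):
    # Eq. (3) with H_T = (sum_t g_t g_t^T)^{1/2}: O(d^2) memory and an
    # O(d^3) eigendecomposition per update, impractical for DNN-sized d.
    GG_sum = GG_sum + torch.outer(g, g)
    vals, vecs = torch.linalg.eigh(GG_sum)
    H = vecs @ torch.diag(vals.clamp_min(0.0).sqrt()) @ vecs.T
    I = torch.eye(w.numel(), dtype=w.dtype, device=w.device)
    return w - lr * torch.linalg.solve(H + eps * I, g), GG_sum

def diagonal_adagrad_step(w, g, sq_sum, lr, eps=1e-8):
    # Keeping only the diagonal of H_T (AdaGrad): O(d) memory and compute.
    sq_sum = sq_sum + g * g
    return w - lr * g / (sq_sum.sqrt() + eps), sq_sum
```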
For a general constraint set $\Psi \subseteq \mathbb{R}^{d\times d}$, if it is a cone (i.e., $\forall x \in \Psi$ and $\theta > 0$, $\theta x \in \Psi$ holds), we have the following Theorem 1 and Lemma 2, whose proofs can be found in the supplementary materials.

Theorem 1 For any cone constraint $\Psi \subseteq \mathbb{R}^{d\times d}$, we define a guide function $F_T(\boldsymbol{S})$ on $\Psi$ as

$$
F_T(\boldsymbol{S}) = \sum_{t=1}^{T} \left( \left\| \boldsymbol{g}_t \right\|_{\boldsymbol{S}}^{*} \right)^{2}, \tag{6}
$$

and then define the matrix $\boldsymbol{H}_T$ as

$$
\boldsymbol{H}_T = C_T \boldsymbol{S}_T, \quad \boldsymbol{S}_T = \arg\min_{\boldsymbol{S} \in \Psi, \boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} F_T(\boldsymbol{S}), \tag{7}
$$

where $C_T = \sqrt{F_T(\boldsymbol{S}_T)}$. The regret of online mirror descent satisfies

$$
R(T) \leq \left( \frac{D^{2}}{2\eta} + \eta \right) C_T = \left( \frac{D^{2}}{2\eta} + \eta \right) \sqrt{\min_{\boldsymbol{S} \in \Psi, \boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} F_T(\boldsymbol{S})}. \tag{8}
$$

The above theorem reveals that minimizing the guide function $F_T(\boldsymbol{S})$ on the cone $\Psi$ simultaneously minimizes the regret bound of the preconditioned gradient descent algorithm. More importantly, given a cone constraint $\Psi$, the optimal $\boldsymbol{H}_T = C_T \boldsymbol{S}_T$ that achieves the lowest regret bound can be obtained by optimizing Eq. (7). From Theorem 1, we know that the regret satisfies $R(T) \leq O(\sqrt{\min_{\boldsymbol{S} \in \Psi, \boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} F_T(\boldsymbol{S})})$. If two cones satisfy $\Psi_1 \subseteq \Psi_2$, we have $\sqrt{\min_{\boldsymbol{S} \in \Psi_2, \boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} F_T(\boldsymbol{S})} \leq \sqrt{\min_{\boldsymbol{S} \in \Psi_1, \boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} F_T(\boldsymbol{S})}$. This also explains why full-matrix regularization can achieve the lowest regret bound. In addition, we have the following lemma:

Lemma 2 Suppose that $\Psi$ is the set of either diagonal matrices or full matrices. Then, according to the definition of $\boldsymbol{S}_T$ and $\boldsymbol{H}_T$ in Eq. (7), we have, respectively,

$$
\boldsymbol{H}_T = \operatorname{Diag}\left( \left( \sum_{t=1}^{T} \boldsymbol{g}_t \odot \boldsymbol{g}_t \right)^{\odot \frac{1}{2}} \right), \quad \boldsymbol{H}_T = \left( \sum_{t=1}^{T} \boldsymbol{g}_t \boldsymbol{g}_t^{\top} \right)^{\frac{1}{2}}. \tag{9}
$$

From Lemma 2, we can easily see that the diagonal and full matrices used in AdaGrad [5] are two special cases of the results in Theorem 1.

# 3.2. Layer-wise Block-diagonal Constraint

In practice, we need to choose a proper constraint set $\Psi$ to regularize the structure of the matrix $\boldsymbol{H}_T$. The diagonal constraint is the simplest one. However, it results in a very low effective dimension of $\boldsymbol{H}_T$, so the regret bound is high. We aim to find a more effective and practical constraint set over $\boldsymbol{H}_T$ for DNN optimization.

Instead of considering the full-matrix regularization of all parameters, one can consider the full-matrix regularization of the parameters within one DNN layer. Similar ideas have been adopted in KFAC [7] and Shampoo [9], which assume that the matrix $\boldsymbol{H}_T$ has a block-diagonal structure and each sub-block matrix is used for one layer of a DNN.
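Before specializing the constraint set further, the following snippet (ours, not part of the paper) is a quick numerical sanity check of the ordering implied by Theorem 1 and Lemma 2: with $M = \sum_t \boldsymbol{g}_t \boldsymbol{g}_t^{\top}$ the guide function is $F_T(\boldsymbol{S}) = \operatorname{Tr}(\boldsymbol{S}^{-1} M)$, and evaluating it at the trace-normalized diagonal and full-matrix minimizers shows the full-matrix constraint always gives the smaller (hence lower-regret) value. The problem sizes are arbitrary assumptions.

```python
import numpy as np

# Sanity check: F_T(S) = Tr(S^{-1} M) with M = sum_t g_t g_t^T.
# At the minimizers of Lemma 2 (normalized to Tr(S) <= 1),
#   full matrix: S = M^{1/2} / Tr(M^{1/2})        -> F = Tr(M^{1/2})^2
#   diagonal   : S = Diag(sqrt(diag(M))) / sum(.) -> F = (sum_i sqrt(M_ii))^2
# and the full-matrix value is never larger than the diagonal one.

rng = np.random.default_rng(0)
d, T = 8, 50
G = rng.standard_normal((T, d))               # rows are the gradients g_t
M = G.T @ G                                   # sum_t g_t g_t^T

lam, U = np.linalg.eigh(M)
M_half = U @ np.diag(np.sqrt(np.maximum(lam, 0.0))) @ U.T

F_full = np.trace(M_half) ** 2                # guide function at full-matrix minimizer
F_diag = np.sum(np.sqrt(np.diag(M))) ** 2     # guide function at diagonal minimizer

print(f"full-matrix value: {F_full:.3f}")
print(f"diagonal value   : {F_diag:.3f}")
assert F_full <= F_diag + 1e-6                # smaller constraint set => larger bound
```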
Suppose that $S_{l}$ and $H_{l}$ are the matrices for the $l$-th layer, and $g_{l}$ is the gradient of the weights in the $l$-th layer. To obtain the updating formula under the block-diagonal constraint, we can minimize the guide function $F_{T}(\boldsymbol{S})$, which decomposes as

$$
F_T(\boldsymbol{S}) = \sum_{t=1}^{T} \left( \left\| \boldsymbol{g}_t \right\|_{\boldsymbol{S}}^{*} \right)^{2} = \sum_{l=1}^{L} \sum_{t=1}^{T} \left( \left\| \boldsymbol{g}_{l,t} \right\|_{\boldsymbol{S}_l}^{*} \right)^{2}. \tag{10}
$$

The above equation shows that the original optimization problem can be divided into $L$ sub-problems, which can be solved independently. For convenience of expression, we omit the subscript $l$ and analyze the sub-problem within one layer of a DNN in the following development.

# 3.3. Kronecker-factorized Constraint

Because the dimension of the parameter space of one DNN layer can still be very high, we need to further constrain the structure of $H_{T}$. The Kronecker-factorized constraint can be used to significantly reduce the parameter dimension within one layer [7, 9]. To be specific, for a fully-connected layer with weight $\boldsymbol{W} \in \mathbb{R}^{C_{out} \times C_{in}}$ and $\boldsymbol{w} = \mathrm{vec}(\boldsymbol{W})$, its corresponding gradient is $\boldsymbol{G} \in \mathbb{R}^{C_{out} \times C_{in}}$ and $\boldsymbol{g} = \mathrm{vec}(\boldsymbol{G})$. Let $\boldsymbol{S} = \boldsymbol{S}_1 \otimes \boldsymbol{S}_2$, where $\boldsymbol{S}_1 \in \mathbb{R}^{C_{out} \times C_{out}}$, $\boldsymbol{S}_2 \in \mathbb{R}^{C_{in} \times C_{in}}$, $\boldsymbol{S} \in \mathbb{R}^{C_{in} C_{out} \times C_{in} C_{out}}$, and $\otimes$ is the Kronecker product. Since $(S_1 \otimes S_2)^{-1} = S_1^{-1} \otimes S_2^{-1}$, what we need to minimize becomes

$$
F_T(\boldsymbol{S}) = \sum_{t=1}^{T} \left( \left\| \boldsymbol{g}_t \right\|_{\boldsymbol{S}_1 \otimes \boldsymbol{S}_2}^{*} \right)^{2} = \sum_{t=1}^{T} \boldsymbol{g}_t^{\top} \left( \boldsymbol{S}_1^{-1} \otimes \boldsymbol{S}_2^{-1} \right) \boldsymbol{g}_t = \operatorname{Tr}\left( \left( \boldsymbol{S}_1^{-1} \otimes \boldsymbol{S}_2^{-1} \right) \sum_{t=1}^{T} \boldsymbol{g}_t \boldsymbol{g}_t^{\top} \right) \tag{11}
$$

under the constraints $\{S_1, S_2 \succeq 0, \operatorname{Tr}(S_1) \leq 1, \operatorname{Tr}(S_2) \leq 1\}$.

Nevertheless, directly minimizing $F_{T}(\boldsymbol{S})$ in Eq. (11) is still difficult, so we construct an upper bound of $F_{T}(\boldsymbol{S})$ to minimize. Since $\boldsymbol{g} = \frac{1}{n}\sum_{i=1}^{n}\boldsymbol{g}_{i}$, where $\boldsymbol{g}_{i}$ is the gradient of sample $i$ and $n$ is the batch size, and $\boldsymbol{g}_{i} = \mathrm{vec}(\delta_{i}\boldsymbol{x}_{i}^{\top}) = \delta_{i} \otimes \boldsymbol{x}_{i}$, where $\boldsymbol{x}_{i}$ is the input feature and $\delta_{i}$ is the output feature gradient of sample $i$, we have the following lemma.
Lemma 3 Denote by $\boldsymbol{L}_{T} = \sum_{t=1}^{T} \sum_{i=1}^{n} \delta_{ti} \delta_{ti}^{\top}$ and $\boldsymbol{R}_{T} = \sum_{t=1}^{T} \sum_{i=1}^{n} \boldsymbol{x}_{ti} \boldsymbol{x}_{ti}^{\top}$. Then it holds that

$$
F_T(\boldsymbol{S}) \leq \operatorname{Tr}\left( \left( \boldsymbol{S}_1^{-1} \otimes \boldsymbol{S}_2^{-1} \right) \frac{1}{n} \sum_{t=1}^{T} \sum_{i=1}^{n} \boldsymbol{g}_{ti} \boldsymbol{g}_{ti}^{\top} \right) \leq \frac{1}{n} \operatorname{Tr}(\boldsymbol{S}_1^{-1} \boldsymbol{L}_T)\, \operatorname{Tr}(\boldsymbol{S}_2^{-1} \boldsymbol{R}_T). \tag{12}
$$

We minimize the upper bound of $F_{T}(\boldsymbol{S})$ defined in Lemma 3. One can see that this upper bound can be divided into two independent problems w.r.t. $S_{1}$ and $S_{2}$, respectively:

$$
\min_{S_1 \succeq \mathbf{0}, \operatorname{Tr}(S_1) \leq 1} \operatorname{Tr}\left( S_1^{-1} L_T \right) \quad \text{and} \quad \min_{S_2 \succeq \mathbf{0}, \operatorname{Tr}(S_2) \leq 1} \operatorname{Tr}\left( S_2^{-1} R_T \right). \tag{13}
$$

To solve the above problems, we have the following lemma:

Lemma 4 If $A \succ 0$, we have

$$
\arg\min_{\boldsymbol{S} \succeq \mathbf{0}, \operatorname{Tr}(\boldsymbol{S}) \leq 1} \operatorname{Tr}(\boldsymbol{S}^{-1}\boldsymbol{A}) = \boldsymbol{A}^{\frac{1}{2}} / \operatorname{Tr}(\boldsymbol{A}^{\frac{1}{2}}). \tag{14}
$$

The proofs of Lemma 3 and Lemma 4 can be found in the supplementary materials. According to Lemma 4, the solution of Eq. (13) is $S_{1,T} = L_{T}^{\frac{1}{2}} / \operatorname{Tr}(L_{T}^{\frac{1}{2}})$ and $S_{2,T} = R_{T}^{\frac{1}{2}} / \operatorname{Tr}(R_{T}^{\frac{1}{2}})$. In practice, a dampening term $\epsilon I$ is added to $L_{T}$ and $R_{T}$ to ensure that they are symmetric and positive definite. Without considering the magnitude of $H_{T}$, we can set

$$
\boldsymbol{H}_{T} = \boldsymbol{H}_{1,T} \otimes \boldsymbol{H}_{2,T}, \quad \boldsymbol{H}_{1,T} = \boldsymbol{L}_{T}^{\frac{1}{2}}, \quad \boldsymbol{H}_{2,T} = \boldsymbol{R}_{T}^{\frac{1}{2}}. \tag{15}
$$

Then, according to the properties of the Kronecker product, the online mirror descent updating formula in Eq. (3) becomes

$$
\boldsymbol{W}_{t+1} = \boldsymbol{W}_{t} - \eta \boldsymbol{H}_{1,t}^{-1} \boldsymbol{G}_{t} \boldsymbol{H}_{2,t}^{-1}. \tag{16}
$$

We ignore the magnitude of $H_{T}$ here because it has no impact on the result once we introduce a gradient norm recovery operation into the algorithm, which will be described in the next section.

Finally, the proposed vanilla optimizer, termed AdaBK, is summarized in Algorithm 1.

# 4. Detailed Implementation

The proposed AdaBK in Algorithm 1 involves the calculation of matrix inverse roots, which may be unstable and inefficient. For an efficient and effective implementation of AdaBK in training DNNs, we propose a series of techniques.

Efficient Matrix Inverse Root. As shown in Algorithm 1, we need to calculate the matrix inverse roots of $L_{t}$ and $R_{t}$. Traditional approaches usually use SVD for this purpose. However, SVD is inefficient, and existing deep learning frameworks (e.g., PyTorch) do not implement SVD on GPU well, which can make the training unstable or even divergent. Instead of using SVD, we adopt the Schur-Newton algorithm [8] to compute the matrix inverse root. For a matrix $A$, let $Y_{0} = A / \operatorname{Tr}(A)$ and $Z_{0} = I$.
The Schur-Newton algorithm adopts the following iterations: + +$$ +\left\{ \begin{array}{l} \boldsymbol {T} _ {k} = \frac {1}{2} \left(3 \boldsymbol {I} - \boldsymbol {Z} _ {k - 1} \boldsymbol {Y} _ {k - 1}\right); \\ \boldsymbol {Y} _ {k} = \boldsymbol {Y} _ {k - 1} \boldsymbol {T} _ {k}, \boldsymbol {Z} _ {k} = \boldsymbol {T} _ {k} \boldsymbol {Z} _ {k - 1}, k = 1, 2, \dots , K. \end{array} \right. \tag {17} +$$ + +Then we have $A^{\frac{1}{2}} \approx Y_K \sqrt{\operatorname{Tr}(A)}$ , $A^{-\frac{1}{2}} \approx Z_K / \sqrt{\operatorname{Tr}(A)}$ . In practice, we find that setting $K = 10$ can achieve good enough precision for our problem. + +Statistics Updating. In Algorithm 1, $L_{t}$ and $R_{t}$ accumulate the statistics of output feature gradient $\Delta_{t}$ and input feature $X_{t}$ , respectively. Hence the amplitude of $L_{t}$ and $R_{t}$ will increase during training. After certain iterations, the effective learning rate will become small, making the learning process inefficient. To solve this issue, we use the exponential moving average of $L_{t}$ and $R_{t}$ . Meanwhile, it is unnecessary to compute $L_{t}, R_{t}$ , and their inverse root in each iteration. Two hyper-parameters $T_{s}$ and $T_{ir}$ are introduced to control the frequency of updating $L_{t}$ and $R_{t}$ and their inverse root, respectively. This infrequent statistics updating strategy can significantly improve efficiency with a little performance drop. We use two additional statistics $\widehat{L}_{t}$ and $\widehat{R}_{t}$ to restore the matrix inverse root of $L_{t}$ and $R_{t}$ (please refer to Algorithm 2). + +Dampening Strategy. When the dimensions of $\Delta_t$ and $X_t$ are high, $L_t$ and $R_t$ tend to be singular matrices with large condition numbers. A dampening term $\epsilon I$ should be added into $L_t$ and $R_t$ to improve their condition number and enhance the stability of computing inverse root. As in [33], we adopt an adaptive dampening parameter $\epsilon \lambda_{max}$ where $\lambda_{max}$ is the max singular value of the matrix $L_t$ or $R_t$ . With this setting, the condition number will be $\frac{\lambda_{max} + \epsilon \lambda_{max}}{\lambda_{min} + \epsilon \lambda_{max}} \leq \frac{1 + \epsilon}{\epsilon}$ , bounded by a value determined by $\epsilon$ . Meanwhile, the maximum singular value of the symmetric matrix ( $L_t$ or $R_t$ ) can be efficiently obtained by the power iteration method [2] as follows: + +$$ +\left\{ \begin{array}{l} \boldsymbol {v} _ {k} = \boldsymbol {A} \boldsymbol {u} _ {k - 1}, \\ \boldsymbol {u} _ {k} = \boldsymbol {v} _ {k} / | | \boldsymbol {v} _ {k} | | _ {2}, k = 1, 2, \dots , K. \end{array} \right. \tag {18} +$$ + +Algorithm 1: AdaBK (Adaptive Regularization with Block-diagonal and Kronecker-factorized Constraints) +Input: $W_{0},L_{0} = \epsilon I_{C_{out}},R_{0} = \epsilon I_{C_{in}},\eta$ Output: $W_{T}$ +1 for $t = 1:T$ do +2 Receive $X_{t} = [\pmb{x}_{ti}]_{i = 1}^{n}$ by forward propagation; +3 Receive $\Delta_t = [\delta_{ti}]_i^n$ by backward propagation; +4 Compute gradient $G_{t}$ . +5 Update preconditioners: +6 $L_{t} = L_{t - 1} + \Delta_{t}\Delta_{t}^{\top};$ +7 $R_{t} = R_{t - 1} + X_{t}X_{t}^{\top};$ +8 Update weight: + $W_{t + 1} = W_t - \eta L_t^{-\frac{1}{2}}G_tR_t^{-\frac{1}{2}};$ +9 end + +We use $\lambda_{max} \approx ||\pmb{v}_K||_2$ for our proposed adaptive dampening and set $K$ to 10 in our implementation. + +Gradient Norm Recovery. 
Since the amplitude of the preconditioned gradient $L_{t}^{-\frac{1}{2}} G_{t} R_{t}^{-\frac{1}{2}}$ may significantly differ from the amplitude of original $G_{t}$ , the optimal learning rate and weight decay will also differ from the original optimizer. It is expected that the well-tuned hyperparameters in current optimizers (e.g., SGDM, AdamW) can be directly used in our proposed AdaBK optimizer without further hyper-parameter tuning. To this end, we follow the strategy in [33] to re-scale the amplitude of the preconditioned gradient $\widehat{G}_{t} = L_{t}^{-\frac{1}{2}} G_{t} R_{t}^{-\frac{1}{2}}$ to the original gradient $G_{t}$ by multiplying it with a scaling factor, i.e., + +$$ +\tilde {\boldsymbol {G}} _ {t} = \widehat {\boldsymbol {G}} _ {t} \frac {\left| \left| \boldsymbol {G} _ {t} \right| \right| _ {2}}{\left| \left| \widehat {\boldsymbol {G}} _ {t} \right| \right| _ {2}}. \tag {19} +$$ + +It is easy to know that $\tilde{G}_t$ and $G_{t}$ have the same $L_{2}$ norm. With gradient norm recovery, the proposed AdaBK method can be easily embedded into existing optimizers without much extra hyperparameter tuning. + +Convolutional Layer. We have discussed the optimization of FC layers in Section 3. For the Conv layer, the derivation process is similar. The convolution operation can be formulated as matrix multiplication with the $im2col$ operation [31, 36], and then the Conv layer can be viewed as an FC layer with $\mathfrak{A} = \mathcal{U}_1(W)\mathfrak{X}$ , where $\mathfrak{A}$ and $\mathfrak{X}$ are the output and input features after $im2col$ operation, and $\mathcal{U}_1(\cdot)$ is the mode 1 unfold operation of a tensor. For example, for a convolution weight $\mathbf{W} \in \mathbb{R}^{C_{out} \times C_{in} \times k_1 \times k_2}$ , we have $\mathcal{U}_1(\mathbf{W}) \in \mathbb{R}^{C_{out} \times C_{in} k_1 k_2}$ . $\mathcal{U}_1(\mathbf{W})$ can be considered as the weight of the FC layer, and the remaining computation is the same as the FC layer. + +**Embedding AdaBK into SGDM and AdamW.** With the above-introduced techniques, a more efficient and practical implementation of AdaBK can be obtained. 
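Before turning to the one-step algorithm, the following NumPy sketch (ours, under stated assumptions) shows how these pieces fit together for a single FC layer: the exponential moving averages of the statistics, the adaptive dampening via power iteration (Eq. (18)), the Schur-Newton inverse root (Eq. (17)), and gradient norm recovery (Eq. (19)). The update-frequency controls $T_s$ and $T_{ir}$ and the embedding into SGDM/AdamW are omitted, and all function names and sizes here are illustrative, not the released implementation.

```python
import numpy as np

def schur_newton_inv_sqrt(A, K=10):
    """Approximate A^{-1/2} for a symmetric positive definite A, as in Eq. (17)."""
    trA = np.trace(A)
    Y, Z = A / trA, np.eye(A.shape[0])
    for _ in range(K):
        T = 0.5 * (3.0 * np.eye(A.shape[0]) - Z @ Y)
        Y, Z = Y @ T, T @ Z
    return Z / np.sqrt(trA)                    # A^{-1/2} ~ Z_K / sqrt(Tr(A))

def power_iteration_max_eig(A, K=10):
    """Largest eigenvalue of a symmetric PSD matrix A, as in Eq. (18)."""
    u = np.ones(A.shape[0]) / np.sqrt(A.shape[0])
    for _ in range(K):
        v = A @ u
        u = v / np.linalg.norm(v)
    return np.linalg.norm(A @ u)

def adabk_precondition(G, X, Delta, L, R, alpha=0.9, eps=1e-5):
    """One simplified preconditioning step for an FC layer with gradient G,
    input features X (C_in x n) and output-feature gradients Delta (C_out x n)."""
    L = alpha * L + (1 - alpha) * Delta @ Delta.T          # EMA statistics
    R = alpha * R + (1 - alpha) * X @ X.T
    L_hat = schur_newton_inv_sqrt(L + eps * power_iteration_max_eig(L) * np.eye(L.shape[0]))
    R_hat = schur_newton_inv_sqrt(R + eps * power_iteration_max_eig(R) * np.eye(R.shape[0]))
    G_hat = L_hat @ G @ R_hat                              # preconditioned gradient
    G_tilde = G_hat * np.linalg.norm(G) / np.linalg.norm(G_hat)   # norm recovery, Eq. (19)
    return G_tilde, L, R

# Toy usage on random data.
rng = np.random.default_rng(0)
c_out, c_in, n = 16, 32, 64
X, Delta = rng.standard_normal((c_in, n)), rng.standard_normal((c_out, n))
G = Delta @ X.T / n
L, R = np.eye(c_out), np.eye(c_in)
G_tilde, L, R = adabk_precondition(G, X, Delta, L, R)
print(np.linalg.norm(G), np.linalg.norm(G_tilde))          # equal norms after recovery
```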
The one-step preconditioned gradient of AdaBK is summarized in Algorithm 2.

Algorithm 2: One Step Preconditioned Gradient of AdaBK
Input: $T_{s}$, $T_{ir}$, $\alpha$, $\epsilon$, $\beta$, $L_{t-1}$, $R_{t-1}$, $\widehat{L}_{t-1}$, $\widehat{R}_{t-1}$, $X_{t} = [x_{ti}]_{i=1}^{n}$, $\Delta_{t} = [\delta_{ti}]_{i=1}^{n}$, $G_{t} = \nabla_{W_{t}} \mathcal{L}$
Output: $\tilde{G}_{t}$
1 if $t \% T_{s} = 0$ then
2 $L_{t} = \alpha L_{t-1} + (1 - \alpha)\Delta_{t}\Delta_{t}^{\top}$; $R_{t} = \alpha R_{t-1} + (1 - \alpha)X_{t}X_{t}^{\top}$;
3 else
4 $L_{t} = L_{t-1}$; $R_{t} = R_{t-1}$;
5 end
6 if $t \% T_{ir} = 0$ then
7 Compute $\lambda_{max}^{L}$ and $\lambda_{max}^{R}$ by power iteration Eq. (18);
8 Compute $\widehat{L}_{t} = (L_{t} + \lambda_{max}^{L}\epsilon I)^{-\frac{1}{2}}$ and $\widehat{R}_{t} = (R_{t} + \lambda_{max}^{R}\epsilon I)^{-\frac{1}{2}}$ by the Schur-Newton iteration Eq. (17);
9 else
10 $\widehat{L}_{t} = \widehat{L}_{t-1}$ and $\widehat{R}_{t} = \widehat{R}_{t-1}$;
11 end
12 Compute the preconditioned gradient $\widehat{G}_{t} = \widehat{L}_{t} G_{t} \widehat{R}_{t}$;
13 Gradient norm recovery: $\tilde{G}_{t} = \widehat{G}_{t}\, ||G_{t}||_{2} / ||\widehat{G}_{t}||_{2}$;

For an FC layer, the complexity of AdaBK is $T(O(\frac{C_{in}^3 + C_{out}^3}{T_{ir}}) + O(\frac{(C_{in}^2 + C_{out}^2)N}{T_s}) + O(C_{in}C_{out}(C_{in} + C_{out})))$, where $T$ is the total number of iterations. For a Conv layer, the complexity is $T(O(\frac{C_{in}^3k_1^3k_2^3 + C_{out}^3}{T_{ir}}) + O(\frac{(C_{in}^2k_1^2k_2^2 + C_{out}^2)N}{T_s}) + O(C_{in}k_1k_2C_{out}(C_{in}k_1k_2 + C_{out})))$. In our implementation, $T_s$ and $T_{ir}$ are set to 200 and 2000, respectively, and the complexity is acceptable. In practice, AdaBK only costs $10\% \sim 25\%$ additional training time.

AdaBK can be embedded into many existing optimizers. In this paper, we embed it into two commonly used DNN optimizers, i.e., SGDM and AdamW (or Adam), and name the resulting optimizers SGDM_BK and AdamW_BK, respectively. The detailed algorithms of SGDM_BK and AdamW_BK are summarized in the supplementary materials.

# 5. Experiments
We evaluate the proposed SGDM_BK and AdamW_BK optimizers on typical vision tasks, including image classification (on CIFAR100/CIFAR10 [16] and ImageNet [27]) as well as object detection and segmentation (on COCO [17]). For the hyper-parameters of SGDM_BK and AdamW_BK, we set $\alpha = 0.9$, $T_s = 200$, $T_{ir} = 2000$, and $\epsilon = 0.00001$ throughout the experiments unless otherwise specified. Ablation studies on hyper-parameter selection can be found in the supplementary material. All experiments are conducted under the PyTorch 1.11 framework with NVIDIA GeForce RTX 2080Ti and 3090 Ti GPUs.

# 5.1. Image Classification

In the image classification task, we compare SGDM_BK and AdamW_BK with representative and state-of-the-art DNN optimizers, including SGDM, AdamW [22], Adagrad [5], RAdam [19], Adabelief [38], Shampoo [9], KFAC [7] and WSGDM [33]. We tune the learning rate and weight decay for each optimizer with grid search; the detailed settings for the different optimizers can be found in the supplementary material.

Table 1. Testing accuracies (%) on CIFAR100/CIFAR10. The best and second best results are highlighted in bold and italic fonts, respectively. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively.
| Optimizer | SGDM | AdamW | Adagrad | RAdam | Adabelief | Shampoo | KFAC | WSGDM | SGDM_BK | AdamW_BK |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **CIFAR100** | | | | | | | | | | |
| ResNet18 | 77.20 ± .30 | 77.23 ± .10 | 71.55 ± .25 | 77.05 ± .15 | 77.43 ± .36 | 71.81 ± .40 | 78.25 ± .23 | 79.28 ± .27 | 79.30 ± .07 (↑2.10) | 78.66 ± .34 (↑1.43) |
| ResNet50 | 77.78 ± .43 | 78.10 ± .17 | 72.20 ± .15 | 78.20 ± .15 | 79.08 ± .23 | 71.31 ± .53 | 79.25 ± .26 | 80.90 ± .23 | 81.26 ± .20 (↑3.48) | 80.15 ± .19 (↑2.05) |
| VGG11 | 70.80 ± .29 | 71.20 ± .29 | 67.70 ± .18 | 71.08 ± .24 | 72.45 ± .16 | 63.56 ± .44 | 72.75 ± .31 | 73.42 ± .28 | 73.89 ± .13 (↑3.09) | 73.09 ± .29 (↑1.89) |
| VGG19 | 70.94 ± .32 | 70.26 ± .23 | 63.30 ± .58 | 73.01 ± .20 | 72.39 ± .27 | 65.62 ± .56 | 73.87 ± .43 | 74.82 ± .23 | 75.10 ± .13 (↑4.16) | 74.27 ± .25 (↑4.01) |
| DenseNet121 | 79.53 ± .19 | 78.05 ± .26 | 71.27 ± .79 | 78.65 ± .05 | 79.88 ± .08 | 74.95 ± .42 | 79.84 ± .33 | 81.23 ± .10 | 81.18 ± .27 (↑1.65) | 79.93 ± .23 (↑1.88) |
| **CIFAR10** | | | | | | | | | | |
| ResNet18 | 95.10 ± .07 | 94.80 ± .10 | 92.83 ± .12 | 94.70 ± .18 | 95.12 ± .14 | 92.94 ± .27 | 95.01 ± .12 | 95.43 ± .08 | 95.44 ± .12 (↑0.34) | 95.22 ± .13 (↑0.42) |
| ResNet50 | 94.75 ± .30 | 94.72 ± .10 | 92.55 ± .39 | 94.72 ± .10 | 95.35 ± .05 | 92.61 ± .27 | 95.43 ± .16 | 95.80 ± .15 | 95.86 ± .05 (↑1.11) | 95.40 ± .07 (↑0.68) |
| VGG11 | 92.17 ± .19 | 92.02 ± .08 | 90.25 ± .25 | 92.00 ± .18 | 92.45 ± .18 | 89.01 ± .29 | 92.82 ± .11 | 92.95 ± .20 | 93.14 ± .26 (↑0.97) | 92.96 ± .07 (↑0.94) |
| VGG19 | 93.61 ± .06 | 93.40 ± .04 | 91.28 ± .14 | 93.57 ± .11 | 93.58 ± .12 | 90.62 ± .32 | 93.47 ± .09 | 93.91 ± .19 | 94.03 ± .15 (↑0.42) | 93.94 ± .10 (↑0.54) |
| DenseNet121 | 95.37 ± .17 | 94.80 ± .07 | 92.95 ± .23 | 95.02 ± .08 | 95.37 ± .04 | 94.37 ± .36 | 95.18 ± .22 | 95.72 ± .14 | 95.70 ± .13 (↑0.33) | 95.40 ± .04 (↑0.60) |
![](images/95d5f8a360e7c716612609b9293b9013e21361a49fc4fd77af00cfb6266402c3.jpg)
Figure 2. Training loss curves (loss vs. epoch and loss vs. time) of SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 before 60 epochs.

![](images/342c6df23d7af2849d785567183a0e67e17121371633e6f349bd1565c9b0530d.jpg)

![](images/e6f4a094088fc4b52cba4d7286a65c07b6c6751b262e4a8636b43bcf09b7d936.jpg)

![](images/72a9d11cb4697d5f6359940bc160b3616e4d5d8d935f4e88871b8bf1d6fd6d89.jpg)

Results on CIFAR100/10: We first verify the effectiveness of SGDM_BK and AdamW_BK with different DNN models on CIFAR100/CIFAR10 [16], including ResNet18, ResNet50 [12], VGG11, VGG19 [29] and DenseNet-121 [13]. All the DNN models are trained for 200 epochs with batch size 128 on one GPU. The learning rate is multiplied by 0.1 every 60 epochs. The experiments are repeated 4 times and the results are reported in a "mean $\pm$ std" format in Table 1. We can see that SGDM_BK and AdamW_BK achieve significant improvements over SGDM and AdamW, which are $1.44\% \sim 4.16\%$ and $1.43\% \sim 4.01\%$ on CIFAR100, and $0.28\% \sim 1.11\%$ and $0.42\% \sim 0.94\%$ on CIFAR10, respectively. They also surpass the other compared optimizers for most of the backbone networks.

Figure 2 shows the curves of training loss vs. epoch and training loss vs. time for SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 backbones before 60 epochs. One can see that SGDM_BK and AdamW_BK significantly speed up the training of SGDM and AdamW, respectively. Since SGDM_BK and AdamW_BK cost additional time in each iteration, for a fair comparison we also show the curves of training loss vs. time, from which one can see that they still have clear advantages over the original SGDM and AdamW.

Results on ImageNet-1k: To verify that SGDM_BK and AdamW_BK also work well on large-scale datasets, we evaluate them on ImageNet-1k [27], which contains 1000 categories with 1.28 million images for training and 50K images for validation. ResNet18 and ResNet50 are selected as the backbone models with training batch size 256 on 4 GPUs, and the training settings follow the work in [3, 38]. The learning rate is multiplied by 0.1 every 30 epochs. SGDM_BK and AdamW_BK adopt the same learning rate and weight decay as SGDM and AdamW, respectively. The top-1 accuracies on the validation set are reported in Table 2. One can see that SGDM_BK and AdamW_BK perform better than the others. Meanwhile, we plot the training and validation accuracy curves in Figure 3, from which we see that the proposed AdaBK technique can largely speed up the training process.

Table 2. Top 1 accuracy (%) on the validation set of ImageNet-1k. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively.
| Optimizer | SGDM | AdamW | Adagrad | RAdam | Adabelief | Shampoo | KFAC | WSGDM | SGDM_BK | AdamW_BK |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ResNet18 | 70.49 | 70.01 | 62.22 | 69.92 | 70.08 | 64.45 | 69.62 | 71.43 | 71.59 (↑1.10) | 71.63 (↑1.62) |
| ResNet50 | 76.31 | 76.02 | 69.38 | 76.12 | 76.22 | 70.11 | 76.36 | 77.48 | 77.62 (↑1.31) | 77.22 (↑1.10) |
+ +![](images/1f8f79953f2cfcfc736cabc6a9c4c433abbcae3e6536c60505982b20eb1d5e42.jpg) +Figure 3. Training and validation accuracy curves of SGDM, SGDM_BK, AdamW and AdamW_BK on ImageNet-1k with ResNet18 and ResNet50 backbones. + +![](images/af82e3214ce84bc5a3024da478fa12b056a663a0e82d29b6cc990b42897a12b8.jpg) + +![](images/33fc8a4f43f004b3ae7f1e2d9898d9d0ffe10d3ec650adacac282db493b85408.jpg) + +![](images/d64f394059c8fa4a7ac7c40f47b579034fb45d0875459760cb7d399532f1c38f.jpg) + +Table 3. Top 1 accuracy (%) on the validation set of ImageNet-1k. + +
| Optimizer | AdamW | AdamW_BK |
| --- | --- | --- |
| Swin-T | 81.18 | 81.79 (↑0.61) |
| Swin-B | 83.02 | 83.14 (↑0.12) |
We also evaluate the proposed optimizer on the Swin transformer [20] backbone. We compare AdamW_BK with its default optimizer AdamW. The configurations follow the settings of the official MMClassification toolbox. The results are shown in Table 3, from which we can see that AdamW_BK also achieves a certain performance gain over AdamW.

# 5.2. Detection and Segmentation

We then evaluate SGDM_BK and AdamW_BK on the COCO [17] detection and segmentation tasks to show that they work well beyond classification and can be used to fine-tune pre-trained models. The models are pre-trained on ImageNet-1k and fine-tuned on COCO train2017 (118K images), and then evaluated on COCO val2017 (40K images). The latest version of the MMDetection toolbox [4] is used to train our models. We test SGDM_BK and AdamW_BK with Faster-RCNN [25] and Mask-RCNN [11] using various backbones, including ResNet50 (R50), ResNet101 (R101) and the Swin transformer [20].

As mentioned in Section 4, with the gradient norm recovery operation, we can directly adopt the same hyper-parameters (i.e., learning rate and weight decay) of SGDM and AdamW for SGDM_BK and AdamW_BK, respectively. To be specific, for the R50 and R101 backbones, we compare the proposed optimizers with SGDM, WSGDM and AdamW. The learning rate and weight decay are set to 0.02 and 0.0001 for SGDM, WSGDM and SGDM_BK, and to 0.0001 and 0.2 for AdamW and AdamW_BK, respectively.

For the Swin transformer backbone, the learning rate and weight decay are set to 0.0001 and 0.02 for AdamW and AdamW_BK, respectively. The learning rate schedule is 1X for Faster-RCNN. Other configurations follow the settings of the official MMDetection toolbox. For the default optimizers, we use their official results. This experiment is conducted on NVIDIA GeForce RTX 3090 Ti GPUs.

Table 4 lists the Average Precision (AP) of object detection by Faster-RCNN. It can be seen that the models trained by SGDM_BK and AdamW_BK achieve clear performance gains of $1.6\% \sim 2.2\%$ AP for the R50 and R101 backbones. Fig. 4 shows the training loss curves of Faster-RCNN with the ResNet50 backbone. One can see that SGDM_BK and AdamW_BK accelerate the training process over SGDM and AdamW. Table 5 shows the $\mathrm{AP}^b$ of detection and the $\mathrm{AP}^m$ of segmentation by Mask-RCNN. We can see that SGDM_BK and AdamW_BK gain $1.5\% \sim 2.2\%$ $\mathrm{AP}^b$ and $1.2\% \sim 2.2\%$ $\mathrm{AP}^m$ for the R50 and R101 backbones over SGDM and AdamW, respectively. For the Swin transformer backbone, AdamW_BK also improves $0.7\% \sim 0.9\%$ $\mathrm{AP}^b$ and $0.3\% \sim 0.9\%$ $\mathrm{AP}^m$ over AdamW. Meanwhile, compared with WSGDM, the proposed SGDM_BK outperforms it with a $0.2\% \sim 0.6\%$ AP gain. Moreover, Fig. 5 plots the training loss curves of Mask-RCNN with ResNet50, Swin-T (1X) and Swin-S (3X). The proposed SGDM_BK and AdamW_BK accelerate the train

Table 4. Detection results of Faster-RCNN on COCO. $\Delta$ means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer.
| Backbone | Algorithm | AP | AP$_{.5}$ | AP$_{.75}$ | AP$_{s}$ | AP$_{m}$ | AP$_{l}$ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| R50 | SGDM* | 37.4 | 58.1 | 40.4 | 21.2 | 41.0 | 48.1 |
| | WSGDM | 39.4 | 60.6 | 43.1 | 23.1 | 42.9 | 50.7 |
| | SGDM_BK | 39.6 | 60.7 | 42.8 | 22.6 | 42.9 | 52.2 |
| | Δ | ↑2.2 | ↑2.6 | ↑2.4 | ↑1.4 | ↑1.9 | ↑4.1 |
| | AdamW | 37.8 | 58.7 | 41.0 | 22.1 | 41.2 | 49.2 |
| | AdamW_BK | 39.4 | 60.3 | 42.9 | 22.5 | 42.8 | 52.3 |
| | Δ | ↑1.6 | ↑1.6 | ↑1.9 | ↑0.4 | ↑1.6 | ↑3.1 |
| R101 | SGDM* | 39.4 | 60.1 | 43.1 | 22.4 | 43.7 | 51.1 |
| | WSGDM | 41.1 | 61.6 | 45.1 | 24.0 | 45.2 | 54.3 |
| | SGDM_BK | 41.6 | 62.3 | 45.3 | 24.9 | 45.6 | 55.2 |
| | Δ | ↑2.2 | ↑2.2 | ↑2.2 | ↑2.5 | ↑1.9 | ↑4.1 |
| | AdamW | 40.1 | 60.6 | 43.8 | 22.9 | 44.1 | 52.8 |
| | AdamW_BK | 41.7 | 62.1 | 45.5 | 24.4 | 45.4 | 56.2 |
| | Δ | ↑1.6 | ↑1.5 | ↑1.7 | ↑1.5 | ↑1.3 | ↑3.4 |
+ +Table 5. Detection and segmentation results of Mask-RCNN on COCO. $\Delta$ means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer. + +
| Backbone | Lr schedule | Algorithm | AP$^{b}$ | AP$^{b}_{.5}$ | AP$^{b}_{.75}$ | AP$^{m}$ | AP$^{m}_{.5}$ | AP$^{m}_{.75}$ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| R50 | 1X | SGDM* | 38.2 | 58.8 | 41.4 | 34.7 | 55.7 | 37.2 |
| | | W-SGDM | 39.8 | 60.8 | 43.4 | 36.4 | 57.6 | 38.9 |
| | | SGDM_BK | 40.4 | 61.3 | 43.9 | 36.9 | 58.3 | 39.6 |
| | | Δ | ↑2.2 | ↑2.5 | ↑2.5 | ↑2.2 | ↑2.6 | ↑2.4 |
| | | AdamW | 37.8 | 58.7 | 41.0 | 35.4 | 56.2 | 38.0 |
| | | AdamW_BK | 40.0 | 60.6 | 43.5 | 36.7 | 58.0 | 39.3 |
| | | Δ | ↑2.2 | ↑1.9 | ↑2.5 | ↑1.3 | ↑1.8 | ↑1.3 |
| R101 | 1X | SGDM* | 40.0 | 60.5 | 44.0 | 36.1 | 57.5 | 38.6 |
| | | W-SGDM | 41.7 | 62.5 | 45.5 | 37.9 | 59.4 | 40.8 |
| | | SGDM_BK | 42.2 | 62.9 | 46.1 | 38.1 | 60.0 | 40.7 |
| | | Δ | ↑2.2 | ↑2.4 | ↑2.1 | ↑2.0 | ↑2.5 | ↑2.1 |
| | | AdamW | 40.7 | 61.1 | 44.6 | 37.2 | 58.4 | 40.1 |
| | | AdamW_BK | 42.2 | 62.5 | 46.0 | 38.4 | 59.9 | 41.2 |
| | | Δ | ↑1.5 | ↑1.4 | ↑1.4 | ↑1.2 | ↑1.5 | ↑1.1 |
| Swin-T | 1X | AdamW* | 42.7 | 65.2 | 46.8 | 39.3 | 62.2 | 42.2 |
| | | AdamW_BK | 43.6 | 65.9 | 47.8 | 40.2 | 63.1 | 43.1 |
| | | Δ | ↑0.9 | ↑0.7 | ↑1.0 | ↑0.9 | ↑0.9 | ↑0.9 |
| Swin-T | 3X | AdamW* | 46.0 | 68.2 | 50.3 | 41.6 | 65.3 | 44.7 |
| | | AdamW_BK | 46.8 | 68.8 | 51.4 | 42.4 | 66.1 | 45.6 |
| | | Δ | ↑0.8 | ↑0.6 | ↑1.1 | ↑0.8 | ↑0.8 | ↑0.9 |
| Swin-S | 3X | AdamW* | 48.2 | 69.8 | 52.8 | 43.2 | 67.0 | 46.1 |
| | | AdamW_BK | 48.9 | 70.4 | 53.8 | 43.5 | 67.4 | 46.8 |
| | | Δ | ↑0.7 | ↑0.6 | ↑1.0 | ↑0.3 | ↑0.4 | ↑0.7 |
+ +ing process clearly. The results on COCO demonstrate that the proposed SGDM_BK and AdamW_BK can be easily adopted into the downstream tasks without additional hyper-parameter tuning. + +# 5.3. Memory Usage and Training Time + +For full-matrix adaptive optimizers, one important concern is the training cost, including memory usage and training time. Here we compare the memory and time cost of our optimizers with SGDM [23], AdamW [22] and AdaGrad [5] on CIFAR100. ResNet50 is used as the backbone and one GeForce RTX 2080Ti GPU is used. The results are reported in Table 6. One can see that the embedding of AdaBK slightly increases the memory usage and training time ( $10\% \sim 25\%$ extra training time and memory usage). Compared to the improvement of performance, the extra cost is affordable and worthwhile. + +# 6. Conclusion + +This work presented a general regret bound for the constrained full-matrix preconditioned gradient methods for + +![](images/b1b169f9eeaaae4e9972c07fe7d8ebb5fc3c9d1e987f1b1b2125d7553be6532e.jpg) +Figure 4. Training loss curves of ResNet50. + +![](images/978f79c9f7191e093db6e1bef859a4fdd2c5551cb6ac45ca52e7fdc3a4ce5c9a.jpg) + +![](images/0d9ae172bab8d9066c7c413888d91765f50880626e31a70f410309dfb0def852.jpg) + +![](images/e0b2a34f9902cd702eb05dd0b97a81a67c59f7b6488ec8ec9539bbab16179041.jpg) + +![](images/f8ff2e0f833104299d4ffbb23f3b23038df0be2f88d6f1049955713c9e1c211e.jpg) +Figure 5. Training loss curves of Mask-RCNN. + +![](images/98a0f94ae891fabe8b52f2139e5ec9ded803730b82ceedab7712846a3fe7c56c.jpg) + +Table 6. Memory cost (MiB) and training time (h) of different optimizers with ResNet50. + +
| Optimizer | SGDM | AdamW | Adagrad | SGDM_BK | AdamW_BK |
| --- | --- | --- | --- | --- | --- |
| Memory | 5867 | 5883 | 5865 | 6525 | 6535 |
| Time | 3.42 | 3.48 | 3.46 | 4.14 | 4.20 |
+ +DNN optimization. Different from previous full-matrix preconditioned methods, where the parameter update formulas are designed heuristically, we proved that given a cone constraint on the full-matrix preconditioner, the corresponding parameter update formula can be obtained by optimizing a guide function. Based on our theoretical analysis, we derived a specific guide function with the layer-wise block-diagonal constraint and Kronecker-factorized constraint. Through optimizing an upper bound of the guide function, a new preconditioned optimization algorithm, namely AdaBK, was obtained. We embedded AdaBK into two widely used optimizers, i.e., SGDM and AdamW, and the experimental results on image classification, object detection and segmentation tasks demonstrated that AdaBK can significantly improve the DNN optimization performance with only $10\% \sim 25\%$ extra computation cost. + +# References + +[1] Naman Agarwal, Brian Bullins, Xinyi Chen, Elad Hazan, Karan Singh, Cyril Zhang, and Yi Zhang. Efficient full-matrix adaptive regularization. In International Conference on Machine Learning, pages 102-110. PMLR, 2019. 1 +[2] Richard L Burden, J Douglas Faires, and Annette M Burden. Numerical analysis. Cengage learning, 2015. 4 +[3] Jinghui Chen, Dongruo Zhou, Yiqi Tang, Ziyan Yang, Yuan Cao, and Quanquan Gu. Closing the generalization gap of adaptive gradient methods in training deep neural networks. arXiv preprint arXiv:1806.06763, 2018. 6 +[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 7 +[5] John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(7), 2011. 1, 2, 3, 6, 8 +[6] Thomas George, César Laurent, Xavier Bouthillier, Nicolas Ballas, and Pascal Vincent. Fast approximate natural gradient descent in a kronecker-factored eigenbasis. arXiv preprint arXiv:1806.03884, 2018. 1 +[7] Roger Grosse and James Martens. A kronecker-factored approximate fisher matrix for convolution layers. In International Conference on Machine Learning, pages 573-582. PMLR, 2016. 1, 2, 3, 4, 6 +[8] Chun-Hua Guo and Nicholas J Higham. A schur-newton method for the matrix\boldsymbol{\mathrm{boldmath}} $\mathfrak{p}$ th root and its inverse. SIAM Journal on Matrix Analysis and Applications, 28(3):788-804, 2006. 4 +[9] Vineet Gupta, Tomer Koren, and Yoram Singer. Shampoo: Preconditioned stochastic tensor optimization. In International Conference on Machine Learning, pages 1842-1850. PMLR, 2018. 1, 2, 3, 4, 6 +[10] Elad Hazan et al. Introduction to online convex optimization. Foundations and Trends® in Optimization, 2(3-4):157-325, 2016. 2 +[11] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 7 +[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6 +[13] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 6 +[14] Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. 
Label propagation for deep semi-supervised learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5070-5079, 2019. 1 +[15] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 1, 2 + +[16] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 6 +[17] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5, 7 +[18] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1 +[19] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1, 6 +[20] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7 +[21] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 1 +[22] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 1, 5, 8 +[23] Ning Qian. On the momentum term in gradient descent learning algorithms. Neural networks, 12(1):145-151, 1999. 1, 8 +[24] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1 +[25] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 7 +[26] Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pages 400-407, 1951. 1 +[27] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. 5, 6 +[28] Shai Shalev-Shwartz et al. Online learning and online convex optimization. Foundations and Trends® in Machine Learning, 4(2):107-194, 2012. 2 +[29] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6 +[30] Tijmen Tieleman and Geoffrey Hinton. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4:26-31, 2012. 1 +[31] Chengxi Ye, Matthew Evanusa, Hua He, Anton Mitrokhin, Tom Goldstein, James A Yorke, Cornelia Fermüller, and + +Yiannis Aloimonos. Network deconvolution. arXiv preprint arXiv:1905.11926, 2019. 5 +[32] Hongwei Yong, Jianqiang Huang, Xiansheng Hua, and Lei Zhang. Gradient centralization: A new optimization technique for deep neural networks. In European Conference on Computer Vision, pages 635-652. Springer, 2020. 1 +[33] Hongwei Yong and Lei Zhang. 
An embedded feature whitening approach to deep neural network optimization. In the European Conference on Computer Vision, 2022. 4, 5, 6 +[34] Jihun Yun, Aurelie C Lozano, and Eunho Yang. Stochastic gradient methods with block diagonal matrix adaptation. arXiv preprint arXiv:1905.10757, 2019. 1 +[35] Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012. 1 +[36] Huishuai Zhang, Wei Chen, and Tie-Yan Liu. Train feedforward neural network with layer-wise adaptive rate via approximating back-matching propagation. arXiv preprint arXiv:1802.09750, 2018. 5 +[37] Michael R Zhang, James Lucas, Geoffrey Hinton, and Jimmy Ba. Lookahead optimizer: k steps forward, 1 step back. arXiv preprint arXiv:1907.08610, 2019. 1 +[38] Juntang Zhuang, Tommy Tang, Yifan Ding, Sekhar C Tatikonda, Nicha Dvornek, Xenophon Papademetris, and James Duncan. Adbelief optimizer: Adapting stepsizes by the belief in observed gradients. Advances in neural information processing systems, 33:18795-18806, 2020. 1, 6 \ No newline at end of file diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/images.zip b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..dc52cd2ccca29e033fd36dda0400255a3ea01b90 --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9b66733a495a83746f1aa04869efa09d0bc4803069d3f876c828cbd1793a91d +size 656798 diff --git a/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/layout.json b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3da1b13a81529ab1a123a43db60a8940c94f65c6 --- /dev/null +++ b/2023/A General Regret Bound of Preconditioned Gradient Method for DNN Training/layout.json @@ -0,0 +1,11719 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 51, + 103, + 543, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 103, + 543, + 121 + ], + "spans": [ + { + "bbox": [ + 51, + 103, + 543, + 121 + ], + "type": "text", + "content": "A General Regret Bound of Preconditioned Gradient Method for DNN Training" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 196, + 144, + 397, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 144, + 397, + 174 + ], + "spans": [ + { + "bbox": [ + 196, + 144, + 397, + 174 + ], + "type": "text", + "content": "Hongwei Yong Ying Sun Lei Zhang The Hong Kong Polytechnic University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 126, + 174, + 466, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 174, + 466, + 185 + ], + "spans": [ + { + "bbox": [ + 126, + 174, + 466, + 185 + ], + "type": "text", + "content": "hongwei.yong@polyu.edu.hk, {csysun, cslzhang}@comp.polyu.edu.hk" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "spans": [ + { + "bbox": [ + 143, + 213, + 192, + 225 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 238, + 289, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 238, + 289, + 550 + ], + "spans": 
[ + { + "bbox": [ + 46, + 238, + 289, + 550 + ], + "type": "text", + "content": "While adaptive learning rate methods, such as Adam, have achieved remarkable improvement in optimizing Deep Neural Networks (DNNs), they consider only the diagonal elements of the full preconditioned matrix. Though the full-matrix preconditioned gradient methods theoretically have a lower regret bound, they are impractical for use to train DNNs because of the high complexity. In this paper, we present a general regret bound with a constrained full-matrix preconditioned gradient, and show that the updating formula of the preconditioner can be derived by solving a cone-constrained optimization problem. With the block-diagonal and Kronecker-factorized constraints, a specific guide function can be obtained. By minimizing the upper bound of the guide function, we develop a new DNN optimizer, termed AdaBK. A series of techniques, including statistics updating, dampening, efficient matrix inverse root computation, and gradient amplitude preservation, are developed to make AdaBK effective and efficient to implement. The proposed AdaBK can be readily embedded into many existing DNN optimizers, e.g., SGDM and AdamW, and the corresponding SGDM_BK and AdamW_BK algorithms demonstrate significant improvements over existing DNN optimizers on benchmark vision tasks, including image classification, object detection and segmentation. The code is publicly available at https://github.com/Yonghongwei/AdaBK." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 573, + 128, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 128, + 586 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 128, + 586 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 594, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 594, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 46, + 594, + 287, + 715 + ], + "type": "text", + "content": "Stochastic gradient descent (SGD) [26] and its variants [21, 23], which update the parameters along the opposite of their gradient directions, have achieved great success in optimizing deep neural networks (DNNs) [14, 24]. Instead of using a uniform learning rate for different parameters, Duchi et al. [5] proposed the AdaGrad method, which adopts an adaptive learning rate for each parameter, and proved that AdaGrad can achieve lower regret bound than SGD. Following AdaGrad, a class of adaptive learning rate gradient descent methods has been proposed. For example," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 213, + 547, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 213, + 547, + 334 + ], + "spans": [ + { + "bbox": [ + 304, + 213, + 547, + 334 + ], + "type": "text", + "content": "RMSProp [30] and AdaDelta [35] introduce the exponential moving average to replace the sum of second-order statistics of the gradient for computing the adaptive learning rate. Adam [15] further adopts the momentum into the gradient, and AdamW [22] employs a weight-decoupled strategy to improve the generalization performance. RAdam [18], Adabelief [38] and Ranger [19,32,37] are proposed to accelerate training and improve the generalization capability over Adam. The adaptive learning rate methods have become the mainstream DNN optimizers." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "spans": [ + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": "In addition to AdaGrad, Duchi et al. [5] provided a full-matrix preconditioned gradient descent (PGD) method that adopts the matrix " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_T = (\\sum_{t=1}^T \\mathbf{g}_t \\mathbf{g}_t^\\top)^{\\frac{1}{2}}" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": " to adjust the gradient " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathbf{g}_T" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": " denotes the iteration number and " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": " is the number of the current iteration. It has been proved [5] that the preconditioned gradient " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_T^{-1} \\mathbf{g}_T" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": " has a lower regret bound than the adaptive learning rate methods that only consider the diagonal elements of " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_T" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": ". However, the full-matrix preconditioned gradient is impractical to use due to its high dimension, which limits its application to DNN optimization. Various works have been reported to solve this problem in parameter space by adding some structural constraints on the full-matrix " + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_T" + }, + { + "bbox": [ + 304, + 338, + 547, + 625 + ], + "type": "text", + "content": ". For instances, GGT [1] stores only the gradients of recent iterations so that the matrix inverse root can be computed efficiently by fast low-rank computation tricks. Yun et al. [34] proposed a mini-block diagonal matrix framework to reduce the cost through coordinate partitioning and grouping strategies. Gupta et al. [9] proposed to extend AdaGrad with Kronecker products of full-matrix preconditioners to make it more efficient in DNN training. Besides, natural gradient approaches [6, 7], which adopt the approximations of the Fisher matrix to correct the descent direction, can also be regarded as full-matrix preconditioners." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "spans": [ + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": "The existing constrained PGD (CPGD) methods, however, are heuristic since manually designed approximations to the full matrix " + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "inline_equation", + "content": "\\pmb{H}_T" + }, + { + "bbox": [ + 304, + 629, + 547, + 715 + ], + "type": "text", + "content": " are employed in them, while their influence on the regret bound is unknown. By far, they lack a general regret-bound theory that can guide us to design the full-matrix preconditioned gradient methods. On the other hand, the practicality and effectiveness of these precondi" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 145, + 0, + 494, + 37 + ], + "type": "text", + "content": "This CVPR paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "7866" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 96 + ], + "type": "text", + "content": "tioner methods are also an issue, which prevents them from being widely used in training DNNs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 46, + 98, + 286, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 98, + 286, + 253 + ], + "spans": [ + { + "bbox": [ + 46, + 98, + 286, + 253 + ], + "type": "text", + "content": "To address the above-mentioned issues, in this paper we present a theorem to connect the regret bound of the constrained full-matrix preconditioner with a guide function. By minimizing the guide function under the constraints, an updating formula of the preconditioned gradient can be derived. That is, optimizing the guide function of the preconditioner will minimize its regret bound at the same time, while different constraints can yield different updating formulas. With the commonly-used constraints on DNN preconditioners, such as the block-diagonal and Kronecker-factorized constraints [7, 9], specific guide functions can be obtained. By minimizing the upper bound of the guide function, a new optimizer, namely AdaBK, is derived." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 255, + 286, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 255, + 286, + 387 + ], + "spans": [ + { + "bbox": [ + 46, + 255, + 286, + 387 + ], + "type": "text", + "content": "We further propose a series of techniques, including statistics updating, dampening, efficient matrix inverse root computation and gradient norm recovery, to make AdaBK more practical to use for DNN optimization. By embedding AdaBK into SGDM and AdamW (or Adam), we develop two new DNN optimizers, SGDM_BK and AdamW_BK. With acceptable extra computation and memory cost, they achieve significant performance gain in convergence speed and generalization capability over state-of-the-art DNN optimizers, as demonstrated in our experiments in image classification, object detection and segmentation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 388, + 286, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 286, + 579 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 286, + 579 + ], + "type": "text", + "content": "For a better understanding of our proposed regret bound and the developed DNN optimizer, in Fig. 1, we illustrate the existing major DNN optimizers and their relationships. SGD and its momentum version (SGDM) apply the same learning rate to all parameters based on their gradient descent directions. The adaptive learning rate methods assign different learning rates to different parameters by using second-order information of the gradients, achieving better convergence performance. The adaptive learning rate methods can be viewed as special cases of PGD methods by considering only the diagonal elements of the full preconditioned matrix of gradients. Our method belongs to the class of PGD methods, while our proposed general regret bound of constrained PGD methods can be applied to the PGD optimizers under different constraints, including AdaGrad, Full-Matrix AdaGrad and our AdaBK." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "spans": [ + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": "Notation system. We denote by " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "\\boldsymbol{w}_t" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "\\boldsymbol{g}_t" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " the weight vector and its gradient of a DNN model in the " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": "-th iteration. 
Denote by " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "\\boldsymbol{g}_{t,i}" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " the gradient of the " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": "-th sample in a batch in the " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": "-th iteration, we have " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "\\boldsymbol{g}_t = \\frac{1}{n}\\sum_{i=1}^n\\boldsymbol{g}_{t,i}" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " is the batch size. The notations " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A \\succeq 0" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A \\succ 0" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " for a matrix " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " denote that " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " is symmetric positive semidefinite (PSD) and symmetric positive definite, respectively. " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A \\succeq B" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A - B \\succeq 0" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " means that " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A - B" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " is PSD. " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "\\operatorname{Tr}(A)" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " represents the trace of the matrix " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": ". 
For a PSD matrix " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A^\\alpha = U\\Sigma^\\alpha U^\\top" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "U\\Sigma U^\\top" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " is the Singular Value Decomposition (SVD) of " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "inline_equation", + "content": "||\\boldsymbol{x}||_A = \\sqrt{\\boldsymbol{x}^\\top\\boldsymbol{A}\\boldsymbol{x}}" + }, + { + "bbox": [ + 46, + 582, + 286, + 714 + ], + "type": "text", + "content": " is the Maha" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 305, + 73, + 545, + 274 + ], + "blocks": [ + { + "bbox": [ + 305, + 73, + 545, + 274 + ], + "lines": [ + { + "bbox": [ + 305, + 73, + 545, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 73, + 545, + 274 + ], + "type": "image", + "image_path": "ed9490d0f611aa6f626fe3ae514f1f3953edb31975c0d66540d093ecc18bfa8f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 332, + 282, + 517, + 293 + ], + "lines": [ + { + "bbox": [ + 332, + 282, + 517, + 293 + ], + "spans": [ + { + "bbox": [ + 332, + 282, + 517, + 293 + ], + "type": "text", + "content": "Figure 1. Illustration of the main DNN optimizers." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "spans": [ + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": "lanobis norm of " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " induced by PSD matrix " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": ", and its dual norm is " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\| \\pmb{x} \\|_{\\pmb{A}}^{*} = \\sqrt{\\pmb{x}^{\\top} \\pmb{A}^{-1} \\pmb{x}}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{A} \\otimes \\pmb{B}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " means the Kronecker product of " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{A}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{B}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": ", while " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{A} \\odot \\pmb{B}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{A}^{\\odot \\alpha}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " are the element-wise matrix product and element-wise power operation, respectively. " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\operatorname{Diag}(\\pmb{x})" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " is a diagonal matrix with diagonal vector " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "inline_equation", + "content": "\\operatorname{vec}(\\cdot)" + }, + { + "bbox": [ + 305, + 317, + 545, + 390 + ], + "type": "text", + "content": " denotes the vectorization function." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 405, + 384, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 405, + 384, + 418 + ], + "spans": [ + { + "bbox": [ + 306, + 405, + 384, + 418 + ], + "type": "text", + "content": "2. Background" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 423, + 462, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 423, + 462, + 436 + ], + "spans": [ + { + "bbox": [ + 306, + 423, + 462, + 436 + ], + "type": "text", + "content": "2.1. Online Convex Optimization" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "spans": [ + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "content": "The online convex optimization framework [10, 28] remains the most powerful and popular tool to analyze DNN optimization algorithms, including AdaGrad [5], Adam [15], Shampoo [9], etc. 
Given an arbitrary, unknown sequence of convex loss functions " + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "inline_equation", + "content": "\\{f_1(\\pmb{w}),\\dots,f_t(\\pmb{w}),\\dots,f_T(\\pmb{w})\\}" + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "content": ", we aim to optimize the weight " + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "inline_equation", + "content": "\\pmb{w}_t" + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "content": " in the " + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "content": "-th iteration, and evaluate it on the loss function " + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "inline_equation", + "content": "f_{t}(\\pmb{w})" + }, + { + "bbox": [ + 305, + 440, + 545, + 547 + ], + "type": "text", + "content": ". The goal of our optimization process is to minimize the regret, which is defined as follows [10, 28]:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 353, + 547, + 545, + 569 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 547, + 545, + 569 + ], + "spans": [ + { + "bbox": [ + 353, + 547, + 545, + 569 + ], + "type": "interline_equation", + "content": "R (T) = \\sum_ {t = 1} ^ {T} \\left(f _ {t} \\left(\\boldsymbol {w} _ {t}\\right) - f _ {t} (\\hat {\\boldsymbol {w}})\\right), \\tag {1}", + "image_path": "b1dd3ef0e67f3d9f70d9c442ebcf9d9272959f319bbe1bdd718aedc83e767e7e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 569, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 569, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 305, + 569, + 545, + 592 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 305, + 569, + 545, + 592 + ], + "type": "inline_equation", + "content": "\\hat{\\boldsymbol{w}} = \\arg \\min_{\\boldsymbol{w}} \\sum_{t=1}^{T} f_t(\\boldsymbol{w})" + }, + { + "bbox": [ + 305, + 569, + 545, + 592 + ], + "type": "text", + "content": ". Generally speaking, a lower regret bound means a more effective learning process." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 601, + 525, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 601, + 525, + 613 + ], + "spans": [ + { + "bbox": [ + 306, + 601, + 525, + 613 + ], + "type": "text", + "content": "2.2. Regret Bound of Preconditioned Gradient" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "text", + "content": "As in previous works [5, 9], an online mirror descent with an adaptive time-dependent regularization is adopted for online convex learning. 
In the " + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "text", + "content": "-th iteration, suppose we have obtained the gradient " + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\pmb{g}_t = \\nabla f_t(\\pmb{w}_t)" + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "text", + "content": ", then given a PSD matrix " + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "inline_equation", + "content": "\\pmb{H}_t\\succeq \\mathbf{0}" + }, + { + "bbox": [ + 305, + 618, + 545, + 689 + ], + "type": "text", + "content": ", the parameters are updated by optimizing the following objective function:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 337, + 689, + 545, + 711 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 689, + 545, + 711 + ], + "spans": [ + { + "bbox": [ + 337, + 689, + 545, + 711 + ], + "type": "interline_equation", + "content": "\\boldsymbol {w} _ {t + 1} = \\arg \\min _ {\\boldsymbol {w}} \\eta \\boldsymbol {g} _ {t} ^ {\\top} \\boldsymbol {w} + \\frac {1}{2} \\left\\| \\boldsymbol {w} - \\boldsymbol {w} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}. \\tag {2}", + "image_path": "daf056b8195db4c7b19207ae9a10a3256463df50f951ab84b490e49ea1e6d82b.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7867" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 72, + 287, + 95 + ], + "type": "text", + "content": "The solution of Eq. (2) is exactly a preconditioned gradient descent step, which is" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 95, + 286, + 108 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 95, + 286, + 108 + ], + "spans": [ + { + "bbox": [ + 116, + 95, + 286, + 108 + ], + "type": "interline_equation", + "content": "\\boldsymbol {w} _ {t + 1} = \\boldsymbol {w} _ {t} - \\eta \\boldsymbol {H} _ {t} ^ {- 1} \\boldsymbol {g} _ {t}. \\tag {3}", + "image_path": "c11ee777380993bcc8c174466fedb1b98a839ec182275f25089bffb0e3efc581.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 46, + 109, + 286, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 109, + 286, + 131 + ], + "spans": [ + { + "bbox": [ + 46, + 109, + 286, + 131 + ], + "type": "text", + "content": "Duchi et al. 
[5] have provided a regret bound for online mirror descent, as shown in Lemma 1:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 133, + 287, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 133, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 133, + 287, + 157 + ], + "type": "text", + "content": "Lemma 1 [5, 9] For any sequence of matrices " + }, + { + "bbox": [ + 47, + 133, + 287, + 157 + ], + "type": "inline_equation", + "content": "H_T \\succeq \\ldots \\succeq H_1 \\succeq 0" + }, + { + "bbox": [ + 47, + 133, + 287, + 157 + ], + "type": "text", + "content": ", the regret of online mirror descent holds that" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 167, + 287, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 167, + 287, + 224 + ], + "spans": [ + { + "bbox": [ + 56, + 167, + 287, + 224 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} R (T) \\leq \\frac {1}{2 \\eta} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {w} _ {t} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2} - \\left\\| \\boldsymbol {w} _ {t + 1} - \\hat {\\boldsymbol {w}} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {2}\\right) \\\\ + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {4} \\\\ \\end{array}", + "image_path": "2414734cca0fee0ebf1926067cf960cf3e7cf80d3ef1ef72fb974b99dc7ef8fa.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 225, + 287, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 225, + 287, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 225, + 287, + 237 + ], + "type": "text", + "content": "If we further assume " + }, + { + "bbox": [ + 47, + 225, + 287, + 237 + ], + "type": "inline_equation", + "content": "D = \\max_{t\\leq T}||\\pmb{w}_t - \\hat{\\pmb{w}} ||_2" + }, + { + "bbox": [ + 47, + 225, + 287, + 237 + ], + "type": "text", + "content": ", then we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 237, + 287, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 237, + 287, + 258 + ], + "spans": [ + { + "bbox": [ + 82, + 237, + 287, + 258 + ], + "type": "interline_equation", + "content": "R (T) \\leq \\frac {D ^ {2}}{2 \\eta} \\operatorname {T r} \\left(\\boldsymbol {H} _ {T}\\right) + \\frac {\\eta}{2} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {H} _ {t}} ^ {*}\\right) ^ {2}. \\tag {5}", + "image_path": "b70678ea877473203152ae60aa8d3979882f3818f9e3f04e4699cea8e325ad46.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "text", + "content": "Our goal is to find a proper sequence of PSD matrices " + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\{\\pmb{H}_1, \\pmb{H}_2, \\dots, \\pmb{H}_T\\}" + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "text", + "content": " to minimize the regret bound in Eq (4) or (5). Duchi et al. 
[5] suggested to adopt " + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\pmb{H}_T = (\\sum_{t=1}^{T} \\pmb{g}_t \\pmb{g}_t^\\top)^{\\frac{1}{2}}" + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "text", + "content": " as the full matrix regularization matrix. However, it is hard to directly use it for DNN optimization due to the high dimension of parameter space. Therefore, Duchi et al. simplified this full-matrix " + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "inline_equation", + "content": "\\pmb{H}_T" + }, + { + "bbox": [ + 46, + 258, + 287, + 354 + ], + "type": "text", + "content": " with its diagonal elements, resulting in the AdaGrad algorithm [5]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 366, + 287, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 366, + 287, + 393 + ], + "spans": [ + { + "bbox": [ + 47, + 366, + 287, + 393 + ], + "type": "text", + "content": "3. A General Regret Bound for Constrained Preconditioned Gradient" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 400, + 196, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 400, + 196, + 414 + ], + "spans": [ + { + "bbox": [ + 47, + 400, + 196, + 414 + ], + "type": "text", + "content": "3.1. The General Regret Bound" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "spans": [ + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": "Directly adopting a full-matrix " + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "inline_equation", + "content": "H_{t}" + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": " is absurd for optimizing a DNN because it is hard or even prohibitive to compute and store such a high-dimensional matrix. Hence, we need to reduce the dimension of " + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "inline_equation", + "content": "H_{t}" + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": " with a constraint set " + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": ", e.g., the set of the block-diagonal matrices [5]. In this section, we aim to construct a general and practical full-matrix regularization term in Eq. (2) to achieve the low regret bound in Eq. (4). For a general constraint set " + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "inline_equation", + "content": "\\Psi \\subseteq \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": ", if it is a cone (i.e., " + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "inline_equation", + "content": "\\forall x\\in \\Psi ,\\theta >0,\\theta x\\in \\Psi" + }, + { + "bbox": [ + 46, + 419, + 287, + 551 + ], + "type": "text", + "content": " holds), we have the following Theorem 1 and Lemma 2, whose proofs can be found in the supplementary materials." 
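The preconditioned step in Eq. (3) and the full-matrix accumulator suggested by Duchi et al. [5] are easy to prototype at small scale. The NumPy sketch below only illustrates the update rule and its diagonal (AdaGrad-style) simplification; the function names, the toy dimension and the damping constant are assumptions made here and are not taken from the paper's released code.

```python
import numpy as np

def full_matrix_adagrad_step(w, past_grads, lr=0.1, eps=1e-8):
    """One step of Eq. (3) with H_t = (sum_t g_t g_t^T)^{1/2} built from past gradients."""
    G = sum(np.outer(g, g) for g in past_grads)            # sum_t g_t g_t^T (PSD)
    vals, vecs = np.linalg.eigh(G)
    H = vecs @ np.diag(np.sqrt(np.clip(vals, 0.0, None))) @ vecs.T   # G^{1/2}
    H += eps * np.eye(w.size)                              # small dampening for invertibility
    return w - lr * np.linalg.solve(H, past_grads[-1])     # w_{t+1} = w_t - eta * H^{-1} g_t

def diagonal_adagrad_step(w, past_grads, lr=0.1, eps=1e-8):
    """Same step with H restricted to its diagonal, i.e. the classical AdaGrad update."""
    h = np.sqrt(sum(g * g for g in past_grads)) + eps      # diag of (sum_t g_t ⊙ g_t)^{1/2}
    return w - lr * past_grads[-1] / h

rng = np.random.default_rng(0)
w = rng.standard_normal(4)
past_grads = [rng.standard_normal(4) for _ in range(5)]
print(full_matrix_adagrad_step(w, past_grads))
print(diagonal_adagrad_step(w, past_grads))
```

Both functions perform the same mirror-descent step; they differ only in how much of the preconditioner's structure is kept, which is exactly the trade-off the constraint set Ψ formalizes.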
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "spans": [ + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "text", + "content": "Theorem 1 For any cone constraint " + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "inline_equation", + "content": "\\Psi \\subseteq \\mathbb{R}^{d\\times d}" + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "text", + "content": ", we define a guide function " + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "inline_equation", + "content": "F_{T}(S)" + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 46, + 552, + 287, + 576 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 576, + 287, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 576, + 287, + 597 + ], + "spans": [ + { + "bbox": [ + 111, + 576, + 287, + 597 + ], + "type": "interline_equation", + "content": "F _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2}, \\tag {6}", + "image_path": "ec7f9aaa272d6dff6e9f886bb9803adaa6b762ef093acb20aea4c30501181f5e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 594, + 185, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 594, + 185, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 594, + 185, + 605 + ], + "type": "text", + "content": "and then define the matrix " + }, + { + "bbox": [ + 47, + 594, + 185, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_T" + }, + { + "bbox": [ + 47, + 594, + 185, + 605 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 605, + 286, + 623 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 605, + 286, + 623 + ], + "spans": [ + { + "bbox": [ + 53, + 605, + 286, + 623 + ], + "type": "interline_equation", + "content": "\\boldsymbol {H} _ {T} = C _ {T} \\boldsymbol {S} _ {T}, \\quad \\boldsymbol {S} _ {T} = \\arg \\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S}), \\tag {7}", + "image_path": "04f0a5d047a5ef75edeae7c26272d91e2fc23d4b491d846f0e72b67b10a59d82.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 624, + 286, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 624, + 286, + 647 + ], + "spans": [ + { + "bbox": [ + 46, + 624, + 286, + 647 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 46, + 624, + 286, + 647 + ], + "type": "inline_equation", + "content": "C_T = \\sqrt{F_T(S_T)}" + }, + { + "bbox": [ + 46, + 624, + 286, + 647 + ], + "type": "text", + "content": ". 
The regret of online mirror descent holds that" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 73, + 647, + 287, + 701 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 647, + 287, + 701 + ], + "spans": [ + { + "bbox": [ + 73, + 647, + 287, + 701 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} R (T) \\leq \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) C _ {T} \\tag {8} \\\\ = \\left(\\frac {D ^ {2}}{2 \\eta} + \\eta\\right) \\sqrt {\\min _ {\\boldsymbol {S} \\in \\Psi , \\boldsymbol {S} \\succeq \\mathbf {0} , T r (\\boldsymbol {S}) \\leq 1} F _ {T} (\\boldsymbol {S})}. \\\\ \\end{array}", + "image_path": "55bc210c69f581b967e25910c2e58f03b10af9ca48bc0196928498a5dd05876a.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 59, + 702, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 702, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 59, + 702, + 287, + 714 + ], + "type": "text", + "content": "The above theorem reveals that minimizing the guide" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": "function " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "F_{T}(S)" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": " on cone " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": " will minimize the regret bound of the preconditioned gradient descent algorithm simultaneously. More importantly, given a cone constraint " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", the optimal " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "H_{T} = C_{T} S_{T}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": " that achieves the lowest regret bound can be obtained by optimizing Eq. (7). From Theorem 1, we can know that the regret " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "R(T) \\leq O(\\sqrt{\\min_{S \\in \\Psi, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)})" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ". If two cones satisfy " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "\\Psi_{1} \\subseteq \\Psi_{2}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "inline_equation", + "content": "\\sqrt{\\min_{S \\in \\Psi_{2}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)} \\leq \\sqrt{\\min_{S \\in \\Psi_{1}, S \\geq 0, \\operatorname{Tr}(S) \\leq 1} F_{T}(S)}" + }, + { + "bbox": [ + 304, + 72, + 545, + 203 + ], + "type": "text", + "content": ". This also explains why full-matrix regularization can achieve the lowest regret bound. 
In addition, we have the following lemma:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "text", + "content": "Lemma 2 Suppose that " + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "text", + "content": " is the set of either diagonal matrices or full-matrices, according to the definition of " + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "inline_equation", + "content": "S_{T}" + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 204, + 545, + 239 + ], + "type": "text", + "content": " in Eq. (7), we have" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 239, + 545, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 239, + 545, + 281 + ], + "spans": [ + { + "bbox": [ + 309, + 239, + 545, + 281 + ], + "type": "interline_equation", + "content": "\\boldsymbol {H} _ {T} = \\operatorname {D i a g} \\left(\\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\odot \\boldsymbol {g} _ {t}\\right) ^ {\\odot \\frac {1}{2}}\\right), \\quad \\boldsymbol {H} _ {T} = \\left(\\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) ^ {\\frac {1}{2}}. \\tag {9}", + "image_path": "c94c84f84e0307bb52575987185ccbe08572f2c32562fcb321c9669af05bba00.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 282, + 545, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 545, + 317 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 545, + 317 + ], + "type": "text", + "content": "From Lemma 2, we can easily see that the diagonal and full matrices used in AdaGrad [5] are two special cases of the results in Theorem 1." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 326, + 506, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 326, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 305, + 326, + 506, + 339 + ], + "type": "text", + "content": "3.2. Layer-wise Block-diagonal Constraint" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "content": "In practice, we need to choose a proper constraint set " + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "content": " to regularize the structure of matrix " + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "content": ". The diagonal constraint is the simplest constraint. However, it results in a very low effective dimension of " + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "content": " so that the regret bound is high. 
We aim to find a more effective and practical constraint set over " + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 345, + 545, + 416 + ], + "type": "text", + "content": " for DNN optimization." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "spans": [ + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": "Instead of considering the full-matrix regularization of all parameters, one can consider the full-matrix regularization of parameters within one DNN layer. Similar ideas have been adopted in KFAC [7] and Shampoo [9], which assume that the matrix " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": " has a block diagonal structure and each sub-block matrix is used for one layer of a DNN. Suppose matrices " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "S_{l}" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "H_{l}" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": " are for the " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": "-th layer, and " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "g_{l}" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": " is the gradient of weight in the " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": "-th layer, in order to obtain the updating formula with block-diagonal constraint, we could minimize the guide function " + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "inline_equation", + "content": "F_{T}(S)" + }, + { + "bbox": [ + 304, + 417, + 545, + 536 + ], + "type": "text", + "content": ". There is" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 322, + 536, + 545, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 536, + 545, + 567 + ], + "spans": [ + { + "bbox": [ + 322, + 536, + 545, + 567 + ], + "type": "interline_equation", + "content": "F _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S}} ^ {*}\\right) ^ {2} = \\sum_ {l = 1} ^ {L} \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {l, t} \\right\\| _ {\\boldsymbol {S} _ {l}} ^ {*}\\right) ^ {2}. 
\\tag {10}", + "image_path": "f70ff14f1abb4324f4d4107627af749e35084529179e1574a2f040a19ccbb492.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "spans": [ + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": "The above equation shows that the original optimization problem can be divided into a number of " + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": " sub-problems, and we can solve these sub-problems independently. For the convenience of expression, we omit the subscript " + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 304, + 567, + 545, + 639 + ], + "type": "text", + "content": " and analyze the sub-problem within one layer of a DNN in the following development." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 306, + 647, + 482, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 647, + 482, + 658 + ], + "spans": [ + { + "bbox": [ + 306, + 647, + 482, + 658 + ], + "type": "text", + "content": "3.3. Kronecker-factorized Constraint" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "content": "Because the dimension of the parameter space of one DNN layer can still be very high, we need to further constrain the structure of " + }, + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 304, + 666, + 545, + 713 + ], + "type": "text", + "content": ". The Kronecker-factorized constraint can be used to significantly reduce the parameter di" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7868" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": "mension within one layer [7, 9]. 
To be specific, for a fully-connected layer with weight " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{W} \\in \\mathbb{R}^{C_{out} \\times C_{in}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{w} = \\mathrm{vec}(\\pmb{W})" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ", its corresponding gradient is " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{G} \\in \\mathbb{R}^{C_{out} \\times C_{in}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{g} = \\mathrm{vec}(\\pmb{G})" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{S} = \\pmb{S}_1 \\otimes \\pmb{S}_2" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{S}_1 \\in \\mathbb{R}^{C_{out} \\times C_{out}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{S}_2 \\in \\mathbb{R}^{C_{in} \\times C_{in}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\pmb{S} \\in \\mathbb{R}^{C_{in} C_{out} \\times C_{in} C_{out}}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": " is Kronecker product. 
Since " + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "inline_equation", + "content": "(S_1 \\otimes S_2)^{-1} = S_1^{-1} \\otimes S_2^{-1}" + }, + { + "bbox": [ + 47, + 72, + 289, + 155 + ], + "type": "text", + "content": ", what we need to minimize becomes" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 47, + 155, + 296, + 211 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 155, + 296, + 211 + ], + "spans": [ + { + "bbox": [ + 47, + 155, + 296, + 211 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} F _ {T} (\\boldsymbol {S}) = \\sum_ {t = 1} ^ {T} \\left(\\left\\| \\boldsymbol {g} _ {t} \\right\\| _ {\\boldsymbol {S} _ {1} \\otimes \\boldsymbol {S} _ {2}} ^ {*}\\right) ^ {2} = \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} ^ {\\top} \\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\boldsymbol {g} _ {t} \\\\ = \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\sum_ {t = 1} ^ {T} \\boldsymbol {g} _ {t} \\boldsymbol {g} _ {t} ^ {\\top}\\right) \\tag {11} \\\\ \\end{array}", + "image_path": "a945d14d91da5f343f32ccf3f2501c52bb6296902a5361158788c01bf2644d3b.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 212, + 284, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 212, + 284, + 224 + ], + "spans": [ + { + "bbox": [ + 47, + 212, + 284, + 224 + ], + "type": "text", + "content": "under the constraints " + }, + { + "bbox": [ + 47, + 212, + 284, + 224 + ], + "type": "inline_equation", + "content": "\\{S_1, S_2 \\succeq 0, \\operatorname{Tr}(S_1) \\leq 1, \\operatorname{Tr}(S_2) \\leq 1\\}" + }, + { + "bbox": [ + 47, + 212, + 284, + 224 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": "Nevertheless, directly minimizing the " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "F_{T}(\\mathbf{S})" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " in Eq. (11) is still difficult, and we construct an upper bound of " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "F_{T}(\\mathbf{S})" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " to minimize. 
Since " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\pmb{g} = \\frac{1}{n}\\sum_{i=1}^{n}\\pmb{g}_{i}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\pmb{g}_{i}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " is the gradient of sample " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " is the batch size, and " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\pmb{g}_{i} = \\mathrm{vec}(\\delta_{i}\\pmb{x}_{i}^{T}) = \\delta_{i} \\otimes \\pmb{x}_{i}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\pmb{x}_{i}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " is the input feature and " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": " is the output feature gradient of sample " + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 47, + 225, + 287, + 297 + ], + "type": "text", + "content": ", we have the following lemma." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "text", + "content": "Lemma 3 Denote by " + }, + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "inline_equation", + "content": "L_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\delta_{ti} \\delta_{ti}^{\\top}" + }, + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{T} = \\sum_{t=1}^{T} \\sum_{i=1}^{n} \\mathbf{x}_{ti} \\mathbf{x}_{ti}^{\\top}" + }, + { + "bbox": [ + 47, + 297, + 287, + 323 + ], + "type": "text", + "content": ", there is" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 323, + 287, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 323, + 287, + 378 + ], + "spans": [ + { + "bbox": [ + 64, + 323, + 287, + 378 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} F _ {T} (\\boldsymbol {S}) \\leq \\operatorname {T r} \\left(\\left(\\boldsymbol {S} _ {1} ^ {- 1} \\otimes \\boldsymbol {S} _ {2} ^ {- 1}\\right) \\frac {1}{n} \\sum_ {t = 1} ^ {T} \\sum_ {i = 1} ^ {n} \\boldsymbol {g} _ {t i} \\boldsymbol {g} _ {t i} ^ {\\top}\\right) \\tag {12} \\\\ \\leq \\frac {1}{n} T r (\\boldsymbol {S} _ {1} ^ {- 1} \\boldsymbol {L} _ {T}) T r (\\boldsymbol {S} _ {2} ^ {- 1} \\boldsymbol {R} _ {T}). 
\\\\ \\end{array}", + "image_path": "8f5175e1ef68b391846ff20e3484bf9195f55ea848d83e9c8046beb05c238278.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "text", + "content": "We minimize the upper bound of " + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "inline_equation", + "content": "F_{T}(S)" + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "text", + "content": " defined in Lemma 3. One can see that the upper bound can be divided into two independent problems w.r.t. " + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "inline_equation", + "content": "S_{1}" + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "inline_equation", + "content": "S_{2}" + }, + { + "bbox": [ + 47, + 379, + 287, + 426 + ], + "type": "text", + "content": ", respectively, which are" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 426, + 287, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 287, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 287, + 456 + ], + "type": "interline_equation", + "content": "\\min _ {S _ {1} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {1}) \\leq 1} \\operatorname {T r} \\left(S _ {1} ^ {- 1} L _ {T}\\right) \\text {a n d} \\min _ {S _ {2} \\succeq \\mathbf {0}, \\operatorname {T r} (S _ {2}) \\leq 1} \\operatorname {T r} \\left(S _ {2} ^ {- 1} R _ {T}\\right). \\tag {13}", + "image_path": "ef4dc2be25f48cbecbc45b00b984e6e5b7172d75e836ef989787f114a8aa27cd.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 456, + 286, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 286, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 286, + 479 + ], + "type": "text", + "content": "To solve the above problem, we have the following lemma: Lemma 4 If " + }, + { + "bbox": [ + 47, + 456, + 286, + 479 + ], + "type": "inline_equation", + "content": "A \\succ 0" + }, + { + "bbox": [ + 47, + 456, + 286, + 479 + ], + "type": "text", + "content": ", we have:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 479, + 287, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 479, + 287, + 498 + ], + "spans": [ + { + "bbox": [ + 69, + 479, + 287, + 498 + ], + "type": "interline_equation", + "content": "\\arg \\min _ {\\boldsymbol {S} \\succeq \\mathbf {0}, T r (\\boldsymbol {S}) \\leq 1} T r (\\boldsymbol {S} ^ {- 1} \\boldsymbol {A}) = \\boldsymbol {A} ^ {\\frac {1}{2}} / T r (\\boldsymbol {A} ^ {\\frac {1}{2}}). \\tag {14}", + "image_path": "d71aa0752f135c4ea5f0f6a13fbaa8741e661f319d4625b221c9deffc61e4a48.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": "The proofs of Lemma 3 and Lemma 4 can be found in the supplementary materials. According to Lemma 4, we know that the solution of Eq. 
(13) is " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "S_{1,T} = L_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(L_{T}^{\\frac{1}{2}})" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "S_{2,T} = R_{T}^{\\frac{1}{2}} / \\mathrm{Tr}(R_{T}^{\\frac{1}{2}})" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": ". In practice, " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "L_{T}" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "R_{T}" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": " will be added with a dampening term " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "\\epsilon I" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": " to ensure that they are symmetric and positive definite. Without considering the magnitude of " + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 47, + 500, + 287, + 590 + ], + "type": "text", + "content": ", we can set" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 590, + 287, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 590, + 287, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 590, + 287, + 628 + ], + "type": "inline_equation", + "content": "\\pmb{H}_{T} = \\pmb{H}_{1,T}\\otimes \\pmb{H}_{2,T},\\pmb{H}_{1,T} = \\pmb{L}_{T}^{\\frac{1}{2}},\\pmb{H}_{2,T} = \\pmb{R}_{T}^{\\frac{1}{2}}." + }, + { + "bbox": [ + 47, + 590, + 287, + 628 + ], + "type": "text", + "content": " (15) Then according to the property of Kronecker product, the online mirror descent updating formula in Eq. (3) becomes" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 628, + 286, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 628, + 286, + 640 + ], + "spans": [ + { + "bbox": [ + 102, + 628, + 286, + 640 + ], + "type": "interline_equation", + "content": "\\boldsymbol {W} _ {t + 1} = \\boldsymbol {W} _ {t} - \\eta \\boldsymbol {H} _ {1, t} ^ {- 1} \\boldsymbol {G} _ {t} \\boldsymbol {H} _ {2, t} ^ {- 1}. \\tag {16}", + "image_path": "a287492d7b43235b123d359f5036d509fe793ee4b4eab8955308cd252740ecdb.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 641, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 641, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 47, + 641, + 287, + 687 + ], + "type": "text", + "content": "We ignore the magnitude of " + }, + { + "bbox": [ + 47, + 641, + 287, + 687 + ], + "type": "inline_equation", + "content": "H_{T}" + }, + { + "bbox": [ + 47, + 641, + 287, + 687 + ], + "type": "text", + "content": " here because it will have no impact on the result after we introduce a gradient norm recovery operation in the algorithm, which will be described in the next section." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 714 + ], + "type": "text", + "content": "Finally, the proposed vanilla optimizer, termed AdaBK, is summarized in Algorithm 1." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 306, + 72, + 449, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 72, + 449, + 84 + ], + "spans": [ + { + "bbox": [ + 306, + 72, + 449, + 84 + ], + "type": "text", + "content": "4. Detailed Implementation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 92, + 545, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 92, + 545, + 152 + ], + "spans": [ + { + "bbox": [ + 304, + 92, + 545, + 152 + ], + "type": "text", + "content": "The proposed AdaBK in Algorithm 1 involves the calculation of matrix inverse root, which may be unstable and inefficient. For an efficient and effective implementation of AdaBK in training DNNs, we propose a series of techniques." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "spans": [ + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": "Efficient Matrix Inverse Root. As shown in Algorithm 1, we need to calculate the matrix inverse root of " + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": ". Traditional approaches usually use SVD to calculate it. Notwithstanding, SVD is inefficient and the existing deep learning frameworks (e.g., PyTorch) do not implement SVD on GPU well, making the training unstable or even not converging. Instead of using SVD, we adopt the Schur-Newton algorithm [8] to compute the matrix inverse root. For matrix " + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "inline_equation", + "content": "Y_{0} = A / \\operatorname{Tr}(A)" + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "inline_equation", + "content": "Z_{0} = I" + }, + { + "bbox": [ + 304, + 153, + 546, + 273 + ], + "type": "text", + "content": ". 
The Schur-Newton algorithm adopts the following iterations:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 273, + 545, + 309 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 273, + 545, + 309 + ], + "spans": [ + { + "bbox": [ + 315, + 273, + 545, + 309 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} \\boldsymbol {T} _ {k} = \\frac {1}{2} \\left(3 \\boldsymbol {I} - \\boldsymbol {Z} _ {k - 1} \\boldsymbol {Y} _ {k - 1}\\right); \\\\ \\boldsymbol {Y} _ {k} = \\boldsymbol {Y} _ {k - 1} \\boldsymbol {T} _ {k}, \\boldsymbol {Z} _ {k} = \\boldsymbol {T} _ {k} \\boldsymbol {Z} _ {k - 1}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {17}", + "image_path": "05548af2cd226abb170dbb6f741ba96dc216a9b8de718f279aa6150502851ca9.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "text", + "content": "Then we have " + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "inline_equation", + "content": "A^{\\frac{1}{2}} \\approx Y_K \\sqrt{\\operatorname{Tr}(A)}" + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "inline_equation", + "content": "A^{-\\frac{1}{2}} \\approx Z_K / \\sqrt{\\operatorname{Tr}(A)}" + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "text", + "content": ". In practice, we find that setting " + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "inline_equation", + "content": "K = 10" + }, + { + "bbox": [ + 304, + 309, + 545, + 346 + ], + "type": "text", + "content": " can achieve good enough precision for our problem." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "spans": [ + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": "Statistics Updating. In Algorithm 1, " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " accumulate the statistics of output feature gradient " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "\\Delta_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and input feature " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "X_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": ", respectively. Hence the amplitude of " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " will increase during training. 
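The coupled iteration in Eq. (17) can be written in a few lines. The sketch below assumes a symmetric positive definite input and uses an illustrative test matrix with K = 10; it follows the recursion as stated, but it is not the exact routine used in the paper's implementation.

```python
import numpy as np

def schur_newton_roots(A, K=10):
    """Approximate A^{1/2} and A^{-1/2} for symmetric positive definite A via Eq. (17)."""
    d = A.shape[0]
    tr = np.trace(A)
    Y = A / tr                      # Y_0 = A / Tr(A), normalized so the iteration converges
    Z = np.eye(d)                   # Z_0 = I
    for _ in range(K):
        T = 0.5 * (3.0 * np.eye(d) - Z @ Y)
        Y, Z = Y @ T, T @ Z         # Y_k = Y_{k-1} T_k,  Z_k = T_k Z_{k-1}
    return Y * np.sqrt(tr), Z / np.sqrt(tr)   # A^{1/2} ≈ Y_K sqrt(Tr A), A^{-1/2} ≈ Z_K / sqrt(Tr A)

rng = np.random.default_rng(0)
B = rng.standard_normal((6, 6))
A = B @ B.T + 0.5 * np.eye(6)       # symmetric positive definite test matrix
sqrtA, inv_sqrtA = schur_newton_roots(A)
print(np.linalg.norm(sqrtA @ sqrtA - A))                       # small residual
print(np.linalg.norm(inv_sqrtA @ A @ inv_sqrtA - np.eye(6)))   # small residual
```

Because only matrix products appear inside the loop, the iteration maps well onto GPU kernels, which is the motivation given above for preferring it over SVD.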
After certain iterations, the effective learning rate will become small, making the learning process inefficient. To solve this issue, we use the exponential moving average of " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": ". Meanwhile, it is unnecessary to compute " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}, R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": ", and their inverse root in each iteration. Two hyper-parameters " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "T_{s}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "T_{ir}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " are introduced to control the frequency of updating " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and their inverse root, respectively. This infrequent statistics updating strategy can significantly improve efficiency with a little performance drop. We use two additional statistics " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "\\widehat{L}_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "\\widehat{R}_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " to restore the matrix inverse root of " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "L_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 304, + 347, + 546, + 527 + ], + "type": "text", + "content": " (please refer to Algorithm 2)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "spans": [ + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": "Dampening Strategy. 
When the dimensions of " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\Delta_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "X_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " are high, " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "L_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "R_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " tend to be singular matrices with large condition numbers. A dampening term " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\epsilon I" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " should be added into " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "L_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "R_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " to improve their condition number and enhance the stability of computing inverse root. As in [33], we adopt an adaptive dampening parameter " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\epsilon \\lambda_{max}" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\lambda_{max}" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " is the max singular value of the matrix " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "L_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "R_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": ". With this setting, the condition number will be " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\frac{\\lambda_{max} + \\epsilon \\lambda_{max}}{\\lambda_{min} + \\epsilon \\lambda_{max}} \\leq \\frac{1 + \\epsilon}{\\epsilon}" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": ", bounded by a value determined by " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": ". 
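As a quick numeric sanity check of this bound (with hypothetical singular values, not numbers from the paper), the dampened statistic stays well conditioned even when the raw matrix is nearly singular:

```python
eps = 1e-5                      # dampening hyper-parameter used throughout the paper
lmax, lmin = 50.0, 1e-12        # hypothetical extreme singular values of L_t or R_t
cond_raw = lmax / lmin                                   # ~5e13: numerically unusable
cond_damped = (lmax + eps * lmax) / (lmin + eps * lmax)  # ~1e5
print(cond_damped <= (1 + eps) / eps)                    # bound from the text holds
```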
Meanwhile, the maximum singular value of the symmetric matrix (" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "L_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "inline_equation", + "content": "R_t" + }, + { + "bbox": [ + 304, + 529, + 546, + 672 + ], + "type": "text", + "content": ") can be efficiently obtained by the power iteration method [2] as follows:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 357, + 682, + 545, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 682, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 357, + 682, + 545, + 712 + ], + "type": "interline_equation", + "content": "\\left\\{ \\begin{array}{l} \\boldsymbol {v} _ {k} = \\boldsymbol {A} \\boldsymbol {u} _ {k - 1}, \\\\ \\boldsymbol {u} _ {k} = \\boldsymbol {v} _ {k} / | | \\boldsymbol {v} _ {k} | | _ {2}, k = 1, 2, \\dots , K. \\end{array} \\right. \\tag {18}", + "image_path": "19a86556e5cdafc2723e5fe55bd9b0ad387197113b645d01f6af4def258da81c.jpg" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7869" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 51, + 112, + 257, + 250 + ], + "blocks": [ + { + "bbox": [ + 51, + 75, + 261, + 110 + ], + "lines": [ + { + "bbox": [ + 51, + 75, + 261, + 110 + ], + "spans": [ + { + "bbox": [ + 51, + 75, + 261, + 110 + ], + "type": "text", + "content": "Algorithm 1: AdaBK (Adaptive Regularization with Block-diagonal and Kronecker-factorized Constraints)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "lines": [ + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "spans": [ + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "W_{0},L_{0} = \\epsilon I_{C_{out}},R_{0} = \\epsilon I_{C_{in}},\\eta" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " Output: " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "W_{T}" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " \n1 for " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "t = 1:T" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " do \n2 Receive " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "X_{t} = [\\pmb{x}_{ti}]_{i = 1}^{n}" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " by forward propagation; \n3 Receive " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "\\Delta_t = [\\delta_{ti}]_i^n" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " by backward propagation; \n4 Compute gradient " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 51, + 
112, + 257, + 250 + ], + "type": "text", + "content": " . \n5 Update preconditioners: \n6 " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "L_{t} = L_{t - 1} + \\Delta_{t}\\Delta_{t}^{\\top};" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " \n7 " + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "R_{t} = R_{t - 1} + X_{t}X_{t}^{\\top};" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " \n8 Update weight: \n" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "inline_equation", + "content": "W_{t + 1} = W_t - \\eta L_t^{-\\frac{1}{2}}G_tR_t^{-\\frac{1}{2}};" + }, + { + "bbox": [ + 51, + 112, + 257, + 250 + ], + "type": "text", + "content": " \n9 end" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "spans": [ + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "text", + "content": "We use " + }, + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "inline_equation", + "content": "\\lambda_{max} \\approx ||\\pmb{v}_K||_2" + }, + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "text", + "content": " for our proposed adaptive dampening and set " + }, + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 46, + 282, + 287, + 306 + ], + "type": "text", + "content": " to 10 in our implementation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "spans": [ + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "content": "Gradient Norm Recovery. Since the amplitude of the preconditioned gradient " + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "inline_equation", + "content": "L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}}" + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "content": " may significantly differ from the amplitude of original " + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "content": ", the optimal learning rate and weight decay will also differ from the original optimizer. It is expected that the well-tuned hyperparameters in current optimizers (e.g., SGDM, AdamW) can be directly used in our proposed AdaBK optimizer without further hyper-parameter tuning. 
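The following PyTorch sketch shows the power iteration of Eq. (18) and the adaptive dampening it supports; the helper names are ours, and the input is assumed to be the symmetric statistic L_t or R_t, whose largest singular value coincides with its largest eigenvalue.

```python
import torch

def power_iteration_lambda_max(A: torch.Tensor, K: int = 10) -> torch.Tensor:
    """Estimate the largest singular value of a symmetric matrix A (Eq. (18))."""
    u = torch.randn(A.shape[0], dtype=A.dtype, device=A.device)
    u = u / u.norm()
    v = u
    for _ in range(K):
        v = A @ u
        u = v / (v.norm() + 1e-12)   # guard against an all-zero statistic
    return v.norm()                  # lambda_max ~= ||v_K||_2

def dampened(A: torch.Tensor, eps: float = 1e-5, K: int = 10) -> torch.Tensor:
    """Add the adaptive term eps * lambda_max * I before taking the inverse root."""
    lam = power_iteration_lambda_max(A, K)
    return A + eps * lam * torch.eye(A.shape[0], dtype=A.dtype, device=A.device)
```

The dampened matrix is then passed to the Schur-Newton routine sketched above to obtain the inverse roots used as preconditioners.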
To this end, we follow the strategy in [33] to re-scale the amplitude of the preconditioned gradient " + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "inline_equation", + "content": "\\widehat{G}_{t} = L_{t}^{-\\frac{1}{2}} G_{t} R_{t}^{-\\frac{1}{2}}" + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "content": " to the original gradient " + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 46, + 306, + 287, + 443 + ], + "type": "text", + "content": " by multiplying it with a scaling factor, i.e.," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 129, + 445, + 287, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 445, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 129, + 445, + 287, + 472 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {G}} _ {t} = \\widehat {\\boldsymbol {G}} _ {t} \\frac {\\left| \\left| \\boldsymbol {G} _ {t} \\right| \\right| _ {2}}{\\left| \\left| \\widehat {\\boldsymbol {G}} _ {t} \\right| \\right| _ {2}}. \\tag {19}", + "image_path": "1b7dab141f8ca2a2e5bf2a0e3b69b7af4799987b26fcacea39bad0f5e106e9d5.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "spans": [ + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": "It is easy to know that " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "\\tilde{G}_t" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": " have the same " + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 46, + 474, + 287, + 521 + ], + "type": "text", + "content": " norm. With gradient norm recovery, the proposed AdaBK method can be easily embedded into existing optimizers without much extra hyperparameter tuning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": "Convolutional Layer. We have discussed the optimization of FC layers in Section 3. For the Conv layer, the derivation process is similar. 
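In code, the preconditioning-plus-recovery step of Eq. (19) for an FC layer reduces to a few lines; this is a hedged sketch with illustrative tensor names, not the authors' implementation:

```python
import torch

def precondition_with_norm_recovery(G: torch.Tensor,
                                    L_inv_root: torch.Tensor,
                                    R_inv_root: torch.Tensor) -> torch.Tensor:
    """Apply the two-sided preconditioner and restore the original gradient norm."""
    G_hat = L_inv_root @ G @ R_inv_root            # \hat{G}_t = L_t^{-1/2} G_t R_t^{-1/2}
    return G_hat * (G.norm() / (G_hat.norm() + 1e-12))   # Eq. (19): keep ||G_t||_2
```

Because the returned update has the same L2 norm as the raw gradient, the learning rate and weight decay tuned for SGDM or AdamW remain sensible.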
The convolution operation can be formulated as matrix multiplication with the " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "im2col" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " operation [31, 36], and then the Conv layer can be viewed as an FC layer with " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathfrak{A} = \\mathcal{U}_1(W)\\mathfrak{X}" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathfrak{A}" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathfrak{X}" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " are the output and input features after " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "im2col" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " operation, and " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_1(\\cdot)" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " is the mode 1 unfold operation of a tensor. For example, for a convolution weight " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{W} \\in \\mathbb{R}^{C_{out} \\times C_{in} \\times k_1 \\times k_2}" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_1(\\mathbf{W}) \\in \\mathbb{R}^{C_{out} \\times C_{in} k_1 k_2}" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{U}_1(\\mathbf{W})" + }, + { + "bbox": [ + 46, + 522, + 287, + 665 + ], + "type": "text", + "content": " can be considered as the weight of the FC layer, and the remaining computation is the same as the FC layer." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "spans": [ + { + "bbox": [ + 46, + 665, + 287, + 713 + ], + "type": "text", + "content": "**Embedding AdaBK into SGDM and AdamW.** With the above-introduced techniques, a more efficient and practical implementation of AdaBK can be obtained. 
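The unfolding just described can be sketched in PyTorch as follows; the shapes are hypothetical and the snippet only illustrates why the Conv case reduces to the FC case:

```python
import torch
import torch.nn.functional as F

# Hypothetical shapes: W in R^{C_out x C_in x k1 x k2}, x a batch of feature maps.
C_out, C_in, k1, k2 = 8, 3, 3, 3
W = torch.randn(C_out, C_in, k1, k2)
x = torch.randn(4, C_in, 16, 16)

W_unf = W.reshape(C_out, C_in * k1 * k2)      # U_1(W): mode-1 unfolding of the kernel
X_unf = F.unfold(x, kernel_size=(k1, k2))     # im2col: (batch, C_in*k1*k2, num_patches)

# The convolution is now an FC-style matrix product per sample, so the
# statistics L_t and R_t can be accumulated exactly as for an FC layer.
out = W_unf @ X_unf                            # (batch, C_out, num_patches)
print(out.shape)
```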
The one-step preconditioned gradient of AdaBK is summarized in" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 306, + 99, + 539, + 273 + ], + "blocks": [ + { + "bbox": [ + 310, + 75, + 518, + 97 + ], + "lines": [ + { + "bbox": [ + 310, + 75, + 518, + 97 + ], + "spans": [ + { + "bbox": [ + 310, + 75, + 518, + 97 + ], + "type": "text", + "content": "Algorithm 2: One Step Preconditioned Gradient of AdaBK" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "lines": [ + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "spans": [ + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "T_{s}, T_{ir}, \\alpha, \\epsilon, \\beta, L_{t-1}, R_{t-1}, \\widehat{L}_{t-1}, \\widehat{R}_{t-1}, X_{t} = [x_{ti}]_{i=1}^{n}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\Delta_{t} = [\\delta_{ti}]_{i=1}^{n}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "G_{t} = \\nabla W_{t} \\mathcal{L}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \nOutput: " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\tilde{G}_{t}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n1 if " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "t \\% T_{s} = 0" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " then \n2 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}L_{t} = \\alpha L_{t-1} + (1 - \\alpha)\\Delta_{t}\\Delta_{t}^{\\top};\\\\ R_{t} = \\alpha R_{t-1} + (1 - \\alpha)X_{t}X_{t}^{\\top};\\end{array}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n3 else \n4 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "L_{t} = L_{t-1}\\mathrm{~and~}R_{t} = R_{t-1};" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n5 end \n6 if " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "t \\% T_{ir} = 0" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " then \n7 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\begin{array}{r}\\mathrm{Compute~}\\lambda_{max}^{L}\\mathrm{~and~}\\lambda_{max}^{R}\\mathrm{~by~Power~Iteration~Eq.~(18);}\\\\ \\mathrm{Compute~}\\widehat{L}_{t} = (L_{t} + \\lambda_{max}^{L}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~and~}\\widehat{R}_{t} = (R_{t} + \\lambda_{max}^{R}\\epsilon I)^{-\\frac{1}{2}}\\mathrm{~by~Schur-Newton~Iteration~Eq.~(17)};\\end{array}" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n8 else \n9 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\widehat{L}_{t} = \\widehat{L}_{t-1}\\mathrm{~and~}\\widehat{R}_{t} = \\widehat{R}_{t-1};" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n10 end \n11 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\widehat{G}_{t} = \\widehat{L}_{t}G_{t}\\widehat{R}_{t};" + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "text", + "content": " \n12 " + }, + { + "bbox": [ + 306, + 99, + 539, + 273 + ], + "type": "inline_equation", + "content": "\\tilde{G}_{t} = \\widehat{G}_{t}\\frac{||G_{t}||_{2}}{||\\widehat{G}_{t}||_{2}};" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "algorithm" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": "Algorithm 2. For an FC layer, the complexity of AdaBK is " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "T(O(\\frac{C_{in}^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2 + C_{out}^2)N}{T_s}) + O(C_{in}C_{out}(C_{in} + C_{out})))" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": " is the total number of iterations. For a Conv layer, its complexity is " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "T(O(\\frac{C_{in}^3k_1^3k_2^3 + C_{out}^3}{T_{ir}}) + O(\\frac{(C_{in}^2k_1^2k_2^2 + C_{out}^2)N}{T_s}) + O(C_{in}k_1k_2C_{out}(C_{in}k_1k_2 + C_{out})))" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": ". In our implementation, " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "T_s" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "T_{ir}" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": " are set to 200 and 2000, respectively, and the complexity is acceptable. In practice, it only costs " + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "inline_equation", + "content": "10\\% \\sim 25\\%" + }, + { + "bbox": [ + 304, + 282, + 545, + 386 + ], + "type": "text", + "content": " additional training time." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 388, + 545, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 545, + 472 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 545, + 472 + ], + "type": "text", + "content": "AdaBK can be embedded into many existing optimizers. 
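Putting the pieces together, a deliberately simplified sketch of the one-step preconditioned gradient in Algorithm 2 might look as follows; it reuses the schur_newton_sqrt and dampened helpers sketched earlier, keeps state in a plain dict, and is not the authors' implementation:

```python
import torch

def adabk_step(state, G, X, Delta, alpha=0.9, eps=1e-5, Ts=200, Tir=2000):
    """One step of AdaBK preconditioning for an FC layer (Algorithm 2, simplified).

    state caches t, the statistics L and R, and their inverse roots L_hat, R_hat.
    G: gradient (C_out x C_in), X: input features (C_in x n),
    Delta: output-feature gradients (C_out x n).
    """
    t = state["t"] = state.get("t", 0) + 1
    if t % Ts == 0 or "L" not in state:
        # exponential moving average of the statistics, updated every Ts steps
        state["L"] = alpha * state.get("L", torch.zeros_like(G @ G.T)) + (1 - alpha) * Delta @ Delta.T
        state["R"] = alpha * state.get("R", torch.zeros_like(G.T @ G)) + (1 - alpha) * X @ X.T
    if t % Tir == 0 or "L_hat" not in state:
        # refresh the dampened inverse roots only every Tir steps
        _, state["L_hat"] = schur_newton_sqrt(dampened(state["L"], eps))
        _, state["R_hat"] = schur_newton_sqrt(dampened(state["R"], eps))
    G_hat = state["L_hat"] @ G @ state["R_hat"]
    return G_hat * (G.norm() / (G_hat.norm() + 1e-12))   # gradient norm recovery, Eq. (19)
```

The returned update can then be fed to the momentum and weight-decay logic of SGDM or AdamW unchanged, which is how the SGDM_BK and AdamW_BK variants below are obtained.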
In this paper, we embed it into the two commonly used DNN optimizers, i.e., SGDM and AdamW (or Adam), and name the obtained new optimizers as SGDM_BK and AdamW_BK accordingly. The detailed algorithms of SGDM_BK and AdamW_BK are summarized in the supplementary materials." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 306, + 489, + 387, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 489, + 387, + 502 + ], + "spans": [ + { + "bbox": [ + 306, + 489, + 387, + 502 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "spans": [ + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "content": "We evaluate the proposed SGDM_BK and AdamW_BK optimizers on typical vision tasks, including image classification (on CIFAR100/CIFAR10 [16] and ImageNet [27]), object detection and segmentation (on COCO [17]). For the hyper-parameters of SGDM_BK and AdamW_BK, we set " + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "inline_equation", + "content": "\\alpha = 0.9" + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "inline_equation", + "content": "T_s = 200" + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "inline_equation", + "content": "T_{ir} = 2000" + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "inline_equation", + "content": "\\epsilon = 0.00001" + }, + { + "bbox": [ + 304, + 511, + 545, + 642 + ], + "type": "text", + "content": " throughout the experiments if not specified. Ablation studies on hyper-parameter selection can be found in the supplementary material. All experiments are conducted under the Pytorch 1.11 framework with NVIDIA GeForce RTX 2080Ti and 3090 Ti GPUs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 657, + 423, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 657, + 423, + 669 + ], + "spans": [ + { + "bbox": [ + 306, + 657, + 423, + 669 + ], + "type": "text", + "content": "5.1. 
Image Classification" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 545, + 713 + ], + "type": "text", + "content": "In the image classification task, we compare SGDM_BK and AdamW_BK with the representative and state-of-the-art DNN optimizers, including SGDM, AdamW [22], Ada" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7870" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 99, + 545, + 206 + ], + "blocks": [ + { + "bbox": [ + 46, + 70, + 547, + 95 + ], + "lines": [ + { + "bbox": [ + 46, + 70, + 547, + 95 + ], + "spans": [ + { + "bbox": [ + 46, + 70, + 547, + 95 + ], + "type": "text", + "content": "Table 1. Testing accuracies (%) on CIFAR100/CIFAR10. The best and second best results are highlighted in bold and italic fonts, respectively. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 99, + 545, + 206 + ], + "lines": [ + { + "bbox": [ + 49, + 99, + 545, + 206 + ], + "spans": [ + { + "bbox": [ + 49, + 99, + 545, + 206 + ], + "type": "table", + "html": "
CIFAR100
Optimizer | SGDM | AdamW | Adagrad | RAdam | Adabelief | Shampoo | KFAC | WSGDM | SGDM_BK | AdamW_BK
ResNet18 | 77.20 ± .30 | 77.23 ± .10 | 71.55 ± .25 | 77.05 ± .15 | 77.43 ± .36 | 71.81 ± .40 | 78.25 ± .23 | 79.28 ± .27 | 79.30 ± .07 (↑2.10) | 78.66 ± .34 (↑1.43)
ResNet50 | 77.78 ± .43 | 78.10 ± .17 | 72.20 ± .15 | 78.20 ± .15 | 79.08 ± .23 | 71.31 ± .53 | 79.25 ± .26 | 80.90 ± .23 | 81.26 ± .20 (↑3.48) | 80.15 ± .19 (↑2.05)
VGG11 | 70.80 ± .29 | 71.20 ± .29 | 67.70 ± .18 | 71.08 ± .24 | 72.45 ± .16 | 63.56 ± .44 | 72.75 ± .31 | 73.42 ± .28 | 73.89 ± .13 (↑3.09) | 73.09 ± .29 (↑1.89)
VGG19 | 70.94 ± .32 | 70.26 ± .23 | 63.30 ± .58 | 73.01 ± .20 | 72.39 ± .27 | 65.62 ± .56 | 73.87 ± .43 | 74.82 ± .23 | 75.10 ± .13 (↑4.16) | 74.27 ± .25 (↑4.01)
DenseNet121 | 79.53 ± .19 | 78.05 ± .26 | 71.27 ± .79 | 78.65 ± .05 | 79.88 ± .08 | 74.95 ± .42 | 79.84 ± .33 | 81.23 ± .10 | 81.18 ± .27 (↑1.65) | 79.93 ± .23 (↑1.88)
CIFAR10
ResNet18 | 95.10 ± .07 | 94.80 ± .10 | 92.83 ± .12 | 94.70 ± .18 | 95.12 ± .14 | 92.94 ± .27 | 95.01 ± .12 | 95.43 ± .08 | 95.44 ± .12 (↑0.34) | 95.22 ± .13 (↑0.42)
ResNet50 | 94.75 ± .30 | 94.72 ± .10 | 92.55 ± .39 | 94.72 ± .10 | 95.35 ± .05 | 92.61 ± .27 | 95.43 ± .16 | 95.80 ± .15 | 95.86 ± .05 (↑1.11) | 95.40 ± .07 (↑0.68)
VGG11 | 92.17 ± .19 | 92.02 ± .08 | 90.25 ± .25 | 92.00 ± .18 | 92.45 ± .18 | 89.01 ± .29 | 92.82 ± .11 | 92.95 ± .20 | 93.14 ± .26 (↑0.97) | 92.96 ± .07 (↑0.94)
VGG19 | 93.61 ± .06 | 93.40 ± .04 | 91.28 ± .14 | 93.57 ± .11 | 93.58 ± .12 | 90.62 ± .32 | 93.47 ± .09 | 93.91 ± .19 | 94.03 ± .15 (↑0.42) | 93.94 ± .10 (↑0.54)
DenseNet121 | 95.37 ± .17 | 94.80 ± .07 | 92.95 ± .23 | 95.02 ± .08 | 95.37 ± .04 | 94.37 ± .36 | 95.18 ± .22 | 95.72 ± .14 | 95.70 ± .13 (↑0.33) | 95.40 ± .04 (↑0.60)
", + "image_path": "1e458a83e48bff462a72c215eabc418ab8b6b1a0106bd004682fd243e8f21a3d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 50, + 217, + 171, + 338 + ], + "blocks": [ + { + "bbox": [ + 50, + 217, + 171, + 338 + ], + "lines": [ + { + "bbox": [ + 50, + 217, + 171, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 217, + 171, + 338 + ], + "type": "image", + "image_path": "95d5f8a360e7c716612609b9293b9013e21361a49fc4fd77af00cfb6266402c3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 346, + 547, + 369 + ], + "lines": [ + { + "bbox": [ + 46, + 346, + 547, + 369 + ], + "spans": [ + { + "bbox": [ + 46, + 346, + 547, + 369 + ], + "type": "text", + "content": "Figure 2. Training loss curves (loss vs. epoch and loss vs. time) of SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 before 60 epochs." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 217, + 296, + 338 + ], + "blocks": [ + { + "bbox": [ + 173, + 217, + 296, + 338 + ], + "lines": [ + { + "bbox": [ + 173, + 217, + 296, + 338 + ], + "spans": [ + { + "bbox": [ + 173, + 217, + 296, + 338 + ], + "type": "image", + "image_path": "342c6df23d7af2849d785567183a0e67e17121371633e6f349bd1565c9b0530d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 298, + 217, + 420, + 338 + ], + "blocks": [ + { + "bbox": [ + 298, + 217, + 420, + 338 + ], + "lines": [ + { + "bbox": [ + 298, + 217, + 420, + 338 + ], + "spans": [ + { + "bbox": [ + 298, + 217, + 420, + 338 + ], + "type": "image", + "image_path": "e6f4a094088fc4b52cba4d7286a65c07b6c6751b262e4a8636b43bcf09b7d936.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 421, + 217, + 543, + 338 + ], + "blocks": [ + { + "bbox": [ + 421, + 217, + 543, + 338 + ], + "lines": [ + { + "bbox": [ + 421, + 217, + 543, + 338 + ], + "spans": [ + { + "bbox": [ + 421, + 217, + 543, + 338 + ], + "type": "image", + "image_path": "72a9d11cb4697d5f6359940bc160b3616e4d5d8d935f4e88871b8bf1d6fd6d89.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 389, + 287, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 389, + 287, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 389, + 287, + 450 + ], + "type": "text", + "content": "grad [5], RAdam [19]1, and Adabelief [38]2, Shampoo [9]3, KFAC [7] [9]4, WSGDM [33]5. We tune learning rate and weight decay for each optimizer with grid search and the detailed settings for different optimizers can be found in the supplementary material." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "spans": [ + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": "Results on CIFAR100/10: We first testify the effectiveness of SGDM_BK and AdamW_BK with different DNN models on CIFAR100/CIFAR10 [16], including ResNet18, ResNet50 [12], VGG11 VGG19 [29] and DenseNet-121 [13]. All the DNN models are trained for 200 epochs with batch size 128 on one GPU. The learning rate is multiplied by 0.1 for every 60 epochs. 
The experiments are repeated 4 times and the results are reported in a \"mean " + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": " std\" format in Table 1. We can see that SGDM_BK and AdamW_BK achieve significant improvements over SGDM and AdamW, which are " + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "inline_equation", + "content": "1.44\\% \\sim 4.16\\%" + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "inline_equation", + "content": "1.43\\% \\sim 4.01\\%" + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": " on CIFAR100, and " + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "inline_equation", + "content": "0.28\\% \\sim 1.11\\%" + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "inline_equation", + "content": "0.42\\% \\sim 0.94\\%" + }, + { + "bbox": [ + 46, + 451, + 288, + 630 + ], + "type": "text", + "content": " on CIFAR10, respectively. They also surpass other compared optimizers for most of the used backbone networks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 390, + 546, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 546, + 510 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 546, + 510 + ], + "type": "text", + "content": "Figure 2 shows the curves of training loss vs. epoch and training loss vs. time for SGDM, SGDM_BK, AdamW and AdamW_BK on CIFAR100 with ResNet18 and ResNet50 backbones before 60 epochs. One can see that SGDM_BK and AdamW_BK can significantly speed up the training process of SGDM and AdamW, respectively. Since SGDM_BK and AdamW_BK cost additional time in each iteration, for a fair comparison, we also show the curves of training loss vs. time. One can see that they still have great advantages over the original SGDM and AdamW." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "spans": [ + { + "bbox": [ + 304, + 510, + 547, + 700 + ], + "type": "text", + "content": "Results on ImageNet-1k: To testify that SGDM_BK and AdamW_BK can also work well on large-scale datasets, we evaluate them on ImageNet-1k [27], which contains 1000 categories with 1.28 million images for training and 50K images for validation. ResNet18 and ResNet50 are selected as the backbone models with training batch size 256 on 4 GPUs, and the training settings follow the work in [3, 38]. The learning rate is multiplied by 0.1 for every 30 epochs. SGDM_BK and AdamW_BK adopt the same learning rate and weight decay as SGDM and AdamW, respectively. The top 1 accuracies on the validation set are reported in Table 2. One can see that SGDM_BK and AdamW_BK perform better than others. Meanwhile, we plot the training and validation accuracy curves in Figure 3, from which we see that the proposed AdaBK technique can largely speed up the training process." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "spans": [ + { + "bbox": [ + 317, + 701, + 545, + 714 + ], + "type": "text", + "content": "We also evaluate the proposed optimizer on Swin" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 59, + 639, + 229, + 650 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 639, + 229, + 650 + ], + "spans": [ + { + "bbox": [ + 59, + 639, + 229, + 650 + ], + "type": "text", + "content": "1https://github.com/LiyuanLucasLiu/RAdam" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 651, + 286, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 651, + 286, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 651, + 286, + 666 + ], + "type": "text", + "content": "2https : / / github . com / jintang - zhuang / Adabelief - Optimizer" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 59, + 667, + 248, + 676 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 667, + 248, + 676 + ], + "spans": [ + { + "bbox": [ + 59, + 667, + 248, + 676 + ], + "type": "text", + "content": "3 https://github.com/moskomule/shampoo.pytorch" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 59, + 677, + 240, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 677, + 240, + 685 + ], + "spans": [ + { + "bbox": [ + 59, + 677, + 240, + 685 + ], + "type": "text", + "content": "4 https://github.com/alecwangcq/KFAC-Pytorch" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 59, + 686, + 267, + 694 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 686, + 267, + 694 + ], + "spans": [ + { + "bbox": [ + 59, + 686, + 267, + 694 + ], + "type": "text", + "content": "5 https://github.com/Yonghongwei/W-SGDM-and-W-Adam" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 695, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 695, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 695, + 286, + 712 + ], + "type": "text", + "content": "The model can be downloaded at https://github.com/weiaicunzai/pytorch-cifar100." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "7871" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 96, + 544, + 133 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 547, + 93 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 547, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 547, + 93 + ], + "type": "text", + "content": "Table 2. Top 1 accuracy (%) on the validation set of ImageNet-1k. The numbers in red color indicate the improvement of SGDM_BK/AdamW_BK over SGDM/AdamW, respectively." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 96, + 544, + 133 + ], + "lines": [ + { + "bbox": [ + 50, + 96, + 544, + 133 + ], + "spans": [ + { + "bbox": [ + 50, + 96, + 544, + 133 + ], + "type": "table", + "html": "
Optimizer | SGDM | AdamW | Adagrad | RAdam | Adabelief | Shampoo | KFAC | WSGDM | SGDM_BK | AdamW_BK
ResNet18 | 70.49 | 70.01 | 62.22 | 69.92 | 70.08 | 64.45 | 69.62 | 71.43 | 71.59 (↑1.10) | 71.63 (↑1.62)
ResNet50 | 76.31 | 76.02 | 69.38 | 76.12 | 76.22 | 70.11 | 76.36 | 77.48 | 77.62 (↑1.31) | 77.22 (↑1.10)
", + "image_path": "0591e86f56914f831cfe5f436840f6971540ddc2f2cc27cd48bfaf600224af72.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 52, + 145, + 175, + 266 + ], + "blocks": [ + { + "bbox": [ + 52, + 145, + 175, + 266 + ], + "lines": [ + { + "bbox": [ + 52, + 145, + 175, + 266 + ], + "spans": [ + { + "bbox": [ + 52, + 145, + 175, + 266 + ], + "type": "image", + "image_path": "1f8f79953f2cfcfc736cabc6a9c4c433abbcae3e6536c60505982b20eb1d5e42.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 46, + 276, + 546, + 299 + ], + "lines": [ + { + "bbox": [ + 46, + 276, + 546, + 299 + ], + "spans": [ + { + "bbox": [ + 46, + 276, + 546, + 299 + ], + "type": "text", + "content": "Figure 3. Training and validation accuracy curves of SGDM, SGDM_BK, AdamW and AdamW_BK on ImageNet-1k with ResNet18 and ResNet50 backbones." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 175, + 145, + 296, + 267 + ], + "blocks": [ + { + "bbox": [ + 175, + 145, + 296, + 267 + ], + "lines": [ + { + "bbox": [ + 175, + 145, + 296, + 267 + ], + "spans": [ + { + "bbox": [ + 175, + 145, + 296, + 267 + ], + "type": "image", + "image_path": "af82e3214ce84bc5a3024da478fa12b056a663a0e82d29b6cc990b42897a12b8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 296, + 145, + 419, + 266 + ], + "blocks": [ + { + "bbox": [ + 296, + 145, + 419, + 266 + ], + "lines": [ + { + "bbox": [ + 296, + 145, + 419, + 266 + ], + "spans": [ + { + "bbox": [ + 296, + 145, + 419, + 266 + ], + "type": "image", + "image_path": "33fc8a4f43f004b3ae7f1e2d9898d9d0ffe10d3ec650adacac282db493b85408.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 420, + 145, + 544, + 266 + ], + "blocks": [ + { + "bbox": [ + 420, + 145, + 544, + 266 + ], + "lines": [ + { + "bbox": [ + 420, + 145, + 544, + 266 + ], + "spans": [ + { + "bbox": [ + 420, + 145, + 544, + 266 + ], + "type": "image", + "image_path": "d64f394059c8fa4a7ac7c40f47b579034fb45d0875459760cb7d399532f1c38f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 93, + 324, + 240, + 360 + ], + "blocks": [ + { + "bbox": [ + 47, + 310, + 286, + 321 + ], + "lines": [ + { + "bbox": [ + 47, + 310, + 286, + 321 + ], + "spans": [ + { + "bbox": [ + 47, + 310, + 286, + 321 + ], + "type": "text", + "content": "Table 3. Top 1 accuracy (%) on the validation set of ImageNet-1k." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 93, + 324, + 240, + 360 + ], + "lines": [ + { + "bbox": [ + 93, + 324, + 240, + 360 + ], + "spans": [ + { + "bbox": [ + 93, + 324, + 240, + 360 + ], + "type": "table", + "html": "
Optimizer | AdamW | AdamW_BK
Swin-T | 81.18 | 81.79 (↑0.61)
Swin-B | 83.02 | 83.14 (↑0.12)
", + "image_path": "8c7e519dc96d5939cbd2fc9ceeeb9c42f449300b3542655f2ff63dc54bd4ec58.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 371, + 287, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 371, + 287, + 433 + ], + "spans": [ + { + "bbox": [ + 46, + 371, + 287, + 433 + ], + "type": "text", + "content": "transformer [20] backbone. We compare AdamW_BK with their default optimizer AdamW. The configurations follow the settings of the official MMClassification toolbox7. The results are shown in Table 3. We can see AdamW_BK can also achieve certain performance gain over AdamW." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 440, + 201, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 440, + 201, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 440, + 201, + 453 + ], + "type": "text", + "content": "5.2. Detection and Segmentation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 459, + 287, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 459, + 287, + 601 + ], + "spans": [ + { + "bbox": [ + 46, + 459, + 287, + 601 + ], + "type": "text", + "content": "We then evaluate SGDM_BK and AdamW_BK on COCO [17] detection and segmentation tasks to show that they can work well beyond classification tasks and can be used to fine-tune pre-trained models. The models are pre-trained on ImageNet1k and fine-tuned on COCO train2017 (118K images), and then evaluated on COCO val2017 (40K images). The latest version of MMDetection toolbox [4] is used as to train our models. We test SGDM_BK and AdamW_BK by Faster-RCNN [25] and Mask-RCNN [11] with various backbones, including ResNet50 (R50), ResNet101 (R101) and Swin transformer [20]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 602, + 287, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 602, + 287, + 687 + ], + "spans": [ + { + "bbox": [ + 46, + 602, + 287, + 687 + ], + "type": "text", + "content": "As mentioned in Section 4, with the gradient norm recovery operation, we can directly adopt the same hyperparameters (i.e., learning rate and weight decay) of SGDM and AdamW into SGDM_BK and AdamW_BK, respectively. To be specific, for R50 and R101 backbones, we compare the proposed optimizer with SGDM, WSGDM and AdamW. The learning rate and weight decay are set to" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 311, + 545, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 311, + 545, + 335 + ], + "spans": [ + { + "bbox": [ + 306, + 311, + 545, + 335 + ], + "type": "text", + "content": "0.02 and 0.0001 for SGDM, WSGDM and SGDM_BK, and 0.0001 and 0.2 for AdamW and AdamW_BK, respectively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 336, + 546, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 336, + 546, + 420 + ], + "spans": [ + { + "bbox": [ + 304, + 336, + 546, + 420 + ], + "type": "text", + "content": "For the Swin transformer backbone, the learning rate and weight decay are set to 0.0001 and 0.02 for AdamW and AdamW_BK, respectively. The learning rate schedule is 1X for Faster-RCNN. Other configurations follow the settings of the official MMDetection toolbox8. For the default optimizers, we use their official results9. This experiment is conducted on NVIDIA GeForce RTX 3090 Ti GPUs." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "spans": [ + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": "Table 4 lists the Average Precision (AP) of object detection by Faster-RCNN. It can be seen that the models trained by SGDM_BK and AdamW_BK achieve clear performance gains of " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "1.6\\% \\sim 2.2\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " AP for R50 and R101 backbones. Fig. 4 shows the training loss curves of Faster-RCNN with ResNet50 backbone. One can see that SGDM_BK and AdamW_BK accelerate the training process over SGDM and AdamW. Table 5 shows the " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^b" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " of detection and " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^m" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " of segmentation by Mask-RCNN. We can see that SGDM_BK and AdamW_BK gain " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "1.5\\% \\sim 2.2\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^b" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "1.2\\% \\sim 2.2\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^m" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " for R50 and R101 backbones over SGDM and AdamW, respectively. For Swin transformer backbone, AdamW_BK also improves " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "0.7\\% \\sim 0.9\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^b" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "0.3\\% \\sim 0.9\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}^m" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " over AdamW. Meanwhile, compared with WSGDM, the proposed SGDM_BK also outperforms it with " + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "inline_equation", + "content": "0.2\\% \\sim 0.6\\%" + }, + { + "bbox": [ + 304, + 421, + 547, + 650 + ], + "type": "text", + "content": " AP gain. Moreover, Fig. 5 plots the training loss curves of Faster-RCNN with ResNet50, Swin-T (1X) and Swin-S (3X). 
The proposed SGDM_BK and AdamW_BK accelerate the train" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 661, + 495, + 671 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 661, + 495, + 671 + ], + "spans": [ + { + "bbox": [ + 315, + 661, + 495, + 671 + ], + "type": "text", + "content": "8https://github.com/open-mmlab/mmdetection" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 671, + 545, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 671, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 306, + 671, + 545, + 712 + ], + "type": "text", + "content": "9Please refer to https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn,https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn,and https://github.com/open-mmlab/mmdetection/tree/master/ configs/swin." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 693, + 286, + 712 + ], + "type": "text", + "content": "7https://github.com/open-mmlab/mmclassification/tree/master/confers/swin_transformer" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7872" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 115, + 298, + 222 + ], + "blocks": [ + { + "bbox": [ + 47, + 70, + 299, + 104 + ], + "lines": [ + { + "bbox": [ + 47, + 70, + 299, + 104 + ], + "spans": [ + { + "bbox": [ + 47, + 70, + 299, + 104 + ], + "type": "text", + "content": "Table 4. Detection results of Faster-RCNN on COCO. " + }, + { + "bbox": [ + 47, + 70, + 299, + 104 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 47, + 70, + 299, + 104 + ], + "type": "text", + "content": " means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 115, + 298, + 222 + ], + "lines": [ + { + "bbox": [ + 49, + 115, + 298, + 222 + ], + "spans": [ + { + "bbox": [ + 49, + 115, + 298, + 222 + ], + "type": "table", + "html": "
Backbone | Algorithm | AP | AP_50 | AP_75 | AP_S | AP_M | AP_L
R50 | SGDM* | 37.4 | 58.1 | 40.4 | 21.2 | 41.0 | 48.1
 | WSGDM | 39.4 | 60.6 | 43.1 | 23.1 | 42.9 | 50.7
 | SGDM_BK | 39.6 | 60.7 | 42.8 | 22.6 | 42.9 | 52.2
 | Δ | ↑2.2 | ↑2.6 | ↑2.4 | ↑1.4 | ↑1.9 | ↑4.1
 | AdamW | 37.8 | 58.7 | 41.0 | 22.1 | 41.2 | 49.2
 | AdamW_BK | 39.4 | 60.3 | 42.9 | 22.5 | 42.8 | 52.3
 | Δ | ↑1.6 | ↑1.6 | ↑1.9 | ↑0.4 | ↑1.6 | ↑3.1
R101 | SGDM* | 39.4 | 60.1 | 43.1 | 22.4 | 43.7 | 51.1
 | WSGDM | 41.1 | 61.6 | 45.1 | 24.0 | 45.2 | 54.3
 | SGDM_BK | 41.6 | 62.3 | 45.3 | 24.9 | 45.6 | 55.2
 | Δ | ↑2.2 | ↑2.2 | ↑2.2 | ↑2.5 | ↑1.9 | ↑4.1
 | AdamW | 40.1 | 60.6 | 43.8 | 22.9 | 44.1 | 52.8
 | AdamW_BK | 41.7 | 62.1 | 45.5 | 24.4 | 45.4 | 56.2
 | Δ | ↑1.6 | ↑1.5 | ↑1.7 | ↑1.5 | ↑1.3 | ↑3.4
", + "image_path": "3d3453b603220ca1bdb2cb73bfb8ad10a1fed481a0799d6769feb0c40afc88ec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 49, + 277, + 298, + 419 + ], + "blocks": [ + { + "bbox": [ + 47, + 232, + 299, + 266 + ], + "lines": [ + { + "bbox": [ + 47, + 232, + 299, + 266 + ], + "spans": [ + { + "bbox": [ + 47, + 232, + 299, + 266 + ], + "type": "text", + "content": "Table 5. Detection and segmentation results of Mask-RCNN on COCO. " + }, + { + "bbox": [ + 47, + 232, + 299, + 266 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 47, + 232, + 299, + 266 + ], + "type": "text", + "content": " means the gain of SGDM_BK over SGDM or AdamW_BK over AdamW. * indicates the default optimizer." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 277, + 298, + 419 + ], + "lines": [ + { + "bbox": [ + 49, + 277, + 298, + 419 + ], + "spans": [ + { + "bbox": [ + 49, + 277, + 298, + 419 + ], + "type": "table", + "html": "
Backbone | Lr schedule | Algorithm | \( \mathrm{AP}^{b} \) | \( \mathrm{AP}_{50}^{b} \) | \( \mathrm{AP}_{75}^{b} \) | \( \mathrm{AP}^{m} \) | \( \mathrm{AP}_{50}^{m} \) | \( \mathrm{AP}_{75}^{m} \)
R50 | 1X | SGDM* | 38.2 | 58.8 | 41.4 | 34.7 | 55.7 | 37.2
 |  | W-SGDM | 39.8 | 60.8 | 43.4 | 36.4 | 57.6 | 38.9
 |  | SGDM_BK | 40.4 | 61.3 | 43.9 | 36.9 | 58.3 | 39.6
 |  | Δ | ↑2.2 | ↑2.5 | ↑2.5 | ↑2.2 | ↑2.6 | ↑2.4
 |  | AdamW | 37.8 | 58.7 | 41.0 | 35.4 | 56.2 | 38.0
 |  | AdamW_BK | 40.0 | 60.6 | 43.5 | 36.7 | 58.0 | 39.3
 |  | Δ | ↑2.2 | ↑1.9 | ↑2.5 | ↑1.3 | ↑1.8 | ↑1.3
R101 | 1X | SGDM* | 40.0 | 60.5 | 44.0 | 36.1 | 57.5 | 38.6
 |  | W-SGDM | 41.7 | 62.5 | 45.5 | 37.9 | 59.4 | 40.8
 |  | SGDM_BK | 42.2 | 62.9 | 46.1 | 38.1 | 60.0 | 40.7
 |  | Δ | ↑2.2 | ↑2.4 | ↑2.1 | ↑2.0 | ↑2.5 | ↑2.1
 |  | AdamW | 40.7 | 61.1 | 44.6 | 37.2 | 58.4 | 40.1
 |  | AdamW_BK | 42.2 | 62.5 | 46.0 | 38.4 | 59.9 | 41.2
 |  | Δ | ↑1.5 | ↑1.4 | ↑1.4 | ↑1.2 | ↑1.5 | ↑1.1
Swin-T | 1X | AdamW* | 42.7 | 65.2 | 46.8 | 39.3 | 62.2 | 42.2
 |  | AdamW_BK | 43.6 | 65.9 | 47.8 | 40.2 | 63.1 | 43.1
 |  | Δ | ↑0.9 | ↑0.7 | ↑1.0 | ↑0.9 | ↑0.9 | ↑0.9
Swin-T | 3X | AdamW* | 46.0 | 68.2 | 50.3 | 41.6 | 65.3 | 44.7
 |  | AdamW_BK | 46.8 | 68.8 | 51.4 | 42.4 | 66.1 | 45.6
 |  | Δ | ↑0.8 | ↑0.6 | ↑1.1 | ↑0.8 | ↑0.8 | ↑0.9
Swin-S | 3X | AdamW* | 48.2 | 69.8 | 52.8 | 43.2 | 67.0 | 46.1
 |  | AdamW_BK | 48.9 | 70.4 | 53.8 | 43.5 | 67.4 | 46.8
 |  | Δ | ↑0.7 | ↑0.6 | ↑1.0 | ↑0.3 | ↑0.4 | ↑0.7
", + "image_path": "79bae9a7aa2dcd6bbb68846c9bdaa70ef766c6f63e971a9d28bd92026621d3e4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 456, + 287, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 456, + 287, + 505 + ], + "spans": [ + { + "bbox": [ + 46, + 456, + 287, + 505 + ], + "type": "text", + "content": "ing process clearly. The results on COCO demonstrate that the proposed SGDM_BK and AdamW_BK can be easily adopted into the downstream tasks without additional hyper-parameter tuning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 510, + 232, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 510, + 232, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 510, + 232, + 523 + ], + "type": "text", + "content": "5.3. Memory Usage and Training Time" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 529, + 288, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 529, + 288, + 660 + ], + "spans": [ + { + "bbox": [ + 46, + 529, + 288, + 660 + ], + "type": "text", + "content": "For full-matrix adaptive optimizers, one important concern is the training cost, including memory usage and training time. Here we compare the memory and time cost of our optimizers with SGDM [23], AdamW [22] and AdaGrad [5] on CIFAR100. ResNet50 is used as the backbone and one GeForce RTX 2080Ti GPU is used. The results are reported in Table 6. One can see that the embedding of AdaBK slightly increases the memory usage and training time (" + }, + { + "bbox": [ + 46, + 529, + 288, + 660 + ], + "type": "inline_equation", + "content": "10\\% \\sim 25\\%" + }, + { + "bbox": [ + 46, + 529, + 288, + 660 + ], + "type": "text", + "content": " extra training time and memory usage). Compared to the improvement of performance, the extra cost is affordable and worthwhile." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 119, + 681 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 287, + 715 + ], + "type": "text", + "content": "This work presented a general regret bound for the constrained full-matrix preconditioned gradient methods for" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 301, + 73, + 422, + 200 + ], + "blocks": [ + { + "bbox": [ + 301, + 73, + 422, + 200 + ], + "lines": [ + { + "bbox": [ + 301, + 73, + 422, + 200 + ], + "spans": [ + { + "bbox": [ + 301, + 73, + 422, + 200 + ], + "type": "image", + "image_path": "b1b169f9eeaaae4e9972c07fe7d8ebb5fc3c9d1e987f1b1b2125d7553be6532e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 341, + 201, + 503, + 213 + ], + "lines": [ + { + "bbox": [ + 341, + 201, + 503, + 213 + ], + "spans": [ + { + "bbox": [ + 341, + 201, + 503, + 213 + ], + "type": "text", + "content": "Figure 4. Training loss curves of ResNet50." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 424, + 76, + 544, + 199 + ], + "blocks": [ + { + "bbox": [ + 424, + 76, + 544, + 199 + ], + "lines": [ + { + "bbox": [ + 424, + 76, + 544, + 199 + ], + "spans": [ + { + "bbox": [ + 424, + 76, + 544, + 199 + ], + "type": "image", + "image_path": "978f79c9f7191e093db6e1bef859a4fdd2c5551cb6ac45ca52e7fdc3a4ce5c9a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 301, + 228, + 414, + 324 + ], + "blocks": [ + { + "bbox": [ + 301, + 228, + 414, + 324 + ], + "lines": [ + { + "bbox": [ + 301, + 228, + 414, + 324 + ], + "spans": [ + { + "bbox": [ + 301, + 228, + 414, + 324 + ], + "type": "image", + "image_path": "0d9ae172bab8d9066c7c413888d91765f50880626e31a70f410309dfb0def852.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 414, + 228, + 525, + 324 + ], + "blocks": [ + { + "bbox": [ + 414, + 228, + 525, + 324 + ], + "lines": [ + { + "bbox": [ + 414, + 228, + 525, + 324 + ], + "spans": [ + { + "bbox": [ + 414, + 228, + 525, + 324 + ], + "type": "image", + "image_path": "e0b2a34f9902cd702eb05dd0b97a81a67c59f7b6488ec8ec9539bbab16179041.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 301, + 325, + 414, + 427 + ], + "blocks": [ + { + "bbox": [ + 301, + 325, + 414, + 427 + ], + "lines": [ + { + "bbox": [ + 301, + 325, + 414, + 427 + ], + "spans": [ + { + "bbox": [ + 301, + 325, + 414, + 427 + ], + "type": "image", + "image_path": "f8ff2e0f833104299d4ffbb23f3b23038df0be2f88d6f1049955713c9e1c211e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 326, + 430, + 499, + 441 + ], + "lines": [ + { + "bbox": [ + 326, + 430, + 499, + 441 + ], + "spans": [ + { + "bbox": [ + 326, + 430, + 499, + 441 + ], + "type": "text", + "content": "Figure 5. Training loss curves of Mask-RCNN." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 414, + 325, + 525, + 427 + ], + "blocks": [ + { + "bbox": [ + 414, + 325, + 525, + 427 + ], + "lines": [ + { + "bbox": [ + 414, + 325, + 525, + 427 + ], + "spans": [ + { + "bbox": [ + 414, + 325, + 525, + 427 + ], + "type": "image", + "image_path": "98a0f94ae891fabe8b52f2139e5ec9ded803730b82ceedab7712846a3fe7c56c.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 309, + 479, + 543, + 510 + ], + "blocks": [ + { + "bbox": [ + 305, + 454, + 545, + 476 + ], + "lines": [ + { + "bbox": [ + 305, + 454, + 545, + 476 + ], + "spans": [ + { + "bbox": [ + 305, + 454, + 545, + 476 + ], + "type": "text", + "content": "Table 6. Memory cost (MiB) and training time (h) of different optimizers with ResNet50." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 309, + 479, + 543, + 510 + ], + "lines": [ + { + "bbox": [ + 309, + 479, + 543, + 510 + ], + "spans": [ + { + "bbox": [ + 309, + 479, + 543, + 510 + ], + "type": "table", + "html": "
<table><tr><td>Optimizer</td><td>SGDM</td><td>AdamW</td><td>Adagrad</td><td>SGDM_BK</td><td>AdamW_BK</td></tr>
<tr><td>Memory</td><td>5867</td><td>5883</td><td>5865</td><td>6525</td><td>6535</td></tr>
<tr><td>Time</td><td>3.42</td><td>3.48</td><td>3.46</td><td>4.14</td><td>4.20</td></tr></table>
", + "image_path": "e1f4bba1265477c9c0de23327ac4ed6b1a3dbef5f05f21fce1f13227460f43af.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "spans": [ + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": "DNN optimization. Different from previous full-matrix preconditioned methods, where the parameter update formulas are designed heuristically, we proved that given a cone constraint on the full-matrix preconditioner, the corresponding parameter update formula can be obtained by optimizing a guide function. Based on our theoretical analysis, we derived a specific guide function with the layer-wise block-diagonal constraint and Kronecker-factorized constraint. Through optimizing an upper bound of the guide function, a new preconditioned optimization algorithm, namely AdaBK, was obtained. We embedded AdaBK into two widely used optimizers, i.e., SGDM and AdamW, and the experimental results on image classification, object detection and segmentation tasks demonstrated that AdaBK can significantly improve the DNN optimization performance with only " + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "inline_equation", + "content": "10\\% \\sim 25\\%" + }, + { + "bbox": [ + 304, + 521, + 545, + 713 + ], + "type": "text", + "content": " extra computation cost." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7873" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "spans": [ + { + "bbox": [ + 48, + 71, + 106, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 91, + 287, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 91, + 287, + 135 + ], + "type": "text", + "content": "[1] Naman Agarwal, Brian Bullins, Xinyi Chen, Elad Hazan, Karan Singh, Cyril Zhang, and Yi Zhang. Efficient full-matrix adaptive regularization. In International Conference on Machine Learning, pages 102-110. PMLR, 2019. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 135, + 287, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 287, + 157 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 287, + 157 + ], + "type": "text", + "content": "[2] Richard L Burden, J Douglas Faires, and Annette M Burden. Numerical analysis. Cengage learning, 2015. 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 158, + 287, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 158, + 287, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 158, + 287, + 201 + ], + "type": "text", + "content": "[3] Jinghui Chen, Dongruo Zhou, Yiqi Tang, Ziyan Yang, Yuan Cao, and Quanquan Gu. 
Closing the generalization gap of adaptive gradient methods in training deep neural networks. arXiv preprint arXiv:1806.06763, 2018. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 202, + 287, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 202, + 287, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 202, + 287, + 255 + ], + "type": "text", + "content": "[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 287, + 300 + ], + "type": "text", + "content": "[5] John Duchi, Elad Hazan, and Yoram Singer. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(7), 2011. 1, 2, 3, 6, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 287, + 345 + ], + "type": "text", + "content": "[6] Thomas George, César Laurent, Xavier Bouthillier, Nicolas Ballas, and Pascal Vincent. Fast approximate natural gradient descent in a kronecker-factored eigenbasis. arXiv preprint arXiv:1806.03884, 2018. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 346, + 287, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 346, + 287, + 389 + ], + "spans": [ + { + "bbox": [ + 53, + 346, + 287, + 389 + ], + "type": "text", + "content": "[7] Roger Grosse and James Martens. A kronecker-factored approximate fisher matrix for convolution layers. In International Conference on Machine Learning, pages 573-582. PMLR, 2016. 1, 2, 3, 4, 6" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "spans": [ + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "text", + "content": "[8] Chun-Hua Guo and Nicholas J Higham. A schur-newton method for the matrix\\boldsymbol{\\mathrm{boldmath}}" + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "inline_equation", + "content": "\\mathfrak{p}" + }, + { + "bbox": [ + 53, + 391, + 287, + 434 + ], + "type": "text", + "content": "th root and its inverse. SIAM Journal on Matrix Analysis and Applications, 28(3):788-804, 2006. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 435, + 287, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 435, + 287, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 435, + 287, + 479 + ], + "type": "text", + "content": "[9] Vineet Gupta, Tomer Koren, and Yoram Singer. Shampoo: Preconditioned stochastic tensor optimization. In International Conference on Machine Learning, pages 1842-1850. PMLR, 2018. 1, 2, 3, 4, 6" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 480, + 287, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 480, + 287, + 512 + ], + "spans": [ + { + "bbox": [ + 48, + 480, + 287, + 512 + ], + "type": "text", + "content": "[10] Elad Hazan et al. Introduction to online convex optimization. 
Foundations and Trends® in Optimization, 2(3-4):157-325, 2016. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "spans": [ + { + "bbox": [ + 48, + 514, + 287, + 546 + ], + "type": "text", + "content": "[11] Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "spans": [ + { + "bbox": [ + 48, + 547, + 287, + 590 + ], + "type": "text", + "content": "[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "spans": [ + { + "bbox": [ + 48, + 591, + 287, + 635 + ], + "type": "text", + "content": "[13] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4700-4708, 2017. 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 636, + 287, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 636, + 287, + 679 + ], + "spans": [ + { + "bbox": [ + 48, + 636, + 287, + 679 + ], + "type": "text", + "content": "[14] Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. Label propagation for deep semi-supervised learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5070-5079, 2019. 1" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "spans": [ + { + "bbox": [ + 48, + 680, + 287, + 712 + ], + "type": "text", + "content": "[15] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 1, 2" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 307, + 73, + 545, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "spans": [ + { + "bbox": [ + 307, + 73, + 545, + 95 + ], + "type": "text", + "content": "[16] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 96, + 545, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 96, + 545, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 96, + 545, + 150 + ], + "type": "text", + "content": "[17] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 
5, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 545, + 194 + ], + "type": "text", + "content": "[18] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "spans": [ + { + "bbox": [ + 307, + 197, + 545, + 239 + ], + "type": "text", + "content": "[19] Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019. 1, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "spans": [ + { + "bbox": [ + 307, + 241, + 545, + 296 + ], + "type": "text", + "content": "[20] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 307, + 297, + 545, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 297, + 545, + 329 + ], + "spans": [ + { + "bbox": [ + 307, + 297, + 545, + 329 + ], + "type": "text", + "content": "[21] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 307, + 331, + 545, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 331, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 307, + 331, + 545, + 363 + ], + "type": "text", + "content": "[22] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 1, 5, 8" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 307, + 365, + 545, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 365, + 545, + 397 + ], + "spans": [ + { + "bbox": [ + 307, + 365, + 545, + 397 + ], + "type": "text", + "content": "[23] Ning Qian. On the momentum term in gradient descent learning algorithms. Neural networks, 12(1):145-151, 1999. 1, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 399, + 545, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 545, + 442 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 545, + 442 + ], + "type": "text", + "content": "[24] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 307, + 444, + 545, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 444, + 545, + 487 + ], + "spans": [ + { + "bbox": [ + 307, + 444, + 545, + 487 + ], + "type": "text", + "content": "[25] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pages 91-99, 2015. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 307, + 488, + 545, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 488, + 545, + 520 + ], + "spans": [ + { + "bbox": [ + 307, + 488, + 545, + 520 + ], + "type": "text", + "content": "[26] Herbert Robbins and Sutton Monro. A stochastic approximation method. The annals of mathematical statistics, pages 400-407, 1951. 1" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 522, + 545, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 522, + 545, + 577 + ], + "spans": [ + { + "bbox": [ + 307, + 522, + 545, + 577 + ], + "type": "text", + "content": "[27] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015. 5, 6" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "spans": [ + { + "bbox": [ + 307, + 578, + 545, + 611 + ], + "type": "text", + "content": "[28] Shai Shalev-Shwartz et al. Online learning and online convex optimization. Foundations and Trends® in Machine Learning, 4(2):107-194, 2012. 2" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "spans": [ + { + "bbox": [ + 307, + 613, + 545, + 645 + ], + "type": "text", + "content": "[29] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 647, + 545, + 689 + ], + "type": "text", + "content": "[30] Tijmen Tieleman and Geoffrey Hinton. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4:26-31, 2012. 
1" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 307, + 690, + 545, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 690, + 545, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 690, + 545, + 712 + ], + "type": "text", + "content": "[31] Chengxi Ye, Matthew Evanusa, Hua He, Anton Mitrokhin, Tom Goldstein, James A Yorke, Cornelia Fermüller, and" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7874" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 72, + 287, + 365 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 287, + 95 + ], + "type": "text", + "content": "Yiannis Aloimonos. Network deconvolution. arXiv preprint arXiv:1905.11926, 2019. 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "spans": [ + { + "bbox": [ + 48, + 96, + 287, + 140 + ], + "type": "text", + "content": "[32] Hongwei Yong, Jianqiang Huang, Xiansheng Hua, and Lei Zhang. Gradient centralization: A new optimization technique for deep neural networks. In European Conference on Computer Vision, pages 635-652. Springer, 2020. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 287, + 174 + ], + "type": "text", + "content": "[33] Hongwei Yong and Lei Zhang. An embedded feature whitening approach to deep neural network optimization. In the European Conference on Computer Vision, 2022. 4, 5, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 175, + 287, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 175, + 287, + 208 + ], + "spans": [ + { + "bbox": [ + 48, + 175, + 287, + 208 + ], + "type": "text", + "content": "[34] Jihun Yun, Aurelie C Lozano, and Eunho Yang. Stochastic gradient methods with block diagonal matrix adaptation. arXiv preprint arXiv:1905.10757, 2019. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 209, + 287, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 209, + 287, + 231 + ], + "spans": [ + { + "bbox": [ + 48, + 209, + 287, + 231 + ], + "type": "text", + "content": "[35] Matthew D Zeiler. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701, 2012. 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "spans": [ + { + "bbox": [ + 48, + 232, + 287, + 274 + ], + "type": "text", + "content": "[36] Huishuai Zhang, Wei Chen, and Tie-Yan Liu. Train feedforward neural network with layer-wise adaptive rate via approximating back-matching propagation. arXiv preprint arXiv:1802.09750, 2018. 
5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "spans": [ + { + "bbox": [ + 48, + 276, + 287, + 308 + ], + "type": "text", + "content": "[37] Michael R Zhang, James Lucas, Geoffrey Hinton, and Jimmy Ba. Lookahead optimizer: k steps forward, 1 step back. arXiv preprint arXiv:1907.08610, 2019. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 310, + 287, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 310, + 287, + 365 + ], + "spans": [ + { + "bbox": [ + 48, + 310, + 287, + 365 + ], + "type": "text", + "content": "[38] Juntang Zhuang, Tommy Tang, Yifan Ding, Sekhar C Tatikonda, Nicha Dvornek, Xenophon Papademetris, and James Duncan. Adbelief optimizer: Adapting stepsizes by the belief in observed gradients. Advances in neural information processing systems, 33:18795-18806, 2020. 1, 6" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "7875" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file